Exclude resources by inventory path in vsphere input (#6859)
parent d7b3f1f4ea
commit c7b7336da3
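
As an illustration of the new settings (not part of the commit itself; the vCenter URL, credentials, and inventory names below are made up), a configuration that collects all VMs and hosts except those under an excluded folder or cluster could look like this:

[[inputs.vsphere]]
  vcenters = [ "https://vcenter.example.com/sdk" ]
  username = "telegraf"
  password = "secret"

  ## Collect everything under the include paths, then drop anything matching the exclude paths.
  vm_include   = [ "/*/vm/**" ]
  vm_exclude   = [ "/*/vm/test/**" ]
  host_include = [ "/*/host/**" ]
  host_exclude = [ "/*/host/retired-cluster/**" ]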

@@ -31,6 +31,7 @@ vm_metric_exclude = [ "*" ]
   ## VMs
   ## Typical VM metrics (if omitted or empty, all metrics are collected)
   # vm_include = [ "/*/vm/**"] # Inventory path to VMs to collect (by default all are collected)
+  # vm_exclude = [] # Inventory paths to exclude
   vm_metric_include = [
     "cpu.demand.average",
     "cpu.idle.summation",

@@ -73,6 +74,7 @@ vm_metric_exclude = [ "*" ]
   ## Hosts
   ## Typical host metrics (if omitted or empty, all metrics are collected)
   # host_include = [ "/*/host/**"] # Inventory path to hosts to collect (by default all are collected)
+  # host_exclude [] # Inventory paths to exclude
   host_metric_include = [
     "cpu.coreUtilization.average",
     "cpu.costop.summation",

@@ -130,18 +132,21 @@ vm_metric_exclude = [ "*" ]

   ## Clusters
   # cluster_include = [ "/*/host/**"] # Inventory path to clusters to collect (by default all are collected)
+  # cluster_exclude = [] # Inventory paths to exclude
   # cluster_metric_include = [] ## if omitted or empty, all metrics are collected
   # cluster_metric_exclude = [] ## Nothing excluded by default
   # cluster_instances = false ## false by default

   ## Datastores
   # cluster_include = [ "/*/datastore/**"] # Inventory path to datastores to collect (by default all are collected)
+  # cluster_exclude = [] # Inventory paths to exclude
   # datastore_metric_include = [] ## if omitted or empty, all metrics are collected
   # datastore_metric_exclude = [] ## Nothing excluded by default
   # datastore_instances = false ## false by default

   ## Datacenters
   # datacenter_include = [ "/*/host/**"] # Inventory path to clusters to collect (by default all are collected)
+  # datacenter_exclude = [] # Inventory paths to exclude
   datacenter_metric_include = [] ## if omitted or empty, all metrics are collected
   datacenter_metric_exclude = [ "*" ] ## Datacenters are not collected by default.
   # datacenter_instances = false ## false by default

@@ -32,8 +32,6 @@ var isIPv6 = regexp.MustCompile("^(?:[A-Fa-f0-9]{0,4}:){1,7}[A-Fa-f0-9]{1,4}$")
 
 const metricLookback = 3 // Number of time periods to look back at for non-realtime metrics
 
-const rtMetricLookback = 3 // Number of time periods to look back at for realtime metrics
-
 const maxSampleConst = 10 // Absolute maximim number of samples regardless of period
 
 const maxMetadataSamples = 100 // Number of resources to sample for metric metadata

@@ -67,6 +65,7 @@ type resourceKind struct {
     objects          objectMap
     filters          filter.Filter
     paths            []string
+    excludePaths     []string
     collectInstances bool
     getObjects       func(context.Context, *Endpoint, *ResourceFilter) (objectMap, error)
     include          []string

@@ -132,6 +131,7 @@ func NewEndpoint(ctx context.Context, parent *VSphere, url *url.URL) (*Endpoint,
             objects:          make(objectMap),
             filters:          newFilterOrPanic(parent.DatacenterMetricInclude, parent.DatacenterMetricExclude),
             paths:            parent.DatacenterInclude,
+            excludePaths:     parent.DatacenterExclude,
             simple:           isSimple(parent.DatacenterMetricInclude, parent.DatacenterMetricExclude),
             include:          parent.DatacenterMetricInclude,
             collectInstances: parent.DatacenterInstances,

@@ -149,6 +149,7 @@ func NewEndpoint(ctx context.Context, parent *VSphere, url *url.URL) (*Endpoint,
             objects:          make(objectMap),
             filters:          newFilterOrPanic(parent.ClusterMetricInclude, parent.ClusterMetricExclude),
             paths:            parent.ClusterInclude,
+            excludePaths:     parent.ClusterExclude,
             simple:           isSimple(parent.ClusterMetricInclude, parent.ClusterMetricExclude),
             include:          parent.ClusterMetricInclude,
             collectInstances: parent.ClusterInstances,

@@ -166,6 +167,7 @@ func NewEndpoint(ctx context.Context, parent *VSphere, url *url.URL) (*Endpoint,
             objects:          make(objectMap),
             filters:          newFilterOrPanic(parent.HostMetricInclude, parent.HostMetricExclude),
             paths:            parent.HostInclude,
+            excludePaths:     parent.HostExclude,
             simple:           isSimple(parent.HostMetricInclude, parent.HostMetricExclude),
             include:          parent.HostMetricInclude,
             collectInstances: parent.HostInstances,

@@ -183,6 +185,7 @@ func NewEndpoint(ctx context.Context, parent *VSphere, url *url.URL) (*Endpoint,
             objects:          make(objectMap),
             filters:          newFilterOrPanic(parent.VMMetricInclude, parent.VMMetricExclude),
             paths:            parent.VMInclude,
+            excludePaths:     parent.VMExclude,
             simple:           isSimple(parent.VMMetricInclude, parent.VMMetricExclude),
             include:          parent.VMMetricInclude,
             collectInstances: parent.VMInstances,

@@ -199,6 +202,7 @@ func NewEndpoint(ctx context.Context, parent *VSphere, url *url.URL) (*Endpoint,
             objects:          make(objectMap),
             filters:          newFilterOrPanic(parent.DatastoreMetricInclude, parent.DatastoreMetricExclude),
             paths:            parent.DatastoreInclude,
+            excludePaths:     parent.DatastoreExclude,
             simple:           isSimple(parent.DatastoreMetricInclude, parent.DatastoreMetricExclude),
             include:          parent.DatastoreMetricInclude,
             collectInstances: parent.DatastoreInstances,

@@ -329,32 +333,36 @@ func (e *Endpoint) getDatacenterName(ctx context.Context, client *Client, cache
     path := make([]string, 0)
     returnVal := ""
     here := r
-    for {
-        if name, ok := cache[here.Reference().String()]; ok {
-            // Populate cache for the entire chain of objects leading here.
-            returnVal = name
-            break
-        }
-        path = append(path, here.Reference().String())
-        o := object.NewCommon(client.Client.Client, r)
-        var result mo.ManagedEntity
-        ctx1, cancel1 := context.WithTimeout(ctx, e.Parent.Timeout.Duration)
-        defer cancel1()
-        err := o.Properties(ctx1, here, []string{"parent", "name"}, &result)
-        if err != nil {
-            e.Parent.Log.Warnf("Error while resolving parent. Assuming no parent exists. Error: %s", err.Error())
-            break
-        }
-        if result.Reference().Type == "Datacenter" {
-            // Populate cache for the entire chain of objects leading here.
-            returnVal = result.Name
-            break
-        }
-        if result.Parent == nil {
-            e.Parent.Log.Debugf("No parent found for %s (ascending from %s)", here.Reference(), r.Reference())
-            break
-        }
-        here = result.Parent.Reference()
+    done := false
+    for !done {
+        done = func() bool {
+            if name, ok := cache[here.Reference().String()]; ok {
+                // Populate cache for the entire chain of objects leading here.
+                returnVal = name
+                return true
+            }
+            path = append(path, here.Reference().String())
+            o := object.NewCommon(client.Client.Client, r)
+            var result mo.ManagedEntity
+            ctx1, cancel1 := context.WithTimeout(ctx, e.Parent.Timeout.Duration)
+            defer cancel1()
+            err := o.Properties(ctx1, here, []string{"parent", "name"}, &result)
+            if err != nil {
+                e.Parent.Log.Warnf("Error while resolving parent. Assuming no parent exists. Error: %s", err.Error())
+                return true
+            }
+            if result.Reference().Type == "Datacenter" {
+                // Populate cache for the entire chain of objects leading here.
+                returnVal = result.Name
+                return true
+            }
+            if result.Parent == nil {
+                e.Parent.Log.Debugf("No parent found for %s (ascending from %s)", here.Reference(), r.Reference())
+                return true
+            }
+            here = result.Parent.Reference()
+            return false
+        }()
     }
     for _, s := range path {
         cache[s] = returnVal

@@ -389,43 +397,51 @@ func (e *Endpoint) discover(ctx context.Context) error {
     // Populate resource objects, and endpoint instance info.
     newObjects := make(map[string]objectMap)
     for k, res := range e.resourceKinds {
-        e.Parent.Log.Debugf("Discovering resources for %s", res.name)
-        // Need to do this for all resource types even if they are not enabled
-        if res.enabled || k != "vm" {
-            rf := ResourceFilter{
-                finder:  &Finder{client},
-                resType: res.vcName,
-                paths:   res.paths}
-
-            ctx1, cancel1 := context.WithTimeout(ctx, e.Parent.Timeout.Duration)
-            defer cancel1()
-            objects, err := res.getObjects(ctx1, e, &rf)
-            if err != nil {
-                return err
-            }
-
-            // Fill in datacenter names where available (no need to do it for Datacenters)
-            if res.name != "Datacenter" {
-                for k, obj := range objects {
-                    if obj.parentRef != nil {
-                        obj.dcname = e.getDatacenterName(ctx, client, dcNameCache, *obj.parentRef)
-                        objects[k] = obj
-                    }
-                }
-            }
-
-            // No need to collect metric metadata if resource type is not enabled
-            if res.enabled {
-                if res.simple {
-                    e.simpleMetadataSelect(ctx, client, res)
-                } else {
-                    e.complexMetadataSelect(ctx, res, objects, metricNames)
-                }
-            }
-            newObjects[k] = objects
-
-            SendInternalCounterWithTags("discovered_objects", e.URL.Host, map[string]string{"type": res.name}, int64(len(objects)))
-            numRes += int64(len(objects))
+        err := func() error {
+            e.Parent.Log.Debugf("Discovering resources for %s", res.name)
+            // Need to do this for all resource types even if they are not enabled
+            if res.enabled || k != "vm" {
+                rf := ResourceFilter{
+                    finder:       &Finder{client},
+                    resType:      res.vcName,
+                    paths:        res.paths,
+                    excludePaths: res.excludePaths,
+                }
+
+                ctx1, cancel1 := context.WithTimeout(ctx, e.Parent.Timeout.Duration)
+                defer cancel1()
+                objects, err := res.getObjects(ctx1, e, &rf)
+                if err != nil {
+                    return err
+                }
+
+                // Fill in datacenter names where available (no need to do it for Datacenters)
+                if res.name != "Datacenter" {
+                    for k, obj := range objects {
+                        if obj.parentRef != nil {
+                            obj.dcname = e.getDatacenterName(ctx, client, dcNameCache, *obj.parentRef)
+                            objects[k] = obj
+                        }
+                    }
+                }
+
+                // No need to collect metric metadata if resource type is not enabled
+                if res.enabled {
+                    if res.simple {
+                        e.simpleMetadataSelect(ctx, client, res)
+                    } else {
+                        e.complexMetadataSelect(ctx, res, objects, metricNames)
+                    }
+                }
+                newObjects[k] = objects
+
+                SendInternalCounterWithTags("discovered_objects", e.URL.Host, map[string]string{"type": res.name}, int64(len(objects)))
+                numRes += int64(len(objects))
+            }
+            return nil
+        }()
+        if err != nil {
+            return err
         }
     }
 

@@ -433,8 +449,8 @@ func (e *Endpoint) discover(ctx context.Context) error {
     dss := newObjects["datastore"]
     l2d := make(map[string]string)
     for _, ds := range dss {
-        url := ds.altID
-        m := isolateLUN.FindStringSubmatch(url)
+        lunId := ds.altID
+        m := isolateLUN.FindStringSubmatch(lunId)
         if m != nil {
             l2d[m[1]] = ds.name
         }

@@ -583,39 +599,47 @@ func getClusters(ctx context.Context, e *Endpoint, filter *ResourceFilter) (obje
     cache := make(map[string]*types.ManagedObjectReference)
     m := make(objectMap, len(resources))
     for _, r := range resources {
-        // We're not interested in the immediate parent (a folder), but the data center.
-        p, ok := cache[r.Parent.Value]
-        if !ok {
-            ctx2, cancel2 := context.WithTimeout(ctx, e.Parent.Timeout.Duration)
-            defer cancel2()
-            client, err := e.clientFactory.GetClient(ctx2)
-            if err != nil {
-                return nil, err
-            }
-            o := object.NewFolder(client.Client.Client, *r.Parent)
-            var folder mo.Folder
-            ctx3, cancel3 := context.WithTimeout(ctx, e.Parent.Timeout.Duration)
-            defer cancel3()
-            err = o.Properties(ctx3, *r.Parent, []string{"parent"}, &folder)
-            if err != nil {
-                e.Parent.Log.Warnf("Error while getting folder parent: %s", err.Error())
-                p = nil
-            } else {
-                pp := folder.Parent.Reference()
-                p = &pp
-                cache[r.Parent.Value] = p
-            }
-        }
-        m[r.ExtensibleManagedObject.Reference().Value] = &objectRef{
-            name:         r.Name,
-            ref:          r.ExtensibleManagedObject.Reference(),
-            parentRef:    p,
-            customValues: e.loadCustomAttributes(&r.ManagedEntity),
+        // Wrap in a function to make defer work correctly.
+        err := func() error {
+            // We're not interested in the immediate parent (a folder), but the data center.
+            p, ok := cache[r.Parent.Value]
+            if !ok {
+                ctx2, cancel2 := context.WithTimeout(ctx, e.Parent.Timeout.Duration)
+                defer cancel2()
+                client, err := e.clientFactory.GetClient(ctx2)
+                if err != nil {
+                    return err
+                }
+                o := object.NewFolder(client.Client.Client, *r.Parent)
+                var folder mo.Folder
+                ctx3, cancel3 := context.WithTimeout(ctx, e.Parent.Timeout.Duration)
+                defer cancel3()
+                err = o.Properties(ctx3, *r.Parent, []string{"parent"}, &folder)
+                if err != nil {
+                    e.Parent.Log.Warnf("Error while getting folder parent: %s", err.Error())
+                    p = nil
+                } else {
+                    pp := folder.Parent.Reference()
+                    p = &pp
+                    cache[r.Parent.Value] = p
+                }
+            }
+            m[r.ExtensibleManagedObject.Reference().Value] = &objectRef{
+                name:         r.Name,
+                ref:          r.ExtensibleManagedObject.Reference(),
+                parentRef:    p,
+                customValues: e.loadCustomAttributes(&r.ManagedEntity),
+            }
+            return nil
+        }()
+        if err != nil {
+            return nil, err
         }
     }
     return m, nil
 }
 
+//noinspection GoUnusedParameter
 func getHosts(ctx context.Context, e *Endpoint, filter *ResourceFilter) (objectMap, error) {
     var resources []mo.HostSystem
     err := filter.FindAll(ctx, &resources)

@@ -717,18 +741,18 @@ func getDatastores(ctx context.Context, e *Endpoint, filter *ResourceFilter) (ob
     }
     m := make(objectMap)
     for _, r := range resources {
-        url := ""
+        lunId := ""
         if r.Info != nil {
             info := r.Info.GetDatastoreInfo()
             if info != nil {
-                url = info.Url
+                lunId = info.Url
             }
         }
         m[r.ExtensibleManagedObject.Reference().Value] = &objectRef{
             name:         r.Name,
             ref:          r.ExtensibleManagedObject.Reference(),
             parentRef:    r.Parent,
-            altID:        url,
+            altID:        lunId,
             customValues: e.loadCustomAttributes(&r.ManagedEntity),
         }
     }

@@ -814,7 +838,7 @@ func submitChunkJob(ctx context.Context, te *ThrottledExecutor, job func([]types
     })
 }
 
-func (e *Endpoint) chunkify(ctx context.Context, res *resourceKind, now time.Time, latest time.Time, acc telegraf.Accumulator, job func([]types.PerfQuerySpec)) {
+func (e *Endpoint) chunkify(ctx context.Context, res *resourceKind, now time.Time, latest time.Time, job func([]types.PerfQuerySpec)) {
     te := NewThrottledExecutor(e.Parent.CollectConcurrency)
     maxMetrics := e.Parent.MaxQueryMetrics
     if maxMetrics < 1 {

@@ -831,7 +855,7 @@ func (e *Endpoint) chunkify(ctx context.Context, res *resourceKind, now time.Tim
     metrics := 0
     total := 0
     nRes := 0
-    for _, object := range res.objects {
+    for _, resource := range res.objects {
         mr := len(res.metrics)
         for mr > 0 {
             mc := mr

@@ -841,14 +865,14 @@ func (e *Endpoint) chunkify(ctx context.Context, res *resourceKind, now time.Tim
             }
             fm := len(res.metrics) - mr
             pq := types.PerfQuerySpec{
-                Entity:     object.ref,
+                Entity:     resource.ref,
                 MaxSample:  maxSampleConst,
                 MetricId:   res.metrics[fm : fm+mc],
                 IntervalId: res.sampling,
                 Format:     "normal",
             }
 
-            start, ok := e.hwMarks.Get(object.ref.Value)
+            start, ok := e.hwMarks.Get(resource.ref.Value)
             if !ok {
                 // Look back 3 sampling periods by default
                 start = latest.Add(time.Duration(-res.sampling) * time.Second * (metricLookback - 1))

@@ -917,7 +941,7 @@ func (e *Endpoint) collectResource(ctx context.Context, resourceType string, acc
     // Estimate the interval at which we're invoked. Use local time (not server time)
     // since this is about how we got invoked locally.
     localNow := time.Now()
-    estInterval := time.Duration(time.Minute)
+    estInterval := time.Minute
     if !res.lastColl.IsZero() {
         s := time.Duration(res.sampling) * time.Second
         rawInterval := localNow.Sub(res.lastColl)

@@ -957,13 +981,14 @@ func (e *Endpoint) collectResource(ctx context.Context, resourceType string, acc
     latestSample := time.Time{}
 
     // Divide workload into chunks and process them concurrently
-    e.chunkify(ctx, res, now, latest, acc,
+    e.chunkify(ctx, res, now, latest,
         func(chunk []types.PerfQuerySpec) {
-            n, localLatest, err := e.collectChunk(ctx, chunk, res, acc, now, estInterval)
-            e.Parent.Log.Debugf("CollectChunk for %s returned %d metrics", resourceType, n)
+            n, localLatest, err := e.collectChunk(ctx, chunk, res, acc, estInterval)
             if err != nil {
                 acc.AddError(errors.New("while collecting " + res.name + ": " + err.Error()))
+                return
             }
+            e.Parent.Log.Debugf("CollectChunk for %s returned %d metrics", resourceType, n)
             atomic.AddInt64(&count, int64(n))
             tsMux.Lock()
             defer tsMux.Unlock()

@@ -1004,7 +1029,7 @@ func alignSamples(info []types.PerfSampleInfo, values []int64, interval time.Dur
         if roundedTs == lastBucket {
             bi++
             p := len(rValues) - 1
-            rValues[p] = ((bi-1)/bi)*float64(rValues[p]) + v/bi
+            rValues[p] = ((bi-1)/bi)*rValues[p] + v/bi
         } else {
             rValues = append(rValues, v)
             roundedInfo := types.PerfSampleInfo{

@@ -1019,7 +1044,7 @@ func alignSamples(info []types.PerfSampleInfo, values []int64, interval time.Dur
     return rInfo, rValues
 }
 
-func (e *Endpoint) collectChunk(ctx context.Context, pqs []types.PerfQuerySpec, res *resourceKind, acc telegraf.Accumulator, now time.Time, interval time.Duration) (int, time.Time, error) {
+func (e *Endpoint) collectChunk(ctx context.Context, pqs []types.PerfQuerySpec, res *resourceKind, acc telegraf.Accumulator, interval time.Duration) (int, time.Time, error) {
     e.Parent.Log.Debugf("Query for %s has %d QuerySpecs", res.name, len(pqs))
     latestSample := time.Time{}
     count := 0

@@ -1100,7 +1125,7 @@ func (e *Endpoint) collectChunk(ctx context.Context, pqs []types.PerfQuerySpec,
             }
             v := alignedValues[idx]
             if info.UnitInfo.GetElementDescription().Key == "percent" {
-                bucket.fields[fn] = float64(v) / 100.0
+                bucket.fields[fn] = v / 100.0
             } else {
                 if e.Parent.UseIntSamples {
                     bucket.fields[fn] = int64(round(v))

@@ -25,34 +25,54 @@ type Finder struct {
 // ResourceFilter is a convenience class holding a finder and a set of paths. It is useful when you need a
 // self contained object capable of returning a certain set of resources.
 type ResourceFilter struct {
     finder       *Finder
     resType      string
     paths        []string
+    excludePaths []string
 }
 
 // FindAll returns the union of resources found given the supplied resource type and paths.
-func (f *Finder) FindAll(ctx context.Context, resType string, paths []string, dst interface{}) error {
+func (f *Finder) FindAll(ctx context.Context, resType string, paths, excludePaths []string, dst interface{}) error {
+    objs := make(map[string]types.ObjectContent)
     for _, p := range paths {
-        if err := f.Find(ctx, resType, p, dst); err != nil {
+        if err := f.find(ctx, resType, p, objs); err != nil {
             return err
         }
     }
-    return nil
+    if len(excludePaths) > 0 {
+        excludes := make(map[string]types.ObjectContent)
+        for _, p := range excludePaths {
+            if err := f.find(ctx, resType, p, excludes); err != nil {
+                return err
+            }
+        }
+        for k := range excludes {
+            delete(objs, k)
+        }
+    }
+    return objectContentToTypedArray(objs, dst)
 }
 
 // Find returns the resources matching the specified path.
 func (f *Finder) Find(ctx context.Context, resType, path string, dst interface{}) error {
+    objs := make(map[string]types.ObjectContent)
+    err := f.find(ctx, resType, path, objs)
+    if err != nil {
+        return err
+    }
+    return objectContentToTypedArray(objs, dst)
+}
+
+func (f *Finder) find(ctx context.Context, resType, path string, objs map[string]types.ObjectContent) error {
     p := strings.Split(path, "/")
     flt := make([]property.Filter, len(p)-1)
     for i := 1; i < len(p); i++ {
         flt[i-1] = property.Filter{"name": p[i]}
     }
-    objs := make(map[string]types.ObjectContent)
     err := f.descend(ctx, f.client.Client.ServiceContent.RootFolder, resType, flt, 0, objs)
     if err != nil {
         return err
     }
-    objectContentToTypedArray(objs, dst)
     f.client.log.Debugf("Find(%s, %s) returned %d objects", resType, path, len(objs))
     return nil
 }

@@ -94,6 +114,9 @@ func (f *Finder) descend(ctx context.Context, root types.ManagedObjectReference,
             // Special case: The last token is a recursive wildcard, so we can grab everything
             // recursively in a single call.
             v2, err := m.CreateContainerView(ctx, root, []string{resType}, true)
+            if err != nil {
+                return err
+            }
             defer v2.Destroy(ctx)
             err = v2.Retrieve(ctx, []string{resType}, fields, &content)
             if err != nil {

@@ -204,7 +227,7 @@ func objectContentToTypedArray(objs map[string]types.ObjectContent, dst interfac
 // FindAll finds all resources matching the paths that were specified upon creation of
 // the ResourceFilter.
 func (r *ResourceFilter) FindAll(ctx context.Context, dst interface{}) error {
-    return r.finder.FindAll(ctx, r.resType, r.paths, dst)
+    return r.finder.FindAll(ctx, r.resType, r.paths, r.excludePaths, dst)
 }
 
 func matchName(f property.Filter, props []types.DynamicProperty) bool {

@@ -22,22 +22,27 @@ type VSphere struct {
     DatacenterMetricInclude []string
     DatacenterMetricExclude []string
     DatacenterInclude       []string
+    DatacenterExclude       []string
     ClusterInstances        bool
     ClusterMetricInclude    []string
     ClusterMetricExclude    []string
     ClusterInclude          []string
+    ClusterExclude          []string
     HostInstances           bool
     HostMetricInclude       []string
     HostMetricExclude       []string
     HostInclude             []string
+    HostExclude             []string
     VMInstances             bool     `toml:"vm_instances"`
     VMMetricInclude         []string `toml:"vm_metric_include"`
     VMMetricExclude         []string `toml:"vm_metric_exclude"`
     VMInclude               []string `toml:"vm_include"`
+    VMExclude               []string `toml:"vm_exclude"`
     DatastoreInstances      bool
     DatastoreMetricInclude  []string
     DatastoreMetricExclude  []string
     DatastoreInclude        []string
+    DatastoreExclude        []string
     Separator               string
     CustomAttributeInclude  []string
     CustomAttributeExclude  []string

@@ -377,10 +377,59 @@ func TestFinder(t *testing.T) {
     testLookupVM(ctx, t, &f, "/*/host/**/*DC*/*/*DC*", 4, "")
 
     vm = []mo.VirtualMachine{}
-    err = f.FindAll(ctx, "VirtualMachine", []string{"/DC0/vm/DC0_H0*", "/DC0/vm/DC0_C0*"}, &vm)
+    err = f.FindAll(ctx, "VirtualMachine", []string{"/DC0/vm/DC0_H0*", "/DC0/vm/DC0_C0*"}, []string{}, &vm)
     require.NoError(t, err)
     require.Equal(t, 4, len(vm))
 
+    rf := ResourceFilter{
+        finder:       &f,
+        paths:        []string{"/DC0/vm/DC0_H0*", "/DC0/vm/DC0_C0*"},
+        excludePaths: []string{"/DC0/vm/DC0_H0_VM0"},
+        resType:      "VirtualMachine",
+    }
+    vm = []mo.VirtualMachine{}
+    require.NoError(t, rf.FindAll(ctx, &vm))
+    require.Equal(t, 3, len(vm))
+
+    rf = ResourceFilter{
+        finder:       &f,
+        paths:        []string{"/DC0/vm/DC0_H0*", "/DC0/vm/DC0_C0*"},
+        excludePaths: []string{"/**"},
+        resType:      "VirtualMachine",
+    }
+    vm = []mo.VirtualMachine{}
+    require.NoError(t, rf.FindAll(ctx, &vm))
+    require.Equal(t, 0, len(vm))
+
+    rf = ResourceFilter{
+        finder:       &f,
+        paths:        []string{"/**"},
+        excludePaths: []string{"/**"},
+        resType:      "VirtualMachine",
+    }
+    vm = []mo.VirtualMachine{}
+    require.NoError(t, rf.FindAll(ctx, &vm))
+    require.Equal(t, 0, len(vm))
+
+    rf = ResourceFilter{
+        finder:       &f,
+        paths:        []string{"/**"},
+        excludePaths: []string{"/this won't match anything"},
+        resType:      "VirtualMachine",
+    }
+    vm = []mo.VirtualMachine{}
+    require.NoError(t, rf.FindAll(ctx, &vm))
+    require.Equal(t, 8, len(vm))
+
+    rf = ResourceFilter{
+        finder:       &f,
+        paths:        []string{"/**"},
+        excludePaths: []string{"/**/*VM0"},
+        resType:      "VirtualMachine",
+    }
+    vm = []mo.VirtualMachine{}
+    require.NoError(t, rf.FindAll(ctx, &vm))
+    require.Equal(t, 4, len(vm))
 }
 
 func TestFolders(t *testing.T) {