package vsphere

import (
	"context"
	"errors"
	"fmt"
	"math"
	"math/rand"
	"net/url"
	"regexp"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/filter"

	"github.com/vmware/govmomi/object"
	"github.com/vmware/govmomi/performance"
	"github.com/vmware/govmomi/vim25/mo"
	"github.com/vmware/govmomi/vim25/types"
)

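// isolateLUN extracts the last path component of a datastore URL, which is the LUN id.
// For example, a (hypothetical) URL like "ds:///vmfs/volumes/5f3b-99a1/" yields "5f3b-99a1".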
var isolateLUN = regexp.MustCompile(".*/([^/]+)/?$")

var isIPv4 = regexp.MustCompile("^(?:[0-9]{1,3}\\.){3}[0-9]{1,3}$")

var isIPv6 = regexp.MustCompile("^(?:[A-Fa-f0-9]{0,4}:){1,7}[A-Fa-f0-9]{1,4}$")

const metricLookback = 3 // Number of time periods to look back at for non-realtime metrics

const rtMetricLookback = 3 // Number of time periods to look back at for realtime metrics

const maxSampleConst = 10 // Absolute maximum number of samples regardless of period

const maxMetadataSamples = 100 // Number of resources to sample for metric metadata

const hwMarkTTL = time.Duration(4 * time.Hour)

type queryChunk []types.PerfQuerySpec

type queryJob func(queryChunk)

// Endpoint is a high-level representation of a connected vCenter endpoint. It is backed by the lower
// level Client type.
type Endpoint struct {
	Parent            *VSphere
	URL               *url.URL
	resourceKinds     map[string]*resourceKind
	hwMarks           *TSCache
	lun2ds            map[string]string
	discoveryTicker   *time.Ticker
	collectMux        sync.RWMutex
	initialized       bool
	clientFactory     *ClientFactory
	busy              sync.Mutex
	customFields      map[int32]string
	customAttrFilter  filter.Filter
	customAttrEnabled bool
	metricNameLookup  map[int32]string
	metricNameMux     sync.RWMutex
	log               telegraf.Logger
}

type resourceKind struct {
	name             string
	vcName           string
	pKey             string
	parentTag        string
	enabled          bool
	realTime         bool
	sampling         int32
	objects          objectMap
	filters          filter.Filter
	paths            []string
	excludePaths     []string
	collectInstances bool
	getObjects       func(context.Context, *Endpoint, *ResourceFilter) (objectMap, error)
	include          []string
	simple           bool
	metrics          performance.MetricList
	parent           string
	latestSample     time.Time
	lastColl         time.Time
}

type metricEntry struct {
	tags   map[string]string
	name   string
	ts     time.Time
	fields map[string]interface{}
}

type objectMap map[string]*objectRef

type objectRef struct {
	name         string
	altID        string
	ref          types.ManagedObjectReference
	parentRef    *types.ManagedObjectReference // Pointer because it must be nillable
	guest        string
	dcname       string
	customValues map[string]string
	lookup       map[string]string
}

func (e *Endpoint) getParent(obj *objectRef, res *resourceKind) (*objectRef, bool) {
	if pKind, ok := e.resourceKinds[res.parent]; ok {
		if p, ok := pKind.objects[obj.parentRef.Value]; ok {
			return p, true
		}
	}
	return nil, false
}

// NewEndpoint returns a new connection to a vCenter based on the URL and configuration passed
// as parameters.
func NewEndpoint(ctx context.Context, parent *VSphere, url *url.URL, log telegraf.Logger) (*Endpoint, error) {
	e := Endpoint{
		URL:               url,
		Parent:            parent,
		hwMarks:           NewTSCache(hwMarkTTL),
		lun2ds:            make(map[string]string),
		initialized:       false,
		clientFactory:     NewClientFactory(ctx, url, parent),
		customAttrFilter:  newFilterOrPanic(parent.CustomAttributeInclude, parent.CustomAttributeExclude),
		customAttrEnabled: anythingEnabled(parent.CustomAttributeExclude),
		log:               log,
	}

	e.resourceKinds = map[string]*resourceKind{
		"datacenter": {
			name:             "datacenter",
			vcName:           "Datacenter",
			pKey:             "dcname",
			parentTag:        "",
			enabled:          anythingEnabled(parent.DatacenterMetricExclude),
			realTime:         false,
			sampling:         300,
			objects:          make(objectMap),
			filters:          newFilterOrPanic(parent.DatacenterMetricInclude, parent.DatacenterMetricExclude),
			paths:            parent.DatacenterInclude,
			excludePaths:     parent.DatacenterExclude,
			simple:           isSimple(parent.DatacenterMetricInclude, parent.DatacenterMetricExclude),
			include:          parent.DatacenterMetricInclude,
			collectInstances: parent.DatacenterInstances,
			getObjects:       getDatacenters,
			parent:           "",
		},
		"cluster": {
			name:             "cluster",
			vcName:           "ClusterComputeResource",
			pKey:             "clustername",
			parentTag:        "dcname",
			enabled:          anythingEnabled(parent.ClusterMetricExclude),
			realTime:         false,
			sampling:         300,
			objects:          make(objectMap),
			filters:          newFilterOrPanic(parent.ClusterMetricInclude, parent.ClusterMetricExclude),
			paths:            parent.ClusterInclude,
			excludePaths:     parent.ClusterExclude,
			simple:           isSimple(parent.ClusterMetricInclude, parent.ClusterMetricExclude),
			include:          parent.ClusterMetricInclude,
			collectInstances: parent.ClusterInstances,
			getObjects:       getClusters,
			parent:           "datacenter",
		},
		"host": {
			name:             "host",
			vcName:           "HostSystem",
			pKey:             "esxhostname",
			parentTag:        "clustername",
			enabled:          anythingEnabled(parent.HostMetricExclude),
			realTime:         true,
			sampling:         20,
			objects:          make(objectMap),
			filters:          newFilterOrPanic(parent.HostMetricInclude, parent.HostMetricExclude),
			paths:            parent.HostInclude,
			excludePaths:     parent.HostExclude,
			simple:           isSimple(parent.HostMetricInclude, parent.HostMetricExclude),
			include:          parent.HostMetricInclude,
			collectInstances: parent.HostInstances,
			getObjects:       getHosts,
			parent:           "cluster",
		},
		"vm": {
			name:             "vm",
			vcName:           "VirtualMachine",
			pKey:             "vmname",
			parentTag:        "esxhostname",
			enabled:          anythingEnabled(parent.VMMetricExclude),
			realTime:         true,
			sampling:         20,
			objects:          make(objectMap),
			filters:          newFilterOrPanic(parent.VMMetricInclude, parent.VMMetricExclude),
			paths:            parent.VMInclude,
			excludePaths:     parent.VMExclude,
			simple:           isSimple(parent.VMMetricInclude, parent.VMMetricExclude),
			include:          parent.VMMetricInclude,
			collectInstances: parent.VMInstances,
			getObjects:       getVMs,
			parent:           "host",
		},
		"datastore": {
			name:             "datastore",
			vcName:           "Datastore",
			pKey:             "dsname",
			enabled:          anythingEnabled(parent.DatastoreMetricExclude),
			realTime:         false,
			sampling:         300,
			objects:          make(objectMap),
			filters:          newFilterOrPanic(parent.DatastoreMetricInclude, parent.DatastoreMetricExclude),
			paths:            parent.DatastoreInclude,
			excludePaths:     parent.DatastoreExclude,
			simple:           isSimple(parent.DatastoreMetricInclude, parent.DatastoreMetricExclude),
			include:          parent.DatastoreMetricInclude,
			collectInstances: parent.DatastoreInstances,
			getObjects:       getDatastores,
			parent:           "",
		},
	}

	// Start discover and other goodness
	err := e.init(ctx)

	return &e, err
}

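// anythingEnabled returns false if the exclude list excludes everything ("*"), i.e. the
// corresponding category has been turned off entirely in the configuration.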
func anythingEnabled(ex []string) bool {
	for _, s := range ex {
		if s == "*" {
			return false
		}
	}
	return true
}

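// newFilterOrPanic compiles an include/exclude glob filter and panics on invalid patterns,
// since this only runs while the plugin is being set up.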
func newFilterOrPanic(include []string, exclude []string) filter.Filter {
	f, err := filter.NewIncludeExcludeFilter(include, exclude)
	if err != nil {
		panic(fmt.Sprintf("Include/exclude filters are invalid: %s", err))
	}
	return f
}

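// isSimple reports whether the include list holds only literal metric names and nothing is
// excluded, which allows the cheaper metadata lookup in simpleMetadataSelect.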
func isSimple(include []string, exclude []string) bool {
	if len(exclude) > 0 || len(include) == 0 {
		return false
	}
	for _, s := range include {
		if strings.Contains(s, "*") {
			return false
		}
	}
	return true
}

func (e *Endpoint) startDiscovery(ctx context.Context) {
	e.discoveryTicker = time.NewTicker(e.Parent.ObjectDiscoveryInterval.Duration)
	go func() {
		for {
			select {
			case <-e.discoveryTicker.C:
				err := e.discover(ctx)
				if err != nil && err != context.Canceled {
					e.log.Errorf("Discovery for %s: %s", e.URL.Host, err.Error())
				}
			case <-ctx.Done():
				e.log.Debugf("Exiting discovery goroutine for %s", e.URL.Host)
				e.discoveryTicker.Stop()
				return
			}
		}
	}()
}

func (e *Endpoint) initialDiscovery(ctx context.Context) {
	err := e.discover(ctx)
	if err != nil && err != context.Canceled {
		e.log.Errorf("Discovery for %s: %s", e.URL.Host, err.Error())
	}
	e.startDiscovery(ctx)
}

func (e *Endpoint) init(ctx context.Context) error {
	client, err := e.clientFactory.GetClient(ctx)
	if err != nil {
		return err
	}

	// Initial load of custom field metadata
	if e.customAttrEnabled {
		fields, err := client.GetCustomFields(ctx)
		if err != nil {
			e.log.Warn("Could not load custom field metadata")
		} else {
			e.customFields = fields
		}
	}

	if e.Parent.ObjectDiscoveryInterval.Duration > 0 {
		e.Parent.Log.Debug("Running initial discovery")
		e.initialDiscovery(ctx)
	}
	e.initialized = true
	return nil
}

func (e *Endpoint) getMetricNameForId(id int32) string {
	e.metricNameMux.RLock()
	defer e.metricNameMux.RUnlock()
	return e.metricNameLookup[id]
}

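// reloadMetricNameMap refreshes the counter-id to metric-name lookup table from vCenter.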
func (e *Endpoint) reloadMetricNameMap(ctx context.Context) error {
	e.metricNameMux.Lock()
	defer e.metricNameMux.Unlock()
	client, err := e.clientFactory.GetClient(ctx)
	if err != nil {
		return err
	}

	mn, err := client.CounterInfoByName(ctx)
	if err != nil {
		return err
	}
	e.metricNameLookup = make(map[int32]string)
	for name, m := range mn {
		e.metricNameLookup[m.Key] = name
	}
	return nil
}

func (e *Endpoint) getMetadata(ctx context.Context, obj *objectRef, sampling int32) (performance.MetricList, error) {
	client, err := e.clientFactory.GetClient(ctx)
	if err != nil {
		return nil, err
	}

	ctx1, cancel1 := context.WithTimeout(ctx, e.Parent.Timeout.Duration)
	defer cancel1()
	metrics, err := client.Perf.AvailableMetric(ctx1, obj.ref.Reference(), sampling)
	if err != nil {
		return nil, err
	}
	return metrics, nil
}

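// getDatacenterName walks up the inventory tree from r until it reaches a Datacenter,
// caching every hop along the way so repeated lookups stay cheap.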
func (e *Endpoint) getDatacenterName(ctx context.Context, client *Client, cache map[string]string, r types.ManagedObjectReference) string {
	path := make([]string, 0)
	returnVal := ""
	here := r
	done := false
	for !done {
		done = func() bool {
			if name, ok := cache[here.Reference().String()]; ok {
				// Populate cache for the entire chain of objects leading here.
				returnVal = name
				return true
			}
			path = append(path, here.Reference().String())
			o := object.NewCommon(client.Client.Client, r)
			var result mo.ManagedEntity
			ctx1, cancel1 := context.WithTimeout(ctx, e.Parent.Timeout.Duration)
			defer cancel1()
			err := o.Properties(ctx1, here, []string{"parent", "name"}, &result)
			if err != nil {
				e.Parent.Log.Warnf("Error while resolving parent. Assuming no parent exists. Error: %s", err.Error())
				return true
			}
			if result.Reference().Type == "Datacenter" {
				// Populate cache for the entire chain of objects leading here.
				returnVal = result.Name
				return true
			}
			if result.Parent == nil {
				e.Parent.Log.Debugf("No parent found for %s (ascending from %s)", here.Reference(), r.Reference())
				return true
			}
			here = result.Parent.Reference()
			return false
		}()
	}
	for _, s := range path {
		cache[s] = returnVal
	}
	return returnVal
}

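// discover queries vCenter for all monitored object types, refreshes the metric metadata
// selection for each resource kind, and atomically swaps the results into the endpoint's
// lookup tables.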
func (e *Endpoint) discover(ctx context.Context) error {
	e.busy.Lock()
	defer e.busy.Unlock()
	if ctx.Err() != nil {
		return ctx.Err()
	}

	err := e.reloadMetricNameMap(ctx)
	if err != nil {
		return err
	}

	sw := NewStopwatch("discover", e.URL.Host)

	client, err := e.clientFactory.GetClient(ctx)
	if err != nil {
		return err
	}

	e.log.Debugf("Discover new objects for %s", e.URL.Host)
	dcNameCache := make(map[string]string)

	numRes := int64(0)

	// Populate resource objects, and endpoint instance info.
	newObjects := make(map[string]objectMap)
	for k, res := range e.resourceKinds {
		e.log.Debugf("Discovering resources for %s", res.name)
		// Need to do this for all resource types even if they are not enabled
		if res.enabled || k != "vm" {
			rf := ResourceFilter{
				finder:       &Finder{client},
				resType:      res.vcName,
				paths:        res.paths,
				excludePaths: res.excludePaths}

			ctx1, cancel1 := context.WithTimeout(ctx, e.Parent.Timeout.Duration)
			objects, err := res.getObjects(ctx1, e, &rf)
			cancel1()
			if err != nil {
				return err
			}

			// Fill in datacenter names where available (no need to do it for Datacenters)
			if res.name != "Datacenter" {
				for k, obj := range objects {
					if obj.parentRef != nil {
						obj.dcname = e.getDatacenterName(ctx, client, dcNameCache, *obj.parentRef)
						objects[k] = obj
					}
				}
			}

			// No need to collect metric metadata if resource type is not enabled
			if res.enabled {
				if res.simple {
					e.simpleMetadataSelect(ctx, client, res)
				} else {
					e.complexMetadataSelect(ctx, res, objects)
				}
				newObjects[k] = objects

				SendInternalCounterWithTags("discovered_objects", e.URL.Host, map[string]string{"type": res.name}, int64(len(objects)))
				numRes += int64(len(objects))
			}
		}
		if err != nil {
			e.log.Error(err)
		}
	}

	// Build lun2ds map
	dss := newObjects["datastore"]
	l2d := make(map[string]string)
	for _, ds := range dss {
		lunId := ds.altID
		m := isolateLUN.FindStringSubmatch(lunId)
		if m != nil {
			l2d[m[1]] = ds.name
		}
	}

	// Load custom field metadata
	var fields map[int32]string
	if e.customAttrEnabled {
		fields, err = client.GetCustomFields(ctx)
		if err != nil {
			e.log.Warn("Could not load custom field metadata")
			fields = nil
		}
	}

	// Atomically swap maps
	e.collectMux.Lock()
	defer e.collectMux.Unlock()

	for k, v := range newObjects {
		e.resourceKinds[k].objects = v
	}
	e.lun2ds = l2d

	if fields != nil {
		e.customFields = fields
	}

	sw.Stop()
	SendInternalCounterWithTags("discovered_objects", e.URL.Host, map[string]string{"type": "instance-total"}, numRes)
	return nil
}

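// simpleMetadataSelect resolves the configured metric names directly against the global
// counter table. It is used when isSimple() returned true for the resource kind.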
func (e *Endpoint) simpleMetadataSelect(ctx context.Context, client *Client, res *resourceKind) {
	e.log.Debugf("Using fast metric metadata selection for %s", res.name)
	m, err := client.CounterInfoByName(ctx)
	if err != nil {
		e.log.Errorf("Getting metric metadata. Discovery will be incomplete. Error: %s", err.Error())
		return
	}
	res.metrics = make(performance.MetricList, 0, len(res.include))
	for _, s := range res.include {
		if pci, ok := m[s]; ok {
			cnt := types.PerfMetricId{
				CounterId: pci.Key,
			}
			if res.collectInstances {
				cnt.Instance = "*"
			} else {
				cnt.Instance = ""
			}
			res.metrics = append(res.metrics, cnt)
		} else {
			e.log.Warnf("Metric name %s is unknown. Will not be collected", s)
		}
	}
}

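// complexMetadataSelect queries the available metrics of up to maxMetadataSamples randomly
// picked objects in parallel and keeps the largest set of matching metrics it finds.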
func (e *Endpoint) complexMetadataSelect(ctx context.Context, res *resourceKind, objects objectMap) {
	// We're only going to get metadata from maxMetadataSamples resources. If we have
	// more resources than that, we pick maxMetadataSamples samples at random.
	sampledObjects := make([]*objectRef, len(objects))
	i := 0
	for _, obj := range objects {
		sampledObjects[i] = obj
		i++
	}
	n := len(sampledObjects)
	if n > maxMetadataSamples {
		// Shuffle samples into the maxMetadataSamples positions
		for i := 0; i < maxMetadataSamples; i++ {
			j := int(rand.Int31n(int32(i + 1)))
			t := sampledObjects[i]
			sampledObjects[i] = sampledObjects[j]
			sampledObjects[j] = t
		}
		sampledObjects = sampledObjects[0:maxMetadataSamples]
	}

	instInfoMux := sync.Mutex{}
	te := NewThrottledExecutor(e.Parent.DiscoverConcurrency)
	for _, obj := range sampledObjects {
		func(obj *objectRef) {
			te.Run(ctx, func() {
				metrics, err := e.getMetadata(ctx, obj, res.sampling)
				if err != nil {
					e.log.Errorf("Getting metric metadata. Discovery will be incomplete. Error: %s", err.Error())
				}
				mMap := make(map[string]types.PerfMetricId)
				for _, m := range metrics {
					if m.Instance != "" && res.collectInstances {
						m.Instance = "*"
					} else {
						m.Instance = ""
					}
					if res.filters.Match(e.getMetricNameForId(m.CounterId)) {
						mMap[strconv.Itoa(int(m.CounterId))+"|"+m.Instance] = m
					}
				}
				e.log.Debugf("Found %d metrics for %s", len(mMap), obj.name)
				instInfoMux.Lock()
				defer instInfoMux.Unlock()
				if len(mMap) > len(res.metrics) {
					res.metrics = make(performance.MetricList, len(mMap))
					i := 0
					for _, m := range mMap {
						res.metrics[i] = m
						i++
					}
				}
			})
		}(obj)
	}
	te.Wait()
}

func getDatacenters(ctx context.Context, e *Endpoint, filter *ResourceFilter) (objectMap, error) {
	var resources []mo.Datacenter
	ctx1, cancel1 := context.WithTimeout(ctx, e.Parent.Timeout.Duration)
	defer cancel1()
	err := filter.FindAll(ctx1, &resources)
	if err != nil {
		return nil, err
	}
	m := make(objectMap, len(resources))
	for _, r := range resources {
		m[r.ExtensibleManagedObject.Reference().Value] = &objectRef{
			name:         r.Name,
			ref:          r.ExtensibleManagedObject.Reference(),
			parentRef:    r.Parent,
			dcname:       r.Name,
			customValues: e.loadCustomAttributes(&r.ManagedEntity),
		}
	}
	return m, nil
}

func getClusters(ctx context.Context, e *Endpoint, filter *ResourceFilter) (objectMap, error) {
	var resources []mo.ClusterComputeResource
	ctx1, cancel1 := context.WithTimeout(ctx, e.Parent.Timeout.Duration)
	defer cancel1()
	err := filter.FindAll(ctx1, &resources)
	if err != nil {
		return nil, err
	}
	cache := make(map[string]*types.ManagedObjectReference)
	m := make(objectMap, len(resources))
	for _, r := range resources {
		// Wrap in a function to make defer work correctly.
		err := func() error {
			// We're not interested in the immediate parent (a folder), but the data center.
			p, ok := cache[r.Parent.Value]
			if !ok {
				ctx2, cancel2 := context.WithTimeout(ctx, e.Parent.Timeout.Duration)
				defer cancel2()
				client, err := e.clientFactory.GetClient(ctx2)
				if err != nil {
					return err
				}
				o := object.NewFolder(client.Client.Client, *r.Parent)
				var folder mo.Folder
				ctx3, cancel3 := context.WithTimeout(ctx, e.Parent.Timeout.Duration)
				defer cancel3()
				err = o.Properties(ctx3, *r.Parent, []string{"parent"}, &folder)
				if err != nil {
					e.Parent.Log.Warnf("Error while getting folder parent: %s", err.Error())
					p = nil
				} else {
					pp := folder.Parent.Reference()
					p = &pp
					cache[r.Parent.Value] = p
				}
			}
			// Register the cluster, using the datacenter resolved above as its parent.
			m[r.ExtensibleManagedObject.Reference().Value] = &objectRef{
				name:         r.Name,
				ref:          r.ExtensibleManagedObject.Reference(),
				parentRef:    p,
				customValues: e.loadCustomAttributes(&r.ManagedEntity),
			}
			return nil
		}()
		if err != nil {
			return nil, err
		}
	}
	return m, nil
}

//noinspection GoUnusedParameter
func getHosts(ctx context.Context, e *Endpoint, filter *ResourceFilter) (objectMap, error) {
	var resources []mo.HostSystem
	err := filter.FindAll(ctx, &resources)
	if err != nil {
		return nil, err
	}
	m := make(objectMap)
	for _, r := range resources {
		m[r.ExtensibleManagedObject.Reference().Value] = &objectRef{
			name:         r.Name,
			ref:          r.ExtensibleManagedObject.Reference(),
			parentRef:    r.Parent,
			customValues: e.loadCustomAttributes(&r.ManagedEntity),
		}
	}
	return m, nil
}

func getVMs(ctx context.Context, e *Endpoint, filter *ResourceFilter) (objectMap, error) {
	var resources []mo.VirtualMachine
	ctx1, cancel1 := context.WithTimeout(ctx, e.Parent.Timeout.Duration)
	defer cancel1()
	err := filter.FindAll(ctx1, &resources)
	if err != nil {
		return nil, err
	}
	m := make(objectMap)
	for _, r := range resources {
		if r.Runtime.PowerState != "poweredOn" {
			continue
		}
		guest := "unknown"
		uuid := ""
		lookup := make(map[string]string)

		// Extract host name
		if r.Guest != nil && r.Guest.HostName != "" {
			lookup["guesthostname"] = r.Guest.HostName
		}

		// Collect network information
		for _, net := range r.Guest.Net {
			if net.DeviceConfigId == -1 {
				continue
			}
			if net.IpConfig == nil || net.IpConfig.IpAddress == nil {
				continue
			}
			ips := make(map[string][]string)
			for _, ip := range net.IpConfig.IpAddress {
				addr := ip.IpAddress
				for _, ipType := range e.Parent.IpAddresses {
					if !(ipType == "ipv4" && isIPv4.MatchString(addr) ||
						ipType == "ipv6" && isIPv6.MatchString(addr)) {
						continue
					}

					// By convention, we want the preferred addresses to appear first in the array.
					if _, ok := ips[ipType]; !ok {
						ips[ipType] = make([]string, 0)
					}
					if ip.State == "preferred" {
						ips[ipType] = append([]string{addr}, ips[ipType]...)
					} else {
						ips[ipType] = append(ips[ipType], addr)
					}
				}
			}
			for ipType, ipList := range ips {
				lookup["nic/"+strconv.Itoa(int(net.DeviceConfigId))+"/"+ipType] = strings.Join(ipList, ",")
			}
		}

		// Sometimes Config is unknown and returns a nil pointer
		if r.Config != nil {
			guest = cleanGuestID(r.Config.GuestId)
			uuid = r.Config.Uuid
		}
		cvs := make(map[string]string)
		if e.customAttrEnabled {
			for _, cv := range r.Summary.CustomValue {
				val, ok := cv.(*types.CustomFieldStringValue)
				if !ok {
					// Only string-valued custom fields are mapped to tags.
					continue
				}
				if val.Value == "" {
					continue
				}
				key, ok := e.customFields[val.Key]
				if !ok {
					e.log.Warnf("Metadata for custom field %d not found. Skipping", val.Key)
					continue
				}
				if e.customAttrFilter.Match(key) {
					cvs[key] = val.Value
				}
			}
		}
		m[r.ExtensibleManagedObject.Reference().Value] = &objectRef{
			name:         r.Name,
			ref:          r.ExtensibleManagedObject.Reference(),
			parentRef:    r.Runtime.Host,
			guest:        guest,
			altID:        uuid,
			customValues: e.loadCustomAttributes(&r.ManagedEntity),
			lookup:       lookup,
		}
	}
	return m, nil
}

func getDatastores(ctx context.Context, e *Endpoint, filter *ResourceFilter) (objectMap, error) {
	var resources []mo.Datastore
	ctx1, cancel1 := context.WithTimeout(ctx, e.Parent.Timeout.Duration)
	defer cancel1()
	err := filter.FindAll(ctx1, &resources)
	if err != nil {
		return nil, err
	}
	m := make(objectMap)
	for _, r := range resources {
		lunId := ""
		if r.Info != nil {
			info := r.Info.GetDatastoreInfo()
			if info != nil {
				lunId = info.Url
			}
		}
		m[r.ExtensibleManagedObject.Reference().Value] = &objectRef{
			name:         r.Name,
			ref:          r.ExtensibleManagedObject.Reference(),
			parentRef:    r.Parent,
			altID:        lunId,
			customValues: e.loadCustomAttributes(&r.ManagedEntity),
		}
	}
	return m, nil
}

func (e *Endpoint) loadCustomAttributes(entity *mo.ManagedEntity) map[string]string {
	if !e.customAttrEnabled {
		return map[string]string{}
	}
	cvs := make(map[string]string)
	for _, v := range entity.CustomValue {
		cv, ok := v.(*types.CustomFieldStringValue)
		if !ok {
			// Use the base value for the key here: cv is nil when the assertion fails.
			e.Parent.Log.Warnf("Metadata for custom field %d not of string type. Skipping", v.GetCustomFieldValue().Key)
			continue
		}
		key, ok := e.customFields[cv.Key]
		if !ok {
			e.Parent.Log.Warnf("Metadata for custom field %d not found. Skipping", cv.Key)
			continue
		}
		if e.customAttrFilter.Match(key) {
			cvs[key] = cv.Value
		}
	}
	return cvs
}

// Close shuts down an Endpoint and releases any resources associated with it.
func (e *Endpoint) Close() {
	e.clientFactory.Close()
}

// Collect runs a round of data collections as specified in the configuration.
func (e *Endpoint) Collect(ctx context.Context, acc telegraf.Accumulator) error {
	// If we never managed to do a discovery, collection will be a no-op. Therefore,
	// we need to check that a connection is available, or the collection will
	// silently fail.
	if _, err := e.clientFactory.GetClient(ctx); err != nil {
		return err
	}

	e.collectMux.RLock()
	defer e.collectMux.RUnlock()

	if ctx.Err() != nil {
		return ctx.Err()
	}

	// If discovery interval is disabled (0), discover on each collection cycle
	if e.Parent.ObjectDiscoveryInterval.Duration == 0 {
		err := e.discover(ctx)
		if err != nil {
			return err
		}
	}
	var wg sync.WaitGroup
	for k, res := range e.resourceKinds {
		if res.enabled {
			wg.Add(1)
			go func(k string) {
				defer wg.Done()
				err := e.collectResource(ctx, k, acc)
				if err != nil {
					acc.AddError(err)
				}
			}(k)
		}
	}
	wg.Wait()

	// Purge old timestamps from the cache
	e.hwMarks.Purge()
	return nil
}

// Workaround to make sure pqs is a copy of the loop variable and won't change.
func submitChunkJob(ctx context.Context, te *ThrottledExecutor, job queryJob, pqs queryChunk) {
	te.Run(ctx, func() {
		job(pqs)
	})
}

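// chunkify groups the metrics of every object into PerfQuerySpec buckets keyed by the start
// time derived from each metric's high-water mark, splits them so no single query exceeds
// the configured limits, and hands each chunk to job through the throttled executor.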
func (e *Endpoint) chunkify(ctx context.Context, res *resourceKind, now time.Time, latest time.Time, acc telegraf.Accumulator, job queryJob) {
	te := NewThrottledExecutor(e.Parent.CollectConcurrency)
	maxMetrics := e.Parent.MaxQueryMetrics
	if maxMetrics < 1 {
		maxMetrics = 1
	}

	// Workaround for vCenter weirdness. Cluster metrics seem to count multiple times
	// when checking query size, so keep it at a low value.
	// Revisit this when we better understand the reason why vCenter counts it this way!
	if res.name == "cluster" && maxMetrics > 10 {
		maxMetrics = 10
	}

	pqs := make(queryChunk, 0, e.Parent.MaxQueryObjects)

	for _, object := range res.objects {
		timeBuckets := make(map[int64]*types.PerfQuerySpec)
		for metricIdx, metric := range res.metrics {

			// Determine time of last successful collection
			metricName := e.getMetricNameForId(metric.CounterId)
			if metricName == "" {
				e.log.Infof("Unable to find metric name for id %d. Skipping!", metric.CounterId)
				continue
			}
			start, ok := e.hwMarks.Get(object.ref.Value, metricName)
			if !ok {
				start = latest.Add(time.Duration(-res.sampling) * time.Second * (metricLookback - 1))
			}
			start = start.Truncate(20 * time.Second) // Truncate to maximum resolution

			// Create bucket if we don't already have it
			bucket, ok := timeBuckets[start.Unix()]
			if !ok {
				bucket = &types.PerfQuerySpec{
					Entity:     object.ref,
					MaxSample:  maxSampleConst,
					MetricId:   make([]types.PerfMetricId, 0),
					IntervalId: res.sampling,
					Format:     "normal",
				}
				bucket.StartTime = &start
				bucket.EndTime = &now
				timeBuckets[start.Unix()] = bucket
			}

			// Add this metric to the bucket
			bucket.MetricId = append(bucket.MetricId, metric)

			// Bucket filled to capacity? (Only applies to non real time)
			// OR if we're past the absolute maximum limit
			if (!res.realTime && len(bucket.MetricId) >= maxMetrics) || len(bucket.MetricId) > 100000 {
				e.log.Debugf("Submitting partial query: %d metrics (%d remaining) of type %s for %s. Total objects %d",
					len(bucket.MetricId), len(res.metrics)-metricIdx, res.name, e.URL.Host, len(res.objects))

				// Don't send work items if the context has been cancelled.
				if ctx.Err() == context.Canceled {
					return
				}

				// Run collection job
				delete(timeBuckets, start.Unix())
				submitChunkJob(ctx, te, job, queryChunk{*bucket})
			}
		}
		// Handle data in the time buckets and submit a job if we've reached the maximum number of objects.
		for _, bucket := range timeBuckets {
			pqs = append(pqs, *bucket)
			if (!res.realTime && len(pqs) > e.Parent.MaxQueryObjects) || len(pqs) > 100000 {
				e.log.Debugf("Submitting final bucket job for %s: %d metrics", res.name, len(bucket.MetricId))
				submitChunkJob(ctx, te, job, pqs)
				pqs = make(queryChunk, 0, e.Parent.MaxQueryObjects)
			}
		}
	}
	// Submit any jobs left in the queue
	if len(pqs) > 0 {
		e.log.Debugf("Submitting job for %s: %d objects", res.name, len(pqs))
		submitChunkJob(ctx, te, job, pqs)
	}

	// Wait for background collection to finish
	te.Wait()
}

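// collectResource gathers one round of metrics for a single resource kind: it estimates the
// local collection interval, skips non-realtime kinds whose sampling period has not elapsed
// yet, and then chunks the queries and collects them concurrently.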
func (e *Endpoint) collectResource(ctx context.Context, resourceType string, acc telegraf.Accumulator) error {
	res := e.resourceKinds[resourceType]
	client, err := e.clientFactory.GetClient(ctx)
	if err != nil {
		return err
	}
	now, err := client.GetServerTime(ctx)
	if err != nil {
		return err
	}

	// Estimate the interval at which we're invoked. Use local time (not server time)
	// since this is about how we got invoked locally.
	localNow := time.Now()
	estInterval := time.Minute
	if !res.lastColl.IsZero() {
		s := time.Duration(res.sampling) * time.Second
		rawInterval := localNow.Sub(res.lastColl)
		paddedInterval := rawInterval + time.Duration(res.sampling/2)*time.Second
		estInterval = paddedInterval.Truncate(s)
		if estInterval < s {
			estInterval = s
		}
		e.log.Debugf("Raw interval %s, padded: %s, estimated: %s", rawInterval, paddedInterval, estInterval)
	}
	e.log.Debugf("Interval estimated to %s", estInterval)
	res.lastColl = localNow

	latest := res.latestSample
	if !latest.IsZero() {
		elapsed := now.Sub(latest).Seconds() + 5.0 // Allow 5 second jitter.
		e.log.Debugf("Latest: %s, elapsed: %f, resource: %s", latest, elapsed, resourceType)
		if !res.realTime && elapsed < float64(res.sampling) {
			// No new data would be available. We're outta here!
			e.log.Debugf("Sampling period for %s of %d has not elapsed on %s",
				resourceType, res.sampling, e.URL.Host)
			return nil
		}
	} else {
		latest = now.Add(time.Duration(-res.sampling) * time.Second)
	}

	internalTags := map[string]string{"resourcetype": resourceType}
	sw := NewStopwatchWithTags("gather_duration", e.URL.Host, internalTags)

	e.log.Debugf("Collecting metrics for %d objects of type %s for %s",
		len(res.objects), resourceType, e.URL.Host)

	count := int64(0)

	var tsMux sync.Mutex
	latestSample := time.Time{}

	// Divide workload into chunks and process them concurrently
	e.chunkify(ctx, res, now, latest, acc,
		func(chunk queryChunk) {
			n, localLatest, err := e.collectChunk(ctx, chunk, res, acc, now, estInterval)
			e.log.Debugf("CollectChunk for %s returned %d metrics", resourceType, n)
			if err != nil {
				acc.AddError(errors.New("while collecting " + res.name + ": " + err.Error()))
				return
			}
			atomic.AddInt64(&count, int64(n))
			tsMux.Lock()
			defer tsMux.Unlock()
			if localLatest.After(latestSample) && !localLatest.IsZero() {
				latestSample = localLatest
			}
		})

	e.log.Debugf("Latest sample for %s set to %s", resourceType, latestSample)
	if !latestSample.IsZero() {
		res.latestSample = latestSample
	}
	sw.Stop()
	SendInternalCounterWithTags("gather_count", e.URL.Host, internalTags, count)
	return nil
}

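// alignSamples truncates sample timestamps to the estimated collection interval and keeps a
// running average of all samples falling into the same interval, so vCenter's finer-grained
// real-time samples line up with the reporting interval. Negative values (vCenter's marker
// for "no data") are dropped.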
func (e *Endpoint) alignSamples(info []types.PerfSampleInfo, values []int64, interval time.Duration) ([]types.PerfSampleInfo, []float64) {
	rInfo := make([]types.PerfSampleInfo, 0, len(info))
	rValues := make([]float64, 0, len(values))
	bi := 1.0
	var lastBucket time.Time
	for idx := range info {
		// According to the docs, SampleInfo and Value should have the same length, but we've seen corrupted
		// data coming back with missing values. Take care of that gracefully!
		if idx >= len(values) {
			e.log.Debugf("len(SampleInfo)>len(Value) %d > %d during alignment", len(info), len(values))
			break
		}
		v := float64(values[idx])
		if v < 0 {
			continue
		}
		ts := info[idx].Timestamp
		roundedTs := ts.Truncate(interval)

		// Are we still working on the same bucket?
		if roundedTs == lastBucket {
			bi++
			p := len(rValues) - 1
			rValues[p] = ((bi-1)/bi)*rValues[p] + v/bi
		} else {
			rValues = append(rValues, v)
			roundedInfo := types.PerfSampleInfo{
				Timestamp: roundedTs,
				Interval:  info[idx].Interval,
			}
			rInfo = append(rInfo, roundedInfo)
			bi = 1.0
			lastBucket = roundedTs
		}
	}
	return rInfo, rValues
}

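// collectChunk runs the performance query for one chunk of PerfQuerySpecs and converts the
// results into Telegraf metrics, bucketed per measurement, instance and timestamp.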
func (e *Endpoint) collectChunk(ctx context.Context, pqs queryChunk, res *resourceKind, acc telegraf.Accumulator, now time.Time, interval time.Duration) (int, time.Time, error) {
	e.log.Debugf("Query for %s has %d QuerySpecs", res.name, len(pqs))
	latestSample := time.Time{}
	count := 0
	resourceType := res.name
	prefix := "vsphere" + e.Parent.Separator + resourceType

	client, err := e.clientFactory.GetClient(ctx)
	if err != nil {
		return count, latestSample, err
	}

	metricInfo, err := client.CounterInfoByName(ctx)
	if err != nil {
		return count, latestSample, err
	}

	ems, err := client.QueryMetrics(ctx, pqs)
	if err != nil {
		return count, latestSample, err
	}

	e.log.Debugf("Query for %s returned metrics for %d objects", resourceType, len(ems))

	// Iterate through results
	for _, em := range ems {
		moid := em.Entity.Reference().Value
		instInfo, found := res.objects[moid]
		if !found {
			e.log.Errorf("MOID %s not found in cache. Skipping! (This should not happen!)", moid)
			continue
		}
		buckets := make(map[string]metricEntry)
		for _, v := range em.Value {
			name := v.Name
			t := map[string]string{
				"vcenter": e.URL.Host,
				"source":  instInfo.name,
				"moid":    moid,
			}

			// Populate tags
			objectRef, ok := res.objects[moid]
			if !ok {
				e.log.Errorf("MOID %s not found in cache. Skipping", moid)
				continue
			}
			e.populateTags(objectRef, resourceType, res, t, &v)

			nValues := 0
			alignedInfo, alignedValues := e.alignSamples(em.SampleInfo, v.Value, interval)

			for idx, sample := range alignedInfo {
				// According to the docs, SampleInfo and Value should have the same length, but we've seen corrupted
				// data coming back with missing values. Take care of that gracefully!
				if idx >= len(alignedValues) {
					e.log.Debugf("Len(SampleInfo)>len(Value) %d > %d", len(alignedInfo), len(alignedValues))
					break
				}
				ts := sample.Timestamp
				if ts.After(latestSample) {
					latestSample = ts
				}
				nValues++

				// Organize the metrics into a bucket per measurement.
				mn, fn := e.makeMetricIdentifier(prefix, name)
				bKey := mn + " " + v.Instance + " " + strconv.FormatInt(ts.UnixNano(), 10)
				bucket, found := buckets[bKey]
				if !found {
					bucket = metricEntry{name: mn, ts: ts, fields: make(map[string]interface{}), tags: t}
					buckets[bKey] = bucket
				}

				// Percentage values must be scaled down by 100.
				info, ok := metricInfo[name]
				if !ok {
					e.log.Errorf("Could not determine unit for %s. Skipping", name)
					continue
				}
				v := alignedValues[idx]
				if info.UnitInfo.GetElementDescription().Key == "percent" {
					bucket.fields[fn] = v / 100.0
				} else {
					if e.Parent.UseIntSamples {
						bucket.fields[fn] = int64(round(v))
					} else {
						bucket.fields[fn] = v
					}
				}
				count++

				// Update hiwater marks
				e.hwMarks.Put(moid, name, ts)
			}
			if nValues == 0 {
				e.log.Debugf("Missing value for: %s, %s", name, objectRef.name)
				continue
			}
		}
		// We've iterated through all the metrics and collected buckets for each
		// measurement name. Now emit them!
		for _, bucket := range buckets {
			acc.AddFields(bucket.name, bucket.fields, bucket.tags, bucket.ts)
		}
	}
	return count, latestSample, nil
}

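// populateTags fills the tag map for a metric series based on the resource kind, its parent
// chain, and the metric's instance name.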
func (e *Endpoint) populateTags(objectRef *objectRef, resourceType string, resource *resourceKind, t map[string]string, v *performance.MetricSeries) {
	// Map name of object.
	if resource.pKey != "" {
		t[resource.pKey] = objectRef.name
	}

	if resourceType == "vm" && objectRef.altID != "" {
		t["uuid"] = objectRef.altID
	}

	// Map parent reference
	parent, found := e.getParent(objectRef, resource)
	if found {
		t[resource.parentTag] = parent.name
		if resourceType == "vm" {
			if objectRef.guest != "" {
				t["guest"] = objectRef.guest
			}
			if gh := objectRef.lookup["guesthostname"]; gh != "" {
				t["guesthostname"] = gh
			}
			if c, ok := e.resourceKinds["cluster"].objects[parent.parentRef.Value]; ok {
				t["clustername"] = c.name
			}
		}
	}

	// Fill in Datacenter name
	if objectRef.dcname != "" {
		t["dcname"] = objectRef.dcname
	}

	// Determine which point tag to map to the instance
	name := v.Name
	instance := "instance-total"
	if v.Instance != "" {
		instance = v.Instance
	}
	if strings.HasPrefix(name, "cpu.") {
		t["cpu"] = instance
	} else if strings.HasPrefix(name, "datastore.") {
		t["lun"] = instance
		if ds, ok := e.lun2ds[instance]; ok {
			t["dsname"] = ds
		} else {
			t["dsname"] = instance
		}
	} else if strings.HasPrefix(name, "disk.") {
		t["disk"] = cleanDiskTag(instance)
	} else if strings.HasPrefix(name, "net.") {
		t["interface"] = instance

		// Add IP addresses to NIC data.
		if resourceType == "vm" && objectRef.lookup != nil {
			key := "nic/" + t["interface"] + "/"
			if ip, ok := objectRef.lookup[key+"ipv6"]; ok {
				t["ipv6"] = ip
			}
			if ip, ok := objectRef.lookup[key+"ipv4"]; ok {
				t["ipv4"] = ip
			}
		}
	} else if strings.HasPrefix(name, "storageAdapter.") {
		t["adapter"] = instance
	} else if strings.HasPrefix(name, "storagePath.") {
		t["path"] = instance
	} else if strings.HasPrefix(name, "sys.resource") {
		t["resource"] = instance
	} else if strings.HasPrefix(name, "vflashModule.") {
		t["module"] = instance
	} else if strings.HasPrefix(name, "virtualDisk.") {
		t["disk"] = instance
	} else if v.Instance != "" {
		// default
		t["instance"] = v.Instance
	}

	// Fill in custom values if they exist
	if objectRef.customValues != nil {
		for k, v := range objectRef.customValues {
			if v != "" {
				t[k] = v
			}
		}
	}
}

func (e *Endpoint) makeMetricIdentifier(prefix, metric string) (string, string) {
	parts := strings.Split(metric, ".")
	if len(parts) == 1 {
		return prefix, parts[0]
	}
	return prefix + e.Parent.Separator + parts[0], strings.Join(parts[1:], e.Parent.Separator)
}

func cleanGuestID(id string) string {
	return strings.TrimSuffix(id, "Guest")
}

func cleanDiskTag(disk string) string {
	// Remove enclosing "<>"
	return strings.TrimSuffix(strings.TrimPrefix(disk, "<"), ">")
}

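// round rounds half away from zero. It is used when Parent.UseIntSamples is set and float
// samples need to be emitted as integers.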
func round(x float64) float64 {
	t := math.Trunc(x)
	if math.Abs(x-t) >= 0.5 {
		return t + math.Copysign(1, x)
	}
	return t
}