2016-03-25 22:16:23 +00:00
|
|
|
package cloudwatch
|
|
|
|
|
|
|
|
import (
|
2019-04-23 00:36:46 +00:00
|
|
|
"errors"
|
2016-03-25 22:16:23 +00:00
|
|
|
"fmt"
|
2019-04-23 00:36:46 +00:00
|
|
|
"strconv"
|
2016-03-25 22:16:23 +00:00
|
|
|
"strings"
|
2016-06-02 11:34:03 +00:00
|
|
|
"sync"
|
2016-03-25 22:16:23 +00:00
|
|
|
"time"
|
|
|
|
|
|
|
|
"github.com/aws/aws-sdk-go/aws"
|
|
|
|
"github.com/aws/aws-sdk-go/service/cloudwatch"
|
|
|
|
|
|
|
|
"github.com/influxdata/telegraf"
|
2019-04-23 00:36:46 +00:00
|
|
|
"github.com/influxdata/telegraf/filter"
|
2016-03-25 22:16:23 +00:00
|
|
|
"github.com/influxdata/telegraf/internal"
|
2016-05-25 11:30:39 +00:00
|
|
|
internalaws "github.com/influxdata/telegraf/internal/config/aws"
|
2016-05-24 13:50:01 +00:00
|
|
|
"github.com/influxdata/telegraf/internal/limiter"
|
2019-04-23 00:36:46 +00:00
|
|
|
"github.com/influxdata/telegraf/metric"
|
2016-03-25 22:16:23 +00:00
|
|
|
"github.com/influxdata/telegraf/plugins/inputs"
|
|
|
|
)
|
|
|
|
|
|
|
|
type (
	// CloudWatch contains the configuration and cache for the cloudwatch plugin.
	CloudWatch struct {
		// AWS connection / credential settings. See SampleConfig for the
		// order in which credentials are resolved.
		Region         string `toml:"region"`                 // AWS region to query, e.g. "us-east-1"
		AccessKey      string `toml:"access_key"`             // explicit access key (optional)
		SecretKey      string `toml:"secret_key"`             // explicit secret key (optional)
		RoleARN        string `toml:"role_arn"`               // role to assume via STS (optional)
		Profile        string `toml:"profile"`                // shared-credentials profile name (optional)
		CredentialPath string `toml:"shared_credential_file"` // path to a shared credentials file (optional)
		Token          string `toml:"token"`                  // session token (optional)
		EndpointURL    string `toml:"endpoint_url"`           // override the automatically determined endpoint (optional)

		// Namespace-wide statistic filters; per-metric filters in Metrics
		// fall back to these when unset.
		StatisticExclude []string `toml:"statistic_exclude"`
		StatisticInclude []string `toml:"statistic_include"`

		Period    internal.Duration `toml:"period"`    // requested CloudWatch aggregation period (multiple of 60s)
		Delay     internal.Duration `toml:"delay"`     // collection delay to account for metric availability lag
		Namespace string            `toml:"namespace"` // CloudWatch namespace to query, e.g. "AWS/ELB"
		Metrics   []*Metric         `toml:"metrics"`   // optional explicit metric list; defaults to all in Namespace
		CacheTTL  internal.Duration `toml:"cache_ttl"` // TTL for the internal metric/query cache
		RateLimit int               `toml:"ratelimit"` // maximum API requests per second

		client          cloudwatchClient            // CloudWatch API client, lazily initialized on first Gather
		statFilter      filter.Filter               // compiled namespace-wide statistic include/exclude filter
		metricCache     *metricCache                // cached metric list and generated queries
		queryDimensions map[string]*map[string]string // query id -> dimension tags, for result aggregation
		windowStart     time.Time                   // start of the current collection window
		windowEnd       time.Time                   // end of the current collection window
	}

	// Metric defines a simplified Cloudwatch metric.
	Metric struct {
		// Pointers so that "unset" is distinguishable from "empty"; nil
		// falls back to the plugin-level filters.
		StatisticExclude *[]string    `toml:"statistic_exclude"`
		StatisticInclude *[]string    `toml:"statistic_include"`
		MetricNames      []string     `toml:"names"`
		Dimensions       []*Dimension `toml:"dimensions"`
	}

	// Dimension defines a simplified Cloudwatch dimension (provides metric filtering).
	Dimension struct {
		Name  string `toml:"name"`
		Value string `toml:"value"` // "" or "*" act as wildcards (see hasWilcard/isSelected)
	}

	// metricCache caches metrics, their filters, and generated queries.
	metricCache struct {
		ttl     time.Duration                 // how long a built cache remains valid
		built   time.Time                     // when the cache was last (re)built
		metrics []filteredMetric              // cached metric lists with their statistic filters
		queries []*cloudwatch.MetricDataQuery // cached GetMetricData queries derived from metrics
	}

	// cloudwatchClient is the subset of the AWS CloudWatch API this
	// plugin uses; narrowed to an interface for testability.
	cloudwatchClient interface {
		ListMetrics(*cloudwatch.ListMetricsInput) (*cloudwatch.ListMetricsOutput, error)
		GetMetricData(*cloudwatch.GetMetricDataInput) (*cloudwatch.GetMetricDataOutput, error)
	}
)
|
|
|
|
|
2019-04-23 00:36:46 +00:00
|
|
|
// SampleConfig returns the default configuration of the Cloudwatch input plugin.
|
2016-03-25 22:16:23 +00:00
|
|
|
func (c *CloudWatch) SampleConfig() string {
|
|
|
|
return `
|
|
|
|
## Amazon Region
|
2016-11-04 13:16:41 +00:00
|
|
|
region = "us-east-1"
|
2016-03-25 22:16:23 +00:00
|
|
|
|
2016-04-23 18:19:04 +00:00
|
|
|
## Amazon Credentials
|
|
|
|
## Credentials are loaded in the following order
|
2016-05-25 11:30:39 +00:00
|
|
|
## 1) Assumed credentials via STS if role_arn is specified
|
|
|
|
## 2) explicit credentials from 'access_key' and 'secret_key'
|
|
|
|
## 3) shared profile from 'profile'
|
|
|
|
## 4) environment variables
|
|
|
|
## 5) shared credentials file
|
|
|
|
## 6) EC2 Instance Profile
|
2019-04-23 00:36:46 +00:00
|
|
|
# access_key = ""
|
|
|
|
# secret_key = ""
|
|
|
|
# token = ""
|
|
|
|
# role_arn = ""
|
|
|
|
# profile = ""
|
|
|
|
# shared_credential_file = ""
|
2016-04-23 18:19:04 +00:00
|
|
|
|
2018-07-31 22:07:21 +00:00
|
|
|
## Endpoint to make request against, the correct endpoint is automatically
|
|
|
|
## determined and this option should only be set if you wish to override the
|
|
|
|
## default.
|
|
|
|
## ex: endpoint_url = "http://localhost:8000"
|
|
|
|
# endpoint_url = ""
|
|
|
|
|
2016-11-07 12:14:04 +00:00
|
|
|
# The minimum period for Cloudwatch metrics is 1 minute (60s). However not all
|
|
|
|
# metrics are made available to the 1 minute period. Some are collected at
|
2017-05-08 18:29:26 +00:00
|
|
|
# 3 minute, 5 minute, or larger intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring.
|
2016-11-07 12:14:04 +00:00
|
|
|
# Note that if a period is configured that is smaller than the minimum for a
|
|
|
|
# particular metric, that metric will not be returned by the Cloudwatch API
|
|
|
|
# and will not be collected by Telegraf.
|
|
|
|
#
|
2016-03-25 22:16:23 +00:00
|
|
|
## Requested CloudWatch aggregation Period (required - must be a multiple of 60s)
|
2016-11-07 12:14:04 +00:00
|
|
|
period = "5m"
|
2016-03-25 22:16:23 +00:00
|
|
|
|
|
|
|
## Collection Delay (required - must account for metrics availability via CloudWatch API)
|
2016-11-07 12:14:04 +00:00
|
|
|
delay = "5m"
|
2016-03-25 22:16:23 +00:00
|
|
|
|
2017-11-01 00:00:06 +00:00
|
|
|
## Recommended: use metric 'interval' that is a multiple of 'period' to avoid
|
2016-03-25 22:16:23 +00:00
|
|
|
## gaps or overlap in pulled data
|
2016-11-07 12:14:04 +00:00
|
|
|
interval = "5m"
|
2016-03-25 22:16:23 +00:00
|
|
|
|
2016-05-25 11:30:39 +00:00
|
|
|
## Configure the TTL for the internal cache of metrics.
|
2019-04-23 00:36:46 +00:00
|
|
|
# cache_ttl = "1h"
|
2016-05-25 11:30:39 +00:00
|
|
|
|
2016-03-25 22:16:23 +00:00
|
|
|
## Metric Statistic Namespace (required)
|
2016-11-04 13:16:41 +00:00
|
|
|
namespace = "AWS/ELB"
|
2016-03-25 22:16:23 +00:00
|
|
|
|
2016-08-26 01:04:29 +00:00
|
|
|
## Maximum requests per second. Note that the global default AWS rate limit is
|
2019-04-23 00:36:46 +00:00
|
|
|
## 50 reqs/sec, so if you define multiple namespaces, these should add up to a
|
|
|
|
## maximum of 50.
|
2017-03-15 22:20:18 +00:00
|
|
|
## See http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_limits.html
|
2019-04-23 00:36:46 +00:00
|
|
|
# ratelimit = 25
|
|
|
|
|
|
|
|
## Namespace-wide statistic filters. These allow fewer queries to be made to
|
|
|
|
## cloudwatch.
|
2019-05-03 17:05:06 +00:00
|
|
|
# statistic_include = [ "average", "sum", "minimum", "maximum", sample_count" ]
|
2019-04-23 00:36:46 +00:00
|
|
|
# statistic_exclude = []
|
2016-08-26 01:04:29 +00:00
|
|
|
|
2019-04-23 00:36:46 +00:00
|
|
|
## Metrics to Pull
|
2016-03-25 22:16:23 +00:00
|
|
|
## Defaults to all Metrics in Namespace if nothing is provided
|
|
|
|
## Refreshes Namespace available metrics every 1h
|
|
|
|
#[[inputs.cloudwatch.metrics]]
|
2016-11-04 13:16:41 +00:00
|
|
|
# names = ["Latency", "RequestCount"]
|
2016-04-22 16:23:00 +00:00
|
|
|
#
|
2019-04-23 00:36:46 +00:00
|
|
|
# ## Statistic filters for Metric. These allow for retrieving specific
|
|
|
|
# ## statistics for an individual metric.
|
2019-05-03 17:05:06 +00:00
|
|
|
# # statistic_include = [ "average", "sum", "minimum", "maximum", sample_count" ]
|
2019-04-23 00:36:46 +00:00
|
|
|
# # statistic_exclude = []
|
|
|
|
#
|
|
|
|
# ## Dimension filters for Metric. All dimensions defined for the metric names
|
|
|
|
# ## must be specified in order to retrieve the metric statistics.
|
2016-03-25 22:16:23 +00:00
|
|
|
# [[inputs.cloudwatch.metrics.dimensions]]
|
2016-11-04 13:16:41 +00:00
|
|
|
# name = "LoadBalancerName"
|
|
|
|
# value = "p-example"
|
2016-03-25 22:16:23 +00:00
|
|
|
`
|
|
|
|
}
|
|
|
|
|
2019-04-23 00:36:46 +00:00
|
|
|
// Description returns a one-sentence description on the Cloudwatch input plugin.
|
2016-03-25 22:16:23 +00:00
|
|
|
func (c *CloudWatch) Description() string {
|
|
|
|
return "Pull Metric Statistics from Amazon CloudWatch"
|
|
|
|
}
|
|
|
|
|
2019-04-23 00:36:46 +00:00
|
|
|
// Gather takes in an accumulator and adds the metrics that the Input
|
|
|
|
// gathers. This is called every "interval".
|
|
|
|
func (c *CloudWatch) Gather(acc telegraf.Accumulator) error {
|
|
|
|
if c.statFilter == nil {
|
|
|
|
var err error
|
|
|
|
// Set config level filter (won't change throughout life of plugin).
|
|
|
|
c.statFilter, err = filter.NewIncludeExcludeFilter(c.StatisticInclude, c.StatisticExclude)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if c.client == nil {
|
|
|
|
c.initializeCloudWatch()
|
|
|
|
}
|
|
|
|
|
|
|
|
filteredMetrics, err := getFilteredMetrics(c)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
err = c.updateWindow(time.Now())
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Get all of the possible queries so we can send groups of 100.
|
|
|
|
queries, err := c.getDataQueries(filteredMetrics)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Limit concurrency or we can easily exhaust user connection limit.
|
|
|
|
// See cloudwatch API request limits:
|
|
|
|
// http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_limits.html
|
|
|
|
lmtr := limiter.NewRateLimiter(c.RateLimit, time.Second)
|
|
|
|
defer lmtr.Stop()
|
|
|
|
wg := sync.WaitGroup{}
|
|
|
|
rLock := sync.Mutex{}
|
|
|
|
|
|
|
|
results := []*cloudwatch.MetricDataResult{}
|
|
|
|
|
|
|
|
// 100 is the maximum number of metric data queries a `GetMetricData` request can contain.
|
|
|
|
batchSize := 100
|
|
|
|
var batches [][]*cloudwatch.MetricDataQuery
|
|
|
|
|
|
|
|
for batchSize < len(queries) {
|
|
|
|
queries, batches = queries[batchSize:], append(batches, queries[0:batchSize:batchSize])
|
|
|
|
}
|
|
|
|
batches = append(batches, queries)
|
|
|
|
|
|
|
|
for i := range batches {
|
|
|
|
wg.Add(1)
|
|
|
|
<-lmtr.C
|
|
|
|
go func(inm []*cloudwatch.MetricDataQuery) {
|
|
|
|
defer wg.Done()
|
|
|
|
result, err := c.gatherMetrics(c.getDataInputs(inm))
|
|
|
|
if err != nil {
|
|
|
|
acc.AddError(err)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
rLock.Lock()
|
|
|
|
results = append(results, result...)
|
|
|
|
rLock.Unlock()
|
|
|
|
}(batches[i])
|
|
|
|
}
|
|
|
|
|
|
|
|
wg.Wait()
|
|
|
|
|
|
|
|
return c.aggregateMetrics(acc, results)
|
|
|
|
}
|
|
|
|
|
|
|
|
func (c *CloudWatch) initializeCloudWatch() error {
|
|
|
|
credentialConfig := &internalaws.CredentialConfig{
|
|
|
|
Region: c.Region,
|
|
|
|
AccessKey: c.AccessKey,
|
|
|
|
SecretKey: c.SecretKey,
|
|
|
|
RoleARN: c.RoleARN,
|
|
|
|
Profile: c.Profile,
|
|
|
|
Filename: c.CredentialPath,
|
|
|
|
Token: c.Token,
|
|
|
|
EndpointURL: c.EndpointURL,
|
|
|
|
}
|
|
|
|
configProvider := credentialConfig.Credentials()
|
|
|
|
|
|
|
|
cfg := &aws.Config{}
|
|
|
|
loglevel := aws.LogOff
|
|
|
|
c.client = cloudwatch.New(configProvider, cfg.WithLogLevel(loglevel))
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// filteredMetric pairs a set of CloudWatch metrics with the statistic
// filter (include/exclude) that applies to them.
type filteredMetric struct {
	metrics    []*cloudwatch.Metric // metrics to query
	statFilter filter.Filter        // which statistics (average, sum, ...) to request
}
|
|
|
|
|
|
|
|
// getFilteredMetrics returns metrics specified in the config file or metrics listed from Cloudwatch.
// Results are cached on c.metricCache for c.CacheTTL to avoid repeated
// ListMetrics calls; a valid cache short-circuits the whole function.
func getFilteredMetrics(c *CloudWatch) ([]filteredMetric, error) {
	if c.metricCache != nil && c.metricCache.isValid() {
		return c.metricCache.metrics, nil
	}

	fMetrics := []filteredMetric{}

	// check for provided metric filter
	if c.Metrics != nil {
		for _, m := range c.Metrics {
			metrics := []*cloudwatch.Metric{}
			if !hasWilcard(m.Dimensions) {
				// All dimensions are concrete: build the metric list
				// directly from config, no API call needed.
				dimensions := make([]*cloudwatch.Dimension, len(m.Dimensions))
				for k, d := range m.Dimensions {
					dimensions[k] = &cloudwatch.Dimension{
						Name:  aws.String(d.Name),
						Value: aws.String(d.Value),
					}
				}
				for _, name := range m.MetricNames {
					metrics = append(metrics, &cloudwatch.Metric{
						Namespace:  aws.String(c.Namespace),
						MetricName: aws.String(name),
						Dimensions: dimensions,
					})
				}
			} else {
				// At least one dimension is a wildcard ("" or "*"): list
				// everything in the namespace and match against the filter.
				allMetrics, err := c.fetchNamespaceMetrics()
				if err != nil {
					return nil, err
				}
				for _, name := range m.MetricNames {
					for _, metric := range allMetrics {
						if isSelected(name, metric, m.Dimensions) {
							metrics = append(metrics, &cloudwatch.Metric{
								Namespace:  aws.String(c.Namespace),
								MetricName: aws.String(name),
								Dimensions: metric.Dimensions,
							})
						}
					}
				}
			}

			// Per-metric statistic filters fall back to the plugin-level
			// include/exclude lists when left unset (nil pointers).
			if m.StatisticExclude == nil {
				m.StatisticExclude = &c.StatisticExclude
			}
			if m.StatisticInclude == nil {
				m.StatisticInclude = &c.StatisticInclude
			}
			statFilter, err := filter.NewIncludeExcludeFilter(*m.StatisticInclude, *m.StatisticExclude)
			if err != nil {
				return nil, err
			}

			fMetrics = append(fMetrics, filteredMetric{
				metrics:    metrics,
				statFilter: statFilter,
			})
		}
	} else {
		// No explicit metric list: collect every metric in the namespace
		// under the plugin-level statistic filter.
		metrics, err := c.fetchNamespaceMetrics()
		if err != nil {
			return nil, err
		}

		fMetrics = []filteredMetric{{
			metrics:    metrics,
			statFilter: c.statFilter,
		}}
	}

	// Rebuild the cache; queries are (re)attached later by getDataQueries.
	c.metricCache = &metricCache{
		metrics: fMetrics,
		built:   time.Now(),
		ttl:     c.CacheTTL.Duration,
	}

	return fMetrics, nil
}
|
2016-03-25 22:16:23 +00:00
|
|
|
|
2019-04-23 00:36:46 +00:00
|
|
|
// fetchNamespaceMetrics retrieves available metrics for a given CloudWatch namespace.
|
|
|
|
func (c *CloudWatch) fetchNamespaceMetrics() ([]*cloudwatch.Metric, error) {
|
|
|
|
metrics := []*cloudwatch.Metric{}
|
|
|
|
|
|
|
|
var token *string
|
|
|
|
params := &cloudwatch.ListMetricsInput{
|
|
|
|
Namespace: aws.String(c.Namespace),
|
|
|
|
Dimensions: []*cloudwatch.DimensionFilter{},
|
|
|
|
NextToken: token,
|
|
|
|
MetricName: nil,
|
2018-09-11 21:59:39 +00:00
|
|
|
}
|
|
|
|
|
2019-04-23 00:36:46 +00:00
|
|
|
for {
|
|
|
|
resp, err := c.client.ListMetrics(params)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
metrics = append(metrics, resp.Metrics...)
|
|
|
|
if resp.NextToken == nil {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
|
|
|
|
params.NextToken = resp.NextToken
|
2016-03-25 22:16:23 +00:00
|
|
|
}
|
|
|
|
|
2019-04-23 00:36:46 +00:00
|
|
|
return metrics, nil
|
2016-03-25 22:16:23 +00:00
|
|
|
}
|
|
|
|
|
2018-09-11 21:59:39 +00:00
|
|
|
func (c *CloudWatch) updateWindow(relativeTo time.Time) error {
|
|
|
|
windowEnd := relativeTo.Add(-c.Delay.Duration)
|
|
|
|
|
|
|
|
if c.windowEnd.IsZero() {
|
|
|
|
// this is the first run, no window info, so just get a single period
|
|
|
|
c.windowStart = windowEnd.Add(-c.Period.Duration)
|
|
|
|
} else {
|
|
|
|
// subsequent window, start where last window left off
|
|
|
|
c.windowStart = c.windowEnd
|
|
|
|
}
|
|
|
|
|
|
|
|
c.windowEnd = windowEnd
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2019-04-23 00:36:46 +00:00
|
|
|
// getDataQueries gets all of the possible queries so we can maximize the request payload.
|
|
|
|
func (c *CloudWatch) getDataQueries(filteredMetrics []filteredMetric) ([]*cloudwatch.MetricDataQuery, error) {
|
|
|
|
if c.metricCache != nil && c.metricCache.queries != nil && c.metricCache.isValid() {
|
|
|
|
return c.metricCache.queries, nil
|
|
|
|
}
|
2016-03-25 22:16:23 +00:00
|
|
|
|
2019-04-23 00:36:46 +00:00
|
|
|
c.queryDimensions = map[string]*map[string]string{}
|
|
|
|
|
|
|
|
dataQueries := []*cloudwatch.MetricDataQuery{}
|
|
|
|
for i, filtered := range filteredMetrics {
|
|
|
|
for j, metric := range filtered.metrics {
|
|
|
|
id := strconv.Itoa(j) + "_" + strconv.Itoa(i)
|
|
|
|
dimension := ctod(metric.Dimensions)
|
|
|
|
if filtered.statFilter.Match("average") {
|
|
|
|
c.queryDimensions["average_"+id] = dimension
|
|
|
|
dataQueries = append(dataQueries, &cloudwatch.MetricDataQuery{
|
|
|
|
Id: aws.String("average_" + id),
|
|
|
|
Label: aws.String(snakeCase(*metric.MetricName + "_average")),
|
|
|
|
MetricStat: &cloudwatch.MetricStat{
|
|
|
|
Metric: metric,
|
|
|
|
Period: aws.Int64(int64(c.Period.Duration.Seconds())),
|
|
|
|
Stat: aws.String(cloudwatch.StatisticAverage),
|
|
|
|
},
|
|
|
|
})
|
|
|
|
}
|
|
|
|
if filtered.statFilter.Match("maximum") {
|
|
|
|
c.queryDimensions["maximum_"+id] = dimension
|
|
|
|
dataQueries = append(dataQueries, &cloudwatch.MetricDataQuery{
|
|
|
|
Id: aws.String("maximum_" + id),
|
|
|
|
Label: aws.String(snakeCase(*metric.MetricName + "_maximum")),
|
|
|
|
MetricStat: &cloudwatch.MetricStat{
|
|
|
|
Metric: metric,
|
|
|
|
Period: aws.Int64(int64(c.Period.Duration.Seconds())),
|
|
|
|
Stat: aws.String(cloudwatch.StatisticMaximum),
|
|
|
|
},
|
|
|
|
})
|
|
|
|
}
|
|
|
|
if filtered.statFilter.Match("minimum") {
|
|
|
|
c.queryDimensions["minimum_"+id] = dimension
|
|
|
|
dataQueries = append(dataQueries, &cloudwatch.MetricDataQuery{
|
|
|
|
Id: aws.String("minimum_" + id),
|
|
|
|
Label: aws.String(snakeCase(*metric.MetricName + "_minimum")),
|
|
|
|
MetricStat: &cloudwatch.MetricStat{
|
|
|
|
Metric: metric,
|
|
|
|
Period: aws.Int64(int64(c.Period.Duration.Seconds())),
|
|
|
|
Stat: aws.String(cloudwatch.StatisticMinimum),
|
|
|
|
},
|
|
|
|
})
|
|
|
|
}
|
|
|
|
if filtered.statFilter.Match("sum") {
|
|
|
|
c.queryDimensions["sum_"+id] = dimension
|
|
|
|
dataQueries = append(dataQueries, &cloudwatch.MetricDataQuery{
|
|
|
|
Id: aws.String("sum_" + id),
|
|
|
|
Label: aws.String(snakeCase(*metric.MetricName + "_sum")),
|
|
|
|
MetricStat: &cloudwatch.MetricStat{
|
|
|
|
Metric: metric,
|
|
|
|
Period: aws.Int64(int64(c.Period.Duration.Seconds())),
|
|
|
|
Stat: aws.String(cloudwatch.StatisticSum),
|
|
|
|
},
|
|
|
|
})
|
|
|
|
}
|
|
|
|
if filtered.statFilter.Match("sample_count") {
|
|
|
|
c.queryDimensions["sample_count_"+id] = dimension
|
|
|
|
dataQueries = append(dataQueries, &cloudwatch.MetricDataQuery{
|
|
|
|
Id: aws.String("sample_count_" + id),
|
|
|
|
Label: aws.String(snakeCase(*metric.MetricName + "_sample_count")),
|
|
|
|
MetricStat: &cloudwatch.MetricStat{
|
|
|
|
Metric: metric,
|
|
|
|
Period: aws.Int64(int64(c.Period.Duration.Seconds())),
|
|
|
|
Stat: aws.String(cloudwatch.StatisticSampleCount),
|
|
|
|
},
|
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
2016-04-23 18:19:04 +00:00
|
|
|
}
|
2016-03-25 22:16:23 +00:00
|
|
|
|
2019-04-23 00:36:46 +00:00
|
|
|
if len(dataQueries) == 0 {
|
|
|
|
return nil, errors.New("no metrics found to collect")
|
|
|
|
}
|
2016-03-25 22:16:23 +00:00
|
|
|
|
2019-04-23 00:36:46 +00:00
|
|
|
if c.metricCache == nil {
|
|
|
|
c.metricCache = &metricCache{
|
|
|
|
queries: dataQueries,
|
|
|
|
built: time.Now(),
|
|
|
|
ttl: c.CacheTTL.Duration,
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
c.metricCache.queries = dataQueries
|
2016-03-25 22:16:23 +00:00
|
|
|
}
|
|
|
|
|
2019-04-23 00:36:46 +00:00
|
|
|
return dataQueries, nil
|
|
|
|
}
|
2016-03-25 22:16:23 +00:00
|
|
|
|
2019-04-23 00:36:46 +00:00
|
|
|
// gatherMetrics gets metric data from Cloudwatch.
|
|
|
|
func (c *CloudWatch) gatherMetrics(
|
|
|
|
params *cloudwatch.GetMetricDataInput,
|
|
|
|
) ([]*cloudwatch.MetricDataResult, error) {
|
|
|
|
results := []*cloudwatch.MetricDataResult{}
|
2016-03-25 22:16:23 +00:00
|
|
|
|
2019-04-23 00:36:46 +00:00
|
|
|
for {
|
|
|
|
resp, err := c.client.GetMetricData(params)
|
2016-03-25 22:16:23 +00:00
|
|
|
if err != nil {
|
2019-04-23 00:36:46 +00:00
|
|
|
return nil, fmt.Errorf("failed to get metric data: %v", err)
|
2016-03-25 22:16:23 +00:00
|
|
|
}
|
|
|
|
|
2019-04-23 00:36:46 +00:00
|
|
|
results = append(results, resp.MetricDataResults...)
|
|
|
|
if resp.NextToken == nil {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
params.NextToken = resp.NextToken
|
2016-03-25 22:16:23 +00:00
|
|
|
}
|
|
|
|
|
2019-04-23 00:36:46 +00:00
|
|
|
return results, nil
|
2016-03-25 22:16:23 +00:00
|
|
|
}
|
|
|
|
|
2019-04-23 00:36:46 +00:00
|
|
|
func (c *CloudWatch) aggregateMetrics(
|
2016-05-24 13:50:01 +00:00
|
|
|
acc telegraf.Accumulator,
|
2019-04-23 00:36:46 +00:00
|
|
|
metricDataResults []*cloudwatch.MetricDataResult,
|
2017-04-24 18:13:26 +00:00
|
|
|
) error {
|
2019-04-23 00:36:46 +00:00
|
|
|
var (
|
|
|
|
grouper = metric.NewSeriesGrouper()
|
|
|
|
namespace = sanitizeMeasurement(c.Namespace)
|
|
|
|
)
|
2016-03-25 22:16:23 +00:00
|
|
|
|
2019-04-23 00:36:46 +00:00
|
|
|
for _, result := range metricDataResults {
|
|
|
|
tags := map[string]string{}
|
2016-03-25 22:16:23 +00:00
|
|
|
|
2019-04-23 00:36:46 +00:00
|
|
|
if dimensions, ok := c.queryDimensions[*result.Id]; ok {
|
|
|
|
tags = *dimensions
|
2016-03-25 22:16:23 +00:00
|
|
|
}
|
2019-04-23 00:36:46 +00:00
|
|
|
tags["region"] = c.Region
|
2016-03-25 22:16:23 +00:00
|
|
|
|
2019-04-23 00:36:46 +00:00
|
|
|
for i := range result.Values {
|
|
|
|
grouper.Add(namespace, tags, *result.Timestamps[i], *result.Label, *result.Values[i])
|
2016-03-25 22:16:23 +00:00
|
|
|
}
|
2019-04-23 00:36:46 +00:00
|
|
|
}
|
2016-03-25 22:16:23 +00:00
|
|
|
|
2019-04-23 00:36:46 +00:00
|
|
|
for _, metric := range grouper.Metrics() {
|
|
|
|
acc.AddMetric(metric)
|
2016-03-25 22:16:23 +00:00
|
|
|
}
|
|
|
|
|
2017-04-24 18:13:26 +00:00
|
|
|
return nil
|
2016-03-25 22:16:23 +00:00
|
|
|
}
|
|
|
|
|
2019-04-23 00:36:46 +00:00
|
|
|
func init() {
|
|
|
|
inputs.Add("cloudwatch", func() telegraf.Input {
|
|
|
|
return &CloudWatch{
|
|
|
|
CacheTTL: internal.Duration{Duration: time.Hour},
|
|
|
|
RateLimit: 25,
|
|
|
|
}
|
|
|
|
})
|
2016-03-25 22:16:23 +00:00
|
|
|
}
|
|
|
|
|
2019-04-23 00:36:46 +00:00
|
|
|
func sanitizeMeasurement(namespace string) string {
|
2016-03-25 22:16:23 +00:00
|
|
|
namespace = strings.Replace(namespace, "/", "_", -1)
|
|
|
|
namespace = snakeCase(namespace)
|
2019-04-23 00:36:46 +00:00
|
|
|
return "cloudwatch_" + namespace
|
2016-03-25 22:16:23 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func snakeCase(s string) string {
|
|
|
|
s = internal.SnakeCase(s)
|
2019-04-23 00:36:46 +00:00
|
|
|
s = strings.Replace(s, " ", "_", -1)
|
2016-03-25 22:16:23 +00:00
|
|
|
s = strings.Replace(s, "__", "_", -1)
|
|
|
|
return s
|
|
|
|
}
|
|
|
|
|
2019-04-23 00:36:46 +00:00
|
|
|
// dimension is a plain name/value pair; a value-based counterpart to
// cloudwatch.Dimension. NOTE(review): appears unused within this file's
// visible portion — confirm usage elsewhere before removing.
type dimension struct {
	name  string
	value string
}
|
|
|
|
|
|
|
|
// ctod converts cloudwatch dimensions to regular dimensions.
|
|
|
|
func ctod(cDimensions []*cloudwatch.Dimension) *map[string]string {
|
|
|
|
dimensions := map[string]string{}
|
|
|
|
for i := range cDimensions {
|
|
|
|
dimensions[snakeCase(*cDimensions[i].Name)] = *cDimensions[i].Value
|
|
|
|
}
|
|
|
|
return &dimensions
|
|
|
|
}
|
|
|
|
|
|
|
|
func (c *CloudWatch) getDataInputs(dataQueries []*cloudwatch.MetricDataQuery) *cloudwatch.GetMetricDataInput {
|
|
|
|
return &cloudwatch.GetMetricDataInput{
|
|
|
|
StartTime: aws.Time(c.windowStart),
|
|
|
|
EndTime: aws.Time(c.windowEnd),
|
|
|
|
MetricDataQueries: dataQueries,
|
|
|
|
}
|
2016-03-25 22:16:23 +00:00
|
|
|
}
|
|
|
|
|
2019-04-23 00:36:46 +00:00
|
|
|
// isValid checks the validity of the metric cache.
|
|
|
|
func (f *metricCache) isValid() bool {
|
|
|
|
return f.metrics != nil && time.Since(f.built) < f.ttl
|
2016-03-25 22:16:23 +00:00
|
|
|
}
|
2016-05-25 11:30:39 +00:00
|
|
|
|
|
|
|
func hasWilcard(dimensions []*Dimension) bool {
|
|
|
|
for _, d := range dimensions {
|
|
|
|
if d.Value == "" || d.Value == "*" {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
2016-12-13 14:13:53 +00:00
|
|
|
func isSelected(name string, metric *cloudwatch.Metric, dimensions []*Dimension) bool {
|
|
|
|
if name != *metric.MetricName {
|
|
|
|
return false
|
|
|
|
}
|
2016-05-25 11:30:39 +00:00
|
|
|
if len(metric.Dimensions) != len(dimensions) {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
for _, d := range dimensions {
|
|
|
|
selected := false
|
|
|
|
for _, d2 := range metric.Dimensions {
|
|
|
|
if d.Name == *d2.Name {
|
|
|
|
if d.Value == "" || d.Value == "*" || d.Value == *d2.Value {
|
|
|
|
selected = true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if !selected {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return true
|
|
|
|
}
|