package config

import (
	"bytes"
	"errors"
	"fmt"
	"io/ioutil"
	"log"
	"math"
	"net/http"
	"net/url"
	"os"
	"path/filepath"
	"regexp"
	"runtime"
	"sort"
	"strconv"
	"strings"
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/internal"
	"github.com/influxdata/telegraf/models"
	"github.com/influxdata/telegraf/plugins/aggregators"
	"github.com/influxdata/telegraf/plugins/inputs"
	"github.com/influxdata/telegraf/plugins/outputs"
	"github.com/influxdata/telegraf/plugins/parsers"
	"github.com/influxdata/telegraf/plugins/processors"
	"github.com/influxdata/telegraf/plugins/serializers"
	"github.com/influxdata/toml"
	"github.com/influxdata/toml/ast"
)

var (
	// Default sections
	sectionDefaults = []string{"global_tags", "agent", "outputs",
		"processors", "aggregators", "inputs"}

	// Default input plugins
	inputDefaults = []string{"cpu", "mem", "swap", "system", "kernel",
		"processes", "disk", "diskio"}

	// Default output plugins
	outputDefaults = []string{"influxdb"}

	// envVarRe is a regex to find environment variables in the config file
	envVarRe = regexp.MustCompile(`\$\{(\w+)\}|\$(\w+)`)

	envVarEscaper = strings.NewReplacer(
		`"`, `\"`,
		`\`, `\\`,
	)
)

// Config specifies the agent settings, the global tags, and all of the
// plugins (inputs, outputs, processors, and aggregators) that the user has
// configured.
type Config struct {
	Tags          map[string]string
	InputFilters  []string
	OutputFilters []string

	Agent       *AgentConfig
	Inputs      []*models.RunningInput
	Outputs     []*models.RunningOutput
	Aggregators []*models.RunningAggregator
	// Processors have a slice wrapper type because they need to be sorted
	Processors    models.RunningProcessors
	AggProcessors models.RunningProcessors
}

// NewConfig returns a Config with the agent defaults applied.
func NewConfig() *Config {
	c := &Config{
		// Agent defaults:
		Agent: &AgentConfig{
			Interval:                   internal.Duration{Duration: 10 * time.Second},
			RoundInterval:              true,
			FlushInterval:              internal.Duration{Duration: 10 * time.Second},
			LogTarget:                  "file",
			LogfileRotationMaxArchives: 5,
		},

		Tags:          make(map[string]string),
		Inputs:        make([]*models.RunningInput, 0),
		Outputs:       make([]*models.RunningOutput, 0),
		Processors:    make([]*models.RunningProcessor, 0),
		AggProcessors: make([]*models.RunningProcessor, 0),
		InputFilters:  make([]string, 0),
		OutputFilters: make([]string, 0),
	}
	return c
}
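
// A minimal usage sketch (illustrative only, not part of this file): callers
// such as the telegraf agent typically build a Config with NewConfig, set any
// plugin filters, and then load a file:
//
//	c := config.NewConfig()
//	c.InputFilters = []string{"cpu"}
//	if err := c.LoadConfig("/etc/telegraf/telegraf.conf"); err != nil {
//		log.Fatal(err)
//	}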

// AgentConfig holds the agent-level settings from the [agent] table.
type AgentConfig struct {
	// Interval at which to gather information
	Interval internal.Duration

	// RoundInterval rounds collection interval to 'interval'.
	// ie, if Interval=10s then always collect on :00, :10, :20, etc.
	RoundInterval bool

	// By default or when set to "0s", precision will be set to the same
	// timestamp order as the collection interval, with the maximum being 1s.
	// ie, when interval = "10s", precision will be "1s"
	//     when interval = "250ms", precision will be "1ms"
	// Precision will NOT be used for service inputs. It is up to each individual
	// service input to set the timestamp at the appropriate precision.
	Precision internal.Duration

	// CollectionJitter is used to jitter the collection by a random amount.
	// Each plugin will sleep for a random time within jitter before collecting.
	// This can be used to avoid many plugins querying things like sysfs at the
	// same time, which can have a measurable effect on the system.
	CollectionJitter internal.Duration

	// FlushInterval is the interval at which to flush data
	FlushInterval internal.Duration

	// FlushJitter jitters the flush interval by a random amount.
	// This is primarily to avoid large write spikes for users running a large
	// number of telegraf instances.
	// ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
	FlushJitter internal.Duration

	// MetricBatchSize is the maximum number of metrics that is written to an
	// output plugin in one call.
	MetricBatchSize int

	// MetricBufferLimit is the max number of metrics that each output plugin
	// will cache. The buffer is cleared when a successful write occurs. When
	// full, the oldest metrics will be overwritten. This number should be a
	// multiple of MetricBatchSize and, with the current implementation, must
	// not be less than 2 times MetricBatchSize.
	MetricBufferLimit int

	// FlushBufferWhenFull tells Telegraf to flush the metric buffer whenever
	// it fills up, regardless of FlushInterval. Setting this option to true
	// does _not_ deactivate FlushInterval.
	FlushBufferWhenFull bool

	// TODO(cam): Remove UTC; it is no longer valid for the agent config.
	// Leaving it here for now for backwards-compatibility.
	UTC bool `toml:"utc"`

	// Debug is the option for running in debug mode
	Debug bool `toml:"debug"`

	// Quiet is the option for running in quiet mode
	Quiet bool `toml:"quiet"`

	// Log target controls the destination for logs and can be one of "file",
	// "stderr" or, on Windows, "eventlog". When set to "file", the output file
	// is determined by the "logfile" setting.
	LogTarget string `toml:"logtarget"`

	// Name of the file to be logged to when using the "file" logtarget. If set to
	// the empty string then logs are written to stderr.
	Logfile string `toml:"logfile"`

	// The file will be rotated after the time interval specified. When set
	// to 0 no time based rotation is performed.
	LogfileRotationInterval internal.Duration `toml:"logfile_rotation_interval"`

	// The logfile will be rotated when it becomes larger than the specified
	// size. When set to 0 no size based rotation is performed.
	LogfileRotationMaxSize internal.Size `toml:"logfile_rotation_max_size"`

	// Maximum number of rotated archives to keep, any older logs are deleted.
	// If set to -1, no archives are removed.
	LogfileRotationMaxArchives int `toml:"logfile_rotation_max_archives"`

	Hostname     string
	OmitHostname bool
}

// InputNames returns the names of the configured inputs.
func (c *Config) InputNames() []string {
	var name []string
	for _, input := range c.Inputs {
		name = append(name, input.Config.Name)
	}
	return name
}

// AggregatorNames returns the names of the configured aggregators.
func (c *Config) AggregatorNames() []string {
	var name []string
	for _, aggregator := range c.Aggregators {
		name = append(name, aggregator.Config.Name)
	}
	return name
}

// ProcessorNames returns the names of the configured processors.
func (c *Config) ProcessorNames() []string {
	var name []string
	for _, processor := range c.Processors {
		name = append(name, processor.Config.Name)
	}
	return name
}

// OutputNames returns the names of the configured outputs.
func (c *Config) OutputNames() []string {
	var name []string
	for _, output := range c.Outputs {
		name = append(name, output.Config.Name)
	}
	return name
}

// ListTags returns a string of tags specified in the config,
// line-protocol style
func (c *Config) ListTags() string {
	var tags []string

	for k, v := range c.Tags {
		tags = append(tags, fmt.Sprintf("%s=%s", k, v))
	}

	sort.Strings(tags)

	return strings.Join(tags, " ")
}
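
// For example (illustrative values only), with Tags set to
// {"dc": "us-east-1", "host": "db01"} ListTags returns the sorted,
// space-separated string:
//
//	dc=us-east-1 host=db01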

var header = `# Telegraf Configuration
#
# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared inputs, and sent to the declared outputs.
#
# Plugins must be declared in here to be active.
# To deactivate a plugin, comment out the name and any variables.
#
# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
# file would generate.
#
# Environment variables can be used anywhere in this config file, simply surround
# them with ${}. For strings the variable must be within quotes (ie, "${STR_VAR}"),
# for numbers and booleans they should be plain (ie, ${INT_VAR}, ${BOOL_VAR})

`
var globalTagsConfig = `
# Global tags can be specified here in key="value" format.
[global_tags]
  # dc = "us-east-1" # will tag all metrics with dc=us-east-1
  # rack = "1a"
  ## Environment variables can be used as tags, and throughout the config file
  # user = "$USER"

`
var agentConfig = `
# Configuration for telegraf agent
[agent]
  ## Default data collection interval for all inputs
  interval = "10s"
  ## Rounds collection interval to 'interval'
  ## ie, if interval="10s" then always collect on :00, :10, :20, etc.
  round_interval = true

  ## Telegraf will send metrics to outputs in batches of at most
  ## metric_batch_size metrics.
  ## This controls the size of writes that Telegraf sends to output plugins.
  metric_batch_size = 1000

  ## Maximum number of unwritten metrics per output. Increasing this value
  ## allows for longer periods of output downtime without dropping metrics at the
  ## cost of higher maximum memory usage.
  metric_buffer_limit = 10000

  ## Collection jitter is used to jitter the collection by a random amount.
  ## Each plugin will sleep for a random time within jitter before collecting.
  ## This can be used to avoid many plugins querying things like sysfs at the
  ## same time, which can have a measurable effect on the system.
  collection_jitter = "0s"

  ## Default flushing interval for all outputs. Maximum flush_interval will be
  ## flush_interval + flush_jitter
  flush_interval = "10s"
  ## Jitter the flush interval by a random amount. This is primarily to avoid
  ## large write spikes for users running a large number of telegraf instances.
  ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
  flush_jitter = "0s"

  ## By default or when set to "0s", precision will be set to the same
  ## timestamp order as the collection interval, with the maximum being 1s.
  ## ie, when interval = "10s", precision will be "1s"
  ##     when interval = "250ms", precision will be "1ms"
  ## Precision will NOT be used for service inputs. It is up to each individual
  ## service input to set the timestamp at the appropriate precision.
  ## Valid time units are "ns", "us" (or "µs"), "ms", "s".
  precision = ""

  ## Log at debug level.
  # debug = false
  ## Log only error level messages.
  # quiet = false

  ## Log target controls the destination for logs and can be one of "file",
  ## "stderr" or, on Windows, "eventlog". When set to "file", the output file
  ## is determined by the "logfile" setting.
  # logtarget = "file"

  ## Name of the file to be logged to when using the "file" logtarget. If set to
  ## the empty string then logs are written to stderr.
  # logfile = ""

  ## The logfile will be rotated after the time interval specified. When set
  ## to 0 no time based rotation is performed. Logs are rotated only when
  ## written to, if there is no log activity rotation may be delayed.
  # logfile_rotation_interval = "0d"

  ## The logfile will be rotated when it becomes larger than the specified
  ## size. When set to 0 no size based rotation is performed.
  # logfile_rotation_max_size = "0MB"

  ## Maximum number of rotated archives to keep, any older logs are deleted.
  ## If set to -1, no archives are removed.
  # logfile_rotation_max_archives = 5

  ## Override default hostname, if empty use os.Hostname()
  hostname = ""
  ## If set to true, do not set the "host" tag in the telegraf agent.
  omit_hostname = false

`

var outputHeader = `
###############################################################################
#                            OUTPUT PLUGINS                                   #
###############################################################################

`

var processorHeader = `
###############################################################################
#                            PROCESSOR PLUGINS                                #
###############################################################################

`

var aggregatorHeader = `
###############################################################################
#                            AGGREGATOR PLUGINS                               #
###############################################################################

`

var inputHeader = `
###############################################################################
#                            INPUT PLUGINS                                    #
###############################################################################

`

var serviceInputHeader = `
###############################################################################
#                            SERVICE INPUT PLUGINS                            #
###############################################################################

`

// PrintSampleConfig prints the sample config
func PrintSampleConfig(
	sectionFilters []string,
	inputFilters []string,
	outputFilters []string,
	aggregatorFilters []string,
	processorFilters []string,
) {
	// print headers
	fmt.Printf(header)

	if len(sectionFilters) == 0 {
		sectionFilters = sectionDefaults
	}
	printFilteredGlobalSections(sectionFilters)

	// print output plugins
	if sliceContains("outputs", sectionFilters) {
		if len(outputFilters) != 0 {
			if len(outputFilters) >= 3 && outputFilters[1] != "none" {
				fmt.Printf(outputHeader)
			}
			printFilteredOutputs(outputFilters, false)
		} else {
			fmt.Printf(outputHeader)
			printFilteredOutputs(outputDefaults, false)
			// Print non-default outputs, commented
			var pnames []string
			for pname := range outputs.Outputs {
				if !sliceContains(pname, outputDefaults) {
					pnames = append(pnames, pname)
				}
			}
			sort.Strings(pnames)
			printFilteredOutputs(pnames, true)
		}
	}

	// print processor plugins
	if sliceContains("processors", sectionFilters) {
		if len(processorFilters) != 0 {
			if len(processorFilters) >= 3 && processorFilters[1] != "none" {
				fmt.Printf(processorHeader)
			}
			printFilteredProcessors(processorFilters, false)
		} else {
			fmt.Printf(processorHeader)
			pnames := []string{}
			for pname := range processors.Processors {
				pnames = append(pnames, pname)
			}
			sort.Strings(pnames)
			printFilteredProcessors(pnames, true)
		}
	}

	// print aggregator plugins
	if sliceContains("aggregators", sectionFilters) {
		if len(aggregatorFilters) != 0 {
			if len(aggregatorFilters) >= 3 && aggregatorFilters[1] != "none" {
				fmt.Printf(aggregatorHeader)
			}
			printFilteredAggregators(aggregatorFilters, false)
		} else {
			fmt.Printf(aggregatorHeader)
			pnames := []string{}
			for pname := range aggregators.Aggregators {
				pnames = append(pnames, pname)
			}
			sort.Strings(pnames)
			printFilteredAggregators(pnames, true)
		}
	}

	// print input plugins
	if sliceContains("inputs", sectionFilters) {
		if len(inputFilters) != 0 {
			if len(inputFilters) >= 3 && inputFilters[1] != "none" {
				fmt.Printf(inputHeader)
			}
			printFilteredInputs(inputFilters, false)
		} else {
			fmt.Printf(inputHeader)
			printFilteredInputs(inputDefaults, false)
			// Print non-default inputs, commented
			var pnames []string
			for pname := range inputs.Inputs {
				if !sliceContains(pname, inputDefaults) {
					pnames = append(pnames, pname)
				}
			}
			sort.Strings(pnames)
			printFilteredInputs(pnames, true)
		}
	}
}

func printFilteredProcessors(processorFilters []string, commented bool) {
	// Filter processors
	var pnames []string
	for pname := range processors.Processors {
		if sliceContains(pname, processorFilters) {
			pnames = append(pnames, pname)
		}
	}
	sort.Strings(pnames)

	// Print Processors
	for _, pname := range pnames {
		creator := processors.Processors[pname]
		output := creator()
		printConfig(pname, output, "processors", commented)
	}
}

func printFilteredAggregators(aggregatorFilters []string, commented bool) {
	// Filter aggregators
	var anames []string
	for aname := range aggregators.Aggregators {
		if sliceContains(aname, aggregatorFilters) {
			anames = append(anames, aname)
		}
	}
	sort.Strings(anames)

	// Print Aggregators
	for _, aname := range anames {
		creator := aggregators.Aggregators[aname]
		output := creator()
		printConfig(aname, output, "aggregators", commented)
	}
}

func printFilteredInputs(inputFilters []string, commented bool) {
	// Filter inputs
	var pnames []string
	for pname := range inputs.Inputs {
		if sliceContains(pname, inputFilters) {
			pnames = append(pnames, pname)
		}
	}
	sort.Strings(pnames)

	// cache service inputs to print them at the end
	servInputs := make(map[string]telegraf.ServiceInput)
	// for alphabetical looping:
	servInputNames := []string{}

	// Print Inputs
	for _, pname := range pnames {
		creator := inputs.Inputs[pname]
		input := creator()

		switch p := input.(type) {
		case telegraf.ServiceInput:
			servInputs[pname] = p
			servInputNames = append(servInputNames, pname)
			continue
		}

		printConfig(pname, input, "inputs", commented)
	}

	// Print Service Inputs
	if len(servInputs) == 0 {
		return
	}
	sort.Strings(servInputNames)

	fmt.Printf(serviceInputHeader)
	for _, name := range servInputNames {
		printConfig(name, servInputs[name], "inputs", commented)
	}
}

func printFilteredOutputs(outputFilters []string, commented bool) {
	// Filter outputs
	var onames []string
	for oname := range outputs.Outputs {
		if sliceContains(oname, outputFilters) {
			onames = append(onames, oname)
		}
	}
	sort.Strings(onames)

	// Print Outputs
	for _, oname := range onames {
		creator := outputs.Outputs[oname]
		output := creator()
		printConfig(oname, output, "outputs", commented)
	}
}

func printFilteredGlobalSections(sectionFilters []string) {
	if sliceContains("global_tags", sectionFilters) {
		fmt.Printf(globalTagsConfig)
	}

	if sliceContains("agent", sectionFilters) {
		fmt.Printf(agentConfig)
	}
}

func printConfig(name string, p telegraf.PluginDescriber, op string, commented bool) {
	comment := ""
	if commented {
		comment = "# "
	}
	fmt.Printf("\n%s# %s\n%s[[%s.%s]]", comment, p.Description(), comment,
		op, name)

	config := p.SampleConfig()
	if config == "" {
		fmt.Printf("\n%s # no configuration\n\n", comment)
	} else {
		lines := strings.Split(config, "\n")
		for i, line := range lines {
			if i == 0 || i == len(lines)-1 {
				fmt.Print("\n")
				continue
			}
			fmt.Print(strings.TrimRight(comment+line, " ") + "\n")
		}
	}
}
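
// As an illustration (assuming a hypothetical input named "example" whose
// Description is "An example plugin" and whose SampleConfig holds a single
// "## option" line), printConfig emits roughly:
//
//	# An example plugin
//	[[inputs.example]]
//	  ## option
//
// When commented is true, every emitted line is additionally prefixed with "# ".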

func sliceContains(name string, list []string) bool {
	for _, b := range list {
		if b == name {
			return true
		}
	}
	return false
}

// PrintInputConfig prints the config usage of a single input.
func PrintInputConfig(name string) error {
	if creator, ok := inputs.Inputs[name]; ok {
		printConfig(name, creator(), "inputs", false)
	} else {
		return errors.New(fmt.Sprintf("Input %s not found", name))
	}
	return nil
}

// PrintOutputConfig prints the config usage of a single output.
func PrintOutputConfig(name string) error {
	if creator, ok := outputs.Outputs[name]; ok {
		printConfig(name, creator(), "outputs", false)
	} else {
		return errors.New(fmt.Sprintf("Output %s not found", name))
	}
	return nil
}

// LoadDirectory walks the given directory and loads every file ending in
// ".conf", applying each one to c.
func (c *Config) LoadDirectory(path string) error {
	walkfn := func(thispath string, info os.FileInfo, _ error) error {
		if info == nil {
			log.Printf("W! Telegraf is not permitted to read %s", thispath)
			return nil
		}

		if info.IsDir() {
			if strings.HasPrefix(info.Name(), "..") {
				// skip Kubernetes mounts, preventing loading the same config twice
				return filepath.SkipDir
			}

			return nil
		}
		name := info.Name()
		if len(name) < 6 || name[len(name)-5:] != ".conf" {
			return nil
		}
		err := c.LoadConfig(thispath)
		if err != nil {
			return err
		}
		return nil
	}
	return filepath.Walk(path, walkfn)
}
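
// A small usage sketch (illustrative only): the agent typically calls this
// with its config-directory setting, and anything that is not a ".conf" file
// is silently skipped:
//
//	c := NewConfig()
//	if err := c.LoadDirectory("/etc/telegraf/telegraf.d"); err != nil {
//		log.Fatal(err)
//	}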

// getDefaultConfigPath tries to find a default config file at these locations
// (in order):
//   1. $TELEGRAF_CONFIG_PATH
//   2. $HOME/.telegraf/telegraf.conf
//   3. /etc/telegraf/telegraf.conf
func getDefaultConfigPath() (string, error) {
	envfile := os.Getenv("TELEGRAF_CONFIG_PATH")
	homefile := os.ExpandEnv("${HOME}/.telegraf/telegraf.conf")
	etcfile := "/etc/telegraf/telegraf.conf"
	if runtime.GOOS == "windows" {
		programFiles := os.Getenv("ProgramFiles")
		if programFiles == "" { // Should never happen
			programFiles = `C:\Program Files`
		}
		etcfile = programFiles + `\Telegraf\telegraf.conf`
	}
	for _, path := range []string{envfile, homefile, etcfile} {
		if _, err := os.Stat(path); err == nil {
			log.Printf("I! Using config file: %s", path)
			return path, nil
		}
	}

	// if we got here, we didn't find a file in a default location
	return "", fmt.Errorf("No config file specified, and could not find one"+
		" in $TELEGRAF_CONFIG_PATH, %s, or %s", homefile, etcfile)
}

// LoadConfig loads the given config file and applies it to c.
func (c *Config) LoadConfig(path string) error {
	var err error
	if path == "" {
		if path, err = getDefaultConfigPath(); err != nil {
			return err
		}
	}
	data, err := loadConfig(path)
	if err != nil {
		return fmt.Errorf("Error loading config file %s: %w", path, err)
	}

	if err = c.LoadConfigData(data); err != nil {
		return fmt.Errorf("Error loading config file %s: %w", path, err)
	}
	return nil
}

// LoadConfigData loads TOML-formatted config data and applies it to c.
func (c *Config) LoadConfigData(data []byte) error {
	tbl, err := parseConfig(data)
	if err != nil {
		return fmt.Errorf("Error parsing data: %s", err)
	}

	// Parse tags tables first:
	for _, tableName := range []string{"tags", "global_tags"} {
		if val, ok := tbl.Fields[tableName]; ok {
			subTable, ok := val.(*ast.Table)
			if !ok {
				return fmt.Errorf("invalid configuration, bad table name %q", tableName)
			}
			if err = toml.UnmarshalTable(subTable, c.Tags); err != nil {
				return fmt.Errorf("error parsing table name %q: %w", tableName, err)
			}
		}
	}

	// Parse agent table:
	if val, ok := tbl.Fields["agent"]; ok {
		subTable, ok := val.(*ast.Table)
		if !ok {
			return fmt.Errorf("invalid configuration, error parsing agent table")
		}
		if err = toml.UnmarshalTable(subTable, c.Agent); err != nil {
			return fmt.Errorf("error parsing agent table: %w", err)
		}
	}

	if !c.Agent.OmitHostname {
		if c.Agent.Hostname == "" {
			hostname, err := os.Hostname()
			if err != nil {
				return err
			}

			c.Agent.Hostname = hostname
		}

		c.Tags["host"] = c.Agent.Hostname
	}

	// Parse all the rest of the plugins:
	for name, val := range tbl.Fields {
		subTable, ok := val.(*ast.Table)
		if !ok {
			return fmt.Errorf("invalid configuration, error parsing field %q as table", name)
		}

		switch name {
		case "agent", "global_tags", "tags":
		case "outputs":
			for pluginName, pluginVal := range subTable.Fields {
				switch pluginSubTable := pluginVal.(type) {
				// legacy [outputs.influxdb] support
				case *ast.Table:
					if err = c.addOutput(pluginName, pluginSubTable); err != nil {
						return fmt.Errorf("Error parsing %s, %s", pluginName, err)
					}
				case []*ast.Table:
					for _, t := range pluginSubTable {
						if err = c.addOutput(pluginName, t); err != nil {
							return fmt.Errorf("Error parsing %s array, %s", pluginName, err)
						}
					}
				default:
					return fmt.Errorf("Unsupported config format: %s",
						pluginName)
				}
			}
		case "inputs", "plugins":
			for pluginName, pluginVal := range subTable.Fields {
				switch pluginSubTable := pluginVal.(type) {
				// legacy [inputs.cpu] support
				case *ast.Table:
					if err = c.addInput(pluginName, pluginSubTable); err != nil {
						return fmt.Errorf("Error parsing %s, %s", pluginName, err)
					}
				case []*ast.Table:
					for _, t := range pluginSubTable {
						if err = c.addInput(pluginName, t); err != nil {
							return fmt.Errorf("Error parsing %s, %s", pluginName, err)
						}
					}
				default:
					return fmt.Errorf("Unsupported config format: %s",
						pluginName)
				}
			}
		case "processors":
			for pluginName, pluginVal := range subTable.Fields {
				switch pluginSubTable := pluginVal.(type) {
				case []*ast.Table:
					for _, t := range pluginSubTable {
						if err = c.addProcessor(pluginName, t); err != nil {
							return fmt.Errorf("Error parsing %s, %s", pluginName, err)
						}
					}
				default:
					return fmt.Errorf("Unsupported config format: %s",
						pluginName)
				}
			}
		case "aggregators":
			for pluginName, pluginVal := range subTable.Fields {
				switch pluginSubTable := pluginVal.(type) {
				case []*ast.Table:
					for _, t := range pluginSubTable {
						if err = c.addAggregator(pluginName, t); err != nil {
							return fmt.Errorf("Error parsing %s, %s", pluginName, err)
						}
					}
				default:
					return fmt.Errorf("Unsupported config format: %s",
						pluginName)
				}
			}
		// Assume it's an input for legacy config file support if no other
		// identifiers are present
		default:
			if err = c.addInput(name, subTable); err != nil {
				return fmt.Errorf("Error parsing %s, %s", name, err)
			}
		}
	}

	if len(c.Processors) > 1 {
		sort.Sort(c.Processors)
	}

	return nil
}

// trimBOM trims the Byte-Order-Marks from the beginning of the file.
// This is for Windows compatibility only.
// See https://github.com/influxdata/telegraf/issues/1378
func trimBOM(f []byte) []byte {
	return bytes.TrimPrefix(f, []byte("\xef\xbb\xbf"))
}

// escapeEnv escapes a value for inserting into a TOML string.
func escapeEnv(value string) string {
	return envVarEscaper.Replace(value)
}
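
// For example (illustrative value only), an environment variable holding
//
//	pass"word\
//
// is rewritten to
//
//	pass\"word\\
//
// so that it can be substituted safely inside a double-quoted TOML string.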

// loadConfig reads the config from a local file path, or fetches it over
// HTTP(S) when the path parses as a URL with one of those schemes.
func loadConfig(config string) ([]byte, error) {
	u, err := url.Parse(config)
	if err != nil {
		return nil, err
	}

	switch u.Scheme {
	case "https", "http":
		return fetchConfig(u)
	default:
		// If it isn't an http(s) scheme, try it as a file path.
	}
	return ioutil.ReadFile(config)
}

func fetchConfig(u *url.URL) ([]byte, error) {
	req, err := http.NewRequest("GET", u.String(), nil)
	if err != nil {
		return nil, err
	}

	if v, exists := os.LookupEnv("INFLUX_TOKEN"); exists {
		req.Header.Add("Authorization", "Token "+v)
	}
	req.Header.Add("Accept", "application/toml")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	// Close the body regardless of the status code so the connection is not leaked.
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("failed to retrieve remote config: %s", resp.Status)
	}

	return ioutil.ReadAll(resp.Body)
}
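
// Note: remote configuration is only used when the config path parses as an
// http or https URL (see loadConfig above). If the INFLUX_TOKEN environment
// variable is set, its value is sent as a "Token <value>" Authorization
// header, the scheme expected by InfluxDB v2 style endpoints serving configs.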

// parseConfig parses the given TOML contents and returns the AST produced by
// the TOML parser. Before parsing, it finds environment variable references
// and replaces them with their values.
func parseConfig(contents []byte) (*ast.Table, error) {
	contents = trimBOM(contents)

	parameters := envVarRe.FindAllSubmatch(contents, -1)
	for _, parameter := range parameters {
		if len(parameter) != 3 {
			continue
		}

		var envVar []byte
		if parameter[1] != nil {
			envVar = parameter[1]
		} else if parameter[2] != nil {
			envVar = parameter[2]
		} else {
			continue
		}

		envVal, ok := os.LookupEnv(strings.TrimPrefix(string(envVar), "$"))
		if ok {
			envVal = escapeEnv(envVal)
			contents = bytes.Replace(contents, parameter[0], []byte(envVal), 1)
		}
	}

	return toml.Parse(contents)
}
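
// As an illustration (hypothetical variable name), with MYHOST=db01 set in the
// environment the line
//
//	urls = ["http://${MYHOST}:8086"]
//
// is rewritten to
//
//	urls = ["http://db01:8086"]
//
// before the TOML parser runs. References to unset variables are left untouched.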

func (c *Config) addAggregator(name string, table *ast.Table) error {
	creator, ok := aggregators.Aggregators[name]
	if !ok {
		return fmt.Errorf("Undefined but requested aggregator: %s", name)
	}
	aggregator := creator()

	conf, err := buildAggregator(name, table)
	if err != nil {
		return err
	}

	if err := toml.UnmarshalTable(table, aggregator); err != nil {
		return err
	}

	c.Aggregators = append(c.Aggregators, models.NewRunningAggregator(aggregator, conf))
	return nil
}

func (c *Config) addProcessor(name string, table *ast.Table) error {
	creator, ok := processors.Processors[name]
	if !ok {
		return fmt.Errorf("Undefined but requested processor: %s", name)
	}

	processorConfig, err := buildProcessor(name, table)
	if err != nil {
		return err
	}

	rf, err := c.newRunningProcessor(creator, processorConfig, name, table)
	if err != nil {
		return err
	}
	c.Processors = append(c.Processors, rf)

	// save a copy for the aggregator
	rf, err = c.newRunningProcessor(creator, processorConfig, name, table)
	if err != nil {
		return err
	}
	c.AggProcessors = append(c.AggProcessors, rf)

	return nil
}

func (c *Config) newRunningProcessor(
	creator processors.StreamingCreator,
	processorConfig *models.ProcessorConfig,
	name string,
	table *ast.Table,
) (*models.RunningProcessor, error) {
	processor := creator()

	if p, ok := processor.(unwrappable); ok {
		if err := toml.UnmarshalTable(table, p.Unwrap()); err != nil {
			return nil, err
		}
	} else {
		if err := toml.UnmarshalTable(table, processor); err != nil {
			return nil, err
		}
	}

	rf := models.NewRunningProcessor(processor, processorConfig)
	return rf, nil
}

func (c *Config) addOutput(name string, table *ast.Table) error {
	if len(c.OutputFilters) > 0 && !sliceContains(name, c.OutputFilters) {
		return nil
	}
	creator, ok := outputs.Outputs[name]
	if !ok {
		return fmt.Errorf("Undefined but requested output: %s", name)
	}
	output := creator()

	// If the output has a SetSerializer function, then this means it can write
	// arbitrary types of output, so build the serializer and set it.
	switch t := output.(type) {
	case serializers.SerializerOutput:
		serializer, err := buildSerializer(name, table)
		if err != nil {
			return err
		}
		t.SetSerializer(serializer)
	}

	outputConfig, err := buildOutput(name, table)
	if err != nil {
		return err
	}

	if err := toml.UnmarshalTable(table, output); err != nil {
		return err
	}

	ro := models.NewRunningOutput(name, output, outputConfig,
		c.Agent.MetricBatchSize, c.Agent.MetricBufferLimit)
	c.Outputs = append(c.Outputs, ro)
	return nil
}

func (c *Config) addInput(name string, table *ast.Table) error {
	if len(c.InputFilters) > 0 && !sliceContains(name, c.InputFilters) {
		return nil
	}
	// Legacy support renaming io input to diskio
	if name == "io" {
		name = "diskio"
	}

	creator, ok := inputs.Inputs[name]
	if !ok {
		return fmt.Errorf("Undefined but requested input: %s", name)
	}
	input := creator()

	// If the input has a SetParser function, then this means it can accept
	// arbitrary types of input, so build the parser and set it.
	switch t := input.(type) {
	case parsers.ParserInput:
		parser, err := buildParser(name, table)
		if err != nil {
			return err
		}
		t.SetParser(parser)
	}

	switch t := input.(type) {
	case parsers.ParserFuncInput:
		config, err := getParserConfig(name, table)
		if err != nil {
			return err
		}
		t.SetParserFunc(func() (parsers.Parser, error) {
			return parsers.NewParser(config)
		})
	}

	pluginConfig, err := buildInput(name, table)
	if err != nil {
		return err
	}

	if err := toml.UnmarshalTable(table, input); err != nil {
		return err
	}

	rp := models.NewRunningInput(input, pluginConfig)
	rp.SetDefaultTags(c.Tags)
	c.Inputs = append(c.Inputs, rp)
	return nil
}

// buildAggregator parses Aggregator specific items from the ast.Table,
// builds the filter and returns a
// models.AggregatorConfig to be inserted into models.RunningAggregator
func buildAggregator(name string, tbl *ast.Table) (*models.AggregatorConfig, error) {
	conf := &models.AggregatorConfig{
		Name:   name,
		Delay:  time.Millisecond * 100,
		Period: time.Second * 30,
		Grace:  time.Second * 0,
	}

	if node, ok := tbl.Fields["period"]; ok {
		if kv, ok := node.(*ast.KeyValue); ok {
			if str, ok := kv.Value.(*ast.String); ok {
				dur, err := time.ParseDuration(str.Value)
				if err != nil {
					return nil, err
				}

				conf.Period = dur
			}
		}
	}

	if node, ok := tbl.Fields["delay"]; ok {
		if kv, ok := node.(*ast.KeyValue); ok {
			if str, ok := kv.Value.(*ast.String); ok {
				dur, err := time.ParseDuration(str.Value)
				if err != nil {
					return nil, err
				}

				conf.Delay = dur
			}
		}
	}

	if node, ok := tbl.Fields["grace"]; ok {
		if kv, ok := node.(*ast.KeyValue); ok {
			if str, ok := kv.Value.(*ast.String); ok {
				dur, err := time.ParseDuration(str.Value)
				if err != nil {
					return nil, err
				}

				conf.Grace = dur
			}
		}
	}
	if node, ok := tbl.Fields["drop_original"]; ok {
		if kv, ok := node.(*ast.KeyValue); ok {
			if b, ok := kv.Value.(*ast.Boolean); ok {
				var err error
				conf.DropOriginal, err = strconv.ParseBool(b.Value)
				if err != nil {
					log.Printf("Error parsing boolean value for %s: %s\n", name, err)
				}
			}
		}
	}

	if node, ok := tbl.Fields["name_prefix"]; ok {
		if kv, ok := node.(*ast.KeyValue); ok {
			if str, ok := kv.Value.(*ast.String); ok {
				conf.MeasurementPrefix = str.Value
			}
		}
	}

	if node, ok := tbl.Fields["name_suffix"]; ok {
		if kv, ok := node.(*ast.KeyValue); ok {
			if str, ok := kv.Value.(*ast.String); ok {
				conf.MeasurementSuffix = str.Value
			}
		}
	}

	if node, ok := tbl.Fields["name_override"]; ok {
		if kv, ok := node.(*ast.KeyValue); ok {
			if str, ok := kv.Value.(*ast.String); ok {
				conf.NameOverride = str.Value
			}
		}
	}

	if node, ok := tbl.Fields["alias"]; ok {
		if kv, ok := node.(*ast.KeyValue); ok {
			if str, ok := kv.Value.(*ast.String); ok {
				conf.Alias = str.Value
			}
		}
	}

	conf.Tags = make(map[string]string)
	if node, ok := tbl.Fields["tags"]; ok {
		if subtbl, ok := node.(*ast.Table); ok {
			if err := toml.UnmarshalTable(subtbl, conf.Tags); err != nil {
				log.Printf("Could not parse tags for aggregator %s\n", name)
			}
		}
	}

	delete(tbl.Fields, "period")
	delete(tbl.Fields, "delay")
	delete(tbl.Fields, "grace")
	delete(tbl.Fields, "drop_original")
	delete(tbl.Fields, "name_prefix")
	delete(tbl.Fields, "name_suffix")
	delete(tbl.Fields, "name_override")
	delete(tbl.Fields, "alias")
	delete(tbl.Fields, "tags")
	var err error
	conf.Filter, err = buildFilter(tbl)
	if err != nil {
		return conf, err
	}
	return conf, nil
}
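
// A hedged example of the aggregator table fields handled above (the plugin
// name and values are purely illustrative):
//
//	[[aggregators.basicstats]]
//	  period = "30s"
//	  delay = "100ms"
//	  grace = "5s"
//	  drop_original = false
//	  name_prefix = "agg_"
//	  alias = "stats"
//	  namepass = ["cpu"]
//
// Every key consumed here (or by buildFilter) is deleted from the table so
// that the remaining fields can be unmarshalled directly into the plugin
// struct by addAggregator.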

// buildProcessor parses Processor specific items from the ast.Table,
// builds the filter and returns a
// models.ProcessorConfig to be inserted into models.RunningProcessor
func buildProcessor(name string, tbl *ast.Table) (*models.ProcessorConfig, error) {
	conf := &models.ProcessorConfig{Name: name}

	if node, ok := tbl.Fields["order"]; ok {
		if kv, ok := node.(*ast.KeyValue); ok {
			if b, ok := kv.Value.(*ast.Integer); ok {
				var err error
				conf.Order, err = strconv.ParseInt(b.Value, 10, 64)
				if err != nil {
					log.Printf("Error parsing int value for %s: %s\n", name, err)
				}
			}
		}
	}

	if node, ok := tbl.Fields["alias"]; ok {
		if kv, ok := node.(*ast.KeyValue); ok {
			if str, ok := kv.Value.(*ast.String); ok {
				conf.Alias = str.Value
			}
		}
	}

	delete(tbl.Fields, "alias")
	delete(tbl.Fields, "order")
	var err error
	conf.Filter, err = buildFilter(tbl)
	if err != nil {
		return conf, err
	}
	return conf, nil
}

// buildFilter builds a Filter
// (tagpass/tagdrop/namepass/namedrop/fieldpass/fielddrop) to
// be inserted into the models.OutputConfig/models.InputConfig
// to be used for glob filtering on tags and measurements
func buildFilter(tbl *ast.Table) (models.Filter, error) {
	f := models.Filter{}

	if node, ok := tbl.Fields["namepass"]; ok {
		if kv, ok := node.(*ast.KeyValue); ok {
			if ary, ok := kv.Value.(*ast.Array); ok {
				for _, elem := range ary.Value {
					if str, ok := elem.(*ast.String); ok {
						f.NamePass = append(f.NamePass, str.Value)
					}
				}
			}
		}
	}

	if node, ok := tbl.Fields["namedrop"]; ok {
		if kv, ok := node.(*ast.KeyValue); ok {
			if ary, ok := kv.Value.(*ast.Array); ok {
				for _, elem := range ary.Value {
					if str, ok := elem.(*ast.String); ok {
						f.NameDrop = append(f.NameDrop, str.Value)
					}
				}
			}
		}
	}

	fields := []string{"pass", "fieldpass"}
	for _, field := range fields {
		if node, ok := tbl.Fields[field]; ok {
			if kv, ok := node.(*ast.KeyValue); ok {
				if ary, ok := kv.Value.(*ast.Array); ok {
					for _, elem := range ary.Value {
						if str, ok := elem.(*ast.String); ok {
							f.FieldPass = append(f.FieldPass, str.Value)
						}
					}
				}
			}
		}
	}

	fields = []string{"drop", "fielddrop"}
	for _, field := range fields {
		if node, ok := tbl.Fields[field]; ok {
			if kv, ok := node.(*ast.KeyValue); ok {
				if ary, ok := kv.Value.(*ast.Array); ok {
					for _, elem := range ary.Value {
						if str, ok := elem.(*ast.String); ok {
							f.FieldDrop = append(f.FieldDrop, str.Value)
						}
					}
				}
			}
		}
	}

	if node, ok := tbl.Fields["tagpass"]; ok {
		if subtbl, ok := node.(*ast.Table); ok {
			for name, val := range subtbl.Fields {
				if kv, ok := val.(*ast.KeyValue); ok {
					tagfilter := &models.TagFilter{Name: name}
					if ary, ok := kv.Value.(*ast.Array); ok {
						for _, elem := range ary.Value {
							if str, ok := elem.(*ast.String); ok {
								tagfilter.Filter = append(tagfilter.Filter, str.Value)
							}
						}
					}
					f.TagPass = append(f.TagPass, *tagfilter)
				}
			}
		}
	}

	if node, ok := tbl.Fields["tagdrop"]; ok {
		if subtbl, ok := node.(*ast.Table); ok {
			for name, val := range subtbl.Fields {
				if kv, ok := val.(*ast.KeyValue); ok {
					tagfilter := &models.TagFilter{Name: name}
					if ary, ok := kv.Value.(*ast.Array); ok {
						for _, elem := range ary.Value {
							if str, ok := elem.(*ast.String); ok {
								tagfilter.Filter = append(tagfilter.Filter, str.Value)
							}
						}
					}
					f.TagDrop = append(f.TagDrop, *tagfilter)
				}
			}
		}
	}

	if node, ok := tbl.Fields["tagexclude"]; ok {
		if kv, ok := node.(*ast.KeyValue); ok {
			if ary, ok := kv.Value.(*ast.Array); ok {
				for _, elem := range ary.Value {
					if str, ok := elem.(*ast.String); ok {
						f.TagExclude = append(f.TagExclude, str.Value)
					}
				}
			}
		}
	}

	if node, ok := tbl.Fields["taginclude"]; ok {
		if kv, ok := node.(*ast.KeyValue); ok {
			if ary, ok := kv.Value.(*ast.Array); ok {
				for _, elem := range ary.Value {
					if str, ok := elem.(*ast.String); ok {
						f.TagInclude = append(f.TagInclude, str.Value)
					}
				}
			}
		}
	}
	if err := f.Compile(); err != nil {
		return f, err
	}

	delete(tbl.Fields, "namedrop")
	delete(tbl.Fields, "namepass")
	delete(tbl.Fields, "fielddrop")
	delete(tbl.Fields, "fieldpass")
	delete(tbl.Fields, "drop")
	delete(tbl.Fields, "pass")
	delete(tbl.Fields, "tagdrop")
	delete(tbl.Fields, "tagpass")
	delete(tbl.Fields, "tagexclude")
	delete(tbl.Fields, "taginclude")
	return f, nil
}
|
|
|
|
|
2016-01-07 20:39:43 +00:00
|
|
|
// buildInput parses input specific items from the ast.Table,
|
2015-12-11 20:07:32 +00:00
|
|
|
// builds the filter and returns a
|
2016-07-28 11:31:11 +00:00
|
|
|
// models.InputConfig to be inserted into models.RunningInput
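//
// For example (values are illustrative only), the generic input options
// handled below could appear as:
//
//   [[inputs.mem]]
//     interval = "30s"
//     name_override = "memory"
//     alias = "mem-primary"
//     [inputs.mem.tags]
//       datacenter = "us-east-1"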
|
|
|
|
func buildInput(name string, tbl *ast.Table) (*models.InputConfig, error) {
|
|
|
|
cp := &models.InputConfig{Name: name}
|
2015-12-01 14:15:28 +00:00
|
|
|
if node, ok := tbl.Fields["interval"]; ok {
|
|
|
|
if kv, ok := node.(*ast.KeyValue); ok {
|
|
|
|
if str, ok := kv.Value.(*ast.String); ok {
|
|
|
|
dur, err := time.ParseDuration(str.Value)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
cp.Interval = dur
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2015-12-11 20:07:32 +00:00
|
|
|
|
|
|
|
if node, ok := tbl.Fields["name_prefix"]; ok {
|
|
|
|
if kv, ok := node.(*ast.KeyValue); ok {
|
|
|
|
if str, ok := kv.Value.(*ast.String); ok {
|
|
|
|
cp.MeasurementPrefix = str.Value
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if node, ok := tbl.Fields["name_suffix"]; ok {
|
|
|
|
if kv, ok := node.(*ast.KeyValue); ok {
|
|
|
|
if str, ok := kv.Value.(*ast.String); ok {
|
|
|
|
cp.MeasurementSuffix = str.Value
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if node, ok := tbl.Fields["name_override"]; ok {
|
|
|
|
if kv, ok := node.(*ast.KeyValue); ok {
|
|
|
|
if str, ok := kv.Value.(*ast.String); ok {
|
|
|
|
cp.NameOverride = str.Value
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-08-21 23:49:07 +00:00
|
|
|
if node, ok := tbl.Fields["alias"]; ok {
|
|
|
|
if kv, ok := node.(*ast.KeyValue); ok {
|
|
|
|
if str, ok := kv.Value.(*ast.String); ok {
|
|
|
|
cp.Alias = str.Value
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-12-11 20:07:32 +00:00
|
|
|
cp.Tags = make(map[string]string)
|
|
|
|
if node, ok := tbl.Fields["tags"]; ok {
|
|
|
|
if subtbl, ok := node.(*ast.Table); ok {
|
2017-02-10 17:27:18 +00:00
|
|
|
if err := toml.UnmarshalTable(subtbl, cp.Tags); err != nil {
|
2016-09-30 21:37:56 +00:00
|
|
|
log.Printf("E! Could not parse tags for input %s\n", name)
|
2015-12-11 20:07:32 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
delete(tbl.Fields, "name_prefix")
|
|
|
|
delete(tbl.Fields, "name_suffix")
|
|
|
|
delete(tbl.Fields, "name_override")
|
2019-08-21 23:49:07 +00:00
|
|
|
delete(tbl.Fields, "alias")
|
2015-12-01 14:15:28 +00:00
|
|
|
delete(tbl.Fields, "interval")
|
2015-12-11 20:07:32 +00:00
|
|
|
delete(tbl.Fields, "tags")
|
2016-04-12 23:06:27 +00:00
|
|
|
var err error
|
|
|
|
cp.Filter, err = buildFilter(tbl)
|
|
|
|
if err != nil {
|
|
|
|
return cp, err
|
|
|
|
}
|
2015-12-01 14:15:28 +00:00
|
|
|
return cp, nil
|
|
|
|
}
|
|
|
|
|
2016-02-06 00:36:35 +00:00
|
|
|
// buildParser grabs the necessary entries from the ast.Table for creating
|
|
|
|
// a parsers.Parser object, and creates it, which can then be added onto
|
|
|
|
// an Input object.
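//
// A hedged example of the parser-related keys handled here (the plugin
// section and values are hypothetical):
//
//   [[inputs.exec]]
//     data_format = "json"
//     tag_keys = ["region"]
//     json_string_fields = ["status"]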
|
|
|
|
func buildParser(name string, tbl *ast.Table) (parsers.Parser, error) {
|
2018-09-18 16:23:45 +00:00
|
|
|
config, err := getParserConfig(name, tbl)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return parsers.NewParser(config)
|
|
|
|
}
|
|
|
|
|
|
|
|
func getParserConfig(name string, tbl *ast.Table) (*parsers.Config, error) {
|
2020-01-21 18:10:02 +00:00
|
|
|
c := &parsers.Config{
|
|
|
|
JSONStrict: true,
|
|
|
|
}
|
2016-02-06 00:36:35 +00:00
|
|
|
|
|
|
|
if node, ok := tbl.Fields["data_format"]; ok {
|
|
|
|
if kv, ok := node.(*ast.KeyValue); ok {
|
|
|
|
if str, ok := kv.Value.(*ast.String); ok {
|
|
|
|
c.DataFormat = str.Value
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-02-10 18:50:05 +00:00
|
|
|
// Legacy support, exec plugin originally parsed JSON by default.
|
|
|
|
if name == "exec" && c.DataFormat == "" {
|
|
|
|
c.DataFormat = "json"
|
|
|
|
} else if c.DataFormat == "" {
|
2016-02-06 00:36:35 +00:00
|
|
|
c.DataFormat = "influx"
|
|
|
|
}
|
|
|
|
|
|
|
|
if node, ok := tbl.Fields["separator"]; ok {
|
|
|
|
if kv, ok := node.(*ast.KeyValue); ok {
|
|
|
|
if str, ok := kv.Value.(*ast.String); ok {
|
|
|
|
c.Separator = str.Value
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if node, ok := tbl.Fields["templates"]; ok {
|
|
|
|
if kv, ok := node.(*ast.KeyValue); ok {
|
|
|
|
if ary, ok := kv.Value.(*ast.Array); ok {
|
|
|
|
for _, elem := range ary.Value {
|
|
|
|
if str, ok := elem.(*ast.String); ok {
|
|
|
|
c.Templates = append(c.Templates, str.Value)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if node, ok := tbl.Fields["tag_keys"]; ok {
|
|
|
|
if kv, ok := node.(*ast.KeyValue); ok {
|
|
|
|
if ary, ok := kv.Value.(*ast.Array); ok {
|
|
|
|
for _, elem := range ary.Value {
|
|
|
|
if str, ok := elem.(*ast.String); ok {
|
|
|
|
c.TagKeys = append(c.TagKeys, str.Value)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-08-23 02:26:48 +00:00
|
|
|
if node, ok := tbl.Fields["json_string_fields"]; ok {
|
|
|
|
if kv, ok := node.(*ast.KeyValue); ok {
|
|
|
|
if ary, ok := kv.Value.(*ast.Array); ok {
|
|
|
|
for _, elem := range ary.Value {
|
|
|
|
if str, ok := elem.(*ast.String); ok {
|
|
|
|
c.JSONStringFields = append(c.JSONStringFields, str.Value)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if node, ok := tbl.Fields["json_name_key"]; ok {
|
|
|
|
if kv, ok := node.(*ast.KeyValue); ok {
|
|
|
|
if str, ok := kv.Value.(*ast.String); ok {
|
|
|
|
c.JSONNameKey = str.Value
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if node, ok := tbl.Fields["json_query"]; ok {
|
|
|
|
if kv, ok := node.(*ast.KeyValue); ok {
|
|
|
|
if str, ok := kv.Value.(*ast.String); ok {
|
|
|
|
c.JSONQuery = str.Value
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if node, ok := tbl.Fields["json_time_key"]; ok {
|
|
|
|
if kv, ok := node.(*ast.KeyValue); ok {
|
|
|
|
if str, ok := kv.Value.(*ast.String); ok {
|
|
|
|
c.JSONTimeKey = str.Value
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if node, ok := tbl.Fields["json_time_format"]; ok {
|
|
|
|
if kv, ok := node.(*ast.KeyValue); ok {
|
|
|
|
if str, ok := kv.Value.(*ast.String); ok {
|
|
|
|
c.JSONTimeFormat = str.Value
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-02-25 19:30:33 +00:00
|
|
|
if node, ok := tbl.Fields["json_timezone"]; ok {
|
|
|
|
if kv, ok := node.(*ast.KeyValue); ok {
|
|
|
|
if str, ok := kv.Value.(*ast.String); ok {
|
|
|
|
c.JSONTimezone = str.Value
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-01-21 18:10:02 +00:00
|
|
|
if node, ok := tbl.Fields["json_strict"]; ok {
|
|
|
|
if kv, ok := node.(*ast.KeyValue); ok {
|
|
|
|
if b, ok := kv.Value.(*ast.Boolean); ok {
|
|
|
|
var err error
|
|
|
|
c.JSONStrict, err = b.Boolean()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-03-18 00:01:01 +00:00
|
|
|
if node, ok := tbl.Fields["data_type"]; ok {
|
|
|
|
if kv, ok := node.(*ast.KeyValue); ok {
|
|
|
|
if str, ok := kv.Value.(*ast.String); ok {
|
|
|
|
c.DataType = str.Value
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-04-12 17:41:26 +00:00
|
|
|
if node, ok := tbl.Fields["collectd_auth_file"]; ok {
|
|
|
|
if kv, ok := node.(*ast.KeyValue); ok {
|
|
|
|
if str, ok := kv.Value.(*ast.String); ok {
|
|
|
|
c.CollectdAuthFile = str.Value
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if node, ok := tbl.Fields["collectd_security_level"]; ok {
|
|
|
|
if kv, ok := node.(*ast.KeyValue); ok {
|
|
|
|
if str, ok := kv.Value.(*ast.String); ok {
|
|
|
|
c.CollectdSecurityLevel = str.Value
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-07-12 00:29:23 +00:00
|
|
|
if node, ok := tbl.Fields["collectd_parse_multivalue"]; ok {
|
|
|
|
if kv, ok := node.(*ast.KeyValue); ok {
|
|
|
|
if str, ok := kv.Value.(*ast.String); ok {
|
|
|
|
c.CollectdSplit = str.Value
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-04-12 17:41:26 +00:00
|
|
|
if node, ok := tbl.Fields["collectd_typesdb"]; ok {
|
|
|
|
if kv, ok := node.(*ast.KeyValue); ok {
|
|
|
|
if ary, ok := kv.Value.(*ast.Array); ok {
|
|
|
|
for _, elem := range ary.Value {
|
|
|
|
if str, ok := elem.(*ast.String); ok {
|
|
|
|
c.CollectdTypesDB = append(c.CollectdTypesDB, str.Value)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
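//
// Sketch of the collectd options above as they might appear in a config
// file (the paths and values shown are typical but illustrative):
//
//   data_format = "collectd"
//   collectd_auth_file = "/etc/collectd/auth_file"
//   collectd_security_level = "encrypt"
//   collectd_typesdb = ["/usr/share/collectd/types.db"]
//   collectd_parse_multivalue = "split"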
|
|
|
|
|
2018-01-08 23:11:36 +00:00
|
|
|
if node, ok := tbl.Fields["dropwizard_metric_registry_path"]; ok {
|
|
|
|
if kv, ok := node.(*ast.KeyValue); ok {
|
|
|
|
if str, ok := kv.Value.(*ast.String); ok {
|
|
|
|
c.DropwizardMetricRegistryPath = str.Value
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if node, ok := tbl.Fields["dropwizard_time_path"]; ok {
|
|
|
|
if kv, ok := node.(*ast.KeyValue); ok {
|
|
|
|
if str, ok := kv.Value.(*ast.String); ok {
|
|
|
|
c.DropwizardTimePath = str.Value
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if node, ok := tbl.Fields["dropwizard_time_format"]; ok {
|
|
|
|
if kv, ok := node.(*ast.KeyValue); ok {
|
|
|
|
if str, ok := kv.Value.(*ast.String); ok {
|
|
|
|
c.DropwizardTimeFormat = str.Value
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if node, ok := tbl.Fields["dropwizard_tags_path"]; ok {
|
|
|
|
if kv, ok := node.(*ast.KeyValue); ok {
|
|
|
|
if str, ok := kv.Value.(*ast.String); ok {
|
|
|
|
c.DropwizardTagsPath = str.Value
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
c.DropwizardTagPathsMap = make(map[string]string)
|
|
|
|
if node, ok := tbl.Fields["dropwizard_tag_paths"]; ok {
|
|
|
|
if subtbl, ok := node.(*ast.Table); ok {
|
|
|
|
for name, val := range subtbl.Fields {
|
|
|
|
if kv, ok := val.(*ast.KeyValue); ok {
|
|
|
|
if str, ok := kv.Value.(*ast.String); ok {
|
|
|
|
c.DropwizardTagPathsMap[name] = str.Value
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
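//
// Illustrative dropwizard parser settings matching the keys handled above
// (the plugin section, paths, and format are made up for the example):
//
//   data_format = "dropwizard"
//   dropwizard_metric_registry_path = "metrics"
//   dropwizard_time_path = "time"
//   dropwizard_time_format = "2006-01-02T15:04:05Z07:00"
//   [inputs.http.dropwizard_tag_paths]
//     service = "tags.service"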
|
|
|
|
|
2018-07-14 06:22:59 +00:00
|
|
|
// Options for the grok data_format parser
|
|
|
|
if node, ok := tbl.Fields["grok_named_patterns"]; ok {
|
|
|
|
if kv, ok := node.(*ast.KeyValue); ok {
|
|
|
|
if ary, ok := kv.Value.(*ast.Array); ok {
|
|
|
|
for _, elem := range ary.Value {
|
|
|
|
if str, ok := elem.(*ast.String); ok {
|
|
|
|
c.GrokNamedPatterns = append(c.GrokNamedPatterns, str.Value)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if node, ok := tbl.Fields["grok_patterns"]; ok {
|
|
|
|
if kv, ok := node.(*ast.KeyValue); ok {
|
|
|
|
if ary, ok := kv.Value.(*ast.Array); ok {
|
|
|
|
for _, elem := range ary.Value {
|
|
|
|
if str, ok := elem.(*ast.String); ok {
|
|
|
|
c.GrokPatterns = append(c.GrokPatterns, str.Value)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if node, ok := tbl.Fields["grok_custom_patterns"]; ok {
|
|
|
|
if kv, ok := node.(*ast.KeyValue); ok {
|
|
|
|
if str, ok := kv.Value.(*ast.String); ok {
|
|
|
|
c.GrokCustomPatterns = str.Value
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if node, ok := tbl.Fields["grok_custom_pattern_files"]; ok {
|
|
|
|
if kv, ok := node.(*ast.KeyValue); ok {
|
|
|
|
if ary, ok := kv.Value.(*ast.Array); ok {
|
|
|
|
for _, elem := range ary.Value {
|
|
|
|
if str, ok := elem.(*ast.String); ok {
|
|
|
|
c.GrokCustomPatternFiles = append(c.GrokCustomPatternFiles, str.Value)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if node, ok := tbl.Fields["grok_timezone"]; ok {
|
|
|
|
if kv, ok := node.(*ast.KeyValue); ok {
|
|
|
|
if str, ok := kv.Value.(*ast.String); ok {
|
2018-10-03 19:58:21 +00:00
|
|
|
c.GrokTimezone = str.Value
|
2018-07-14 06:22:59 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-02-27 01:35:57 +00:00
|
|
|
if node, ok := tbl.Fields["grok_unique_timestamp"]; ok {
|
|
|
|
if kv, ok := node.(*ast.KeyValue); ok {
|
|
|
|
if str, ok := kv.Value.(*ast.String); ok {
|
|
|
|
c.GrokUniqueTimestamp = str.Value
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
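//
// A rough grok configuration using the options above (pattern and file
// names are placeholders):
//
//   data_format = "grok"
//   grok_patterns = ["%{COMBINED_LOG_FORMAT}"]
//   grok_custom_pattern_files = ["./extra_patterns"]
//   grok_timezone = "Local"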
|
|
|
|
|
2018-08-24 23:40:41 +00:00
|
|
|
// Options for the csv parser
|
|
|
|
if node, ok := tbl.Fields["csv_column_names"]; ok {
|
|
|
|
if kv, ok := node.(*ast.KeyValue); ok {
|
|
|
|
if ary, ok := kv.Value.(*ast.Array); ok {
|
|
|
|
for _, elem := range ary.Value {
|
|
|
|
if str, ok := elem.(*ast.String); ok {
|
|
|
|
c.CSVColumnNames = append(c.CSVColumnNames, str.Value)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-10-04 01:19:44 +00:00
|
|
|
if node, ok := tbl.Fields["csv_column_types"]; ok {
|
|
|
|
if kv, ok := node.(*ast.KeyValue); ok {
|
|
|
|
if ary, ok := kv.Value.(*ast.Array); ok {
|
|
|
|
for _, elem := range ary.Value {
|
|
|
|
if str, ok := elem.(*ast.String); ok {
|
|
|
|
c.CSVColumnTypes = append(c.CSVColumnTypes, str.Value)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-08-24 23:40:41 +00:00
|
|
|
if node, ok := tbl.Fields["csv_tag_columns"]; ok {
|
|
|
|
if kv, ok := node.(*ast.KeyValue); ok {
|
|
|
|
if ary, ok := kv.Value.(*ast.Array); ok {
|
|
|
|
for _, elem := range ary.Value {
|
|
|
|
if str, ok := elem.(*ast.String); ok {
|
|
|
|
c.CSVTagColumns = append(c.CSVTagColumns, str.Value)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if node, ok := tbl.Fields["csv_delimiter"]; ok {
|
|
|
|
if kv, ok := node.(*ast.KeyValue); ok {
|
|
|
|
if str, ok := kv.Value.(*ast.String); ok {
|
|
|
|
c.CSVDelimiter = str.Value
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if node, ok := tbl.Fields["csv_comment"]; ok {
|
|
|
|
if kv, ok := node.(*ast.KeyValue); ok {
|
|
|
|
if str, ok := kv.Value.(*ast.String); ok {
|
|
|
|
c.CSVComment = str.Value
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if node, ok := tbl.Fields["csv_measurement_column"]; ok {
|
|
|
|
if kv, ok := node.(*ast.KeyValue); ok {
|
|
|
|
if str, ok := kv.Value.(*ast.String); ok {
|
|
|
|
c.CSVMeasurementColumn = str.Value
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if node, ok := tbl.Fields["csv_timestamp_column"]; ok {
|
|
|
|
if kv, ok := node.(*ast.KeyValue); ok {
|
|
|
|
if str, ok := kv.Value.(*ast.String); ok {
|
|
|
|
c.CSVTimestampColumn = str.Value
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if node, ok := tbl.Fields["csv_timestamp_format"]; ok {
|
|
|
|
if kv, ok := node.(*ast.KeyValue); ok {
|
|
|
|
if str, ok := kv.Value.(*ast.String); ok {
|
|
|
|
c.CSVTimestampFormat = str.Value
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if node, ok := tbl.Fields["csv_header_row_count"]; ok {
|
|
|
|
if kv, ok := node.(*ast.KeyValue); ok {
|
2018-09-18 16:23:45 +00:00
|
|
|
if integer, ok := kv.Value.(*ast.Integer); ok {
|
|
|
|
v, err := integer.Int()
|
2018-08-24 23:40:41 +00:00
|
|
|
if err != nil {
|
2018-09-18 16:23:45 +00:00
|
|
|
return nil, err
|
2018-08-24 23:40:41 +00:00
|
|
|
}
|
2018-09-18 16:23:45 +00:00
|
|
|
c.CSVHeaderRowCount = int(v)
|
2018-08-24 23:40:41 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if node, ok := tbl.Fields["csv_skip_rows"]; ok {
|
|
|
|
if kv, ok := node.(*ast.KeyValue); ok {
|
2018-09-20 19:43:39 +00:00
|
|
|
if integer, ok := kv.Value.(*ast.Integer); ok {
|
|
|
|
v, err := integer.Int()
|
2018-08-24 23:40:41 +00:00
|
|
|
if err != nil {
|
2018-09-20 19:43:39 +00:00
|
|
|
return nil, err
|
2018-08-24 23:40:41 +00:00
|
|
|
}
|
2019-01-23 19:37:24 +00:00
|
|
|
c.CSVSkipRows = int(v)
|
2018-08-24 23:40:41 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if node, ok := tbl.Fields["csv_skip_columns"]; ok {
|
|
|
|
if kv, ok := node.(*ast.KeyValue); ok {
|
2018-09-20 19:43:39 +00:00
|
|
|
if integer, ok := kv.Value.(*ast.Integer); ok {
|
|
|
|
v, err := integer.Int()
|
2018-08-24 23:40:41 +00:00
|
|
|
if err != nil {
|
2018-09-20 19:43:39 +00:00
|
|
|
return nil, err
|
2018-08-24 23:40:41 +00:00
|
|
|
}
|
2019-01-23 19:37:24 +00:00
|
|
|
c.CSVSkipColumns = int(v)
|
2018-08-24 23:40:41 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if node, ok := tbl.Fields["csv_trim_space"]; ok {
|
|
|
|
if kv, ok := node.(*ast.KeyValue); ok {
|
|
|
|
if b, ok := kv.Value.(*ast.Boolean); ok {
// the value is an unquoted TOML boolean; parse its string form
val, err := strconv.ParseBool(b.Value)
if err != nil {
return nil, fmt.Errorf("parsing csv_trim_space as bool: %v", err)
}
c.CSVTrimSpace = val
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
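//
// Putting the csv_* options together, a hypothetical section might read:
//
//   data_format = "csv"
//   csv_header_row_count = 1
//   csv_delimiter = ","
//   csv_tag_columns = ["host"]
//   csv_timestamp_column = "time"
//   csv_timestamp_format = "unix"
//   csv_trim_space = true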
|
|
|
|
|
2019-06-17 21:44:25 +00:00
|
|
|
if node, ok := tbl.Fields["form_urlencoded_tag_keys"]; ok {
|
2019-06-17 20:34:54 +00:00
|
|
|
if kv, ok := node.(*ast.KeyValue); ok {
|
|
|
|
if ary, ok := kv.Value.(*ast.Array); ok {
|
|
|
|
for _, elem := range ary.Value {
|
|
|
|
if str, ok := elem.(*ast.String); ok {
|
2019-06-17 21:44:25 +00:00
|
|
|
c.FormUrlencodedTagKeys = append(c.FormUrlencodedTagKeys, str.Value)
|
2019-06-17 20:34:54 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-02-06 00:36:35 +00:00
|
|
|
c.MetricName = name
|
|
|
|
|
|
|
|
delete(tbl.Fields, "data_format")
|
|
|
|
delete(tbl.Fields, "separator")
|
|
|
|
delete(tbl.Fields, "templates")
|
|
|
|
delete(tbl.Fields, "tag_keys")
|
2018-08-23 02:26:48 +00:00
|
|
|
delete(tbl.Fields, "json_name_key")
|
2018-09-10 18:55:08 +00:00
|
|
|
delete(tbl.Fields, "json_query")
|
|
|
|
delete(tbl.Fields, "json_string_fields")
|
2018-08-23 02:26:48 +00:00
|
|
|
delete(tbl.Fields, "json_time_format")
|
2018-09-10 18:55:08 +00:00
|
|
|
delete(tbl.Fields, "json_time_key")
|
2019-02-25 19:30:33 +00:00
|
|
|
delete(tbl.Fields, "json_timezone")
|
2020-01-21 18:10:02 +00:00
|
|
|
delete(tbl.Fields, "json_strict")
|
2016-03-18 00:01:01 +00:00
|
|
|
delete(tbl.Fields, "data_type")
|
2017-04-12 17:41:26 +00:00
|
|
|
delete(tbl.Fields, "collectd_auth_file")
|
|
|
|
delete(tbl.Fields, "collectd_security_level")
|
|
|
|
delete(tbl.Fields, "collectd_typesdb")
|
2018-07-12 00:29:23 +00:00
|
|
|
delete(tbl.Fields, "collectd_parse_multivalue")
|
2018-01-08 23:11:36 +00:00
|
|
|
delete(tbl.Fields, "dropwizard_metric_registry_path")
|
|
|
|
delete(tbl.Fields, "dropwizard_time_path")
|
|
|
|
delete(tbl.Fields, "dropwizard_time_format")
|
|
|
|
delete(tbl.Fields, "dropwizard_tags_path")
|
|
|
|
delete(tbl.Fields, "dropwizard_tag_paths")
|
2018-07-14 06:22:59 +00:00
|
|
|
delete(tbl.Fields, "grok_named_patterns")
|
|
|
|
delete(tbl.Fields, "grok_patterns")
|
|
|
|
delete(tbl.Fields, "grok_custom_patterns")
|
|
|
|
delete(tbl.Fields, "grok_custom_pattern_files")
|
|
|
|
delete(tbl.Fields, "grok_timezone")
|
2019-02-27 01:35:57 +00:00
|
|
|
delete(tbl.Fields, "grok_unique_timestamp")
|
2018-09-18 16:23:45 +00:00
|
|
|
delete(tbl.Fields, "csv_column_names")
|
2018-10-04 01:19:44 +00:00
|
|
|
delete(tbl.Fields, "csv_column_types")
|
2018-09-18 16:23:45 +00:00
|
|
|
delete(tbl.Fields, "csv_comment")
|
|
|
|
delete(tbl.Fields, "csv_delimiter")
|
2018-08-24 23:40:41 +00:00
|
|
|
delete(tbl.Fields, "csv_field_columns")
|
2018-09-18 16:23:45 +00:00
|
|
|
delete(tbl.Fields, "csv_header_row_count")
|
|
|
|
delete(tbl.Fields, "csv_measurement_column")
|
|
|
|
delete(tbl.Fields, "csv_skip_columns")
|
|
|
|
delete(tbl.Fields, "csv_skip_rows")
|
|
|
|
delete(tbl.Fields, "csv_tag_columns")
|
2018-08-24 23:40:41 +00:00
|
|
|
delete(tbl.Fields, "csv_timestamp_column")
|
|
|
|
delete(tbl.Fields, "csv_timestamp_format")
|
2018-09-20 19:55:58 +00:00
|
|
|
delete(tbl.Fields, "csv_trim_space")
|
2019-06-17 21:44:25 +00:00
|
|
|
delete(tbl.Fields, "form_urlencoded_tag_keys")
|
2016-02-06 00:36:35 +00:00
|
|
|
|
2018-09-18 16:23:45 +00:00
|
|
|
return c, nil
|
2016-02-06 00:36:35 +00:00
|
|
|
}
|
|
|
|
|
2016-02-10 22:50:07 +00:00
|
|
|
// buildSerializer grabs the necessary entries from the ast.Table for creating
|
|
|
|
// a serializers.Serializer object, and creates it, which can then be added onto
|
|
|
|
// an Output object.
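//
// For instance (plugin section and values are illustrative), an output using
// the graphite-oriented serializer options handled below:
//
//   [[outputs.file]]
//     data_format = "graphite"
//     prefix = "telegraf"
//     template = "host.tags.measurement.field"
//     graphite_tag_support = true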
|
|
|
|
func buildSerializer(name string, tbl *ast.Table) (serializers.Serializer, error) {
|
2017-03-30 00:12:29 +00:00
|
|
|
c := &serializers.Config{TimestampUnits: time.Duration(1 * time.Second)}
|
2016-02-10 22:50:07 +00:00
|
|
|
|
|
|
|
if node, ok := tbl.Fields["data_format"]; ok {
|
|
|
|
if kv, ok := node.(*ast.KeyValue); ok {
|
|
|
|
if str, ok := kv.Value.(*ast.String); ok {
|
|
|
|
c.DataFormat = str.Value
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if c.DataFormat == "" {
|
|
|
|
c.DataFormat = "influx"
|
|
|
|
}
|
|
|
|
|
|
|
|
if node, ok := tbl.Fields["prefix"]; ok {
|
|
|
|
if kv, ok := node.(*ast.KeyValue); ok {
|
|
|
|
if str, ok := kv.Value.(*ast.String); ok {
|
|
|
|
c.Prefix = str.Value
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-04-08 22:04:45 +00:00
|
|
|
if node, ok := tbl.Fields["template"]; ok {
|
|
|
|
if kv, ok := node.(*ast.KeyValue); ok {
|
|
|
|
if str, ok := kv.Value.(*ast.String); ok {
|
|
|
|
c.Template = str.Value
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-03-31 18:30:21 +00:00
|
|
|
if node, ok := tbl.Fields["templates"]; ok {
|
|
|
|
if kv, ok := node.(*ast.KeyValue); ok {
|
|
|
|
if ary, ok := kv.Value.(*ast.Array); ok {
|
|
|
|
for _, elem := range ary.Value {
|
|
|
|
if str, ok := elem.(*ast.String); ok {
|
|
|
|
c.Templates = append(c.Templates, str.Value)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-03-28 00:30:51 +00:00
|
|
|
if node, ok := tbl.Fields["influx_max_line_bytes"]; ok {
|
|
|
|
if kv, ok := node.(*ast.KeyValue); ok {
|
|
|
|
if integer, ok := kv.Value.(*ast.Integer); ok {
|
|
|
|
v, err := integer.Int()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
c.InfluxMaxLineBytes = int(v)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if node, ok := tbl.Fields["influx_sort_fields"]; ok {
|
|
|
|
if kv, ok := node.(*ast.KeyValue); ok {
|
|
|
|
if b, ok := kv.Value.(*ast.Boolean); ok {
|
|
|
|
var err error
|
|
|
|
c.InfluxSortFields, err = b.Boolean()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-03-29 20:31:43 +00:00
|
|
|
if node, ok := tbl.Fields["influx_uint_support"]; ok {
|
|
|
|
if kv, ok := node.(*ast.KeyValue); ok {
|
|
|
|
if b, ok := kv.Value.(*ast.Boolean); ok {
|
|
|
|
var err error
|
|
|
|
c.InfluxUintSupport, err = b.Boolean()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-05-21 23:39:15 +00:00
|
|
|
if node, ok := tbl.Fields["graphite_tag_support"]; ok {
|
|
|
|
if kv, ok := node.(*ast.KeyValue); ok {
|
|
|
|
if b, ok := kv.Value.(*ast.Boolean); ok {
|
|
|
|
var err error
|
|
|
|
c.GraphiteTagSupport, err = b.Boolean()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-05-21 00:15:18 +00:00
|
|
|
if node, ok := tbl.Fields["graphite_separator"]; ok {
|
|
|
|
if kv, ok := node.(*ast.KeyValue); ok {
|
|
|
|
if str, ok := kv.Value.(*ast.String); ok {
|
|
|
|
c.GraphiteSeparator = str.Value
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-03-30 00:12:29 +00:00
|
|
|
if node, ok := tbl.Fields["json_timestamp_units"]; ok {
|
|
|
|
if kv, ok := node.(*ast.KeyValue); ok {
|
|
|
|
if str, ok := kv.Value.(*ast.String); ok {
|
|
|
|
timestampVal, err := time.ParseDuration(str.Value)
|
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("Unable to parse json_timestamp_units as a duration, %s", err)
|
|
|
|
}
|
|
|
|
// now that we have a duration, truncate it to the nearest
|
|
|
|
// power of ten (just in case)
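// e.g. json_timestamp_units = "15ms" is 15,000,000ns; int64(log10) gives 7,
// so the setting is truncated down to 10^7 ns = 10ms.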
|
|
|
|
nearestExponent := int64(math.Log10(float64(timestampVal.Nanoseconds())))
newNanoseconds := int64(math.Pow(10.0, float64(nearestExponent)))
c.TimestampUnits = time.Duration(newNanoseconds)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-09-11 20:01:08 +00:00
|
|
|
if node, ok := tbl.Fields["splunkmetric_hec_routing"]; ok {
|
|
|
|
if kv, ok := node.(*ast.KeyValue); ok {
|
|
|
|
if b, ok := kv.Value.(*ast.Boolean); ok {
|
|
|
|
var err error
|
|
|
|
c.HecRouting, err = b.Boolean()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-11-18 20:38:34 +00:00
|
|
|
if node, ok := tbl.Fields["splunkmetric_multimetric"]; ok {
|
|
|
|
if kv, ok := node.(*ast.KeyValue); ok {
|
|
|
|
if b, ok := kv.Value.(*ast.Boolean); ok {
|
|
|
|
var err error
|
|
|
|
c.SplunkmetricMultiMetric, err = b.Boolean()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-04-05 21:46:12 +00:00
|
|
|
if node, ok := tbl.Fields["wavefront_source_override"]; ok {
|
|
|
|
if kv, ok := node.(*ast.KeyValue); ok {
|
|
|
|
if ary, ok := kv.Value.(*ast.Array); ok {
|
|
|
|
for _, elem := range ary.Value {
|
|
|
|
if str, ok := elem.(*ast.String); ok {
|
|
|
|
c.WavefrontSourceOverride = append(c.WavefrontSourceOverride, str.Value)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if node, ok := tbl.Fields["wavefront_use_strict"]; ok {
|
|
|
|
if kv, ok := node.(*ast.KeyValue); ok {
|
|
|
|
if b, ok := kv.Value.(*ast.Boolean); ok {
|
|
|
|
var err error
|
|
|
|
c.WavefrontUseStrict, err = b.Boolean()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-12-26 18:15:25 +00:00
|
|
|
if node, ok := tbl.Fields["prometheus_export_timestamp"]; ok {
|
|
|
|
if kv, ok := node.(*ast.KeyValue); ok {
|
|
|
|
if b, ok := kv.Value.(*ast.Boolean); ok {
|
|
|
|
var err error
|
|
|
|
c.PrometheusExportTimestamp, err = b.Boolean()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if node, ok := tbl.Fields["prometheus_sort_metrics"]; ok {
|
|
|
|
if kv, ok := node.(*ast.KeyValue); ok {
|
|
|
|
if b, ok := kv.Value.(*ast.Boolean); ok {
|
|
|
|
var err error
|
|
|
|
c.PrometheusSortMetrics, err = b.Boolean()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if node, ok := tbl.Fields["prometheus_string_as_label"]; ok {
|
|
|
|
if kv, ok := node.(*ast.KeyValue); ok {
|
|
|
|
if b, ok := kv.Value.(*ast.Boolean); ok {
|
|
|
|
var err error
|
|
|
|
c.PrometheusStringAsLabel, err = b.Boolean()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-03-28 00:30:51 +00:00
|
|
|
delete(tbl.Fields, "influx_max_line_bytes")
|
|
|
|
delete(tbl.Fields, "influx_sort_fields")
|
2018-03-29 20:31:43 +00:00
|
|
|
delete(tbl.Fields, "influx_uint_support")
|
2018-05-21 23:39:15 +00:00
|
|
|
delete(tbl.Fields, "graphite_tag_support")
|
2020-05-21 00:15:18 +00:00
|
|
|
delete(tbl.Fields, "graphite_separator")
|
2016-02-10 22:50:07 +00:00
|
|
|
delete(tbl.Fields, "data_format")
|
|
|
|
delete(tbl.Fields, "prefix")
|
2016-04-08 22:04:45 +00:00
|
|
|
delete(tbl.Fields, "template")
|
2020-03-31 18:30:21 +00:00
|
|
|
delete(tbl.Fields, "templates")
|
2017-03-30 00:12:29 +00:00
|
|
|
delete(tbl.Fields, "json_timestamp_units")
|
2018-09-11 20:01:08 +00:00
|
|
|
delete(tbl.Fields, "splunkmetric_hec_routing")
|
2019-11-18 20:38:34 +00:00
|
|
|
delete(tbl.Fields, "splunkmetric_multimetric")
|
2019-04-05 21:46:12 +00:00
|
|
|
delete(tbl.Fields, "wavefront_source_override")
|
|
|
|
delete(tbl.Fields, "wavefront_use_strict")
|
2019-12-26 18:15:25 +00:00
|
|
|
delete(tbl.Fields, "prometheus_export_timestamp")
|
|
|
|
delete(tbl.Fields, "prometheus_sort_metrics")
|
|
|
|
delete(tbl.Fields, "prometheus_string_as_label")
|
2016-02-10 22:50:07 +00:00
|
|
|
return serializers.NewSerializer(c)
|
|
|
|
}
|
|
|
|
|
2016-04-12 23:06:27 +00:00
|
|
|
// buildOutput parses output specific items from the ast.Table,
|
|
|
|
// builds the filter and returns an
|
2016-07-28 11:31:11 +00:00
|
|
|
// models.OutputConfig to be inserted into models.RunningOutput
|
2015-12-01 14:15:28 +00:00
|
|
|
// Note: error exists in the return for future calls that might require error
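//
// A sketch of the generic output options parsed below (plugin name and
// values are hypothetical):
//
//   [[outputs.influxdb]]
//     alias = "influxdb-primary"
//     flush_interval = "10s"
//     metric_batch_size = 1000
//     metric_buffer_limit = 10000
//     namepass = ["cpu", "mem"]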
|
2016-07-28 11:31:11 +00:00
|
|
|
func buildOutput(name string, tbl *ast.Table) (*models.OutputConfig, error) {
|
2016-04-12 23:06:27 +00:00
|
|
|
filter, err := buildFilter(tbl)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2016-07-28 11:31:11 +00:00
|
|
|
oc := &models.OutputConfig{
|
2015-12-01 14:15:28 +00:00
|
|
|
Name: name,
|
2016-04-12 23:06:27 +00:00
|
|
|
Filter: filter,
|
2015-12-01 14:15:28 +00:00
|
|
|
}
|
2018-11-05 21:34:28 +00:00
|
|
|
|
|
|
|
// TODO
|
2016-02-22 20:35:06 +00:00
|
|
|
// Outputs don't support FieldDrop/FieldPass, so set to NameDrop/NamePass
|
|
|
|
if len(oc.Filter.FieldDrop) > 0 {
|
|
|
|
oc.Filter.NameDrop = oc.Filter.FieldDrop
|
|
|
|
}
|
|
|
|
if len(oc.Filter.FieldPass) > 0 {
|
|
|
|
oc.Filter.NamePass = oc.Filter.FieldPass
|
|
|
|
}
|
2018-11-05 21:34:28 +00:00
|
|
|
|
|
|
|
if node, ok := tbl.Fields["flush_interval"]; ok {
|
|
|
|
if kv, ok := node.(*ast.KeyValue); ok {
|
|
|
|
if str, ok := kv.Value.(*ast.String); ok {
|
|
|
|
dur, err := time.ParseDuration(str.Value)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
oc.FlushInterval = dur
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-11-13 00:43:39 +00:00
|
|
|
if node, ok := tbl.Fields["flush_jitter"]; ok {
|
|
|
|
if kv, ok := node.(*ast.KeyValue); ok {
|
|
|
|
if str, ok := kv.Value.(*ast.String); ok {
|
|
|
|
dur, err := time.ParseDuration(str.Value)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
oc.FlushJitter = new(time.Duration)
|
|
|
|
*oc.FlushJitter = dur
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-11-05 21:34:28 +00:00
|
|
|
if node, ok := tbl.Fields["metric_buffer_limit"]; ok {
|
|
|
|
if kv, ok := node.(*ast.KeyValue); ok {
|
|
|
|
if integer, ok := kv.Value.(*ast.Integer); ok {
|
|
|
|
v, err := integer.Int()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
oc.MetricBufferLimit = int(v)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if node, ok := tbl.Fields["metric_batch_size"]; ok {
|
|
|
|
if kv, ok := node.(*ast.KeyValue); ok {
|
|
|
|
if integer, ok := kv.Value.(*ast.Integer); ok {
|
|
|
|
v, err := integer.Int()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
oc.MetricBatchSize = int(v)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-08-21 23:49:07 +00:00
|
|
|
if node, ok := tbl.Fields["alias"]; ok {
|
|
|
|
if kv, ok := node.(*ast.KeyValue); ok {
|
|
|
|
if str, ok := kv.Value.(*ast.String); ok {
|
|
|
|
oc.Alias = str.Value
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-03-13 22:04:23 +00:00
|
|
|
if node, ok := tbl.Fields["name_override"]; ok {
|
|
|
|
if kv, ok := node.(*ast.KeyValue); ok {
|
|
|
|
if str, ok := kv.Value.(*ast.String); ok {
|
|
|
|
oc.NameOverride = str.Value
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if node, ok := tbl.Fields["name_suffix"]; ok {
|
|
|
|
if kv, ok := node.(*ast.KeyValue); ok {
|
|
|
|
if str, ok := kv.Value.(*ast.String); ok {
|
|
|
|
oc.NameSuffix = str.Value
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if node, ok := tbl.Fields["name_prefix"]; ok {
|
|
|
|
if kv, ok := node.(*ast.KeyValue); ok {
|
|
|
|
if str, ok := kv.Value.(*ast.String); ok {
|
|
|
|
oc.NamePrefix = str.Value
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-11-05 21:34:28 +00:00
|
|
|
delete(tbl.Fields, "flush_interval")
|
2019-11-13 00:43:39 +00:00
|
|
|
delete(tbl.Fields, "flush_jitter")
|
2018-11-05 21:34:28 +00:00
|
|
|
delete(tbl.Fields, "metric_buffer_limit")
|
|
|
|
delete(tbl.Fields, "metric_batch_size")
|
2019-08-21 23:49:07 +00:00
|
|
|
delete(tbl.Fields, "alias")
|
2020-03-13 22:04:23 +00:00
|
|
|
delete(tbl.Fields, "name_override")
|
|
|
|
delete(tbl.Fields, "name_suffix")
|
|
|
|
delete(tbl.Fields, "name_prefix")
|
2018-11-05 21:34:28 +00:00
|
|
|
|
2015-12-01 14:15:28 +00:00
|
|
|
return oc, nil
|
2015-11-24 21:22:11 +00:00
|
|
|
}
|
2020-06-05 14:43:43 +00:00
|
|
|
|
|
|
|
// unwrappable lets you retrieve the original telegraf.Processor from the
|
|
|
|
// StreamingProcessor. This is necessary because the toml Unmarshaller won't
|
|
|
|
// look inside composed types.
|
|
|
|
type unwrappable interface {
|
|
|
|
Unwrap() telegraf.Processor
|
|
|
|
}
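
// A minimal sketch of how a caller might use unwrappable when decoding a
// processor table (the variable names here are hypothetical):
//
//   if wrapped, ok := streamingProcessor.(unwrappable); ok {
//       // unmarshal into the underlying processor rather than the wrapper
//       err := toml.UnmarshalTable(tbl, wrapped.Unwrap())
//       // ...
//   }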
|