Overhaul config <-> agent coupling. Put config in its own package.

Cameron Sparr
2015-11-24 14:22:11 -07:00
parent 8dde60e869
commit 979e5f193a
23 changed files with 554 additions and 748 deletions

internal/config/config.go

@@ -0,0 +1,563 @@
package config
import (
"errors"
"fmt"
"io/ioutil"
"log"
"path/filepath"
"sort"
"strings"
"time"
"github.com/influxdb/telegraf/outputs"
"github.com/influxdb/telegraf/plugins"
"github.com/naoina/toml"
"github.com/naoina/toml/ast"
)
// Config holds the agent table, the global tags, and all of the plugins
// and outputs that the user has specified
type Config struct {
Tags map[string]string
PluginFilters []string
OutputFilters []string
Agent *ast.Table
Plugins []*RunningPlugin
Outputs []*RunningOutput
}
func NewConfig() *Config {
c := &Config{
Tags: make(map[string]string),
Plugins: make([]*RunningPlugin, 0),
Outputs: make([]*RunningOutput, 0),
PluginFilters: make([]string, 0),
OutputFilters: make([]string, 0),
}
return c
}
// TagFilter is the name of a tag, and the values on which to filter
type TagFilter struct {
Name string
Filter []string
}
type RunningOutput struct {
Name string
Output outputs.Output
}
type RunningPlugin struct {
Name string
Plugin plugins.Plugin
Config *PluginConfig
}
// PluginConfig contains a plugin's name, collection interval, and drop/pass
// prefix lists, as well as the tag filters to apply
type PluginConfig struct {
Name string
Drop []string
Pass []string
TagDrop []TagFilter
TagPass []TagFilter
Interval time.Duration
}
// ShouldPass returns true if the metric should pass, false if it should drop,
// based on the drop/pass plugin parameters
func (cp *PluginConfig) ShouldPass(measurement string) bool {
if cp.Pass != nil {
for _, pat := range cp.Pass {
if strings.HasPrefix(measurement, pat) {
return true
}
}
return false
}
if cp.Drop != nil {
for _, pat := range cp.Drop {
if strings.HasPrefix(measurement, pat) {
return false
}
}
return true
}
return true
}
// ShouldTagsPass returns true if the metric should pass, false if it should
// drop, based on the tagdrop/tagpass plugin parameters
func (cp *PluginConfig) ShouldTagsPass(tags map[string]string) bool {
if cp.TagPass != nil {
for _, pat := range cp.TagPass {
if tagval, ok := tags[pat.Name]; ok {
for _, filter := range pat.Filter {
if filter == tagval {
return true
}
}
}
}
return false
}
if cp.TagDrop != nil {
for _, pat := range cp.TagDrop {
if tagval, ok := tags[pat.Name]; ok {
for _, filter := range pat.Filter {
if filter == tagval {
return false
}
}
}
}
return true
}
return true
}
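// For example, with Pass = ["mem"], the measurement "mem_free" passes
// (prefix match) while "cpu_usage" is dropped; with a TagPass of
// {Name: "host", Filter: ["server01"]}, only metrics tagged
// host=server01 pass. Pass/TagPass always take precedence over
// Drop/TagDrop.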
// PluginNames returns a list of the names of the configured plugins.
func (c *Config) PluginNames() []string {
var name []string
for _, plugin := range c.Plugins {
name = append(name, plugin.Name)
}
return name
}
// OutputNames returns a list of the names of the configured outputs.
func (c *Config) OutputNames() []string {
var name []string
for _, output := range c.Outputs {
name = append(name, output.Name)
}
return name
}
// ApplyAgent loads the toml config into the given Agent object, overriding
// defaults (such as collection duration) with the values from the toml config.
func (c *Config) ApplyAgent(a interface{}) error {
if c.Agent != nil {
return toml.UnmarshalTable(c.Agent, a)
}
return nil
}
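// For example, a caller might define (an illustrative struct, not part of
// this package):
//
//	type AgentConfig struct {
//		Debug    bool
//		Hostname string
//	}
//
// and pass &AgentConfig{} here; toml.UnmarshalTable then fills Debug and
// Hostname from the "debug" and "hostname" keys of the [agent] table.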
// ListTags returns a string of tags specified in the config,
// line-protocol style
func (c *Config) ListTags() string {
var tags []string
for k, v := range c.Tags {
tags = append(tags, fmt.Sprintf("%s=%s", k, v))
}
sort.Strings(tags)
return strings.Join(tags, " ")
}
var header = `# Telegraf configuration
# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared plugins.
# Even if a plugin has no configuration, it must be declared in here
# to be active. Declaring a plugin means just specifying the name
# as a section with no variables. To deactivate a plugin, comment
# out the name and any variables.
# Use 'telegraf -config telegraf.toml -test' to see what metrics a config
# file would generate.
# One rule that plugins conform to is wherever a connection string
# can be passed, the values '' and 'localhost' are treated specially.
# They indicate that the plugin should use its own built-in configuration to
# connect to the local system.
# NOTE: The configuration has a few required parameters. They are marked
# with 'required'. Be sure to edit those to make this configuration work.
# Tags can also be specified via a normal map, but only one form at a time:
[tags]
# dc = "us-east-1"
# Configuration for telegraf agent
[agent]
# Default data collection interval for all plugins
interval = "10s"
# Rounds collection interval to 'interval'
# ie, if interval="10s" then always collect on :00, :10, :20, etc.
round_interval = true
# Default data flushing interval for all outputs. You should not set this below
# interval. Maximum flush_interval will be flush_interval + flush_jitter
flush_interval = "10s"
# Jitter the flush interval by a random amount. This is primarily to avoid
# large write spikes for users running a large number of telegraf instances.
# ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
flush_jitter = "0s"
# Run telegraf in debug mode
debug = false
# Override default hostname, if empty use os.Hostname()
hostname = ""
###############################################################################
# OUTPUTS #
###############################################################################
[outputs]
`
var pluginHeader = `
###############################################################################
# PLUGINS #
###############################################################################
[plugins]
`
var servicePluginHeader = `
###############################################################################
# SERVICE PLUGINS #
###############################################################################
`
// PrintSampleConfig prints the sample config
func PrintSampleConfig(pluginFilters []string, outputFilters []string) {
fmt.Print(header)
// Filter outputs
var onames []string
for oname := range outputs.Outputs {
if len(outputFilters) == 0 || sliceContains(oname, outputFilters) {
onames = append(onames, oname)
}
}
sort.Strings(onames)
// Print Outputs
for _, oname := range onames {
creator := outputs.Outputs[oname]
output := creator()
printConfig(oname, output, "outputs")
}
// Filter plugins
var pnames []string
for pname := range plugins.Plugins {
if len(pluginFilters) == 0 || sliceContains(pname, pluginFilters) {
pnames = append(pnames, pname)
}
}
sort.Strings(pnames)
// Print Plugins
fmt.Print(pluginHeader)
servPlugins := make(map[string]plugins.ServicePlugin)
for _, pname := range pnames {
creator := plugins.Plugins[pname]
plugin := creator()
switch p := plugin.(type) {
case plugins.ServicePlugin:
servPlugins[pname] = p
continue
}
printConfig(pname, plugin, "plugins")
}
// Print Service Plugins
fmt.Print(servicePluginHeader)
for name, plugin := range servPlugins {
printConfig(name, plugin, "plugins")
}
}
type printer interface {
Description() string
SampleConfig() string
}
func printConfig(name string, p printer, op string) {
fmt.Printf("\n# %s\n[[%s.%s]]", p.Description(), op, name)
config := p.SampleConfig()
if config == "" {
fmt.Printf("\n # no configuration\n")
} else {
// Use Print, not Printf: a sample config may contain '%' characters.
fmt.Print(config)
}
}
func sliceContains(name string, list []string) bool {
for _, b := range list {
if b == name {
return true
}
}
return false
}
// PrintPluginConfig prints the config usage of a single plugin.
func PrintPluginConfig(name string) error {
if creator, ok := plugins.Plugins[name]; ok {
printConfig(name, creator(), "plugins")
} else {
return fmt.Errorf("Plugin %s not found", name)
}
return nil
}
// PrintOutputConfig prints the config usage of a single output.
func PrintOutputConfig(name string) error {
if creator, ok := outputs.Outputs[name]; ok {
printConfig(name, creator(), "outputs")
} else {
return fmt.Errorf("Output %s not found", name)
}
return nil
}
func (c *Config) LoadDirectory(path string) error {
directoryEntries, err := ioutil.ReadDir(path)
if err != nil {
return err
}
for _, entry := range directoryEntries {
if entry.IsDir() {
continue
}
name := entry.Name()
// Only load files ending in ".conf".
if !strings.HasSuffix(name, ".conf") {
continue
}
err := c.LoadConfig(filepath.Join(path, name))
if err != nil {
return err
}
}
return nil
}
// LoadConfig loads the given config file and applies it to c
func (c *Config) LoadConfig(path string) error {
data, err := ioutil.ReadFile(path)
if err != nil {
return err
}
tbl, err := toml.Parse(data)
if err != nil {
return err
}
for name, val := range tbl.Fields {
subTable, ok := val.(*ast.Table)
if !ok {
return errors.New("invalid configuration")
}
switch name {
case "agent":
c.Agent = subTable
case "tags":
if err = toml.UnmarshalTable(subTable, c.Tags); err != nil {
log.Printf("Could not parse [tags] config\n")
return err
}
case "outputs":
for outputName, outputVal := range subTable.Fields {
switch outputSubTable := outputVal.(type) {
case *ast.Table:
if err = c.addOutput(outputName, outputSubTable); err != nil {
return err
}
case []*ast.Table:
for _, t := range outputSubTable {
if err = c.addOutput(outputName, t); err != nil {
return err
}
}
default:
return fmt.Errorf("Unsupported config format: %s",
outputName)
}
}
case "plugins":
for pluginName, pluginVal := range subTable.Fields {
switch pluginSubTable := pluginVal.(type) {
case *ast.Table:
if err = c.addPlugin(pluginName, pluginSubTable); err != nil {
return err
}
case []*ast.Table:
for _, t := range pluginSubTable {
if err = c.addPlugin(pluginName, t); err != nil {
return err
}
}
default:
return fmt.Errorf("Unsupported config format: %s",
pluginName)
}
}
// Assume it's a plugin for legacy config file support if no other
// identifiers are present
default:
if err = c.addPlugin(name, subTable); err != nil {
return err
}
}
}
return nil
}
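// As an example of the legacy fallback above, a config file containing just:
//
//	[memcached]
//	servers = ["localhost"]
//
// is handled as though it were declared [[plugins.memcached]].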
func (c *Config) addOutput(name string, table *ast.Table) error {
if len(c.OutputFilters) > 0 && !sliceContains(name, c.OutputFilters) {
return nil
}
creator, ok := outputs.Outputs[name]
if !ok {
return fmt.Errorf("Undefined but requested output: %s", name)
}
o := creator()
if err := toml.UnmarshalTable(table, o); err != nil {
return err
}
ro := &RunningOutput{
Name: name,
Output: o,
}
c.Outputs = append(c.Outputs, ro)
return nil
}
func (c *Config) addPlugin(name string, table *ast.Table) error {
if len(c.PluginFilters) > 0 && !sliceContains(name, c.PluginFilters) {
return nil
}
creator, ok := plugins.Plugins[name]
if !ok {
return fmt.Errorf("Undefined but requested plugin: %s", name)
}
plugin := creator()
pluginConfig, err := applyPlugin(name, table, plugin)
if err != nil {
return err
}
rp := &RunningPlugin{
Name: name,
Plugin: plugin,
Config: pluginConfig,
}
c.Plugins = append(c.Plugins, rp)
return nil
}
// applyPlugin parses the plugin's config table, applies the remaining fields
// to the given plugin, and returns a PluginConfig that the agent can insert
// into a RunningPlugin.
func applyPlugin(name string, tbl *ast.Table, p plugins.Plugin) (*PluginConfig, error) {
cp := &PluginConfig{Name: name}
if node, ok := tbl.Fields["pass"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
cp.Pass = append(cp.Pass, str.Value)
}
}
}
}
}
if node, ok := tbl.Fields["drop"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
cp.Drop = append(cp.Drop, str.Value)
}
}
}
}
}
if node, ok := tbl.Fields["interval"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
dur, err := time.ParseDuration(str.Value)
if err != nil {
return nil, err
}
cp.Interval = dur
}
}
}
if node, ok := tbl.Fields["tagpass"]; ok {
if subtbl, ok := node.(*ast.Table); ok {
for name, val := range subtbl.Fields {
if kv, ok := val.(*ast.KeyValue); ok {
tagfilter := &TagFilter{Name: name}
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
tagfilter.Filter = append(tagfilter.Filter, str.Value)
}
}
}
cp.TagPass = append(cp.TagPass, *tagfilter)
}
}
}
}
if node, ok := tbl.Fields["tagdrop"]; ok {
if subtbl, ok := node.(*ast.Table); ok {
for name, val := range subtbl.Fields {
if kv, ok := val.(*ast.KeyValue); ok {
tagfilter := &TagFilter{Name: name}
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
tagfilter.Filter = append(tagfilter.Filter, str.Value)
}
}
}
cp.TagDrop = append(cp.TagDrop, *tagfilter)
}
}
}
}
delete(tbl.Fields, "drop")
delete(tbl.Fields, "pass")
delete(tbl.Fields, "interval")
delete(tbl.Fields, "tagdrop")
delete(tbl.Fields, "tagpass")
return cp, toml.UnmarshalTable(tbl, p)
}
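Taken together, the package gives the agent a small surface to program against. A minimal sketch of how a caller might wire it up (the AgentConfig struct and the file paths here are illustrative, not part of this commit):

package main

import (
	"fmt"
	"log"

	"github.com/influxdb/telegraf/internal/config"
)

// AgentConfig is a stand-in for the agent's own settings struct;
// ApplyAgent fills it from the [agent] table of the loaded config.
type AgentConfig struct {
	Debug    bool
	Hostname string
}

func main() {
	c := config.NewConfig()
	if err := c.LoadConfig("/etc/telegraf/telegraf.conf"); err != nil {
		log.Fatal(err)
	}
	// Merge in any drop-in *.conf files.
	if err := c.LoadDirectory("/etc/telegraf/telegraf.d"); err != nil {
		log.Fatal(err)
	}

	ac := &AgentConfig{}
	if err := c.ApplyAgent(ac); err != nil {
		log.Fatal(err)
	}

	fmt.Println("plugins:", c.PluginNames())
	fmt.Println("outputs:", c.OutputNames())
	fmt.Println("tags:   ", c.ListTags())
}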

internal/config/config_test.go

@@ -0,0 +1,118 @@
package config
import (
"testing"
"time"
"github.com/influxdb/telegraf/plugins"
"github.com/influxdb/telegraf/plugins/exec"
"github.com/influxdb/telegraf/plugins/memcached"
"github.com/influxdb/telegraf/plugins/procstat"
"github.com/stretchr/testify/assert"
)
func TestConfig_LoadSinglePlugin(t *testing.T) {
c := NewConfig()
err := c.LoadConfig("./testdata/single_plugin.toml")
if err != nil {
t.Error(err)
}
memcached := plugins.Plugins["memcached"]().(*memcached.Memcached)
memcached.Servers = []string{"localhost"}
mConfig := &PluginConfig{
Name: "memcached",
Drop: []string{"other", "stuff"},
Pass: []string{"some", "strings"},
TagDrop: []TagFilter{
TagFilter{
Name: "badtag",
Filter: []string{"othertag"},
},
},
TagPass: []TagFilter{
TagFilter{
Name: "goodtag",
Filter: []string{"mytag"},
},
},
Interval: 5 * time.Second,
}
assert.Equal(t, memcached, c.Plugins[0].Plugin,
"Testdata did not produce a correct memcached struct.")
assert.Equal(t, mConfig, c.Plugins[0].Config,
"Testdata did not produce correct memcached metadata.")
}
func TestConfig_LoadDirectory(t *testing.T) {
c := NewConfig()
err := c.LoadConfig("./testdata/single_plugin.toml")
if err != nil {
t.Error(err)
}
err = c.LoadDirectory("./testdata/subconfig")
if err != nil {
t.Error(err)
}
memcached := plugins.Plugins["memcached"]().(*memcached.Memcached)
memcached.Servers = []string{"localhost"}
mConfig := &PluginConfig{
Name: "memcached",
Drop: []string{"other", "stuff"},
Pass: []string{"some", "strings"},
TagDrop: []TagFilter{
TagFilter{
Name: "badtag",
Filter: []string{"othertag"},
},
},
TagPass: []TagFilter{
TagFilter{
Name: "goodtag",
Filter: []string{"mytag"},
},
},
Interval: 5 * time.Second,
}
assert.Equal(t, memcached, c.Plugins[0].Plugin,
"Testdata did not produce a correct memcached struct.")
assert.Equal(t, mConfig, c.Plugins[0].Config,
"Testdata did not produce correct memcached metadata.")
ex := plugins.Plugins["exec"]().(*exec.Exec)
ex.Commands = []*exec.Command{
&exec.Command{
Command: "/usr/bin/myothercollector --foo=bar",
Name: "myothercollector",
},
}
eConfig := &PluginConfig{Name: "exec"}
assert.Equal(t, ex, c.Plugins[1].Plugin,
"Merged Testdata did not produce a correct exec struct.")
assert.Equal(t, eConfig, c.Plugins[1].Config,
"Merged Testdata did not produce correct exec metadata.")
memcached.Servers = []string{"192.168.1.1"}
assert.Equal(t, memcached, c.Plugins[2].Plugin,
"Testdata did not produce a correct memcached struct.")
assert.Equal(t, mConfig, c.Plugins[2].Config,
"Testdata did not produce correct memcached metadata.")
pstat := plugins.Plugins["procstat"]().(*procstat.Procstat)
pstat.Specifications = []*procstat.Specification{
&procstat.Specification{
PidFile: "/var/run/grafana-server.pid",
},
&procstat.Specification{
PidFile: "/var/run/influxdb/influxd.pid",
},
}
pConfig := &PluginConfig{Name: "procstat"}
assert.Equal(t, pstat, c.Plugins[3].Plugin,
"Merged Testdata did not produce a correct procstat struct.")
assert.Equal(t, pConfig, c.Plugins[3].Config,
"Merged Testdata did not produce correct procstat metadata.")
}
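A companion test along these lines (not in this commit, but in the same package and using only the filters already exercised by single_plugin.toml) would pin down the ShouldPass/ShouldTagsPass semantics directly:

func TestPluginConfig_Filtering(t *testing.T) {
	cp := &PluginConfig{
		Name:    "memcached",
		Pass:    []string{"some", "strings"},
		Drop:    []string{"other", "stuff"},
		TagPass: []TagFilter{{Name: "goodtag", Filter: []string{"mytag"}}},
		TagDrop: []TagFilter{{Name: "badtag", Filter: []string{"othertag"}}},
	}

	// Pass is a prefix match and takes precedence over Drop.
	assert.True(t, cp.ShouldPass("some_measurement"))
	assert.False(t, cp.ShouldPass("unlisted_measurement"))

	// With TagPass set, a metric must carry a passing tag value.
	assert.True(t, cp.ShouldTagsPass(map[string]string{"goodtag": "mytag"}))
	assert.False(t, cp.ShouldTagsPass(map[string]string{"goodtag": "wrong"}))
}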

internal/config/testdata/single_plugin.toml

@@ -0,0 +1,9 @@
[[plugins.memcached]]
servers = ["localhost"]
pass = ["some", "strings"]
drop = ["other", "stuff"]
interval = "5s"
[plugins.memcached.tagpass]
goodtag = ["mytag"]
[plugins.memcached.tagdrop]
badtag = ["othertag"]

internal/config/testdata/subconfig/exec.conf

@@ -0,0 +1,8 @@
[[plugins.exec]]
# specify commands via an array of tables
[[plugins.exec.commands]]
# the command to run
command = "/usr/bin/myothercollector --foo=bar"
# name of the command (used as a prefix for measurements)
name = "myothercollector"

internal/config/testdata/subconfig/memcached.conf

@@ -0,0 +1,9 @@
[[plugins.memcached]]
servers = ["192.168.1.1"]
pass = ["some", "strings"]
drop = ["other", "stuff"]
interval = "5s"
[plugins.memcached.tagpass]
goodtag = ["mytag"]
[plugins.memcached.tagdrop]
badtag = ["othertag"]

internal/config/testdata/subconfig/procstat.conf

@@ -0,0 +1,5 @@
[[plugins.procstat]]
[[plugins.procstat.specifications]]
pid_file = "/var/run/grafana-server.pid"
[[plugins.procstat.specifications]]
pid_file = "/var/run/influxdb/influxd.pid"


@@ -0,0 +1,334 @@
# Telegraf configuration
# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared plugins.
# Even if a plugin has no configuration, it must be declared in here
# to be active. Declaring a plugin means just specifying the name
# as a section with no variables. To deactivate a plugin, comment
# out the name and any variables.
# Use 'telegraf -config telegraf.toml -test' to see what metrics a config
# file would generate.
# One rule that plugins conform to is wherever a connection string
# can be passed, the values '' and 'localhost' are treated specially.
# They indicate that the plugin should use its own built-in configuration to
# connect to the local system.
# NOTE: The configuration has a few required parameters. They are marked
# with 'required'. Be sure to edit those to make this configuration work.
# Tags can also be specified via a normal map, but only one form at a time:
[tags]
# dc = "us-east-1"
# Configuration for telegraf agent
[agent]
# Default data collection interval for all plugins
interval = "10s"
# If utc = false, uses local time (utc is highly recommended)
utc = true
# Precision of writes, valid values are n, u, ms, s, m, and h
# note: using second precision greatly helps InfluxDB compression
precision = "s"
# run telegraf in debug mode
debug = false
# Override default hostname, if empty use os.Hostname()
hostname = ""
###############################################################################
# OUTPUTS #
###############################################################################
[outputs]
# Configuration for influxdb server to send metrics to
[[outputs.influxdb]]
# The full HTTP endpoint URL for your InfluxDB instance
# Multiple urls can be specified for InfluxDB cluster support. Server to
# write to will be randomly chosen each interval.
urls = ["http://localhost:8086"] # required.
# The target database for metrics. This database must already exist
database = "telegraf" # required.
# Connection timeout (for the connection with InfluxDB), formatted as a string.
# Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
# If not provided, will default to 0 (no timeout)
# timeout = "5s"
# username = "telegraf"
# password = "metricsmetricsmetricsmetrics"
# Set the user agent for the POSTs (can be useful for log differentiation)
# user_agent = "telegraf"
[[outputs.influxdb]]
urls = ["udp://localhost:8089"]
database = "udp-telegraf"
# Configuration for the Kafka server to send metrics to
[[outputs.kafka]]
# URLs of kafka brokers
brokers = ["localhost:9092"]
# Kafka topic for producer messages
topic = "telegraf"
# Telegraf tag to use as a routing key
# ie, if this tag exists, its value will be used as the routing key
routing_tag = "host"
###############################################################################
# PLUGINS #
###############################################################################
[plugins]
# Read Apache status information (mod_status)
[[plugins.apache]]
# An array of Apache status URI to gather stats.
urls = ["http://localhost/server-status?auto"]
# Read metrics about cpu usage
[[plugins.cpu]]
# Whether to report per-cpu stats or not
percpu = true
# Whether to report total system cpu stats or not
totalcpu = true
# Comment this line if you want the raw CPU time metrics
drop = ["cpu_time"]
# Read metrics about disk usage by mount point
[[plugins.disk]]
# no configuration
# Read metrics from one or many disque servers
[[plugins.disque]]
# An array of URI to gather stats about. Specify an ip or hostname
# with optional port and password. ie disque://localhost, disque://10.10.3.33:18832,
# 10.0.0.1:10000, etc.
#
# If no servers are specified, then localhost is used as the host.
servers = ["localhost"]
# Read stats from one or more Elasticsearch servers or clusters
[[plugins.elasticsearch]]
# specify a list of one or more Elasticsearch servers
servers = ["http://localhost:9200"]
# set local to false when you want to read the indices stats from all nodes
# within the cluster
local = true
# Read flattened metrics from one or more commands that output JSON to stdout
[[plugins.exec]]
# specify commands via an array of tables
[[plugins.exec.commands]]
# the command to run
command = "/usr/bin/mycollector --foo=bar"
# name of the command (used as a prefix for measurements)
name = "mycollector"
# Read metrics of haproxy, via socket or csv stats page
[[plugins.haproxy]]
# An array of addresses to gather stats about. Specify an ip or hostname
# with optional port. ie localhost, 10.10.3.33:1936, etc.
#
# If no servers are specified, then default to 127.0.0.1:1936
servers = ["http://myhaproxy.com:1936", "http://anotherhaproxy.com:1936"]
# Or you can also use a local socket (not working yet)
# servers = ["socket:/run/haproxy/admin.sock"]
# Read flattened metrics from one or more JSON HTTP endpoints
[[plugins.httpjson]]
# Specify services via an array of tables
[[plugins.httpjson.services]]
# a name for the service being polled
name = "webserver_stats"
# URL of each server in the service's cluster
servers = [
"http://localhost:9999/stats/",
"http://localhost:9998/stats/",
]
# HTTP method to use (case-sensitive)
method = "GET"
# HTTP parameters (all values must be strings)
[plugins.httpjson.services.parameters]
event_type = "cpu_spike"
threshold = "0.75"
# Read metrics about disk IO by device
[[plugins.io]]
# no configuration
# read metrics from a Kafka topic
[[plugins.kafka_consumer]]
# topic(s) to consume
topics = ["telegraf"]
# an array of Zookeeper connection strings
zookeeper_peers = ["localhost:2181"]
# the name of the consumer group
consumer_group = "telegraf_metrics_consumers"
# Maximum number of points to buffer between collection intervals
point_buffer = 100000
# Offset (must be either "oldest" or "newest")
offset = "oldest"
# Read metrics from a LeoFS Server via SNMP
[[plugins.leofs]]
# An array of URI to gather stats about LeoFS.
# Specify an ip or hostname with port. ie 127.0.0.1:4020
#
# If no servers are specified, then 127.0.0.1 is used as the host and 4020 as the port.
servers = ["127.0.0.1:4021"]
# Read metrics from local Lustre service on OST, MDS
[[plugins.lustre2]]
# An array of /proc globs to search for Lustre stats
# If not specified, the default will work on Lustre 2.5.x
#
# ost_procfiles = ["/proc/fs/lustre/obdfilter/*/stats", "/proc/fs/lustre/osd-ldiskfs/*/stats"]
# mds_procfiles = ["/proc/fs/lustre/mdt/*/md_stats"]
# Read metrics about memory usage
[[plugins.mem]]
# no configuration
# Read metrics from one or many memcached servers
[[plugins.memcached]]
# An array of addresses to gather stats about. Specify an ip or hostname
# with optional port. ie localhost, 10.0.0.1:11211, etc.
#
# If no servers are specified, then localhost is used as the host.
servers = ["localhost"]
# Read metrics from one or many MongoDB servers
[[plugins.mongodb]]
# An array of URI to gather stats about. Specify an ip or hostname
# with optional port and password. ie mongodb://user:auth_key@10.10.3.30:27017,
# mongodb://10.10.3.33:18832, 10.0.0.1:10000, etc.
#
# If no servers are specified, then 127.0.0.1 is used as the host and 27017 as the port.
servers = ["127.0.0.1:27017"]
# Read metrics from one or many mysql servers
[[plugins.mysql]]
# specify servers via a url matching:
# [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify]]
# e.g.
# servers = ["root:root@http://10.0.0.18/?tls=false"]
# servers = ["root:passwd@tcp(127.0.0.1:3306)/"]
#
# If no servers are specified, then localhost is used as the host.
servers = ["localhost"]
# Read metrics about network interface usage
[[plugins.net]]
# By default, telegraf gathers stats from any up interface (excluding loopback)
# Setting interfaces will tell it to gather these explicit interfaces,
# regardless of status.
#
# interfaces = ["eth0", ... ]
# Read Nginx's basic status information (ngx_http_stub_status_module)
[[plugins.nginx]]
# An array of Nginx stub_status URI to gather stats.
urls = ["http://localhost/status"]
# Ping given url(s) and return statistics
[[plugins.ping]]
# urls to ping
urls = ["www.google.com"] # required
# number of pings to send (ping -c <COUNT>)
count = 1 # required
# interval, in s, at which to ping. 0 == default (ping -i <PING_INTERVAL>)
ping_interval = 0.0
# ping timeout, in s. 0 == no timeout (ping -t <TIMEOUT>)
timeout = 0.0
# interface to send ping from (ping -I <INTERFACE>)
interface = ""
# Read metrics from one or many postgresql servers
[[plugins.postgresql]]
# specify servers via an array of tables
[[plugins.postgresql.servers]]
# specify address via a url matching:
# postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full]
# or a simple string:
# host=localhost user=pqotest password=... sslmode=... dbname=app_production
#
# All connection parameters are optional. By default, the host is localhost
# and the user is the currently running user. For localhost, we default
# to sslmode=disable as well.
#
# Without the dbname parameter, the driver will default to a database
# with the same name as the user. This dbname is just for instantiating a
# connection with the server and doesn't restrict the databases we are trying
# to grab metrics for.
#
address = "sslmode=disable"
# A list of databases to pull metrics about. If not specified, metrics for all
# databases are gathered.
# databases = ["app_production", "blah_testing"]
# [[plugins.postgresql.servers]]
# address = "influx@remoteserver"
# Read metrics from one or many prometheus clients
[[plugins.prometheus]]
# An array of urls to scrape metrics from.
urls = ["http://localhost:9100/metrics"]
# Read metrics from one or many RabbitMQ servers via the management API
[[plugins.rabbitmq]]
# Specify servers via an array of tables
[[plugins.rabbitmq.servers]]
# name = "rmq-server-1" # optional tag
# url = "http://localhost:15672"
# username = "guest"
# password = "guest"
# A list of nodes to pull metrics about. If not specified, metrics for
# all nodes are gathered.
# nodes = ["rabbit@node1", "rabbit@node2"]
# Read metrics from one or many redis servers
[[plugins.redis]]
# An array of URI to gather stats about. Specify an ip or hostname
# with optional port and password. ie redis://localhost, redis://10.10.3.33:18832,
# 10.0.0.1:10000, etc.
#
# If no servers are specified, then localhost is used as the host.
servers = ["localhost"]
# Read metrics from one or many RethinkDB servers
[[plugins.rethinkdb]]
# An array of URI to gather stats about. Specify an ip or hostname
# with optional port and password. ie rethinkdb://user:auth_key@10.10.3.30:28105,
# rethinkdb://10.10.3.33:18832, 10.0.0.1:10000, etc.
#
# If no servers are specified, then 127.0.0.1 is used as the host and 28015 as the port.
servers = ["127.0.0.1:28015"]
# Read metrics about swap memory usage
[[plugins.swap]]
# no configuration
# Read metrics about system load & uptime
[[plugins.system]]
# no configuration
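
# Because applyPlugin strips pass, drop, interval, tagpass, and tagdrop from a
# plugin's table before unmarshaling, any plugin section in this file can use
# them. A hypothetical example on the cpu plugin (the host names below are
# placeholders):

[[plugins.cpu]]
  percpu = true
  totalcpu = true
  # override the agent-wide collection interval for this plugin
  interval = "30s"
  # only keep measurements whose names start with "cpu"
  pass = ["cpu"]
  # only keep metrics coming from these hosts
  [plugins.cpu.tagpass]
    host = ["server01", "server02"]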