Add -outputfilter flag, and refactor the filter flag to work for -sample-config

Closes #211
Issue #199
This commit is contained in:
Cameron Sparr 2015-09-21 18:38:57 -07:00
parent 0700e0cf94
commit 64d586ef70
6 changed files with 308 additions and 219 deletions

View File

@ -78,11 +78,13 @@ if you don't have it already. You also must build with golang version 1.4+
### How to use it: ### How to use it:
* Run `telegraf -sample-config > telegraf.conf` to create an initial configuration * Run `telegraf -sample-config > telegraf.conf` to create an initial configuration
* Or run `telegraf -sample-config -filter cpu:mem -outputfilter influxdb > telegraf.conf`
to create a config file with only CPU and memory plugins defined, and InfluxDB output defined
* Edit the configuration to match your needs * Edit the configuration to match your needs
* Run `telegraf -config telegraf.conf -test` to output one full measurement sample to STDOUT * Run `telegraf -config telegraf.conf -test` to output one full measurement sample to STDOUT
* Run `telegraf -config telegraf.conf` to gather and send metrics to configured outputs. * Run `telegraf -config telegraf.conf` to gather and send metrics to configured outputs.
* Run `telegraf -config telegraf.conf -filter system:swap` * Run `telegraf -config telegraf.conf -filter system:swap`
to enable only the system & swap plugins defined in the config. to run telegraf with only the system & swap plugins defined in the config.
## Telegraf Options ## Telegraf Options

View File

@ -6,7 +6,6 @@ import (
"log" "log"
"os" "os"
"sort" "sort"
"strings"
"sync" "sync"
"time" "time"
@ -113,15 +112,18 @@ func (a *Agent) Close() error {
} }
// LoadOutputs loads the agent's outputs // LoadOutputs loads the agent's outputs
func (a *Agent) LoadOutputs() ([]string, error) { func (a *Agent) LoadOutputs(filters []string) ([]string, error) {
var names []string var names []string
for _, name := range a.Config.OutputsDeclared() { for _, name := range a.Config.OutputsDeclared() {
fmt.Println(outputs.Outputs)
creator, ok := outputs.Outputs[name] creator, ok := outputs.Outputs[name]
if !ok { if !ok {
return nil, fmt.Errorf("Undefined but requested output: %s", name) return nil, fmt.Errorf("Undefined but requested output: %s", name)
} }
if sliceContains(name, filters) || len(filters) == 0 {
fmt.Println("OUTPUT ENABLED: ", name)
output := creator() output := creator()
err := a.Config.ApplyOutput(name, output) err := a.Config.ApplyOutput(name, output)
@ -132,6 +134,7 @@ func (a *Agent) LoadOutputs() ([]string, error) {
a.outputs = append(a.outputs, &runningOutput{name, output}) a.outputs = append(a.outputs, &runningOutput{name, output})
names = append(names, name) names = append(names, name)
} }
}
sort.Strings(names) sort.Strings(names)
@ -139,14 +142,8 @@ func (a *Agent) LoadOutputs() ([]string, error) {
} }
// LoadPlugins loads the agent's plugins // LoadPlugins loads the agent's plugins
func (a *Agent) LoadPlugins(pluginsFilter string) ([]string, error) { func (a *Agent) LoadPlugins(filters []string) ([]string, error) {
var names []string var names []string
var filters []string
pluginsFilter = strings.TrimSpace(pluginsFilter)
if pluginsFilter != "" {
filters = strings.Split(":"+pluginsFilter+":", ":")
}
for _, name := range a.Config.PluginsDeclared() { for _, name := range a.Config.PluginsDeclared() {
creator, ok := plugins.Plugins[name] creator, ok := plugins.Plugins[name]
@ -154,22 +151,9 @@ func (a *Agent) LoadPlugins(pluginsFilter string) ([]string, error) {
return nil, fmt.Errorf("Undefined but requested plugin: %s", name) return nil, fmt.Errorf("Undefined but requested plugin: %s", name)
} }
isPluginEnabled := false if sliceContains(name, filters) || len(filters) == 0 {
if len(filters) > 0 {
for _, runeValue := range filters {
if runeValue != "" && strings.ToLower(runeValue) == strings.ToLower(name) {
fmt.Printf("plugin [%s] is enabled (filter options)\n", name)
isPluginEnabled = true
break
}
}
} else {
// if no filter, we ALWAYS accept the plugin
isPluginEnabled = true
}
if isPluginEnabled {
plugin := creator() plugin := creator()
config, err := a.Config.ApplyPlugin(name, plugin) config, err := a.Config.ApplyPlugin(name, plugin)
if err != nil { if err != nil {
return nil, err return nil, err

View File

@ -6,6 +6,8 @@ import (
// needing to load the plugins // needing to load the plugins
_ "github.com/influxdb/telegraf/plugins/all" _ "github.com/influxdb/telegraf/plugins/all"
// needing to load the outputs
// _ "github.com/influxdb/telegraf/outputs/all"
) )
func TestAgent_LoadPlugin(t *testing.T) { func TestAgent_LoadPlugin(t *testing.T) {
@ -14,34 +16,47 @@ func TestAgent_LoadPlugin(t *testing.T) {
config, _ := LoadConfig("./testdata/telegraf-agent.toml") config, _ := LoadConfig("./testdata/telegraf-agent.toml")
a, _ := NewAgent(config) a, _ := NewAgent(config)
pluginsEnabled, _ := a.LoadPlugins("mysql") pluginsEnabled, _ := a.LoadPlugins([]string{"mysql"})
assert.Equal(t, 1, len(pluginsEnabled)) assert.Equal(t, 1, len(pluginsEnabled))
pluginsEnabled, _ = a.LoadPlugins("foo") pluginsEnabled, _ = a.LoadPlugins([]string{"foo"})
assert.Equal(t, 0, len(pluginsEnabled)) assert.Equal(t, 0, len(pluginsEnabled))
pluginsEnabled, _ = a.LoadPlugins("mysql:foo") pluginsEnabled, _ = a.LoadPlugins([]string{"mysql", "foo"})
assert.Equal(t, 1, len(pluginsEnabled)) assert.Equal(t, 1, len(pluginsEnabled))
pluginsEnabled, _ = a.LoadPlugins("mysql:redis") pluginsEnabled, _ = a.LoadPlugins([]string{"mysql", "redis"})
assert.Equal(t, 2, len(pluginsEnabled)) assert.Equal(t, 2, len(pluginsEnabled))
pluginsEnabled, _ = a.LoadPlugins(":mysql:foo:redis:bar") pluginsEnabled, _ = a.LoadPlugins([]string{"mysql", "foo", "redis", "bar"})
assert.Equal(t, 2, len(pluginsEnabled)) assert.Equal(t, 2, len(pluginsEnabled))
pluginsEnabled, _ = a.LoadPlugins("")
assert.Equal(t, 23, len(pluginsEnabled))
pluginsEnabled, _ = a.LoadPlugins(" ")
assert.Equal(t, 23, len(pluginsEnabled))
pluginsEnabled, _ = a.LoadPlugins(" ")
assert.Equal(t, 23, len(pluginsEnabled))
pluginsEnabled, _ = a.LoadPlugins("\n\t")
assert.Equal(t, 23, len(pluginsEnabled))
} }
// TODO enable these unit tests, currently disabled because of a circular import
// func TestAgent_LoadOutput(t *testing.T) {
// // load a dedicated configuration file
// config, _ := LoadConfig("./testdata/telegraf-agent.toml")
// a, _ := NewAgent(config)
// outputsEnabled, _ := a.LoadOutputs([]string{"influxdb"})
// assert.Equal(t, 1, len(outputsEnabled))
// outputsEnabled, _ = a.LoadOutputs([]string{})
// assert.Equal(t, 2, len(outputsEnabled))
// outputsEnabled, _ = a.LoadOutputs([]string{"foo"})
// assert.Equal(t, 0, len(outputsEnabled))
// outputsEnabled, _ = a.LoadOutputs([]string{"influxdb", "foo"})
// assert.Equal(t, 1, len(outputsEnabled))
// outputsEnabled, _ = a.LoadOutputs([]string{"influxdb", "kafka"})
// assert.Equal(t, 2, len(outputsEnabled))
// outputsEnabled, _ = a.LoadOutputs([]string{"influxdb", "foo", "kafka", "bar"})
// assert.Equal(t, 2, len(outputsEnabled))
// }
/* /*
func TestAgent_DrivesMetrics(t *testing.T) { func TestAgent_DrivesMetrics(t *testing.T) {
var ( var (

View File

@ -21,8 +21,10 @@ var fVersion = flag.Bool("version", false, "display the version")
var fSampleConfig = flag.Bool("sample-config", false, var fSampleConfig = flag.Bool("sample-config", false,
"print out full sample configuration") "print out full sample configuration")
var fPidfile = flag.String("pidfile", "", "file to write our pid to") var fPidfile = flag.String("pidfile", "", "file to write our pid to")
var fPLuginsFilter = flag.String("filter", "", var fPLuginFilters = flag.String("filter", "",
"filter the plugins to enable, separator is :") "filter the plugins to enable, separator is :")
var fOutputFilters = flag.String("outputfilter", "",
"filter the outputs to enable, separator is :")
var fUsage = flag.String("usage", "", var fUsage = flag.String("usage", "",
"print usage for a plugin, ie, 'telegraf -usage mysql'") "print usage for a plugin, ie, 'telegraf -usage mysql'")
@ -33,6 +35,18 @@ var Version string
func main() { func main() {
flag.Parse() flag.Parse()
var pluginFilters []string
if *fPLuginFilters != "" {
pluginsFilter := strings.TrimSpace(*fPLuginFilters)
pluginFilters = strings.Split(":"+pluginsFilter+":", ":")
}
var outputFilters []string
if *fOutputFilters != "" {
outputFilter := strings.TrimSpace(*fOutputFilters)
outputFilters = strings.Split(":"+outputFilter+":", ":")
}
if *fVersion { if *fVersion {
v := fmt.Sprintf("Telegraf - Version %s", Version) v := fmt.Sprintf("Telegraf - Version %s", Version)
fmt.Println(v) fmt.Println(v)
@ -40,7 +54,7 @@ func main() {
} }
if *fSampleConfig { if *fSampleConfig {
telegraf.PrintSampleConfig() telegraf.PrintSampleConfig(pluginFilters, outputFilters)
return return
} }
@ -76,7 +90,7 @@ func main() {
ag.Debug = true ag.Debug = true
} }
outputs, err := ag.LoadOutputs() outputs, err := ag.LoadOutputs(outputFilters)
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err)
} }
@ -85,7 +99,7 @@ func main() {
os.Exit(1) os.Exit(1)
} }
plugins, err := ag.LoadPlugins(*fPLuginsFilter) plugins, err := ag.LoadPlugins(pluginFilters)
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err)
} }

View File

@ -384,16 +384,17 @@ var header2 = `
############################################################################### ###############################################################################
` `
// PrintSampleConfig prints the sample config! // PrintSampleConfig prints the sample config
func PrintSampleConfig() { func PrintSampleConfig(pluginFilters []string, outputFilters []string) {
fmt.Printf(header) fmt.Printf(header)
// Print Outputs // Print Outputs
var onames []string var onames []string
for oname := range outputs.Outputs { for oname := range outputs.Outputs {
if len(outputFilters) == 0 || sliceContains(oname, outputFilters) {
onames = append(onames, oname) onames = append(onames, oname)
} }
}
sort.Strings(onames) sort.Strings(onames)
for _, oname := range onames { for _, oname := range onames {
@ -414,10 +415,11 @@ func PrintSampleConfig() {
// Print Plugins // Print Plugins
var pnames []string var pnames []string
for pname := range plugins.Plugins { for pname := range plugins.Plugins {
if len(pluginFilters) == 0 || sliceContains(pname, pluginFilters) {
pnames = append(pnames, pname) pnames = append(pnames, pname)
} }
}
sort.Strings(pnames) sort.Strings(pnames)
for _, pname := range pnames { for _, pname := range pnames {
@ -435,6 +437,15 @@ func PrintSampleConfig() {
} }
} }
// sliceContains reports whether name appears in list.
// An empty or nil list never contains anything.
func sliceContains(name string, list []string) bool {
	for _, candidate := range list {
		if candidate == name {
			return true
		}
	}
	return false
}
// PrintPluginConfig prints the config usage of a single plugin. // PrintPluginConfig prints the config usage of a single plugin.
func PrintPluginConfig(name string) error { func PrintPluginConfig(name string) error {
if creator, ok := plugins.Plugins[name]; ok { if creator, ok := plugins.Plugins[name]; ok {

View File

@ -1,8 +1,5 @@
# Telegraf configuration # Telegraf configuration
# If this file is missing an [agent] section, you must first generate a
# valid config with 'telegraf -sample-config > telegraf.toml'
# Telegraf is entirely plugin driven. All metrics are gathered from the # Telegraf is entirely plugin driven. All metrics are gathered from the
# declared plugins. # declared plugins.
@ -22,43 +19,84 @@
# NOTE: The configuration has a few required parameters. They are marked # NOTE: The configuration has a few required parameters. They are marked
# with 'required'. Be sure to edit those to make this configuration work. # with 'required'. Be sure to edit those to make this configuration work.
# Configuration for influxdb server to send metrics to
[outputs]
[outputs.influxdb]
# The full HTTP endpoint URL for your InfluxDB instance
url = "http://localhost:8086" # required.
# The target database for metrics. This database must already exist
database = "telegraf" # required.
# Connection timeout (for the connection with InfluxDB), formatted as a string.
# Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
# If not provided, will default to 0 (no timeout)
# timeout = "5s"
# username = "telegraf"
# password = "metricsmetricsmetricsmetrics"
# Set the user agent for the POSTs (can be useful for log differentiation)
# user_agent = "telegraf"
# Tags can also be specified via a normal map, but only one form at a time: # Tags can also be specified via a normal map, but only one form at a time:
[tags]
# dc = "us-east-1"
# [tags] # Configuration for telegraf agent
# dc = "us-east-1" [agent]
# Default data collection interval for all plugins
interval = "10s"
# Configuration for telegraf itself # If utc = false, uses local time (utc is highly recommended)
# [agent] utc = true
# interval = "10s"
# debug = false
# hostname = "prod3241"
# PLUGINS # Precision of writes, valid values are n, u, ms, s, m, and h
# note: using second precision greatly helps InfluxDB compression
precision = "s"
# run telegraf in debug mode
debug = false
# Override default hostname, if empty use os.Hostname()
hostname = ""
###############################################################################
# OUTPUTS #
###############################################################################
[outputs]
# Configuration for influxdb server to send metrics to
[outputs.influxdb]
# The full HTTP endpoint URL for your InfluxDB instance
# Multiple urls can be specified for InfluxDB cluster support. Server to
# write to will be randomly chosen each interval.
urls = ["http://localhost:8086"] # required.
# The target database for metrics. This database must already exist
database = "telegraf" # required.
# Connection timeout (for the connection with InfluxDB), formatted as a string.
# Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
# If not provided, will default to 0 (no timeout)
# timeout = "5s"
# username = "telegraf"
# password = "metricsmetricsmetricsmetrics"
# Set the user agent for the POSTs (can be useful for log differentiation)
# user_agent = "telegraf"
# Configuration for the Kafka server to send metrics to
[outputs.kafka]
# URLs of kafka brokers
brokers = ["localhost:9092"]
# Kafka topic for producer messages
topic = "telegraf"
# Telegraf tag to use as a routing key
# ie, if this tag exists, it's value will be used as the routing key
routing_tag = "host"
###############################################################################
# PLUGINS #
###############################################################################
# Read Apache status information (mod_status)
[apache]
# An array of Apache status URI to gather stats.
urls = ["http://localhost/server-status?auto"]
# Read metrics about cpu usage # Read metrics about cpu usage
[cpu] [cpu]
totalcpu = true # Whether to report per-cpu stats or not
percpu = false percpu = true
# Whether to report total system cpu stats or not
totalcpu = true
# Comment this line if you want the raw CPU time metrics
drop = ["cpu_time"]
# Read metrics about disk usage by mount point # Read metrics about disk usage by mount point
[disk] [disk]
@ -66,45 +104,63 @@ percpu = false
# Read metrics from one or many disque servers # Read metrics from one or many disque servers
[disque] [disque]
# An array of URI to gather stats about. Specify an ip or hostname
# An array of URI to gather stats about. Specify an ip or hostname # with optional port and password. ie disque://localhost, disque://10.10.3.33:18832,
# with optional port and password. ie disque://localhost, disque://10.10.3.33:18832, # 10.0.0.1:10000, etc.
# 10.0.0.1:10000, etc. #
# # If no servers are specified, then localhost is used as the host.
# If no servers are specified, then localhost is used as the host. servers = ["localhost"]
servers = ["localhost"]
# Read stats from one or more Elasticsearch servers or clusters # Read stats from one or more Elasticsearch servers or clusters
[elasticsearch] [elasticsearch]
# specify a list of one or more Elasticsearch servers
servers = ["http://localhost:9200"]
# specify a list of one or more Elasticsearch servers # set local to false when you want to read the indices stats from all nodes
servers = ["http://localhost:9200"] # within the cluster
local = true
# set local to false when you want to read the indices stats from all nodes
# within the cluster
local = true
# Read flattened metrics from one or more commands that output JSON to stdout # Read flattened metrics from one or more commands that output JSON to stdout
[exec] [exec]
# specify commands via an array of tables
[[exec.commands]]
# the command to run
command = "/usr/bin/mycollector --foo=bar"
# specify commands via an array of tables # name of the command (used as a prefix for measurements)
[[exec.commands]] name = "mycollector"
# the command to run
command = "/usr/bin/mycollector --foo=bar"
# name of the command (used as a prefix for measurements)
name = "mycollector"
# Read metrics of haproxy, via socket or csv stats page # Read metrics of haproxy, via socket or csv stats page
[haproxy] [haproxy]
# An array of address to gather stats about. Specify an ip on hostname
# with optional port. ie localhost, 10.10.3.33:1936, etc.
#
# If no servers are specified, then default to 127.0.0.1:1936
servers = ["http://myhaproxy.com:1936", "http://anotherhaproxy.com:1936"]
# Or you can also use local socket(not work yet)
# servers = ["socket:/run/haproxy/admin.sock"]
# An array of address to gather stats about. Specify an ip on hostname # Read flattened metrics from one or more JSON HTTP endpoints
# with optional port. ie localhost, 10.10.3.33:1936, etc. [httpjson]
# # Specify services via an array of tables
# If no servers are specified, then default to 127.0.0.1:1936 [[httpjson.services]]
servers = ["http://myhaproxy.com:1936", "http://anotherhaproxy.com:1936"]
# Or you can also use local socket(not work yet) # a name for the service being polled
# servers = ["socket:/run/haproxy/admin.sock"] name = "webserver_stats"
# URL of each server in the service's cluster
servers = [
"http://localhost:9999/stats/",
"http://localhost:9998/stats/",
]
# HTTP method to use (case-sensitive)
method = "GET"
# HTTP parameters (all values must be strings)
[httpjson.services.parameters]
event_type = "cpu_spike"
threshold = "0.75"
# Read metrics about disk IO by device # Read metrics about disk IO by device
[io] [io]
@ -112,36 +168,33 @@ servers = ["http://myhaproxy.com:1936", "http://anotherhaproxy.com:1936"]
# read metrics from a Kafka topic # read metrics from a Kafka topic
[kafka] [kafka]
# topic to consume
topic = "topic_with_metrics"
# topic to consume # the name of the consumer group
topic = "topic_with_metrics" consumerGroupName = "telegraf_metrics_consumers"
# the name of the consumer group # an array of Zookeeper connection strings
consumerGroupName = "telegraf_metrics_consumers" zookeeperPeers = ["localhost:2181"]
# an array of Zookeeper connection strings # Batch size of points sent to InfluxDB
zookeeperPeers = ["localhost:2181"] batchSize = 1000
# Batch size of points sent to InfluxDB
batchSize = 1000
# Read metrics from a LeoFS Server via SNMP # Read metrics from a LeoFS Server via SNMP
[leofs] [leofs]
# An array of URI to gather stats about LeoFS.
# An array of URI to gather stats about LeoFS. # Specify an ip or hostname with port. ie 127.0.0.1:4020
# Specify an ip or hostname with port. ie 127.0.0.1:4020 #
# # If no servers are specified, then 127.0.0.1 is used as the host and 4020 as the port.
# If no servers are specified, then 127.0.0.1 is used as the host and 4020 as the port. servers = ["127.0.0.1:4021"]
servers = ["127.0.0.1:4021"]
# Read metrics from local Lustre service on OST, MDS # Read metrics from local Lustre service on OST, MDS
[lustre2] [lustre2]
# An array of /proc globs to search for Lustre stats
# An array of /proc globs to search for Lustre stats # If not specified, the default will work on Lustre 2.5.x
# If not specified, the default will work on Lustre 2.5.x #
# # ost_procfiles = ["/proc/fs/lustre/obdfilter/*/stats", "/proc/fs/lustre/osd-ldiskfs/*/stats"]
# ost_procfiles = ["/proc/fs/lustre/obdfilter/*/stats", "/proc/fs/lustre/osd-ldiskfs/*/stats"] # mds_procfiles = ["/proc/fs/lustre/mdt/*/md_stats"]
# mds_procfiles = ["/proc/fs/lustre/mdt/*/md_stats"]
# Read metrics about memory usage # Read metrics about memory usage
[mem] [mem]
@ -149,118 +202,128 @@ servers = ["127.0.0.1:4021"]
# Read metrics from one or many memcached servers # Read metrics from one or many memcached servers
[memcached] [memcached]
# An array of address to gather stats about. Specify an ip on hostname
# An array of address to gather stats about. Specify an ip on hostname # with optional port. ie localhost, 10.0.0.1:11211, etc.
# with optional port. ie localhost, 10.0.0.1:11211, etc. #
# # If no servers are specified, then localhost is used as the host.
# If no servers are specified, then localhost is used as the host. servers = ["localhost"]
servers = ["localhost"]
# Read metrics from one or many MongoDB servers # Read metrics from one or many MongoDB servers
[mongodb] [mongodb]
# An array of URI to gather stats about. Specify an ip or hostname
# An array of URI to gather stats about. Specify an ip or hostname # with optional port add password. ie mongodb://user:auth_key@10.10.3.30:27017,
# with optional port add password. ie mongodb://user:auth_key@10.10.3.30:27017, # mongodb://10.10.3.33:18832, 10.0.0.1:10000, etc.
# mongodb://10.10.3.33:18832, 10.0.0.1:10000, etc. #
# # If no servers are specified, then 127.0.0.1 is used as the host and 27107 as the port.
# If no servers are specified, then 127.0.0.1 is used as the host and 27107 as the port. servers = ["127.0.0.1:27017"]
servers = ["127.0.0.1:27017"]
# Read metrics from one or many mysql servers # Read metrics from one or many mysql servers
[mysql] [mysql]
# specify servers via a url matching:
# specify servers via a url matching: # [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify]]
# [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify]] # e.g.
# e.g. root:root@http://10.0.0.18/?tls=false # root:root@http://10.0.0.18/?tls=false
# # root:passwd@tcp(127.0.0.1:3036)/
# If no servers are specified, then localhost is used as the host. #
servers = ["localhost"] # If no servers are specified, then localhost is used as the host.
servers = ["localhost"]
# Read metrics about network interface usage # Read metrics about network interface usage
[net] [net]
# By default, telegraf gathers stats from any up interface (excluding loopback)
# By default, telegraf gathers stats from any up interface (excluding loopback) # Setting interfaces will tell it to gather these explicit interfaces,
# Setting interfaces will tell it to gather these explicit interfaces, # regardless of status.
# regardless of status. #
# # interfaces = ["eth0", ... ]
# interfaces = ["eth0", ... ]
# Read Nginx's basic status information (ngx_http_stub_status_module) # Read Nginx's basic status information (ngx_http_stub_status_module)
[nginx] [nginx]
# An array of Nginx stub_status URI to gather stats.
urls = ["http://localhost/status"]
# An array of Nginx stub_status URI to gather stats. # Ping given url(s) and return statistics
urls = ["localhost/status"] [ping]
# urls to ping
urls = ["www.google.com"] # required
# number of pings to send (ping -c <COUNT>)
count = 1 # required
# interval, in s, at which to ping. 0 == default (ping -i <PING_INTERVAL>)
ping_interval = 0.0
# ping timeout, in s. 0 == no timeout (ping -t <TIMEOUT>)
timeout = 0.0
# interface to send ping from (ping -I <INTERFACE>)
interface = ""
# Read metrics from one or many postgresql servers # Read metrics from one or many postgresql servers
[postgresql] [postgresql]
# specify servers via an array of tables
[[postgresql.servers]]
# specify servers via an array of tables # specify address via a url matching:
[[postgresql.servers]] # postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full]
# or a simple string:
# host=localhost user=pqotest password=... sslmode=... dbname=app_production
#
# All connection parameters are optional. By default, the host is localhost
# and the user is the currently running user. For localhost, we default
# to sslmode=disable as well.
#
# Without the dbname parameter, the driver will default to a database
# with the same name as the user. This dbname is just for instantiating a
# connection with the server and doesn't restrict the databases we are trying
# to grab metrics for.
#
# specify address via a url matching: address = "sslmode=disable"
# postgres://[pqgotest[:password]]@localhost?sslmode=[disable|verify-ca|verify-full]
# or a simple string:
# host=localhost user=pqotest password=... sslmode=...
#
# All connection parameters are optional. By default, the host is localhost
# and the user is the currently running user. For localhost, we default
# to sslmode=disable as well.
#
address = "sslmode=disable" # A list of databases to pull metrics about. If not specified, metrics for all
# databases are gathered.
# A list of databases to pull metrics about. If not specified, metrics for all # databases = ["app_production", "blah_testing"]
# databases are gathered.
# databases = ["app_production", "blah_testing"] # [[postgresql.servers]]
# address = "influx@remoteserver"
# [[postgresql.servers]]
# address = "influx@remoteserver"
# Read metrics from one or many prometheus clients # Read metrics from one or many prometheus clients
[prometheus] [prometheus]
# An array of urls to scrape metrics from.
# An array of urls to scrape metrics from. urls = ["http://localhost:9100/metrics"]
urls = ["http://localhost:9100/metrics"]
# Read metrics from one or many RabbitMQ servers via the management API # Read metrics from one or many RabbitMQ servers via the management API
[rabbitmq] [rabbitmq]
# Specify servers via an array of tables
[[rabbitmq.servers]]
# name = "rmq-server-1" # optional tag
# url = "http://localhost:15672"
# username = "guest"
# password = "guest"
# Specify servers via an array of tables # A list of nodes to pull metrics about. If not specified, metrics for
[[rabbitmq.servers]] # all nodes are gathered.
# url = "http://localhost:15672" # nodes = ["rabbit@node1", "rabbit@node2"]
# username = "guest"
# password = "guest"
# A list of nodes to pull metrics about. If not specified, metrics for
# all nodes are gathered.
# nodes = ["rabbit@node1", "rabbit@node2"]
# Read metrics from one or many redis servers # Read metrics from one or many redis servers
[redis] [redis]
# An array of URI to gather stats about. Specify an ip or hostname
# An array of URI to gather stats about. Specify an ip or hostname # with optional port add password. ie redis://localhost, redis://10.10.3.33:18832,
# with optional port add password. ie redis://localhost, redis://10.10.3.33:18832, # 10.0.0.1:10000, etc.
# 10.0.0.1:10000, etc. #
# # If no servers are specified, then localhost is used as the host.
# If no servers are specified, then localhost is used as the host. servers = ["localhost"]
servers = ["localhost"]
# Read metrics from one or many RethinkDB servers # Read metrics from one or many RethinkDB servers
[rethinkdb] [rethinkdb]
# An array of URI to gather stats about. Specify an ip or hostname
# An array of URI to gather stats about. Specify an ip or hostname # with optional port add password. ie rethinkdb://user:auth_key@10.10.3.30:28105,
# with optional port add password. ie rethinkdb://user:auth_key@10.10.3.30:28105, # rethinkdb://10.10.3.33:18832, 10.0.0.1:10000, etc.
# rethinkdb://10.10.3.33:18832, 10.0.0.1:10000, etc. #
# # If no servers are specified, then 127.0.0.1 is used as the host and 28015 as the port.
# If no servers are specified, then 127.0.0.1 is used as the host and 28015 as the port. servers = ["127.0.0.1:28015"]
servers = ["127.0.0.1:28015"]
# Read metrics about swap memory usage # Read metrics about swap memory usage
[swap] [swap]
# no configuration # no configuration
# Read metrics about system load # Read metrics about system load & uptime
[system] [system]
# no configuration # no configuration