diff --git a/README.md b/README.md
index 1ccd7f9a7..356374983 100644
--- a/README.md
+++ b/README.md
@@ -78,11 +78,13 @@ if you don't have it already. You also must build with golang version 1.4+
 ### How to use it:
 
 * Run `telegraf -sample-config > telegraf.conf` to create an initial configuration
+* Or run `telegraf -sample-config -filter cpu:mem -outputfilter influxdb > telegraf.conf`
+to create a config file with only the CPU and memory plugins and the InfluxDB output defined
 * Edit the configuration to match your needs
 * Run `telegraf -config telegraf.conf -test` to output one full measurement sample to STDOUT
 * Run `telegraf -config telegraf.conf` to gather and send metrics to configured outputs.
 * Run `telegraf -config telegraf.conf -filter system:swap`
-to enable only the system & swap plugins defined in the config.
+to run telegraf with only the system & swap plugins defined in the config.
 
 ## Telegraf Options
 
diff --git a/agent.go b/agent.go
index a8ee44207..06e79a6c5 100644
--- a/agent.go
+++ b/agent.go
@@ -6,7 +6,6 @@ import (
 	"log"
 	"os"
 	"sort"
-	"strings"
 	"sync"
 	"time"
 
@@ -113,24 +112,28 @@ func (a *Agent) Close() error {
 }
 
 // LoadOutputs loads the agent's outputs
-func (a *Agent) LoadOutputs() ([]string, error) {
+func (a *Agent) LoadOutputs(filters []string) ([]string, error) {
 	var names []string
 
 	for _, name := range a.Config.OutputsDeclared() {
+		fmt.Println(outputs.Outputs)
 		creator, ok := outputs.Outputs[name]
 		if !ok {
 			return nil, fmt.Errorf("Undefined but requested output: %s", name)
 		}
 
-		output := creator()
+		if sliceContains(name, filters) || len(filters) == 0 {
+			fmt.Println("OUTPUT ENABLED: ", name)
+			output := creator()
 
-		err := a.Config.ApplyOutput(name, output)
-		if err != nil {
-			return nil, err
+			err := a.Config.ApplyOutput(name, output)
+			if err != nil {
+				return nil, err
+			}
+
+			a.outputs = append(a.outputs, &runningOutput{name, output})
+			names = append(names, name)
 		}
-
-		a.outputs = append(a.outputs, &runningOutput{name, output})
-		names = append(names, name)
 	}
 
 	sort.Strings(names)
@@ -139,14 +142,8 @@
 }
 
 // LoadPlugins loads the agent's plugins
-func (a *Agent) LoadPlugins(pluginsFilter string) ([]string, error) {
+func (a *Agent) LoadPlugins(filters []string) ([]string, error) {
 	var names []string
-	var filters []string
-
-	pluginsFilter = strings.TrimSpace(pluginsFilter)
-	if pluginsFilter != "" {
-		filters = strings.Split(":"+pluginsFilter+":", ":")
-	}
 
 	for _, name := range a.Config.PluginsDeclared() {
 		creator, ok := plugins.Plugins[name]
@@ -154,22 +151,9 @@
 			return nil, fmt.Errorf("Undefined but requested plugin: %s", name)
 		}
 
-		isPluginEnabled := false
-		if len(filters) > 0 {
-			for _, runeValue := range filters {
-				if runeValue != "" && strings.ToLower(runeValue) == strings.ToLower(name) {
-					fmt.Printf("plugin [%s] is enabled (filter options)\n", name)
-					isPluginEnabled = true
-					break
-				}
-			}
-		} else {
-			// if no filter, we ALWAYS accept the plugin
-			isPluginEnabled = true
-		}
-
-		if isPluginEnabled {
+		if sliceContains(name, filters) || len(filters) == 0 {
 			plugin := creator()
+
 			config, err := a.Config.ApplyPlugin(name, plugin)
 			if err != nil {
 				return nil, err
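Editor's note: the hunks above replace the old colon-string filtering in `LoadPlugins` with a single membership test that both `LoadPlugins` and `LoadOutputs` now share. A minimal sketch of that rule, reusing the `sliceContains` helper this patch adds in config.go (the sketch itself is illustrative and not part of the patch); note that, unlike the removed `strings.ToLower` comparison, the match is now exact and case-sensitive:

```go
package main

import "fmt"

// sliceContains is the helper this patch adds in config.go: exact, case-sensitive match.
func sliceContains(name string, list []string) bool {
	for _, b := range list {
		if b == name {
			return true
		}
	}
	return false
}

func main() {
	filters := []string{"mysql", "redis"}
	for _, name := range []string{"mysql", "redis", "cpu"} {
		// The rule used by both loaders: enabled if listed, or if no filter was given.
		enabled := sliceContains(name, filters) || len(filters) == 0
		fmt.Printf("%s enabled: %v\n", name, enabled)
	}
}
```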
"github.com/influxdb/telegraf/outputs/all" ) func TestAgent_LoadPlugin(t *testing.T) { @@ -14,34 +16,47 @@ func TestAgent_LoadPlugin(t *testing.T) { config, _ := LoadConfig("./testdata/telegraf-agent.toml") a, _ := NewAgent(config) - pluginsEnabled, _ := a.LoadPlugins("mysql") + pluginsEnabled, _ := a.LoadPlugins([]string{"mysql"}) assert.Equal(t, 1, len(pluginsEnabled)) - pluginsEnabled, _ = a.LoadPlugins("foo") + pluginsEnabled, _ = a.LoadPlugins([]string{"foo"}) assert.Equal(t, 0, len(pluginsEnabled)) - pluginsEnabled, _ = a.LoadPlugins("mysql:foo") + pluginsEnabled, _ = a.LoadPlugins([]string{"mysql", "foo"}) assert.Equal(t, 1, len(pluginsEnabled)) - pluginsEnabled, _ = a.LoadPlugins("mysql:redis") + pluginsEnabled, _ = a.LoadPlugins([]string{"mysql", "redis"}) assert.Equal(t, 2, len(pluginsEnabled)) - pluginsEnabled, _ = a.LoadPlugins(":mysql:foo:redis:bar") + pluginsEnabled, _ = a.LoadPlugins([]string{"mysql", "foo", "redis", "bar"}) assert.Equal(t, 2, len(pluginsEnabled)) - - pluginsEnabled, _ = a.LoadPlugins("") - assert.Equal(t, 23, len(pluginsEnabled)) - - pluginsEnabled, _ = a.LoadPlugins(" ") - assert.Equal(t, 23, len(pluginsEnabled)) - - pluginsEnabled, _ = a.LoadPlugins(" ") - assert.Equal(t, 23, len(pluginsEnabled)) - - pluginsEnabled, _ = a.LoadPlugins("\n\t") - assert.Equal(t, 23, len(pluginsEnabled)) } +// TODO enable these unit tests, currently disabled because of a circular import +// func TestAgent_LoadOutput(t *testing.T) { +// // load a dedicated configuration file +// config, _ := LoadConfig("./testdata/telegraf-agent.toml") +// a, _ := NewAgent(config) + +// outputsEnabled, _ := a.LoadOutputs([]string{"influxdb"}) +// assert.Equal(t, 1, len(outputsEnabled)) + +// outputsEnabled, _ = a.LoadOutputs([]string{}) +// assert.Equal(t, 2, len(outputsEnabled)) + +// outputsEnabled, _ = a.LoadOutputs([]string{"foo"}) +// assert.Equal(t, 0, len(outputsEnabled)) + +// outputsEnabled, _ = a.LoadOutputs([]string{"influxdb", "foo"}) +// assert.Equal(t, 1, len(outputsEnabled)) + +// outputsEnabled, _ = a.LoadOutputs([]string{"influxdb", "kafka"}) +// assert.Equal(t, 2, len(outputsEnabled)) + +// outputsEnabled, _ = a.LoadOutputs([]string{"influxdb", "foo", "kafka", "bar"}) +// assert.Equal(t, 2, len(outputsEnabled)) +// } + /* func TestAgent_DrivesMetrics(t *testing.T) { var ( diff --git a/cmd/telegraf/telegraf.go b/cmd/telegraf/telegraf.go index f28a81bd4..7080f7671 100644 --- a/cmd/telegraf/telegraf.go +++ b/cmd/telegraf/telegraf.go @@ -21,8 +21,10 @@ var fVersion = flag.Bool("version", false, "display the version") var fSampleConfig = flag.Bool("sample-config", false, "print out full sample configuration") var fPidfile = flag.String("pidfile", "", "file to write our pid to") -var fPLuginsFilter = flag.String("filter", "", +var fPLuginFilters = flag.String("filter", "", "filter the plugins to enable, separator is :") +var fOutputFilters = flag.String("outputfilter", "", + "filter the outputs to enable, separator is :") var fUsage = flag.String("usage", "", "print usage for a plugin, ie, 'telegraf -usage mysql'") @@ -33,6 +35,18 @@ var Version string func main() { flag.Parse() + var pluginFilters []string + if *fPLuginFilters != "" { + pluginsFilter := strings.TrimSpace(*fPLuginFilters) + pluginFilters = strings.Split(":"+pluginsFilter+":", ":") + } + + var outputFilters []string + if *fOutputFilters != "" { + outputFilter := strings.TrimSpace(*fOutputFilters) + outputFilters = strings.Split(":"+outputFilter+":", ":") + } + if *fVersion { v := fmt.Sprintf("Telegraf - Version %s", 
diff --git a/config.go b/config.go
index fae445e8c..d9724e65f 100644
--- a/config.go
+++ b/config.go
@@ -384,15 +384,16 @@ var header2 = `
 ###############################################################################
 `
 
-// PrintSampleConfig prints the sample config!
-func PrintSampleConfig() {
+// PrintSampleConfig prints the sample config
+func PrintSampleConfig(pluginFilters []string, outputFilters []string) {
 	fmt.Printf(header)
 
 	// Print Outputs
 	var onames []string
 	for oname := range outputs.Outputs {
-		onames = append(onames, oname)
+		if len(outputFilters) == 0 || sliceContains(oname, outputFilters) {
+			onames = append(onames, oname)
+		}
 	}
 	sort.Strings(onames)
 
@@ -414,9 +415,10 @@
 
 	// Print Plugins
 	var pnames []string
 	for pname := range plugins.Plugins {
-		pnames = append(pnames, pname)
+		if len(pluginFilters) == 0 || sliceContains(pname, pluginFilters) {
+			pnames = append(pnames, pname)
+		}
 	}
 	sort.Strings(pnames)
 
@@ -435,6 +437,15 @@
 	}
 }
 
+func sliceContains(name string, list []string) bool {
+	for _, b := range list {
+		if b == name {
+			return true
+		}
+	}
+	return false
+}
+
 // PrintPluginConfig prints the config usage of a single plugin.
 func PrintPluginConfig(name string) error {
 	if creator, ok := plugins.Plugins[name]; ok {
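Editor's note: `PrintSampleConfig` now applies the same membership test while walking the plugin and output registries, then sorts the surviving names so the generated sample config has a stable section order. A stand-alone sketch of that loop (the map below is a stand-in for the real `outputs.Outputs` registry and is not part of the patch):

```go
package main

import (
	"fmt"
	"sort"
)

// Same helper the patch adds to config.go.
func sliceContains(name string, list []string) bool {
	for _, b := range list {
		if b == name {
			return true
		}
	}
	return false
}

func main() {
	// Stand-in for the real outputs.Outputs registry; only the keys matter here.
	registry := map[string]bool{"influxdb": true, "kafka": true}
	outputFilters := []string{"influxdb"}

	var onames []string
	for oname := range registry {
		if len(outputFilters) == 0 || sliceContains(oname, outputFilters) {
			onames = append(onames, oname)
		}
	}
	sort.Strings(onames) // map iteration order is random; sorting keeps the output stable
	fmt.Println(onames)  // [influxdb]
}
```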
diff --git a/testdata/telegraf-agent.toml b/testdata/telegraf-agent.toml
index 13c059983..ef0aefa8e 100644
--- a/testdata/telegraf-agent.toml
+++ b/testdata/telegraf-agent.toml
@@ -1,8 +1,5 @@
 # Telegraf configuration
 
-# If this file is missing an [agent] section, you must first generate a
-# valid config with 'telegraf -sample-config > telegraf.toml'
-
 # Telegraf is entirely plugin driven. All metrics are gathered from the
 # declared plugins.
 
@@ -22,245 +19,311 @@
 # NOTE: The configuration has a few required parameters. They are marked
 # with 'required'. Be sure to edit those to make this configuration work.
 
-# Configuration for influxdb server to send metrics to
-[outputs]
-[outputs.influxdb]
-# The full HTTP endpoint URL for your InfluxDB instance
-url = "http://localhost:8086" # required.
-
-# The target database for metrics. This database must already exist
-database = "telegraf" # required.
-
-# Connection timeout (for the connection with InfluxDB), formatted as a string.
-# Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
-# If not provided, will default to 0 (no timeout)
-# timeout = "5s"
-
-# username = "telegraf"
-# password = "metricsmetricsmetricsmetrics"
-
-# Set the user agent for the POSTs (can be useful for log differentiation)
-# user_agent = "telegraf"
-
 # Tags can also be specified via a normal map, but only one form at a time:
+[tags]
+  # dc = "us-east-1"
 
-# [tags]
-# dc = "us-east-1"
+# Configuration for telegraf agent
+[agent]
+  # Default data collection interval for all plugins
+  interval = "10s"
 
-# Configuration for telegraf itself
-# [agent]
-# interval = "10s"
-# debug = false
-# hostname = "prod3241"
+  # If utc = false, uses local time (utc is highly recommended)
+  utc = true
 
-# PLUGINS
+  # Precision of writes, valid values are n, u, ms, s, m, and h
+  # note: using second precision greatly helps InfluxDB compression
+  precision = "s"
+
+  # run telegraf in debug mode
+  debug = false
+
+  # Override default hostname, if empty use os.Hostname()
+  hostname = ""
+
+
+###############################################################################
+#                                  OUTPUTS                                    #
+###############################################################################
+
+[outputs]
+
+# Configuration for influxdb server to send metrics to
+[outputs.influxdb]
+  # The full HTTP endpoint URL for your InfluxDB instance
+  # Multiple urls can be specified for InfluxDB cluster support. Server to
+  # write to will be randomly chosen each interval.
+  urls = ["http://localhost:8086"] # required.
+
+  # The target database for metrics. This database must already exist
+  database = "telegraf" # required.
+
+  # Connection timeout (for the connection with InfluxDB), formatted as a string.
+  # Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
+  # If not provided, will default to 0 (no timeout)
+  # timeout = "5s"
+
+  # username = "telegraf"
+  # password = "metricsmetricsmetricsmetrics"
+
+  # Set the user agent for the POSTs (can be useful for log differentiation)
+  # user_agent = "telegraf"
+
+# Configuration for the Kafka server to send metrics to
+[outputs.kafka]
+  # URLs of kafka brokers
+  brokers = ["localhost:9092"]
+  # Kafka topic for producer messages
+  topic = "telegraf"
+  # Telegraf tag to use as a routing key
+  # ie, if this tag exists, its value will be used as the routing key
+  routing_tag = "host"
+
+
+###############################################################################
+#                                  PLUGINS                                    #
+###############################################################################
+
+# Read Apache status information (mod_status)
+[apache]
+# An array of Apache status URI to gather stats.
+urls = ["http://localhost/server-status?auto"]
 
 # Read metrics about cpu usage
 [cpu]
-totalcpu = true
-percpu = false
+  # Whether to report per-cpu stats or not
+  percpu = true
+  # Whether to report total system cpu stats or not
+  totalcpu = true
+  # Comment this line if you want the raw CPU time metrics
+  drop = ["cpu_time"]
 
 # Read metrics about disk usage by mount point
 [disk]
-	# no configuration
+  # no configuration
 
 # Read metrics from one or many disque servers
 [disque]
-
-# An array of URI to gather stats about. Specify an ip or hostname
-# with optional port and password. ie disque://localhost, disque://10.10.3.33:18832,
-# 10.0.0.1:10000, etc.
-#
-# If no servers are specified, then localhost is used as the host.
-servers = ["localhost"]
+  # An array of URI to gather stats about. Specify an ip or hostname
+  # with optional port and password. ie disque://localhost, disque://10.10.3.33:18832,
+  # 10.0.0.1:10000, etc.
+  #
+  # If no servers are specified, then localhost is used as the host.
+  servers = ["localhost"]
 
 # Read stats from one or more Elasticsearch servers or clusters
 [elasticsearch]
+  # specify a list of one or more Elasticsearch servers
+  servers = ["http://localhost:9200"]
 
-# specify a list of one or more Elasticsearch servers
-servers = ["http://localhost:9200"]
-
-# set local to false when you want to read the indices stats from all nodes
-# within the cluster
-local = true
+  # set local to false when you want to read the indices stats from all nodes
+  # within the cluster
+  local = true
 
 # Read flattened metrics from one or more commands that output JSON to stdout
 [exec]
+  # specify commands via an array of tables
+  [[exec.commands]]
+  # the command to run
+  command = "/usr/bin/mycollector --foo=bar"
 
-# specify commands via an array of tables
-[[exec.commands]]
-# the command to run
-command = "/usr/bin/mycollector --foo=bar"
-
-# name of the command (used as a prefix for measurements)
-name = "mycollector"
+  # name of the command (used as a prefix for measurements)
+  name = "mycollector"
 
 # Read metrics of haproxy, via socket or csv stats page
 [haproxy]
+  # An array of addresses to gather stats about. Specify an ip or hostname
+  # with optional port. ie localhost, 10.10.3.33:1936, etc.
+  #
+  # If no servers are specified, then default to 127.0.0.1:1936
+  servers = ["http://myhaproxy.com:1936", "http://anotherhaproxy.com:1936"]
+  # Or you can also use a local socket (not working yet)
+  # servers = ["socket:/run/haproxy/admin.sock"]
 
-# An array of address to gather stats about. Specify an ip on hostname
-# with optional port. ie localhost, 10.10.3.33:1936, etc.
-#
-# If no servers are specified, then default to 127.0.0.1:1936
-servers = ["http://myhaproxy.com:1936", "http://anotherhaproxy.com:1936"]
-# Or you can also use local socket(not work yet)
-# servers = ["socket:/run/haproxy/admin.sock"]
+# Read flattened metrics from one or more JSON HTTP endpoints
+[httpjson]
+  # Specify services via an array of tables
+  [[httpjson.services]]
+
+    # a name for the service being polled
+    name = "webserver_stats"
+
+    # URL of each server in the service's cluster
+    servers = [
+      "http://localhost:9999/stats/",
+      "http://localhost:9998/stats/",
+    ]
+
+    # HTTP method to use (case-sensitive)
+    method = "GET"
+
+    # HTTP parameters (all values must be strings)
+    [httpjson.services.parameters]
+      event_type = "cpu_spike"
+      threshold = "0.75"
 
 # Read metrics about disk IO by device
 [io]
-	# no configuration
+  # no configuration
 
 # read metrics from a Kafka topic
 [kafka]
+  # topic to consume
+  topic = "topic_with_metrics"
 
-# topic to consume
-topic = "topic_with_metrics"
+  # the name of the consumer group
+  consumerGroupName = "telegraf_metrics_consumers"
 
-# the name of the consumer group
-consumerGroupName = "telegraf_metrics_consumers"
+  # an array of Zookeeper connection strings
+  zookeeperPeers = ["localhost:2181"]
 
-# an array of Zookeeper connection strings
-zookeeperPeers = ["localhost:2181"]
-
-# Batch size of points sent to InfluxDB
-batchSize = 1000
+  # Batch size of points sent to InfluxDB
+  batchSize = 1000
 
 # Read metrics from a LeoFS Server via SNMP
 [leofs]
-
-# An array of URI to gather stats about LeoFS.
-# Specify an ip or hostname with port. ie 127.0.0.1:4020
-#
-# If no servers are specified, then 127.0.0.1 is used as the host and 4020 as the port.
-servers = ["127.0.0.1:4021"]
+  # An array of URI to gather stats about LeoFS.
+  # Specify an ip or hostname with port. ie 127.0.0.1:4020
+  #
+  # If no servers are specified, then 127.0.0.1 is used as the host and 4020 as the port.
+  servers = ["127.0.0.1:4021"]
 
 # Read metrics from local Lustre service on OST, MDS
 [lustre2]
-
-# An array of /proc globs to search for Lustre stats
-# If not specified, the default will work on Lustre 2.5.x
-#
-# ost_procfiles = ["/proc/fs/lustre/obdfilter/*/stats", "/proc/fs/lustre/osd-ldiskfs/*/stats"]
-# mds_procfiles = ["/proc/fs/lustre/mdt/*/md_stats"]
+  # An array of /proc globs to search for Lustre stats
+  # If not specified, the default will work on Lustre 2.5.x
+  #
+  # ost_procfiles = ["/proc/fs/lustre/obdfilter/*/stats", "/proc/fs/lustre/osd-ldiskfs/*/stats"]
+  # mds_procfiles = ["/proc/fs/lustre/mdt/*/md_stats"]
 
 # Read metrics about memory usage
 [mem]
-	# no configuration
+  # no configuration
 
 # Read metrics from one or many memcached servers
 [memcached]
-
-# An array of address to gather stats about. Specify an ip on hostname
-# with optional port. ie localhost, 10.0.0.1:11211, etc.
-#
-# If no servers are specified, then localhost is used as the host.
-servers = ["localhost"]
+  # An array of addresses to gather stats about. Specify an ip or hostname
+  # with optional port. ie localhost, 10.0.0.1:11211, etc.
+  #
+  # If no servers are specified, then localhost is used as the host.
+  servers = ["localhost"]
 
 # Read metrics from one or many MongoDB servers
 [mongodb]
-
-# An array of URI to gather stats about. Specify an ip or hostname
-# with optional port add password. ie mongodb://user:auth_key@10.10.3.30:27017,
-# mongodb://10.10.3.33:18832, 10.0.0.1:10000, etc.
-#
-# If no servers are specified, then 127.0.0.1 is used as the host and 27107 as the port.
-servers = ["127.0.0.1:27017"]
+  # An array of URI to gather stats about. Specify an ip or hostname
+  # with optional port and password. ie mongodb://user:auth_key@10.10.3.30:27017,
+  # mongodb://10.10.3.33:18832, 10.0.0.1:10000, etc.
+  #
+  # If no servers are specified, then 127.0.0.1 is used as the host and 27017 as the port.
+  servers = ["127.0.0.1:27017"]
 
 # Read metrics from one or many mysql servers
 [mysql]
-
-# specify servers via a url matching:
-# [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify]]
-# e.g. root:root@http://10.0.0.18/?tls=false
-#
-# If no servers are specified, then localhost is used as the host.
-servers = ["localhost"]
+  # specify servers via a url matching:
+  # [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify]]
+  # e.g.
+  # root:root@http://10.0.0.18/?tls=false
+  # root:passwd@tcp(127.0.0.1:3036)/
+  #
+  # If no servers are specified, then localhost is used as the host.
+  servers = ["localhost"]
 
 # Read metrics about network interface usage
 [net]
-
-# By default, telegraf gathers stats from any up interface (excluding loopback)
-# Setting interfaces will tell it to gather these explicit interfaces,
-# regardless of status.
-#
-# interfaces = ["eth0", ... ]
+  # By default, telegraf gathers stats from any up interface (excluding loopback)
+  # Setting interfaces will tell it to gather these explicit interfaces,
+  # regardless of status.
+  #
+  # interfaces = ["eth0", ... ]
 
 # Read Nginx's basic status information (ngx_http_stub_status_module)
 [nginx]
+  # An array of Nginx stub_status URI to gather stats.
+  urls = ["http://localhost/status"]
 
-# An array of Nginx stub_status URI to gather stats.
-urls = ["localhost/status"] +# Ping given url(s) and return statistics +[ping] + # urls to ping + urls = ["www.google.com"] # required + # number of pings to send (ping -c ) + count = 1 # required + # interval, in s, at which to ping. 0 == default (ping -i ) + ping_interval = 0.0 + # ping timeout, in s. 0 == no timeout (ping -t ) + timeout = 0.0 + # interface to send ping from (ping -I ) + interface = "" # Read metrics from one or many postgresql servers [postgresql] + # specify servers via an array of tables + [[postgresql.servers]] -# specify servers via an array of tables -[[postgresql.servers]] + # specify address via a url matching: + # postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full] + # or a simple string: + # host=localhost user=pqotest password=... sslmode=... dbname=app_production + # + # All connection parameters are optional. By default, the host is localhost + # and the user is the currently running user. For localhost, we default + # to sslmode=disable as well. + # + # Without the dbname parameter, the driver will default to a database + # with the same name as the user. This dbname is just for instantiating a + # connection with the server and doesn't restrict the databases we are trying + # to grab metrics for. + # -# specify address via a url matching: -# postgres://[pqgotest[:password]]@localhost?sslmode=[disable|verify-ca|verify-full] -# or a simple string: -# host=localhost user=pqotest password=... sslmode=... -# -# All connection parameters are optional. By default, the host is localhost -# and the user is the currently running user. For localhost, we default -# to sslmode=disable as well. -# + address = "sslmode=disable" -address = "sslmode=disable" + # A list of databases to pull metrics about. If not specified, metrics for all + # databases are gathered. -# A list of databases to pull metrics about. If not specified, metrics for all -# databases are gathered. + # databases = ["app_production", "blah_testing"] -# databases = ["app_production", "blah_testing"] - -# [[postgresql.servers]] -# address = "influx@remoteserver" + # [[postgresql.servers]] + # address = "influx@remoteserver" # Read metrics from one or many prometheus clients [prometheus] - -# An array of urls to scrape metrics from. -urls = ["http://localhost:9100/metrics"] + # An array of urls to scrape metrics from. + urls = ["http://localhost:9100/metrics"] # Read metrics from one or many RabbitMQ servers via the management API [rabbitmq] + # Specify servers via an array of tables + [[rabbitmq.servers]] + # name = "rmq-server-1" # optional tag + # url = "http://localhost:15672" + # username = "guest" + # password = "guest" -# Specify servers via an array of tables -[[rabbitmq.servers]] -# url = "http://localhost:15672" -# username = "guest" -# password = "guest" - -# A list of nodes to pull metrics about. If not specified, metrics for -# all nodes are gathered. -# nodes = ["rabbit@node1", "rabbit@node2"] + # A list of nodes to pull metrics about. If not specified, metrics for + # all nodes are gathered. + # nodes = ["rabbit@node1", "rabbit@node2"] # Read metrics from one or many redis servers [redis] - -# An array of URI to gather stats about. Specify an ip or hostname -# with optional port add password. ie redis://localhost, redis://10.10.3.33:18832, -# 10.0.0.1:10000, etc. -# -# If no servers are specified, then localhost is used as the host. -servers = ["localhost"] + # An array of URI to gather stats about. 
-# with optional port add password. ie redis://localhost, redis://10.10.3.33:18832,
-# 10.0.0.1:10000, etc.
-#
-# If no servers are specified, then localhost is used as the host.
-servers = ["localhost"]
+  # An array of URI to gather stats about. Specify an ip or hostname
+  # with optional port and password. ie redis://localhost, redis://10.10.3.33:18832,
+  # 10.0.0.1:10000, etc.
+  #
+  # If no servers are specified, then localhost is used as the host.
+  servers = ["localhost"]
 
 # Read metrics from one or many RethinkDB servers
 [rethinkdb]
-
-# An array of URI to gather stats about. Specify an ip or hostname
-# with optional port add password. ie rethinkdb://user:auth_key@10.10.3.30:28105,
-# rethinkdb://10.10.3.33:18832, 10.0.0.1:10000, etc.
-#
-# If no servers are specified, then 127.0.0.1 is used as the host and 28015 as the port.
-servers = ["127.0.0.1:28015"]
+  # An array of URI to gather stats about. Specify an ip or hostname
+  # with optional port and password. ie rethinkdb://user:auth_key@10.10.3.30:28105,
+  # rethinkdb://10.10.3.33:18832, 10.0.0.1:10000, etc.
+  #
+  # If no servers are specified, then 127.0.0.1 is used as the host and 28015 as the port.
+  servers = ["127.0.0.1:28015"]
 
 # Read metrics about swap memory usage
 [swap]
-	# no configuration
+  # no configuration
 
-# Read metrics about system load
+# Read metrics about system load & uptime
 [system]
-	# no configuration
-
+  # no configuration
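Editor's note: a minimal sketch (not part of the patch) of driving the new slice-based API against this testdata file from a separate package, mirroring TestAgent_LoadPlugin above. It assumes the import paths of this revision and a working directory at the repository root; the blank imports register the plugins and outputs, which an external package can do without the circular import that keeps TestAgent_LoadOutput commented out.

```go
package main

import (
	"fmt"
	"log"

	"github.com/influxdb/telegraf"
	// blank imports register every output and plugin with their registries
	_ "github.com/influxdb/telegraf/outputs/all"
	_ "github.com/influxdb/telegraf/plugins/all"
)

func main() {
	// Assumes the working directory is the repository root.
	config, err := telegraf.LoadConfig("./testdata/telegraf-agent.toml")
	if err != nil {
		log.Fatal(err)
	}
	a, err := telegraf.NewAgent(config)
	if err != nil {
		log.Fatal(err)
	}

	// Same filter semantics as the -filter / -outputfilter flags.
	plugins, _ := a.LoadPlugins([]string{"mysql", "redis"})
	outputs, _ := a.LoadOutputs([]string{"influxdb"})
	fmt.Println(plugins, outputs) // expected: [mysql redis] [influxdb]
}
```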