From 0dbb52f9d6d21f4b4e8785b91ce56ec95d3518a6 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Thu, 18 Feb 2016 14:26:51 -0700 Subject: [PATCH] Seems to be a toml parse bug around triple pounds --- CONTRIBUTING.md | 16 ++--- docs/DATA_FORMATS_INPUT.md | 72 +++++++++---------- docs/DATA_FORMATS_OUTPUT.md | 32 ++++----- etc/telegraf.conf | 36 +++++----- internal/config/config.go | 36 +++++----- plugins/inputs/aerospike/aerospike.go | 6 +- plugins/inputs/apache/apache.go | 2 +- plugins/inputs/bcache/bcache.go | 10 +-- plugins/inputs/couchdb/couchdb.go | 4 +- plugins/inputs/disque/disque.go | 8 +-- plugins/inputs/docker/docker.go | 8 +-- plugins/inputs/dovecot/dovecot.go | 12 ++-- plugins/inputs/elasticsearch/elasticsearch.go | 8 +-- plugins/inputs/exec/README.md | 44 ++++++------ plugins/inputs/exec/exec.go | 12 ++-- .../inputs/github_webhooks/github_webhooks.go | 2 +- plugins/inputs/haproxy/haproxy.go | 10 +-- plugins/inputs/httpjson/httpjson.go | 16 ++--- plugins/inputs/influxdb/influxdb.go | 8 +-- plugins/inputs/jolokia/jolokia.go | 12 ++-- plugins/inputs/kafka_consumer/README.md | 18 ++--- .../inputs/kafka_consumer/kafka_consumer.go | 16 ++--- plugins/inputs/leofs/leofs.go | 4 +- plugins/inputs/lustre2/lustre2.go | 6 +- plugins/inputs/mailchimp/mailchimp.go | 10 +-- plugins/inputs/memcached/memcached.go | 4 +- plugins/inputs/mongodb/mongodb.go | 10 +-- plugins/inputs/mqtt_consumer/README.md | 20 +++--- plugins/inputs/mqtt_consumer/mqtt_consumer.go | 18 ++--- plugins/inputs/mysql/mysql.go | 16 ++--- plugins/inputs/nats_consumer/README.md | 18 ++--- plugins/inputs/nats_consumer/nats_consumer.go | 16 ++--- plugins/inputs/net_response/net_response.go | 12 ++-- plugins/inputs/nginx/nginx.go | 2 +- plugins/inputs/nsq/nsq.go | 2 +- plugins/inputs/passenger/passenger.go | 16 ++--- plugins/inputs/phpfpm/phpfpm.go | 38 +++++----- plugins/inputs/ping/ping.go | 14 ++-- plugins/inputs/postgresql/postgresql.go | 28 ++++---- plugins/inputs/powerdns/powerdns.go | 4 +- plugins/inputs/procstat/procstat.go | 10 +-- plugins/inputs/prometheus/prometheus.go | 2 +- plugins/inputs/puppetagent/puppetagent.go | 2 +- plugins/inputs/rabbitmq/rabbitmq.go | 4 +- plugins/inputs/raindrops/raindrops.go | 2 +- plugins/inputs/redis/redis.go | 16 ++--- plugins/inputs/rethinkdb/rethinkdb.go | 10 +-- plugins/inputs/sensors/sensors.go | 18 ++--- plugins/inputs/snmp/snmp.go | 10 +-- plugins/inputs/sqlserver/sqlserver.go | 12 ++-- plugins/inputs/statsd/README.md | 32 ++++----- plugins/inputs/statsd/statsd.go | 32 ++++----- plugins/inputs/system/cpu.go | 6 +- plugins/inputs/system/disk.go | 4 +- plugins/inputs/system/net.go | 8 +-- plugins/inputs/trig/trig.go | 2 +- plugins/inputs/twemproxy/twemproxy.go | 4 +- .../win_perf_counters/win_perf_counters.go | 12 ++-- plugins/inputs/zfs/zfs.go | 10 +-- plugins/inputs/zookeeper/zookeeper.go | 8 +-- plugins/outputs/amon/amon.go | 6 +- plugins/outputs/amqp/amqp.go | 26 +++---- plugins/outputs/cloudwatch/cloudwatch.go | 4 +- plugins/outputs/datadog/datadog.go | 4 +- plugins/outputs/file/file.go | 10 +-- plugins/outputs/graphite/graphite.go | 6 +- plugins/outputs/influxdb/influxdb.go | 24 +++---- plugins/outputs/kafka/kafka.go | 20 +++--- plugins/outputs/kinesis/kinesis.go | 12 ++-- plugins/outputs/librato/librato.go | 14 ++-- plugins/outputs/mqtt/mqtt.go | 20 +++--- plugins/outputs/nsq/nsq.go | 12 ++-- plugins/outputs/opentsdb/opentsdb.go | 8 +-- .../prometheus_client/prometheus_client.go | 2 +- plugins/outputs/riemann/riemann.go | 6 +- 75 files changed, 502 insertions(+), 502 
deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 9ab185d8c..afbfbf088 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -114,10 +114,10 @@ creating the `Parser` object. You should also add the following to your SampleConfig() return: ```toml - ### Data format to consume. This can be "json", "influx" or "graphite" - ### Each data format has it's own unique set of configuration options, read - ### more about them here: - ### https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + ## Data format to consume. This can be "json", "influx" or "graphite" + ## Each data format has it's own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "influx" ``` @@ -244,10 +244,10 @@ instantiating and creating the `Serializer` object. You should also add the following to your SampleConfig() return: ```toml - ### Data format to output. This can be "influx" or "graphite" - ### Each data format has it's own unique set of configuration options, read - ### more about them here: - ### https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md + ## Data format to output. This can be "influx" or "graphite" + ## Each data format has it's own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md data_format = "influx" ``` diff --git a/docs/DATA_FORMATS_INPUT.md b/docs/DATA_FORMATS_INPUT.md index 3e230519b..79528a962 100644 --- a/docs/DATA_FORMATS_INPUT.md +++ b/docs/DATA_FORMATS_INPUT.md @@ -25,19 +25,19 @@ example, in the exec plugin: ```toml [[inputs.exec]] - ### Commands array + ## Commands array commands = ["/tmp/test.sh", "/usr/bin/mycollector --foo=bar"] - ### measurement name suffix (for separating different commands) + ## measurement name suffix (for separating different commands) name_suffix = "_mycollector" - ### Data format to consume. This can be "json", "influx" or "graphite" - ### Each data format has it's own unique set of configuration options, read - ### more about them here: - ### https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + ## Data format to consume. This can be "json", "influx" or "graphite" + ## Each data format has it's own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "json" - ### Additional configuration options go here + ## Additional configuration options go here ``` Each data_format has an additional set of configuration options available, which @@ -52,16 +52,16 @@ metrics are parsed directly into Telegraf metrics. ```toml [[inputs.exec]] - ### Commands array + ## Commands array commands = ["/tmp/test.sh", "/usr/bin/mycollector --foo=bar"] - ### measurement name suffix (for separating different commands) + ## measurement name suffix (for separating different commands) name_suffix = "_mycollector" - ### Data format to consume. This can be "json", "influx" or "graphite" - ### Each data format has it's own unique set of configuration options, read - ### more about them here: - ### https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + ## Data format to consume. 
This can be "json", "influx" or "graphite" + ## Each data format has it's own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "influx" ``` @@ -97,19 +97,19 @@ For example, if you had this configuration: ```toml [[inputs.exec]] - ### Commands array + ## Commands array commands = ["/tmp/test.sh", "/usr/bin/mycollector --foo=bar"] - ### measurement name suffix (for separating different commands) + ## measurement name suffix (for separating different commands) name_suffix = "_mycollector" - ### Data format to consume. This can be "json", "influx" or "graphite" - ### Each data format has it's own unique set of configuration options, read - ### more about them here: - ### https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + ## Data format to consume. This can be "json", "influx" or "graphite" + ## Each data format has it's own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "json" - ### List of tag names to extract from top-level of JSON server response + ## List of tag names to extract from top-level of JSON server response tag_keys = [ "my_tag_1", "my_tag_2" @@ -241,30 +241,30 @@ There are many more options available, ```toml [[inputs.exec]] - ### Commands array + ## Commands array commands = ["/tmp/test.sh", "/usr/bin/mycollector --foo=bar"] - ### measurement name suffix (for separating different commands) + ## measurement name suffix (for separating different commands) name_suffix = "_mycollector" - ### Data format to consume. This can be "json", "influx" or "graphite" (line-protocol) - ### Each data format has it's own unique set of configuration options, read - ### more about them here: - ### https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + ## Data format to consume. This can be "json", "influx" or "graphite" (line-protocol) + ## Each data format has it's own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "graphite" - ### This string will be used to join the matched values. + ## This string will be used to join the matched values. separator = "_" - ### Each template line requires a template pattern. It can have an optional - ### filter before the template and separated by spaces. It can also have optional extra - ### tags following the template. Multiple tags should be separated by commas and no spaces - ### similar to the line protocol format. There can be only one default template. - ### Templates support below format: - ### 1. filter + template - ### 2. filter + template + extra tag - ### 3. filter + template with field key - ### 4. default template + ## Each template line requires a template pattern. It can have an optional + ## filter before the template and separated by spaces. It can also have optional extra + ## tags following the template. Multiple tags should be separated by commas and no spaces + ## similar to the line protocol format. There can be only one default template. + ## Templates support below format: + ## 1. filter + template + ## 2. filter + template + extra tag + ## 3. filter + template with field key + ## 4. 
default template templates = [ "*.app env.service.resource.measurement", "stats.* .host.measurement* region=us-west,agent=sensu", diff --git a/docs/DATA_FORMATS_OUTPUT.md b/docs/DATA_FORMATS_OUTPUT.md index 30f6e63de..524ec6d66 100644 --- a/docs/DATA_FORMATS_OUTPUT.md +++ b/docs/DATA_FORMATS_OUTPUT.md @@ -26,16 +26,16 @@ config option, for example, in the `file` output plugin: ```toml [[outputs.file]] - ### Files to write to, "stdout" is a specially handled file. + ## Files to write to, "stdout" is a specially handled file. files = ["stdout"] - ### Data format to output. This can be "influx" or "graphite" - ### Each data format has it's own unique set of configuration options, read - ### more about them here: - ### https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md + ## Data format to output. This can be "influx" or "graphite" + ## Each data format has it's own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md data_format = "influx" - ### Additional configuration options go here + ## Additional configuration options go here ``` Each data_format has an additional set of configuration options available, which @@ -50,13 +50,13 @@ metrics are serialized directly into InfluxDB line-protocol. ```toml [[outputs.file]] - ### Files to write to, "stdout" is a specially handled file. + ## Files to write to, "stdout" is a specially handled file. files = ["stdout", "/tmp/metrics.out"] - ### Data format to output. This can be "influx" or "graphite" - ### Each data format has it's own unique set of configuration options, read - ### more about them here: - ### https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md + ## Data format to output. This can be "influx" or "graphite" + ## Each data format has it's own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md data_format = "influx" ``` @@ -84,13 +84,13 @@ tars.cpu-total.us-east-1.cpu.usage_idle 98.09 1455320690 ```toml [[outputs.file]] - ### Files to write to, "stdout" is a specially handled file. + ## Files to write to, "stdout" is a specially handled file. files = ["stdout", "/tmp/metrics.out"] - ### Data format to output. This can be "influx" or "graphite" - ### Each data format has it's own unique set of configuration options, read - ### more about them here: - ### https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md + ## Data format to output. This can be "influx" or "graphite" + ## Each data format has it's own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md data_format = "influx" prefix = "telegraf" diff --git a/etc/telegraf.conf b/etc/telegraf.conf index 037f730bf..db87251d5 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -16,37 +16,37 @@ # Configuration for telegraf agent [agent] - ### Default data collection interval for all inputs + ## Default data collection interval for all inputs interval = "10s" - ### Rounds collection interval to 'interval' - ### ie, if interval="10s" then always collect on :00, :10, :20, etc. + ## Rounds collection interval to 'interval' + ## ie, if interval="10s" then always collect on :00, :10, :20, etc. 
round_interval = true - ### Telegraf will cache metric_buffer_limit metrics for each output, and will - ### flush this buffer on a successful write. + ## Telegraf will cache metric_buffer_limit metrics for each output, and will + ## flush this buffer on a successful write. metric_buffer_limit = 10000 - ### Flush the buffer whenever full, regardless of flush_interval. + ## Flush the buffer whenever full, regardless of flush_interval. flush_buffer_when_full = true - ### Collection jitter is used to jitter the collection by a random amount. - ### Each plugin will sleep for a random time within jitter before collecting. - ### This can be used to avoid many plugins querying things like sysfs at the - ### same time, which can have a measurable effect on the system. + ## Collection jitter is used to jitter the collection by a random amount. + ## Each plugin will sleep for a random time within jitter before collecting. + ## This can be used to avoid many plugins querying things like sysfs at the + ## same time, which can have a measurable effect on the system. collection_jitter = "0s" - ### Default flushing interval for all outputs. You shouldn't set this below - ### interval. Maximum flush_interval will be flush_interval + flush_jitter + ## Default flushing interval for all outputs. You shouldn't set this below + ## interval. Maximum flush_interval will be flush_interval + flush_jitter flush_interval = "10s" - ### Jitter the flush interval by a random amount. This is primarily to avoid - ### large write spikes for users running a large number of telegraf instances. - ### ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s + ## Jitter the flush interval by a random amount. This is primarily to avoid + ## large write spikes for users running a large number of telegraf instances. + ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s flush_jitter = "0s" - ### Run telegraf in debug mode + ## Run telegraf in debug mode debug = false - ### Run telegraf in quiet mode + ## Run telegraf in quiet mode quiet = false - ### Override default hostname, if empty use os.Hostname() + ## Override default hostname, if empty use os.Hostname() hostname = "" diff --git a/internal/config/config.go b/internal/config/config.go index 82246f2a4..f47cf7ea7 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -153,37 +153,37 @@ var header = `################################################################## # Configuration for telegraf agent [agent] - ### Default data collection interval for all inputs + ## Default data collection interval for all inputs interval = "10s" - ### Rounds collection interval to 'interval' - ### ie, if interval="10s" then always collect on :00, :10, :20, etc. + ## Rounds collection interval to 'interval' + ## ie, if interval="10s" then always collect on :00, :10, :20, etc. round_interval = true - ### Telegraf will cache metric_buffer_limit metrics for each output, and will - ### flush this buffer on a successful write. + ## Telegraf will cache metric_buffer_limit metrics for each output, and will + ## flush this buffer on a successful write. metric_buffer_limit = 10000 - ### Flush the buffer whenever full, regardless of flush_interval. + ## Flush the buffer whenever full, regardless of flush_interval. flush_buffer_when_full = true - ### Collection jitter is used to jitter the collection by a random amount. - ### Each plugin will sleep for a random time within jitter before collecting. 
- ### This can be used to avoid many plugins querying things like sysfs at the - ### same time, which can have a measurable effect on the system. + ## Collection jitter is used to jitter the collection by a random amount. + ## Each plugin will sleep for a random time within jitter before collecting. + ## This can be used to avoid many plugins querying things like sysfs at the + ## same time, which can have a measurable effect on the system. collection_jitter = "0s" - ### Default flushing interval for all outputs. You shouldn't set this below - ### interval. Maximum flush_interval will be flush_interval + flush_jitter + ## Default flushing interval for all outputs. You shouldn't set this below + ## interval. Maximum flush_interval will be flush_interval + flush_jitter flush_interval = "10s" - ### Jitter the flush interval by a random amount. This is primarily to avoid - ### large write spikes for users running a large number of telegraf instances. - ### ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s + ## Jitter the flush interval by a random amount. This is primarily to avoid + ## large write spikes for users running a large number of telegraf instances. + ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s flush_jitter = "0s" - ### Run telegraf in debug mode + ## Run telegraf in debug mode debug = false - ### Run telegraf in quiet mode + ## Run telegraf in quiet mode quiet = false - ### Override default hostname, if empty use os.Hostname() + ## Override default hostname, if empty use os.Hostname() hostname = "" diff --git a/plugins/inputs/aerospike/aerospike.go b/plugins/inputs/aerospike/aerospike.go index e46960101..cd2ebe25c 100644 --- a/plugins/inputs/aerospike/aerospike.go +++ b/plugins/inputs/aerospike/aerospike.go @@ -104,9 +104,9 @@ type Aerospike struct { } var sampleConfig = ` - ### Aerospike servers to connect to (with port) - ### This plugin will query all namespaces the aerospike - ### server has configured and get stats for them. + ## Aerospike servers to connect to (with port) + ## This plugin will query all namespaces the aerospike + ## server has configured and get stats for them. servers = ["localhost:3000"] ` diff --git a/plugins/inputs/apache/apache.go b/plugins/inputs/apache/apache.go index faedf7f7d..b6e3e50f1 100644 --- a/plugins/inputs/apache/apache.go +++ b/plugins/inputs/apache/apache.go @@ -20,7 +20,7 @@ type Apache struct { } var sampleConfig = ` - ### An array of Apache status URI to gather stats. + ## An array of Apache status URI to gather stats. urls = ["http://localhost/server-status?auto"] ` diff --git a/plugins/inputs/bcache/bcache.go b/plugins/inputs/bcache/bcache.go index 25005fce3..1171dbd92 100644 --- a/plugins/inputs/bcache/bcache.go +++ b/plugins/inputs/bcache/bcache.go @@ -18,13 +18,13 @@ type Bcache struct { } var sampleConfig = ` - ### Bcache sets path - ### If not specified, then default is: + ## Bcache sets path + ## If not specified, then default is: bcachePath = "/sys/fs/bcache" - ### By default, telegraf gather stats for all bcache devices - ### Setting devices will restrict the stats to the specified - ### bcache devices. + ## By default, telegraf gather stats for all bcache devices + ## Setting devices will restrict the stats to the specified + ## bcache devices. 
bcacheDevs = ["bcache0"] ` diff --git a/plugins/inputs/couchdb/couchdb.go b/plugins/inputs/couchdb/couchdb.go index 7cec65777..ba64e4a6d 100644 --- a/plugins/inputs/couchdb/couchdb.go +++ b/plugins/inputs/couchdb/couchdb.go @@ -75,8 +75,8 @@ func (*CouchDB) Description() string { func (*CouchDB) SampleConfig() string { return ` - ### Works with CouchDB stats endpoints out of the box - ### Multiple HOSTs from which to read CouchDB stats: + ## Works with CouchDB stats endpoints out of the box + ## Multiple HOSTs from which to read CouchDB stats: hosts = ["http://localhost:8086/_stats"] ` } diff --git a/plugins/inputs/disque/disque.go b/plugins/inputs/disque/disque.go index 51457bec1..a311b6739 100644 --- a/plugins/inputs/disque/disque.go +++ b/plugins/inputs/disque/disque.go @@ -22,11 +22,11 @@ type Disque struct { } var sampleConfig = ` - ### An array of URI to gather stats about. Specify an ip or hostname - ### with optional port and password. ie disque://localhost, disque://10.10.3.33:18832, - ### 10.0.0.1:10000, etc. + ## An array of URI to gather stats about. Specify an ip or hostname + ## with optional port and password. ie disque://localhost, disque://10.10.3.33:18832, + ## 10.0.0.1:10000, etc. - ### If no servers are specified, then localhost is used as the host. + ## If no servers are specified, then localhost is used as the host. servers = ["localhost"] ` diff --git a/plugins/inputs/docker/docker.go b/plugins/inputs/docker/docker.go index 60abf71d1..0d89979c1 100644 --- a/plugins/inputs/docker/docker.go +++ b/plugins/inputs/docker/docker.go @@ -21,11 +21,11 @@ type Docker struct { } var sampleConfig = ` - ### Docker Endpoint - ### To use TCP, set endpoint = "tcp://[ip]:[port]" - ### To use environment variables (ie, docker-machine), set endpoint = "ENV" + ## Docker Endpoint + ## To use TCP, set endpoint = "tcp://[ip]:[port]" + ## To use environment variables (ie, docker-machine), set endpoint = "ENV" endpoint = "unix:///var/run/docker.sock" - ### Only collect metrics for these containers, collect all if empty + ## Only collect metrics for these containers, collect all if empty container_names = [] ` diff --git a/plugins/inputs/dovecot/dovecot.go b/plugins/inputs/dovecot/dovecot.go index de9ef0cfe..75829f595 100644 --- a/plugins/inputs/dovecot/dovecot.go +++ b/plugins/inputs/dovecot/dovecot.go @@ -24,13 +24,13 @@ func (d *Dovecot) Description() string { } var sampleConfig = ` - ### specify dovecot servers via an address:port list - ### e.g. - ### localhost:24242 - ### - ### If no servers are specified, then localhost is used as the host. + ## specify dovecot servers via an address:port list + ## e.g. + ## localhost:24242 + ## + ## If no servers are specified, then localhost is used as the host. 
servers = ["localhost:24242"] - ### Only collect metrics for these domains, collect all if empty + ## Only collect metrics for these domains, collect all if empty domains = [] ` diff --git a/plugins/inputs/elasticsearch/elasticsearch.go b/plugins/inputs/elasticsearch/elasticsearch.go index 8c2c055cb..aae97f4d7 100644 --- a/plugins/inputs/elasticsearch/elasticsearch.go +++ b/plugins/inputs/elasticsearch/elasticsearch.go @@ -59,14 +59,14 @@ type indexHealth struct { } const sampleConfig = ` - ### specify a list of one or more Elasticsearch servers + ## specify a list of one or more Elasticsearch servers servers = ["http://localhost:9200"] - ### set local to false when you want to read the indices stats from all nodes - ### within the cluster + ## set local to false when you want to read the indices stats from all nodes + ## within the cluster local = true - ### set cluster_health to true when you want to also obtain cluster level stats + ## set cluster_health to true when you want to also obtain cluster level stats cluster_health = false ` diff --git a/plugins/inputs/exec/README.md b/plugins/inputs/exec/README.md index daf800db3..eddc86ada 100644 --- a/plugins/inputs/exec/README.md +++ b/plugins/inputs/exec/README.md @@ -37,19 +37,19 @@ and strings will be ignored. # measurement name suffix (for separating different commands) name_suffix = "_mycollector" - ### Below configuration will be used for data_format = "graphite", can be ignored for other data_format - ### If matching multiple measurement files, this string will be used to join the matched values. + ## Below configuration will be used for data_format = "graphite", can be ignored for other data_format + ## If matching multiple measurement files, this string will be used to join the matched values. #separator = "." - ### Each template line requires a template pattern. It can have an optional - ### filter before the template and separated by spaces. It can also have optional extra - ### tags following the template. Multiple tags should be separated by commas and no spaces - ### similar to the line protocol format. The can be only one default template. - ### Templates support below format: - ### 1. filter + template - ### 2. filter + template + extra tag - ### 3. filter + template with field key - ### 4. default template + ## Each template line requires a template pattern. It can have an optional + ## filter before the template and separated by spaces. It can also have optional extra + ## tags following the template. Multiple tags should be separated by commas and no spaces + ## similar to the line protocol format. The can be only one default template. + ## Templates support below format: + ## 1. filter + template + ## 2. filter + template + extra tag + ## 3. filter + template with field key + ## 4. default template #templates = [ # "*.app env.service.resource.measurement", # "stats.* .host.measurement* region=us-west,agent=sensu", @@ -141,19 +141,19 @@ We can also change the data_format to "graphite" to use the metrics collecting s # measurement name suffix (for separating different commands) name_suffix = "_mycollector" - ### Below configuration will be used for data_format = "graphite", can be ignored for other data_format - ### If matching multiple measurement files, this string will be used to join the matched values. + ## Below configuration will be used for data_format = "graphite", can be ignored for other data_format + ## If matching multiple measurement files, this string will be used to join the matched values. separator = "." 
- ### Each template line requires a template pattern. It can have an optional - ### filter before the template and separated by spaces. It can also have optional extra - ### tags following the template. Multiple tags should be separated by commas and no spaces - ### similar to the line protocol format. The can be only one default template. - ### Templates support below format: - ### 1. filter + template - ### 2. filter + template + extra tag - ### 3. filter + template with field key - ### 4. default template + ## Each template line requires a template pattern. It can have an optional + ## filter before the template and separated by spaces. It can also have optional extra + ## tags following the template. Multiple tags should be separated by commas and no spaces + ## similar to the line protocol format. The can be only one default template. + ## Templates support below format: + ## 1. filter + template + ## 2. filter + template + extra tag + ## 3. filter + template with field key + ## 4. default template templates = [ "*.app env.service.resource.measurement", "stats.* .host.measurement* region=us-west,agent=sensu", diff --git a/plugins/inputs/exec/exec.go b/plugins/inputs/exec/exec.go index 86309bf73..5231fd013 100644 --- a/plugins/inputs/exec/exec.go +++ b/plugins/inputs/exec/exec.go @@ -14,16 +14,16 @@ import ( ) const sampleConfig = ` - ### Commands array + ## Commands array commands = ["/tmp/test.sh", "/usr/bin/mycollector --foo=bar"] - ### measurement name suffix (for separating different commands) + ## measurement name suffix (for separating different commands) name_suffix = "_mycollector" - ### Data format to consume. This can be "json", "influx" or "graphite" - ### Each data format has it's own unique set of configuration options, read - ### more about them here: - ### https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + ## Data format to consume. This can be "json", "influx" or "graphite" + ## Each data format has it's own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "influx" ` diff --git a/plugins/inputs/github_webhooks/github_webhooks.go b/plugins/inputs/github_webhooks/github_webhooks.go index 6dc97f5a3..bc3f184be 100644 --- a/plugins/inputs/github_webhooks/github_webhooks.go +++ b/plugins/inputs/github_webhooks/github_webhooks.go @@ -31,7 +31,7 @@ func NewGithubWebhooks() *GithubWebhooks { func (gh *GithubWebhooks) SampleConfig() string { return ` - ### Address and port to host Webhook listener on + ## Address and port to host Webhook listener on service_address = ":1618" ` } diff --git a/plugins/inputs/haproxy/haproxy.go b/plugins/inputs/haproxy/haproxy.go index 92969a057..233cd8481 100644 --- a/plugins/inputs/haproxy/haproxy.go +++ b/plugins/inputs/haproxy/haproxy.go @@ -86,13 +86,13 @@ type haproxy struct { } var sampleConfig = ` - ### An array of address to gather stats about. Specify an ip on hostname - ### with optional port. ie localhost, 10.10.3.33:1936, etc. + ## An array of address to gather stats about. Specify an ip on hostname + ## with optional port. ie localhost, 10.10.3.33:1936, etc. 
- ### If no servers are specified, then default to 127.0.0.1:1936 + ## If no servers are specified, then default to 127.0.0.1:1936 servers = ["http://myhaproxy.com:1936", "http://anotherhaproxy.com:1936"] - ### Or you can also use local socket(not work yet) - ### servers = ["socket://run/haproxy/admin.sock"] + ## Or you can also use local socket(not work yet) + ## servers = ["socket://run/haproxy/admin.sock"] ` func (r *haproxy) SampleConfig() string { diff --git a/plugins/inputs/httpjson/httpjson.go b/plugins/inputs/httpjson/httpjson.go index fa6ab70db..d5dddd7d4 100644 --- a/plugins/inputs/httpjson/httpjson.go +++ b/plugins/inputs/httpjson/httpjson.go @@ -47,33 +47,33 @@ func (c RealHTTPClient) MakeRequest(req *http.Request) (*http.Response, error) { } var sampleConfig = ` - ### NOTE This plugin only reads numerical measurements, strings and booleans - ### will be ignored. + ## NOTE This plugin only reads numerical measurements, strings and booleans + ## will be ignored. - ### a name for the service being polled + ## a name for the service being polled name = "webserver_stats" - ### URL of each server in the service's cluster + ## URL of each server in the service's cluster servers = [ "http://localhost:9999/stats/", "http://localhost:9998/stats/", ] - ### HTTP method to use: GET or POST (case-sensitive) + ## HTTP method to use: GET or POST (case-sensitive) method = "GET" - ### List of tag names to extract from top-level of JSON server response + ## List of tag names to extract from top-level of JSON server response # tag_keys = [ # "my_tag_1", # "my_tag_2" # ] - ### HTTP parameters (all values must be strings) + ## HTTP parameters (all values must be strings) [inputs.httpjson.parameters] event_type = "cpu_spike" threshold = "0.75" - ### HTTP Header parameters (all values must be strings) + ## HTTP Header parameters (all values must be strings) # [inputs.httpjson.headers] # X-Auth-Token = "my-xauth-token" # apiVersion = "v1" diff --git a/plugins/inputs/influxdb/influxdb.go b/plugins/inputs/influxdb/influxdb.go index b12990cf1..63a3c1854 100644 --- a/plugins/inputs/influxdb/influxdb.go +++ b/plugins/inputs/influxdb/influxdb.go @@ -22,11 +22,11 @@ func (*InfluxDB) Description() string { func (*InfluxDB) SampleConfig() string { return ` - ### Works with InfluxDB debug endpoints out of the box, - ### but other services can use this format too. - ### See the influxdb plugin's README for more details. + ## Works with InfluxDB debug endpoints out of the box, + ## but other services can use this format too. + ## See the influxdb plugin's README for more details. 
- ### Multiple URLs from which to read InfluxDB-formatted JSON + ## Multiple URLs from which to read InfluxDB-formatted JSON urls = [ "http://localhost:8086/debug/vars" ] diff --git a/plugins/inputs/jolokia/jolokia.go b/plugins/inputs/jolokia/jolokia.go index 77546006f..2e0bba6d5 100644 --- a/plugins/inputs/jolokia/jolokia.go +++ b/plugins/inputs/jolokia/jolokia.go @@ -46,10 +46,10 @@ type Jolokia struct { func (j *Jolokia) SampleConfig() string { return ` - ### This is the context root used to compose the jolokia url + ## This is the context root used to compose the jolokia url context = "/jolokia/read" - ### List of servers exposing jolokia read service + ## List of servers exposing jolokia read service [[inputs.jolokia.servers]] name = "stable" host = "192.168.103.2" @@ -57,10 +57,10 @@ func (j *Jolokia) SampleConfig() string { # username = "myuser" # password = "mypassword" - ### List of metrics collected on above servers - ### Each metric consists in a name, a jmx path and either - ### a pass or drop slice attribute. - ### This collect all heap memory usage metrics. + ## List of metrics collected on above servers + ## Each metric consists in a name, a jmx path and either + ## a pass or drop slice attribute. + ## This collect all heap memory usage metrics. [[inputs.jolokia.metrics]] name = "heap_memory_usage" jmx = "/java.lang:type=Memory/HeapMemoryUsage" diff --git a/plugins/inputs/kafka_consumer/README.md b/plugins/inputs/kafka_consumer/README.md index 2e9d8cf3d..885c67a28 100644 --- a/plugins/inputs/kafka_consumer/README.md +++ b/plugins/inputs/kafka_consumer/README.md @@ -11,21 +11,21 @@ from the same topic in parallel. ```toml # Read metrics from Kafka topic(s) [[inputs.kafka_consumer]] - ### topic(s) to consume + ## topic(s) to consume topics = ["telegraf"] - ### an array of Zookeeper connection strings + ## an array of Zookeeper connection strings zookeeper_peers = ["localhost:2181"] - ### the name of the consumer group + ## the name of the consumer group consumer_group = "telegraf_metrics_consumers" - ### Maximum number of metrics to buffer between collection intervals + ## Maximum number of metrics to buffer between collection intervals metric_buffer = 100000 - ### Offset (must be either "oldest" or "newest") + ## Offset (must be either "oldest" or "newest") offset = "oldest" - ### Data format to consume. This can be "json", "influx" or "graphite" - ### Each data format has it's own unique set of configuration options, read - ### more about them here: - ### https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + ## Data format to consume. 
This can be "json", "influx" or "graphite" + ## Each data format has it's own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "influx" ``` diff --git a/plugins/inputs/kafka_consumer/kafka_consumer.go b/plugins/inputs/kafka_consumer/kafka_consumer.go index 98f2b2990..bc0d225c6 100644 --- a/plugins/inputs/kafka_consumer/kafka_consumer.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer.go @@ -44,19 +44,19 @@ type Kafka struct { } var sampleConfig = ` - ### topic(s) to consume + ## topic(s) to consume topics = ["telegraf"] - ### an array of Zookeeper connection strings + ## an array of Zookeeper connection strings zookeeper_peers = ["localhost:2181"] - ### the name of the consumer group + ## the name of the consumer group consumer_group = "telegraf_metrics_consumers" - ### Offset (must be either "oldest" or "newest") + ## Offset (must be either "oldest" or "newest") offset = "oldest" - ### Data format to consume. This can be "json", "influx" or "graphite" - ### Each data format has it's own unique set of configuration options, read - ### more about them here: - ### https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + ## Data format to consume. This can be "json", "influx" or "graphite" + ## Each data format has it's own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "influx" ` diff --git a/plugins/inputs/leofs/leofs.go b/plugins/inputs/leofs/leofs.go index 4a52706b3..f4910ad0c 100644 --- a/plugins/inputs/leofs/leofs.go +++ b/plugins/inputs/leofs/leofs.go @@ -132,8 +132,8 @@ var serverTypeMapping = map[string]ServerType{ } var sampleConfig = ` - ### An array of URI to gather stats about LeoFS. - ### Specify an ip or hostname with port. ie 127.0.0.1:4020 + ## An array of URI to gather stats about LeoFS. + ## Specify an ip or hostname with port. ie 127.0.0.1:4020 servers = ["127.0.0.1:4021"] ` diff --git a/plugins/inputs/lustre2/lustre2.go b/plugins/inputs/lustre2/lustre2.go index 26d0e3702..6ac41d391 100644 --- a/plugins/inputs/lustre2/lustre2.go +++ b/plugins/inputs/lustre2/lustre2.go @@ -29,9 +29,9 @@ type Lustre2 struct { } var sampleConfig = ` - ### An array of /proc globs to search for Lustre stats - ### If not specified, the default will work on Lustre 2.5.x - ### + ## An array of /proc globs to search for Lustre stats + ## If not specified, the default will work on Lustre 2.5.x + ## # ost_procfiles = [ # "/proc/fs/lustre/obdfilter/*/stats", # "/proc/fs/lustre/osd-ldiskfs/*/stats" diff --git a/plugins/inputs/mailchimp/mailchimp.go b/plugins/inputs/mailchimp/mailchimp.go index 290c01bfd..d7255191a 100644 --- a/plugins/inputs/mailchimp/mailchimp.go +++ b/plugins/inputs/mailchimp/mailchimp.go @@ -17,13 +17,13 @@ type MailChimp struct { } var sampleConfig = ` - ### MailChimp API key - ### get from https://admin.mailchimp.com/account/api/ + ## MailChimp API key + ## get from https://admin.mailchimp.com/account/api/ api_key = "" # required - ### Reports for campaigns sent more than days_old ago will not be collected. - ### 0 means collect all. + ## Reports for campaigns sent more than days_old ago will not be collected. + ## 0 means collect all. 
days_old = 0 - ### Campaign ID to get, if empty gets all campaigns, this option overrides days_old + ## Campaign ID to get, if empty gets all campaigns, this option overrides days_old # campaign_id = "" ` diff --git a/plugins/inputs/memcached/memcached.go b/plugins/inputs/memcached/memcached.go index 19654937c..24ff09d77 100644 --- a/plugins/inputs/memcached/memcached.go +++ b/plugins/inputs/memcached/memcached.go @@ -19,8 +19,8 @@ type Memcached struct { } var sampleConfig = ` - ### An array of address to gather stats about. Specify an ip on hostname - ### with optional port. ie localhost, 10.0.0.1:11211, etc. + ## An array of address to gather stats about. Specify an ip on hostname + ## with optional port. ie localhost, 10.0.0.1:11211, etc. servers = ["localhost:11211"] # unix_sockets = ["/var/run/memcached.sock"] ` diff --git a/plugins/inputs/mongodb/mongodb.go b/plugins/inputs/mongodb/mongodb.go index 4054ccd54..3be04477b 100644 --- a/plugins/inputs/mongodb/mongodb.go +++ b/plugins/inputs/mongodb/mongodb.go @@ -26,11 +26,11 @@ type Ssl struct { } var sampleConfig = ` - ### An array of URI to gather stats about. Specify an ip or hostname - ### with optional port add password. ie, - ### mongodb://user:auth_key@10.10.3.30:27017, - ### mongodb://10.10.3.33:18832, - ### 10.0.0.1:10000, etc. + ## An array of URI to gather stats about. Specify an ip or hostname + ## with optional port add password. ie, + ## mongodb://user:auth_key@10.10.3.30:27017, + ## mongodb://10.10.3.33:18832, + ## 10.0.0.1:10000, etc. servers = ["127.0.0.1:27017"] ` diff --git a/plugins/inputs/mqtt_consumer/README.md b/plugins/inputs/mqtt_consumer/README.md index 07a64e901..787494975 100644 --- a/plugins/inputs/mqtt_consumer/README.md +++ b/plugins/inputs/mqtt_consumer/README.md @@ -11,34 +11,34 @@ The plugin expects messages in the # Read metrics from MQTT topic(s) [[inputs.mqtt_consumer]] servers = ["localhost:1883"] - ### MQTT QoS, must be 0, 1, or 2 + ## MQTT QoS, must be 0, 1, or 2 qos = 0 - ### Topics to subscribe to + ## Topics to subscribe to topics = [ "telegraf/host01/cpu", "telegraf/+/mem", "sensors/#", ] - ### Maximum number of metrics to buffer between collection intervals + ## Maximum number of metrics to buffer between collection intervals metric_buffer = 100000 - ### username and password to connect MQTT server. + ## username and password to connect MQTT server. # username = "telegraf" # password = "metricsmetricsmetricsmetrics" - ### Optional SSL Config + ## Optional SSL Config # ssl_ca = "/etc/telegraf/ca.pem" # ssl_cert = "/etc/telegraf/cert.pem" # ssl_key = "/etc/telegraf/key.pem" - ### Use SSL but skip chain & host verification + ## Use SSL but skip chain & host verification # insecure_skip_verify = false - ### Data format to consume. This can be "json", "influx" or "graphite" - ### Each data format has it's own unique set of configuration options, read - ### more about them here: - ### https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + ## Data format to consume. 
This can be "json", "influx" or "graphite" + ## Each data format has it's own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "influx" ``` diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer.go b/plugins/inputs/mqtt_consumer/mqtt_consumer.go index e9a7ef8b1..2d0fbef06 100644 --- a/plugins/inputs/mqtt_consumer/mqtt_consumer.go +++ b/plugins/inputs/mqtt_consumer/mqtt_consumer.go @@ -47,31 +47,31 @@ type MQTTConsumer struct { var sampleConfig = ` servers = ["localhost:1883"] - ### MQTT QoS, must be 0, 1, or 2 + ## MQTT QoS, must be 0, 1, or 2 qos = 0 - ### Topics to subscribe to + ## Topics to subscribe to topics = [ "telegraf/host01/cpu", "telegraf/+/mem", "sensors/#", ] - ### username and password to connect MQTT server. + ## username and password to connect MQTT server. # username = "telegraf" # password = "metricsmetricsmetricsmetrics" - ### Optional SSL Config + ## Optional SSL Config # ssl_ca = "/etc/telegraf/ca.pem" # ssl_cert = "/etc/telegraf/cert.pem" # ssl_key = "/etc/telegraf/key.pem" - ### Use SSL but skip chain & host verification + ## Use SSL but skip chain & host verification # insecure_skip_verify = false - ### Data format to consume. This can be "json", "influx" or "graphite" - ### Each data format has it's own unique set of configuration options, read - ### more about them here: - ### https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + ## Data format to consume. This can be "json", "influx" or "graphite" + ## Each data format has it's own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "influx" ` diff --git a/plugins/inputs/mysql/mysql.go b/plugins/inputs/mysql/mysql.go index 272baddb1..b2e2729a9 100644 --- a/plugins/inputs/mysql/mysql.go +++ b/plugins/inputs/mysql/mysql.go @@ -15,14 +15,14 @@ type Mysql struct { } var sampleConfig = ` - ### specify servers via a url matching: - ### [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify]] - ### see https://github.com/go-sql-driver/mysql#dsn-data-source-name - ### e.g. - ### root:passwd@tcp(127.0.0.1:3306)/?tls=false - ### root@tcp(127.0.0.1:3306)/?tls=false - ### - ### If no servers are specified, then localhost is used as the host. + ## specify servers via a url matching: + ## [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify]] + ## see https://github.com/go-sql-driver/mysql#dsn-data-source-name + ## e.g. + ## root:passwd@tcp(127.0.0.1:3306)/?tls=false + ## root@tcp(127.0.0.1:3306)/?tls=false + ## + ## If no servers are specified, then localhost is used as the host. servers = ["tcp(127.0.0.1:3306)/"] ` diff --git a/plugins/inputs/nats_consumer/README.md b/plugins/inputs/nats_consumer/README.md index 42993e813..90563ff55 100644 --- a/plugins/inputs/nats_consumer/README.md +++ b/plugins/inputs/nats_consumer/README.md @@ -12,20 +12,20 @@ from a NATS cluster in parallel. 
```toml # Read metrics from NATS subject(s) [[inputs.nats_consumer]] - ### urls of NATS servers + ## urls of NATS servers servers = ["nats://localhost:4222"] - ### Use Transport Layer Security + ## Use Transport Layer Security secure = false - ### subject(s) to consume + ## subject(s) to consume subjects = ["telegraf"] - ### name a queue group + ## name a queue group queue_group = "telegraf_consumers" - ### Maximum number of metrics to buffer between collection intervals + ## Maximum number of metrics to buffer between collection intervals metric_buffer = 100000 - ### Data format to consume. This can be "json", "influx" or "graphite" - ### Each data format has it's own unique set of configuration options, read - ### more about them here: - ### https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + ## Data format to consume. This can be "json", "influx" or "graphite" + ## Each data format has it's own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "influx" ``` diff --git a/plugins/inputs/nats_consumer/nats_consumer.go b/plugins/inputs/nats_consumer/nats_consumer.go index c0bf50849..235601100 100644 --- a/plugins/inputs/nats_consumer/nats_consumer.go +++ b/plugins/inputs/nats_consumer/nats_consumer.go @@ -46,19 +46,19 @@ type natsConsumer struct { } var sampleConfig = ` - ### urls of NATS servers + ## urls of NATS servers servers = ["nats://localhost:4222"] - ### Use Transport Layer Security + ## Use Transport Layer Security secure = false - ### subject(s) to consume + ## subject(s) to consume subjects = ["telegraf"] - ### name a queue group + ## name a queue group queue_group = "telegraf_consumers" - ### Data format to consume. This can be "json", "influx" or "graphite" - ### Each data format has it's own unique set of configuration options, read - ### more about them here: - ### https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + ## Data format to consume. This can be "json", "influx" or "graphite" + ## Each data format has it's own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "influx" ` diff --git a/plugins/inputs/net_response/net_response.go b/plugins/inputs/net_response/net_response.go index 60468c157..66bf2ae7b 100644 --- a/plugins/inputs/net_response/net_response.go +++ b/plugins/inputs/net_response/net_response.go @@ -27,17 +27,17 @@ func (_ *NetResponse) Description() string { } var sampleConfig = ` - ### Protocol, must be "tcp" or "udp" + ## Protocol, must be "tcp" or "udp" protocol = "tcp" - ### Server address (default localhost) + ## Server address (default localhost) address = "github.com:80" - ### Set timeout (default 1.0 seconds) + ## Set timeout (default 1.0 seconds) timeout = 1.0 - ### Set read timeout (default 1.0 seconds) + ## Set read timeout (default 1.0 seconds) read_timeout = 1.0 - ### Optional string sent to the server + ## Optional string sent to the server # send = "ssh" - ### Optional expected string in answer + ## Optional expected string in answer # expect = "ssh" ` diff --git a/plugins/inputs/nginx/nginx.go b/plugins/inputs/nginx/nginx.go index 4ceca01f2..3b008fbf3 100644 --- a/plugins/inputs/nginx/nginx.go +++ b/plugins/inputs/nginx/nginx.go @@ -20,7 +20,7 @@ type Nginx struct { } var sampleConfig = ` - ### An array of Nginx stub_status URI to gather stats. 
+ ## An array of Nginx stub_status URI to gather stats. urls = ["http://localhost/status"] ` diff --git a/plugins/inputs/nsq/nsq.go b/plugins/inputs/nsq/nsq.go index 1cf7d4dcc..6b3be66f2 100644 --- a/plugins/inputs/nsq/nsq.go +++ b/plugins/inputs/nsq/nsq.go @@ -41,7 +41,7 @@ type NSQ struct { } var sampleConfig = ` - ### An array of NSQD HTTP API endpoints + ## An array of NSQD HTTP API endpoints endpoints = ["http://localhost:4151"] ` diff --git a/plugins/inputs/passenger/passenger.go b/plugins/inputs/passenger/passenger.go index 802107f4f..84e92cb1a 100644 --- a/plugins/inputs/passenger/passenger.go +++ b/plugins/inputs/passenger/passenger.go @@ -126,14 +126,14 @@ func (p *process) getUptime() int64 { } var sampleConfig = ` - ### Path of passenger-status. - ### - ### Plugin gather metric via parsing XML output of passenger-status - ### More information about the tool: - ### https://www.phusionpassenger.com/library/admin/apache/overall_status_report.html - ### - ### If no path is specified, then the plugin simply execute passenger-status - ### hopefully it can be found in your PATH + ## Path of passenger-status. + ## + ## Plugin gather metric via parsing XML output of passenger-status + ## More information about the tool: + ## https://www.phusionpassenger.com/library/admin/apache/overall_status_report.html + ## + ## If no path is specified, then the plugin simply execute passenger-status + ## hopefully it can be found in your PATH command = "passenger-status -v --show=xml" ` diff --git a/plugins/inputs/phpfpm/phpfpm.go b/plugins/inputs/phpfpm/phpfpm.go index 157f87691..c07262342 100644 --- a/plugins/inputs/phpfpm/phpfpm.go +++ b/plugins/inputs/phpfpm/phpfpm.go @@ -41,25 +41,25 @@ type phpfpm struct { } var sampleConfig = ` - ### An array of addresses to gather stats about. Specify an ip or hostname - ### with optional port and path - ### - ### Plugin can be configured in three modes (either can be used): - ### - http: the URL must start with http:// or https://, ie: - ### "http://localhost/status" - ### "http://192.168.130.1/status?full" - ### - ### - unixsocket: path to fpm socket, ie: - ### "/var/run/php5-fpm.sock" - ### or using a custom fpm status path: - ### "/var/run/php5-fpm.sock:fpm-custom-status-path" - ### - ### - fcgi: the URL must start with fcgi:// or cgi://, and port must be present, ie: - ### "fcgi://10.0.0.12:9000/status" - ### "cgi://10.0.10.12:9001/status" - ### - ### Example of multiple gathering from local socket and remove host - ### urls = ["http://192.168.1.20/status", "/tmp/fpm.sock"] + ## An array of addresses to gather stats about. 
Specify an ip or hostname + ## with optional port and path + ## + ## Plugin can be configured in three modes (either can be used): + ## - http: the URL must start with http:// or https://, ie: + ## "http://localhost/status" + ## "http://192.168.130.1/status?full" + ## + ## - unixsocket: path to fpm socket, ie: + ## "/var/run/php5-fpm.sock" + ## or using a custom fpm status path: + ## "/var/run/php5-fpm.sock:fpm-custom-status-path" + ## + ## - fcgi: the URL must start with fcgi:// or cgi://, and port must be present, ie: + ## "fcgi://10.0.0.12:9000/status" + ## "cgi://10.0.10.12:9001/status" + ## + ## Example of multiple gathering from local socket and remove host + ## urls = ["http://192.168.1.20/status", "/tmp/fpm.sock"] urls = ["http://localhost/status"] ` diff --git a/plugins/inputs/ping/ping.go b/plugins/inputs/ping/ping.go index ab5df6e82..1798a5eb7 100644 --- a/plugins/inputs/ping/ping.go +++ b/plugins/inputs/ping/ping.go @@ -44,18 +44,18 @@ func (_ *Ping) Description() string { } var sampleConfig = ` - ### NOTE: this plugin forks the ping command. You may need to set capabilities - ### via setcap cap_net_raw+p /bin/ping + ## NOTE: this plugin forks the ping command. You may need to set capabilities + ## via setcap cap_net_raw+p /bin/ping - ### urls to ping + ## urls to ping urls = ["www.google.com"] # required - ### number of pings to send (ping -c ) + ## number of pings to send (ping -c ) count = 1 # required - ### interval, in s, at which to ping. 0 == default (ping -i ) + ## interval, in s, at which to ping. 0 == default (ping -i ) ping_interval = 0.0 - ### ping timeout, in s. 0 == no timeout (ping -t ) + ## ping timeout, in s. 0 == no timeout (ping -t ) timeout = 0.0 - ### interface to send ping from (ping -I ) + ## interface to send ping from (ping -I ) interface = "" ` diff --git a/plugins/inputs/postgresql/postgresql.go b/plugins/inputs/postgresql/postgresql.go index 660f1b318..fe2a56576 100644 --- a/plugins/inputs/postgresql/postgresql.go +++ b/plugins/inputs/postgresql/postgresql.go @@ -23,22 +23,22 @@ type Postgresql struct { var ignoredColumns = map[string]bool{"datid": true, "datname": true, "stats_reset": true} var sampleConfig = ` - ### specify address via a url matching: - ### postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full] - ### or a simple string: - ### host=localhost user=pqotest password=... sslmode=... dbname=app_production - ### - ### All connection parameters are optional. - ### - ### Without the dbname parameter, the driver will default to a database - ### with the same name as the user. This dbname is just for instantiating a - ### connection with the server and doesn't restrict the databases we are trying - ### to grab metrics for. - ### + ## specify address via a url matching: + ## postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full] + ## or a simple string: + ## host=localhost user=pqotest password=... sslmode=... dbname=app_production + ## + ## All connection parameters are optional. + ## + ## Without the dbname parameter, the driver will default to a database + ## with the same name as the user. This dbname is just for instantiating a + ## connection with the server and doesn't restrict the databases we are trying + ## to grab metrics for. + ## address = "host=localhost user=postgres sslmode=disable" - ### A list of databases to pull metrics about. If not specified, metrics for all - ### databases are gathered. + ## A list of databases to pull metrics about. 
If not specified, metrics for all + ## databases are gathered. # databases = ["app_production", "testing"] ` diff --git a/plugins/inputs/powerdns/powerdns.go b/plugins/inputs/powerdns/powerdns.go index f011f8716..0824ff672 100644 --- a/plugins/inputs/powerdns/powerdns.go +++ b/plugins/inputs/powerdns/powerdns.go @@ -18,8 +18,8 @@ type Powerdns struct { } var sampleConfig = ` - ### An array of sockets to gather stats about. - ### Specify a path to unix socket. + ## An array of sockets to gather stats about. + ## Specify a path to unix socket. unix_sockets = ["/var/run/pdns.controlsocket"] ` diff --git a/plugins/inputs/procstat/procstat.go b/plugins/inputs/procstat/procstat.go index 6305416b7..d3f18d5ea 100644 --- a/plugins/inputs/procstat/procstat.go +++ b/plugins/inputs/procstat/procstat.go @@ -30,15 +30,15 @@ func NewProcstat() *Procstat { } var sampleConfig = ` - ### Must specify one of: pid_file, exe, or pattern - ### PID file to monitor process + ## Must specify one of: pid_file, exe, or pattern + ## PID file to monitor process pid_file = "/var/run/nginx.pid" - ### executable name (ie, pgrep ) + ## executable name (ie, pgrep ) # exe = "nginx" - ### pattern as argument for pgrep (ie, pgrep -f ) + ## pattern as argument for pgrep (ie, pgrep -f ) # pattern = "nginx" - ### Field name prefix + ## Field name prefix prefix = "" ` diff --git a/plugins/inputs/prometheus/prometheus.go b/plugins/inputs/prometheus/prometheus.go index aea5c5f95..188e6b914 100644 --- a/plugins/inputs/prometheus/prometheus.go +++ b/plugins/inputs/prometheus/prometheus.go @@ -17,7 +17,7 @@ type Prometheus struct { } var sampleConfig = ` - ### An array of urls to scrape metrics from. + ## An array of urls to scrape metrics from. urls = ["http://localhost:9100/metrics"] ` diff --git a/plugins/inputs/puppetagent/puppetagent.go b/plugins/inputs/puppetagent/puppetagent.go index 882b1e3b8..f66aa989f 100644 --- a/plugins/inputs/puppetagent/puppetagent.go +++ b/plugins/inputs/puppetagent/puppetagent.go @@ -18,7 +18,7 @@ type PuppetAgent struct { } var sampleConfig = ` - ### Location of puppet last run summary file + ## Location of puppet last run summary file location = "/var/lib/puppet/state/last_run_summary.yaml" ` diff --git a/plugins/inputs/rabbitmq/rabbitmq.go b/plugins/inputs/rabbitmq/rabbitmq.go index 8b287204f..e51d65e15 100644 --- a/plugins/inputs/rabbitmq/rabbitmq.go +++ b/plugins/inputs/rabbitmq/rabbitmq.go @@ -107,8 +107,8 @@ var sampleConfig = ` # username = "guest" # password = "guest" - ### A list of nodes to pull metrics about. If not specified, metrics for - ### all nodes are gathered. + ## A list of nodes to pull metrics about. If not specified, metrics for + ## all nodes are gathered. # nodes = ["rabbit@node1", "rabbit@node2"] ` diff --git a/plugins/inputs/raindrops/raindrops.go b/plugins/inputs/raindrops/raindrops.go index 572422f59..fed22b693 100644 --- a/plugins/inputs/raindrops/raindrops.go +++ b/plugins/inputs/raindrops/raindrops.go @@ -21,7 +21,7 @@ type Raindrops struct { } var sampleConfig = ` - ### An array of raindrops middleware URI to gather stats. + ## An array of raindrops middleware URI to gather stats. urls = ["http://localhost:8080/_raindrops"] ` diff --git a/plugins/inputs/redis/redis.go b/plugins/inputs/redis/redis.go index 88420beac..b8862f6bc 100644 --- a/plugins/inputs/redis/redis.go +++ b/plugins/inputs/redis/redis.go @@ -19,14 +19,14 @@ type Redis struct { } var sampleConfig = ` - ### specify servers via a url matching: - ### [protocol://][:password]@address[:port] - ### e.g. 
- ### tcp://localhost:6379 - ### tcp://:password@192.168.99.100 - ### - ### If no servers are specified, then localhost is used as the host. - ### If no port is specified, 6379 is used + ## specify servers via a url matching: + ## [protocol://][:password]@address[:port] + ## e.g. + ## tcp://localhost:6379 + ## tcp://:password@192.168.99.100 + ## + ## If no servers are specified, then localhost is used as the host. + ## If no port is specified, 6379 is used servers = ["tcp://localhost:6379"] ` diff --git a/plugins/inputs/rethinkdb/rethinkdb.go b/plugins/inputs/rethinkdb/rethinkdb.go index 94d31fe5f..32237a80f 100644 --- a/plugins/inputs/rethinkdb/rethinkdb.go +++ b/plugins/inputs/rethinkdb/rethinkdb.go @@ -16,11 +16,11 @@ type RethinkDB struct { } var sampleConfig = ` - ### An array of URI to gather stats about. Specify an ip or hostname - ### with optional port add password. ie, - ### rethinkdb://user:auth_key@10.10.3.30:28105, - ### rethinkdb://10.10.3.33:18832, - ### 10.0.0.1:10000, etc. + ## An array of URIs to gather stats about. Specify an ip or hostname + ## with optional port and password. ie, + ## rethinkdb://user:auth_key@10.10.3.30:28105, + ## rethinkdb://10.10.3.33:18832, + ## 10.0.0.1:10000, etc. servers = ["127.0.0.1:28015"] ` diff --git a/plugins/inputs/sensors/sensors.go b/plugins/inputs/sensors/sensors.go index 82cc7df89..b2c2919cc 100644 --- a/plugins/inputs/sensors/sensors.go +++ b/plugins/inputs/sensors/sensors.go @@ -20,15 +20,15 @@ func (_ *Sensors) Description() string { } var sensorsSampleConfig = ` - ### By default, telegraf gathers stats from all sensors detected by the - ### lm-sensors module. - ### - ### Only collect stats from the selected sensors. Sensors are listed as - ### :. This information can be found by running the - ### sensors command, e.g. sensors -u - ### - ### A * as the feature name will return all features of the chip - ### + ## By default, telegraf gathers stats from all sensors detected by the + ## lm-sensors module. + ## + ## Only collect stats from the selected sensors. Sensors are listed as + ## :. This information can be found by running the + ## sensors command, e.g. 
sensors -u + ## + ## A * as the feature name will return all features of the chip + ## # sensors = ["coretemp-isa-0000:Core 0", "coretemp-isa-0001:*"] ` diff --git a/plugins/inputs/snmp/snmp.go b/plugins/inputs/snmp/snmp.go index 1932fed41..371bc2ad9 100644 --- a/plugins/inputs/snmp/snmp.go +++ b/plugins/inputs/snmp/snmp.go @@ -72,11 +72,11 @@ var initNode = Node{ var NameToOid = make(map[string]string) var sampleConfig = ` - ### Use 'oids.txt' file to translate oids to names - ### To generate 'oids.txt' you need to run: - ### snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt - ### Or if you have an other MIB folder with custom MIBs - ### snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt + ## Use 'oids.txt' file to translate oids to names + ## To generate 'oids.txt' you need to run: + ## snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt + ## Or if you have another MIB folder with custom MIBs + ## snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt snmptranslate_file = "/tmp/oids.txt" [[inputs.snmp.host]] address = "192.168.2.2:161" diff --git a/plugins/inputs/sqlserver/sqlserver.go b/plugins/inputs/sqlserver/sqlserver.go index 83d88b3c2..3b29a32c1 100644 --- a/plugins/inputs/sqlserver/sqlserver.go +++ b/plugins/inputs/sqlserver/sqlserver.go @@ -31,12 +31,12 @@ var queries MapQuery var defaultServer = "Server=.;app name=telegraf;log=1;" var sampleConfig = ` - ### Specify instances to monitor with a list of connection strings. - ### All connection parameters are optional. - ### By default, the host is localhost, listening on default port, TCP 1433. - ### for Windows, the user is the currently running AD user (SSO). - ### See https://github.com/denisenkom/go-mssqldb for detailed connection - ### parameters. + ## Specify instances to monitor with a list of connection strings. + ## All connection parameters are optional. + ## By default, the host is localhost, listening on default port, TCP 1433. + ## for Windows, the user is the currently running AD user (SSO). + ## See https://github.com/denisenkom/go-mssqldb for detailed connection + ## parameters. # servers = [ # "Server=192.168.1.10;Port=1433;User Id=;Password=;app name=telegraf;log=1;", # ] diff --git a/plugins/inputs/statsd/README.md b/plugins/inputs/statsd/README.md index 294c12b84..5bb18657c 100644 --- a/plugins/inputs/statsd/README.md +++ b/plugins/inputs/statsd/README.md @@ -5,39 +5,39 @@ ```toml # Statsd Server [[inputs.statsd]] - ### Address and port to host UDP listener on + ## Address and port to host UDP listener on service_address = ":8125" - ### Delete gauges every interval (default=false) + ## Delete gauges every interval (default=false) delete_gauges = false - ### Delete counters every interval (default=false) + ## Delete counters every interval (default=false) delete_counters = false - ### Delete sets every interval (default=false) + ## Delete sets every interval (default=false) delete_sets = false - ### Delete timings & histograms every interval (default=true) + ## Delete timings & histograms every interval (default=true) delete_timings = true - ### Percentiles to calculate for timing & histogram stats + ## Percentiles to calculate for timing & histogram stats percentiles = [90] - ### convert measurement names, "." 
to "_" and "-" to "__" convert_names = true - ### Statsd data translation templates, more info can be read here: - ### https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#graphite + ## Statsd data translation templates, more info can be read here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#graphite # templates = [ # "cpu.* measurement*" # ] - ### Number of UDP messages allowed to queue up, once filled, - ### the statsd server will start dropping packets + ## Number of UDP messages allowed to queue up, once filled, + ## the statsd server will start dropping packets allowed_pending_messages = 10000 - ### Number of timing/histogram values to track per-measurement in the - ### calculation of percentiles. Raising this limit increases the accuracy - ### of percentiles but also increases the memory usage and cpu time. + ## Number of timing/histogram values to track per-measurement in the + ## calculation of percentiles. Raising this limit increases the accuracy + ## of percentiles but also increases the memory usage and cpu time. percentile_limit = 1000 - ### UDP packet size for the server to listen for. This will depend on the size - ### of the packets that the client is sending, which is usually 1500 bytes. + ## UDP packet size for the server to listen for. This will depend on the size + ## of the packets that the client is sending, which is usually 1500 bytes. udp_packet_size = 1500 ``` diff --git a/plugins/inputs/statsd/statsd.go b/plugins/inputs/statsd/statsd.go index bc792149a..830e9d25c 100644 --- a/plugins/inputs/statsd/statsd.go +++ b/plugins/inputs/statsd/statsd.go @@ -123,39 +123,39 @@ func (_ *Statsd) Description() string { } const sampleConfig = ` - ### Address and port to host UDP listener on + ## Address and port to host UDP listener on service_address = ":8125" - ### Delete gauges every interval (default=false) + ## Delete gauges every interval (default=false) delete_gauges = false - ### Delete counters every interval (default=false) + ## Delete counters every interval (default=false) delete_counters = false - ### Delete sets every interval (default=false) + ## Delete sets every interval (default=false) delete_sets = false - ### Delete timings & histograms every interval (default=true) + ## Delete timings & histograms every interval (default=true) delete_timings = true - ### Percentiles to calculate for timing & histogram stats + ## Percentiles to calculate for timing & histogram stats percentiles = [90] - ### convert measurement names, "." to "_" and "-" to "__" + ## convert measurement names, "." to "_" and "-" to "__" convert_names = true - ### Statsd data translation templates, more info can be read here: - ### https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#graphite + ## Statsd data translation templates, more info can be read here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#graphite # templates = [ # "cpu.* measurement*" # ] - ### Number of UDP messages allowed to queue up, once filled, - ### the statsd server will start dropping packets + ## Number of UDP messages allowed to queue up, once filled, + ## the statsd server will start dropping packets allowed_pending_messages = 10000 - ### Number of timing/histogram values to track per-measurement in the - ### calculation of percentiles. Raising this limit increases the accuracy - ### of percentiles but also increases the memory usage and cpu time. 
+ ## Number of timing/histogram values to track per-measurement in the + ## calculation of percentiles. Raising this limit increases the accuracy + ## of percentiles but also increases the memory usage and cpu time. percentile_limit = 1000 - ### UDP packet size for the server to listen for. This will depend on the size - ### of the packets that the client is sending, which is usually 1500 bytes. + ## UDP packet size for the server to listen for. This will depend on the size + ## of the packets that the client is sending, which is usually 1500 bytes. udp_packet_size = 1500 ` diff --git a/plugins/inputs/system/cpu.go b/plugins/inputs/system/cpu.go index 47b3368a7..bef2a28f4 100644 --- a/plugins/inputs/system/cpu.go +++ b/plugins/inputs/system/cpu.go @@ -28,11 +28,11 @@ func (_ *CPUStats) Description() string { } var sampleConfig = ` - ### Whether to report per-cpu stats or not + ## Whether to report per-cpu stats or not percpu = true - ### Whether to report total system cpu stats or not + ## Whether to report total system cpu stats or not totalcpu = true - ### Comment this line if you want the raw CPU time metrics + ## Comment this line if you want the raw CPU time metrics drop = ["time_*"] ` diff --git a/plugins/inputs/system/disk.go b/plugins/inputs/system/disk.go index b8c611427..0488c839a 100644 --- a/plugins/inputs/system/disk.go +++ b/plugins/inputs/system/disk.go @@ -21,8 +21,8 @@ func (_ *DiskStats) Description() string { } var diskSampleConfig = ` - ### By default, telegraf gather stats for all mountpoints. - ### Setting mountpoints will restrict the stats to the specified mountpoints. + ## By default, telegraf gathers stats for all mountpoints. + ## Setting mountpoints will restrict the stats to the specified mountpoints. # mount_points = ["/"] ` diff --git a/plugins/inputs/system/net.go b/plugins/inputs/system/net.go index ea8b66266..f6bc05818 100644 --- a/plugins/inputs/system/net.go +++ b/plugins/inputs/system/net.go @@ -21,10 +21,10 @@ func (_ *NetIOStats) Description() string { } var netSampleConfig = ` - ### By default, telegraf gathers stats from any up interface (excluding loopback) - ### Setting interfaces will tell it to gather these explicit interfaces, - ### regardless of status. 
+ ## # interfaces = ["eth0"] ` diff --git a/plugins/inputs/trig/trig.go b/plugins/inputs/trig/trig.go index 51879dfc1..647794f0a 100644 --- a/plugins/inputs/trig/trig.go +++ b/plugins/inputs/trig/trig.go @@ -13,7 +13,7 @@ type Trig struct { } var TrigConfig = ` - ### Set the amplitude + ## Set the amplitude amplitude = 10.0 ` diff --git a/plugins/inputs/twemproxy/twemproxy.go b/plugins/inputs/twemproxy/twemproxy.go index d5ae12dee..cda56943f 100644 --- a/plugins/inputs/twemproxy/twemproxy.go +++ b/plugins/inputs/twemproxy/twemproxy.go @@ -17,9 +17,9 @@ type Twemproxy struct { } var sampleConfig = ` - ### Twemproxy stats address and port (no scheme) + ## Twemproxy stats address and port (no scheme) addr = "localhost:22222" - ### Monitor pool name + ## Monitor pool name pools = ["redis_pool", "mc_pool"] ` diff --git a/plugins/inputs/win_perf_counters/win_perf_counters.go b/plugins/inputs/win_perf_counters/win_perf_counters.go index e243588a6..8279f1c7a 100644 --- a/plugins/inputs/win_perf_counters/win_perf_counters.go +++ b/plugins/inputs/win_perf_counters/win_perf_counters.go @@ -14,12 +14,12 @@ import ( ) var sampleConfig string = ` - ### By default this plugin returns basic CPU and Disk statistics. - ### See the README file for more examples. - ### Uncomment examples below or write your own as you see fit. If the system - ### being polled for data does not have the Object at startup of the Telegraf - ### agent, it will not be gathered. - ### Settings: + ## By default this plugin returns basic CPU and Disk statistics. + ## See the README file for more examples. + ## Uncomment examples below or write your own as you see fit. If the system + ## being polled for data does not have the Object at startup of the Telegraf + ## agent, it will not be gathered. + ## Settings: # PrintValid = false # Print All matching performance counters [[inputs.win_perf_counters.object]] diff --git a/plugins/inputs/zfs/zfs.go b/plugins/inputs/zfs/zfs.go index 57d1fece4..bcbe03e95 100644 --- a/plugins/inputs/zfs/zfs.go +++ b/plugins/inputs/zfs/zfs.go @@ -23,15 +23,15 @@ type poolInfo struct { } var sampleConfig = ` - ### ZFS kstat path - ### If not specified, then default is: + ## ZFS kstat path + ## If not specified, then default is: kstatPath = "/proc/spl/kstat/zfs" - ### By default, telegraf gather all zfs stats - ### If not specified, then default is: + ## By default, telegraf gathers all zfs stats + ## If not specified, then default is: kstatMetrics = ["arcstats", "zfetchstats", "vdev_cache_stats"] - ### By default, don't gather zpool stats + ## By default, don't gather zpool stats poolMetrics = false ` diff --git a/plugins/inputs/zookeeper/zookeeper.go b/plugins/inputs/zookeeper/zookeeper.go index b18757cd6..0f2b2e06f 100644 --- a/plugins/inputs/zookeeper/zookeeper.go +++ b/plugins/inputs/zookeeper/zookeeper.go @@ -20,11 +20,11 @@ type Zookeeper struct { } var sampleConfig = ` - ### An array of address to gather stats about. Specify an ip or hostname - ### with port. ie localhost:2181, 10.0.0.1:2181, etc. + ## An array of addresses to gather stats about. Specify an ip or hostname + ## with port. ie localhost:2181, 10.0.0.1:2181, etc. - ### If no servers are specified, then localhost is used as the host. 
+ ## If no port is specified, 2181 is used servers = [":2181"] ` diff --git a/plugins/outputs/amon/amon.go b/plugins/outputs/amon/amon.go index 7d5cd5338..f88c2ddc5 100644 --- a/plugins/outputs/amon/amon.go +++ b/plugins/outputs/amon/amon.go @@ -22,13 +22,13 @@ type Amon struct { } var sampleConfig = ` - ### Amon Server Key + ## Amon Server Key server_key = "my-server-key" # required. - ### Amon Instance URL + ## Amon Instance URL amon_instance = "https://youramoninstance" # required - ### Connection timeout. + ## Connection timeout. # timeout = "5s" ` diff --git a/plugins/outputs/amqp/amqp.go b/plugins/outputs/amqp/amqp.go index ea80ad6a7..948007117 100644 --- a/plugins/outputs/amqp/amqp.go +++ b/plugins/outputs/amqp/amqp.go @@ -52,32 +52,32 @@ const ( ) var sampleConfig = ` - ### AMQP url + ## AMQP url url = "amqp://localhost:5672/influxdb" - ### AMQP exchange + ## AMQP exchange exchange = "telegraf" - ### Telegraf tag to use as a routing key - ### ie, if this tag exists, it's value will be used as the routing key + ## Telegraf tag to use as a routing key + ## ie, if this tag exists, its value will be used as the routing key routing_tag = "host" - ### InfluxDB retention policy + ## InfluxDB retention policy # retention_policy = "default" - ### InfluxDB database + ## InfluxDB database # database = "telegraf" - ### InfluxDB precision + ## InfluxDB precision # precision = "s" - ### Optional SSL Config + ## Optional SSL Config # ssl_ca = "/etc/telegraf/ca.pem" # ssl_cert = "/etc/telegraf/cert.pem" # ssl_key = "/etc/telegraf/key.pem" - ### Use SSL but skip chain & host verification + ## Use SSL but skip chain & host verification # insecure_skip_verify = false - ### Data format to output. This can be "influx" or "graphite" - ### Each data format has it's own unique set of configuration options, read - ### more about them here: - ### https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md + ## Data format to output. This can be "influx" or "graphite" + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md data_format = "influx" ` diff --git a/plugins/outputs/cloudwatch/cloudwatch.go b/plugins/outputs/cloudwatch/cloudwatch.go index a2d0d7b10..42d98b5be 100644 --- a/plugins/outputs/cloudwatch/cloudwatch.go +++ b/plugins/outputs/cloudwatch/cloudwatch.go @@ -25,10 +25,10 @@ type CloudWatch struct { } var sampleConfig = ` - ### Amazon REGION + ## Amazon REGION region = 'us-east-1' - ### Namespace for the CloudWatch MetricDatums + ## Namespace for the CloudWatch MetricDatums namespace = 'InfluxData/Telegraf' ` diff --git a/plugins/outputs/datadog/datadog.go b/plugins/outputs/datadog/datadog.go index 208757284..5d6fab165 100644 --- a/plugins/outputs/datadog/datadog.go +++ b/plugins/outputs/datadog/datadog.go @@ -24,10 +24,10 @@ type Datadog struct { } var sampleConfig = ` - ### Datadog API key + ## Datadog API key apikey = "my-secret-key" # required. - ### Connection timeout. + ## Connection timeout. # timeout = "5s" ` diff --git a/plugins/outputs/file/file.go b/plugins/outputs/file/file.go index 3d431774c..e593e3cea 100644 --- a/plugins/outputs/file/file.go +++ b/plugins/outputs/file/file.go @@ -20,13 +20,13 @@ type File struct { } var sampleConfig = ` - ### Files to write to, "stdout" is a specially handled file. + ## Files to write to, "stdout" is a specially handled file. files = ["stdout", "/tmp/metrics.out"] - ### Data format to output. 
This can be "influx" or "graphite" - ### Each data format has it's own unique set of configuration options, read - ### more about them here: - ### https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md + ## Data format to output. This can be "influx" or "graphite" + ## Each data format has it's own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md data_format = "influx" ` diff --git a/plugins/outputs/graphite/graphite.go b/plugins/outputs/graphite/graphite.go index 29ac774f4..717ce06c8 100644 --- a/plugins/outputs/graphite/graphite.go +++ b/plugins/outputs/graphite/graphite.go @@ -23,11 +23,11 @@ type Graphite struct { } var sampleConfig = ` - ### TCP endpoint for your graphite instance. + ## TCP endpoint for your graphite instance. servers = ["localhost:2003"] - ### Prefix metrics name + ## Prefix metrics name prefix = "" - ### timeout in seconds for the write connection to graphite + ## timeout in seconds for the write connection to graphite timeout = 2 ` diff --git a/plugins/outputs/influxdb/influxdb.go b/plugins/outputs/influxdb/influxdb.go index cb235f903..683227717 100644 --- a/plugins/outputs/influxdb/influxdb.go +++ b/plugins/outputs/influxdb/influxdb.go @@ -41,32 +41,32 @@ type InfluxDB struct { } var sampleConfig = ` - ### The full HTTP or UDP endpoint URL for your InfluxDB instance. - ### Multiple urls can be specified as part of the same cluster, - ### this means that only ONE of the urls will be written to each interval. + ## The full HTTP or UDP endpoint URL for your InfluxDB instance. + ## Multiple urls can be specified as part of the same cluster, + ## this means that only ONE of the urls will be written to each interval. # urls = ["udp://localhost:8089"] # UDP endpoint example urls = ["http://localhost:8086"] # required - ### The target database for metrics (telegraf will create it if not exists) + ## The target database for metrics (telegraf will create it if not exists) database = "telegraf" # required - ### Precision of writes, valid values are "ns", "us" (or "µs"), "ms", "s", "m", "h". - ### note: using "s" precision greatly improves InfluxDB compression + ## Precision of writes, valid values are "ns", "us" (or "µs"), "ms", "s", "m", "h". + ## note: using "s" precision greatly improves InfluxDB compression precision = "s" - ### Connection timeout (for the connection with InfluxDB), formatted as a string. - ### If not provided, will default to 0 (no timeout) + ## Connection timeout (for the connection with InfluxDB), formatted as a string. 
+ ## If not provided, will default to 0 (no timeout) # timeout = "5s" # username = "telegraf" # password = "metricsmetricsmetricsmetrics" - ### Set the user agent for HTTP POSTs (can be useful for log differentiation) + ## Set the user agent for HTTP POSTs (can be useful for log differentiation) # user_agent = "telegraf" - ### Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes) + ## Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes) # udp_payload = 512 - ### Optional SSL Config + ## Optional SSL Config # ssl_ca = "/etc/telegraf/ca.pem" # ssl_cert = "/etc/telegraf/cert.pem" # ssl_key = "/etc/telegraf/key.pem" - ### Use SSL but skip chain & host verification + ## Use SSL but skip chain & host verification # insecure_skip_verify = false ` diff --git a/plugins/outputs/kafka/kafka.go b/plugins/outputs/kafka/kafka.go index 667212f62..8dea2b2a1 100644 --- a/plugins/outputs/kafka/kafka.go +++ b/plugins/outputs/kafka/kafka.go @@ -45,25 +45,25 @@ type Kafka struct { } var sampleConfig = ` - ### URLs of kafka brokers + ## URLs of kafka brokers brokers = ["localhost:9092"] - ### Kafka topic for producer messages + ## Kafka topic for producer messages topic = "telegraf" - ### Telegraf tag to use as a routing key - ### ie, if this tag exists, it's value will be used as the routing key + ## Telegraf tag to use as a routing key + ## ie, if this tag exists, its value will be used as the routing key routing_tag = "host" - ### Optional SSL Config + ## Optional SSL Config # ssl_ca = "/etc/telegraf/ca.pem" # ssl_cert = "/etc/telegraf/cert.pem" # ssl_key = "/etc/telegraf/key.pem" - ### Use SSL but skip chain & host verification + ## Use SSL but skip chain & host verification # insecure_skip_verify = false - ### Data format to output. This can be "influx" or "graphite" - ### Each data format has it's own unique set of configuration options, read - ### more about them here: - ### https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md + ## Data format to output. This can be "influx" or "graphite" + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md data_format = "influx" ` diff --git a/plugins/outputs/kinesis/kinesis.go b/plugins/outputs/kinesis/kinesis.go index f293be5fd..01906a7f5 100644 --- a/plugins/outputs/kinesis/kinesis.go +++ b/plugins/outputs/kinesis/kinesis.go @@ -28,16 +28,16 @@ type KinesisOutput struct { } var sampleConfig = ` - ### Amazon REGION of kinesis endpoint. + ## Amazon REGION of kinesis endpoint. region = "ap-southeast-2" - ### Kinesis StreamName must exist prior to starting telegraf. + ## Kinesis StreamName must exist prior to starting telegraf. streamname = "StreamName" - ### PartitionKey as used for sharding data. + ## PartitionKey as used for sharding data. partitionkey = "PartitionKey" - ### format of the Data payload in the kinesis PutRecord, supported - ### String and Custom. + ## format of the Data payload in the kinesis PutRecord, supported + ## String and Custom. format = "string" - ### debug will show upstream aws messages. + ## debug will show upstream aws messages. 
debug = false ` diff --git a/plugins/outputs/librato/librato.go b/plugins/outputs/librato/librato.go index 826926d16..3897e0b4f 100644 --- a/plugins/outputs/librato/librato.go +++ b/plugins/outputs/librato/librato.go @@ -23,20 +23,20 @@ type Librato struct { } var sampleConfig = ` - ### Librator API Docs - ### http://dev.librato.com/v1/metrics-authentication + ## Librato API Docs + ## http://dev.librato.com/v1/metrics-authentication - ### Librato API user + ## Librato API user api_user = "telegraf@influxdb.com" # required. - ### Librato API token + ## Librato API token api_token = "my-secret-token" # required. - ### Tag Field to populate source attribute (optional) - ### This is typically the _hostname_ from which the metric was obtained. + ## Tag Field to populate source attribute (optional) + ## This is typically the _hostname_ from which the metric was obtained. source_tag = "hostname" - ### Connection timeout. + ## Connection timeout. # timeout = "5s" ` diff --git a/plugins/outputs/mqtt/mqtt.go b/plugins/outputs/mqtt/mqtt.go index ea638f3da..6f8abe954 100644 --- a/plugins/outputs/mqtt/mqtt.go +++ b/plugins/outputs/mqtt/mqtt.go @@ -16,26 +16,26 @@ import ( var sampleConfig = ` servers = ["localhost:1883"] # required. - ### MQTT outputs send metrics to this topic format - ### "///" - ### ex: prefix/web01.example.com/mem + ## MQTT outputs send metrics to this topic format + ## "///" + ## ex: prefix/web01.example.com/mem topic_prefix = "telegraf" - ### username and password to connect MQTT server. + ## username and password to connect MQTT server. # username = "telegraf" # password = "metricsmetricsmetricsmetrics" - ### Optional SSL Config + ## Optional SSL Config # ssl_ca = "/etc/telegraf/ca.pem" # ssl_cert = "/etc/telegraf/cert.pem" # ssl_key = "/etc/telegraf/key.pem" - ### Use SSL but skip chain & host verification + ## Use SSL but skip chain & host verification # insecure_skip_verify = false - ### Data format to output. This can be "influx" or "graphite" - ### Each data format has it's own unique set of configuration options, read - ### more about them here: - ### https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md + ## Data format to output. This can be "influx" or "graphite" + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md data_format = "influx" ` diff --git a/plugins/outputs/nsq/nsq.go b/plugins/outputs/nsq/nsq.go index ef23fab97..75b998484 100644 --- a/plugins/outputs/nsq/nsq.go +++ b/plugins/outputs/nsq/nsq.go @@ -19,15 +19,15 @@ type NSQ struct { } var sampleConfig = ` - ### Location of nsqd instance listening on TCP + ## Location of nsqd instance listening on TCP server = "localhost:4150" - ### NSQ topic for producer messages + ## NSQ topic for producer messages topic = "telegraf" - ### Data format to output. 
This can be "influx" or "graphite" + ## Each data format has it's own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md data_format = "influx" ` diff --git a/plugins/outputs/opentsdb/opentsdb.go b/plugins/outputs/opentsdb/opentsdb.go index 2d58389e7..83a3429e3 100644 --- a/plugins/outputs/opentsdb/opentsdb.go +++ b/plugins/outputs/opentsdb/opentsdb.go @@ -22,17 +22,17 @@ type OpenTSDB struct { } var sampleConfig = ` - ### prefix for metrics keys + ## prefix for metrics keys prefix = "my.specific.prefix." ## Telnet Mode ## - ### DNS name of the OpenTSDB server in telnet mode + ## DNS name of the OpenTSDB server in telnet mode host = "opentsdb.example.com" - ### Port of the OpenTSDB server in telnet mode + ## Port of the OpenTSDB server in telnet mode port = 4242 - ### Debug true - Prints OpenTSDB communication + ## Debug true - Prints OpenTSDB communication debug = false ` diff --git a/plugins/outputs/prometheus_client/prometheus_client.go b/plugins/outputs/prometheus_client/prometheus_client.go index 48bdddde6..df546c192 100644 --- a/plugins/outputs/prometheus_client/prometheus_client.go +++ b/plugins/outputs/prometheus_client/prometheus_client.go @@ -16,7 +16,7 @@ type PrometheusClient struct { } var sampleConfig = ` - ### Address to listen on + ## Address to listen on # listen = ":9126" ` diff --git a/plugins/outputs/riemann/riemann.go b/plugins/outputs/riemann/riemann.go index d20441391..c805bbd00 100644 --- a/plugins/outputs/riemann/riemann.go +++ b/plugins/outputs/riemann/riemann.go @@ -21,11 +21,11 @@ type Riemann struct { } var sampleConfig = ` - ### URL of server + ## URL of server url = "localhost:5555" - ### transport protocol to use either tcp or udp + ## transport protocol to use either tcp or udp transport = "tcp" - ### separator to use between input name and field name in Riemann service name + ## separator to use between input name and field name in Riemann service name separator = " " `