Cleanup comments and indentation in config file

Cameron Sparr 2016-02-08 22:57:26 -07:00
parent fb75db2f1f
commit fc7fa4b6c5
45 changed files with 237 additions and 242 deletions
internal/config
plugins/inputs/
  aerospike
  apache
  bcache
  couchdb
  disque
  docker
  elasticsearch
  github_webhooks
  haproxy
  httpjson
  influxdb
  jolokia
  leofs
  lustre2
  mailchimp
  memcached
  mongodb
  mysql
  nginx
  nsq
  passenger
  phpfpm
  ping
  postgresql
  powerdns
  procstat
  prometheus
  puppetagent
  rabbitmq
  redis
  rethinkdb
  sensors
  snmp
  sqlserver
  system
  trig
  twemproxy
  win_perf_counters
  zfs
  zookeeper
plugins/outputs/
  influxdb

View File

@@ -127,7 +127,9 @@ func (c *Config) ListTags() string {
return strings.Join(tags, " ")
}
var header = `# Telegraf configuration
var header = `###############################################################################
# Telegraf Configuration #
###############################################################################
# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared inputs, and sent to the declared outputs.
@@ -145,35 +147,35 @@ var header = `# Telegraf configuration
# Configuration for telegraf agent
[agent]
# Default data collection interval for all inputs
### Default data collection interval for all inputs
interval = "10s"
# Rounds collection interval to 'interval'
# ie, if interval="10s" then always collect on :00, :10, :20, etc.
### Rounds collection interval to 'interval'
### ie, if interval="10s" then always collect on :00, :10, :20, etc.
round_interval = true
# Telegraf will cache metric_buffer_limit metrics for each output, and will
# flush this buffer on a successful write.
### Telegraf will cache metric_buffer_limit metrics for each output, and will
### flush this buffer on a successful write.
metric_buffer_limit = 10000
# Collection jitter is used to jitter the collection by a random amount.
# Each plugin will sleep for a random time within jitter before collecting.
# This can be used to avoid many plugins querying things like sysfs at the
# same time, which can have a measurable effect on the system.
### Collection jitter is used to jitter the collection by a random amount.
### Each plugin will sleep for a random time within jitter before collecting.
### This can be used to avoid many plugins querying things like sysfs at the
### same time, which can have a measurable effect on the system.
collection_jitter = "0s"
# Default data flushing interval for all outputs. You should not set this below
# interval. Maximum flush_interval will be flush_interval + flush_jitter
### Default flushing interval for all outputs. You shouldn't set this below
### interval. Maximum flush_interval will be flush_interval + flush_jitter
flush_interval = "10s"
# Jitter the flush interval by a random amount. This is primarily to avoid
# large write spikes for users running a large number of telegraf instances.
# ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
### Jitter the flush interval by a random amount. This is primarily to avoid
### large write spikes for users running a large number of telegraf instances.
### ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
flush_jitter = "0s"
# Run telegraf in debug mode
### Run telegraf in debug mode
debug = false
# Run telegraf in quiet mode
### Run telegraf in quiet mode
quiet = false
# Override default hostname, if empty use os.Hostname()
### Override default hostname, if empty use os.Hostname()
hostname = ""
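
The agent settings above interact: each flush happens flush_interval plus a random delay of up to flush_jitter after the previous one. A minimal, illustrative Go sketch of that calculation (helper names are made up; this is not Telegraf's actual scheduler code):

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// jitteredFlushInterval returns flush_interval plus a random duration in
// [0, flush_jitter), matching the behaviour described in the comments above:
// an interval of "10s" with a jitter of "5s" yields a flush every 10-15s.
func jitteredFlushInterval(interval, jitter time.Duration) time.Duration {
	if jitter <= 0 {
		return interval
	}
	return interval + time.Duration(rand.Int63n(int64(jitter)))
}

func main() {
	fmt.Println(jitteredFlushInterval(10*time.Second, 5*time.Second))
}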

View File

@@ -104,11 +104,9 @@ type Aerospike struct {
}
var sampleConfig = `
# Aerospike servers to connect to (with port)
# Default: servers = ["localhost:3000"]
#
# This plugin will query all namespaces the aerospike
# server has configured and get stats for them.
### Aerospike servers to connect to (with port)
### This plugin will query all namespaces the aerospike
### server has configured and get stats for them.
servers = ["localhost:3000"]
`
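
For context on what this commit touches: each of these sampleConfig strings is returned by a plugin's SampleConfig() method and printed as part of the generated configuration, below the header shown above. A minimal sketch of that input-plugin pattern, assuming the import paths match the repository layout (the plugin name and fields here are hypothetical):

package example

import (
	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/inputs"
)

// Example is a hypothetical input plugin used only to illustrate the pattern.
type Example struct {
	Servers []string
}

// The ### comment style introduced by this commit lives inside each plugin's
// sample config string.
var sampleConfig = `
  ### An array of servers to gather stats about
  servers = ["localhost:1234"]
`

func (e *Example) SampleConfig() string { return sampleConfig }

func (e *Example) Description() string { return "A hypothetical example input" }

func (e *Example) Gather(acc telegraf.Accumulator) error {
	// Collect metrics here and hand them to the accumulator.
	return nil
}

func init() {
	inputs.Add("example", func() telegraf.Input { return &Example{} })
}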

View File

@@ -20,7 +20,7 @@ type Apache struct {
}
var sampleConfig = `
# An array of Apache status URI to gather stats.
### An array of Apache status URI to gather stats.
urls = ["http://localhost/server-status?auto"]
`

View File

@@ -18,14 +18,14 @@ type Bcache struct {
}
var sampleConfig = `
# Bcache sets path
# If not specified, then default is:
# bcachePath = "/sys/fs/bcache"
#
# By default, telegraf gather stats for all bcache devices
# Setting devices will restrict the stats to the specified
# bcache devices.
# bcacheDevs = ["bcache0", ...]
### Bcache sets path
### If not specified, then default is:
bcachePath = "/sys/fs/bcache"
### By default, telegraf gather stats for all bcache devices
### Setting devices will restrict the stats to the specified
### bcache devices.
bcacheDevs = ["bcache0"]
`
func (b *Bcache) SampleConfig() string {

View File

@@ -75,10 +75,10 @@ func (*CouchDB) Description() string {
func (*CouchDB) SampleConfig() string {
return `
# Works with CouchDB stats endpoints out of the box
# Multiple HOSTs from which to read CouchDB stats:
hosts = ["http://localhost:8086/_stats"]
`
### Works with CouchDB stats endpoints out of the box
### Multiple HOSTs from which to read CouchDB stats:
hosts = ["http://localhost:8086/_stats"]
`
}
func (c *CouchDB) Gather(accumulator telegraf.Accumulator) error {

View File

@@ -22,11 +22,11 @@ type Disque struct {
}
var sampleConfig = `
# An array of URI to gather stats about. Specify an ip or hostname
# with optional port and password. ie disque://localhost, disque://10.10.3.33:18832,
# 10.0.0.1:10000, etc.
#
# If no servers are specified, then localhost is used as the host.
### An array of URI to gather stats about. Specify an ip or hostname
### with optional port and password. ie disque://localhost, disque://10.10.3.33:18832,
### 10.0.0.1:10000, etc.
### If no servers are specified, then localhost is used as the host.
servers = ["localhost"]
`

View File

@@ -21,11 +21,11 @@ type Docker struct {
}
var sampleConfig = `
# Docker Endpoint
# To use TCP, set endpoint = "tcp://[ip]:[port]"
# To use environment variables (ie, docker-machine), set endpoint = "ENV"
### Docker Endpoint
### To use TCP, set endpoint = "tcp://[ip]:[port]"
### To use environment variables (ie, docker-machine), set endpoint = "ENV"
endpoint = "unix:///var/run/docker.sock"
# Only collect metrics for these containers, collect all if empty
### Only collect metrics for these containers, collect all if empty
container_names = []
`

View File

@@ -59,14 +59,14 @@ type indexHealth struct {
}
const sampleConfig = `
# specify a list of one or more Elasticsearch servers
### specify a list of one or more Elasticsearch servers
servers = ["http://localhost:9200"]
# set local to false when you want to read the indices stats from all nodes
# within the cluster
### set local to false when you want to read the indices stats from all nodes
### within the cluster
local = true
# set cluster_health to true when you want to also obtain cluster level stats
### set cluster_health to true when you want to also obtain cluster level stats
cluster_health = false
`

View File

@@ -31,7 +31,7 @@ func NewGithubWebhooks() *GithubWebhooks {
func (gh *GithubWebhooks) SampleConfig() string {
return `
# Address and port to host Webhook listener on
### Address and port to host Webhook listener on
service_address = ":1618"
`
}

View File

@@ -86,13 +86,13 @@ type haproxy struct {
}
var sampleConfig = `
# An array of address to gather stats about. Specify an ip on hostname
# with optional port. ie localhost, 10.10.3.33:1936, etc.
#
# If no servers are specified, then default to 127.0.0.1:1936
### An array of address to gather stats about. Specify an ip on hostname
### with optional port. ie localhost, 10.10.3.33:1936, etc.
### If no servers are specified, then default to 127.0.0.1:1936
servers = ["http://myhaproxy.com:1936", "http://anotherhaproxy.com:1936"]
# Or you can also use local socket(not work yet)
# servers = ["socket://run/haproxy/admin.sock"]
### Or you can also use local socket(not work yet)
### servers = ["socket://run/haproxy/admin.sock"]
`
func (r *haproxy) SampleConfig() string {

View File

@@ -46,37 +46,36 @@ func (c RealHTTPClient) MakeRequest(req *http.Request) (*http.Response, error) {
}
var sampleConfig = `
# NOTE This plugin only reads numerical measurements, strings and booleans
# will be ignored.
### NOTE This plugin only reads numerical measurements, strings and booleans
### will be ignored.
# a name for the service being polled
### a name for the service being polled
name = "webserver_stats"
# URL of each server in the service's cluster
### URL of each server in the service's cluster
servers = [
"http://localhost:9999/stats/",
"http://localhost:9998/stats/",
]
# HTTP method to use (case-sensitive)
### HTTP method to use (case-sensitive)
method = "GET"
# List of tag names to extract from top-level of JSON server response
### List of tag names to extract from top-level of JSON server response
# tag_keys = [
# "my_tag_1",
# "my_tag_2"
# ]
# HTTP parameters (all values must be strings)
### HTTP parameters (all values must be strings)
[inputs.httpjson.parameters]
event_type = "cpu_spike"
threshold = "0.75"
# HTTP Header parameters (all values must be strings)
### HTTP Header parameters (all values must be strings)
# [inputs.httpjson.headers]
# X-Auth-Token = "my-xauth-token"
# apiVersion = "v1"
`
func (h *HttpJson) SampleConfig() string {
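
Conceptually, the tag_keys option above lifts named top-level string fields out of each JSON response and records them as tags. An illustrative sketch, assuming a decoded top-level JSON object (not the plugin's actual parsing code):

package main

import (
	"encoding/json"
	"fmt"
)

// extractTags pulls the configured tag_keys out of a decoded top-level JSON
// object when their values are strings, mirroring the tag_keys comment above.
func extractTags(doc map[string]interface{}, tagKeys []string) map[string]string {
	tags := map[string]string{}
	for _, k := range tagKeys {
		if v, ok := doc[k].(string); ok {
			tags[k] = v
		}
	}
	return tags
}

func main() {
	var doc map[string]interface{}
	_ = json.Unmarshal([]byte(`{"my_tag_1":"web01","load":0.75}`), &doc)
	fmt.Println(extractTags(doc, []string{"my_tag_1", "my_tag_2"}))
}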

View File

@@ -22,11 +22,11 @@ func (*InfluxDB) Description() string {
func (*InfluxDB) SampleConfig() string {
return `
# Works with InfluxDB debug endpoints out of the box,
# but other services can use this format too.
# See the influxdb plugin's README for more details.
### Works with InfluxDB debug endpoints out of the box,
### but other services can use this format too.
### See the influxdb plugin's README for more details.
# Multiple URLs from which to read InfluxDB-formatted JSON
### Multiple URLs from which to read InfluxDB-formatted JSON
urls = [
"http://localhost:8086/debug/vars"
]

View File

@@ -46,10 +46,10 @@ type Jolokia struct {
func (j *Jolokia) SampleConfig() string {
return `
# This is the context root used to compose the jolokia url
### This is the context root used to compose the jolokia url
context = "/jolokia/read"
# List of servers exposing jolokia read service
### List of servers exposing jolokia read service
[[inputs.jolokia.servers]]
name = "stable"
host = "192.168.103.2"
@@ -57,9 +57,10 @@ func (j *Jolokia) SampleConfig() string {
# username = "myuser"
# password = "mypassword"
# List of metrics collected on above servers
# Each metric consists in a name, a jmx path and either a pass or drop slice attributes
# This collect all heap memory usage metrics
### List of metrics collected on above servers
### Each metric consists in a name, a jmx path and either
### a pass or drop slice attribute.
### This collect all heap memory usage metrics.
[[inputs.jolokia.metrics]]
name = "heap_memory_usage"
jmx = "/java.lang:type=Memory/HeapMemoryUsage"
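
The context root, server address and each metric's jmx path are combined into the URL that gets polled. An illustrative sketch of that composition; the port and http scheme are assumptions, since the hunk above does not show them:

package main

import "fmt"

// buildJolokiaURL is a hypothetical helper showing how a read URL could be
// composed from the config above: scheme://host:port + context + jmx path.
func buildJolokiaURL(host, port, context, jmx string) string {
	return fmt.Sprintf("http://%s:%s%s%s", host, port, context, jmx)
}

func main() {
	// e.g. http://192.168.103.2:8080/jolokia/read/java.lang:type=Memory/HeapMemoryUsage
	fmt.Println(buildJolokiaURL("192.168.103.2", "8080", "/jolokia/read",
		"/java.lang:type=Memory/HeapMemoryUsage"))
}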

View File

@@ -132,10 +132,8 @@ var serverTypeMapping = map[string]ServerType{
}
var sampleConfig = `
# An array of URI to gather stats about LeoFS.
# Specify an ip or hostname with port. ie 127.0.0.1:4020
#
# If no servers are specified, then 127.0.0.1 is used as the host and 4020 as the port.
### An array of URI to gather stats about LeoFS.
### Specify an ip or hostname with port. ie 127.0.0.1:4020
servers = ["127.0.0.1:4021"]
`

View File

@@ -29,10 +29,13 @@ type Lustre2 struct {
}
var sampleConfig = `
# An array of /proc globs to search for Lustre stats
# If not specified, the default will work on Lustre 2.5.x
#
# ost_procfiles = ["/proc/fs/lustre/obdfilter/*/stats", "/proc/fs/lustre/osd-ldiskfs/*/stats"]
### An array of /proc globs to search for Lustre stats
### If not specified, the default will work on Lustre 2.5.x
###
# ost_procfiles = [
# "/proc/fs/lustre/obdfilter/*/stats",
# "/proc/fs/lustre/osd-ldiskfs/*/stats"
# ]
# mds_procfiles = ["/proc/fs/lustre/mdt/*/md_stats"]
`

View File

@@ -17,13 +17,13 @@ type MailChimp struct {
}
var sampleConfig = `
# MailChimp API key
# get from https://admin.mailchimp.com/account/api/
### MailChimp API key
### get from https://admin.mailchimp.com/account/api/
api_key = "" # required
# Reports for campaigns sent more than days_old ago will not be collected.
# 0 means collect all.
### Reports for campaigns sent more than days_old ago will not be collected.
### 0 means collect all.
days_old = 0
# Campaign ID to get, if empty gets all campaigns, this option overrides days_old
### Campaign ID to get, if empty gets all campaigns, this option overrides days_old
# campaign_id = ""
`
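
A short sketch of the days_old cutoff described above, where 0 disables the cutoff entirely (illustrative only; the helper name is made up):

package main

import (
	"fmt"
	"time"
)

// shouldCollect reports whether a campaign sent at sendTime is recent enough
// to collect, per the days_old comment above (0 means collect everything).
func shouldCollect(sendTime time.Time, daysOld int) bool {
	if daysOld <= 0 {
		return true
	}
	return time.Since(sendTime) <= time.Duration(daysOld)*24*time.Hour
}

func main() {
	fmt.Println(shouldCollect(time.Now().AddDate(0, 0, -10), 7)) // false: older than 7 days
	fmt.Println(shouldCollect(time.Now().AddDate(0, 0, -10), 0)) // true: 0 collects all
}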

View File

@@ -19,10 +19,8 @@ type Memcached struct {
}
var sampleConfig = `
# An array of address to gather stats about. Specify an ip on hostname
# with optional port. ie localhost, 10.0.0.1:11211, etc.
#
# If no servers are specified, then localhost is used as the host.
### An array of address to gather stats about. Specify an ip on hostname
### with optional port. ie localhost, 10.0.0.1:11211, etc.
servers = ["localhost:11211"]
# unix_sockets = ["/var/run/memcached.sock"]
`

View File

@@ -26,11 +26,11 @@ type Ssl struct {
}
var sampleConfig = `
# An array of URI to gather stats about. Specify an ip or hostname
# with optional port add password. ie mongodb://user:auth_key@10.10.3.30:27017,
# mongodb://10.10.3.33:18832, 10.0.0.1:10000, etc.
#
# If no servers are specified, then 127.0.0.1 is used as the host and 27107 as the port.
### An array of URI to gather stats about. Specify an ip or hostname
### with optional port add password. ie,
### mongodb://user:auth_key@10.10.3.30:27017,
### mongodb://10.10.3.33:18832,
### 10.0.0.1:10000, etc.
servers = ["127.0.0.1:27017"]
`

View File

@@ -15,14 +15,14 @@ type Mysql struct {
}
var sampleConfig = `
# specify servers via a url matching:
# [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify]]
# see https://github.com/go-sql-driver/mysql#dsn-data-source-name
# e.g.
# root:passwd@tcp(127.0.0.1:3306)/?tls=false
# root@tcp(127.0.0.1:3306)/?tls=false
#
# If no servers are specified, then localhost is used as the host.
### specify servers via a url matching:
### [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify]]
### see https://github.com/go-sql-driver/mysql#dsn-data-source-name
### e.g.
### root:passwd@tcp(127.0.0.1:3306)/?tls=false
### root@tcp(127.0.0.1:3306)/?tls=false
###
### If no servers are specified, then localhost is used as the host.
servers = ["tcp(127.0.0.1:3306)/"]
`

View File

@@ -20,7 +20,7 @@ type Nginx struct {
}
var sampleConfig = `
# An array of Nginx stub_status URI to gather stats.
### An array of Nginx stub_status URI to gather stats.
urls = ["http://localhost/status"]
`

View File

@@ -41,7 +41,7 @@ type NSQ struct {
}
var sampleConfig = `
# An array of NSQD HTTP API endpoints
### An array of NSQD HTTP API endpoints
endpoints = ["http://localhost:4151"]
`

View File

@@ -126,16 +126,15 @@ func (p *process) getUptime() int64 {
}
var sampleConfig = `
# Path of passenger-status.
#
# Plugin gather metric via parsing XML output of passenger-status
# More information about the tool:
# https://www.phusionpassenger.com/library/admin/apache/overall_status_report.html
#
#
# If no path is specified, then the plugin simply execute passenger-status
# hopefully it can be found in your PATH
command = "passenger-status -v --show=xml"
### Path of passenger-status.
###
### Plugin gather metric via parsing XML output of passenger-status
### More information about the tool:
### https://www.phusionpassenger.com/library/admin/apache/overall_status_report.html
###
### If no path is specified, then the plugin simply execute passenger-status
### hopefully it can be found in your PATH
command = "passenger-status -v --show=xml"
`
func (r *passenger) SampleConfig() string {

View File

@@ -41,26 +41,25 @@ type phpfpm struct {
}
var sampleConfig = `
# An array of addresses to gather stats about. Specify an ip or hostname
# with optional port and path
#
# Plugin can be configured in three modes (either can be used):
# - http: the URL must start with http:// or https://, ie:
# "http://localhost/status"
# "http://192.168.130.1/status?full"
#
# - unixsocket: path to fpm socket, ie:
# "/var/run/php5-fpm.sock"
# or using a custom fpm status path:
# "/var/run/php5-fpm.sock:fpm-custom-status-path"
#
# - fcgi: the URL must start with fcgi:// or cgi://, and port must be present, ie:
# "fcgi://10.0.0.12:9000/status"
# "cgi://10.0.10.12:9001/status"
#
# Example of multiple gathering from local socket and remove host
# urls = ["http://192.168.1.20/status", "/tmp/fpm.sock"]
# If no servers are specified, then default to http://127.0.0.1/status
### An array of addresses to gather stats about. Specify an ip or hostname
### with optional port and path
###
### Plugin can be configured in three modes (either can be used):
### - http: the URL must start with http:// or https://, ie:
### "http://localhost/status"
### "http://192.168.130.1/status?full"
###
### - unixsocket: path to fpm socket, ie:
### "/var/run/php5-fpm.sock"
### or using a custom fpm status path:
### "/var/run/php5-fpm.sock:fpm-custom-status-path"
###
### - fcgi: the URL must start with fcgi:// or cgi://, and port must be present, ie:
### "fcgi://10.0.0.12:9000/status"
### "cgi://10.0.10.12:9001/status"
###
### Example of multiple gathering from local socket and remove host
### urls = ["http://192.168.1.20/status", "/tmp/fpm.sock"]
urls = ["http://localhost/status"]
`
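
A small sketch of how the three address modes described above can be told apart by their prefixes (illustrative only, not the plugin's code):

package main

import (
	"fmt"
	"strings"
)

// addressMode classifies a configured address into the three modes the
// comments above describe: http(s), fcgi/cgi, or a local unix socket path.
func addressMode(addr string) string {
	switch {
	case strings.HasPrefix(addr, "http://"), strings.HasPrefix(addr, "https://"):
		return "http"
	case strings.HasPrefix(addr, "fcgi://"), strings.HasPrefix(addr, "cgi://"):
		return "fcgi"
	default:
		return "unixsocket"
	}
}

func main() {
	for _, a := range []string{
		"http://localhost/status",
		"fcgi://10.0.0.12:9000/status",
		"/var/run/php5-fpm.sock:fpm-custom-status-path",
	} {
		fmt.Println(a, "->", addressMode(a))
	}
}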

View File

@@ -44,18 +44,18 @@ func (_ *Ping) Description() string {
}
var sampleConfig = `
# NOTE: this plugin forks the ping command. You may need to set capabilities
# via setcap cap_net_raw+p /bin/ping
### NOTE: this plugin forks the ping command. You may need to set capabilities
### via setcap cap_net_raw+p /bin/ping
# urls to ping
### urls to ping
urls = ["www.google.com"] # required
# number of pings to send (ping -c <COUNT>)
### number of pings to send (ping -c <COUNT>)
count = 1 # required
# interval, in s, at which to ping. 0 == default (ping -i <PING_INTERVAL>)
### interval, in s, at which to ping. 0 == default (ping -i <PING_INTERVAL>)
ping_interval = 0.0
# ping timeout, in s. 0 == no timeout (ping -t <TIMEOUT>)
### ping timeout, in s. 0 == no timeout (ping -t <TIMEOUT>)
timeout = 0.0
# interface to send ping from (ping -I <INTERFACE>)
### interface to send ping from (ping -I <INTERFACE>)
interface = ""
`
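
The comments above map each option onto a ping flag. A hedged sketch of assembling that argument list from the configured values (not the plugin's actual code; the float formatting is an assumption):

package main

import (
	"fmt"
	"strconv"
)

// pingArgs builds a ping command line from the options documented above:
// count -> -c, ping_interval -> -i, timeout -> -t, interface -> -I.
func pingArgs(url string, count int, interval, timeout float64, iface string) []string {
	args := []string{"-c", strconv.Itoa(count)}
	if interval > 0 {
		args = append(args, "-i", strconv.FormatFloat(interval, 'f', 1, 64))
	}
	if timeout > 0 {
		args = append(args, "-t", strconv.FormatFloat(timeout, 'f', 1, 64))
	}
	if iface != "" {
		args = append(args, "-I", iface)
	}
	return append(args, url)
}

func main() {
	fmt.Println(pingArgs("www.google.com", 1, 0.0, 0.0, ""))
}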

View File

@@ -21,22 +21,22 @@ type Postgresql struct {
var ignoredColumns = map[string]bool{"datid": true, "datname": true, "stats_reset": true}
var sampleConfig = `
# specify address via a url matching:
# postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full]
# or a simple string:
# host=localhost user=pqotest password=... sslmode=... dbname=app_production
#
# All connection parameters are optional.
#
# Without the dbname parameter, the driver will default to a database
# with the same name as the user. This dbname is just for instantiating a
# connection with the server and doesn't restrict the databases we are trying
# to grab metrics for.
#
### specify address via a url matching:
### postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full]
### or a simple string:
### host=localhost user=pqotest password=... sslmode=... dbname=app_production
###
### All connection parameters are optional.
###
### Without the dbname parameter, the driver will default to a database
### with the same name as the user. This dbname is just for instantiating a
### connection with the server and doesn't restrict the databases we are trying
### to grab metrics for.
###
address = "host=localhost user=postgres sslmode=disable"
# A list of databases to pull metrics about. If not specified, metrics for all
# databases are gathered.
### A list of databases to pull metrics about. If not specified, metrics for all
### databases are gathered.
# databases = ["app_production", "testing"]
`

View File

@@ -18,10 +18,8 @@ type Powerdns struct {
}
var sampleConfig = `
# An array of sockets to gather stats about.
# Specify a path to unix socket.
#
# If no servers are specified, then '/var/run/pdns.controlsocket' is used as the path.
### An array of sockets to gather stats about.
### Specify a path to unix socket.
unix_sockets = ["/var/run/pdns.controlsocket"]
`

View File

@@ -30,15 +30,15 @@ func NewProcstat() *Procstat {
}
var sampleConfig = `
# Must specify one of: pid_file, exe, or pattern
# PID file to monitor process
### Must specify one of: pid_file, exe, or pattern
### PID file to monitor process
pid_file = "/var/run/nginx.pid"
# executable name (ie, pgrep <exe>)
### executable name (ie, pgrep <exe>)
# exe = "nginx"
# pattern as argument for pgrep (ie, pgrep -f <pattern>)
### pattern as argument for pgrep (ie, pgrep -f <pattern>)
# pattern = "nginx"
# Field name prefix
### Field name prefix
prefix = ""
`
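
The three lookup options above correspond to three different ways of finding the monitored PIDs. An illustrative sketch of choosing between them (the helper name is made up):

package main

import "fmt"

// findProcessesCmd returns the command that would locate the monitored
// processes for each of the three options above: a PID file, pgrep <exe>,
// or pgrep -f <pattern>.
func findProcessesCmd(pidFile, exe, pattern string) []string {
	switch {
	case pidFile != "":
		return []string{"cat", pidFile} // read the PID from the file
	case exe != "":
		return []string{"pgrep", exe}
	case pattern != "":
		return []string{"pgrep", "-f", pattern}
	}
	return nil
}

func main() {
	fmt.Println(findProcessesCmd("/var/run/nginx.pid", "", ""))
}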

View File

@@ -17,7 +17,7 @@ type Prometheus struct {
}
var sampleConfig = `
# An array of urls to scrape metrics from.
### An array of urls to scrape metrics from.
urls = ["http://localhost:9100/metrics"]
`

View File

@@ -18,7 +18,7 @@ type PuppetAgent struct {
}
var sampleConfig = `
# Location of puppet last run summary file
### Location of puppet last run summary file
location = "/var/lib/puppet/state/last_run_summary.yaml"
`

View File

@@ -107,8 +107,8 @@ var sampleConfig = `
# username = "guest"
# password = "guest"
# A list of nodes to pull metrics about. If not specified, metrics for
# all nodes are gathered.
### A list of nodes to pull metrics about. If not specified, metrics for
### all nodes are gathered.
# nodes = ["rabbit@node1", "rabbit@node2"]
`

View File

@@ -19,14 +19,14 @@ type Redis struct {
}
var sampleConfig = `
# specify servers via a url matching:
# [protocol://][:password]@address[:port]
# e.g.
# tcp://localhost:6379
# tcp://:password@192.168.99.100
#
# If no servers are specified, then localhost is used as the host.
# If no port is specified, 6379 is used
### specify servers via a url matching:
### [protocol://][:password]@address[:port]
### e.g.
### tcp://localhost:6379
### tcp://:password@192.168.99.100
###
### If no servers are specified, then localhost is used as the host.
### If no port is specified, 6379 is used
servers = ["tcp://localhost:6379"]
`
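
As the comments note, both the host and the port may be omitted. An illustrative sketch of applying the documented defaults, localhost and 6379 (the helper name is made up):

package main

import (
	"fmt"
	"strings"
)

// withRedisDefaults fills in the defaults described above: an empty entry
// becomes localhost, and a missing port becomes 6379.
func withRedisDefaults(server string) string {
	if server == "" {
		server = "localhost"
	}
	addr := strings.TrimPrefix(server, "tcp://")
	// Strip a leading password section such as ":password@host".
	if i := strings.LastIndex(addr, "@"); i >= 0 {
		addr = addr[i+1:]
	}
	if !strings.Contains(addr, ":") {
		addr += ":6379"
	}
	return addr
}

func main() {
	fmt.Println(withRedisDefaults("tcp://localhost"))                // localhost:6379
	fmt.Println(withRedisDefaults("tcp://:password@192.168.99.100")) // 192.168.99.100:6379
}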

View File

@@ -16,11 +16,11 @@ type RethinkDB struct {
}
var sampleConfig = `
# An array of URI to gather stats about. Specify an ip or hostname
# with optional port add password. ie rethinkdb://user:auth_key@10.10.3.30:28105,
# rethinkdb://10.10.3.33:18832, 10.0.0.1:10000, etc.
#
# If no servers are specified, then 127.0.0.1 is used as the host and 28015 as the port.
### An array of URI to gather stats about. Specify an ip or hostname
### with optional port add password. ie,
### rethinkdb://user:auth_key@10.10.3.30:28105,
### rethinkdb://10.10.3.33:18832,
### 10.0.0.1:10000, etc.
servers = ["127.0.0.1:28015"]
`

View File

@@ -20,15 +20,15 @@ func (_ *Sensors) Description() string {
}
var sensorsSampleConfig = `
# By default, telegraf gathers stats from all sensors detected by the
# lm-sensors module.
#
# Only collect stats from the selected sensors. Sensors are listed as
# <chip name>:<feature name>. This information can be found by running the
# sensors command, e.g. sensors -u
#
# A * as the feature name will return all features of the chip
#
### By default, telegraf gathers stats from all sensors detected by the
### lm-sensors module.
###
### Only collect stats from the selected sensors. Sensors are listed as
### <chip name>:<feature name>. This information can be found by running the
### sensors command, e.g. sensors -u
###
### A * as the feature name will return all features of the chip
###
# sensors = ["coretemp-isa-0000:Core 0", "coretemp-isa-0001:*"]
`
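
A brief sketch of the <chip name>:<feature name> selector described above, with * matching every feature of a chip (illustrative; not the plugin's implementation):

package main

import "fmt"

// sensorSelected reports whether a reading identified as "<chip>:<feature>"
// matches one of the configured selectors, where "<chip>:*" selects all
// features of that chip.
func sensorSelected(chip, feature string, selectors []string) bool {
	for _, s := range selectors {
		if s == chip+":"+feature || s == chip+":*" {
			return true
		}
	}
	return false
}

func main() {
	selectors := []string{"coretemp-isa-0000:Core 0", "coretemp-isa-0001:*"}
	fmt.Println(sensorSelected("coretemp-isa-0001", "Core 3", selectors)) // true
	fmt.Println(sensorSelected("coretemp-isa-0000", "Core 3", selectors)) // false
}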

View File

@@ -72,11 +72,11 @@ var initNode = Node{
var NameToOid = make(map[string]string)
var sampleConfig = `
# Use 'oids.txt' file to translate oids to names
# To generate 'oids.txt' you need to run:
# snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt
# Or if you have an other MIB folder with custom MIBs
# snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt
### Use 'oids.txt' file to translate oids to names
### To generate 'oids.txt' you need to run:
### snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt
### Or if you have an other MIB folder with custom MIBs
### snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt
snmptranslate_file = "/tmp/oids.txt"
[[inputs.snmp.host]]
address = "192.168.2.2:161"

View File

@@ -31,14 +31,14 @@ var queries MapQuery
var defaultServer = "Server=.;app name=telegraf;log=1;"
var sampleConfig = `
# Specify instances to monitor with a list of connection strings.
# All connection parameters are optional.
# By default, the host is localhost, listening on default port, TCP 1433.
# for Windows, the user is the currently running AD user (SSO).
# See https://github.com/denisenkom/go-mssqldb for detailed connection parameters.
### Specify instances to monitor with a list of connection strings.
### All connection parameters are optional.
### By default, the host is localhost, listening on default port, TCP 1433.
### for Windows, the user is the currently running AD user (SSO).
### See https://github.com/denisenkom/go-mssqldb for detailed connection
### parameters.
# servers = [
# "Server=192.168.1.10;Port=1433;User Id=telegraf;Password=T$l$gr@f69*;app name=telegraf;log=1;",
# "Server=192.168.1.10;Port=1433;User Id=<user>;Password=<pw>;app name=telegraf;log=1;",
# ]
`

View File

@@ -28,11 +28,11 @@ func (_ *CPUStats) Description() string {
}
var sampleConfig = `
# Whether to report per-cpu stats or not
### Whether to report per-cpu stats or not
percpu = true
# Whether to report total system cpu stats or not
### Whether to report total system cpu stats or not
totalcpu = true
# Comment this line if you want the raw CPU time metrics
### Comment this line if you want the raw CPU time metrics
drop = ["time_*"]
`

View File

@@ -21,8 +21,8 @@ func (_ *DiskStats) Description() string {
}
var diskSampleConfig = `
# By default, telegraf gather stats for all mountpoints.
# Setting mountpoints will restrict the stats to the specified mountpoints.
### By default, telegraf gather stats for all mountpoints.
### Setting mountpoints will restrict the stats to the specified mountpoints.
# mount_points = ["/"]
`

View File

@@ -21,11 +21,11 @@ func (_ *NetIOStats) Description() string {
}
var netSampleConfig = `
# By default, telegraf gathers stats from any up interface (excluding loopback)
# Setting interfaces will tell it to gather these explicit interfaces,
# regardless of status.
#
# interfaces = ["eth0", ... ]
### By default, telegraf gathers stats from any up interface (excluding loopback)
### Setting interfaces will tell it to gather these explicit interfaces,
### regardless of status.
###
# interfaces = ["eth0"]
`
func (_ *NetIOStats) SampleConfig() string {

View File

@@ -13,7 +13,7 @@ type NetStats struct {
}
func (_ *NetStats) Description() string {
return "Read metrics about TCP status such as established, time wait etc and UDP sockets counts."
return "Read TCP metrics such as established, time wait and sockets counts."
}
var tcpstatSampleConfig = ""

View File

@@ -13,7 +13,7 @@ type Trig struct {
}
var TrigConfig = `
# Set the amplitude
### Set the amplitude
amplitude = 10.0
`
@@ -42,5 +42,5 @@ func (s *Trig) Gather(acc telegraf.Accumulator) error {
}
func init() {
inputs.Add("Trig", func() telegraf.Input { return &Trig{x: 0.0} })
inputs.Add("trig", func() telegraf.Input { return &Trig{x: 0.0} })
}

View File

@@ -17,9 +17,9 @@ type Twemproxy struct {
}
var sampleConfig = `
# Twemproxy stats address and port (no scheme)
### Twemproxy stats address and port (no scheme)
addr = "localhost:22222"
# Monitor pool name
### Monitor pool name
pools = ["redis_pool", "mc_pool"]
`

View File

@@ -14,12 +14,12 @@ import (
)
var sampleConfig string = `
# By default this plugin returns basic CPU and Disk statistics.
# See the README file for more examples.
# Uncomment examples below or write your own as you see fit. If the system
# being polled for data does not have the Object at startup of the Telegraf
# agent, it will not be gathered.
# Settings:
### By default this plugin returns basic CPU and Disk statistics.
### See the README file for more examples.
### Uncomment examples below or write your own as you see fit. If the system
### being polled for data does not have the Object at startup of the Telegraf
### agent, it will not be gathered.
### Settings:
# PrintValid = false # Print All matching performance counters
[[inputs.win_perf_counters.object]]

View File

@@ -23,16 +23,16 @@ type poolInfo struct {
}
var sampleConfig = `
# ZFS kstat path
# If not specified, then default is:
# kstatPath = "/proc/spl/kstat/zfs"
#
# By default, telegraf gather all zfs stats
# If not specified, then default is:
# kstatMetrics = ["arcstats", "zfetchstats", "vdev_cache_stats"]
#
# By default, don't gather zpool stats
# poolMetrics = false
### ZFS kstat path
### If not specified, then default is:
kstatPath = "/proc/spl/kstat/zfs"
### By default, telegraf gather all zfs stats
### If not specified, then default is:
kstatMetrics = ["arcstats", "zfetchstats", "vdev_cache_stats"]
### By default, don't gather zpool stats
poolMetrics = false
`
func (z *Zfs) SampleConfig() string {

View File

@@ -20,11 +20,11 @@ type Zookeeper struct {
}
var sampleConfig = `
# An array of address to gather stats about. Specify an ip or hostname
# with port. ie localhost:2181, 10.0.0.1:2181, etc.
### An array of address to gather stats about. Specify an ip or hostname
### with port. ie localhost:2181, 10.0.0.1:2181, etc.
# If no servers are specified, then localhost is used as the host.
# If no port is specified, 2181 is used
### If no servers are specified, then localhost is used as the host.
### If no port is specified, 2181 is used
servers = [":2181"]
`

View File

@@ -33,14 +33,14 @@ type InfluxDB struct {
var sampleConfig = `
### The full HTTP or UDP endpoint URL for your InfluxDB instance.
### Multiple urls can be specified but it is assumed that they are part of the same
### cluster, this means that only ONE of the urls will be written to each interval.
### Multiple urls can be specified as part of the same cluster,
### this means that only ONE of the urls will be written to each interval.
# urls = ["udp://localhost:8089"] # UDP endpoint example
urls = ["http://localhost:8086"] # required
### The target database for metrics (telegraf will create it if not exists)
database = "telegraf" # required
### Precision of writes, valid values are n, u, ms, s, m, and h
### note: using second precision greatly helps InfluxDB compression
### note: using "s" precision greatly improves InfluxDB compression
precision = "s"
### Connection timeout (for the connection with InfluxDB), formatted as a string.
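
The comment above says that when several urls are configured, only one of them receives each flush. A hedged sketch of one way such a target could be picked per write; the random selection here is an assumption, not necessarily what the output actually does:

package main

import (
	"fmt"
	"math/rand"
)

// pickWriteURL chooses a single target for this flush interval from the
// configured urls, per the "only ONE of the urls will be written to each
// interval" comment above. Random choice is an illustrative assumption.
func pickWriteURL(urls []string) string {
	return urls[rand.Intn(len(urls))]
}

func main() {
	fmt.Println(pickWriteURL([]string{"http://localhost:8086", "udp://localhost:8089"}))
}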