Update telegraf.conf

Daniel Nelson 2017-04-27 11:52:50 -07:00
parent cd103c85db
commit b9ce455bba
No known key found for this signature in database
GPG Key ID: CAAD59C9444F6155
1 changed file with 84 additions and 19 deletions

@@ -55,10 +55,13 @@
## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
flush_jitter = "0s"
## By default, precision will be set to the same timestamp order as the
## collection interval, with the maximum being 1s.
## Precision will NOT be used for service inputs, such as logparser and statsd.
## Valid values are "ns", "us" (or "µs"), "ms", "s".
## By default or when set to "0s", precision will be set to the same
## timestamp order as the collection interval, with the maximum being 1s.
## ie, when interval = "10s", precision will be "1s"
## when interval = "250ms", precision will be "1ms"
## Precision will NOT be used for service inputs. It is up to each individual
## service input to set the timestamp at the appropriate precision.
## Valid time units are "ns", "us" (or "µs"), "ms", "s".
precision = ""
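## For example, to force millisecond precision regardless of the collection
## interval, one could set:
# precision = "1ms"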
## Logging configuration:
@@ -146,6 +149,10 @@
# ## InfluxDB database
# # database = "telegraf"
#
# ## Write timeout, formatted as a string. If not provided, will default
# ## to 5s. 0s means no timeout (not recommended).
# # timeout = "5s"
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
@@ -292,7 +299,7 @@
# ## Kafka topic for producer messages
# topic = "telegraf"
# ## Telegraf tag to use as a routing key
# ## ie, if this tag exists, it's value will be used as the routing key
# ## ie, if this tag exists, its value will be used as the routing key
# routing_tag = "host"
#
# ## CompressionCodec represents the various compression codecs recognized by
@@ -329,7 +336,7 @@
# # insecure_skip_verify = false
#
# ## Data format to output.
# ## Each data format has it's own unique set of configuration options, read
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# data_format = "influx"
@@ -359,6 +366,11 @@
# streamname = "StreamName"
# ## PartitionKey as used for sharding data.
# partitionkey = "PartitionKey"
# ## If set, the partitionKey will be a random UUID on every put.
# ## This allows for scaling across multiple shards in a stream.
# ## This will cause issues with ordering.
# use_random_partitionkey = false
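# ## For example, random partition keys could be enabled on a stream with many
# ## shards where strict ordering is not required:
# # use_random_partitionkey = true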
#
#
# ## Data format to output.
# ## Each data format has its own unique set of configuration options, read
@@ -545,6 +557,12 @@
# # address = "unix:///tmp/telegraf.sock"
# # address = "unixgram:///tmp/telegraf.sock"
#
# ## Period between keep alive probes.
# ## Only applies to TCP sockets.
# ## 0 disables keep alive probes.
# ## Defaults to the OS configuration.
# # keep_alive_period = "5m"
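# ## For example, keep alive probes could be disabled entirely with:
# # keep_alive_period = "0s"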
#
# ## Data format to generate.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
@@ -893,6 +911,12 @@
# servers = ["localhost"]
# # Provide a native collection for dmsetup based statistics for dm-cache
# [[inputs.dmcache]]
# ## Whether to report per-device stats or not
# per_device = true
# # Query given DNS server and gives statistics
# [[inputs.dns_query]]
# ## servers to query
@@ -929,6 +953,10 @@
# ## Whether to report for each container total blkio and network stats or not
# total = false
#
# ## docker labels to include and exclude as tags. Globs accepted.
# ## Note that an empty array for both will include all labels as tags
# docker_label_include = []
# docker_label_exclude = []
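# ## For example (a hypothetical label namespace), only labels matching a glob
# ## could be kept as tags:
# # docker_label_include = ["com.example.*"]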
# # Read statistics from one or many dovecot servers
@@ -1057,19 +1085,26 @@
# ## with optional port. ie localhost, 10.10.3.33:1936, etc.
# ## Make sure you specify the complete path to the stats endpoint
# ## including the protocol, ie http://10.10.3.33:1936/haproxy?stats
# #
#
# ## If no servers are specified, then default to 127.0.0.1:1936/haproxy?stats
# servers = ["http://myhaproxy.com:1936/haproxy?stats"]
# ##
#
# ## You can also use local socket with standard wildcard globbing.
# ## Server address not starting with 'http' will be treated as a possible
# ## socket, so both examples below are valid.
# ## servers = ["socket:/run/haproxy/admin.sock", "/run/haproxy/*.sock"]
# #
# # servers = ["socket:/run/haproxy/admin.sock", "/run/haproxy/*.sock"]
#
# ## By default, some of the fields are renamed from what haproxy calls them.
# ## Setting this option to true results in the plugin keeping the original
# ## field names.
# ## keep_field_names = true
# # keep_field_names = true
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false
# # Monitor disks' temperatures using hddtemp
@@ -1121,7 +1156,10 @@
# ## NOTE: This plugin only reads numerical measurements; strings and booleans
# ## will be ignored.
#
# ## a name for the service being polled
# ## Name for the service being polled. Will be appended to the name of the
# ## measurement e.g. httpjson_webserver_stats
# ##
# ## Deprecated (1.3.0): Use name_override, name_suffix, name_prefix instead.
# name = "webserver_stats"
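# ## For example, the deprecated option above could be reproduced with a name
# ## suffix on the default "httpjson" measurement name:
# # name_suffix = "_webserver_stats"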
#
# ## URL of each server in the service's cluster
@@ -1141,12 +1179,14 @@
# # "my_tag_2"
# # ]
#
# ## HTTP parameters (all values must be strings)
# [inputs.httpjson.parameters]
# event_type = "cpu_spike"
# threshold = "0.75"
# ## HTTP parameters (all values must be strings). For "GET" requests, data
# ## will be included in the query. For "POST" requests, data will be included
# ## in the request body as "x-www-form-urlencoded".
# # [inputs.httpjson.parameters]
# # event_type = "cpu_spike"
# # threshold = "0.75"
#
# ## HTTP Header parameters (all values must be strings)
# ## HTTP Headers (all values must be strings)
# # [inputs.httpjson.headers]
# # X-Auth-Token = "my-xauth-token"
# # apiVersion = "v1"
@@ -1181,6 +1221,13 @@
# # collect_memstats = true
# # This plugin gathers interrupts data from /proc/interrupts and /proc/softirqs.
# [[inputs.interrupts]]
# ## To filter which IRQs to collect, make use of tagpass / tagdrop, i.e.
# # [inputs.interrupts.tagdrop]
# # irq = [ "NET_RX", "TASKLET" ]
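# ## A tagpass filter works the same way in reverse, collecting only the listed
# ## IRQs (names here are illustrative):
# # [inputs.interrupts.tagpass]
# #   irq = [ "NET_RX" ]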
# # Read metrics from the bare metal servers via IPMI
# [[inputs.ipmi_sensor]]
# ## optionally specify the path to the ipmitool executable
@@ -1306,6 +1353,11 @@
# servers = ["127.0.0.1:4021"]
# # Provides Linux sysctl fs metrics
# [[inputs.linux_sysctl_fs]]
# # no configuration
# # Read metrics from local Lustre service on OST, MDS
# [[inputs.lustre2]]
# ## An array of /proc globs to search for Lustre stats
@@ -1562,7 +1614,7 @@
# ## NOTE: this plugin forks the ping command. You may need to set capabilities
# ## via setcap cap_net_raw+p /bin/ping
# #
# ## urls to ping
# ## List of urls to ping
# urls = ["www.google.com"] # required
# ## number of pings to send per collection (ping -c <COUNT>)
# # count = 1
@@ -2144,10 +2196,14 @@
# offset = "oldest"
#
# ## Data format to consume.
# ## Each data format has it's own unique set of configuration options, read
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
# data_format = "influx"
#
# ## Maximum length of a message to consume, in bytes (default 0/unlimited);
# ## larger messages are dropped
# max_message_len = 65536
# # Stream and parse log file(s).
@@ -2284,6 +2340,12 @@
# ## Defaults to the OS default.
# # read_buffer_size = 65535
#
# ## Period between keep alive probes.
# ## Only applies to TCP sockets.
# ## 0 disables keep alive probes.
# ## Defaults to the OS configuration.
# # keep_alive_period = "5m"
#
# ## Data format to consume.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
@@ -2389,4 +2451,7 @@
#
# [inputs.webhooks.rollbar]
# path = "/rollbar"
#
# [inputs.webhooks.papertrail]
# path = "/papertrail"