Update etc/telegraf.conf

Cameron Sparr 2016-11-03 14:31:55 +00:00
parent e43cfc2fce
commit 16081b2d1a
1 changed file with 89 additions and 29 deletions


@@ -66,7 +66,7 @@
 debug = false
 ## Run telegraf in quiet mode (error log messages only).
 quiet = false
-## Specify the log file name. The empty string means to log to stdout.
+## Specify the log file name. The empty string means to log to stderr.
 logfile = ""
 ## Override default hostname, if empty use os.Hostname()
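Since an empty logfile now means logging to stderr, anyone relying on stdout needs to redirect or set a file explicitly. A minimal sketch of the latter; the path is only an example:

```toml
[agent]
  ## Leave empty to log to stderr (the behaviour described above),
  ## or point at a file to persist logs; this path is illustrative.
  logfile = "/var/log/telegraf/telegraf.log"
```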
@@ -441,6 +441,30 @@
+###############################################################################
+# PROCESSOR PLUGINS #
+###############################################################################
+# # Print all metrics that pass through this filter.
+# [[processors.printer]]
+###############################################################################
+# AGGREGATOR PLUGINS #
+###############################################################################
+# # Keep the aggregate min/max of each metric passing through.
+# [[aggregators.minmax]]
+# ## General Aggregator Arguments:
+# ## The period on which to flush & clear the aggregator.
+# period = "30s"
+# ## If true, the original metric will be dropped by the
+# ## aggregator and will not get sent to the output plugins.
+# drop_original = false
 ###############################################################################
 # INPUT PLUGINS #
 ###############################################################################
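The new processor and aggregator sections ship fully commented out. A minimal sketch of what enabling them might look like, using only the options shown in the diff:

```toml
# Print every metric that passes through, useful for debugging pipelines.
[[processors.printer]]

# Keep a running min/max of each metric.
[[aggregators.minmax]]
  ## The period on which to flush & clear the aggregator.
  period = "30s"
  ## false keeps the original metrics alongside the aggregates.
  drop_original = false
```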
@@ -582,15 +606,18 @@
 # # Read specific statistics per cgroup
 # [[inputs.cgroup]]
 # ## Directories in which to look for files, globs are supported.
+# ## Consider restricting paths to the set of cgroups you really
+# ## want to monitor if you have a large number of cgroups, to avoid
+# ## any cardinality issues.
 # # paths = [
 # #   "/cgroup/memory",
 # #   "/cgroup/memory/child1",
 # #   "/cgroup/memory/child2/*",
 # # ]
 # ## cgroup stat fields, as file names, globs are supported.
 # ## these file names are appended to each path from above.
 # # files = ["memory.*usage*", "memory.limit_in_bytes"]
 # # Pull Metric Statistics from Amazon CloudWatch
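As the new comment advises, keeping the paths list short limits series cardinality. A hedged example built only from the paths and files already shown in the sample:

```toml
[[inputs.cgroup]]
  ## A small, explicit set of cgroups rather than a broad glob.
  paths = [
    "/cgroup/memory",
    "/cgroup/memory/child1",
  ]
  ## stat files (globs allowed) appended to each path above
  files = ["memory.*usage*", "memory.limit_in_bytes"]
```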
@@ -850,12 +877,15 @@
 # ## An array of address to gather stats about. Specify an ip on hostname
 # ## with optional port. ie localhost, 10.10.3.33:1936, etc.
 # ## Make sure you specify the complete path to the stats endpoint
-# ## ie 10.10.3.33:1936/haproxy?stats
+# ## including the protocol, ie http://10.10.3.33:1936/haproxy?stats
 # #
 # ## If no servers are specified, then default to 127.0.0.1:1936/haproxy?stats
 # servers = ["http://myhaproxy.com:1936/haproxy?stats"]
-# ## Or you can also use local socket
-# ## servers = ["socket:/run/haproxy/admin.sock"]
+# ##
+# ## You can also use local socket with standard wildcard globbing.
+# ## Server address not starting with 'http' will be treated as a possible
+# ## socket, so both examples below are valid.
+# ## servers = ["socket:/run/haproxy/admin.sock", "/run/haproxy/*.sock"]
 # # HTTP/HTTPS request given an address a method and a timeout
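Given the clarified comments, a single servers list can mix an HTTP endpoint (protocol included) with socket paths; anything not starting with 'http' is treated as a possible socket. A sketch using the addresses from the sample:

```toml
[[inputs.haproxy]]
  servers = [
    "http://myhaproxy.com:1936/haproxy?stats",
    "socket:/run/haproxy/admin.sock",
    "/run/haproxy/*.sock",
  ]
```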
@@ -1000,6 +1030,22 @@
 # attribute = "LoadedClassCount,UnloadedClassCount,TotalLoadedClassCount"
+# # Read metrics from the kubernetes kubelet api
+# [[inputs.kubernetes]]
+# ## URL for the kubelet
+# url = "http://1.1.1.1:10255"
+#
+# ## Use bearer token for authorization
+# # bearer_token = /path/to/bearer/token
+#
+# ## Optional SSL Config
+# # ssl_ca = /path/to/cafile
+# # ssl_cert = /path/to/certfile
+# # ssl_key = /path/to/keyfile
+# ## Use SSL but skip chain & host verification
+# # insecure_skip_verify = false
 # # Read metrics from a LeoFS Server via SNMP
 # [[inputs.leofs]]
 # ## An array of URI to gather stats about LeoFS.
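A sketch of the new kubernetes input with its options uncommented; the kubelet URL and credential paths are placeholders, and the token path is quoted so the value is valid TOML:

```toml
[[inputs.kubernetes]]
  ## URL for the kubelet (placeholder address)
  url = "http://1.1.1.1:10255"
  ## Use bearer token for authorization (placeholder path)
  bearer_token = "/path/to/bearer/token"
  ## Optional SSL config; only skip verification against test clusters
  # ssl_ca = "/path/to/cafile"
  # insecure_skip_verify = false
```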
@@ -1119,13 +1165,13 @@
 # ## gather metrics from SHOW BINARY LOGS command output
 # gather_binary_logs = false
 # #
-# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMART_BY_TABLE
+# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE
 # gather_table_io_waits = false
 # #
 # ## gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS
 # gather_table_lock_waits = false
 # #
-# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMART_BY_INDEX_USAGE
+# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE
 # gather_index_io_waits = false
 # #
 # ## gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS
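The corrected comments refer to PERFORMANCE_SCHEMA tables, so these flags only return data when performance_schema is enabled on the server. A sketch with the flags turned on; the DSN format is an assumption, not part of this diff:

```toml
[[inputs.mysql]]
  servers = ["tcp(127.0.0.1:3306)/"]   # illustrative DSN, not from this diff
  gather_table_io_waits = true         # TABLE_IO_WAITS_SUMMARY_BY_TABLE
  gather_index_io_waits = true         # TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE
  gather_table_lock_waits = true       # TABLE_LOCK_WAITS
```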
@@ -1247,13 +1293,13 @@
 # ## urls to ping
 # urls = ["www.google.com"] # required
 # ## number of pings to send per collection (ping -c <COUNT>)
-# count = 1 # required
+# # count = 1
 # ## interval, in s, at which to ping. 0 == default (ping -i <PING_INTERVAL>)
-# ping_interval = 0.0
+# # ping_interval = 1.0
 # ## per-ping timeout, in s. 0 == no timeout (ping -W <TIMEOUT>)
-# timeout = 1.0
+# # timeout = 1.0
 # ## interface to send ping from (ping -I <INTERFACE>)
-# interface = ""
+# # interface = ""
 # # Read metrics from one or many postgresql servers
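With the options now commented out by default, only urls is required; the rest can be uncommented when the defaults need overriding. The count value below is illustrative:

```toml
[[inputs.ping]]
  urls = ["www.google.com"]   # required
  # count = 3                 # pings per collection (ping -c), illustrative
  # ping_interval = 1.0       # seconds between pings (ping -i)
  # timeout = 1.0             # per-ping timeout in seconds (ping -W)
```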
@@ -1681,9 +1727,18 @@
 # ## Address and port to host HTTP listener on
 # service_address = ":8186"
 # #
-# ## timeouts
+# ## maximum duration before timing out read of the request
 # read_timeout = "10s"
+# ## maximum duration before timing out write of the response
 # write_timeout = "10s"
+#
+# ## Maximum allowed http request body size in bytes.
+# ## 0 means to use the default of 536,870,912 bytes (500 mebibytes)
+# max_body_size = 0
+#
+# ## Maximum line size allowed to be sent in bytes.
+# ## 0 means to use the default of 65536 bytes (64 kibibytes)
+# max_line_size = 0
 # # Read metrics from Kafka topic(s)
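A sketch of the listener with the new size limits set explicitly rather than left at 0 (which falls back to the defaults documented above); the numbers are illustrative:

```toml
[[inputs.http_listener]]
  service_address = ":8186"
  read_timeout = "10s"
  write_timeout = "10s"
  max_body_size = 268435456   # 256 MiB instead of the 500 MiB default
  max_line_size = 131072      # 128 KiB instead of the 64 KiB default
```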
@@ -1778,13 +1833,18 @@
 # # Read metrics from NATS subject(s)
 # [[inputs.nats_consumer]]
 # ## urls of NATS servers
-# servers = ["nats://localhost:4222"]
+# # servers = ["nats://localhost:4222"]
 # ## Use Transport Layer Security
-# secure = false
+# # secure = false
 # ## subject(s) to consume
-# subjects = ["telegraf"]
+# # subjects = ["telegraf"]
 # ## name a queue group
-# queue_group = "telegraf_consumers"
+# # queue_group = "telegraf_consumers"
+#
+# ## Sets the limits for pending msgs and bytes for each subscription
+# ## These shouldn't need to be adjusted except in very high throughput scenarios
+# # pending_message_limit = 65536
+# # pending_bytes_limit = 67108864
 # #
 # ## Data format to consume.
 # ## Each data format has it's own unique set of configuration options, read
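A sketch of a working consumer after this change, with the connection values mirroring the commented defaults above; data_format = "influx" is an assumption about the desired wire format, not part of this diff:

```toml
[[inputs.nats_consumer]]
  servers = ["nats://localhost:4222"]
  subjects = ["telegraf"]
  queue_group = "telegraf_consumers"
  ## Raise only in very high throughput scenarios.
  # pending_message_limit = 65536
  # pending_bytes_limit = 67108864
  data_format = "influx"   # assumed line-protocol input
```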
@@ -1871,14 +1931,14 @@
 # # Generic TCP listener
 # [[inputs.tcp_listener]]
 # ## Address and port to host TCP listener on
-# service_address = ":8094"
+# # service_address = ":8094"
 # #
 # ## Number of TCP messages allowed to queue up. Once filled, the
 # ## TCP listener will start dropping packets.
-# allowed_pending_messages = 10000
+# # allowed_pending_messages = 10000
 # #
 # ## Maximum number of concurrent TCP connections to allow
-# max_tcp_connections = 250
+# # max_tcp_connections = 250
 # #
 # ## Data format to consume.
 # ## Each data format has it's own unique set of configuration options, read
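The TCP listener now documents its defaults as commented values; a sketch that sets them explicitly, using the same numbers as above and assuming influx line protocol as the data format:

```toml
[[inputs.tcp_listener]]
  service_address = ":8094"
  allowed_pending_messages = 10000
  max_tcp_connections = 250
  data_format = "influx"   # assumed line-protocol input
```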
@@ -1890,11 +1950,11 @@
 # # Generic UDP listener
 # [[inputs.udp_listener]]
 # ## Address and port to host UDP listener on
-# service_address = ":8092"
+# # service_address = ":8092"
 # #
 # ## Number of UDP messages allowed to queue up. Once filled, the
 # ## UDP listener will start dropping packets.
-# allowed_pending_messages = 10000
+# # allowed_pending_messages = 10000
 # #
 # ## Data format to consume.
 # ## Each data format has it's own unique set of configuration options, read