Update sample telegraf.conf

This commit is contained in:
Daniel Nelson 2018-09-18 18:15:10 -07:00
parent a75c789e3e
commit dab6ed7d8f
No known key found for this signature in database
GPG Key ID: CAAD59C9444F6155
1 changed file with 231 additions and 36 deletions

View File

@ -47,8 +47,8 @@
## same time, which can have a measurable effect on the system.
collection_jitter = "0s"
## Default flushing interval for all outputs. You shouldn't set this below
## interval. Maximum flush_interval will be flush_interval + flush_jitter
## Default flushing interval for all outputs. Maximum flush_interval will be
## flush_interval + flush_jitter
flush_interval = "10s"
## Jitter the flush interval by a random amount. This is primarily to avoid
## large write spikes for users running a large number of telegraf instances.
@ -494,6 +494,51 @@
# # Content-Type = "text/plain; charset=utf-8"
# # Configuration for sending metrics to InfluxDB
# [[outputs.influxdb_v2]]
# ## The URLs of the InfluxDB cluster nodes.
# ##
# ## Multiple URLs can be specified for a single cluster, only ONE of the
# ## urls will be written to each interval.
# urls = ["http://127.0.0.1:9999"]
#
# ## Token for authentication.
# token = ""
#
# ## Organization is the name of the organization you wish to write to; must exist.
# organization = ""
#
# ## Destination bucket to write into; must exist.
# bucket = ""
#
# ## Timeout for HTTP messages.
# # timeout = "5s"
#
# ## Additional HTTP headers
# # http_headers = {"X-Special-Header" = "Special-Value"}
#
# ## HTTP Proxy override, if unset values the standard proxy environment
# ## variables are consulted to determine which proxy, if any, should be used.
# # http_proxy = "http://corporate.proxy:3128"
#
# ## HTTP User-Agent
# # user_agent = "telegraf"
#
# ## Content-Encoding for write request body, can be set to "gzip" to
# ## compress body or "identity" to apply no encoding.
# # content_encoding = "gzip"
#
# ## Enable or disable uint support for writing uints to InfluxDB 2.0.
# # influx_uint_support = false
#
# ## Optional TLS Config for use on HTTP connections.
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
# # Configuration for sending metrics to an Instrumental project
# [[outputs.instrumental]]
# ## Project API Token (required)
@ -992,13 +1037,13 @@
# # Map enum values according to given table.
# [[processors.enum]]
# [[processors.enum.fields]]
# [[processors.enum.mapping]]
# ## Name of the field to map
# source = "name"
# field = "status"
#
# ## Destination field to be used for the mapped value. By default the source
# ## field is used, overwriting the original value.
# # destination = "mapped"
# # dest = "status_code"
#
# ## Default value to be used for all values not contained in the mapping
# ## table. When unset, the unmodified value for the field will be used if no
@ -1006,9 +1051,10 @@
# # default = 0
#
# ## Table of mappings
# [processors.enum.fields.value_mappings]
# value1 = 1
# value2 = 2
# [processors.enum.mapping.value_mappings]
# green = 1
# yellow = 2
# red = 3
# # Apply metric modifications using override semantics.
@ -1078,27 +1124,6 @@
# # Rename measurements, tags, and fields that pass through this filter.
# [[processors.rename]]
# ## Measurement, tag, and field renamings are stored in separate sub-tables.
# ## Specify one sub-table per rename operation.
# # [[processors.rename.measurement]]
# # ## measurement to change
# # from = "kilobytes_per_second"
# # to = "kbps"
#
# # [[processors.rename.tag]]
# # ## tag to change
# # from = "host"
# # to = "hostname"
#
# # [[processors.rename.field]]
# # ## field to change
# # from = "lower"
# # to = "min"
#
# # [[processors.rename.field]]
# # ## field to change
# # from = "upper"
# # to = "max"
# # Perform string processing on tags, fields, and measurements
@ -1433,6 +1458,16 @@
# bcacheDevs = ["bcache0"]
# # Collects Beanstalkd server and tubes stats
# [[inputs.beanstalkd]]
# ## Server to collect data from
# server = "localhost:11300"
#
# ## List of tubes to gather stats about.
# ## If no tubes specified then data gathered for each tube on server reported by list-tubes command
# tubes = ["notifications"]
# # Collect bond interface status, slaves statuses and failures count
# [[inputs.bond]]
# ## Sets 'proc' directory path
@ -2029,6 +2064,10 @@
# ## If no servers are specified, then default to 127.0.0.1:1936/haproxy?stats
# servers = ["http://myhaproxy.com:1936/haproxy?stats"]
#
# ## Credentials for basic HTTP authentication
# # username = "admin"
# # password = "admin"
#
# ## You can also use local socket with standard wildcard globbing.
# ## Server address not starting with 'http' will be treated as a possible
# ## socket, so both examples below are valid.
@ -2077,9 +2116,6 @@
# # username = "username"
# # password = "pa$$word"
#
# ## Tag all metrics with the url
# # tag_url = true
#
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
@ -2292,6 +2328,8 @@
# ## Setting 'use_lock' to true runs iptables with the "-w" option.
# ## Adjust your sudo settings appropriately if using this option ("iptables -wnvl")
# use_lock = false
# ## Define an alternate executable, such as "ip6tables". Default is "iptables".
# # binary = "ip6tables"
# ## defines the table to monitor:
# table = "filter"
# ## defines the chains to monitor.
@ -3058,6 +3096,9 @@
# ## If no port is specified, 6379 is used
# servers = ["tcp://localhost:6379"]
#
# ## specify server password
# # password = "s#cr@t%"
#
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
@ -3435,6 +3476,11 @@
# # virtual_servers = [1]
# # Read metrics about temperature
# [[inputs.temp]]
# # no configuration
# # Read Tengine's basic status information (ngx_http_reqstat_module)
# [[inputs.tengine]]
# # An array of Tengine reqstat module URI to gather stats.
@ -3818,9 +3864,6 @@
# # Stream and parse log file(s).
# [[inputs.logparser]]
# ## DEPRECATED: The 'logparser' plugin is deprecated in 1.8. Please use the
# ## 'tail' plugin with the grok data_format as a replacement.
#
# ## Log files to parse.
# ## These accept standard unix glob matching rules, but with the addition of
# ## ** as a "super asterisk". ie:
@ -4158,7 +4201,7 @@
# parse_data_dog_tags = false
#
# ## Statsd data translation templates, more info can be read here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#graphite
# ## https://github.com/influxdata/telegraf/blob/master/docs/TEMPLATE_PATTERN.md
# # templates = [
# # "cpu.* measurement*"
# # ]
@ -4254,6 +4297,158 @@
# # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener
# # Read metrics from VMware vCenter
# [[inputs.vsphere]]
# ## List of vCenter URLs to be monitored. These three lines must be uncommented
# ## and edited for the plugin to work.
# vcenters = [ "https://vcenter.local/sdk" ]
# username = "user@corp.local"
# password = "secret"
#
# ## VMs
# ## Typical VM metrics (if omitted or empty, all metrics are collected)
# vm_metric_include = [
# "cpu.demand.average",
# "cpu.idle.summation",
# "cpu.latency.average",
# "cpu.readiness.average",
# "cpu.ready.summation",
# "cpu.run.summation",
# "cpu.usagemhz.average",
# "cpu.used.summation",
# "cpu.wait.summation",
# "mem.active.average",
# "mem.granted.average",
# "mem.latency.average",
# "mem.swapin.average",
# "mem.swapinRate.average",
# "mem.swapout.average",
# "mem.swapoutRate.average",
# "mem.usage.average",
# "mem.vmmemctl.average",
# "net.bytesRx.average",
# "net.bytesTx.average",
# "net.droppedRx.summation",
# "net.droppedTx.summation",
# "net.usage.average",
# "power.power.average",
# "virtualDisk.numberReadAveraged.average",
# "virtualDisk.numberWriteAveraged.average",
# "virtualDisk.read.average",
# "virtualDisk.readOIO.latest",
# "virtualDisk.throughput.usage.average",
# "virtualDisk.totalReadLatency.average",
# "virtualDisk.totalWriteLatency.average",
# "virtualDisk.write.average",
# "virtualDisk.writeOIO.latest",
# "sys.uptime.latest",
# ]
# # vm_metric_exclude = [] ## Nothing is excluded by default
# # vm_instances = true ## true by default
#
# ## Hosts
# ## Typical host metrics (if omitted or empty, all metrics are collected)
# host_metric_include = [
# "cpu.coreUtilization.average",
# "cpu.costop.summation",
# "cpu.demand.average",
# "cpu.idle.summation",
# "cpu.latency.average",
# "cpu.readiness.average",
# "cpu.ready.summation",
# "cpu.swapwait.summation",
# "cpu.usage.average",
# "cpu.usagemhz.average",
# "cpu.used.summation",
# "cpu.utilization.average",
# "cpu.wait.summation",
# "disk.deviceReadLatency.average",
# "disk.deviceWriteLatency.average",
# "disk.kernelReadLatency.average",
# "disk.kernelWriteLatency.average",
# "disk.numberReadAveraged.average",
# "disk.numberWriteAveraged.average",
# "disk.read.average",
# "disk.totalReadLatency.average",
# "disk.totalWriteLatency.average",
# "disk.write.average",
# "mem.active.average",
# "mem.latency.average",
# "mem.state.latest",
# "mem.swapin.average",
# "mem.swapinRate.average",
# "mem.swapout.average",
# "mem.swapoutRate.average",
# "mem.totalCapacity.average",
# "mem.usage.average",
# "mem.vmmemctl.average",
# "net.bytesRx.average",
# "net.bytesTx.average",
# "net.droppedRx.summation",
# "net.droppedTx.summation",
# "net.errorsRx.summation",
# "net.errorsTx.summation",
# "net.usage.average",
# "power.power.average",
# "storageAdapter.numberReadAveraged.average",
# "storageAdapter.numberWriteAveraged.average",
# "storageAdapter.read.average",
# "storageAdapter.write.average",
# "sys.uptime.latest",
# ]
# # host_metric_exclude = [] ## Nothing excluded by default
# # host_instances = true ## true by default
#
# ## Clusters
# # cluster_metric_include = [] ## if omitted or empty, all metrics are collected
# # cluster_metric_exclude = [] ## Nothing excluded by default
# # cluster_instances = true ## true by default
#
# ## Datastores
# # datastore_metric_include = [] ## if omitted or empty, all metrics are collected
# # datastore_metric_exclude = [] ## Nothing excluded by default
# # datastore_instances = false ## false by default for Datastores only
#
# ## Datacenters
# datacenter_metric_include = [] ## if omitted or empty, all metrics are collected
# datacenter_metric_exclude = [ "*" ] ## Datacenters are not collected by default.
# # datacenter_instances = false ## false by default for Datacenters only
#
# ## Plugin Settings
# ## separator character to use for measurement and field names (default: "_")
# # separator = "_"
#
# ## number of objects to retrieve per query for realtime resources (vms and hosts)
# ## set to 64 for vCenter 5.5 and 6.0 (default: 256)
# # max_query_objects = 256
#
# ## number of metrics to retrieve per query for non-realtime resources (clusters and datastores)
# ## set to 64 for vCenter 5.5 and 6.0 (default: 256)
# # max_query_metrics = 256
#
# ## number of go routines to use for collection and discovery of objects and metrics
# # collect_concurrency = 1
# # discover_concurrency = 1
#
# ## whether or not to force discovery of new objects on initial gather call before collecting metrics
# ## when true for large environments this may cause errors for time elapsed while collecting metrics
# ## when false (default) the first collection cycle may result in no or limited metrics while objects are discovered
# # force_discover_on_init = false
#
# ## the interval before (re)discovering objects subject to metrics collection (default: 300s)
# # object_discovery_interval = "300s"
#
# ## timeout applies to any of the api request made to vcenter
# # timeout = "20s"
#
# ## Optional SSL Config
# # ssl_ca = "/path/to/cafile"
# # ssl_cert = "/path/to/certfile"
# # ssl_key = "/path/to/keyfile"
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false
# # A Webhooks Event collector
# [[inputs.webhooks]]
# ## Address and port to host Webhook listener on