Update telegraf.conf

Daniel Nelson 2019-02-01 12:27:02 -08:00
parent 35381707db
commit a98483cc11
1 changed file with 305 additions and 26 deletions


@@ -93,6 +93,7 @@
# urls = ["http://127.0.0.1:8086"]
## The target database for metrics; will be created as needed.
## For a UDP URL endpoint, the database needs to be configured on the server side.
# database = "telegraf"
## If true, no CREATE DATABASE queries will be sent. Set to true when using
@@ -293,6 +294,54 @@
# # resource_id = ""
# # Publish Telegraf metrics to a Google Cloud PubSub topic
# [[outputs.cloud_pubsub]]
# ## Required. Name of Google Cloud Platform (GCP) Project that owns
# ## the given PubSub topic.
# project = "my-project"
#
# ## Required. Name of PubSub topic to publish metrics to.
# topic = "my-topic"
#
# ## Required. Data format to output.
# ## Each data format has its own unique set of configuration options.
# ## Read more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# data_format = "influx"
#
# ## Optional. Filepath for GCP credentials JSON file to authorize calls to
# ## PubSub APIs. If not set explicitly, Telegraf will attempt to use
# ## Application Default Credentials, which is preferred.
# # credentials_file = "path/to/my/creds.json"
#
# ## Optional. If true, will send all metrics per write in one PubSub message.
# # send_batched = true
#
# ## The following publish_* parameters specifically configure batching of
# ## requests made to the GCP Cloud PubSub API via the PubSub Golang library.
# ## Read more here: https://godoc.org/cloud.google.com/go/pubsub#PublishSettings
#
# ## Optional. Send a request to PubSub (i.e. actually publish a batch)
# ## when it has this many PubSub messages. If send_batched is true,
# ## this is ignored and treated as if it were 1.
# # publish_count_threshold = 1000
#
# ## Optional. Send a request to PubSub (i.e. actually publish a batch)
# ## when it has this many bytes. If send_batched is true,
# ## this is ignored and treated as if it were 1.
# # publish_byte_threshold = 1000000
#
# ## Optional. Number of goroutines used to make publish requests to the PubSub API.
# # publish_num_go_routines = 2
#
# ## Optional. Specifies a timeout for requests to the PubSub API.
# # publish_timeout = "30s"
#
# ## Optional. PubSub attributes to add to metrics.
# # [outputs.cloud_pubsub.attributes]
# # my_attr = "tag_value"
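
A minimal, hedged sketch of this output with the options uncommented (the project, topic, and attribute value below are placeholders, not defaults):

    [[outputs.cloud_pubsub]]
      project = "my-project"          # placeholder GCP project id
      topic = "my-topic"              # placeholder PubSub topic
      data_format = "influx"
      send_batched = true             # one PubSub message per Telegraf write
      [outputs.cloud_pubsub.attributes]
        environment = "staging"       # illustrative attribute key/value
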
# # Configuration for AWS CloudWatch output.
# [[outputs.cloudwatch]]
# ## Amazon REGION
@@ -890,6 +939,9 @@
# ## If set, enable TLS with the given certificate.
# # tls_cert = "/etc/ssl/telegraf.crt"
# # tls_key = "/etc/ssl/telegraf.key"
#
# ## Export metric collection time.
# # export_timestamp = false
# # Configuration for the Riemann server to send metrics to
@@ -981,38 +1033,44 @@
# # Configuration for Wavefront server to send metrics to
# [[outputs.wavefront]]
# ## DNS name of the wavefront proxy server
# host = "wavefront.example.com"
# ## URL for Wavefront Direct Ingestion, or HTTP URL when using a Wavefront Proxy
# ## If using a Wavefront Proxy, also specify the port, e.g. http://proxyserver:2878
# url = "https://metrics.wavefront.com"
#
# ## Port that the Wavefront proxy server listens on
# port = 2878
# ## Authentication Token for Wavefront. Only required if using Direct Ingestion
# #token = "DUMMY_TOKEN"
#
# ## DNS name of the wavefront proxy server. Do not use if url is specified
# #host = "wavefront.example.com"
#
# ## Port that the Wavefront proxy server listens on. Do not use if url is specified
# #port = 2878
#
# ## prefix for metrics keys
# #prefix = "my.specific.prefix."
#
# ## whether to use "value" for name of simple fields
# ## whether to use "value" for name of simple fields. default is false
# #simple_fields = false
#
# ## character to use between metric and field name. defaults to . (dot)
# ## character to use between metric and field name. default is . (dot)
# #metric_separator = "."
#
# ## Convert metric name paths to use metricSeperator character
# ## When true (default) will convert all _ (underscore) chartacters in final metric name
# ## Convert metric name paths to use metricSeparator character
# ## When true will convert all _ (underscore) characters in final metric name. default is true
# #convert_paths = true
#
# ## Use Regex to sanitize metric and tag names from invalid characters
# ## Regex is more thorough, but significantly slower
# ## Regex is more thorough, but significantly slower. default is false
# #use_regex = false
#
# ## point tags to use as the source name for Wavefront (if none found, host will be used)
# #source_override = ["hostname", "agent_host", "node_host"]
# #source_override = ["hostname", "address", "agent_host", "node_host"]
#
# ## whether to convert boolean values to numeric values, with false -> 0.0 and true -> 1.0. default true
# ## whether to convert boolean values to numeric values, with false -> 0.0 and true -> 1.0. default is true
# #convert_bool = true
#
# ## Define a mapping, namespaced by metric prefix, from string values to numeric values
# ## The example below maps "green" -> 1.0, "yellow" -> 0.5, "red" -> 0.0 for
# ## any metrics beginning with "elasticsearch"
# ## deprecated in 1.9; use the enum processor plugin
# #[[outputs.wavefront.string_to_number.elasticsearch]]
# # green = 1.0
# # yellow = 0.5
@@ -1178,8 +1236,8 @@
# # field = "read_count"
# # suffix = "_count"
#
# ## Replace substrings within field names
# # [[processors.strings.trim_suffix]]
# ## Replace all non-overlapping instances of old with new
# # [[processors.strings.replace]]
# # measurement = "*"
# # old = ":"
# # new = "_"
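
To make the effect concrete, a hedged before/after in line protocol (measurement and field names invented): with the replace rule above, a point written as

    net:eth0 rx_bytes=42i 1549052822000000000

leaves the processor as

    net_eth0 rx_bytes=42i 1549052822000000000
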
@@ -1242,12 +1300,14 @@
# # Keep the aggregate basicstats of each metric passing through.
# [[aggregators.basicstats]]
# ## General Aggregator Arguments:
# ## The period on which to flush & clear the aggregator.
# period = "30s"
# ## If true, the original metric will be dropped by the
# ## aggregator and will not get sent to the output plugins.
# drop_original = false
#
# ## Configures which basic stats to push as fields
# # stats = ["count", "min", "max", "mean", "stdev", "s2", "sum"]
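
For instance, a sketch that pushes only the extremes each window (the period value is arbitrary):

    [[aggregators.basicstats]]
      period = "60s"
      drop_original = false
      stats = ["min", "max"]   # emit only <field>_min and <field>_max
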
# # Create aggregate histograms.
@@ -1339,6 +1399,8 @@
## Currently only Linux is supported via udev properties. You can view
## available properties for a device by running:
## 'udevadm info -q property -n /dev/sda'
## Note: Most, but not all, udev properties can be accessed this way. Properties
## that are currently inaccessible include DEVTYPE, DEVNAME, and DEVPATH.
# device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"]
#
## Using the same metadata source as device_tags, you can also customize the
@@ -1738,6 +1800,10 @@
# ## Works with CouchDB stats endpoints out of the box
# ## Multiple Hosts from which to read CouchDB stats:
# hosts = ["http://localhost:8086/_stats"]
#
# ## Use HTTP Basic Authentication.
# # basic_username = "telegraf"
# # basic_password = "p@ssw0rd"
# # Input plugin for DC/OS metrics
@@ -2151,6 +2217,13 @@
# # username = "username"
# # password = "pa$$word"
#
# ## HTTP entity-body to send with POST/PUT requests.
# # body = ""
#
# ## HTTP Content-Encoding for write request body, can be set to "gzip" to
# ## compress body or "identity" to apply no encoding.
# # content_encoding = "identity"
#
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
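
Assuming this hunk belongs to the inputs.http plugin (which gained body and content_encoding in this release), a hedged sketch of a POST-based poll with an invented endpoint and body:

    [[inputs.http]]
      urls = ["http://localhost:8080/metrics"]   # invented endpoint
      method = "POST"
      body = '{"status": true}'                  # invented request body
      content_encoding = "gzip"                  # gzip-compress the body
      data_format = "json"
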
@@ -2309,9 +2382,17 @@
# # This plugin gathers interrupts data from /proc/interrupts and /proc/softirqs.
# [[inputs.interrupts]]
# ## When set to true, cpu metrics are tagged with the cpu. Otherwise cpu is
# ## stored as a field.
# ##
# ## The default is false for backwards compatibility, and will be changed to
# ## true in a future version. It is recommended to set to true on new
# ## deployments.
# # cpu_as_tag = false
#
# ## To filter which IRQs to collect, make use of tagpass / tagdrop, e.g.:
# # [inputs.interrupts.tagdrop]
# # irq = [ "NET_RX", "TASKLET" ]
# # irq = [ "NET_RX", "TASKLET" ]
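
The inverse filter works the same way; a sketch using tagpass to collect only those IRQs:

    [[inputs.interrupts]]
      cpu_as_tag = true
      [inputs.interrupts.tagpass]
        irq = [ "NET_RX", "TASKLET" ]
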
# # Read metrics from the bare metal servers via IPMI
@@ -2378,6 +2459,50 @@
# # no configuration
# # Read jobs and cluster metrics from Jenkins instances
# [[inputs.jenkins]]
# ## The Jenkins URL
# url = "http://my-jenkins-instance:8080"
# # username = "admin"
# # password = "admin"
#
# ## Set response_timeout
# response_timeout = "5s"
#
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
#
# ## Optional Max Job Build Age filter
# ## Default 1 hour, ignore builds older than max_build_age
# # max_build_age = "1h"
#
# ## Optional Sub Job Depth filter
# ## Jenkins can have unlimited layers of sub jobs
# ## This config will limit the layers of pulling; the default value 0 means
# ## unlimited pulling until there are no more sub jobs
# # max_subjob_depth = 0
#
# ## Optional Sub Job Per Layer
# ## In workflow-multibranch-plugin, each branch will be created as a sub job.
# ## This config will limit the calls to only the latest branches in each layer,
# ## empty will use the default value of 10
# # max_subjob_per_layer = 10
#
# ## Jobs to exclude from gathering
# # job_exclude = [ "job1", "job2/subjob1/subjob2", "job3/*"]
#
# ## Nodes to exclude from gathering
# # node_exclude = [ "node1", "node2" ]
#
# ## Worker pool for the jenkins plugin only
# ## Leaving this field empty will use the default value of 5
# # max_connections = 5
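
Putting several of the filters above together, a hedged sketch (job and node names invented):

    [[inputs.jenkins]]
      url = "http://my-jenkins-instance:8080"
      username = "admin"
      password = "admin"
      max_build_age = "1h"
      max_subjob_depth = 2            # descend at most two layers of sub jobs
      job_exclude = [ "nightly/*" ]   # skip an invented job folder via glob
      node_exclude = [ "node1" ]
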
# # Read JMX metrics through Jolokia
# [[inputs.jolokia]]
# # DEPRECATED: the jolokia plugin has been deprecated in favor of the
@@ -2551,10 +2676,12 @@
# # Read metrics from the kubernetes kubelet api
# [[inputs.kubernetes]]
# ## URL for the kubelet
# url = "http://1.1.1.1:10255"
# url = "http://127.0.0.1:10255"
#
# ## Use bearer token for authorization
# # bearer_token = /path/to/bearer/token
# ## Use bearer token for authorization. ('bearer_token' takes priority)
# # bearer_token = "/path/to/bearer/token"
# ## OR
# # bearer_token_string = "abc_123"
#
# ## Set response_timeout (default 5 seconds)
# # response_timeout = "5s"
@@ -2693,6 +2820,31 @@
# # insecure_skip_verify = false
# # Aggregates the contents of multiple files into a single point
# [[inputs.multifile]]
# ## Base directory where telegraf will look for files.
# ## Omit this option to use absolute paths.
# base_dir = "/sys/bus/i2c/devices/1-0076/iio:device0"
#
# ## If true, Telegraf discards all data when a single file can't be read.
# ## Otherwise, Telegraf omits the field generated from this file.
# # fail_early = true
#
# ## Files to parse each interval.
# [[inputs.multifile.file]]
# file = "in_pressure_input"
# dest = "pressure"
# conversion = "float"
# [[inputs.multifile.file]]
# file = "in_temp_input"
# dest = "temperature"
# conversion = "float(3)"
# [[inputs.multifile.file]]
# file = "in_humidityrelative_input"
# dest = "humidityrelative"
# conversion = "float(3)"
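
Assuming a BME280-style sensor behind the i2c path above, the three files would be merged into one point per interval, roughly (values invented):

    multifile pressure=101.34,temperature=20.4,humidityrelative=48.9 1549052822000000000
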
# # Read metrics from one or many mysql servers
# [[inputs.mysql]]
# ## specify servers via a url matching:
@@ -2785,6 +2937,21 @@
# # response_timeout = "5s"
# # Neptune Apex data collector
# [[inputs.neptune_apex]]
# ## The Neptune Apex plugin reads the publicly available status.xml data from a local Apex.
# ## Measurements will be logged under "apex".
#
# ## The base URL of the local Apex(es). If you specify more than one server, they will
# ## be differentiated by the "source" tag.
# servers = [
# "http://apex.local",
# ]
#
# ## The response_timeout specifies how long to wait for a reply from the Apex.
# #response_timeout = "5s"
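
With more than one controller (hostnames invented), each point is distinguished by its "source" tag; a sketch:

    [[inputs.neptune_apex]]
      servers = [
        "http://apex-display.local",
        "http://apex-dos.local",
      ]
      response_timeout = "5s"
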
# # Read metrics about network interface usage
# [[inputs.net]]
# ## By default, telegraf gathers stats from any up interface (excluding loopback)
@@ -2869,6 +3036,36 @@
# response_timeout = "5s"
# # Read nginx_upstream_check module status information (https://github.com/yaoweibin/nginx_upstream_check_module)
# [[inputs.nginx_upstream_check]]
# ## A URL where the Nginx Upstream check module is enabled
# ## It should be set to return a JSON formatted response
# url = "http://127.0.0.1/status?format=json"
#
# ## HTTP method
# # method = "GET"
#
# ## Optional HTTP headers
# # headers = {"X-Special-Header" = "Special-Value"}
#
# ## Override HTTP "Host" header
# # host_header = "check.example.com"
#
# ## Timeout for HTTP requests
# timeout = "5s"
#
# ## Optional HTTP Basic Auth credentials
# # username = "username"
# # password = "pa$$word"
#
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
# # Read Nginx virtual host traffic status module information (nginx-module-vts)
# [[inputs.nginx_vts]]
# ## An array of ngx_http_status_module or status URI to gather stats.
@@ -3604,7 +3801,7 @@
#
# ## When set to true, thread metrics are tagged with the thread id.
# ##
# ## The default is false for backwards compatibility, and will be change to
# ## The default is false for backwards compatibility, and will be changed to
# ## true in a future version. It is recommended to set to true on new
# ## deployments.
# thread_as_tag = false
@@ -3627,6 +3824,9 @@
# ## Optional name for the varnish instance (or working directory) to query
# ## Usually appended after -n in varnish cli
# # instance_name = "instanceName"
#
# ## Timeout for varnishstat command
# # timeout = "1s"
# # Monitor wifi signal strength and quality
@@ -3789,6 +3989,71 @@
# ]
# # Read metrics from Google PubSub
# [[inputs.cloud_pubsub]]
# ## Required. Name of Google Cloud Platform (GCP) Project that owns
# ## the given PubSub subscription.
# project = "my-project"
#
# ## Required. Name of PubSub subscription to ingest metrics from.
# subscription = "my-subscription"
#
# ## Required. Data format to consume.
# ## Each data format has its own unique set of configuration options.
# ## Read more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
# data_format = "influx"
#
# ## Optional. Filepath for GCP credentials JSON file to authorize calls to
# ## PubSub APIs. If not set explicitly, Telegraf will attempt to use
# ## Application Default Credentials, which is preferred.
# # credentials_file = "path/to/my/creds.json"
#
# ## Optional. Maximum byte length of a message to consume.
# ## Larger messages are dropped with an error. If less than 0 or unspecified,
# ## treated as no limit.
# # max_message_len = 1000000
#
# ## Optional. Maximum messages to read from PubSub that have not been written
# ## to an output. Defaults to 1000.
# ## For best throughput set based on the number of metrics within
# ## each message and the size of the output's metric_batch_size.
# ##
# ## For example, if each message contains 10 metrics and the output
# ## metric_batch_size is 1000, setting this to 100 will ensure that a
# ## full batch is collected and the write is triggered immediately without
# ## waiting until the next flush_interval.
# # max_undelivered_messages = 1000
#
# ## The following are optional Subscription ReceiveSettings in PubSub.
# ## Read more about these values:
# ## https://godoc.org/cloud.google.com/go/pubsub#ReceiveSettings
#
# ## Optional. Maximum number of seconds for which a PubSub subscription
# ## should auto-extend the PubSub ACK deadline for each message. If less than
# ## 0, auto-extension is disabled.
# # max_extension = 0
#
# ## Optional. Maximum number of unprocessed messages in PubSub
# ## (unacknowledged but not yet expired in PubSub).
# ## A value of 0 is treated as the default PubSub value.
# ## Negative values will be treated as unlimited.
# # max_outstanding_messages = 0
#
# ## Optional. Maximum size in bytes of unprocessed messages in PubSub
# ## (unacknowledged but not yet expired in PubSub).
# ## A value of 0 is treated as the default PubSub value.
# ## Negative values will be treated as unlimited.
# # max_outstanding_bytes = 0
#
# ## Optional. Max number of goroutines a PubSub Subscription receiver can spawn
# ## to pull messages from PubSub concurrently. This limit applies to each
# ## subscription separately and is treated as the PubSub default if less than
# ## 1. Note this setting does not limit the number of messages that can be
# ## processed concurrently (use "max_outstanding_messages" instead).
# # max_receiver_go_routines = 0
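
Following the batching guidance above, a hedged pairing of the agent batch size with this input (numbers mirror the example in the comments):

    [agent]
      metric_batch_size = 1000

    [[inputs.cloud_pubsub]]
      project = "my-project"
      subscription = "my-subscription"
      data_format = "influx"
      max_undelivered_messages = 100   # 100 messages x ~10 metrics/message = one full batch
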
# # Influx HTTP write listener
# [[inputs.http_listener]]
# ## Address and port to host HTTP listener on
@@ -3946,6 +4211,8 @@
# brokers = ["localhost:9092"]
# ## topic(s) to consume
# topics = ["telegraf"]
# ## Add topic as tag if topic_tag is not empty
# # topic_tag = ""
#
# ## Optional Client id
# # client_id = "Telegraf"
@@ -4319,8 +4586,10 @@
# ## - prometheus.io/port: If port is not 9102 use this annotation
# # monitor_kubernetes_pods = true
#
# ## Use bearer token for authorization
# # bearer_token = /path/to/bearer/token
# ## Use bearer token for authorization. ('bearer_token' takes priority)
# # bearer_token = "/path/to/bearer/token"
# ## OR
# # bearer_token_string = "abc_123"
#
# ## Specify timeout duration for slower prometheus clients (default is 3s)
# # response_timeout = "3s"
@@ -4440,7 +4709,7 @@
# percentile_limit = 1000
# # Accepts syslog messages per RFC5425
# # Accepts syslog messages following RFC5424 format with transports as per RFC5426, RFC5425, or RFC6587
# [[inputs.syslog]]
# ## Specify an ip or hostname with port - eg., tcp://localhost:6514, tcp://10.0.0.1:6514
# ## Protocol, address and port to host the syslog receiver.
@@ -4468,6 +4737,16 @@
# ## 0 means unlimited.
# # read_timeout = "5s"
#
# ## The framing technique with which it is expected that messages are transported (default = "octet-counting").
# ## Whether the messages come using the octet-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1),
# ## or the non-transparent framing technique (RFC6587#section-3.4.2).
# ## Must be one of "octet-counting", "non-transparent".
# # framing = "octet-counting"
#
# ## The trailer to be expected in case of non-transparent framing (default = "LF").
# ## Must be one of "LF", or "NUL".
# # trailer = "LF"
#
# ## Whether to parse in best effort mode or not (default = false).
# ## By default best effort parsing is off.
# # best_effort = false
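
To make the two framings concrete (the priority value and byte count below are illustrative, not computed): octet-counting prefixes each message with its length in bytes,

    52 <13>1 2019-02-01T12:27:02Z host app - - - hello world

while non-transparent framing instead terminates each message with the configured trailer, here LF:

    <13>1 2019-02-01T12:27:02Z host app - - - hello world\n
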
@@ -4626,7 +4905,7 @@
# ## Clusters
# # cluster_metric_include = [] ## if omitted or empty, all metrics are collected
# # cluster_metric_exclude = [] ## Nothing excluded by default
# # cluster_instances = true ## true by default
# # cluster_instances = false ## false by default
#
# ## Datastores
# # datastore_metric_include = [] ## if omitted or empty, all metrics are collected
@@ -4663,7 +4942,7 @@
# # object_discovery_interval = "300s"
#
# ## timeout applies to any of the api request made to vcenter
# # timeout = "20s"
# # timeout = "60s"
#
# ## Optional SSL Config
# # ssl_ca = "/path/to/cafile"