Update telegraf.conf

Daniel Nelson 2018-11-05 14:51:44 -08:00
parent 574fa5a6be
commit cb84993b7e
1 changed file with 149 additions and 47 deletions


@@ -119,7 +119,7 @@
# user_agent = "telegraf"
## UDP payload size is the maximum packet size to send.
# udp_payload = 512
# udp_payload = "512B"
## Optional TLS Config for use on HTTP connections.
# tls_ca = "/etc/telegraf/ca.pem"
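The new value is a size string with a binary unit suffix (B, KiB, MiB, ...); a bare integer should still be read as a byte count. A minimal sketch of an output using the new form, assuming a local InfluxDB UDP endpoint (the URL is illustrative):

    [[outputs.influxdb]]
      ## illustrative UDP endpoint; any reachable InfluxDB UDP listener works
      urls = ["udp://127.0.0.1:8089"]
      ## maximum UDP payload per packet, now written with a unit suffix
      udp_payload = "512B"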
@@ -715,10 +715,11 @@
# # method = "measurement"
# #
# ## Use the value of a tag for all writes, if the tag is not set the empty
# ## string will be used:
# ## default option will be used. When no default, defaults to "telegraf"
# # [outputs.kinesis.partition]
# # method = "tag"
# # key = "host"
# # default = "mykey"
#
#
# ## Data format to output.
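A sketch of how the tag partition method resolves a key once a default is set; the region, stream name, and tag values are illustrative:

    [[outputs.kinesis]]
      region = "us-east-1"        ## illustrative
      streamname = "telegraf"     ## illustrative
      [outputs.kinesis.partition]
        method = "tag"
        key = "host"
        default = "unknown-host"
        ## a metric tagged host=web01 is written with partition key "web01";
        ## a metric with no host tag now falls back to "unknown-host" instead
        ## of an empty string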
@@ -1285,7 +1286,7 @@
# drop_original = false
# # Count the occurance of values in fields.
# # Count the occurrence of values in fields.
# [[aggregators.valuecounter]]
# ## General Aggregator Arguments:
# ## The period on which to flush & clear the aggregator.
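For context, a hedged sketch of what the aggregator emits for a counted field; the field name and values are made up:

    [[aggregators.valuecounter]]
      period = "30s"
      drop_original = false
      ## count occurrences of the values seen in this field
      fields = ["status"]
      ## over one period, three metrics with status=200 and one with status=404
      ## would be emitted as counter fields, roughly:
      ##   http,host=web01 status_200=3i,status_404=1i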
@@ -2005,10 +2006,11 @@
# ## Only count regular files. Defaults to true.
# regular_only = true
#
# ## Only count files that are at least this size in bytes. If size is
# ## Only count files that are at least this size. If size is
# ## a negative number, only count files that are smaller than the
# ## absolute value of size. Defaults to 0.
# size = 0
# ## absolute value of size. Acceptable units are B, KiB, MiB, KB, ...
# ## Without quotes and units, interpreted as size in bytes.
# size = "0B"
#
# ## Only count files that have not been touched for at least this
# ## duration. If mtime is negative, only count files that have been
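A sketch of the size option in its new unit-aware form; the directory path is illustrative:

    [[inputs.filecount]]
      directory = "/var/log"      ## illustrative path
      ## only count files of at least 1 MiB ...
      size = "1MiB"
      ## ... or, with a negative value, only files smaller than 10 KiB
      # size = "-10KiB"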
@@ -2369,6 +2371,11 @@
# chains = [ "INPUT" ]
# # Collect virtual and real server stats from Linux IPVS
# [[inputs.ipvs]]
# # no configuration
# # Read JMX metrics through Jolokia
# [[inputs.jolokia]]
# # DEPRECATED: the jolokia plugin has been deprecated in favor of the
@@ -2848,10 +2855,38 @@
# response_timeout = "5s"
# # Read Nginx Plus Api documentation
# [[inputs.nginx_plus_api]]
# ## An array of API URI to gather stats.
# urls = ["http://localhost/api"]
#
# # Nginx API version, default: 3
# # api_version = 3
#
# # HTTP response timeout (default: 5s)
# response_timeout = "5s"
# # Read Nginx virtual host traffic status module information (nginx-module-vts)
# [[inputs.nginx_vts]]
# ## An array of ngx_http_status_module or status URI to gather stats.
# urls = ["http://localhost/status"]
#
# ## HTTP response timeout (default: 5s)
# response_timeout = "5s"
# # Read NSQ topic and channel statistics.
# [[inputs.nsq]]
# ## An array of NSQD HTTP API endpoints
# endpoints = ["http://localhost:4151"]
# endpoints = ["http://localhost:4151"]
#
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
# # Collect kernel snmp counters and network interface statistics
@@ -3045,28 +3080,6 @@
# # pid_finder = "pgrep"
# # Read metrics from one or many prometheus clients
# [[inputs.prometheus]]
# ## An array of urls to scrape metrics from.
# urls = ["http://localhost:9100/metrics"]
#
# ## An array of Kubernetes services to scrape metrics from.
# # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"]
#
# ## Use bearer token for authorization
# # bearer_token = /path/to/bearer/token
#
# ## Specify timeout duration for slower prometheus clients (default is 3s)
# # response_timeout = "3s"
#
# ## Optional TLS Config
# # tls_ca = /path/to/cafile
# # tls_cert = /path/to/certfile
# # tls_key = /path/to/keyfile
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
# # Reads last_run_summary.yaml file and converts to measurements
# [[inputs.puppetagent]]
# ## Location of puppet last run summary file
@@ -3614,6 +3627,13 @@
# # instance_name = instanceName
# # Monitor wifi signal strength and quality
# [[inputs.wireless]]
# ## Sets 'proc' directory path
# ## If not specified, then default is /proc
# # host_proc = "/proc"
# # Reads metrics from an SSL certificate
# [[inputs.x509_cert]]
# ## List certificate sources
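For reference, a hedged sketch of the x509_cert input with an illustrative sources list (a local PEM file plus a TLS endpoint; both entries are made up):

    [[inputs.x509_cert]]
      ## certificate sources: file paths or host:port URLs (values are illustrative)
      sources = ["/etc/ssl/certs/mycert.pem", "tcp://example.org:443"]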
@@ -3716,6 +3736,16 @@
# ## Maximum number of messages server should give to the worker.
# # prefetch_count = 50
#
# ## Maximum messages to read from the broker that have not been written by an
# ## output. For best throughput set based on the number of metrics within
# ## each message and the size of the output's metric_batch_size.
# ##
# ## For example, if each message from the queue contains 10 metrics and the
# ## output metric_batch_size is 1000, setting this to 100 will ensure that a
# ## full batch is collected and the write is triggered immediately without
# ## waiting until the next flush_interval.
# # max_undelivered_messages = 1000
#
# ## Auth method. PLAIN and EXTERNAL are supported
# ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as
# ## described here: https://www.rabbitmq.com/plugins.html
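The sizing advice in the comment above reduces to a division: max_undelivered_messages is roughly the output metric_batch_size divided by the metrics per message. A sketch using the numbers from the example:

    [[inputs.amqp_consumer]]
      ## broker settings omitted for brevity
      ## with metric_batch_size = 1000 on the output and ~10 metrics per message,
      ## 1000 / 10 = 100 keeps exactly one full output batch in flight, so a
      ## write triggers as soon as the batch fills instead of waiting for
      ## flush_interval
      max_undelivered_messages = 100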
@@ -3768,12 +3798,12 @@
# write_timeout = "10s"
#
# ## Maximum allowed http request body size in bytes.
# ## 0 means to use the default of 536,870,912 bytes (500 mebibytes)
# max_body_size = 0
# ## 0 means to use the default of 524,288,000 bytes (500 mebibytes)
# max_body_size = "500MiB"
#
# ## Maximum line size allowed to be sent in bytes.
# ## 0 means to use the default of 65536 bytes (64 kibibytes)
# max_line_size = 0
# max_line_size = "64KiB"
#
# ## Set one or more allowed client CA certificate file names to
# ## enable mutually authenticated TLS connections
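For reference, the unit strings correspond to the byte counts quoted above (1 KiB = 1,024 B, 1 MiB = 1,048,576 B); a minimal restatement of the two limits:

    ## 500 * 1,048,576 B = 524,288,000 B
    max_body_size = "500MiB"
    ## 64 * 1,024 B = 65,536 B
    max_line_size = "64KiB"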
@@ -3806,8 +3836,8 @@
# # write_timeout = "10s"
#
# ## Maximum allowed http request body size in bytes.
# ## 0 means to use the default of 536,870,912 bytes (500 mebibytes)
# # max_body_size = 0
# ## 0 means to use the default of 524,288,000 bytes (500 mebibytes)
# # max_body_size = "500MB"
#
# ## Set one or more allowed client CA certificate file names to
# ## enable mutually authenticated TLS connections
@@ -3840,12 +3870,12 @@
# write_timeout = "10s"
#
# ## Maximum allowed http request body size in bytes.
# ## 0 means to use the default of 536,870,912 bytes (500 mebibytes)
# max_body_size = 0
# ## 0 means to use the default of 524,288,000 bytes (500 mebibytes)
# max_body_size = "500MiB"
#
# ## Maximum line size allowed to be sent in bytes.
# ## 0 means to use the default of 65536 bytes (64 kibibytes)
# max_line_size = 0
# max_line_size = "64KiB"
#
# ## Set one or more allowed client CA certificate file names to
# ## enable mutually authenticated TLS connections
@@ -3939,16 +3969,25 @@
# consumer_group = "telegraf_metrics_consumers"
# ## Offset (must be either "oldest" or "newest")
# offset = "oldest"
# ## Maximum length of a message to consume, in bytes (default 0/unlimited);
# ## larger messages are dropped
# max_message_len = 1000000
#
# ## Maximum messages to read from the broker that have not been written by an
# ## output. For best throughput set based on the number of metrics within
# ## each message and the size of the output's metric_batch_size.
# ##
# ## For example, if each message from the queue contains 10 metrics and the
# ## output metric_batch_size is 1000, setting this to 100 will ensure that a
# ## full batch is collected and the write is triggered immediately without
# ## waiting until the next flush_interval.
# # max_undelivered_messages = 1000
#
# ## Data format to consume.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
# data_format = "influx"
#
# ## Maximum length of a message to consume, in bytes (default 0/unlimited);
# ## larger messages are dropped
# max_message_len = 1000000
# # Read metrics from Kafka topic(s)
@@ -4043,6 +4082,16 @@
# ## Connection timeout for initial connection in seconds
# connection_timeout = "30s"
#
# ## Maximum messages to read from the broker that have not been written by an
# ## output. For best throughput set based on the number of metrics within
# ## each message and the size of the output's metric_batch_size.
# ##
# ## For example, if each message from the queue contains 10 metrics and the
# ## output metric_batch_size is 1000, setting this to 100 will ensure that a
# ## full batch is collected and the write is triggered immediately without
# ## waiting until the next flush_interval.
# # max_undelivered_messages = 1000
#
# ## Topics to subscribe to
# topics = [
# "telegraf/host01/cpu",
@@ -4078,19 +4127,29 @@
# # Read metrics from NATS subject(s)
# [[inputs.nats_consumer]]
# ## urls of NATS servers
# # servers = ["nats://localhost:4222"]
# servers = ["nats://localhost:4222"]
# ## Use Transport Layer Security
# # secure = false
# secure = false
# ## subject(s) to consume
# # subjects = ["telegraf"]
# subjects = ["telegraf"]
# ## name a queue group
# # queue_group = "telegraf_consumers"
# queue_group = "telegraf_consumers"
#
# ## Sets the limits for pending msgs and bytes for each subscription
# ## These shouldn't need to be adjusted except in very high throughput scenarios
# # pending_message_limit = 65536
# # pending_bytes_limit = 67108864
#
# ## Maximum messages to read from the broker that have not been written by an
# ## output. For best throughput set based on the number of metrics within
# ## each message and the size of the output's metric_batch_size.
# ##
# ## For example, if each message from the queue contains 10 metrics and the
# ## output metric_batch_size is 1000, setting this to 100 will ensure that a
# ## full batch is collected and the write is triggered immediately without
# ## waiting until the next flush_interval.
# # max_undelivered_messages = 1000
#
# ## Data format to consume.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
@@ -4110,6 +4169,16 @@
# channel = "consumer"
# max_in_flight = 100
#
# ## Maximum messages to read from the broker that have not been written by an
# ## output. For best throughput set based on the number of metrics within
# ## each message and the size of the output's metric_batch_size.
# ##
# ## For example, if each message from the queue contains 10 metrics and the
# ## output metric_batch_size is 1000, setting this to 100 will ensure that a
# ## full batch is collected and the write is triggered immediately without
# ## waiting until the next flush_interval.
# # max_undelivered_messages = 1000
#
# ## Data format to consume.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
@@ -4229,6 +4298,39 @@
# tagvalue="postgresql.stats"
# # Read metrics from one or many prometheus clients
# [[inputs.prometheus]]
# ## An array of urls to scrape metrics from.
# urls = ["http://localhost:9100/metrics"]
#
# ## An array of Kubernetes services to scrape metrics from.
# # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"]
#
# ## Kubernetes config file to create client from.
# # kube_config = "/path/to/kubernetes.config"
#
# ## Scrape Kubernetes pods for the following prometheus annotations:
# ## - prometheus.io/scrape: Enable scraping for this pod
# ## - prometheus.io/scheme: If the metrics endpoint is secured then you will need to
# ## set this to 'https' & most likely set the tls config.
# ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation.
# ## - prometheus.io/port: If port is not 9102 use this annotation
# # monitor_kubernetes_pods = true
#
# ## Use bearer token for authorization
# # bearer_token = /path/to/bearer/token
#
# ## Specify timeout duration for slower prometheus clients (default is 3s)
# # response_timeout = "3s"
#
# ## Optional TLS Config
# # tls_ca = /path/to/cafile
# # tls_cert = /path/to/certfile
# # tls_key = /path/to/keyfile
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
# # Generic socket listener capable of handling multiple socket types.
# [[inputs.socket_listener]]
# ## URL to listen on
@@ -4260,11 +4362,11 @@
# ## Enables client authentication if set.
# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
#
# ## Maximum socket buffer size in bytes.
# ## Maximum socket buffer size (in bytes when no unit specified).
# ## For stream sockets, once the buffer fills up, the sender will start backing up.
# ## For datagram sockets, once the buffer fills up, metrics will start dropping.
# ## Defaults to the OS default.
# # read_buffer_size = 65535
# # read_buffer_size = "64KiB"
#
# ## Period between keep alive probes.
# ## Only applies to TCP sockets.