Update README & etc/telegraf.conf

Cameron Sparr 2016-05-31 11:02:10 +01:00
parent eeeab5192b
commit 069764f05e
2 changed files with 152 additions and 124 deletions

README.md

@@ -233,6 +233,7 @@ want to add support for another service or third-party API.
 * [datadog](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/datadog)
 * [file](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/file)
 * [graphite](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/graphite)
+* [graylog](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/graylog)
 * [instrumental](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/instrumental)
 * [kafka](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/kafka)
 * [librato](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/librato)

etc/telegraf.conf

@@ -106,10 +106,10 @@
 # [[outputs.amon]]
 # ## Amon Server Key
 # server_key = "my-server-key" # required.
 #
 # ## Amon Instance URL
 # amon_instance = "https://youramoninstance" # required
 #
 # ## Connection timeout.
 # # timeout = "5s"

@@ -125,21 +125,21 @@
 # ## Telegraf tag to use as a routing key
 # ## ie, if this tag exists, it's value will be used as the routing key
 # routing_tag = "host"
 #
 # ## InfluxDB retention policy
 # # retention_policy = "default"
 # ## InfluxDB database
 # # database = "telegraf"
 # ## InfluxDB precision
 # # precision = "s"
 #
 # ## Optional SSL Config
 # # ssl_ca = "/etc/telegraf/ca.pem"
 # # ssl_cert = "/etc/telegraf/cert.pem"
 # # ssl_key = "/etc/telegraf/key.pem"
 # ## Use SSL but skip chain & host verification
 # # insecure_skip_verify = false
 #
 # ## Data format to output.
 # ## Each data format has it's own unique set of configuration options, read
 # ## more about them here:

@@ -151,16 +151,22 @@
 # [[outputs.cloudwatch]]
 # ## Amazon REGION
 # region = 'us-east-1'
 #
 # ## Amazon Credentials
 # ## Credentials are loaded in the following order
-# ## 1) explicit credentials from 'access_key' and 'secret_key'
-# ## 2) environment variables
-# ## 3) shared credentials file
-# ## 4) EC2 Instance Profile
+# ## 1) Assumed credentials via STS if role_arn is specified
+# ## 2) explicit credentials from 'access_key' and 'secret_key'
+# ## 3) shared profile from 'profile'
+# ## 4) environment variables
+# ## 5) shared credentials file
+# ## 6) EC2 Instance Profile
 # #access_key = ""
 # #secret_key = ""
-#
+# #token = ""
+# #role_arn = ""
+# #profile = ""
+# #shared_credential_file = ""
+#
 # ## Namespace for the CloudWatch MetricDatums
 # namespace = 'InfluxData/Telegraf'
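
With this change the AWS output plugins can assume an IAM role through STS instead of relying on static keys. A minimal sketch of the new options for outputs.cloudwatch; the role ARN is a made-up placeholder:

    [[outputs.cloudwatch]]
      region = 'us-east-1'
      ## assumed credentials via STS take precedence over every other source
      role_arn = "arn:aws:iam::123456789012:role/telegraf"   ## placeholder ARN
      namespace = 'InfluxData/Telegraf'
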
@@ -169,7 +175,7 @@
 # [[outputs.datadog]]
 # ## Datadog API key
 # apikey = "my-secret-key" # required.
 #
 # ## Connection timeout.
 # # timeout = "5s"

@@ -178,7 +184,7 @@
 # [[outputs.file]]
 # ## Files to write to, "stdout" is a specially handled file.
 # files = ["stdout", "/tmp/metrics.out"]
 #
 # ## Data format to output.
 # ## Each data format has it's own unique set of configuration options, read
 # ## more about them here:

@@ -199,6 +205,12 @@
 # timeout = 2
+
+# # Send telegraf metrics to graylog(s)
+# [[outputs.graylog]]
+# ## Udp endpoint for your graylog instance.
+# servers = ["127.0.0.1:12201", "192.168.1.1:12201"]
+
 # # Configuration for sending metrics to an Instrumental project
 # [[outputs.instrumental]]
 # ## Project API Token (required)
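
The new graylog output ships metrics over UDP. A minimal sketch for a single local instance, assuming a Graylog UDP input listening on its default port 12201:

    [[outputs.graylog]]
      ## Udp endpoint for your graylog instance.
      servers = ["127.0.0.1:12201"]
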
@@ -223,14 +235,14 @@
 # ## Telegraf tag to use as a routing key
 # ## ie, if this tag exists, it's value will be used as the routing key
 # routing_tag = "host"
 #
 # ## CompressionCodec represents the various compression codecs recognized by
 # ## Kafka in messages.
 # ## 0 : No compression
 # ## 1 : Gzip compression
 # ## 2 : Snappy compression
 # compression_codec = 0
 #
 # ## RequiredAcks is used in Produce Requests to tell the broker how many
 # ## replica acknowledgements it must see before responding
 # ## 0 : the producer never waits for an acknowledgement from the broker.

@@ -246,17 +258,17 @@
 # ## guarantee that no messages will be lost as long as at least one in
 # ## sync replica remains.
 # required_acks = -1
 #
 # ## The total number of times to retry sending a message
 # max_retry = 3
 #
 # ## Optional SSL Config
 # # ssl_ca = "/etc/telegraf/ca.pem"
 # # ssl_cert = "/etc/telegraf/cert.pem"
 # # ssl_key = "/etc/telegraf/key.pem"
 # ## Use SSL but skip chain & host verification
 # # insecure_skip_verify = false
 #
 # ## Data format to output.
 # ## Each data format has it's own unique set of configuration options, read
 # ## more about them here:
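
compression_codec and required_acks are plain integers in the config, per the value tables above. A sketch of a Kafka output using Snappy compression and waiting on all in-sync replicas; the broker address and topic are assumptions:

    [[outputs.kafka]]
      brokers = ["localhost:9092"]   ## assumed broker
      topic = "telegraf"
      compression_codec = 2          ## 2 = Snappy
      required_acks = -1             ## wait for all in-sync replicas
      data_format = "influx"
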
@@ -268,16 +280,22 @@
 # [[outputs.kinesis]]
 # ## Amazon REGION of kinesis endpoint.
 # region = "ap-southeast-2"
 #
 # ## Amazon Credentials
 # ## Credentials are loaded in the following order
-# ## 1) explicit credentials from 'access_key' and 'secret_key'
-# ## 2) environment variables
-# ## 3) shared credentials file
-# ## 4) EC2 Instance Profile
+# ## 1) Assumed credentials via STS if role_arn is specified
+# ## 2) explicit credentials from 'access_key' and 'secret_key'
+# ## 3) shared profile from 'profile'
+# ## 4) environment variables
+# ## 5) shared credentials file
+# ## 6) EC2 Instance Profile
 # #access_key = ""
 # #secret_key = ""
-#
+# #token = ""
+# #role_arn = ""
+# #profile = ""
+# #shared_credential_file = ""
+#
 # ## Kinesis StreamName must exist prior to starting telegraf.
 # streamname = "StreamName"
 # ## PartitionKey as used for sharding data.
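
The same six-step credential chain now applies to the Kinesis output. A sketch that loads a named profile from a shared credentials file; the profile name and path are assumptions:

    [[outputs.kinesis]]
      region = "ap-southeast-2"
      profile = "telegraf"                                  ## assumed profile name
      shared_credential_file = "/etc/telegraf/credentials"  ## assumed path
      streamname = "StreamName"
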
@@ -312,23 +330,23 @@
 # # Configuration for MQTT server to send metrics to
 # [[outputs.mqtt]]
 # servers = ["localhost:1883"] # required.
 #
 # ## MQTT outputs send metrics to this topic format
 # ## "<topic_prefix>/<hostname>/<pluginname>/"
 # ## ex: prefix/web01.example.com/mem
 # topic_prefix = "telegraf"
 #
 # ## username and password to connect MQTT server.
 # # username = "telegraf"
 # # password = "metricsmetricsmetricsmetrics"
 #
 # ## Optional SSL Config
 # # ssl_ca = "/etc/telegraf/ca.pem"
 # # ssl_cert = "/etc/telegraf/cert.pem"
 # # ssl_key = "/etc/telegraf/key.pem"
 # ## Use SSL but skip chain & host verification
 # # insecure_skip_verify = false
 #
 # ## Data format to output.
 # ## Each data format has it's own unique set of configuration options, read
 # ## more about them here:

@@ -342,7 +360,7 @@
 # server = "localhost:4150"
 # ## NSQ topic for producer messages
 # topic = "telegraf"
 #
 # ## Data format to output.
 # ## Each data format has it's own unique set of configuration options, read
 # ## more about them here:

@@ -354,14 +372,14 @@
 # [[outputs.opentsdb]]
 # ## prefix for metrics keys
 # prefix = "my.specific.prefix."
 #
 # ## Telnet Mode ##
 # ## DNS name of the OpenTSDB server in telnet mode
 # host = "opentsdb.example.com"
 #
 # ## Port of the OpenTSDB server in telnet mode
 # port = 4242
 #
 # ## Debug true - Prints OpenTSDB communication
 # debug = false

@@ -422,9 +440,6 @@
 [[inputs.kernel]]
 # no configuration

-# # Get kernel statistics from /proc/vmstat
-# [[inputs.kernel_vmstat]]
-# # no configuration

 # Read metrics about memory usage
 [[inputs.mem]]

@@ -466,7 +481,7 @@
 # ## Bcache sets path
 # ## If not specified, then default is:
 # bcachePath = "/sys/fs/bcache"
 #
 # ## By default, telegraf gather stats for all bcache devices
 # ## Setting devices will restrict the stats to the specified
 # ## bcache devices.

@@ -494,17 +509,17 @@
 # # Collects performance metrics from the MON and OSD nodes in a Ceph storage cluster.
 # [[inputs.ceph]]
 # ## All configuration values are optional, defaults are shown below
 #
 # ## location of ceph binary
 # ceph_binary = "/usr/bin/ceph"
 #
 # ## directory in which to look for socket files
 # socket_dir = "/var/run/ceph"
 #
 # ## prefix of MON and OSD socket files, used to determine socket type
 # mon_prefix = "ceph-mon"
 # osd_prefix = "ceph-osd"
 #
 # ## suffix used to identify socket files
 # socket_suffix = "asok"
@@ -513,29 +528,39 @@
 # [[inputs.cloudwatch]]
 # ## Amazon Region
 # region = 'us-east-1'
 #
 # ## Amazon Credentials
 # ## Credentials are loaded in the following order
-# ## 1) explicit credentials from 'access_key' and 'secret_key'
-# ## 2) environment variables
-# ## 3) shared credentials file
-# ## 4) EC2 Instance Profile
+# ## 1) Assumed credentials via STS if role_arn is specified
+# ## 2) explicit credentials from 'access_key' and 'secret_key'
+# ## 3) shared profile from 'profile'
+# ## 4) environment variables
+# ## 5) shared credentials file
+# ## 6) EC2 Instance Profile
 # #access_key = ""
 # #secret_key = ""
-#
+# #token = ""
+# #role_arn = ""
+# #profile = ""
+# #shared_credential_file = ""
+#
 # ## Requested CloudWatch aggregation Period (required - must be a multiple of 60s)
 # period = '1m'
 #
 # ## Collection Delay (required - must account for metrics availability via CloudWatch API)
 # delay = '1m'
 #
 # ## Recomended: use metric 'interval' that is a multiple of 'period' to avoid
 # ## gaps or overlap in pulled data
 # interval = '1m'
 #
+# ## Configure the TTL for the internal cache of metrics.
+# ## Defaults to 1 hr if not specified
+# #cache_ttl = '10m'
+#
 # ## Metric Statistic Namespace (required)
 # namespace = 'AWS/ELB'
 #
 # ## Metrics to Pull (optional)
 # ## Defaults to all Metrics in Namespace if nothing is provided
 # ## Refreshes Namespace available metrics every 1h
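
The new cache_ttl option controls how long the input caches the list of available CloudWatch metrics before re-querying the API. A sketch that shortens it from the 1 hr default:

    [[inputs.cloudwatch]]
      region = 'us-east-1'
      period = '1m'
      delay = '1m'
      interval = '1m'
      cache_ttl = '10m'   ## re-list available metrics every 10 minutes
      namespace = 'AWS/ELB'
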
@@ -582,17 +607,17 @@
 # [[inputs.dns_query]]
 # ## servers to query
 # servers = ["8.8.8.8"] # required
 #
 # ## Domains or subdomains to query. "."(root) is default
 # domains = ["."] # optional
 #
 # ## Query record type. Default is "A"
 # ## Posible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV.
 # record_type = "A" # optional
 #
 # ## Dns server port. 53 is default
 # port = 53 # optional
 #
 # ## Query timeout in seconds. Default is 2 seconds
 # timeout = 2 # optional

@@ -628,11 +653,11 @@
 # [[inputs.elasticsearch]]
 # ## specify a list of one or more Elasticsearch servers
 # servers = ["http://localhost:9200"]
 #
 # ## set local to false when you want to read the indices stats from all nodes
 # ## within the cluster
 # local = true
 #
 # ## set cluster_health to true when you want to also obtain cluster level stats
 # cluster_health = false

@@ -640,14 +665,18 @@
 # # Read metrics from one or more commands that can output to stdout
 # [[inputs.exec]]
 # ## Commands array
-# commands = ["/tmp/test.sh", "/usr/bin/mycollector --foo=bar"]
+# commands = [
+#   "/tmp/test.sh",
+#   "/usr/bin/mycollector --foo=bar",
+#   "/tmp/collect_*.sh"
+# ]
 #
 # ## Timeout for each command to complete.
 # timeout = "5s"
 #
 # ## measurement name suffix (for separating different commands)
 # name_suffix = "_mycollector"
 #
 # ## Data format to consume.
 # ## Each data format has it's own unique set of configuration options, read
 # ## more about them here:
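
The exec commands array now accepts glob patterns, so an entire directory of collection scripts can be swept up with one entry. A sketch under an assumed path:

    [[inputs.exec]]
      ## every script matching the glob runs on each collection interval
      commands = ["/etc/telegraf/scripts/collect_*.sh"]   ## assumed path
      timeout = "5s"
      data_format = "influx"
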
@@ -675,7 +704,7 @@
 # [[inputs.haproxy]]
 # ## An array of address to gather stats about. Specify an ip on hostname
 # ## with optional port. ie localhost, 10.10.3.33:1936, etc.
 #
 # ## If no servers are specified, then default to 127.0.0.1:1936
 # servers = ["http://myhaproxy.com:1936", "http://anotherhaproxy.com:1936"]
 # ## Or you can also use local socket
 
@@ -699,41 +728,48 @@
 # # body = '''
 # # {'fake':'data'}
 # # '''
+#
+# ## Optional SSL Config
+# # ssl_ca = "/etc/telegraf/ca.pem"
+# # ssl_cert = "/etc/telegraf/cert.pem"
+# # ssl_key = "/etc/telegraf/key.pem"
+# ## Use SSL but skip chain & host verification
+# # insecure_skip_verify = false

 # # Read flattened metrics from one or more JSON HTTP endpoints
 # [[inputs.httpjson]]
 # ## NOTE This plugin only reads numerical measurements, strings and booleans
 # ## will be ignored.
 #
 # ## a name for the service being polled
 # name = "webserver_stats"
 #
 # ## URL of each server in the service's cluster
 # servers = [
 #   "http://localhost:9999/stats/",
 #   "http://localhost:9998/stats/",
 # ]
 #
 # ## HTTP method to use: GET or POST (case-sensitive)
 # method = "GET"
 #
 # ## List of tag names to extract from top-level of JSON server response
 # # tag_keys = [
 # #   "my_tag_1",
 # #   "my_tag_2"
 # # ]
 #
 # ## HTTP parameters (all values must be strings)
 # [inputs.httpjson.parameters]
 #   event_type = "cpu_spike"
 #   threshold = "0.75"
 #
 # ## HTTP Header parameters (all values must be strings)
 # # [inputs.httpjson.headers]
 # #   X-Auth-Token = "my-xauth-token"
 # #   apiVersion = "v1"
 #
 # ## Optional SSL Config
 # # ssl_ca = "/etc/telegraf/ca.pem"
 # # ssl_cert = "/etc/telegraf/cert.pem"
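
The SSL options added in the http_response hunk above let the checker probe HTTPS endpoints signed by a private CA. A sketch with an assumed address and CA path:

    [[inputs.http_response]]
      address = "https://status.example.internal/health"   ## assumed endpoint
      ssl_ca = "/etc/telegraf/ca.pem"                      ## assumed CA bundle
      ## or, for self-signed endpoints, skip verification entirely:
      # insecure_skip_verify = true
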
@@ -747,7 +783,7 @@
 # ## Works with InfluxDB debug endpoints out of the box,
 # ## but other services can use this format too.
 # ## See the influxdb plugin's README for more details.
 #
 # ## Multiple URLs from which to read InfluxDB-formatted JSON
 # urls = [
 #   "http://localhost:8086/debug/vars"

@@ -768,7 +804,7 @@
 # [[inputs.jolokia]]
 # ## This is the context root used to compose the jolokia url
 # context = "/jolokia"
 #
 # ## This specifies the mode used
 # # mode = "proxy"
 # #

@@ -778,8 +814,8 @@
 # # [inputs.jolokia.proxy]
 # #   host = "127.0.0.1"
 # #   port = "8080"
 #
 #
 # ## List of servers exposing jolokia read service
 # [[inputs.jolokia.servers]]
 #   name = "as-server-01"

@@ -787,7 +823,7 @@
 #   port = "8080"
 #   # username = "myuser"
 #   # password = "mypassword"
 #
 # ## List of metrics collected on above servers
 # ## Each metric consists in a name, a jmx path and either
 # ## a pass or drop slice attribute.

@@ -796,13 +832,13 @@
 #   name = "heap_memory_usage"
 #   mbean = "java.lang:type=Memory"
 #   attribute = "HeapMemoryUsage"
 #
 # ## This collect thread counts metrics.
 # [[inputs.jolokia.metrics]]
 #   name = "thread_count"
 #   mbean = "java.lang:type=Threading"
 #   attribute = "TotalStartedThreadCount,ThreadCount,DaemonThreadCount,PeakThreadCount"
 #
 # ## This collect number of class loaded/unloaded counts metrics.
 # [[inputs.jolokia.metrics]]
 #   name = "class_count"

@@ -955,7 +991,7 @@
 # address = "github.com:80"
 # ## Set timeout
 # timeout = "1s"
 #
 # ## Optional string sent to the server
 # # send = "ssh"
 # ## Optional expected string in answer

@@ -1069,7 +1105,7 @@
 # ## to grab metrics for.
 # ##
 # address = "host=localhost user=postgres sslmode=disable"
 #
 # ## A list of databases to pull metrics about. If not specified, metrics for all
 # ## databases are gathered.
 # # databases = ["app_production", "testing"]

@@ -1151,7 +1187,7 @@
 # # pattern = "nginx"
 # ## user as argument for pgrep (ie, pgrep -u <user>)
 # # user = "nginx"
 #
 # ## override for process_name
 # ## This is optional; default is sourced from /proc/<pid>/status
 # # process_name = "bar"

@@ -1165,7 +1201,7 @@
 # [[inputs.prometheus]]
 # ## An array of urls to scrape metrics from.
 # urls = ["http://localhost:9100/metrics"]
 #
 # ## Use SSL but skip chain & host verification
 # # insecure_skip_verify = false
 # ## Use bearer token for authorization

@@ -1184,7 +1220,7 @@
 # # name = "rmq-server-1" # optional tag
 # # username = "guest"
 # # password = "guest"
 #
 # ## A list of nodes to pull metrics about. If not specified, metrics for
 # ## all nodes are gathered.
 # # nodes = ["rabbit@node1", "rabbit@node2"]

@@ -1248,7 +1284,7 @@
 # collect = ["mybulk", "sysservices", "sysdescr"]
 # # Simple list of OIDs to get, in addition to "collect"
 # get_oids = []
 #
 # [[inputs.snmp.host]]
 #   address = "192.168.2.3:161"
 #   community = "public"

@@ -1260,31 +1296,31 @@
 #     "ifNumber",
 #     ".1.3.6.1.2.1.1.3.0",
 #   ]
 #
 # [[inputs.snmp.get]]
 #   name = "ifnumber"
 #   oid = "ifNumber"
 #
 # [[inputs.snmp.get]]
 #   name = "interface_speed"
 #   oid = "ifSpeed"
 #   instance = "0"
 #
 # [[inputs.snmp.get]]
 #   name = "sysuptime"
 #   oid = ".1.3.6.1.2.1.1.3.0"
 #   unit = "second"
 #
 # [[inputs.snmp.bulk]]
 #   name = "mybulk"
 #   max_repetition = 127
 #   oid = ".1.3.6.1.2.1.1"
 #
 # [[inputs.snmp.bulk]]
 #   name = "ifoutoctets"
 #   max_repetition = 127
 #   oid = "ifOutOctets"
 #
 # [[inputs.snmp.host]]
 #   address = "192.168.2.13:161"
 #   #address = "127.0.0.1:161"

@@ -1297,19 +1333,19 @@
 #   [[inputs.snmp.host.table]]
 #     name = "iftable3"
 #     include_instances = ["enp5s0", "eth1"]
 #
 # # SNMP TABLEs
 # # table without mapping neither subtables
 # [[inputs.snmp.table]]
 #   name = "iftable1"
 #   oid = ".1.3.6.1.2.1.31.1.1.1"
 #
 # # table without mapping but with subtables
 # [[inputs.snmp.table]]
 #   name = "iftable2"
 #   oid = ".1.3.6.1.2.1.31.1.1.1"
 #   sub_tables = [".1.3.6.1.2.1.2.2.1.13"]
 #
 # # table with mapping but without subtables
 # [[inputs.snmp.table]]
 #   name = "iftable3"

@@ -1317,7 +1353,7 @@
 #   # if empty. get all instances
 #   mapping_table = ".1.3.6.1.2.1.31.1.1.1.1"
 #   # if empty, get all subtables
 #
 # # table with both mapping and subtables
 # [[inputs.snmp.table]]
 #   name = "iftable4"
@@ -1360,10 +1396,11 @@
 # [[inputs.varnish]]
 # ## The default location of the varnishstat binary can be overridden with:
 # binary = "/usr/bin/varnishstat"
 #
 # ## By default, telegraf gather stats for 3 metric points.
 # ## Setting stats will override the defaults shown below.
-# ## stats may also be set to ["all"], which will collect all stats
+# ## Glob matching can be used, ie, stats = ["MAIN.*"]
+# ## stats may also be set to ["*"], which will collect all stats
 # stats = ["MAIN.cache_hit", "MAIN.cache_miss", "MAIN.uptime"]
@@ -1372,11 +1409,11 @@
 # ## ZFS kstat path
 # ## If not specified, then default is:
 # kstatPath = "/proc/spl/kstat/zfs"
 #
 # ## By default, telegraf gather all zfs stats
 # ## If not specified, then default is:
 # kstatMetrics = ["arcstats", "zfetchstats", "vdev_cache_stats"]
 #
 # ## By default, don't gather zpool stats
 # poolMetrics = false

@@ -1385,7 +1422,7 @@
 # [[inputs.zookeeper]]
 # ## An array of address to gather stats about. Specify an ip or hostname
 # ## with port. ie localhost:2181, 10.0.0.1:2181, etc.
 #
 # ## If no servers are specified, then localhost is used as the host.
 # ## If no port is specified, 2181 is used
 # servers = [":2181"]

@@ -1414,7 +1451,7 @@
 # consumer_group = "telegraf_metrics_consumers"
 # ## Offset (must be either "oldest" or "newest")
 # offset = "oldest"
 #
 # ## Data format to consume.
 # ## Each data format has it's own unique set of configuration options, read
 # ## more about them here:

@@ -1427,32 +1464,32 @@
 # servers = ["localhost:1883"]
 # ## MQTT QoS, must be 0, 1, or 2
 # qos = 0
 #
 # ## Topics to subscribe to
 # topics = [
 #   "telegraf/host01/cpu",
 #   "telegraf/+/mem",
 #   "sensors/#",
 # ]
 #
 # # if true, messages that can't be delivered while the subscriber is offline
 # # will be delivered when it comes back (such as on service restart).
 # # NOTE: if true, client_id MUST be set
 # persistent_session = false
 # # If empty, a random client ID will be generated.
 # client_id = ""
 #
 # ## username and password to connect MQTT server.
 # # username = "telegraf"
 # # password = "metricsmetricsmetricsmetrics"
 #
 # ## Optional SSL Config
 # # ssl_ca = "/etc/telegraf/ca.pem"
 # # ssl_cert = "/etc/telegraf/cert.pem"
 # # ssl_key = "/etc/telegraf/key.pem"
 # ## Use SSL but skip chain & host verification
 # # insecure_skip_verify = false
 #
 # ## Data format to consume.
 # ## Each data format has it's own unique set of configuration options, read
 # ## more about them here:
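
As the notes above say, a durable MQTT subscription needs a fixed client_id. A sketch of a consumer that keeps queued messages across restarts; the client_id value is an assumption:

    [[inputs.mqtt_consumer]]
      servers = ["localhost:1883"]
      qos = 1
      topics = ["sensors/#"]
      ## persistent_session only works with an explicit client_id
      persistent_session = true
      client_id = "telegraf-sensors-01"   ## assumed, must be stable
      data_format = "influx"
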
@@ -1470,7 +1507,7 @@
 # subjects = ["telegraf"]
 # ## name a queue group
 # queue_group = "telegraf_consumers"
 #
 # ## Data format to consume.
 # ## Each data format has it's own unique set of configuration options, read
 # ## more about them here:

@@ -1498,24 +1535,24 @@
 # delete_timings = true
 # ## Percentiles to calculate for timing & histogram stats
 # percentiles = [90]
 #
 # ## separator to use between elements of a statsd metric
 # metric_separator = "_"
 #
 # ## Parses tags in the datadog statsd format
 # ## http://docs.datadoghq.com/guides/dogstatsd/
 # parse_data_dog_tags = false
 #
 # ## Statsd data translation templates, more info can be read here:
 # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#graphite
 # # templates = [
 # #   "cpu.* measurement*"
 # # ]
 #
 # ## Number of UDP messages allowed to queue up, once filled,
 # ## the statsd server will start dropping packets
 # allowed_pending_messages = 10000
 #
 # ## Number of timing/histogram values to track per-measurement in the
 # ## calculation of percentiles. Raising this limit increases the accuracy
 # ## of percentiles but also increases the memory usage and cpu time.
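
Graphite-style templates map dotted statsd names onto measurements and tags. A sketch using the template from the sample above, where measurement* folds every remaining name segment into the measurement, joined by metric_separator:

    [[inputs.statsd]]
      service_address = ":8125"
      metric_separator = "_"
      ## "cpu.load.longterm" becomes measurement "cpu_load_longterm"
      templates = [
        "cpu.* measurement*"
      ]
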
@@ -1536,7 +1573,7 @@
 # files = ["/var/mymetrics.out"]
 # ## Read file from beginning.
 # from_beginning = false
 #
 # ## Data format to consume.
 # ## Each data format has it's own unique set of configuration options, read
 # ## more about them here:

@@ -1548,14 +1585,14 @@
 # [[inputs.tcp_listener]]
 # ## Address and port to host TCP listener on
 # service_address = ":8094"
 #
 # ## Number of TCP messages allowed to queue up. Once filled, the
 # ## TCP listener will start dropping packets.
 # allowed_pending_messages = 10000
 #
 # ## Maximum number of concurrent TCP connections to allow
 # max_tcp_connections = 250
 #
 # ## Data format to consume.
 # ## Each data format has it's own unique set of configuration options, read
 # ## more about them here:

@@ -1567,24 +1604,14 @@
 # [[inputs.udp_listener]]
 # ## Address and port to host UDP listener on
 # service_address = ":8092"
 #
 # ## Number of UDP messages allowed to queue up. Once filled, the
 # ## UDP listener will start dropping packets.
 # allowed_pending_messages = 10000
 #
 # ## Data format to consume.
 # ## Each data format has it's own unique set of configuration options, read
 # ## more about them here:
 # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
 # data_format = "influx"
-
-# # Collects conntrack stats from the configured directories and files.
-# [[inputs.conntrack]]
-# ## The following defaults would work with multiple versions of contrack. Note the nf_ and ip_
-# ## filename prefixes are mutually exclusive across conntrack versions, as are the directory locations.
-#
-# ## Superset of filenames to look for within the conntrack dirs. Missing files will be ignored.
-# files = ["ip_conntrack_count","ip_conntrack_max","nf_conntrack_count","nf_conntrack_max"]
-#
-# ## Directories to search within for the conntrack files above. Missing directrories will be ignored.
-# dirs = ["/proc/sys/net/ipv4/netfilter","/proc/sys/net/netfilter"]