Update README & etc/telegraf.conf

Cameron Sparr 2016-05-31 11:02:10 +01:00
parent eeeab5192b
commit 069764f05e
2 changed files with 152 additions and 124 deletions

README.md

@@ -233,6 +233,7 @@ want to add support for another service or third-party API.
* [datadog](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/datadog)
* [file](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/file)
* [graphite](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/graphite)
* [graylog](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/graylog)
* [instrumental](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/instrumental)
* [kafka](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/kafka)
* [librato](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/librato)

etc/telegraf.conf

@@ -106,10 +106,10 @@
# [[outputs.amon]]
# ## Amon Server Key
# server_key = "my-server-key" # required.
#
#
# ## Amon Instance URL
# amon_instance = "https://youramoninstance" # required
#
#
# ## Connection timeout.
# # timeout = "5s"
@@ -125,21 +125,21 @@
# ## Telegraf tag to use as a routing key
# ##   ie, if this tag exists, its value will be used as the routing key
# routing_tag = "host"
#
#
# ## InfluxDB retention policy
# # retention_policy = "default"
# ## InfluxDB database
# # database = "telegraf"
# ## InfluxDB precision
# # precision = "s"
#
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false
#
#
# ## Data format to output.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
@@ -151,16 +151,22 @@
# [[outputs.cloudwatch]]
# ## Amazon REGION
# region = 'us-east-1'
#
#
# ## Amazon Credentials
# ## Credentials are loaded in the following order
# ## 1) explicit credentials from 'access_key' and 'secret_key'
# ## 2) environment variables
# ## 3) shared credentials file
# ## 4) EC2 Instance Profile
# ## 1) Assumed credentials via STS if role_arn is specified
# ## 2) explicit credentials from 'access_key' and 'secret_key'
# ## 3) shared profile from 'profile'
# ## 4) environment variables
# ## 5) shared credentials file
# ## 6) EC2 Instance Profile
# #access_key = ""
# #secret_key = ""
#
# #token = ""
# #role_arn = ""
# #profile = ""
# #shared_credential_file = ""
#
# ## Namespace for the CloudWatch MetricDatums
# namespace = 'InfluxData/Telegraf'
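For illustration, a cross-account setup using the assumed-role option above might look like the following sketch; the role ARN is hypothetical, and every other credential key is left to the documented lookup order:

  [[outputs.cloudwatch]]
    region = 'us-east-1'
    ## hypothetical role, assumed via STS before metrics are written
    role_arn = 'arn:aws:iam::123456789012:role/telegraf-cloudwatch'
    namespace = 'InfluxData/Telegraf'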
@@ -169,7 +175,7 @@
# [[outputs.datadog]]
# ## Datadog API key
# apikey = "my-secret-key" # required.
#
#
# ## Connection timeout.
# # timeout = "5s"
@@ -178,7 +184,7 @@
# [[outputs.file]]
# ## Files to write to; "stdout" is a specially handled file.
# files = ["stdout", "/tmp/metrics.out"]
#
#
# ## Data format to output.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
@@ -199,6 +205,12 @@
# timeout = 2
# # Send telegraf metrics to graylog(s)
# [[outputs.graylog]]
# ##   UDP endpoint for your Graylog instance.
# servers = ["127.0.0.1:12201", "192.168.1.1:12201"]
# # Configuration for sending metrics to an Instrumental project
# [[outputs.instrumental]]
# ## Project API Token (required)
@@ -223,14 +235,14 @@
# ## Telegraf tag to use as a routing key
# ##   ie, if this tag exists, its value will be used as the routing key
# routing_tag = "host"
#
#
# ## CompressionCodec represents the various compression codecs recognized by
# ## Kafka in messages.
# ## 0 : No compression
# ## 1 : Gzip compression
# ## 2 : Snappy compression
# compression_codec = 0
#
#
# ## RequiredAcks is used in Produce Requests to tell the broker how many
# ## replica acknowledgements it must see before responding
# ## 0 : the producer never waits for an acknowledgement from the broker.
@@ -246,17 +258,17 @@
# ## guarantee that no messages will be lost as long as at least one in
# ## sync replica remains.
# required_acks = -1
#
#
# ## The total number of times to retry sending a message
# max_retry = 3
#
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false
#
#
# ## Data format to output.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
@@ -268,16 +280,22 @@
# [[outputs.kinesis]]
# ## Amazon REGION of kinesis endpoint.
# region = "ap-southeast-2"
#
#
# ## Amazon Credentials
# ## Credentials are loaded in the following order
# ## 1) explicit credentials from 'access_key' and 'secret_key'
# ## 2) environment variables
# ## 3) shared credentials file
# ## 4) EC2 Instance Profile
# ## 1) Assumed credentials via STS if role_arn is specified
# ## 2) explicit credentials from 'access_key' and 'secret_key'
# ## 3) shared profile from 'profile'
# ## 4) environment variables
# ## 5) shared credentials file
# ## 6) EC2 Instance Profile
# #access_key = ""
# #secret_key = ""
#
# #token = ""
# #role_arn = ""
# #profile = ""
# #shared_credential_file = ""
#
# ## Kinesis StreamName must exist prior to starting telegraf.
# streamname = "StreamName"
# ## PartitionKey as used for sharding data.
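As a companion sketch, shared-profile credentials (option 3 in the list above) could be wired up like so; the profile name and file path are hypothetical:

  [[outputs.kinesis]]
    region = "ap-southeast-2"
    ## hypothetical profile name and credentials file
    profile = "telegraf"
    shared_credential_file = "/etc/telegraf/aws.creds"
    streamname = "StreamName"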
@@ -312,23 +330,23 @@
# # Configuration for MQTT server to send metrics to
# [[outputs.mqtt]]
# servers = ["localhost:1883"] # required.
#
#
# ## MQTT outputs send metrics to this topic format
# ## "<topic_prefix>/<hostname>/<pluginname>/"
# ## ex: prefix/web01.example.com/mem
# topic_prefix = "telegraf"
#
#
# ## username and password to connect to the MQTT server.
# # username = "telegraf"
# # password = "metricsmetricsmetricsmetrics"
#
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false
#
#
# ## Data format to output.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
@@ -342,7 +360,7 @@
# server = "localhost:4150"
# ## NSQ topic for producer messages
# topic = "telegraf"
#
#
# ## Data format to output.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
@@ -354,14 +372,14 @@
# [[outputs.opentsdb]]
# ## prefix for metrics keys
# prefix = "my.specific.prefix."
#
#
# ## Telnet Mode ##
# ## DNS name of the OpenTSDB server in telnet mode
# host = "opentsdb.example.com"
#
#
# ## Port of the OpenTSDB server in telnet mode
# port = 4242
#
#
# ## Debug true - Prints OpenTSDB communication
# debug = false
@@ -422,9 +440,6 @@
[[inputs.kernel]]
# no configuration
# # Get kernel statistics from /proc/vmstat
# [[inputs.kernel_vmstat]]
# # no configuration
# Read metrics about memory usage
[[inputs.mem]]
@@ -466,7 +481,7 @@
# ## Bcache sets path
# ## If not specified, then default is:
# bcachePath = "/sys/fs/bcache"
#
#
# ## By default, telegraf gathers stats for all bcache devices
# ## Setting devices will restrict the stats to the specified
# ## bcache devices.
@@ -494,17 +509,17 @@
# # Collects performance metrics from the MON and OSD nodes in a Ceph storage cluster.
# [[inputs.ceph]]
# ## All configuration values are optional, defaults are shown below
#
#
# ## location of ceph binary
# ceph_binary = "/usr/bin/ceph"
#
#
# ## directory in which to look for socket files
# socket_dir = "/var/run/ceph"
#
#
# ## prefix of MON and OSD socket files, used to determine socket type
# mon_prefix = "ceph-mon"
# osd_prefix = "ceph-osd"
#
#
# ## suffix used to identify socket files
# socket_suffix = "asok"
@@ -513,29 +528,39 @@
# [[inputs.cloudwatch]]
# ## Amazon Region
# region = 'us-east-1'
#
#
# ## Amazon Credentials
# ## Credentials are loaded in the following order
# ## 1) explicit credentials from 'access_key' and 'secret_key'
# ## 2) environment variables
# ## 3) shared credentials file
# ## 4) EC2 Instance Profile
# ## 1) Assumed credentials via STS if role_arn is specified
# ## 2) explicit credentials from 'access_key' and 'secret_key'
# ## 3) shared profile from 'profile'
# ## 4) environment variables
# ## 5) shared credentials file
# ## 6) EC2 Instance Profile
# #access_key = ""
# #secret_key = ""
#
# #token = ""
# #role_arn = ""
# #profile = ""
# #shared_credential_file = ""
#
# ## Requested CloudWatch aggregation Period (required - must be a multiple of 60s)
# period = '1m'
#
#
# ## Collection Delay (required - must account for metrics availability via CloudWatch API)
# delay = '1m'
#
#
# ## Recommended: use a metric 'interval' that is a multiple of 'period' to avoid
# ## gaps or overlap in pulled data
# interval = '1m'
#
#
# ## Configure the TTL for the internal cache of metrics.
# ## Defaults to 1 hr if not specified
# #cache_ttl = '10m'
#
# ## Metric Statistic Namespace (required)
# namespace = 'AWS/ELB'
#
#
# ## Metrics to Pull (optional)
# ## Defaults to all Metrics in Namespace if nothing is provided
# ## Refreshes Namespace available metrics every 1h
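For illustration, such a metrics filter might look like the sketch below; the metric names and load balancer value are hypothetical:

  [[inputs.cloudwatch.metrics]]
    names = ['Latency', 'RequestCount']
    ## dimension filter: only pull datapoints for this load balancer
    [[inputs.cloudwatch.metrics.dimensions]]
      name = 'LoadBalancerName'
      value = 'p-example'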
@@ -582,17 +607,17 @@
# [[inputs.dns_query]]
# ## servers to query
# servers = ["8.8.8.8"] # required
#
#
# ## Domains or subdomains to query. "."(root) is default
# domains = ["."] # optional
#
#
# ## Query record type. Default is "A"
# ## Possible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV.
# record_type = "A" # optional
#
#
# ## DNS server port. 53 is default
# port = 53 # optional
#
#
# ## Query timeout in seconds. Default is 2 seconds
# timeout = 2 # optional
@@ -628,11 +653,11 @@
# [[inputs.elasticsearch]]
# ## specify a list of one or more Elasticsearch servers
# servers = ["http://localhost:9200"]
#
#
# ## set local to false when you want to read the indices stats from all nodes
# ## within the cluster
# local = true
#
#
# ## set cluster_health to true when you want to also obtain cluster level stats
# cluster_health = false
@@ -640,14 +665,18 @@
# # Read metrics from one or more commands that can output to stdout
# [[inputs.exec]]
# ## Commands array
# commands = ["/tmp/test.sh", "/usr/bin/mycollector --foo=bar"]
#
# commands = [
# "/tmp/test.sh",
# "/usr/bin/mycollector --foo=bar",
# "/tmp/collect_*.sh"
# ]
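The glob entry above is worth calling out: telegraf expands the pattern at run time, so, with hypothetical filenames, /tmp/collect_cpu.sh and /tmp/collect_disk.sh would both be executed if present.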
#
# ## Timeout for each command to complete.
# timeout = "5s"
#
#
# ## measurement name suffix (for separating different commands)
# name_suffix = "_mycollector"
#
#
# ## Data format to consume.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
@@ -675,7 +704,7 @@
# [[inputs.haproxy]]
# ## An array of addresses to gather stats about. Specify an ip or hostname
# ## with optional port. ie localhost, 10.10.3.33:1936, etc.
#
#
# ## If no servers are specified, then 127.0.0.1:1936 is used by default
# servers = ["http://myhaproxy.com:1936", "http://anotherhaproxy.com:1936"]
# ## Or you can also use a local socket
@@ -699,41 +728,48 @@
# # body = '''
# # {'fake':'data'}
# # '''
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false
# # Read flattened metrics from one or more JSON HTTP endpoints
# [[inputs.httpjson]]
# ## NOTE: This plugin only reads numerical measurements; strings and booleans
# ## will be ignored.
#
#
# ## a name for the service being polled
# name = "webserver_stats"
#
#
# ## URL of each server in the service's cluster
# servers = [
# "http://localhost:9999/stats/",
# "http://localhost:9998/stats/",
# ]
#
#
# ## HTTP method to use: GET or POST (case-sensitive)
# method = "GET"
#
#
# ## List of tag names to extract from top-level of JSON server response
# # tag_keys = [
# # "my_tag_1",
# # "my_tag_2"
# # ]
#
#
# ## HTTP parameters (all values must be strings)
# [inputs.httpjson.parameters]
# event_type = "cpu_spike"
# threshold = "0.75"
#
#
# ## HTTP Header parameters (all values must be strings)
# # [inputs.httpjson.headers]
# # X-Auth-Token = "my-xauth-token"
# # apiVersion = "v1"
#
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
@@ -747,7 +783,7 @@
# ## Works with InfluxDB debug endpoints out of the box,
# ## but other services can use this format too.
# ## See the influxdb plugin's README for more details.
#
#
# ## Multiple URLs from which to read InfluxDB-formatted JSON
# urls = [
# "http://localhost:8086/debug/vars"
@@ -768,7 +804,7 @@
# [[inputs.jolokia]]
# ## This is the context root used to compose the jolokia url
# context = "/jolokia"
#
#
# ## This specifies the mode used
# # mode = "proxy"
# #
@@ -778,8 +814,8 @@
# # [inputs.jolokia.proxy]
# # host = "127.0.0.1"
# # port = "8080"
#
#
#
#
# ## List of servers exposing jolokia read service
# [[inputs.jolokia.servers]]
# name = "as-server-01"
@@ -787,7 +823,7 @@
# port = "8080"
# # username = "myuser"
# # password = "mypassword"
#
#
# ## List of metrics collected on the above servers
# ## Each metric consists of a name, a jmx path and either
# ## a pass or drop slice attribute.
@@ -796,13 +832,13 @@
# name = "heap_memory_usage"
# mbean = "java.lang:type=Memory"
# attribute = "HeapMemoryUsage"
#
#
# ## This collects thread count metrics.
# [[inputs.jolokia.metrics]]
# name = "thread_count"
# mbean = "java.lang:type=Threading"
# attribute = "TotalStartedThreadCount,ThreadCount,DaemonThreadCount,PeakThreadCount"
#
#
# ## This collects loaded/unloaded class count metrics.
# [[inputs.jolokia.metrics]]
# name = "class_count"
@@ -955,7 +991,7 @@
# address = "github.com:80"
# ## Set timeout
# timeout = "1s"
#
#
# ## Optional string sent to the server
# # send = "ssh"
# ## Optional expected string in answer
@@ -1069,7 +1105,7 @@
# ## to grab metrics for.
# ##
# address = "host=localhost user=postgres sslmode=disable"
#
#
# ## A list of databases to pull metrics about. If not specified, metrics for all
# ## databases are gathered.
# # databases = ["app_production", "testing"]
@@ -1151,7 +1187,7 @@
# # pattern = "nginx"
# ## user as argument for pgrep (ie, pgrep -u <user>)
# # user = "nginx"
#
#
# ## override for process_name
# ## This is optional; default is sourced from /proc/<pid>/status
# # process_name = "bar"
@@ -1165,7 +1201,7 @@
# [[inputs.prometheus]]
# ## An array of urls to scrape metrics from.
# urls = ["http://localhost:9100/metrics"]
#
#
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false
# ## Use bearer token for authorization
@@ -1184,7 +1220,7 @@
# # name = "rmq-server-1" # optional tag
# # username = "guest"
# # password = "guest"
#
#
# ## A list of nodes to pull metrics about. If not specified, metrics for
# ## all nodes are gathered.
# # nodes = ["rabbit@node1", "rabbit@node2"]
@@ -1248,7 +1284,7 @@
# collect = ["mybulk", "sysservices", "sysdescr"]
# # Simple list of OIDs to get, in addition to "collect"
# get_oids = []
#
#
# [[inputs.snmp.host]]
# address = "192.168.2.3:161"
# community = "public"
@@ -1260,31 +1296,31 @@
# "ifNumber",
# ".1.3.6.1.2.1.1.3.0",
# ]
#
#
# [[inputs.snmp.get]]
# name = "ifnumber"
# oid = "ifNumber"
#
#
# [[inputs.snmp.get]]
# name = "interface_speed"
# oid = "ifSpeed"
# instance = "0"
#
#
# [[inputs.snmp.get]]
# name = "sysuptime"
# oid = ".1.3.6.1.2.1.1.3.0"
# unit = "second"
#
#
# [[inputs.snmp.bulk]]
# name = "mybulk"
# max_repetition = 127
# oid = ".1.3.6.1.2.1.1"
#
#
# [[inputs.snmp.bulk]]
# name = "ifoutoctets"
# max_repetition = 127
# oid = "ifOutOctets"
#
#
# [[inputs.snmp.host]]
# address = "192.168.2.13:161"
# #address = "127.0.0.1:161"
@@ -1297,19 +1333,19 @@
# [[inputs.snmp.host.table]]
# name = "iftable3"
# include_instances = ["enp5s0", "eth1"]
#
#
# # SNMP TABLEs
# # table with neither mapping nor subtables
# [[inputs.snmp.table]]
# name = "iftable1"
# oid = ".1.3.6.1.2.1.31.1.1.1"
#
#
# # table without mapping but with subtables
# [[inputs.snmp.table]]
# name = "iftable2"
# oid = ".1.3.6.1.2.1.31.1.1.1"
# sub_tables = [".1.3.6.1.2.1.2.2.1.13"]
#
#
# # table with mapping but without subtables
# [[inputs.snmp.table]]
# name = "iftable3"
@@ -1317,7 +1353,7 @@
# # if empty, get all instances
# mapping_table = ".1.3.6.1.2.1.31.1.1.1.1"
# # if empty, get all subtables
#
#
# # table with both mapping and subtables
# [[inputs.snmp.table]]
# name = "iftable4"
@@ -1360,10 +1396,11 @@
# [[inputs.varnish]]
# ## The default location of the varnishstat binary can be overridden with:
# binary = "/usr/bin/varnishstat"
#
#
# ## By default, telegraf gathers stats for 3 metric points.
# ## Setting stats will override the defaults shown below.
# ## stats may also be set to ["all"], which will collect all stats
# ## Glob matching can be used, ie, stats = ["MAIN.*"]
# ## stats may also be set to ["*"], which will collect all stats
# stats = ["MAIN.cache_hit", "MAIN.cache_miss", "MAIN.uptime"]
@@ -1372,11 +1409,11 @@
# ## ZFS kstat path
# ## If not specified, then default is:
# kstatPath = "/proc/spl/kstat/zfs"
#
#
# ## By default, telegraf gathers all zfs stats
# ## If not specified, then default is:
# kstatMetrics = ["arcstats", "zfetchstats", "vdev_cache_stats"]
#
#
# ## By default, don't gather zpool stats
# poolMetrics = false
@@ -1385,7 +1422,7 @@
# [[inputs.zookeeper]]
# ## An array of addresses to gather stats about. Specify an ip or hostname
# ## with port. ie localhost:2181, 10.0.0.1:2181, etc.
#
#
# ## If no servers are specified, then localhost is used as the host.
# ## If no port is specified, 2181 is used
# servers = [":2181"]
@@ -1414,7 +1451,7 @@
# consumer_group = "telegraf_metrics_consumers"
# ## Offset (must be either "oldest" or "newest")
# offset = "oldest"
#
#
# ## Data format to consume.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
@@ -1427,32 +1464,32 @@
# servers = ["localhost:1883"]
# ## MQTT QoS, must be 0, 1, or 2
# qos = 0
#
#
# ## Topics to subscribe to
# topics = [
# "telegraf/host01/cpu",
# "telegraf/+/mem",
# "sensors/#",
# ]
#
#
# # if true, messages that can't be delivered while the subscriber is offline
# # will be delivered when it comes back (such as on service restart).
# # NOTE: if true, client_id MUST be set
# persistent_session = false
# # If empty, a random client ID will be generated.
# client_id = ""
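A minimal durable-subscription sketch, assuming a hypothetical client ID (required whenever persistent_session is true):

  persistent_session = true
  client_id = "telegraf-host01"   ## hypothetical; must be non-empty for persistence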
#
#
# ## username and password to connect to the MQTT server.
# # username = "telegraf"
# # password = "metricsmetricsmetricsmetrics"
#
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false
#
#
# ## Data format to consume.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
@@ -1470,7 +1507,7 @@
# subjects = ["telegraf"]
# ## name a queue group
# queue_group = "telegraf_consumers"
#
#
# ## Data format to consume.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
@@ -1498,24 +1535,24 @@
# delete_timings = true
# ## Percentiles to calculate for timing & histogram stats
# percentiles = [90]
#
#
# ## separator to use between elements of a statsd metric
# metric_separator = "_"
#
#
# ## Parses tags in the datadog statsd format
# ## http://docs.datadoghq.com/guides/dogstatsd/
# parse_data_dog_tags = false
#
#
# ## Statsd data translation templates, more info can be read here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#graphite
# # templates = [
# # "cpu.* measurement*"
# # ]
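As a worked sketch of how such templates parse bucket names (the second template and all metric names are hypothetical):

  # templates = [
  #   "cpu.* measurement*",        ## cpu.load.shortterm -> measurement cpu_load_shortterm
  #   "region.host.measurement*"   ## us-west.server01.cpu.load -> measurement cpu_load,
  #                                ##   tags region=us-west, host=server01
  # ]

Parts consumed by measurement* are joined with the metric_separator configured above.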
#
#
# ## Number of UDP messages allowed to queue up; once filled,
# ## the statsd server will start dropping packets
# allowed_pending_messages = 10000
#
#
# ## Number of timing/histogram values to track per-measurement in the
# ## calculation of percentiles. Raising this limit increases the accuracy
# ## of percentiles but also increases the memory usage and cpu time.
@@ -1536,7 +1573,7 @@
# files = ["/var/mymetrics.out"]
# ## Read file from beginning.
# from_beginning = false
#
#
# ## Data format to consume.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
@@ -1548,14 +1585,14 @@
# [[inputs.tcp_listener]]
# ## Address and port to host TCP listener on
# service_address = ":8094"
#
#
# ## Number of TCP messages allowed to queue up. Once filled, the
# ## TCP listener will start dropping packets.
# allowed_pending_messages = 10000
#
#
# ## Maximum number of concurrent TCP connections to allow
# max_tcp_connections = 250
#
#
# ## Data format to consume.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
@@ -1567,24 +1604,14 @@
# [[inputs.udp_listener]]
# ## Address and port to host UDP listener on
# service_address = ":8092"
#
#
# ## Number of UDP messages allowed to queue up. Once filled, the
# ## UDP listener will start dropping packets.
# allowed_pending_messages = 10000
#
#
# ## Data format to consume.
# ## Each data format has it's own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
# data_format = "influx"
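For reference, a single metric in InfluxDB line protocol, the data_format named above, has the shape measurement,tags fields timestamp; the values below are purely illustrative:

  cpu,host=server01,region=us-west usage_idle=98.2,usage_user=1.1 1464692020000000000

Anything that can emit such lines over UDP to :8092 can feed this listener.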
# # Collects conntrack stats from the configured directories and files.
# [[inputs.conntrack]]
# ## The following defaults would work with multiple versions of conntrack. Note the nf_ and ip_
# ## filename prefixes are mutually exclusive across conntrack versions, as are the directory locations.
#
# ## Superset of filenames to look for within the conntrack dirs. Missing files will be ignored.
# files = ["ip_conntrack_count","ip_conntrack_max","nf_conntrack_count","nf_conntrack_max"]
#
# ## Directories to search within for the conntrack files above. Missing directories will be ignored.
# dirs = ["/proc/sys/net/ipv4/netfilter","/proc/sys/net/netfilter"]