Update etc/telegraf.conf

closes #1789
Cameron Sparr 2016-09-21 11:53:06 +01:00
parent 6b25a73629
commit f12368698b
1 changed file with 123 additions and 45 deletions

@@ -357,6 +357,30 @@
# data_format = "influx"
# # Send telegraf measurements to NATS
# [[outputs.nats]]
# ## URLs of NATS servers
# servers = ["nats://localhost:4222"]
# ## Optional credentials
# # username = ""
# # password = ""
# ## NATS subject for producer messages
# subject = "telegraf"
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false
#
# ## Data format to output.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# data_format = "influx"
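
For reference, an active (uncommented) NATS output using just the defaults shown above would be a minimal sketch like this, assuming a local unauthenticated nats-server on the default port:

[[outputs.nats]]
  servers = ["nats://localhost:4222"]
  subject = "telegraf"
  data_format = "influx"
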
# # Send telegraf measurements to NSQD
# [[outputs.nsq]]
# ## Location of nsqd instance listening on TCP
@@ -376,13 +400,18 @@
# ## prefix for metrics keys
# prefix = "my.specific.prefix."
#
# ## Telnet Mode ##
# ## DNS name of the OpenTSDB server in telnet mode
# ## DNS name of the OpenTSDB server
# ## Using "opentsdb.example.com" or "tcp://opentsdb.example.com" will use the
# ## telnet API. "http://opentsdb.example.com" will use the HTTP API.
# host = "opentsdb.example.com"
#
# ## Port of the OpenTSDB server in telnet mode
# ## Port of the OpenTSDB server
# port = 4242
#
# ## Number of data points to send to OpenTSDB in HTTP requests.
# ## Not used with telnet API.
# httpBatchSize = 50
#
# ## If debug is true, print OpenTSDB communication
# debug = false
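
To illustrate how the scheme selects the API: the first sketch below speaks the telnet API, the second the HTTP API with batched writes (host names are placeholders):

[[outputs.opentsdb]]
  host = "tcp://opentsdb.example.com"   # telnet API
  port = 4242

[[outputs.opentsdb]]
  host = "http://opentsdb.example.com"  # HTTP API
  port = 4242
  httpBatchSize = 50                    # data points per HTTP request
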
@@ -414,8 +443,8 @@
percpu = true
## Whether to report total system cpu stats or not
totalcpu = true
## Comment this line if you want the raw CPU time metrics
fielddrop = ["time_*"]
## If true, collect raw CPU time metrics.
collect_cpu_time = false
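
Raw CPU time metrics are now off by default, so the old fielddrop workaround is no longer needed; a config that still wants them would opt in explicitly:

[[inputs.cpu]]
  percpu = true
  totalcpu = true
  ## re-enable the raw CPU time metrics that fielddrop used to remove
  collect_cpu_time = true
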
# Read metrics about disk usage by mount point
@@ -530,14 +559,7 @@
# ## suffix used to identify socket files
# socket_suffix = "asok"
#
# ## Ceph user to authenticate as, ceph will search for the corresponding keyring
# ## e.g. client.admin.keyring in /etc/ceph, or the explicit path defined in the
# ## client section of ceph.conf for example:
# ##
# ## [client.telegraf]
# ## keyring = /etc/ceph/client.telegraf.keyring
# ##
# ## Consult the ceph documentation for more detail on keyring generation.
# ## Ceph user to authenticate as
# ceph_user = "client.admin"
#
# ## Ceph configuration to use to locate the cluster
@@ -546,8 +568,7 @@
# ## Whether to gather statistics via the admin socket
# gather_admin_socket_stats = true
#
# ## Whether to gather statistics via ceph commands, requires ceph_user and ceph_config
# ## to be specified
# ## Whether to gather statistics via ceph commands
# gather_cluster_stats = true
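
With cluster stats enabled the plugin runs ceph commands as the configured user, so ceph_user and ceph_config still have to line up with a valid keyring; a minimal sketch (paths assumed):

[[inputs.ceph]]
  ceph_user = "client.admin"
  ceph_config = "/etc/ceph/ceph.conf"
  gather_admin_socket_stats = true
  gather_cluster_stats = true
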
@@ -601,6 +622,11 @@
# ## Metric Statistic Namespace (required)
# namespace = 'AWS/ELB'
#
# ## Maximum requests per second. Note that the global default AWS rate limit
# ## is 10 reqs/sec, so if you define multiple namespaces, their ratelimit
# ## values should sum to at most 10. Optional - default value is 10.
# ratelimit = 10
#
# ## Metrics to Pull (optional)
# ## Defaults to all Metrics in Namespace if nothing is provided
# ## Refreshes Namespace available metrics every 1h
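
As a worked example of the rate-limit arithmetic, two cloudwatch inputs polling different namespaces could split the 10 reqs/sec default between them (region, period, and delay values here are illustrative):

[[inputs.cloudwatch]]
  region = 'us-east-1'
  period = '5m'
  delay = '5m'
  namespace = 'AWS/ELB'
  ratelimit = 5

[[inputs.cloudwatch]]
  region = 'us-east-1'
  period = '5m'
  delay = '5m'
  namespace = 'AWS/EC2'
  ratelimit = 5
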
@@ -718,6 +744,9 @@
# ## specify a list of one or more Elasticsearch servers
# servers = ["http://localhost:9200"]
#
# ## Timeout for HTTP requests to the Elasticsearch server(s)
# http_timeout = "5s"
#
# ## set local to false when you want to read the indices stats from all nodes
# ## within the cluster
# local = true
@@ -860,6 +889,8 @@
# "http://localhost:9999/stats/",
# "http://localhost:9998/stats/",
# ]
# ## Set response_timeout (default 5 seconds)
# response_timeout = "5s"
#
# ## HTTP method to use: GET or POST (case-sensitive)
# method = "GET"
@@ -899,6 +930,9 @@
# urls = [
# "http://localhost:8086/debug/vars"
# ]
#
# ## http request & header timeout
# timeout = "5s"
# # Read metrics from one or many bare metal servers
@@ -910,22 +944,11 @@
# ##
# servers = ["USERID:PASSW0RD@lan(192.168.1.1)"]
# # Gather packets and bytes throughput from iptables
# [[inputs.iptables]]
# ## iptables requires root access on most systems.
# ## Setting 'use_sudo' to true will run iptables via sudo.
# ## Users must configure sudo to allow the telegraf user to run iptables,
# ## and the sudo rule can be restricted to the list command "iptables -nvL".
# use_sudo = false
# ## define the table to monitor:
# table = "filter"
# ## Defines the chains to monitor:
# chains = [ "INPUT" ]
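
A sketch of the sudo variant, assuming iptables lives at /sbin/iptables and the agent runs as the telegraf user:

[[inputs.iptables]]
  ## requires a sudoers rule along the lines of:
  ##   telegraf ALL=(root) NOPASSWD: /sbin/iptables -nvL *
  use_sudo = true
  table = "filter"
  chains = [ "INPUT" ]
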
# # Read JMX metrics through Jolokia
# [[inputs.jolokia]]
# ## This is the context root used to compose the jolokia url
# ## NOTE that your jolokia security policy must allow for POST requests.
# context = "/jolokia"
#
# ## This specifies the mode used
@@ -1186,23 +1209,6 @@
# command = "passenger-status -v --show=xml"
# # Read metrics from one or many pgbouncer servers
# [[inputs.pgbouncer]]
# ## specify address via a url matching:
# ## postgres://[pqgotest[:password]]@localhost:port[/dbname]\
# ## ?sslmode=[disable|verify-ca|verify-full]
# ## or a simple string:
# ## host=localhost user=pqotest port=6432 password=... sslmode=... dbname=pgbouncer
# ##
# ## All connection parameters are optional, except for dbname,
# ## which must always be set to pgbouncer.
# address = "host=localhost user=postgres port=6432 sslmode=disable dbname=pgbouncer"
#
# ## A list of databases to pull metrics about. If not specified, metrics for all
# ## databases are gathered.
# # databases = ["app_production", "testing"]
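
The same connection could be written in the URL form described above (credentials are placeholders):

[[inputs.pgbouncer]]
  address = "postgres://postgres@localhost:6432/pgbouncer?sslmode=disable"
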
# # Read metrics of phpfpm, via HTTP status page or socket
# [[inputs.phpfpm]]
# ## An array of addresses to gather stats about. Specify an IP or hostname
@@ -1261,8 +1267,12 @@
# ##
# address = "host=localhost user=postgres sslmode=disable"
#
# ## A list of databases to explicitly ignore. If not specified, metrics for all
# ## databases are gathered. Do NOT use with the 'databases' option.
# # ignored_databases = ["postgres", "template0", "template1"]
#
# ## A list of databases to pull metrics about. If not specified, metrics for all
# ## databases are gathered.
# ## databases are gathered. Do NOT use with the 'ignored_databases' option.
# # databases = ["app_production", "testing"]
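
Since the two filters are mutually exclusive, a config sets one or the other; for example, to gather everything except the template databases:

[[inputs.postgresql]]
  address = "host=localhost user=postgres sslmode=disable"
  ignored_databases = ["postgres", "template0", "template1"]
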
@@ -1429,6 +1439,61 @@
# servers = ["http://localhost:8098"]
# # Retrieves SNMP values from remote agents
# [[inputs.snmp]]
# agents = [ "127.0.0.1:161" ]
# timeout = "5s"
# version = 2
#
# # SNMPv1 & SNMPv2 parameters
# community = "public"
#
# # SNMPv2 & SNMPv3 parameters
# max_repetitions = 50
#
# # SNMPv3 parameters
# #sec_name = "myuser"
# #auth_protocol = "md5" # Values: "MD5", "SHA", ""
# #auth_password = "password123"
# #sec_level = "authNoPriv" # Values: "noAuthNoPriv", "authNoPriv", "authPriv"
# #context_name = ""
# #priv_protocol = "" # Values: "DES", "AES", ""
# #priv_password = ""
#
# # measurement name
# name = "system"
# [[inputs.snmp.field]]
# name = "hostname"
# oid = ".1.0.0.1.1"
# [[inputs.snmp.field]]
# name = "uptime"
# oid = ".1.0.0.1.2"
# [[inputs.snmp.field]]
# name = "load"
# oid = ".1.0.0.1.3"
# [[inputs.snmp.field]]
# oid = "HOST-RESOURCES-MIB::hrMemorySize"
#
# [[inputs.snmp.table]]
# # measurement name
# name = "remote_servers"
# inherit_tags = [ "hostname" ]
# [[inputs.snmp.table.field]]
# name = "server"
# oid = ".1.0.0.0.1.0"
# is_tag = true
# [[inputs.snmp.table.field]]
# name = "connections"
# oid = ".1.0.0.0.1.1"
# [[inputs.snmp.table.field]]
# name = "latency"
# oid = ".1.0.0.0.1.2"
#
# [[inputs.snmp.table]]
# # auto populate table's fields using the MIB
# oid = "HOST-RESOURCES-MIB::hrNetworkTable"
# # DEPRECATED! PLEASE USE inputs.snmp INSTEAD.
# [[inputs.snmp_legacy]]
# ## Use 'oids.txt' file to translate oids to names
@@ -1601,6 +1666,16 @@
# SERVICE INPUT PLUGINS #
###############################################################################
# # Influx HTTP write listener
# [[inputs.http_listener]]
# ## Address and port to host HTTP listener on
# service_address = ":8186"
#
# ## timeouts
# read_timeout = "10s"
# write_timeout = "10s"
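
Anything that speaks the InfluxDB write protocol can target the listener; for instance, a second telegraf agent could forward its metrics there with a stock influxdb output (a sketch, assuming the listener above runs on localhost):

[[outputs.influxdb]]
  urls = ["http://localhost:8186"]
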
# # Read metrics from Kafka topic(s)
# [[inputs.kafka_consumer]]
# ## topic(s) to consume
@@ -1823,6 +1898,9 @@
# ## Address and port to host Webhook listener on
# service_address = ":1619"
#
# [inputs.webhooks.filestack]
# path = "/filestack"
#
# [inputs.webhooks.github]
# path = "/github"
#