Update example config

Daniel Nelson 2018-03-07 13:49:04 -08:00
parent d191ccdd3f
commit 30d595fe0c
1 changed file with 270 additions and 110 deletions

@@ -510,6 +510,9 @@
# # username = "telegraf"
# # password = "metricsmetricsmetricsmetrics"
#
# ## Timeout for write operations. default: 5s
# # timeout = "5s"
#
# ## client ID, if not set a random ID is generated
# # client_id = ""
#
@@ -594,12 +597,24 @@
# ## Address to listen on
# # listen = ":9273"
#
# ## Use TLS
# #tls_cert = "/etc/ssl/telegraf.crt"
# #tls_key = "/etc/ssl/telegraf.key"
#
# ## Use http basic authentication
# #basic_username = "Foo"
# #basic_password = "Bar"
#
# ## Interval to expire metrics and not deliver to prometheus, 0 == no expiration
# # expiration_interval = "60s"
#
# ## Collectors to enable, valid entries are "gocollector" and "process".
# ## If unset, both are enabled.
# collectors_exclude = ["gocollector", "process"]
#
# # Send string metrics as Prometheus labels.
# # Unless set to false all string metrics will be sent as labels.
# string_as_label = true
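For reference, a minimal enabled sketch of this output using only the options shown above (the listen address and credentials are illustrative assumptions, not defaults):

  [[outputs.prometheus_client]]
    listen = ":9273"
    basic_username = "Foo"
    basic_password = "Bar"
    expiration_interval = "60s"
    string_as_label = true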
# # Configuration for the Riemann server to send metrics to
@@ -699,7 +714,7 @@
# #use_regex = false
#
# ## point tags to use as the source name for Wavefront (if none found, host will be used)
# #source_override = ["hostname", "snmp_host", "node_host"]
# #source_override = ["hostname", "agent_host", "node_host"]
#
# ## whether to convert boolean values to numeric values, with false -> 0.0 and true -> 1.0. default true
# #convert_bool = true
@@ -718,6 +733,18 @@
# PROCESSOR PLUGINS #
###############################################################################
# # Apply metric modifications using override semantics.
# [[processors.override]]
# ## All modifications on inputs and aggregators can be overridden:
# # name_override = "new_name"
# # name_prefix = "new_name_prefix"
# # name_suffix = "new_name_suffix"
#
# ## Tags to be added (all values must be strings)
# # [processors.override.tags]
# # additional_tag = "tag_value"
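To make the override semantics above concrete, a minimal sketch (the prefix and tag values are illustrative assumptions):

  [[processors.override]]
    name_prefix = "edge_"
    [processors.override.tags]
      datacenter = "us-east-1"
  # a metric named "cpu" passing through this processor is emitted as
  # "edge_cpu" with the additional tag datacenter=us-east-1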
# # Print all metrics that pass through this filter.
# [[processors.printer]]
@@ -792,12 +819,11 @@
# Read metrics about disk usage by mount point
[[inputs.disk]]
## By default, telegraf gather stats for all mountpoints.
## Setting mountpoints will restrict the stats to the specified mountpoints.
## By default stats will be gathered for all mount points.
## Setting mount_points will restrict the stats to only the specified mount points.
# mount_points = ["/"]
## Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually
## present on /run, /var/run, /dev/shm or /dev).
## Ignore mount points by filesystem type.
ignore_fs = ["tmpfs", "devtmpfs", "devfs"]
@@ -806,7 +832,7 @@
## By default, telegraf will gather stats for all devices including
## disk partitions.
## Setting devices will restrict the stats to the specified devices.
# devices = ["sda", "sdb"]
# devices = ["sda", "sdb", "vd*"]
## Uncomment the following line if you need disk serial numbers.
# skip_serial_number = false
#
@@ -1301,9 +1327,6 @@
# # Read metrics from fail2ban.
# [[inputs.fail2ban]]
# ## Use sudo to run fail2ban-client
# ## Generally the fail2ban-client requires root access but running telegraf
# ## as root is not recommended. Learn more here:
# ## https://github.com/influxdata/telegraf/tree/master/plugins/inputs/fail2ban
# use_sudo = false
@@ -1417,11 +1440,51 @@
# # devices = ["sda", "*"]
# # Read formatted metrics from one or more HTTP endpoints
# [[inputs.http]]
# ## One or more URLs from which to read formatted metrics
# urls = [
# "http://localhost/metrics"
# ]
#
# ## HTTP method
# # method = "GET"
#
# ## Optional HTTP headers
# # headers = {"X-Special-Header" = "Special-Value"}
#
# ## Optional HTTP Basic Auth Credentials
# # username = "username"
# # password = "pa$$word"
#
# ## Tag all metrics with the url
# # tag_url = true
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false
#
# ## Amount of time allowed to complete the HTTP request
# # timeout = "5s"
#
# ## Data format to consume.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
# # data_format = "influx"
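A minimal enabled form of this input, assuming the endpoint serves influx line protocol (the URL is illustrative):

  [[inputs.http]]
    urls = ["http://localhost/metrics"]
    timeout = "5s"
    data_format = "influx"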
# # HTTP/HTTPS request given an address, a method and a timeout
# [[inputs.http_response]]
# ## Server address (default http://localhost)
# # address = "http://localhost"
#
# ## Set http_proxy (telegraf uses the system wide proxy settings if it is not set)
# # http_proxy = "http://localhost:8888"
#
# ## Set response_timeout (default 5 seconds)
# # response_timeout = "5s"
#
@@ -1481,6 +1544,13 @@
# # "my_tag_2"
# # ]
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false
#
# ## HTTP parameters (all values must be strings). For "GET" requests, data
# ## will be included in the query. For "POST" requests, data will be included
# ## in the request body as "x-www-form-urlencoded".
@@ -1492,13 +1562,6 @@
# # [inputs.httpjson.headers]
# # X-Auth-Token = "my-xauth-token"
# # apiVersion = "v1"
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false
# # Read InfluxDB-formatted JSON metrics from one or more HTTP endpoints
@@ -1541,7 +1604,10 @@
# [[inputs.ipmi_sensor]]
# ## optionally specify the path to the ipmitool executable
# # path = "/usr/bin/ipmitool"
# #
# ##
# ## optionally force session privilege level. Can be CALLBACK, USER, OPERATOR, ADMINISTRATOR
# # privilege = "ADMINISTRATOR"
# ##
# ## optionally specify one or more servers via a url matching
# ## [username[:password]@][protocol[(address)]]
# ## e.g.
@@ -1559,6 +1625,17 @@
# timeout = "20s"
# # Gather packets and bytes counters from Linux ipsets
# [[inputs.ipset]]
# ## By default, we only show sets which have already matched at least 1 packet.
# ## set include_unmatched_sets = true to gather them all.
# include_unmatched_sets = false
# ## Adjust your sudo settings appropriately if using this option ("sudo ipset save")
# use_sudo = false
# ## The default timeout of 1s for ipset execution can be overridden here:
# # timeout = "1s"
# # Gather packets and bytes throughput from iptables
# [[inputs.iptables]]
# ## iptables require root access on most systems.
@@ -1792,7 +1869,7 @@
# ## Timeout, in ms.
# timeout = 100
# ## A list of Mesos masters.
# masters = ["localhost:5050"]
# masters = ["http://localhost:5050"]
# ## Master metrics groups to be collected, by default, all enabled.
# master_collections = [
# "resources",
@@ -1816,6 +1893,13 @@
# # "tasks",
# # "messages",
# # ]
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false
# # Collects scores from a minecraft server's scoreboard using the RCON protocol
@@ -1913,6 +1997,15 @@
# ssl_key = "/etc/telegraf/key.pem"
# # Provides metrics about the state of a NATS server
# [[inputs.nats]]
# ## The address of the monitoring endpoint of the NATS server
# server = "http://localhost:8222"
#
# ## Maximum time to receive response
# # response_timeout = "5s"
# # Read metrics about network interface usage
# [[inputs.net]]
# ## By default, telegraf gathers stats from any up interface (excluding loopback)
@@ -2017,6 +2110,10 @@
# # dn/password to bind with. If bind_dn is empty, an anonymous bind is performed.
# bind_dn = ""
# bind_password = ""
#
# # Reverse metric names so they sort more naturally. Recommended.
# # This defaults to false if unset, but is set to true when generating a new config
# reverse_metric_names = true
# # A plugin to collect stats from OpenSMTPD - a free implementation of the server-side SMTP protocol
@@ -2090,7 +2187,10 @@
# # ping_interval = 1.0
# ## per-ping timeout, in s. 0 == no timeout (ping -W <TIMEOUT>)
# # timeout = 1.0
# ## interface to send ping from (ping -I <INTERFACE>)
# ## total-ping deadline, in s. 0 == no deadline (ping -w <DEADLINE>)
# # deadline = 10
# ## interface or source address to send ping from (ping -I <INTERFACE/SRC_ADDR>)
# ## on Darwin and FreeBSD only a source address is possible (ping -S <SRC_ADDR>)
# # interface = ""
@@ -2101,90 +2201,6 @@
# # queue_directory = "/var/spool/postfix"
# # Read metrics from one or many postgresql servers
# [[inputs.postgresql]]
# ## specify address via a url matching:
# ## postgres://[pqgotest[:password]]@localhost[/dbname]\
# ## ?sslmode=[disable|verify-ca|verify-full]
# ## or a simple string:
# ## host=localhost user=pqotest password=... sslmode=... dbname=app_production
# ##
# ## All connection parameters are optional.
# ##
# ## Without the dbname parameter, the driver will default to a database
# ## with the same name as the user. This dbname is just for instantiating a
# ## connection with the server and doesn't restrict the databases we are trying
# ## to grab metrics for.
# ##
# address = "host=localhost user=postgres sslmode=disable"
#
# ## A list of databases to explicitly ignore. If not specified, metrics for all
# ## databases are gathered. Do NOT use with the 'databases' option.
# # ignored_databases = ["postgres", "template0", "template1"]
#
# ## A list of databases to pull metrics about. If not specified, metrics for all
# ## databases are gathered. Do NOT use with the 'ignored_databases' option.
# # databases = ["app_production", "testing"]
# # Read metrics from one or many postgresql servers
# [[inputs.postgresql_extensible]]
# ## specify address via a url matching:
# ## postgres://[pqgotest[:password]]@localhost[/dbname]\
# ## ?sslmode=[disable|verify-ca|verify-full]
# ## or a simple string:
# ## host=localhost user=pqotest password=... sslmode=... dbname=app_production
# #
# ## All connection parameters are optional. #
# ## Without the dbname parameter, the driver will default to a database
# ## with the same name as the user. This dbname is just for instantiating a
# ## connection with the server and doesn't restrict the databases we are trying
# ## to grab metrics for.
# #
# address = "host=localhost user=postgres sslmode=disable"
# ## A list of databases to pull metrics about. If not specified, metrics for all
# ## databases are gathered.
# ## databases = ["app_production", "testing"]
# #
# # outputaddress = "db01"
# ## A custom name for the database that will be used as the "server" tag in the
# ## measurement output. If not specified, a default one generated from
# ## the connection address is used.
# #
# ## Define the toml config where the sql queries are stored
# ## New queries can be added. If withdbname is set to true and no databases
# ## are defined in the 'databases' field, the sql query is ended with
# ## 'is not null' in order to make the query succeed.
# ## Example :
# ## The sqlquery "SELECT * FROM pg_stat_database where datname" becomes
# ## "SELECT * FROM pg_stat_database where datname IN ('postgres', 'pgbench')"
# ## because the databases variable was set to ['postgres', 'pgbench'] and
# ## withdbname was true. Note that if withdbname is set to false you do not
# ## have to define the where clause (i.e. with the dbname). The tagvalue
# ## field is used to define custom tags (separated by commas).
# ## The optional "measurement" value can be used to override the default
# ## output measurement name ("postgresql").
# #
# ## Structure :
# ## [[inputs.postgresql_extensible.query]]
# ## sqlquery string
# ## version string
# ## withdbname boolean
# ## tagvalue string (comma separated)
# ## measurement string
# [[inputs.postgresql_extensible.query]]
# sqlquery="SELECT * FROM pg_stat_database"
# version=901
# withdbname=false
# tagvalue=""
# measurement=""
# [[inputs.postgresql_extensible.query]]
# sqlquery="SELECT * FROM pg_stat_bgwriter"
# version=901
# withdbname=false
# tagvalue="postgresql.stats"
# # Read metrics from one or many PowerDNS servers
# [[inputs.powerdns]]
# ## An array of sockets to gather stats about.
@@ -2194,7 +2210,6 @@
# # Monitor process cpu and memory usage
# [[inputs.procstat]]
# ## Must specify one of: pid_file, exe, or pattern
# ## PID file to monitor process
# pid_file = "/var/run/nginx.pid"
# ## executable name (ie, pgrep <exe>)
@@ -2211,12 +2226,20 @@
# ## override for process_name
# ## This is optional; default is sourced from /proc/<pid>/status
# # process_name = "bar"
#
# ## Field name prefix
# prefix = ""
# ## comment this out if you want raw cpu_time stats
# fielddrop = ["cpu_time_*"]
# ## This is optional; moves pid into a tag instead of a field
# pid_tag = false
# # prefix = ""
#
# ## Add PID as a tag instead of a field; useful to differentiate between
# ## processes whose tags are otherwise the same. Can create a large number
# ## of series, use judiciously.
# # pid_tag = false
#
# ## Method to use when finding process IDs. Can be one of 'pgrep' or
# ## 'native'. The pgrep finder calls the pgrep executable in the PATH while
# ## the native finder performs the search directly in a manner dependent on
# ## the platform. Default is 'pgrep'
# # pid_finder = "pgrep"
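For example, a sketch that matches processes by pattern and uses the native finder (the pattern value is an illustrative assumption):

  [[inputs.procstat]]
    pattern = "nginx"
    pid_tag = false
    pid_finder = "native"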
# # Read metrics from one or many prometheus clients
@@ -2281,6 +2304,15 @@
# ## A list of queues to gather as the rabbitmq_queue measurement. If not
# ## specified, metrics for all queues are gathered.
# # queues = ["telegraf"]
#
# ## A list of exchanges to gather as the rabbitmq_exchange measurement. If not
# ## specified, metrics for all exchanges are gathered.
# # exchanges = ["telegraf"]
#
# ## Queues to include and exclude. Globs accepted.
# ## Note that an empty array for both will include all queues
# queue_name_include = []
# queue_name_exclude = []
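As an illustration of the queue globs, a sketch that keeps queues starting with "telegraf." and drops dead-letter queues (the queue names are assumptions; connection options are omitted):

  [[inputs.rabbitmq]]
    queue_name_include = ["telegraf.*"]
    queue_name_exclude = ["*.dead_letter"]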
# # Read raindrops stats (raindrops - real-time stats for preforking Rack servers)
@@ -2571,6 +2603,28 @@
# # servers = [
# # "Server=192.168.1.10;Port=1433;User Id=<user>;Password=<pw>;app name=telegraf;log=1;",
# # ]
#
# ## Optional parameter, setting this to 2 will use a new version
# ## of the collection queries that break compatibility with the original
# ## dashboards.
# query_version = 2
#
# ## If you are using AzureDB, setting this to true will gather resource utilization metrics
# # azuredb = false
#
# ## If you would like to exclude some of the metrics queries, list them here
# ## Possible choices:
# ## - PerformanceCounters
# ## - WaitStatsCategorized
# ## - DatabaseIO
# ## - DatabaseProperties
# ## - CPUHistory
# ## - DatabaseSize
# ## - DatabaseStats
# ## - MemoryClerk
# ## - VolumeSpace
# ## - PerformanceMetrics
# # exclude_query = [ 'PerformanceCounters','WaitStatsCategorized' ]
# # Sysstat metrics collector
@@ -2694,6 +2748,10 @@
#
# ## Use the builtin fielddrop/fieldpass telegraf filters in order to keep/remove specific fields
# fieldpass = ["total_*", "num_*","time_up", "mem_*"]
#
# ## IP of server to connect to, read from unbound conf default, optionally ':port'
# ## Will lookup IP if given a hostname
# server = "127.0.0.1:8953"
# # A plugin to collect stats from Varnish HTTP Cache
@@ -2724,7 +2782,9 @@
# ## By default, telegraf gathers all zfs stats
# ## If not specified, then default is:
# # kstatMetrics = ["arcstats", "zfetchstats", "vdev_cache_stats"]
#
# ## For Linux, the default is:
# # kstatMetrics = ["abdstats", "arcstats", "dnodestats", "dbufcachestats",
# # "dmu_tx", "fm", "vdev_mirror_stats", "zfetchstats", "zil"]
# ## By default, don't gather zpool stats
# # poolMetrics = false
@@ -2998,6 +3058,105 @@
# data_format = "influx"
# # Read metrics from one or many postgresql servers
# [[inputs.postgresql]]
# ## specify address via a url matching:
# ## postgres://[pqgotest[:password]]@localhost[/dbname]\
# ## ?sslmode=[disable|verify-ca|verify-full]
# ## or a simple string:
# ## host=localhost user=pqotest password=... sslmode=... dbname=app_production
# ##
# ## All connection parameters are optional.
# ##
# ## Without the dbname parameter, the driver will default to a database
# ## with the same name as the user. This dbname is just for instantiating a
# ## connection with the server and doesn't restrict the databases we are trying
# ## to grab metrics for.
# ##
# address = "host=localhost user=postgres sslmode=disable"
# ## A custom name for the database that will be used as the "server" tag in the
# ## measurement output. If not specified, a default one generated from
# ## the connection address is used.
# # outputaddress = "db01"
#
# ## connection configuration.
# ## maxlifetime - specify the maximum lifetime of a connection.
# ## default is forever (0s)
# max_lifetime = "0s"
#
# ## A list of databases to explicitly ignore. If not specified, metrics for all
# ## databases are gathered. Do NOT use with the 'databases' option.
# # ignored_databases = ["postgres", "template0", "template1"]
#
# ## A list of databases to pull metrics about. If not specified, metrics for all
# ## databases are gathered. Do NOT use with the 'ignored_databases' option.
# # databases = ["app_production", "testing"]
# # Read metrics from one or many postgresql servers
# [[inputs.postgresql_extensible]]
# ## specify address via a url matching:
# ## postgres://[pqgotest[:password]]@localhost[/dbname]\
# ## ?sslmode=[disable|verify-ca|verify-full]
# ## or a simple string:
# ## host=localhost user=pqotest password=... sslmode=... dbname=app_production
# #
# ## All connection parameters are optional. #
# ## Without the dbname parameter, the driver will default to a database
# ## with the same name as the user. This dbname is just for instantiating a
# ## connection with the server and doesn't restrict the databases we are trying
# ## to grab metrics for.
# #
# address = "host=localhost user=postgres sslmode=disable"
#
# ## connection configuration.
# ## maxlifetime - specify the maximum lifetime of a connection.
# ## default is forever (0s)
# max_lifetime = "0s"
#
# ## A list of databases to pull metrics about. If not specified, metrics for all
# ## databases are gathered.
# ## databases = ["app_production", "testing"]
# #
# ## A custom name for the database that will be used as the "server" tag in the
# ## measurement output. If not specified, a default one generated from
# ## the connection address is used.
# # outputaddress = "db01"
# #
# ## Define the toml config where the sql queries are stored
# ## New queries can be added. If withdbname is set to true and no databases
# ## are defined in the 'databases' field, the sql query is ended with
# ## 'is not null' in order to make the query succeed.
# ## Example :
# ## The sqlquery "SELECT * FROM pg_stat_database where datname" becomes
# ## "SELECT * FROM pg_stat_database where datname IN ('postgres', 'pgbench')"
# ## because the databases variable was set to ['postgres', 'pgbench'] and
# ## withdbname was true. Note that if withdbname is set to false you do not
# ## have to define the where clause (i.e. with the dbname). The tagvalue
# ## field is used to define custom tags (separated by commas).
# ## The optional "measurement" value can be used to override the default
# ## output measurement name ("postgresql").
# #
# ## Structure :
# ## [[inputs.postgresql_extensible.query]]
# ## sqlquery string
# ## version string
# ## withdbname boolean
# ## tagvalue string (comma separated)
# ## measurement string
# [[inputs.postgresql_extensible.query]]
# sqlquery="SELECT * FROM pg_stat_database"
# version=901
# withdbname=false
# tagvalue=""
# measurement=""
# [[inputs.postgresql_extensible.query]]
# sqlquery="SELECT * FROM pg_stat_bgwriter"
# version=901
# withdbname=false
# tagvalue="postgresql.stats"
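To make the withdbname behaviour described above concrete, a sketch using the database names from the example text:

  [[inputs.postgresql_extensible]]
    address = "host=localhost user=postgres sslmode=disable"
    databases = ["postgres", "pgbench"]
    [[inputs.postgresql_extensible.query]]
      sqlquery = "SELECT * FROM pg_stat_database where datname"
      withdbname = true
      # with withdbname = true and the databases above, the executed query becomes:
      #   SELECT * FROM pg_stat_database where datname IN ('postgres', 'pgbench')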
# # Generic socket listener capable of handling multiple socket types.
# [[inputs.socket_listener]]
# ## URL to listen on
@@ -3048,6 +3207,7 @@
#
# ## MaxTCPConnection - applicable when protocol is set to tcp (default=250)
# max_tcp_connections = 250
#
# ## Enable TCP keep alive probes (default=false)
# tcp_keep_alive = false
#