Update sample config

Daniel Nelson 2018-06-05 11:45:07 -07:00
parent 46edb6c96d
commit e00d441056
1 changed file with 306 additions and 46 deletions


@@ -158,33 +158,74 @@
# # timeout = "5s"
# # Publishes metrics to an AMQP broker
# [[outputs.amqp]]
# ## Broker to publish to.
# ## deprecated in 1.7; use the brokers option
# # url = "amqp://localhost:5672/influxdb"
#
# ## Brokers to publish to. If multiple brokers are specified a random broker
# ## will be selected anytime a connection is established. This can be
# ## helpful for load balancing when not using a dedicated load balancer.
# brokers = ["amqp://localhost:5672/influxdb"]
#
# ## Maximum messages to send over a connection. Once this is reached, the
# ## connection is closed and a new connection is made. This can be helpful for
# ## load balancing when not using a dedicated load balancer.
# # max_messages = 0
#
# ## Exchange to declare and publish to.
# exchange = "telegraf" # exchange = "telegraf"
#
# ## Exchange type; common types are "direct", "fanout", "topic", "header", "x-consistent-hash".
# # exchange_type = "topic"
#
# ## If true, exchange will be passively declared.
# # exchange_declare_passive = false
#
# ## If true, exchange will be created as a durable exchange.
# # exchange_durable = true
#
# ## Additional exchange arguments.
# # exchange_arguments = { }
# # exchange_arguments = {"hash_propery" = "timestamp"}
#
# ## Authentication credentials for the PLAIN auth_method.
# # username = ""
# # password = ""
#
# ## Auth method. PLAIN and EXTERNAL are supported
# ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as
# ## described here: https://www.rabbitmq.com/plugins.html
# # auth_method = "PLAIN"
#
# ## Metric tag to use as a routing key.
# ## ie, if this tag exists, its value will be used as the routing key
# # routing_tag = "host"
#
# ## Static routing key. Used when no routing_tag is set or as a fallback
# ## when the tag specified in routing tag is not found.
# # routing_key = ""
# # routing_key = "telegraf"
#
# ## Delivery Mode controls if a published message is persistent.
# ## One of "transient" or "persistent".
# # delivery_mode = "transient"
#
# ## InfluxDB database added as a message header.
# ## deprecated in 1.7; use the headers option
# # database = "telegraf" # # database = "telegraf"
# #
# ## Write timeout, formatted as a string. If not provided, will default # ## InfluxDB retention policy added as a message header
# ## to 5s. 0s means no timeout (not recommended). # ## deprecated in 1.7; use the headers option
# # retention_policy = "default"
#
# ## Static headers added to each published message.
# # headers = { }
# # headers = {"database" = "telegraf", "retention_policy" = "default"}
#
# ## Connection timeout. If not provided, will default to 5s. 0s means no
# ## timeout (not recommended).
# # timeout = "5s" # # timeout = "5s"
# #
# ## Optional TLS Config # ## Optional TLS Config
@ -194,11 +235,16 @@
# ## Use TLS but skip chain & host verification # ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false # # insecure_skip_verify = false
# #
# ## If true use batch serialization format instead of line based delimiting.
# ## Only applies to data formats which are not line based such as JSON.
# ## Recommended to set to true.
# # use_batch_format = false
#
# ## Data format to output.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# # data_format = "influx"
# # Send metrics to Azure Application Insights
@@ -347,6 +393,10 @@
# ## Graphite output template
# ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# template = "host.tags.measurement.field"
#
# ## Enable Graphite tags support
# # graphite_tag_support = false
#
# ## timeout in seconds for the write connection to graphite
# timeout = 2
#
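A sketch of the new flag in context; the metric line quoted in the comment is only meant to illustrate the general shape of the Graphite tag format, not exact output:

[[outputs.graphite]]
  servers = ["localhost:2003"]
  template = "host.tags.measurement.field"
  ## with tag support enabled, metrics are written in the Graphite tag format,
  ## along the lines of: cpu.usage_idle;cpu=cpu-total;host=myhost 98.7 1528236500
  graphite_tag_support = true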
@@ -379,11 +429,6 @@
# # username = "username"
# # password = "pa$$word"
#
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
@@ -396,6 +441,11 @@
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# # data_format = "influx"
#
# ## Additional HTTP headers
# # [outputs.http.headers]
# # # Should be set manually to "application/json" for json data_format
# # Content-Type = "text/plain; charset=utf-8"
# # Configuration for sending metrics to an Instrumental project
@@ -828,6 +878,34 @@
# PROCESSOR PLUGINS #
###############################################################################
# # Convert values to another metric value type
# [[processors.converter]]
# ## Tags to convert
# ##
# ## The table key determines the target type, and the array of key-values
# ## select the keys to convert. The array may contain globs.
# ## <target-type> = [<tag-key>...]
# [processors.converter.tags]
# string = []
# integer = []
# unsigned = []
# boolean = []
# float = []
#
# ## Fields to convert
# ##
# ## The table key determines the target type, and the array of key-values
# ## select the keys to convert. The array may contain globs.
# ## <target-type> = [<field-key>...]
# [processors.converter.fields]
# tag = []
# string = []
# integer = []
# unsigned = []
# boolean = []
# float = []
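Read together, the two sub-tables work like this hypothetical sketch (the tag and field names are invented for illustration):

[[processors.converter]]
  ## convert the "port" tag into an integer field
  [processors.converter.tags]
    integer = ["port"]

  ## promote the "status" field to a tag and coerce feature flags to booleans
  [processors.converter.fields]
    tag = ["status"]
    boolean = ["*_enabled"]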
# # Apply metric modifications using override semantics.
# [[processors.override]]
# ## All modifications on inputs and aggregators can be overridden:
@@ -844,6 +922,36 @@
# [[processors.printer]]
# # Transforms tag and field values with regex pattern
# [[processors.regex]]
# ## Tag and field conversions defined in a separate sub-tables
# # [[processors.regex.tags]]
# # ## Tag to change
# # key = "resp_code"
# # ## Regular expression to match on a tag value
# # pattern = "^(\\d)\\d\\d$"
# # ## Pattern for constructing a new value (${1} represents first subgroup)
# # replacement = "${1}xx"
#
# # [[processors.regex.fields]]
# # key = "request"
# # ## All the power of the Go regular expressions available here
# # ## For example, named subgroups
# # pattern = "^/api(?P<method>/[\\w/]+)\\S*"
# # replacement = "${method}"
# # ## If result_key is present, a new field will be created
# # ## instead of changing existing field
# # result_key = "method"
#
# ## Multiple conversions may be applied for one field sequentially
# ## Let's extract one more value
# # [[processors.regex.fields]]
# # key = "request"
# # pattern = ".*category=(\\w+).*"
# # replacement = "${1}"
# # result_key = "search_category"
# # Print all metrics that pass through this filter.
# [[processors.topk]]
# ## How many seconds between aggregations
@@ -1030,6 +1138,17 @@
# ## This plugin will query all namespaces the aerospike
# ## server has configured and get stats for them.
# servers = ["localhost:3000"]
#
# # username = "telegraf"
# # password = "pa$$word"
#
# ## Optional TLS Config
# # enable_tls = false
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## If false, skip chain & host verification
# # insecure_skip_verify = true
# # Read Apache status information (mod_status)
@@ -1054,6 +1173,32 @@
# # insecure_skip_verify = false
# # Gather metrics from Apache Aurora schedulers
# [[inputs.aurora]]
# ## Schedulers are the base addresses of your Aurora Schedulers
# schedulers = ["http://127.0.0.1:8081"]
#
# ## Set of role types to collect metrics from.
# ##
# ## The scheduler roles are checked each interval by contacting the
# ## scheduler nodes; zookeeper is not contacted.
# # roles = ["leader", "follower"]
#
# ## Timeout is the max time for total network operations.
# # timeout = "5s"
#
# ## Username and password are sent using HTTP Basic Auth.
# # username = "username"
# # password = "pa$$word"
#
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
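For example, a setup that polls a pair of schedulers but only keeps metrics from whichever currently holds leadership might be sketched as follows (the hostnames are placeholders):

[[inputs.aurora]]
  schedulers = ["http://aurora-1.example.com:8081", "http://aurora-2.example.com:8081"]
  ## collect only from the elected leader
  roles = ["leader"]
  timeout = "5s"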
# # Read metrics of bcache from stats_total and dirty_data
# [[inputs.bcache]]
# ## Bcache sets path
@@ -1078,6 +1223,49 @@
# # bond_interfaces = ["bond0"]
# # Collect Kafka topics and consumers status from Burrow HTTP API.
# [[inputs.burrow]]
# ## Burrow API endpoints in format "schema://host:port".
# ## Default is "http://localhost:8000".
# servers = ["http://localhost:8000"]
#
# ## Override Burrow API prefix.
# ## Useful when Burrow is behind reverse-proxy.
# # api_prefix = "/v3/kafka"
#
# ## Maximum time to receive response.
# # response_timeout = "5s"
#
# ## Limit per-server concurrent connections.
# ## Useful in case of large number of topics or consumer groups.
# # concurrent_connections = 20
#
# ## Filter clusters, default is no filtering.
# ## Values can be specified as glob patterns.
# # clusters_include = []
# # clusters_exclude = []
#
# ## Filter consumer groups, default is no filtering.
# ## Values can be specified as glob patterns.
# # groups_include = []
# # groups_exclude = []
#
# ## Filter topics, default is no filtering.
# ## Values can be specified as glob patterns.
# # topics_include = []
# # topics_exclude = []
#
# ## Credentials for basic HTTP authentication.
# # username = ""
# # password = ""
#
# ## Optional SSL config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# # insecure_skip_verify = false
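As an illustration of the glob-based filters (the server address, cluster, group, and topic names are all invented):

[[inputs.burrow]]
  servers = ["http://burrow.example.com:8000"]
  concurrent_connections = 10

  ## keep the production clusters, drop canary consumer groups, and skip
  ## Kafka's internal offsets topic
  clusters_include = ["prod-*"]
  groups_exclude = ["*-canary"]
  topics_exclude = ["__consumer_offsets"]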
# # Collects performance metrics from the MON and OSD nodes in a Ceph storage cluster.
# [[inputs.ceph]]
# ## This is the recommended interval to poll. Too frequent and you will lose
@@ -2599,6 +2787,9 @@
# ## Remove numbers from field names.
# ## If true, a field name like 'temp1_input' will be changed to 'temp_input'.
# # remove_numbers = true
#
# ## Timeout is the maximum amount of time that the sensors command can run.
# # timeout = "5s"
# # Read metrics from storage devices supporting S.M.A.R.T.
@@ -2950,23 +3141,27 @@
# pools = ["redis_pool", "mc_pool"]
# # A plugin to collect stats from the Unbound DNS resolver
# [[inputs.unbound]]
# ## Address of server to connect to, read from unbound conf default, optionally ':port'
# ## Will lookup IP if given a hostname
# server = "127.0.0.1:8953"
#
# ## If running as a restricted user you can prepend sudo for additional access:
# # use_sudo = false
#
# ## The default location of the unbound-control binary can be overridden with:
# # binary = "/usr/sbin/unbound-control"
#
# ## The default timeout of 1s can be overridden with:
# # timeout = "1s"
#
# ## When set to true, thread metrics are tagged with the thread id.
# ##
# ## The default is false for backwards compatibility, and will be changed to
# ## true in a future version. It is recommended to set to true on new
# ## deployments.
# thread_as_tag = false
# # A plugin to collect stats from Varnish HTTP Cache
@@ -2985,7 +3180,7 @@
#
# ## Optional name for the varnish instance (or working directory) to query
# ## Usually appended after -n in varnish cli
# # instance_name = instanceName
# # Read metrics of ZFS from arcstats, zfetchstats, vdev_cache_stats, and pools
@@ -3032,19 +3227,43 @@
# # AMQP consumer plugin
# [[inputs.amqp_consumer]]
# ## Broker to consume from.
# ## deprecated in 1.7; use the brokers option
# # url = "amqp://localhost:5672/influxdb"
#
# ## Brokers to consume from. If multiple brokers are specified a random broker
# ## will be selected anytime a connection is established. This can be
# ## helpful for load balancing when not using a dedicated load balancer.
# brokers = ["amqp://localhost:5672/influxdb"]
#
# ## Authentication credentials for the PLAIN auth_method.
# # username = ""
# # password = ""
#
# ## Exchange to declare and consume from.
# exchange = "telegraf" # exchange = "telegraf"
# ## Exchange passive mode #
# exchange_passive = false # ## Exchange type; common types are "direct", "fanout", "topic", "header", "x-consistent-hash".
# # exchange_type = "topic"
#
# ## If true, exchange will be passively declared.
# # exchange_passive = false
#
# ## Exchange durability can be either "transient" or "durable".
# # exchange_durability = "durable"
#
# ## Additional exchange arguments.
# # exchange_arguments = { }
# # exchange_arguments = {"hash_propery" = "timestamp"}
#
# ## AMQP queue name
# queue = "telegraf"
#
# ## Binding Key
# binding_key = "#"
#
# ## Maximum number of messages server should give to the worker.
# # prefetch_count = 50
#
# ## Auth method. PLAIN and EXTERNAL are supported
# ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as
@@ -3565,6 +3784,46 @@
# percentile_limit = 1000
# # Accepts syslog messages per RFC5425
# [[inputs.syslog]]
# ## Specify an ip or hostname with port - eg., tcp://localhost:6514, tcp://10.0.0.1:6514
# ## Protocol, address and port to host the syslog receiver.
# ## If no host is specified, then localhost is used.
# ## If no port is specified, 6514 is used (RFC5425#section-4.1).
# server = "tcp://:6514"
#
# ## TLS Config
# # tls_allowed_cacerts = ["/etc/telegraf/ca.pem"]
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
#
# ## Period between keep alive probes.
# ## 0 disables keep alive probes.
# ## Defaults to the OS configuration.
# ## Only applies to stream sockets (e.g. TCP).
# # keep_alive_period = "5m"
#
# ## Maximum number of concurrent connections (default = 0).
# ## 0 means unlimited.
# ## Only applies to stream sockets (e.g. TCP).
# # max_connections = 1024
#
# ## Read timeout (default = 500ms).
# ## 0 means unlimited.
# # read_timeout = "500ms"
#
# ## Whether to parse in best effort mode or not (default = false).
# ## By default best effort parsing is off.
# # best_effort = false
#
# ## Character to prepend to SD-PARAMs (default = "_").
# ## A syslog message can contain multiple parameters and multiple identifiers within structured data section.
# ## Eg., [id1 name1="val1" name2="val2"][id2 name1="val1" nameA="valA"]
# ## For each combination a field is created.
# ## Its name is created concatenating identifier, sdparam_separator, and parameter name.
# # sdparam_separator = "_"
# # Stream a log file, like the tail -f command
# [[inputs.tail]]
# ## files to tail.
@@ -3635,3 +3894,4 @@
# [[inputs.zipkin]]
# # path = "/api/v1/spans" # URL path for span data
# # port = 9411 # Port on which Telegraf listens