Update sample config

Daniel Nelson 2018-05-11 18:18:43 -07:00
parent 18db718d7f
commit 558caf57de
No known key found for this signature in database
GPG Key ID: CAAD59C9444F6155
1 changed file with 110 additions and 8 deletions


@@ -399,7 +399,7 @@
 # ## 0 : No compression
 # ## 1 : Gzip compression
 # ## 2 : Snappy compression
-# compression_codec = 0
+# # compression_codec = 0
 #
 # ## RequiredAcks is used in Produce Requests to tell the broker how many
 # ## replica acknowledgements it must see before responding
@@ -415,10 +415,11 @@
 # ## received the data. This option provides the best durability, we
 # ## guarantee that no messages will be lost as long as at least one in
 # ## sync replica remains.
-# required_acks = -1
+# # required_acks = -1
 #
-# ## The total number of times to retry sending a message
-# max_retry = 3
+# ## The maximum number of times to retry sending a metric before failing
+# ## until the next flush.
+# # max_retry = 3
 #
 # ## Optional TLS Config
 # # tls_ca = "/etc/telegraf/ca.pem"
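Taken together, the kafka output defaults in this hunk read like the sketch below once uncommented. This is illustrative only, not part of the commit; the broker address and topic are placeholders:

    [[outputs.kafka]]
      ## Placeholder broker address and topic.
      brokers = ["localhost:9092"]
      topic = "telegraf"

      ## Defaults documented above: no compression, wait for all in-sync
      ## replicas to acknowledge, and retry a failed send up to 3 times
      ## before the next flush.
      compression_codec = 0
      required_acks = -1
      max_retry = 3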
@@ -435,7 +436,7 @@
 # ## Each data format has its own unique set of configuration options, read
 # ## more about them here:
 # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
-# data_format = "influx"
+# # data_format = "influx"
 
 # # Configuration for the AWS Kinesis output.
@@ -776,6 +777,56 @@
 # [[processors.printer]]
 
+# # Keep the top k series, ranked by an aggregation, over a period of time.
+# [[processors.topk]]
+# ## How many seconds between aggregations
+# # period = 10
+#
+# ## How many top metrics to return
+# # k = 10
+#
+# ## Over which tags should the aggregation be done. Globs can be specified, in
+# ## which case any tag matching the glob will be aggregated over. If set to an
+# ## empty list, no aggregation over tags is done
+# # group_by = ['*']
+#
+# ## Over which fields the top k are calculated
+# # fields = ["value"]
+#
+# ## What aggregation to use. Options: sum, mean, min, max
+# # aggregation = "mean"
+#
+# ## Instead of the top k largest metrics, return the bottom k lowest metrics
+# # bottomk = false
+#
+# ## The plugin assigns each metric a GroupBy tag generated from its name and
+# ## tags. If this setting is different from "", the plugin will add a
+# ## tag (whose name will be the value of this setting) to each metric with
+# ## the value of the calculated GroupBy tag. Useful for debugging
+# # add_groupby_tag = ""
+#
+# ## These settings provide a way to know the position of each metric in
+# ## the top k. The 'add_rank_fields' setting lets you specify the fields
+# ## for which the position is required. If the list is non-empty, a field
+# ## will be added to each metric for each string present in this
+# ## setting. This field will contain the ranking of the group that
+# ## the metric belonged to when aggregated over that field.
+# ## The name of the field will be the name of the aggregation field,
+# ## suffixed with the string '_topk_rank'
+# # add_rank_fields = []
+#
+# ## These settings provide a way to know what values the plugin is generating
+# ## when aggregating metrics. The 'add_aggregate_fields' setting lets you
+# ## specify the fields for which the final aggregation value is required. If
+# ## the list is non-empty, a field will be added to each metric for
+# ## each field present in this setting. This field will contain
+# ## the computed aggregation for the group that the metric belonged to when
+# ## aggregated over that field.
+# ## The name of the field will be the name of the aggregation field,
+# ## suffixed with the string '_topk_aggregate'
+# # add_aggregate_fields = []
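To make the rank and aggregate options concrete, here is an illustrative topk block (not part of the commit) that keeps the top 3 series per host by mean of the "value" field. Following the suffix rules above, each passing metric gains a "value_topk_rank" and a "value_topk_aggregate" field:

    [[processors.topk]]
      period = 10
      k = 3
      group_by = ["host"]
      fields = ["value"]
      aggregation = "mean"
      ## Adds value_topk_rank and value_topk_aggregate fields to each metric.
      add_rank_fields = ["value"]
      add_aggregate_fields = ["value"]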
 
 ###############################################################################
 #                            AGGREGATOR PLUGINS                               #
@@ -2093,7 +2144,7 @@
 # ##
-# # TCP or UDP 'ping' given url and collect response time in seconds
+# # Collect response time of a TCP or UDP connection
 # [[inputs.net_response]]
 # ## Protocol, must be "tcp" or "udp"
 # ## NOTE: because the "udp" protocol does not respond to requests, it requires
@@ -2101,11 +2152,12 @@
 # protocol = "tcp"
 # ## Server address (default localhost)
 # address = "localhost:80"
+#
 # ## Set timeout
-# timeout = "1s"
+# # timeout = "1s"
 #
 # ## Set read timeout (only used if expecting a response)
-# read_timeout = "1s"
+# # read_timeout = "1s"
 #
 # ## The following options are required for UDP checks. For TCP, they are
 # ## optional. The plugin will send the given string to the server and then
@@ -2114,6 +2166,9 @@
 # # send = "ssh"
 # ## expected string in answer
 # # expect = "ssh"
+#
+# ## Uncomment to remove deprecated fields
+# # fieldexclude = ["result_type", "string_found"]
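For illustration (not part of the commit), a complete TCP check that exercises these options against a local SSH server, mirroring the send/expect values above and dropping the deprecated fields:

    [[inputs.net_response]]
      protocol = "tcp"
      address = "localhost:22"
      timeout = "1s"
      read_timeout = "1s"
      ## Send the string, then require it in the response.
      send = "ssh"
      expect = "ssh"
      ## Drop the deprecated fields.
      fieldexclude = ["result_type", "string_found"]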
 
 # # Read TCP metrics such as established, time wait and sockets counts.
@@ -2990,6 +3045,53 @@
 # # basic_password = "barfoo"
 
+# # Read JTI OpenConfig Telemetry from listed sensors
+# [[inputs.jti_openconfig_telemetry]]
+# ## List of device addresses to collect telemetry from
+# servers = ["localhost:1883"]
+#
+# ## Authentication details. Username and password are required if the device
+# ## expects authentication. The client ID must be unique when connecting from
+# ## multiple instances of telegraf to the same device
+# username = "user"
+# password = "pass"
+# client_id = "telegraf"
+#
+# ## Frequency to get data
+# sample_frequency = "1000ms"
+#
+# ## Sensors to subscribe to
+# ## An identifier for each sensor can be provided in the path, separated by a
+# ## space; otherwise the sensor path will be used as the identifier.
+# ## When an identifier is used, a list of space-separated sensors can be given.
+# ## A single subscription will be created with all these sensors, and the data
+# ## will be saved to a measurement named after the identifier
+# sensors = [
+# "/interfaces/",
+# "collection /components/ /lldp",
+# ]
+#
+# ## A reporting rate can be set per sensor group by specifying a duration at
+# ## the beginning of the sensor path / collection name. Entries without a
+# ## reporting rate use the configured sample frequency
+# sensors = [
+# "1000ms customReporting /interfaces /lldp",
+# "2000ms collection /components",
+# "/interfaces",
+# ]
+#
+# ## x509 certificate to use for the TLS connection. If it is not provided, an
+# ## insecure channel will be opened with the server
+# ssl_cert = "/etc/telegraf/cert.pem"
+#
+# ## Delay between retry attempts of failed RPC calls or streams. Defaults to 1000ms.
+# ## Failed streams/calls will not be retried if 0 is provided
+# retry_delay = "1000ms"
+#
+# ## To treat all string values as tags, set this to true
+# str_as_tags = false
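Pulling the sensor syntax together, an illustrative subscription (not part of the commit), where "ifstats" is a hypothetical identifier that names the measurement and the leading "2000ms" overrides the 1s sample frequency for that group; the device address is a placeholder:

    [[inputs.jti_openconfig_telemetry]]
      ## Placeholder device address.
      servers = ["router1:32767"]
      username = "user"
      password = "pass"
      client_id = "telegraf"
      sample_frequency = "1000ms"
      ## One subscription, two paths, reported every 2s into "ifstats".
      sensors = ["2000ms ifstats /interfaces /lldp"]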
 
 # # Read metrics from Kafka topic(s)
 # [[inputs.kafka_consumer]]
 # ## kafka servers