Update telegraf.conf

Daniel Nelson 2017-03-27 14:49:04 -07:00
parent d2afe424f5
commit 3bf45f9365
1 changed file with 334 additions and 44 deletions


@@ -81,7 +81,10 @@
 # Configuration for influxdb server to send metrics to
 [[outputs.influxdb]]
-## The full HTTP or UDP endpoint URL for your InfluxDB instance.
+## The HTTP or UDP URL for your InfluxDB instance. Each item should be
+## of the form:
+##   scheme "://" host [ ":" port]
+##
 ## Multiple urls can be specified as part of the same cluster,
 ## this means that only ONE of the urls will be written to each interval.
 # urls = ["udp://localhost:8089"] # UDP endpoint example
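For example, a minimal sketch of the scheme above with two cluster members (hostnames assumed); telegraf picks ONE url per write interval:

    [[outputs.influxdb]]
      ## Two members of the same cluster; only one is written to each interval.
      urls = ["http://influxdb-a.example.com:8086", "udp://influxdb-b.example.com:8089"]
      database = "telegraf"
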
@@ -131,6 +134,8 @@
 # ## AMQP exchange
 # exchange = "telegraf"
 # ## Auth method. PLAIN and EXTERNAL are supported
+# ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as
+# ## described here: https://www.rabbitmq.com/plugins.html
 # # auth_method = "PLAIN"
 # ## Telegraf tag to use as a routing key
 # ## ie, if this tag exists, it's value will be used as the routing key
@@ -193,6 +198,45 @@
 # # no configuration
 
+
+# # Configuration for Elasticsearch to send metrics to.
+# [[outputs.elasticsearch]]
+# ## The full HTTP endpoint URL for your Elasticsearch instance
+# ## Multiple urls can be specified as part of the same cluster,
+# ## this means that only ONE of the urls will be written to each interval.
+# urls = [ "http://node1.es.example.com:9200" ] # required.
+# ## Elasticsearch client timeout, defaults to "5s" if not set.
+# timeout = "5s"
+# ## Set to true to ask Elasticsearch for a list of all cluster nodes,
+# ## thus it is not necessary to list all nodes in the urls config option.
+# enable_sniffer = false
+# ## Set the interval to check if the Elasticsearch nodes are available
+# ## Setting to "0s" will disable the health check (not recommended in production)
+# health_check_interval = "10s"
+# ## HTTP basic authentication details (eg. when using Shield)
+# # username = "telegraf"
+# # password = "mypassword"
+#
+# ## Index Config
+# ## The target index for metrics (Elasticsearch will create it if it does not exist).
+# ## You can use the date specifiers below to create indexes per time frame.
+# ## The metric timestamp will be used to decide the destination index name
+# # %Y - year (2016)
+# # %y - last two digits of year (00..99)
+# # %m - month (01..12)
+# # %d - day of month (e.g., 01)
+# # %H - hour (00..23)
+# index_name = "telegraf-%Y.%m.%d" # required.
+#
+# ## Template Config
+# ## Set to true if you want telegraf to manage its index template.
+# ## If enabled it will create a recommended index template for telegraf indexes
+# manage_template = true
+# ## The template name used for telegraf indexes
+# template_name = "telegraf"
+# ## Set to true if you want telegraf to overwrite an existing template
+# overwrite_template = false
 
 # # Send telegraf metrics to file(s)
 # [[outputs.file]]
 # ## Files to write to, "stdout" is a specially handled file.
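To make the index date specifiers concrete, a sketch (host and dates illustrative): a metric stamped 2017-03-27T14:49:04Z is written to the index telegraf-2017.03.27, and writes roll over to telegraf-2017.03.28 at midnight UTC the next day:

    [[outputs.elasticsearch]]
      urls = ["http://node1.es.example.com:9200"]
      ## %Y.%m.%d expands from each metric's own timestamp, not the wall clock.
      index_name = "telegraf-%Y.%m.%d"
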
@@ -443,7 +487,7 @@
 # # expiration_interval = "60s"
 
 
-# # Configuration for Riemann server to send metrics to
+# # Configuration for the Riemann server to send metrics to
 # [[outputs.riemann]]
 # ## The full TCP or UDP URL of the Riemann server
 # url = "tcp://localhost:5555"
@@ -472,9 +516,12 @@
 #
 # ## Description for Riemann event
 # # description_text = "metrics collected from telegraf"
+#
+# ## Riemann client write timeout, defaults to "5s" if not set.
+# # timeout = "5s"
 
 
-# # Configuration for the legacy Riemann plugin
+# # Configuration for the Riemann server to send metrics to
 # [[outputs.riemann_legacy]]
 # ## URL of server
 # url = "localhost:5555"
@@ -484,6 +531,27 @@
 # separator = " "
 
+
+# # Generic socket writer capable of handling multiple socket types.
+# [[outputs.socket_writer]]
+# ## URL to connect to
+# # address = "tcp://127.0.0.1:8094"
+# # address = "tcp://example.com:http"
+# # address = "tcp4://127.0.0.1:8094"
+# # address = "tcp6://127.0.0.1:8094"
+# # address = "tcp6://[2001:db8::1]:8094"
+# # address = "udp://127.0.0.1:8094"
+# # address = "udp4://127.0.0.1:8094"
+# # address = "udp6://127.0.0.1:8094"
+# # address = "unix:///tmp/telegraf.sock"
+# # address = "unixgram:///tmp/telegraf.sock"
+#
+# ## Data format to generate.
+# ## Each data format has its own unique set of configuration options; read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+# # data_format = "influx"
 
 ###############################################################################
 #                            PROCESSOR PLUGINS                                #
@@ -531,7 +599,7 @@
 ## Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually
 ## present on /run, /var/run, /dev/shm or /dev).
-ignore_fs = ["tmpfs", "devtmpfs"]
+ignore_fs = ["tmpfs", "devtmpfs", "devfs"]
 
 
 # Read metrics about disk IO by device
@@ -542,6 +610,23 @@
 # devices = ["sda", "sdb"]
 ## Uncomment the following line if you need disk serial numbers.
 # skip_serial_number = false
+#
+## On systems which support it, device metadata can be added in the form of
+## tags.
+## Currently only Linux is supported via udev properties. You can view
+## available properties for a device by running:
+## 'udevadm info -q property -n /dev/sda'
+# device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"]
+#
+## Using the same metadata source as device_tags, you can also customize the
+## name of the device via templates.
+## The 'name_templates' parameter is a list of templates to try and apply to
+## the device. The template may contain variables in the form of '$PROPERTY' or
+## '${PROPERTY}'. The first template which does not contain any variables not
+## present for the device is used as the device name tag.
+## The typical use case is for LVM volumes, to get the VG/LV name instead of
+## the near-meaningless DM-0 name.
+# name_templates = ["$ID_FS_LABEL","$DM_VG_NAME/$DM_LV_NAME"]
 
 
 # Get kernel statistics from /proc/stat
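As a worked example of the diskio name_templates above (udev property values assumed): if 'udevadm info -q property -n /dev/dm-0' reports DM_VG_NAME=rootvg and DM_LV_NAME=var, the first template is skipped (no ID_FS_LABEL present) and the device is tagged rootvg/var instead of dm-0:

    [[inputs.diskio]]
      ## Templates are tried in order; the first with all variables present wins.
      name_templates = ["$ID_FS_LABEL", "$DM_VG_NAME/$DM_LV_NAME"]
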
@@ -658,7 +743,7 @@
 # gather_admin_socket_stats = true
 #
 # ## Whether to gather statistics via ceph commands
-# gather_cluster_stats = true
+# gather_cluster_stats = false
 
 
 # # Read specific statistics per cgroup
@@ -677,6 +762,12 @@
 # # files = ["memory.*usage*", "memory.limit_in_bytes"]
 
+
+# # Get standard chrony metrics, requires chronyc executable.
+# [[inputs.chrony]]
+# ## If true, chronyc tries to perform a DNS lookup for the time server.
+# # dns_lookup = false
+
 
 # # Pull Metric Statistics from Amazon CloudWatch
 # [[inputs.cloudwatch]]
 # ## Amazon Region
@@ -722,9 +813,10 @@
 # namespace = "AWS/ELB"
 #
 # ## Maximum requests per second. Note that the global default AWS rate limit is
-# ## 10 reqs/sec, so if you define multiple namespaces, these should add up to a
-# ## maximum of 10. Optional - default value is 10.
-# ratelimit = 10
+# ## 400 reqs/sec, so if you define multiple namespaces, these should add up to a
+# ## maximum of 400. Optional - default value is 200.
+# ## See http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_limits.html
+# ratelimit = 200
 #
 # ## Metrics to Pull (optional)
 # ## Defaults to all Metrics in Namespace if nothing is provided
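To illustrate the budgeting rule, a sketch with two namespaces (region and namespaces assumed) whose ratelimits sum to the 400 reqs/sec account ceiling:

    [[inputs.cloudwatch]]
      region = "us-east-1"
      namespace = "AWS/ELB"
      ratelimit = 200

    [[inputs.cloudwatch]]
      region = "us-east-1"
      namespace = "AWS/EC2"
      ratelimit = 200   # 200 + 200 = 400, the global default limit
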
@@ -738,6 +830,22 @@
 # # value = "p-example"
 
+
+# # Collects conntrack stats from the configured directories and files.
+# [[inputs.conntrack]]
+# ## The following defaults would work with multiple versions of conntrack.
+# ## Note the nf_ and ip_ filename prefixes are mutually exclusive across
+# ## kernel versions, as are the directory locations.
+#
+# ## Superset of filenames to look for within the conntrack dirs.
+# ## Missing files will be ignored.
+# files = ["ip_conntrack_count","ip_conntrack_max",
+# "nf_conntrack_count","nf_conntrack_max"]
+#
+# ## Directories to search within for the conntrack files above.
+# ## Missing directories will be ignored.
+# dirs = ["/proc/sys/net/ipv4/netfilter","/proc/sys/net/netfilter"]
 
 
 # # Gather health check statuses from services registered in Consul
 # [[inputs.consul]]
 # ## Most of these values defaults to the one configured on a Consul's agent level.
@@ -957,6 +1065,24 @@
 # ## Server address not starting with 'http' will be treated as a possible
 # ## socket, so both examples below are valid.
 # ## servers = ["socket:/run/haproxy/admin.sock", "/run/haproxy/*.sock"]
+# #
+# ## By default, some of the fields are renamed from what haproxy calls them.
+# ## Setting this option to true results in the plugin keeping the original
+# ## field names.
+# ## keep_field_names = true
 
+
+# # Monitor disks' temperatures using hddtemp
+# [[inputs.hddtemp]]
+# ## By default, telegraf gathers temps data from all disks detected by
+# ## hddtemp.
+# ##
+# ## Only collect temps from the selected disks.
+# ##
+# ## A * as the device name will return the temperature values of all disks.
+# ##
+# # address = "127.0.0.1:7634"
+# # devices = ["sda", "*"]
 
 # # HTTP/HTTPS request given an address a method and a timeout
@@ -977,6 +1103,11 @@
 # # {'fake':'data'}
 # # '''
 #
+# ## Optional substring or regex match in body of the response
+# ## response_string_match = "\"service_status\": \"up\""
+# ## response_string_match = "ok"
+# ## response_string_match = "\".*_status\".?:.?\"up\""
+#
 # ## Optional SSL Config
 # # ssl_ca = "/etc/telegraf/ca.pem"
 # # ssl_cert = "/etc/telegraf/cert.pem"
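A minimal sketch of the substring form above (health endpoint assumed); the input reports a match failure when the body stops containing the string:

    [[inputs.http_response]]
      address = "http://localhost:8080/health"
      response_string_match = "\"service_status\": \"up\""
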
@@ -1050,14 +1181,37 @@
 # # collect_memstats = true
 
 
-# # Read metrics from one or many bare metal servers
+# # Read metrics from the bare metal servers via IPMI
 # [[inputs.ipmi_sensor]]
-# ## specify servers via a url matching:
+# ## optionally specify the path to the ipmitool executable
+# # path = "/usr/bin/ipmitool"
+# #
+# ## optionally specify one or more servers via a url matching
 # ## [username[:password]@][protocol[(address)]]
 # ## e.g.
 # ## root:passwd@lan(127.0.0.1)
 # ##
-# servers = ["USERID:PASSW0RD@lan(192.168.1.1)"]
+# ## if no servers are specified, local machine sensor stats will be queried
+# ##
+# # servers = ["USERID:PASSW0RD@lan(192.168.1.1)"]
+
+
+# # Gather packets and bytes throughput from iptables
+# [[inputs.iptables]]
+# ## iptables requires root access on most systems.
+# ## Setting 'use_sudo' to true will make use of sudo to run iptables.
+# ## Users must configure sudo to allow the telegraf user to run iptables with no password.
+# ## iptables can be restricted to only list command "iptables -nvL".
+# use_sudo = false
+# ## Setting 'use_lock' to true runs iptables with the "-w" option.
+# ## Adjust your sudo settings appropriately if using this option ("iptables -wnvl")
+# use_lock = false
+# ## defines the table to monitor:
+# table = "filter"
+# ## defines the chains to monitor.
+# ## NOTE: iptables rules without a comment will not be monitored.
+# ## Read the plugin documentation for more information.
+# chains = [ "INPUT" ]
 
 
 # # Read JMX metrics through Jolokia
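Returning to the iptables input above: when use_sudo is enabled, the plugin settings need a matching sudoers rule. A sketch (user name and iptables path assumed, sudoers line illustrative):

    [[inputs.iptables]]
      use_sudo = true
      table = "filter"
      chains = ["INPUT", "FORWARD"]
      ## A matching /etc/sudoers.d/telegraf entry might look like:
      ##   telegraf ALL=(root) NOPASSWD: /usr/sbin/iptables -nvL *
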
@@ -1087,6 +1241,13 @@
 # ## Includes connection time, any redirects, and reading the response body.
 # # client_timeout = "4s"
 #
+# ## Attribute delimiter
+# ##
+# ## When multiple attributes are returned for a single
+# ## [inputs.jolokia.metrics], the field name is a concatenation of the metric
+# ## name and the attribute name, separated by the given delimiter.
+# # delimiter = "_"
+#
 # ## List of servers exposing jolokia read service
 # [[inputs.jolokia.servers]]
 # name = "as-server-01"
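To make the delimiter behaviour concrete (metric name assumed, attribute list taken from the jolokia sample): with delimiter = "_", one metrics entry requesting two attributes yields the fields java_class_count_LoadedClassCount and java_class_count_UnloadedClassCount:

    [[inputs.jolokia]]
      delimiter = "_"

    [[inputs.jolokia.metrics]]
      name = "java_class_count"
      mbean = "java.lang:type=ClassLoading"
      attribute = "LoadedClassCount,UnloadedClassCount"
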
@@ -1117,6 +1278,11 @@
 # attribute = "LoadedClassCount,UnloadedClassCount,TotalLoadedClassCount"
 
+
+# # Get kernel statistics from /proc/vmstat
+# [[inputs.kernel_vmstat]]
+# # no configuration
+
 
 # # Read metrics from the kubernetes kubelet api
 # [[inputs.kubernetes]]
 # ## URL for the kubelet
@@ -1216,6 +1382,13 @@
 # ## 10.0.0.1:10000, etc.
 # servers = ["127.0.0.1:27017"]
 # gather_perdb_stats = false
+#
+# ## Optional SSL Config
+# # ssl_ca = "/etc/telegraf/ca.pem"
+# # ssl_cert = "/etc/telegraf/cert.pem"
+# # ssl_key = "/etc/telegraf/key.pem"
+# ## Use SSL but skip chain & host verification
+# # insecure_skip_verify = false
 
 
 # # Read metrics from one or many mysql servers
@@ -1243,9 +1416,15 @@
 # ## gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST
 # gather_process_list = true
 # #
+# ## gather thread state counts from INFORMATION_SCHEMA.USER_STATISTICS
+# gather_user_statistics = true
+# #
 # ## gather auto_increment columns and max values from information schema
 # gather_info_schema_auto_inc = true
 # #
+# ## gather metrics from INFORMATION_SCHEMA.INNODB_METRICS
+# gather_innodb_metrics = true
+# #
 # ## gather metrics from SHOW SLAVE STATUS command output
 # gather_slave_status = true
 # #
@@ -1417,7 +1596,7 @@
 # # ignored_databases = ["postgres", "template0", "template1"]
 #
 # ## A list of databases to pull metrics about. If not specified, metrics for all
-# ## databases are gathered. Do NOT use with the 'ignore_databases' option.
+# ## databases are gathered. Do NOT use with the 'ignored_databases' option.
 # # databases = ["app_production", "testing"]
@@ -1599,6 +1778,13 @@
 # servers = ["http://localhost:8098"]
 
+
+# # Monitor sensors, requires lm-sensors package
+# [[inputs.sensors]]
+# ## Remove numbers from field names.
+# ## If true, a field name like 'temp1_input' will be changed to 'temp_input'.
+# # remove_numbers = true
+
 
 # # Retrieves SNMP values from remote agents
 # [[inputs.snmp]]
 # agents = [ "127.0.0.1:161" ]
@@ -1775,6 +1961,68 @@
 # # ]
 
+
+# # Sysstat metrics collector
+# [[inputs.sysstat]]
+# ## Path to the sadc command.
+# #
+# ## Common Defaults:
+# ## Debian/Ubuntu: /usr/lib/sysstat/sadc
+# ## Arch: /usr/lib/sa/sadc
+# ## RHEL/CentOS: /usr/lib64/sa/sadc
+# sadc_path = "/usr/lib/sa/sadc" # required
+# #
+# #
+# ## Path to the sadf command, if it is not in PATH
+# # sadf_path = "/usr/bin/sadf"
+# #
+# #
+# ## Activities is a list of activities that are passed as arguments to the
+# ## sadc collector utility (e.g: DISK, SNMP etc...)
+# ## The more activities that are added, the more data is collected.
+# # activities = ["DISK"]
+# #
+# #
+# ## Group metrics to measurements.
+# ##
+# ## If group is false each metric will be prefixed with a description
+# ## and represents a measurement by itself.
+# ##
+# ## If group is true, corresponding metrics are grouped to a single measurement.
+# # group = true
+# #
+# #
+# ## Options for the sadf command. The values on the left represent the sadf
+# ## options and the values on the right their description (which are used for
+# ## grouping and prefixing metrics).
+# ##
+# ## Run 'sar -h' or 'man sar' to find out the supported options for your
+# ## sysstat version.
+# [inputs.sysstat.options]
+# -C = "cpu"
+# -B = "paging"
+# -b = "io"
+# -d = "disk" # requires DISK activity
+# "-n ALL" = "network"
+# "-P ALL" = "per_cpu"
+# -q = "queue"
+# -R = "mem"
+# -r = "mem_util"
+# -S = "swap_util"
+# -u = "cpu_util"
+# -v = "inode"
+# -W = "swap"
+# -w = "task"
+# # -H = "hugepages" # only available for newer linux distributions
+# # "-I ALL" = "interrupts" # requires INT activity
+# #
+# #
+# ## Device tags can be used to add additional tags for devices.
+# ## For example the configuration below adds a tag vg with value rootvg for
+# ## all metrics with sda devices.
+# # [[inputs.sysstat.device_tags.sda]]
+# # vg = "rootvg"
 
 
 # # Inserts sine and cosine waves for demonstration purposes
 # [[inputs.trig]]
 # ## Set the amplitude
@@ -1830,6 +2078,39 @@
 #                            SERVICE INPUT PLUGINS                            #
 ###############################################################################
 
+
+# # AMQP consumer plugin
+# [[inputs.amqp_consumer]]
+# ## AMQP url
+# url = "amqp://localhost:5672/influxdb"
+# ## AMQP exchange
+# exchange = "telegraf"
+# ## AMQP queue name
+# queue = "telegraf"
+# ## Binding Key
+# binding_key = "#"
+#
+# ## Maximum number of messages server should give to the worker.
+# prefetch_count = 50
+#
+# ## Auth method. PLAIN and EXTERNAL are supported
+# ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as
+# ## described here: https://www.rabbitmq.com/plugins.html
+# # auth_method = "PLAIN"
+#
+# ## Optional SSL Config
+# # ssl_ca = "/etc/telegraf/ca.pem"
+# # ssl_cert = "/etc/telegraf/cert.pem"
+# # ssl_key = "/etc/telegraf/key.pem"
+# ## Use SSL but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options; read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
 
 # # Influx HTTP write listener
 # [[inputs.http_listener]]
 # ## Address and port to host HTTP listener on
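Since the new amqp_consumer mirrors the amqp output shown earlier, a sketch of a matched pair (broker URL assumed); with binding_key = "#" the queue receives every routing key published to the exchange:

    [[outputs.amqp]]
      url = "amqp://localhost:5672/influxdb"
      exchange = "telegraf"
      data_format = "influx"

    [[inputs.amqp_consumer]]
      url = "amqp://localhost:5672/influxdb"
      exchange = "telegraf"
      queue = "telegraf"
      binding_key = "#"
      data_format = "influx"
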
@@ -1878,7 +2159,9 @@
 # ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log
 # ## /var/log/apache.log -> only tail the apache log file
 # files = ["/var/log/apache/access.log"]
-# ## Read file from beginning.
+# ## Read files that currently exist from the beginning. Files that are created
+# ## while telegraf is running (and that match the "files" globs) will always
+# ## be read from the beginning.
 # from_beginning = false
 #
 # ## Parse logstash-style "grok" patterns:
@@ -1976,6 +2259,38 @@
 # data_format = "influx"
 
+
+# # Generic socket listener capable of handling multiple socket types.
+# [[inputs.socket_listener]]
+# ## URL to listen on
+# # service_address = "tcp://:8094"
+# # service_address = "tcp://127.0.0.1:http"
+# # service_address = "tcp4://:8094"
+# # service_address = "tcp6://:8094"
+# # service_address = "tcp6://[2001:db8::1]:8094"
+# # service_address = "udp://:8094"
+# # service_address = "udp4://:8094"
+# # service_address = "udp6://:8094"
+# # service_address = "unix:///tmp/telegraf.sock"
+# # service_address = "unixgram:///tmp/telegraf.sock"
+#
+# ## Maximum number of concurrent connections.
+# ## Only applies to stream sockets (e.g. TCP).
+# ## 0 (default) is unlimited.
+# # max_connections = 1024
+#
+# ## Maximum socket buffer size in bytes.
+# ## For stream sockets, once the buffer fills up, the sender will start backing up.
+# ## For datagram sockets, once the buffer fills up, metrics will start dropping.
+# ## Defaults to the OS default.
+# # read_buffer_size = 65535
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options; read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# # data_format = "influx"
 
 
 # # Statsd Server
 # [[inputs.statsd]]
 # ## Address and port to host UDP listener on
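A runnable sketch of the socket_listener's UDP form from the hunk above, with a one-line smoke test in the comments (netcat assumed to be installed):

    [[inputs.socket_listener]]
      service_address = "udp://:8094"
      data_format = "influx"
      ## Send a test point in line protocol from the same host:
      ##   echo "example_metric,source=nc value=42" | nc -u -w1 127.0.0.1 8094
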
@@ -2045,41 +2360,16 @@
 # # Generic TCP listener
 # [[inputs.tcp_listener]]
-# ## Address and port to host TCP listener on
-# # service_address = ":8094"
-# #
-# ## Number of TCP messages allowed to queue up. Once filled, the
-# ## TCP listener will start dropping packets.
-# # allowed_pending_messages = 10000
-#
-# ## Maximum number of concurrent TCP connections to allow
-# # max_tcp_connections = 250
-#
-# ## Data format to consume.
-# ## Each data format has it's own unique set of configuration options, read
-# ## more about them here:
-# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
-# data_format = "influx"
+# # DEPRECATED: the TCP listener plugin has been deprecated in favor of the
+# # socket_listener plugin
+# # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener
 
 
 # # Generic UDP listener
 # [[inputs.udp_listener]]
-# ## Address and port to host UDP listener on
-# # service_address = ":8092"
-# #
-# ## Number of UDP messages allowed to queue up. Once filled, the
-# ## UDP listener will start dropping packets.
-# # allowed_pending_messages = 10000
-#
-# ## Set the buffer size of the UDP connection outside of OS default (in bytes)
-# ## If set to 0, take OS default
-# udp_buffer_size = 16777216
-#
-# ## Data format to consume.
-# ## Each data format has it's own unique set of configuration options, read
-# ## more about them here:
-# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
-# data_format = "influx"
+# # DEPRECATED: the UDP listener plugin has been deprecated in favor of the
+# # socket_listener plugin
+# # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener
 
 # # A Webhooks Event collector
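For anyone still running the deprecated listeners, a migration sketch (the port carried over from the old tcp_listener default):

    # Before (deprecated):
    # [[inputs.tcp_listener]]
    #   service_address = ":8094"

    # After:
    [[inputs.socket_listener]]
      service_address = "tcp://:8094"
      data_format = "influx"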