Update sample config

Daniel Nelson 2017-08-16 16:46:40 -07:00
parent 7608251633
commit 8b588ea37f
1 changed file with 267 additions and 80 deletions


@ -118,6 +118,12 @@
## Use SSL but skip chain & host verification
# insecure_skip_verify = false
## HTTP Proxy Config
# http_proxy = "http://corporate.proxy:3128"
## Compress each HTTP request payload using GZIP.
# content_encoding = "gzip"
# # Configuration for Amon Server to send metrics to.
# [[outputs.amon]]
@ -272,11 +278,11 @@
# timeout = 2
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false
# # Send telegraf metrics to graylog(s)
@ -596,17 +602,7 @@
# AGGREGATOR PLUGINS #
###############################################################################
# # Keep the aggregate histogram of each metric passing through.
# [[aggregators.histogram]]
# ## General Aggregator Arguments:
# ## The period on which to flush & clear the aggregator.
@ -632,6 +628,16 @@
# metric_fields = ["io_time", "read_time", "write_time"]
# # Keep the aggregate min/max of each metric passing through.
# [[aggregators.minmax]]
# ## General Aggregator Arguments:
# ## The period on which to flush & clear the aggregator.
# period = "30s"
# ## If true, the original metric will be dropped by the
# ## aggregator and will not get sent to the output plugins.
# drop_original = false
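# ## A minimal sketch of the effect (illustrative values, not part of the
# ## generated config): with period = "30s" and drop_original = false, an
# ## input metric such as
# ##   system load1=1.27
# ## is passed through unchanged and, every 30s, also yields an aggregate like
# ##   system load1_min=1.02,load1_max=1.45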
###############################################################################
# INPUT PLUGINS #
@ -645,6 +651,8 @@
totalcpu = true
## If true, collect raw CPU time metrics.
collect_cpu_time = false
## If true, compute and report the sum of all non-idle CPU states.
report_active = false
# Read metrics about disk usage by mount point
@ -720,15 +728,17 @@
# # Read Apache status information (mod_status)
# [[inputs.apache]]
# ## An array of URLs to gather from, must be directed at the machine
# ## readable version of the mod_status page including the auto query string.
# ## Default is "http://localhost/server-status?auto".
# urls = ["http://localhost/server-status?auto"]
#
# ## Credentials for basic HTTP authentication.
# # username = "myuser"
# # password = "mypassword"
#
# ## Maximum time to receive response.
# # response_timeout = "5s"
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
@ -846,7 +856,7 @@
# #
# # The minimum period for Cloudwatch metrics is 1 minute (60s). However not all
# # metrics are made available to the 1 minute period. Some are collected at
# # 3 minute, 5 minute, or larger intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring.
# # Note that if a period is configured that is smaller than the minimum for a
# # particular metric, that metric will not be returned by the Cloudwatch API
# # and will not be collected by Telegraf.
@ -958,20 +968,23 @@
# # Query given DNS server and gives statistics
# [[inputs.dns_query]]
# ## servers to query
# servers = ["8.8.8.8"]
#
# ## Network is the network protocol name.
# # network = "udp"
#
# ## Domains or subdomains to query.
# # domains = ["."]
#
# ## Query record type.
# ## Possible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV.
# # record_type = "A"
#
# ## Dns server port.
# # port = 53
#
# ## Query timeout in seconds.
# # timeout = 2
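# ## For example (illustrative values only), to query MX records for
# ## example.com against a local resolver over TCP:
# # servers = ["127.0.0.1"]
# # domains = ["example.com"]
# # network = "tcp"
# # record_type = "MX"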
# # Read metrics about docker containers
@ -980,8 +993,15 @@
# ## To use TCP, set endpoint = "tcp://[ip]:[port]"
# ## To use environment variables (ie, docker-machine), set endpoint = "ENV"
# endpoint = "unix:///var/run/docker.sock"
#
# ## Only collect metrics for these containers, collect all if empty
# container_names = []
#
# ## Containers to include and exclude. Globs accepted.
# ## Note that an empty array for both will include all containers
# container_name_include = []
# container_name_exclude = []
#
# ## Timeout for docker list, info, and stats commands
# timeout = "5s"
#
@ -990,11 +1010,20 @@
# perdevice = true
# ## Whether to report for each container total blkio and network stats or not
# total = false
# ## Which environment variables should we use as a tag
# ##tag_env = ["JAVA_HOME", "HEAP_SIZE"]
#
# ## docker labels to include and exclude as tags. Globs accepted.
# ## Note that an empty array for both will include all labels as tags
# docker_label_include = []
# docker_label_exclude = []
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false
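# ## For example (illustrative values only), to collect only containers whose
# ## names start with "web-" and tag their metrics with the JAVA_HOME variable:
# # container_name_include = ["web-*"]
# # tag_env = ["JAVA_HOME"]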
# # Read statistics from one or many dovecot servers
@ -1064,6 +1093,15 @@
# data_format = "influx"
# # Read metrics from fail2ban.
# [[inputs.fail2ban]]
# ## fail2ban-client requires root access.
# ## Setting 'use_sudo' to true will make use of sudo to run fail2ban-client.
# ## Users must configure sudo to allow the telegraf user to run fail2ban-client with no password.
# ## This plugin runs only "fail2ban-client status".
# use_sudo = false
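# ## A hypothetical sudoers entry for this (adjust the binary path for your
# ## distribution), e.g. in /etc/sudoers.d/telegraf:
# ##   telegraf ALL=(root) NOPASSWD: /usr/bin/fail2ban-client status, /usr/bin/fail2ban-client status *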
# # Read stats about given file(s)
# [[inputs.filestat]]
# ## Files to gather stats about.
@ -1080,6 +1118,22 @@
# md5 = false
# # Read metrics exposed by fluentd in_monitor plugin
# [[inputs.fluentd]]
# ## This plugin reads information exposed by fluentd (using /api/plugins.json endpoint).
# ##
# ## Endpoint:
# ## - only one URI is allowed
# ## - https is not supported
# endpoint = "http://localhost:24220/api/plugins.json"
#
# ## Define which plugins have to be excluded (based on "type" field - e.g. monitor_agent)
# exclude = [
# "monitor_agent",
# "dummy",
# ]
# # Read flattened metrics from one or more GrayLog HTTP endpoints
# [[inputs.graylog]]
# ## API endpoint, currently supported API:
@ -1161,25 +1215,26 @@
# # HTTP/HTTPS request given an address a method and a timeout
# [[inputs.http_response]]
# ## Server address (default http://localhost)
# # address = "http://localhost"
#
# ## Set response_timeout (default 5 seconds)
# # response_timeout = "5s"
#
# ## HTTP Request Method
# # method = "GET"
#
# ## Whether to follow redirects from the server (defaults to false)
# # follow_redirects = false
#
# ## Optional HTTP Request Body
# # body = '''
# # {'fake':'data'}
# # '''
#
# ## Optional substring or regex match in body of the response
# # response_string_match = "\"service_status\": \"up\""
# # response_string_match = "ok"
# # response_string_match = "\".*_status\".?:.?\"up\""
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
@ -1187,6 +1242,10 @@
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false
#
# ## HTTP Request Headers (all values must be strings)
# # [inputs.http_response.headers]
# # Host = "github.com"
# # Read flattened metrics from one or more JSON HTTP endpoints
@ -1249,6 +1308,13 @@
# "http://localhost:8086/debug/vars"
# ]
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false
#
# ## http request & header timeout
# timeout = "5s"
@ -1279,6 +1345,13 @@
# ## if no servers are specified, local machine sensor stats will be queried
# ##
# # servers = ["USERID:PASSW0RD@lan(192.168.1.1)"]
#
# ## Recommended: use metric 'interval' that is a multiple of 'timeout' to avoid
# ## gaps or overlap in pulled data
# interval = "30s"
#
# ## Timeout for the ipmitool command to complete
# timeout = "20s"
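# ## For example (illustrative values only), pairing the timeout above with an
# ## interval that is a whole multiple of it:
# # interval = "60s"
# # timeout = "20s"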
# # Gather packets and bytes throughput from iptables
@ -1398,9 +1471,9 @@
# # Read metrics from a LeoFS Server via SNMP
# [[inputs.leofs]]
# ## An array of URLs of the form:
# ## "udp://" host [ ":" port]
# servers = ["udp://127.0.0.1:4020"]
# # Provides Linux sysctl fs metrics
@ -1475,14 +1548,24 @@
# # ]
# # Collects scores from a minecraft server's scoreboard using the RCON protocol
# [[inputs.minecraft]]
# ## server address for minecraft
# # server = "localhost"
# ## port for RCON
# # port = "25575"
# ## RCON password for the minecraft server
# # password = ""
# # Read metrics from one or many MongoDB servers
# [[inputs.mongodb]]
# ## An array of URLs of the form:
# ## "mongodb://" [user ":" pass "@"] host [ ":" port]
# ## For example:
# ## mongodb://user:auth_key@10.10.3.30:27017,
# ## mongodb://10.10.3.33:18832,
# servers = ["mongodb://127.0.0.1:27017"]
# gather_perdb_stats = false
#
# ## Optional SSL Config
@ -1496,7 +1579,7 @@
# # Read metrics from one or many mysql servers
# [[inputs.mysql]]
# ## specify servers via a url matching:
# ## [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify|custom]]
# ## see https://github.com/go-sql-driver/mysql#dsn-data-source-name
# ## e.g.
# ## servers = ["user:passwd@tcp(127.0.0.1:3306)/?tls=false"]
@ -1553,7 +1636,7 @@
# #
# ## Some queries we may want to run less often (such as SHOW GLOBAL VARIABLES)
# interval_slow = "30m"
#
# ## Optional SSL Config (will be used if tls=custom parameter specified in server uri)
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
@ -1599,8 +1682,17 @@
# # Read Nginx's basic status information (ngx_http_stub_status_module)
# [[inputs.nginx]]
# # An array of Nginx stub_status URI to gather stats.
# urls = ["http://localhost/server_status"]
#
# # TLS/SSL configuration
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.cer"
# ssl_key = "/etc/telegraf/key.key"
# insecure_skip_verify = false
#
# # HTTP response timeout (default: 5s)
# response_timeout = "5s"
# # Read NSQ topic and channel statistics.
@ -1627,6 +1719,27 @@
# dns_lookup = true
# # OpenLDAP cn=Monitor plugin
# [[inputs.openldap]]
# host = "localhost"
# port = 389
#
# # ldaps, starttls, or no encryption. default is an empty string, disabling all encryption.
# # note that port will likely need to be changed to 636 for ldaps
# # valid options: "" | "starttls" | "ldaps"
# ssl = ""
#
# # skip peer certificate verification. Default is false.
# insecure_skip_verify = false
#
# # Path to PEM-encoded Root certificate to use to verify server certificate
# ssl_ca = "/etc/ssl/certs.pem"
#
# # dn/password to bind with. If bind_dn is empty, an anonymous bind is performed.
# bind_dn = ""
# bind_password = ""
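# # For example (illustrative values only), to connect to a directory over LDAPS:
# # host = "ldap.example.com"
# # port = 636
# # ssl = "ldaps"
# # ssl_ca = "/etc/ssl/certs.pem"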
# # Read metrics of passenger using passenger-status
# [[inputs.passenger]]
# ## Path of passenger-status.
@ -1820,10 +1933,13 @@
# location = "/var/lib/puppet/state/last_run_summary.yaml"
# # Reads metrics from RabbitMQ servers via the Management Plugin
# [[inputs.rabbitmq]]
# ## Management Plugin url. (default: http://localhost:15672)
# # url = "http://localhost:15672"
# ## Tag added to rabbitmq_overview series; deprecated: use tags
# # name = "rmq-server-1"
# ## Credentials
# # username = "guest"
# # password = "guest"
#
@ -1880,14 +1996,11 @@
# ##
# ## If you use actual rethinkdb of > 2.3.0 with username/password authorization,
# ## protocol has to be named "rethinkdb2" - it will use 1_0 H.
# # servers = ["rethinkdb2://username:password@127.0.0.1:28015"]
# ##
# ## If you use older versions of rethinkdb (<2.2) with auth_key, protocol
# ## has to be named "rethinkdb".
# # servers = ["rethinkdb://username:auth_key@127.0.0.1:28015"]
# # Read metrics from one or many Riak servers
@ -1896,6 +2009,26 @@
# servers = ["http://localhost:8098"]
# # Read API usage and limits for a Salesforce organisation
# [[inputs.salesforce]]
# ## specify your credentials
# ##
# username = "your_username"
# password = "your_password"
# ##
# ## (optional) security token
# # security_token = "your_security_token"
# ##
# ## (optional) environment type (sandbox or production)
# ## default is: production
# ##
# # environment = "production"
# ##
# ## (optional) API version (default: "39.0")
# ##
# # version = "39.0"
# # Monitor sensors, requires lm-sensors package
# [[inputs.sensors]]
# ## Remove numbers from field names.
@ -2141,6 +2274,26 @@
# # vg = "rootvg"
# # Gather metrics from the Tomcat server status page.
# [[inputs.tomcat]]
# ## URL of the Tomcat server status
# # url = "http://127.0.0.1:8080/manager/status/all?XML=true"
#
# ## HTTP Basic Auth Credentials
# # username = "tomcat"
# # password = "s3cret"
#
# ## Request timeout
# # timeout = "5s"
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false
# # Inserts sine and cosine waves for demonstration purposes
# [[inputs.trig]]
# ## Set the amplitude
@ -2157,6 +2310,9 @@
# # A plugin to collect stats from Varnish HTTP Cache
# [[inputs.varnish]]
# ## If running as a restricted user you can prepend sudo for additional access:
# #use_sudo = false
#
# ## The default location of the varnishstat binary can be overridden with:
# binary = "/usr/bin/varnishstat"
#
@ -2247,16 +2403,13 @@
# ## 0 means to use the default of 65536 bytes (64 kibibytes)
# max_line_size = 0
# # Read metrics from Kafka 0.9+ topic(s)
# # Read metrics from Kafka topic(s)
# [[inputs.kafka_consumer]]
# ## kafka servers
# brokers = ["localhost:9092"]
# ## topic(s) to consume
# topics = ["telegraf"]
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
@ -2269,6 +2422,11 @@
# # sasl_username = "kafka"
# # sasl_password = "secret"
#
# ## the name of the consumer group
# consumer_group = "telegraf_metrics_consumers"
# ## Offset (must be either "oldest" or "newest")
# offset = "oldest"
#
# ## Data format to consume.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
@ -2279,7 +2437,8 @@
# ## larger messages are dropped
# max_message_len = 65536
# # Read metrics from Kafka (0.8 or less) topic(s)
# # Read metrics from Kafka topic(s)
# [[inputs.kafka_consumer_legacy]]
# ## topic(s) to consume
# topics = ["telegraf"]
@ -2312,6 +2471,7 @@
# ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log
# ## /var/log/apache.log -> only tail the apache log file
# files = ["/var/log/apache/access.log"]
#
# ## Read files that currently exist from the beginning. Files that are created
# ## while telegraf is running (and that match the "files" globs) will always
# ## be read from the beginning.
@ -2327,12 +2487,26 @@
# ## %{COMMON_LOG_FORMAT} (plain apache & nginx access logs)
# ## %{COMBINED_LOG_FORMAT} (access logs + referrer & agent)
# patterns = ["%{COMBINED_LOG_FORMAT}"]
#
# ## Name of the outputted measurement.
# measurement = "apache_access_log"
#
# ## Full path(s) to custom pattern files.
# custom_pattern_files = []
#
# ## Custom patterns can also be defined here. Put one pattern per line.
# custom_patterns = '''
#
# ## Timezone allows you to provide an override for timestamps that
# ## don't already include an offset
# ## e.g. 04/06/2016 12:41:45 data one two 5.43µs
# ##
# ## Default: "" which renders UTC
# ## Options are as follows:
# ## 1. Local -- interpret based on machine localtime
# ## 2. "Canada/Eastern" -- Unix TZ values like those found in https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
# ## 3. UTC -- or blank/unspecified, will return timestamp in UTC
# timezone = "Canada/Eastern"
# '''
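# ## A custom pattern is a name followed by a regular expression, one pattern
# ## per line inside custom_patterns. A hypothetical example (illustrative only):
# ##   POSTFIX_QUEUEID [0-9A-F]{10,11}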
@ -2431,6 +2605,11 @@
# ## 0 (default) is unlimited.
# # max_connections = 1024
#
# ## Read timeout.
# ## Only applies to stream sockets (e.g. TCP).
# ## 0 (default) is unlimited.
# # read_timeout = "30s"
#
# ## Maximum socket buffer size in bytes.
# ## For stream sockets, once the buffer fills up, the sender will start backing up.
# ## For datagram sockets, once the buffer fills up, metrics will start dropping.
@ -2450,12 +2629,14 @@
# # data_format = "influx"
# # Statsd UDP/TCP Server
# [[inputs.statsd]]
# ## Protocol, must be "tcp" or "udp" (default=udp)
# protocol = "udp"
#
# ## MaxTCPConnection - applicable when protocol is set to tcp (default=250)
# max_tcp_connections = 250
#
# ## Address and port to host UDP listener on
# service_address = ":8125"
#
@ -2556,3 +2737,9 @@
# [inputs.webhooks.papertrail]
# path = "/papertrail"
# # This plugin implements the Zipkin http server to gather trace and timing data needed to troubleshoot latency problems in microservice architectures.
# [[inputs.zipkin]]
# # path = "/api/v1/spans" # URL path for span data
# # port = 9411 # Port on which Telegraf listens