From 96f3d7def4e1a1c0afd9586cd7ddc6c12469fa45 Mon Sep 17 00:00:00 2001
From: Greg Linton
Date: Wed, 12 Sep 2018 16:06:31 -0600
Subject: [PATCH] Update telegraf.conf

---
 etc/telegraf.conf | 465 +++++++++++++++++++++++++++++++++++++++++-----
 scripts/build.py  |   2 +-
 2 files changed, 422 insertions(+), 45 deletions(-)

diff --git a/etc/telegraf.conf b/etc/telegraf.conf
index 11842e7e1..8d1371a24 100644
--- a/etc/telegraf.conf
+++ b/etc/telegraf.conf
@@ -183,8 +183,8 @@
 # ## If true, exchange will be passively declared.
 # # exchange_declare_passive = false
 #
-# ## If true, exchange will be created as a durable exchange.
-# # exchange_durable = true
+# ## Exchange durability can be either "transient" or "durable".
+# # exchange_durability = "durable"
 #
 # ## Additional exchange arguments.
 # # exchange_arguments = { }
@@ -256,7 +256,7 @@
 # # timeout = "5s"
 #
 # ## Enable additional diagnostic logging.
-# # enable_diagnosic_logging = false
+# # enable_diagnostic_logging = false
 #
 # ## Context Tag Sources add Application Insights context tags to a tag value.
 # ##
@@ -267,6 +267,32 @@
 # # "ai.cloud.roleInstance" = "kubernetes_pod_name"


+# # Send aggregate metrics to Azure Monitor
+# [[outputs.azure_monitor]]
+# ## Timeout for HTTP writes.
+# # timeout = "20s"
+#
+# ## Set the namespace prefix, defaults to "Telegraf/".
+# # namespace_prefix = "Telegraf/"
+#
+# ## Azure Monitor doesn't have a string value type, so convert string
+# ## fields to dimensions (a.k.a. tags) if enabled. Azure Monitor allows
+# ## a maximum of 10 dimensions, so Telegraf will only send the first 10
+# ## alphanumeric dimensions.
+# # strings_as_dimensions = false
+#
+# ## Both region and resource_id must be set or be available via the
+# ## Instance Metadata service on Azure Virtual Machines.
+# #
+# ## Azure Region to publish metrics against.
+# ## ex: region = "southcentralus"
+# # region = ""
+# #
+# ## The Azure Resource ID against which metrics will be logged.
+# ## ex: resource_id = "/subscriptions/<subscription_id>/resourceGroups/<resource_group>/providers/Microsoft.Compute/virtualMachines/<vm_name>"
+# # resource_id = ""
+
+
 # # Configuration for AWS CloudWatch output.
 # [[outputs.cloudwatch]]
 # ## Amazon REGION
@@ -287,8 +313,22 @@
 # #profile = ""
 # #shared_credential_file = ""
 #
+# ## Endpoint to make request against, the correct endpoint is automatically
+# ## determined and this option should only be set if you wish to override the
+# ## default.
+# ## ex: endpoint_url = "http://localhost:8000"
+# # endpoint_url = ""
+#
 # ## Namespace for the CloudWatch MetricDatums
 # namespace = "InfluxData/Telegraf"
+#
+# ## If you have a large number of metrics, consider sending statistic
+# ## values instead of raw metrics; this can both improve performance and
+# ## reduce AWS API cost. If this flag is enabled, the plugin will parse the required
+# ## CloudWatch statistic fields (count, min, max, and sum) and send them to CloudWatch.
+# ## You can use the basicstats aggregator to calculate those fields. If not all statistic
+# ## fields are available, all fields will still be sent as raw metrics.
+# # write_statistics = false


 # # Configuration for CrateDB to send metrics to.
@@ -429,6 +469,12 @@
 # # username = "username"
 # # password = "pa$$word"
 #
+# ## OAuth2 Client Credentials Grant
+# # client_id = "clientid"
+# # client_secret = "secret"
+# # token_url = "https://identityprovider/oauth2/v1/token"
+# # scopes = ["urn:opc:idm:__myscopes__"]
+#
 # ## Optional TLS Config
 # # tls_ca = "/etc/telegraf/ca.pem"
 # # tls_cert = "/etc/telegraf/cert.pem"
@@ -470,6 +516,15 @@
 # ## Kafka topic for producer messages
 # topic = "telegraf"
 #
+# ## Optional Client id
+# # client_id = "Telegraf"
+#
+# ## Set the minimal supported Kafka version. Setting this enables the use of new
+# ## Kafka features and APIs. Of particular interest, lz4 compression
+# ## requires at least version 0.10.0.0.
+# ## ex: version = "1.1.0"
+# # version = ""
+#
 # ## Optional topic suffix configuration.
 # ## If the section is omitted, no suffix is used.
 # ## Following topic suffix methods are supported:
@@ -501,11 +556,19 @@
 # ## ie, if this tag exists, its value will be used as the routing key
 # routing_tag = "host"
 #
+# ## Static routing key. Used when no routing_tag is set or as a fallback
+# ## when the tag specified in routing_tag is not found. If set to "random",
+# ## a random value will be generated for each message.
+# ## ex: routing_key = "random"
+# ## routing_key = "telegraf"
+# # routing_key = ""
+#
 # ## CompressionCodec represents the various compression codecs recognized by
 # ## Kafka in messages.
 # ## 0 : No compression
 # ## 1 : Gzip compression
 # ## 2 : Snappy compression
+# ## 3 : LZ4 compression
 # # compression_codec = 0
 #
 # ## RequiredAcks is used in Produce Requests to tell the broker how many
@@ -528,6 +591,10 @@
 # ## until the next flush.
 # # max_retry = 3
 #
+# ## The maximum permitted size of a message. Should be set equal to or
+# ## smaller than the broker's 'message.max.bytes'.
+# # max_message_bytes = 1000000
+#
 # ## Optional TLS Config
 # # tls_ca = "/etc/telegraf/ca.pem"
 # # tls_cert = "/etc/telegraf/cert.pem"
@@ -566,6 +633,12 @@
 # #profile = ""
 # #shared_credential_file = ""
 #
+# ## Endpoint to make request against, the correct endpoint is automatically
+# ## determined and this option should only be set if you wish to override the
+# ## default.
+# ## ex: endpoint_url = "http://localhost:8000"
+# # endpoint_url = ""
+#
 # ## Kinesis StreamName must exist prior to starting telegraf.
 # streamname = "StreamName"
 # ## DEPRECATED: PartitionKey as used for sharding data.
@@ -721,11 +794,11 @@
 #
 # ## Number of data points to send to OpenTSDB in Http requests.
 # ## Not used with telnet API.
-# httpBatchSize = 50
+# http_batch_size = 50
 #
 # ## URI Path for Http requests to OpenTSDB.
 # ## Used in cases where OpenTSDB is located behind a reverse proxy.
-# httpPath = "/api/put"
+# http_path = "/api/put"
 #
 # ## Debug true - Prints OpenTSDB communication
 # debug = false
@@ -737,29 +810,33 @@
 # # Configuration for the Prometheus client to spawn
 # [[outputs.prometheus_client]]
 # ## Address to listen on
-# # listen = ":9273"
+# listen = ":9273"
 #
-# ## Use TLS
-# #tls_cert = "/etc/ssl/telegraf.crt"
-# #tls_key = "/etc/ssl/telegraf.key"
+# ## Use HTTP Basic Authentication.
+# # basic_username = "Foo"
+# # basic_password = "Bar"
 #
-# ## Use http basic authentication
-# #basic_username = "Foo"
-# #basic_password = "Bar"
+# ## If set, the IP Ranges which are allowed to access metrics.
+# ## ex: ip_range = ["192.168.0.0/24", "192.168.1.0/30"]
+# # ip_range = []
 #
-# ## IP Ranges which are allowed to access metrics
-# #ip_range = ["192.168.0.0/24", "192.168.1.0/30"]
+# ## Path to publish the metrics on.
+# # path = "/metrics"
 #
-# ## Interval to expire metrics and not deliver to prometheus, 0 == no expiration
+# ## Expiration interval for each metric. 0 == no expiration
 # # expiration_interval = "60s"
 #
 # ## Collectors to enable, valid entries are "gocollector" and "process".
 # ## If unset, both are enabled.
-# collectors_exclude = ["gocollector", "process"]
+# # collectors_exclude = ["gocollector", "process"]
 #
-# # Send string metrics as Prometheus labels.
-# # Unless set to false all string metrics will be sent as labels.
-# string_as_label = true
+# ## Send string metrics as Prometheus labels.
+# ## Unless set to false all string metrics will be sent as labels.
+# # string_as_label = true
+#
+# ## If set, enable TLS with the given certificate.
+# # tls_cert = "/etc/ssl/telegraf.crt"
+# # tls_key = "/etc/ssl/telegraf.key"


 # # Configuration for the Riemann server to send metrics to
@@ -913,6 +990,27 @@
 # float = []


+# # Map enum values according to given table.
+# [[processors.enum]]
+# [[processors.enum.fields]]
+# ## Name of the field to map
+# source = "name"
+#
+# ## Destination field to be used for the mapped value. By default the source
+# ## field is used, overwriting the original value.
+# # destination = "mapped"
+#
+# ## Default value to be used for all values not contained in the mapping
+# ## table. When unset, the unmodified value for the field will be used if no
+# ## match is found.
+# # default = 0
+#
+# ## Table of mappings
+# [processors.enum.fields.value_mappings]
+# value1 = 1
+# value2 = 2
+
+
 # # Apply metric modifications using override semantics.
 # [[processors.override]]
 # ## All modifications on inputs and aggregators can be overridden:
@@ -925,6 +1023,25 @@
 # # additional_tag = "tag_value"


+# # Parse a value in a specified field/tag(s) and add the result as a new metric
+# [[processors.parser]]
+# ## The name of the fields whose value will be parsed.
+# parse_fields = []
+#
+# ## If true, incoming metrics are not emitted.
+# drop_original = false
+#
+# ## If set to override, emitted metrics will be merged by overriding the
+# ## original metric using the newly parsed metrics.
+# merge = "override"
+#
+# ## The dataformat used to parse the fields
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+
+
 # # Print all metrics that pass through this filter.
 # [[processors.printer]]
@@ -959,6 +1076,67 @@
 # # result_key = "search_category"


+# # Rename measurements, tags, and fields that pass through this filter.
+# [[processors.rename]]
+# ## Measurement, tag, and field renamings are stored in separate sub-tables.
+# ## Specify one sub-table per rename operation.
+# # [[processors.rename.measurement]]
+# # ## measurement to change
+# # from = "kilobytes_per_second"
+# # to = "kbps"
+#
+# # [[processors.rename.tag]]
+# # ## tag to change
+# # from = "host"
+# # to = "hostname"
+#
+# # [[processors.rename.field]]
+# # ## field to change
+# # from = "lower"
+# # to = "min"
+#
+# # [[processors.rename.field]]
+# # ## field to change
+# # from = "upper"
+# # to = "max"
+
+
+# # Perform string processing on tags, fields, and measurements
+# [[processors.strings]]
+# ## Convert a tag value to uppercase
+# # [[processors.strings.uppercase]]
+# # tag = "method"
+#
+# ## Convert a field value to lowercase and store in a new field
+# # [[processors.strings.lowercase]]
+# # field = "uri_stem"
+# # dest = "uri_stem_normalised"
+#
+# ## Trim leading and trailing whitespace using the default cutset
+# # [[processors.strings.trim]]
+# # field = "message"
+#
+# ## Trim leading characters in cutset
+# # [[processors.strings.trim_left]]
+# # field = "message"
+# # cutset = "\t"
+#
+# ## Trim trailing characters in cutset
+# # [[processors.strings.trim_right]]
+# # field = "message"
+# # cutset = "\r\n"
+#
+# ## Trim the given prefix from the field
+# # [[processors.strings.trim_prefix]]
+# # field = "my_value"
+# # prefix = "my_"
+#
+# ## Trim the given suffix from the field
+# # [[processors.strings.trim_suffix]]
+# # field = "read_count"
+# # suffix = "_count"
+
+
 # # Print all metrics that pass through this filter.
 # [[processors.topk]]
 # ## How many seconds between aggregations
@@ -1060,6 +1238,18 @@
 # drop_original = false


+# # Count the occurrence of values in fields.
+# [[aggregators.valuecounter]]
+# ## General Aggregator Arguments:
+# ## The period on which to flush & clear the aggregator.
+# period = "30s"
+# ## If true, the original metric will be dropped by the
+# ## aggregator and will not get sent to the output plugins.
+# drop_original = false
+# ## The fields for which the values will be counted
+# fields = []
+
+
 ###############################################################################
 #                            INPUT PLUGINS                                    #
 ###############################################################################
@@ -1139,6 +1329,31 @@
 # no configuration


+# # Gather ActiveMQ metrics
+# [[inputs.activemq]]
+# ## Required ActiveMQ Endpoint
+# # server = "192.168.50.10"
+#
+# ## Required ActiveMQ port
+# # port = 8161
+#
+# ## Credentials for basic HTTP authentication
+# # username = "admin"
+# # password = "admin"
+#
+# ## Required ActiveMQ webadmin root path
+# # webadmin = "admin"
+#
+# ## Maximum time to receive response.
+# # response_timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+
+
 # # Read stats from aerospike server(s)
 # [[inputs.aerospike]]
 # ## Aerospike servers to connect to (with port)
@@ -1349,6 +1564,12 @@
 # #profile = ""
 # #shared_credential_file = ""
 #
+# ## Endpoint to make request against, the correct endpoint is automatically
+# ## determined and this option should only be set if you wish to override the
+# ## default.
+# ## ex: endpoint_url = "http://localhost:8000"
+# # endpoint_url = ""
+#
 # # The minimum period for Cloudwatch metrics is 1 minute (60s). However not all
 # # metrics are made available to the 1 minute period. Some are collected at
 # # 3 minute, 5 minute, or larger intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring.
@@ -1385,7 +1606,9 @@
 # #[[inputs.cloudwatch.metrics]]
 # # names = ["Latency", "RequestCount"]
 # #
-# # ## Dimension filters for Metric (optional)
+# # ## Dimension filters for Metric. These are optional; however, all dimensions
+# # ## defined for the metric names must be specified in order to retrieve
+# # ## the metric statistics.
 # # [[inputs.cloudwatch.metrics.dimensions]]
 # # name = "LoadBalancerName"
 # # value = "p-example"
@@ -1455,7 +1678,7 @@
 # # Read CouchDB Stats from one or more servers
 # [[inputs.couchdb]]
 # ## Works with CouchDB stats endpoints out of the box
-# ## Multiple HOSTs from which to read CouchDB stats:
+# ## Multiple Hosts from which to read CouchDB stats:
 # hosts = ["http://localhost:8086/_stats"]
@@ -1685,6 +1908,48 @@
 # # timeout = "5s"


+# # Reload and gather from file[s] on telegraf's interval.
+# [[inputs.file]]
+# ## Files to parse each interval.
+# ## These accept standard unix glob matching rules, but with the addition of
+# ## ** as a "super asterisk". ie:
+# ## /var/log/**.log -> recursively find all .log files in /var/log
+# ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log
+# ## /var/log/apache.log -> only read the apache log file
+# files = ["/var/log/apache/access.log"]
+#
+# ## The dataformat to be read from files
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+
+
+# # Count files in a directory
+# [[inputs.filecount]]
+# ## Directory to gather stats about.
+# directory = "/var/cache/apt/archives"
+#
+# ## Only count files that match the name pattern. Defaults to "*".
+# name = "*.deb"
+#
+# ## Count files in subdirectories. Defaults to true.
+# recursive = false
+#
+# ## Only count regular files. Defaults to true.
+# regular_only = true
+#
+# ## Only count files that are at least this size in bytes. If size is
+# ## a negative number, only count files that are smaller than the
+# ## absolute value of size. Defaults to 0.
+# size = 0
+#
+# ## Only count files that have not been touched for at least this
+# ## duration. If mtime is negative, only count files that have been
+# ## touched in this duration. Defaults to "0s".
+# mtime = "0s"
+
+
 # # Read stats about given file(s)
 # [[inputs.filestat]]
 # ## Files to gather stats about.
@@ -1919,6 +2184,29 @@
 # # apiVersion = "v1"


+# # Gather Icinga2 status
+# [[inputs.icinga2]]
+# ## Required Icinga2 server address (default: "https://localhost:5665")
+# # server = "https://localhost:5665"
+#
+# ## Required Icinga2 object type ("services" or "hosts", default "services")
+# # object_type = "services"
+#
+# ## Credentials for basic HTTP authentication
+# # username = "admin"
+# # password = "admin"
+#
+# ## Maximum time to receive response.
+# # response_timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = true
+
+
 # # Read InfluxDB-formatted JSON metrics from one or more HTTP endpoints
 # [[inputs.influxdb]]
 # ## Works with InfluxDB debug endpoints out of the box,
@@ -1978,7 +2266,7 @@
 #
 # ## Timeout for the ipmitool command to complete
 # timeout = "20s"
-
+#
 # ## Schema Version: (Optional, defaults to version 1)
 # metric_version = 2
@@ -2004,8 +2292,6 @@
 # ## Setting 'use_lock' to true runs iptables with the "-w" option.
 # ## Adjust your sudo settings appropriately if using this option ("iptables -wnvl")
 # use_lock = false
-# ## Define an alternate executable, such as "ip6tables". Default is "iptables".
-# # binary = "ip6tables"
 # ## defines the table to monitor:
 # table = "filter"
 # ## defines the chains to monitor.
@@ -2164,6 +2450,26 @@
 # # no configuration


+# # Read status information from one or more Kibana servers
+# [[inputs.kibana]]
+# ## specify a list of one or more Kibana servers
+# servers = ["http://localhost:5601"]
+#
+# ## Timeout for HTTP requests
+# timeout = "5s"
+#
+# ## HTTP Basic Auth credentials
+# # username = "username"
+# # password = "pa$$word"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
 # # Read metrics from the kubernetes kubelet api
 # [[inputs.kubernetes]]
 # ## URL for the kubelet
@@ -2348,7 +2654,7 @@
 # ## gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST
 # gather_process_list = true
 # #
-# ## gather thread state counts from INFORMATION_SCHEMA.USER_STATISTICS
+# ## gather user statistics from INFORMATION_SCHEMA.USER_STATISTICS
 # gather_user_statistics = true
 # #
 # ## gather auto_increment columns and max values from information schema
@@ -2499,11 +2805,11 @@
 # # Pulls statistics from nvidia GPUs attached to the host
 # [[inputs.nvidia_smi]]
-# ## Optional: path to nvidia-smi binary, defaults to $PATH via exec.LookPath
-# # bin_path = /usr/bin/nvidia-smi
+# ## Optional: path to nvidia-smi binary, defaults to $PATH via exec.LookPath
+# # bin_path = "/usr/bin/nvidia-smi"
 #
-# ## Optional: timeout for GPU polling
-# # timeout = 5s
+# ## Optional: timeout for GPU polling
+# # timeout = "5s"
@@ -2514,7 +2820,7 @@
 # # OpenLDAP cn=Monitor plugin
 # [[inputs.openldap]]
 # # ldaps, starttls, or no encryption. default is an empty string, disabling all encryption.
 # # note that port will likely need to be changed to 636 for ldaps
 # # valid options: "" | "starttls" | "ldaps"
-# ssl = ""
+# tls = ""
 #
 # # skip peer certificate verification. Default is false.
 # insecure_skip_verify = false
@@ -2591,20 +2897,23 @@
 # # Ping given url(s) and return statistics
 # [[inputs.ping]]
-# ## NOTE: this plugin forks the ping command. You may need to set capabilities
-# ## via setcap cap_net_raw+p /bin/ping
-# #
 # ## List of urls to ping
-# urls = ["www.google.com"] # required
-# ## number of pings to send per collection (ping -c <COUNT>)
+# urls = ["example.org"]
+#
+# ## Number of pings to send per collection (ping -c <COUNT>)
 # # count = 1
-# ## interval, in s, at which to ping. 0 == default (ping -i <PING_INTERVAL>)
+#
+# ## Interval, in s, at which to ping. 0 == default (ping -i <PING_INTERVAL>)
+# ## Not available in Windows.
 # # ping_interval = 1.0
-# ## per-ping timeout, in s. 0 == no timeout (ping -W <TIMEOUT>)
+#
+# ## Per-ping timeout, in s. 0 == no timeout (ping -W <TIMEOUT>)
 # # timeout = 1.0
-# ## total-ping deadline, in s. 0 == no deadline (ping -w <DEADLINE>)
+#
+# ## Total-ping deadline, in s. 0 == no deadline (ping -w <DEADLINE>)
 # # deadline = 10
-# ## interface or source address to send ping from (ping -I <INTERFACE/SRC_ADDR>)
+#
+# ## Interface or source address to send ping from (ping -I <INTERFACE/SRC_ADDR>)
 # ## on Darwin and Freebsd only source address possible: (ping -S <SRC_ADDR>)
 # # interface = ""
@@ -2748,6 +3057,13 @@
 # ## If no servers are specified, then localhost is used as the host.
 # ## If no port is specified, 6379 is used
 # servers = ["tcp://localhost:6379"]
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = true


 # # Read metrics from one or many RethinkDB servers
@@ -3119,6 +3435,22 @@
 # # virtual_servers = [1]


+# # Read Tengine's basic status information (ngx_http_reqstat_module)
+# [[inputs.tengine]]
+# # An array of Tengine reqstat module URIs to gather stats.
+# urls = ["http://127.0.0.1/us"]
+#
+# # HTTP response timeout (default: 5s)
+# # response_timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.cer"
+# # tls_key = "/etc/telegraf/key.key"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
 # # Gather metrics from the Tomcat server status page.
 # [[inputs.tomcat]]
 # ## URL of the Tomcat server status
@@ -3195,6 +3527,23 @@
 # # instance_name = instanceName


+# # Reads metrics from an SSL certificate
+# [[inputs.x509_cert]]
+# ## List certificate sources
+# sources = ["/etc/ssl/certs/ssl-cert-snakeoil.pem", "tcp://example.org:443"]
+#
+# ## Timeout for SSL connection
+# # timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+#
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
 # # Read metrics of ZFS from arcstats, zfetchstats, vdev_cache_stats, and pools
 # [[inputs.zfs]]
 # ## ZFS kstat path. Ignored on FreeBSD
@@ -3268,10 +3617,13 @@
 # # exchange_arguments = { }
 # # exchange_arguments = {"hash_property" = "timestamp"}
 #
-# ## AMQP queue name
+# ## AMQP queue name.
 # queue = "telegraf"
 #
-# ## Binding Key
+# ## AMQP queue durability can be "transient" or "durable".
+# queue_durability = "durable"
+#
+# ## Binding Key.
 # binding_key = "#"
 #
 # ## Maximum number of messages server should give to the worker.
@@ -3404,6 +3756,15 @@
 # ## topic(s) to consume
 # topics = ["telegraf"]
 #
+# ## Optional Client id
+# # client_id = "Telegraf"
+#
+# ## Set the minimal supported Kafka version. Setting this enables the use of new
+# ## Kafka features and APIs. Of particular interest, lz4 compression
+# ## requires at least version 0.10.0.0.
+# ## ex: version = "1.1.0"
+# # version = ""
+#
 # ## Optional TLS Config
 # # tls_ca = "/etc/telegraf/ca.pem"
 # # tls_cert = "/etc/telegraf/cert.pem"
@@ -3428,7 +3789,7 @@
 #
 # ## Maximum length of a message to consume, in bytes (default 0/unlimited);
 # ## larger messages are dropped
-# max_message_len = 65536
+# max_message_len = 1000000


 # # Read metrics from Kafka topic(s)
@@ -3457,6 +3818,9 @@
 # # Stream and parse log file(s).
 # [[inputs.logparser]]
+# ## DEPRECATED: The 'logparser' plugin is deprecated in 1.8. Please use the
+# ## 'tail' plugin with the grok data_format as a replacement.
+#
 # ## Log files to parse.
 # ## These accept standard unix glob matching rules, but with the addition of
 # ## ** as a "super asterisk". ie:
@@ -3590,6 +3954,19 @@
 # data_format = "influx"


+# # Read metrics from one or many pgbouncer servers
+# [[inputs.pgbouncer]]
+# ## specify address via a url matching:
+# ## postgres://[pqgotest[:password]]@localhost[/dbname]\
+# ## ?sslmode=[disable|verify-ca|verify-full]
+# ## or a simple string:
+# ## host=localhost user=pqotest password=... sslmode=... dbname=app_production
+# ##
+# ## All connection parameters are optional.
+# ##
+# address = "host=localhost user=pgbouncer sslmode=disable"
+
+
 # # Read metrics from one or many postgresql servers
 # [[inputs.postgresql]]
 # ## specify address via a url matching:
@@ -3820,9 +4197,9 @@
 # ## Only applies to stream sockets (e.g. TCP).
 # # max_connections = 1024
 #
-# ## Read timeout (default = 500ms).
+# ## Read timeout is the maximum time allowed for reading a single message (default = 5s).
 # ## 0 means unlimited.
-# # read_timeout = 500ms
+# # read_timeout = "5s"
 #
 # ## Whether to parse in best effort mode or not (default = false).
 # ## By default best effort parsing is off.
diff --git a/scripts/build.py b/scripts/build.py
index bfaba5a8a..675a4c3c0 100755
--- a/scripts/build.py
+++ b/scripts/build.py
@@ -95,7 +95,7 @@ supported_packages = {
     "freebsd": [ "tar" ]
 }

-next_version = '1.8.0'
+next_version = '1.9.0'

################
#### Telegraf Functions
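
--
Note (not part of the patch): two of the comment blocks above point at companion
plugins without showing them, so minimal sketches follow. Option names are taken
from the 1.8-era plugin docs and are assumptions to check against the shipped
README files.

The new CloudWatch `write_statistics` flag only pays off when count/min/max/sum
fields are present; the basicstats aggregator referenced in the comment can
produce them (assuming its `stats` list accepts "sum" in this release):

  [[aggregators.basicstats]]
    ## Aggregate over each 30s window; keep the raw metrics flowing as well.
    period = "30s"
    drop_original = false
    ## Emit exactly the fields a CloudWatch statistic set needs.
    stats = ["count", "min", "max", "sum"]

  [[outputs.cloudwatch]]
    region = "us-east-1"
    namespace = "InfluxData/Telegraf"
    ## Send statistic sets instead of raw values where the fields exist.
    write_statistics = true

For the logparser deprecation, the replacement suggested above pairs the tail
input with the grok data format, e.g. (pattern name assumed from the grok docs):

  [[inputs.tail]]
    files = ["/var/log/apache/access.log"]
    from_beginning = false
    ## Parse each tailed line with the grok pattern(s) below.
    data_format = "grok"
    grok_patterns = ["%{COMBINED_LOG_FORMAT}"]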