From e376228a1a17526c8611fcf87c602a8bcff7f85b Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Wed, 18 Mar 2020 13:49:47 -0700
Subject: [PATCH] Update etc/telegraf.conf

---
 etc/telegraf.conf | 487 +++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 442 insertions(+), 45 deletions(-)

diff --git a/etc/telegraf.conf b/etc/telegraf.conf
index 9621b59f1..bc012ee30 100644
--- a/etc/telegraf.conf
+++ b/etc/telegraf.conf
@@ -131,6 +131,13 @@
   ## the default retention policy.  Only takes effect when using HTTP.
   # retention_policy = ""
 
+  ## The value of this tag will be used to determine the retention policy.  If this
+  ## tag is not set, the 'retention_policy' option is used as the default.
+  # retention_policy_tag = ""
+
+  ## If true, the 'retention_policy_tag' will not be removed from the metric.
+  # exclude_retention_policy_tag = false
+
   ## Write consistency (clusters only), can be: "any", "one", "quorum", "all".
   ## Only takes effect when using HTTP.
   # write_consistency = "any"
@@ -577,10 +584,15 @@
 #   # insecure_skip_verify = false
 
 
-# # Send telegraf metrics to graylog(s)
+# # Send telegraf metrics to graylog
 # [[outputs.graylog]]
 #   ## UDP endpoint for your graylog instance.
-#   servers = ["127.0.0.1:12201", "192.168.1.1:12201"]
+#   servers = ["127.0.0.1:12201"]
+#
+#   ## The field to use as the GELF short_message; if unset, the static string
+#   ## "telegraf" will be used.
+#   ## example: short_message_field = "message"
+#   # short_message_field = ""
 
 
 # # Configurable HTTP health check resource based on metrics
@@ -742,6 +754,13 @@
 #   ## Kafka topic for producer messages
 #   topic = "telegraf"
 #
+#   ## The value of this tag will be used as the topic.  If not set, the 'topic'
+#   ## option is used.
+#   # topic_tag = ""
+#
+#   ## If true, the 'topic_tag' will be removed from the metric.
+#   # exclude_topic_tag = false
+#
 #   ## Optional Client id
 #   # client_id = "Telegraf"
 #
@@ -778,13 +797,21 @@
 #   #  keys = ["foo", "bar"]
 #   #  separator = "_"
 #
-#   ## Telegraf tag to use as a routing key
-#   ## ie, if this tag exists, its value will be used as the routing key
+#   ## The routing tag specifies a tag key on the metric whose value is used as
+#   ## the message key.  The message key is used to determine which partition to
+#   ## send the message to.  This tag is preferred over the routing_key option.
 #   routing_tag = "host"
 #
-#   ## Static routing key.  Used when no routing_tag is set or as a fallback
-#   ## when the tag specified in routing tag is not found.  If set to "random",
-#   ## a random value will be generated for each message.
+#   ## The routing key is set as the message key and used to determine which
+#   ## partition to send the message to.  This value is only used when no
+#   ## routing_tag is set or as a fallback when the tag specified in routing_tag
+#   ## is not found.
+#   ##
+#   ## If set to "random", a random value will be generated for each message.
+#   ##
+#   ## When unset, no message key is added and each message is routed to a random
+#   ## partition.
+#   ##
 #   ## ex: routing_key = "random"
 #   ##     routing_key = "telegraf"
 #   # routing_key = ""
@@ -980,9 +1007,14 @@
 # [[outputs.nats]]
 #   ## URLs of NATS servers
 #   servers = ["nats://localhost:4222"]
+#
 #   ## Optional credentials
 #   # username = ""
 #   # password = ""
+#
+#   ## Optional NATS 2.0 and NATS NGS compatible user credentials
+#   # credentials = "/etc/telegraf/nats.creds"
+#
 #   ## NATS subject for producer messages
 #   subject = "telegraf"
 #
@@ -1266,6 +1298,34 @@
 #   # default_appname = "Telegraf"
 
 
+# # Write metrics to Warp 10
+# [[outputs.warp10]]
+#   ## Prefix to add to the measurement.
+#   prefix = "telegraf."
+#
+#   ## URL of the Warp 10 server
+#   warp_url = "http://localhost:8080"
+#
+#   ## Write token to access your app on Warp 10
+#   token = "Token"
+#
+#   ## Warp 10 query timeout
+#   # timeout = "15s"
+#
+#   ## Print Warp 10 error body
+#   # print_error_body = false
+#
+#   ## Max string error size
+#   # max_string_error_size = 511
+#
+#   ## Optional TLS Config
+#   # tls_ca = "/etc/telegraf/ca.pem"
+#   # tls_cert = "/etc/telegraf/cert.pem"
+#   # tls_key = "/etc/telegraf/key.pem"
+#   ## Use TLS but skip chain & host verification
+#   # insecure_skip_verify = false
+
+
 # # Configuration for Wavefront server to send metrics to
 # [[outputs.wavefront]]
 #   ## Url for Wavefront Direct Ingestion or using HTTP with Wavefront Proxy
@@ -1341,6 +1401,7 @@
 #   ## select the keys to convert.  The array may contain globs.
 #   ##   <target-type> = [<tag-key>...]
 #   [processors.converter.tags]
+#     measurement = []
 #     string = []
 #     integer = []
 #     unsigned = []
@@ -1353,6 +1414,7 @@
 #   ## select the keys to convert.  The array may contain globs.
 #   ##   <target-type> = [<field-key>...]
 #   [processors.converter.fields]
+#     measurement = []
 #     tag = []
 #     string = []
 #     integer = []
@@ -1369,6 +1431,20 @@
 #   ## Date format string, must be a representation of the Go "reference time"
 #   ## which is "Mon Jan 2 15:04:05 -0700 MST 2006".
 #   date_format = "Jan"
+#
+#   ## Offset duration added to the date string when writing the new tag.
+#   # date_offset = "0s"
+#
+#   ## Timezone to use when creating the tag.  This can be set to one of
+#   ## "UTC", "Local", or to a location name in the IANA Time Zone database.
+#   ## example: timezone = "America/Los_Angeles"
+#   # timezone = "UTC"
+
+
+# # Filter metrics with repeating field values
+# [[processors.dedup]]
+#   ## Maximum time to suppress output
+#   dedup_interval = "600s"
 
 
 # # Map enum values according to given table.
@@ -1475,6 +1551,20 @@
 # [[processors.rename]]
 
 
+# # Add the S2 Cell ID as a tag based on latitude and longitude fields
+# [[processors.s2geo]]
+#   ## The name of the lat and lon fields containing WGS-84 latitude and
+#   ## longitude in decimal degrees.
+#   # lat_field = "lat"
+#   # lon_field = "lon"
+#
+#   ## New tag to create
+#   # tag_key = "s2_cell_id"
+#
+#   ## Cell level (see https://s2geometry.io/resources/s2cell_statistics.html)
+#   # cell_level = 9
+
+
 # # Perform string processing on tags, fields, and measurements
 # [[processors.strings]]
 #   ## Convert a tag value to uppercase
@@ -1486,6 +1576,10 @@
 #   #   field = "uri_stem"
 #   #   dest = "uri_stem_normalised"
 #
+#   ## Convert a field value to titlecase
+#   # [[processors.strings.titlecase]]
+#   #   field = "status"
+#
 #   ## Trim leading and trailing whitespace using the default cutset
 #   # [[processors.strings.trim]]
 #   #   field = "message"
@@ -1535,6 +1629,17 @@
 #   keep = ["foo", "bar", "baz"]
 
 
+# # Uses a Go template to create a new tag
+# [[processors.template]]
+#   ## Tag to set with the output of the template.
+#   tag = "topic"
+#
+#   ## Go template used to create the tag value.  In order to ease TOML
+#   ## escaping requirements, you may wish to use single quotes around the
+#   ## template string.
+#   template = '{{ .Tag "hostname" }}.{{ .Tag "level" }}'
+
+
 # # Print all metrics that pass through this filter.
 # [[processors.topk]]
 #   ## How many seconds between aggregations
@@ -1636,16 +1741,20 @@
 #   ## of accumulating the results.
 #   reset = false
 #
+#   ## Whether bucket values should be accumulated.  If set to false, the "gt" tag will be added.
+#   ## Defaults to true.
+#   cumulative = true
+#
 #   ## Example config that aggregates all fields of the metric.
 #   # [[aggregators.histogram.config]]
-#   #   ## The set of buckets.
+#   #   ## Right borders of buckets (with +Inf implicitly added).
 #   #   buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0]
 #   #   ## The name of metric.
 #   #   measurement_name = "cpu"
 
 #   ## Example config that aggregates only specific fields of the metric.
 #   # [[aggregators.histogram.config]]
-#   #   ## The set of buckets.
+#   #   ## Right borders of buckets (with +Inf implicitly added).
 #   #   buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
 #   #   ## The name of metric.
 #   #   measurement_name = "diskio"
@@ -2123,7 +2232,7 @@
 # # Gather health check statuses from services registered in Consul
 # [[inputs.consul]]
 #   ## Consul server address
-#   # address = "localhost"
+#   # address = "localhost:8500"
 #
 #   ## URI scheme for the Consul server, one of "http", "https"
 #   # scheme = "http"
@@ -2823,6 +2932,11 @@
 #   #   insecure_skip_verify = true
 
 
+# # Gets counters from all InfiniBand cards and ports installed
+# [[inputs.infiniband]]
+#   # no configuration
+
+
 # # Read InfluxDB-formatted JSON metrics from one or more HTTP endpoints
 # [[inputs.influxdb]]
 #   ## Works with InfluxDB debug endpoints out of the box,
@@ -3136,7 +3250,7 @@
 
 # # Read status information from one or more Kibana servers
 # [[inputs.kibana]]
-#   ## specify a list of one or more Kibana servers
+#   ## Specify a list of one or more Kibana servers
 #   servers = ["http://localhost:5601"]
 #
 #   ## Timeout for HTTP requests
@@ -3388,6 +3502,80 @@
 #   #   tagdrop = ["server"]
 
 
+# # Retrieve data from MODBUS slave devices
+# [[inputs.modbus]]
+#   ## Connection Configuration
+#   ##
+#   ## The plugin supports connections to PLCs via MODBUS/TCP or
+#   ## via serial line communication in binary (RTU) or readable (ASCII) encoding
+#   ##
+#   ## Device name
+#   name = "Device"
+#
+#   ## Slave ID - addresses a MODBUS device on the bus
+#   ## Range: 0 - 255 [0 = broadcast; 248 - 255 = reserved]
+#   slave_id = 1
+#
+#   ## Timeout for each request
+#   timeout = "1s"
+#
+#   ## TCP - connect via Modbus/TCP
+#   controller = "tcp://localhost:502"
+#
+#   ## Serial (RS485; RS232)
+#   # controller = "file:///dev/ttyUSB0"
+#   # baud_rate = 9600
+#   # data_bits = 8
+#   # parity = "N"
+#   # stop_bits = 1
+#   # transmission_mode = "RTU"
+#
+#
+#   ## Measurements
+#   ##
+#
+#   ## Digital Variables, Discrete Inputs and Coils
+#   ##   name    - the variable name
+#   ##   address - variable address
+#
+#   discrete_inputs = [
+#     { name = "start",          address = [0]},
+#     { name = "stop",           address = [1]},
+#     { name = "reset",          address = [2]},
+#     { name = "emergency_stop", address = [3]},
+#   ]
+#   coils = [
+#     { name = "motor1_run",  address = [0]},
+#     { name = "motor1_jog",  address = [1]},
+#     { name = "motor1_stop", address = [2]},
+#   ]
+#
+#   ## Analog Variables, Input Registers and Holding Registers
+#   ##   name       - the variable name
+#   ##   byte_order - the ordering of bytes
+#   ##     |---AB, ABCD - Big Endian
+#   ##     |---BA, DCBA - Little Endian
+#   ##     |---BADC     - Mid-Big Endian
+#   ##     |---CDAB     - Mid-Little Endian
+#   ##   data_type  - UINT16, INT16, INT32, UINT32, FLOAT32, FLOAT32-IEEE (the IEEE 754 binary representation)
+#   ##   scale      - the final numeric variable representation
+#   ##   address    - variable address
+#
+#   holding_registers = [
+#     { name = "power_factor", byte_order = "AB",   data_type = "FLOAT32", scale=0.01,  address = [8]},
+#     { name = "voltage",      byte_order = "AB",   data_type = "FLOAT32", scale=0.1,   address = [0]},
+#     { name = "energy",       byte_order = "ABCD", data_type = "FLOAT32", scale=0.001, address = [5,6]},
+#     { name = "current",      byte_order = "ABCD", data_type = "FLOAT32", scale=0.001, address = [1,2]},
+#     { name = "frequency",    byte_order = "AB",   data_type = "FLOAT32", scale=0.1,   address = [7]},
+#     { name = "power",        byte_order = "ABCD", data_type = "FLOAT32", scale=0.1,   address = [3,4]},
+#   ]
+#   input_registers = [
+#     { name = "tank_level",  byte_order = "AB",   data_type = "INT16", scale=1.0, address = [0]},
+#     { name = "tank_ph",     byte_order = "AB",   data_type = "INT16", scale=1.0, address = [1]},
+#     { name = "pump1_speed", byte_order = "ABCD", data_type = "INT32", scale=1.0, address = [3,4]},
+#   ]
+
+
 # # Read metrics from one or many MongoDB servers
 # [[inputs.mongodb]]
 #   ## An array of URLs of the form:
@@ -3415,6 +3603,26 @@
 #   #   insecure_skip_verify = false
 
 
+# # Read metrics and status information about processes managed by Monit
+# [[inputs.monit]]
+#   ## Monit HTTPD address
+#   address = "http://127.0.0.1:2812"
+#
+#   ## Username and Password for Monit
+#   # username = ""
+#   # password = ""
+#
+#   ## Amount of time allowed to complete the HTTP request
+#   # timeout = "5s"
+#
+#   ## Optional TLS Config
+#   # tls_ca = "/etc/telegraf/ca.pem"
+#   # tls_cert = "/etc/telegraf/cert.pem"
+#   # tls_key = "/etc/telegraf/key.pem"
+#   ## Use TLS but skip chain & host verification
+#   # insecure_skip_verify = false
+
+
 # # Aggregates the contents of multiple files into a single point
 # [[inputs.multifile]]
 #   ## Base directory where telegraf will look for files.
@@ -3772,7 +3980,7 @@
 #   # timeout = "5ms"
 
 
-# # A plugin to collect stats from Opensmtpd - a validating, recursive, and caching DNS resolver
+# # A plugin to collect stats from OpenSMTPD - a free implementation of the server-side SMTP protocol
 # [[inputs.opensmtpd]]
 #   ## If running as a restricted user you can prepend sudo for additional access:
 #   #use_sudo = false
@@ -4672,6 +4880,10 @@
 #   ## Timeout for SSL connection
 #   # timeout = "5s"
 #
+#   ## Pass a different name into the TLS request (Server Name Indication)
+#   ## example: server_name = "myhost.example.org"
+#   # server_name = ""
+#
 #   ## Optional TLS Config
 #   # tls_ca = "/etc/telegraf/ca.pem"
 #   # tls_cert = "/etc/telegraf/cert.pem"
@@ -4905,6 +5117,73 @@
 #   ifstats = "ietf-interfaces:interfaces-state/interface/statistics"
 
 
+# # Read metrics from one or many ClickHouse servers
+# [[inputs.clickhouse]]
+#   ## Username for authorization on ClickHouse server
+#   ## example: username = "default"
+#   username = "default"
+#
+#   ## Password for authorization on ClickHouse server
+#   ## example: password = "super_secret"
+#
+#   ## HTTP(s) timeout while getting metrics values
+#   ## The timeout includes connection time, any redirects, and reading the response body.
+#   ## example: timeout = "1s"
+#   # timeout = "5s"
+#
+#   ## List of servers for metrics scraping
+#   ## metrics are scraped via the HTTP(s) ClickHouse interface
+#   ## https://clickhouse.tech/docs/en/interfaces/http/
+#   ## example: servers = ["http://127.0.0.1:8123","https://custom-server.mdb.yandexcloud.net"]
+#   servers = ["http://127.0.0.1:8123"]
+#
+#   ## If "auto_discovery" is "true", the plugin tries to connect to all servers
+#   ## available in the cluster using the same credentials given in the "username"
+#   ## and "password" parameters, and gets the server hostname list from the
+#   ## "system.clusters" table.  See:
+#   ## - https://clickhouse.tech/docs/en/operations/system_tables/#system-clusters
+#   ## - https://clickhouse.tech/docs/en/operations/server_settings/settings/#server_settings_remote_servers
+#   ## - https://clickhouse.tech/docs/en/operations/table_engines/distributed/
+#   ## - https://clickhouse.tech/docs/en/operations/table_engines/replication/#creating-replicated-tables
+#   ## example: auto_discovery = false
+#   # auto_discovery = true
+#
+#   ## Filter cluster names in "system.clusters" when "auto_discovery" is "true";
+#   ## when this filter is present, a "WHERE cluster IN (...)" filter is applied.
+#   ## Please use only full cluster names here; regexp and glob filters are not allowed.
+#   ## For example, with "/etc/clickhouse-server/config.d/remote.xml" as follows:
+#   ## <yandex>
+#   ##  <remote_servers>
+#   ##    <my-own-cluster>
+#   ##      <shard>
+#   ##        <replica><host>clickhouse-ru-1.local</host><port>9000</port></replica>
+#   ##        <replica><host>clickhouse-ru-2.local</host><port>9000</port></replica>
+#   ##      </shard>
+#   ##      <shard>
+#   ##        <replica><host>clickhouse-eu-1.local</host><port>9000</port></replica>
+#   ##        <replica><host>clickhouse-eu-2.local</host><port>9000</port></replica>
+#   ##      </shard>
+#   ##    </my-own-cluster>
+#   ##  </remote_servers>
+#   ## </yandex>
+#   ##
+#   ## example: cluster_include = ["my-own-cluster"]
+#   # cluster_include = []
+#
+#   ## Filter cluster names in "system.clusters" when "auto_discovery" is "true";
+#   ## when this filter is present, a "WHERE cluster NOT IN (...)" filter is applied.
+#   ## example: cluster_exclude = ["my-internal-not-discovered-cluster"]
+#   # cluster_exclude = []
+#
+#   ## Optional TLS Config
+#   # tls_ca = "/etc/telegraf/ca.pem"
+#   # tls_cert = "/etc/telegraf/cert.pem"
+#   # tls_key = "/etc/telegraf/key.pem"
+#   ## Use TLS but skip chain & host verification
+#   # insecure_skip_verify = false
+
+
 # # Read metrics from Google PubSub
 # [[inputs.cloud_pubsub]]
 #   ## Required. Name of Google Cloud Platform (GCP) Project that owns
@@ -5071,9 +5350,113 @@
 #   # insecure_skip_verify = false
 
 
-# # Influx HTTP write listener
+# # Azure Event Hubs service input plugin
+# [[inputs.eventhub_consumer]]
+#   ## The default behavior is to create a new Event Hub client from environment variables.
+#   ## This requires one of the following sets of environment variables to be set:
+#   ##
+#   ## 1) Expected Environment Variables:
+#   ##    - "EVENTHUB_NAMESPACE"
+#   ##    - "EVENTHUB_NAME"
+#   ##    - "EVENTHUB_CONNECTION_STRING"
+#   ##
+#   ## 2) Expected Environment Variables:
+#   ##    - "EVENTHUB_NAMESPACE"
+#   ##    - "EVENTHUB_NAME"
+#   ##    - "EVENTHUB_KEY_NAME"
+#   ##    - "EVENTHUB_KEY_VALUE"
+#
+#   ## Uncommenting the option below will create an Event Hub client based solely on the connection string.
+#   ## This can either be the associated environment variable or hard coded directly.
+#   # connection_string = ""
+#
+#   ## Set persistence directory to a valid folder to use a file persister instead of an in-memory persister
+#   # persistence_dir = ""
+#
+#   ## Change the default consumer group
+#   # consumer_group = ""
+#
+#   ## By default the event hub receives all messages present on the broker; alternative modes can be set below.
+#   ## The timestamp should be in https://github.com/toml-lang/toml#offset-date-time format (RFC 3339).
+#   ## The options below only apply if no valid persister is read from memory or file (e.g. first run).
+#   # from_timestamp =
+#   # latest = true
+#
+#   ## Set a custom prefetch count for the receiver(s)
+#   # prefetch_count = 1000
+#
+#   ## Add an epoch to the receiver(s)
+#   # epoch = 0
+#
+#   ## Change to set a custom user agent; "telegraf" is used by default
+#   # user_agent = "telegraf"
+#
+#   ## To consume from a specific partition, set the partition_ids option.
+#   ## An empty array will result in receiving from all partitions.
+#   # partition_ids = ["0","1"]
+#
+#   ## Max undelivered messages
+#   # max_undelivered_messages = 1000
+#
+#   ## Set either option below to true to use a system property as timestamp.
+#   ## You have the choice between EnqueuedTime and IoTHubEnqueuedTime.
+#   ## It is recommended to use this setting when the data itself has no timestamp.
+#   # enqueued_time_as_ts = true
+#   # iot_hub_enqueued_time_as_ts = true
+#
+#   ## Tags or fields to create from keys present in the application property bag.
+#   ## These could for example be set by message enrichments in Azure IoT Hub.
+#   # application_property_tags = []
+#   # application_property_fields = []
+#
+#   ## Tag or field name to use for metadata
+#   ## By default all metadata is disabled
+#   # sequence_number_field = "SequenceNumber"
+#   # enqueued_time_field = "EnqueuedTime"
+#   # offset_field = "Offset"
+#   # partition_id_tag = "PartitionID"
+#   # partition_key_tag = "PartitionKey"
+#   # iot_hub_device_connection_id_tag = "IoTHubDeviceConnectionID"
+#   # iot_hub_auth_generation_id_tag = "IoTHubAuthGenerationID"
+#   # iot_hub_connection_auth_method_tag = "IoTHubConnectionAuthMethod"
+#   # iot_hub_connection_module_id_tag = "IoTHubConnectionModuleID"
+#   # iot_hub_enqueued_time_field = "IoTHubEnqueuedTime"
+#
+#   ## Data format to consume.
+#   ## Each data format has its own unique set of configuration options, read
+#   ## more about them here:
+#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+#   data_format = "influx"
+
+
+# # Run executable as long-running input plugin
+# [[inputs.execd]]
+#   ## Program to run as daemon
+#   command = ["telegraf-smartctl", "-d", "/dev/sda"]
+#
+#   ## Define how the process is signaled on each collection interval.
+#   ## Valid values are:
+#   ##   "none"    : Do not signal anything.
+#   ##               The process must output metrics by itself.
+#   ##   "STDIN"   : Send a newline on STDIN.
+#   ##   "SIGHUP"  : Send a HUP signal.  Not available on Windows.
+#   ##   "SIGUSR1" : Send a USR1 signal.  Not available on Windows.
+#   ##   "SIGUSR2" : Send a USR2 signal.  Not available on Windows.
+#   signal = "none"
+#
+#   ## Delay before the process is restarted after an unexpected termination
+#   restart_delay = "10s"
+#
+#   ## Data format to consume.
+#   ## Each data format has its own unique set of configuration options, read
+#   ## more about them here:
+#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+#   data_format = "influx"
+
+
+# # Accept metrics over InfluxDB 1.x HTTP API
 # [[inputs.http_listener]]
-#   ## Address and port to host HTTP listener on
+#   ## Address and port to host InfluxDB listener on
 #   service_address = ":8186"
 #
 #   ## maximum duration before timing out read of the request
@@ -5081,14 +5464,9 @@
 #   ## maximum duration before timing out write of the response
 #   write_timeout = "10s"
 #
-#   ## Maximum allowed http request body size in bytes.
-#   ## 0 means to use the default of 524,288,000 bytes (500 mebibytes)
-#   max_body_size = "500MiB"
-#
-#   ## Maximum line size allowed to be sent in bytes.
-#   ## 0 means to use the default of 65536 bytes (64 kibibytes)
-#   max_line_size = "64KiB"
-#
+#   ## Maximum allowed HTTP request body size in bytes.
+#   ## 0 means to use the default of 32MiB.
+#   max_body_size = "32MiB"
 #
 #   ## Optional tag name used to store the database.
 #   ## If the write has a database in the query string then it will be kept in this tag name.
@@ -5154,9 +5532,9 @@
 #   data_format = "influx"
 
 
-# # Influx HTTP write listener
+# # Accept metrics over InfluxDB 1.x HTTP API
 # [[inputs.influxdb_listener]]
-#   ## Address and port to host HTTP listener on
+#   ## Address and port to host InfluxDB listener on
 #   service_address = ":8186"
 #
 #   ## maximum duration before timing out read of the request
@@ -5164,14 +5542,9 @@
 #   ## maximum duration before timing out write of the response
 #   write_timeout = "10s"
 #
-#   ## Maximum allowed http request body size in bytes.
-#   ## 0 means to use the default of 524,288,000 bytes (500 mebibytes)
-#   max_body_size = "500MiB"
-#
-#   ## Maximum line size allowed to be sent in bytes.
-#   ## 0 means to use the default of 65536 bytes (64 kibibytes)
-#   max_line_size = "64KiB"
-#
+#   ## Maximum allowed HTTP request body size in bytes.
+#   ## 0 means to use the default of 32MiB.
+#   max_body_size = "32MiB"
 #
 #   ## Optional tag name used to store the database.
 #   ## If the write has a database in the query string then it will be kept in this tag name.
@@ -5393,6 +5766,14 @@
 #   table_name = "default"
 
 
+# # Read metrics off Arista LANZ, via socket
+# [[inputs.lanz]]
+#   ## URL to Arista LANZ endpoint
+#   servers = [
+#     "tcp://127.0.0.1:50001"
+#   ]
+
+
 # # Stream and parse log file(s).
 # [[inputs.logparser]]
 #   ## Log files to parse.
@@ -5529,6 +5910,9 @@
 #   # username = ""
 #   # password = ""
 #
+#   ## Optional NATS 2.0 and NATS NGS compatible user credentials
+#   # credentials = "/etc/telegraf/nats.creds"
+#
 #   ## Use Transport Layer Security
 #   # secure = false
 #
@@ -5595,10 +5979,10 @@
 # # Read metrics from one or many pgbouncer servers
 # [[inputs.pgbouncer]]
 #   ## specify address via a url matching:
-#   ##   postgres://[pqgotest[:password]]@host:port[/dbname]\
+#   ##   postgres://[pqgotest[:password]]@localhost[/dbname]\
 #   ##       ?sslmode=[disable|verify-ca|verify-full]
 #   ## or a simple string:
-#   ##   host=localhost port=5432 user=pqotest password=... sslmode=... dbname=app_production
+#   ##   host=localhost user=pqotest password=... sslmode=... dbname=app_production
 #   ##
 #   ## All connection parameters are optional.
 #   ##
@@ -5608,10 +5992,10 @@
 # # Read metrics from one or many postgresql servers
 # [[inputs.postgresql]]
 #   ## specify address via a url matching:
-#   ##   postgres://[pqgotest[:password]]@host:port[/dbname]\
+#   ##   postgres://[pqgotest[:password]]@localhost[/dbname]\
 #   ##       ?sslmode=[disable|verify-ca|verify-full]
 #   ## or a simple string:
-#   ##   host=localhost port=5432 user=pqotest password=... sslmode=... dbname=app_production
+#   ##   host=localhost user=pqotest password=... sslmode=... dbname=app_production
 #   ##
 #   ## All connection parameters are optional.
 #   ##
@@ -5620,7 +6004,7 @@
 #   ## connection with the server and doesn't restrict the databases we are trying
 #   ## to grab metrics for.
 #   ##
-#   address = "host=localhost port=5432 user=postgres sslmode=disable"
+#   address = "host=localhost user=postgres sslmode=disable"
 #
 #   ## A custom name for the database that will be used as the "server" tag in the
 #   ## measurement output.  If not specified, a default one generated from
 #   ## the connection address is used.
@@ -5643,10 +6027,10 @@
 # # Read metrics from one or many postgresql servers
 # [[inputs.postgresql_extensible]]
 #   ## specify address via a url matching:
-#   ##   postgres://[pqgotest[:password]]@host:port[/dbname]\
+#   ##   postgres://[pqgotest[:password]]@localhost[/dbname]\
 #   ##       ?sslmode=[disable|verify-ca|verify-full]
 #   ## or a simple string:
-#   ##   host=localhost port=5432 user=pqotest password=... sslmode=... dbname=app_production
+#   ##   host=localhost user=pqotest password=... sslmode=... dbname=app_production
 #   #
 #   ## All connection parameters are optional.
 #   ## Without the dbname parameter, the driver will default to a database
@@ -5654,7 +6038,7 @@
 #   ## connection with the server and doesn't restrict the databases we are trying
 #   ## to grab metrics for.
 #   #
-#   address = "host=localhost port=5432 user=postgres sslmode=disable"
+#   address = "host=localhost user=postgres sslmode=disable"
 #
 #   ## connection configuration.
 #   ## maxlifetime - specify the maximum lifetime of a connection.
@@ -5740,6 +6124,11 @@
 #   ## Restricts Kubernetes monitoring to a single namespace
 #   ##   ex: monitor_kubernetes_pods_namespace = "default"
 #   # monitor_kubernetes_pods_namespace = ""
+#   ## Label selector to target pods which have the label
+#   # kubernetes_label_selector = "env=dev,app=nginx"
+#   ## Field selector to target pods
+#   ## e.g. to scrape pods on a specific node:
+#   # kubernetes_field_selector = "spec.nodeName=$HOSTNAME"
 #
 #   ## Use bearer token for authorization. ('bearer_token' takes priority)
 #   # bearer_token = "/path/to/bearer/token"
@@ -5762,6 +6151,19 @@
 #   # insecure_skip_verify = false
 
 
+# # SFlow V5 Protocol Listener
+# [[inputs.sflow]]
+#   ## Address to listen for sFlow packets.
+#   ## example: service_address = "udp://:6343"
+#   ##          service_address = "udp4://:6343"
+#   ##          service_address = "udp6://:6343"
+#   service_address = "udp://:6343"
+#
+#   ## Set the size of the operating system's receive buffer.
+#   ## example: read_buffer_size = "64KiB"
+#   # read_buffer_size = ""
+
+
 # # Receive SNMP traps
 # [[inputs.snmp_trap]]
 #   ## Transport, local address, and port to listen on.  Transport must
@@ -6135,11 +6537,6 @@
 #   # collect_concurrency = 1
 #   # discover_concurrency = 1
 #
-#   ## whether or not to force discovery of new objects on initial gather call before collecting metrics
-#   ## when true for large environments this may cause errors for time elapsed while collecting metrics
-#   ## when false (default) the first collection cycle may result in no or limited metrics while objects are discovered
-#   # force_discover_on_init = false
-#
 #   ## the interval before (re)discovering objects subject to metrics collection (default: 300s)
 #   # object_discovery_interval = "300s"
 #
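
A minimal usage sketch combining a few of the options this patch introduces; the
broker address, server URL, and tag names below are illustrative assumptions,
not defaults taken from the patch:

[[processors.template]]
  ## Create a "topic" tag such as "web01.err" from existing tags.
  tag = "topic"
  template = '{{ .Tag "hostname" }}.{{ .Tag "level" }}'

[[outputs.kafka]]
  brokers = ["localhost:9092"]
  ## Fallback topic for metrics that carry no "topic" tag.
  topic = "telegraf"
  ## Route each metric to the Kafka topic named by its "topic" tag and, per the
  ## option description above, strip that tag from the written metric.
  topic_tag = "topic"
  exclude_topic_tag = true

[[outputs.influxdb]]
  urls = ["http://127.0.0.1:8086"]
  ## Metrics carrying an "rp" tag are written to that retention policy;
  ## anything else falls back to the 'retention_policy' default.
  retention_policy_tag = "rp"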