@@ -30,15 +30,13 @@
 ## ie, if interval="10s" then always collect on :00, :10, :20, etc.
 round_interval = true
 
-## Telegraf will send metrics to output in batch of at
+## Telegraf will send metrics to outputs in batches of at
 ## most metric_batch_size metrics.
 metric_batch_size = 1000
-## Telegraf will cache metric_buffer_limit metrics for each output, and will
-## flush this buffer on a successful write. This should be a multiple of
-## metric_batch_size and could not be less than 2 times metric_batch_size
+## For failed writes, telegraf will cache metric_buffer_limit metrics for each
+## output, and will flush this buffer on a successful write. Oldest metrics
+## are dropped first when this buffer fills.
 metric_buffer_limit = 10000
-## Flush the buffer whenever full, regardless of flush_interval.
-flush_buffer_when_full = true
 
 ## Collection jitter is used to jitter the collection by a random amount.
 ## Each plugin will sleep for a random time within jitter before collecting.
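Taken together, the agent hunk narrows what the buffer means: metric_buffer_limit now only covers failed writes, metrics are dropped oldest-first when it fills, and the flush_buffer_when_full toggle is gone. A minimal [agent] sketch of how the two surviving knobs relate (values are the defaults shown above; the sizing comment is my reading of the new text, not part of this commit):

```toml
[agent]
  interval = "10s"
  round_interval = true

  ## Each write sends at most metric_batch_size metrics per output.
  metric_batch_size = 1000

  ## After a failed write, up to metric_buffer_limit metrics are kept per
  ## output for retry; the oldest are dropped first once the cap is hit.
  ## Keeping this a multiple of metric_batch_size is still sensible.
  metric_buffer_limit = 10000
```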
@@ -151,6 +149,15 @@
 # ## Amazon REGION
 # region = 'us-east-1'
 #
+# ## Amazon Credentials
+# ## Credentials are loaded in the following order
+# ## 1) explicit credentials from 'access_key' and 'secret_key'
+# ## 2) environment variables
+# ## 3) shared credentials file
+# ## 4) EC2 Instance Profile
+# #access_key = ""
+# #secret_key = ""
+#
 # ## Namespace for the CloudWatch MetricDatums
 # namespace = 'InfluxData/Telegraf'
 
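The identical credentials block is stamped into all three AWS plugins in this commit, and the order matters: explicit keys beat everything, while leaving both commented out lets the client fall through to the environment, the shared file, and finally the instance profile. A sketch of the no-keys path (region and namespace copied from the sample above):

```toml
[[outputs.cloudwatch]]
  region = 'us-east-1'
  namespace = 'InfluxData/Telegraf'
  ## access_key / secret_key intentionally unset: credentials come from
  ## AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY, else ~/.aws/credentials,
  ## else the EC2 instance profile.
```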
@@ -243,6 +250,16 @@
 # [[outputs.kinesis]]
 # ## Amazon REGION of kinesis endpoint.
 # region = "ap-southeast-2"
+#
+# ## Amazon Credentials
+# ## Credentials are loaded in the following order
+# ## 1) explicit credentials from 'access_key' and 'secret_key'
+# ## 2) environment variables
+# ## 3) shared credentials file
+# ## 4) EC2 Instance Profile
+# #access_key = ""
+# #secret_key = ""
+#
 # ## Kinesis StreamName must exist prior to starting telegraf.
 # streamname = "StreamName"
 # ## PartitionKey as used for sharding data.
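Note the operational caveat unique to this output: the stream must exist before Telegraf starts, it is never created for you. A sketch with the credential lines omitted (the stream and partition key values are hypothetical):

```toml
[[outputs.kinesis]]
  region = "ap-southeast-2"
  ## Must already exist in this region.
  streamname = "telegraf-metrics"
  ## A constant key routes every record to one shard; vary it to fan out.
  partitionkey = "telegraf"
```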
@@ -457,6 +474,15 @@
 # ## Amazon Region
 # region = 'us-east-1'
 #
+# ## Amazon Credentials
+# ## Credentials are loaded in the following order
+# ## 1) explicit credentials from 'access_key' and 'secret_key'
+# ## 2) environment variables
+# ## 3) shared credentials file
+# ## 4) EC2 Instance Profile
+# #access_key = ""
+# #secret_key = ""
+#
 # ## Requested CloudWatch aggregation Period (required - must be a multiple of 60s)
 # period = '1m'
 #
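The input side gets the same credentials block, plus a period constraint worth spelling out: CloudWatch aggregates on 60-second boundaries, so '1m', '5m', or '300s' are valid while '90s' is not. A sketch with explicit keys, which take precedence over every other source in the list (the placeholder values are hypothetical):

```toml
[[inputs.cloudwatch]]
  region = 'us-east-1'
  ## Must be a whole multiple of 60s.
  period = '5m'
  access_key = "AKIA________________"  # hypothetical placeholder
  secret_key = "____________________"  # hypothetical placeholder
```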
@@ -588,8 +614,14 @@
 # [[inputs.filestat]]
 # ## Files to gather stats about.
 # ## These accept standard unix glob matching rules, but with the addition of
-# ## ** as a "super asterisk". See https://github.com/gobwas/glob.
-# files = ["/etc/telegraf/telegraf.conf", "/var/log/**.log"]
+# ## ** as a "super asterisk". ie:
+# ## "/var/log/**.log" -> recursively find all .log files in /var/log
+# ## "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log
+# ## "/var/log/apache.log" -> just tail the apache log file
+# ##
+# ## See https://github.com/gobwas/glob for more examples
+# ##
+# files = ["/var/log/**.log"]
 # ## If true, read the entire file and calculate an md5 checksum.
 # md5 = false
 
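The expanded comment makes the glob dialect concrete: * stays within one path segment, ** crosses segments. Combining the old and new sample values into one working block:

```toml
[[inputs.filestat]]
  ## Stat the config file itself plus every .log at any depth under /var/log.
  files = ["/etc/telegraf/telegraf.conf", "/var/log/**.log"]
  ## Hashing every matched file on each interval is expensive; leave it off.
  md5 = false
```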
@@ -980,6 +1012,11 @@
 # ## databases are gathered.
 # ## databases = ["app_production", "testing"]
 # #
+# # outputaddress = "db01"
+# ## A custom name for the database that will be used as the "server" tag in the
+# ## measurement output. If not specified, a default one generated from
+# ## the connection address is used.
+# #
 # ## Define the toml config where the sql queries are stored
 # ## New queries can be added, if the withdbname is set to true and there is no
 # ## databases defined in the 'databases field', the sql query is ended by a
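The new outputaddress knob separates what gets reported from what gets dialed, which helps when the real connection string carries credentials or a failover address. A sketch, assuming the plugin's usual address parameter (not visible in this hunk):

```toml
[[inputs.postgresql_extensible]]
  ## What Telegraf actually connects to...
  address = "host=10.0.0.5 port=5432 user=telegraf sslmode=disable"
  ## ...versus what shows up as the "server" tag on every measurement.
  outputaddress = "db01"
```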
@@ -990,24 +1027,28 @@
 # ## because the databases variable was set to ['postgres', 'pgbench' ] and the
 # ## withdbname was true. Be careful that if the withdbname is set to false you
 # ## don't have to define the where clause (aka with the dbname) the tagvalue
-# ## field is used to define custom tags (separated by comas)
+# ## field is used to define custom tags (separated by commas)
+# ## The optional "measurement" value can be used to override the default
+# ## output measurement name ("postgresql").
 # #
 # ## Structure :
 # ## [[inputs.postgresql_extensible.query]]
 # ## sqlquery string
 # ## version string
 # ## withdbname boolean
-# ## tagvalue string (coma separated)
+# ## tagvalue string (comma separated)
+# ## measurement string
 # [[inputs.postgresql_extensible.query]]
 # sqlquery="SELECT * FROM pg_stat_database"
 # version=901
 # withdbname=false
 # tagvalue=""
+# measurement=""
 # [[inputs.postgresql_extensible.query]]
 # sqlquery="SELECT * FROM pg_stat_bgwriter"
 # version=901
 # withdbname=false
-# tagvalue=""
+# tagvalue="postgresql.stats"
 
 
 # # Read metrics from one or many PowerDNS servers
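Reading the documented structure back against the samples: each [[inputs.postgresql_extensible.query]] carries the SQL, a minimum-version gate (901 in the samples above), the withdbname switch that controls the appended dbname WHERE clause, comma-separated tag columns, and now an optional measurement override. A sketch using those fields (the tag column and measurement name are illustrative):

```toml
[[inputs.postgresql_extensible]]
  databases = ["app_production"]

[[inputs.postgresql_extensible.query]]
  sqlquery = "SELECT * FROM pg_stat_database"
  version = 901
  withdbname = false
  ## Treat the datname column as a tag rather than a field.
  tagvalue = "datname"
  ## Override the default "postgresql" measurement name.
  measurement = "pg_databases"
```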
@@ -1379,6 +1420,28 @@
 # percentile_limit = 1000
 
 
+# # Stream a log file, like the tail -f command
+# [[inputs.tail]]
+# ## files to tail.
+# ## These accept standard unix glob matching rules, but with the addition of
+# ## ** as a "super asterisk". ie:
+# ## "/var/log/**.log" -> recursively find all .log files in /var/log
+# ## "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log
+# ## "/var/log/apache.log" -> just tail the apache log file
+# ##
+# ## See https://github.com/gobwas/glob for more examples
+# ##
+# files = ["/var/mymetrics.out"]
+# ## Read file from beginning.
+# from_beginning = false
+#
+# ## Data format to consume.
+# ## Each data format has it's own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+
+
 # # Generic TCP listener
 # [[inputs.tcp_listener]]
 # ## Address and port to host TCP listener on
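The new tail input reuses the same glob rules but streams lines instead of stats, handing each appended line to the parser named by data_format. A sketch that follows one file of line-protocol output:

```toml
[[inputs.tail]]
  files = ["/var/mymetrics.out"]
  ## false = behave like tail -f (new lines only); true = replay the file once.
  from_beginning = false
  ## Parse each appended line as InfluxDB line protocol.
  data_format = "influx"
```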