Update sample config
parent 317c823bfc, commit 5d502bb605

@@ -35,7 +35,9 @@
## This controls the size of writes that Telegraf sends to output plugins.
metric_batch_size = 1000

## Maximum number of unwritten metrics per output.
## Maximum number of unwritten metrics per output. Increasing this value
## allows for longer periods of output downtime without dropping metrics at the
## cost of higher maximum memory usage.
metric_buffer_limit = 10000

## Collection jitter is used to jitter the collection by a random amount.
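
Taken together, these two settings bound Telegraf's write path: each flush sends batches of at most metric_batch_size metrics, while up to metric_buffer_limit metrics per output are retained across failed writes. A minimal sketch of the [agent] section using the default values shown above:

    [agent]
      ## Send metrics to outputs in batches of at most this many metrics.
      metric_batch_size = 1000
      ## Retain up to this many unwritten metrics per output; when the buffer
      ## fills, the oldest metrics are dropped first.
      metric_buffer_limit = 10000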

@@ -66,7 +68,13 @@
## Log only error level messages.
# quiet = false

## Log file name, the empty string means to log to stderr.
## Log target controls the destination for logs and can be one of "file",
## "stderr" or, on Windows, "eventlog". When set to "file", the output file
## is determined by the "logfile" setting.
# logtarget = "file"

## Name of the file to be logged to when using the "file" logtarget. If set to
## the empty string then logs are written to stderr.
# logfile = ""

## The logfile will be rotated after the time interval specified. When set
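
For illustration, a sketch of these logging options uncommented for a deployment that logs to a rotated file; the path and rotation values are illustrative, not defaults:

    [agent]
      ## Route logs to a file; "stderr" (and "eventlog" on Windows) are the alternatives.
      logtarget = "file"
      logfile = "/var/log/telegraf/telegraf.log"
      ## Rotate daily and keep at most 7 archived log files.
      logfile_rotation_interval = "1d"
      logfile_rotation_max_archives = 7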

@@ -412,6 +420,9 @@
# ## You could use basicstats aggregator to calculate those fields. If not all statistic
# ## fields are available, all fields would still be sent as raw metrics.
# # write_statistics = false
#
# ## Enable high resolution metrics of 1 second (if not enabled, standard resolution is 60 seconds)
# # high_resolution_metrics = false
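
Since write_statistics expects pre-aggregated statistic fields, the basicstats aggregator mentioned above is one way to produce them; a sketch, with illustrative region and namespace values:

    [[aggregators.basicstats]]
      period = "60s"
      ## Emit the statistic fields the CloudWatch output can fold into one value set.
      stats = ["min", "max", "sum", "count"]

    [[outputs.cloudwatch]]
      region = "us-east-1"              # illustrative
      namespace = "InfluxData/Telegraf" # illustrative
      write_statistics = true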


# # Configuration for CrateDB to send metrics to.

@@ -516,6 +527,11 @@
# ## Files to write to, "stdout" is a specially handled file.
# files = ["stdout", "/tmp/metrics.out"]
#
# ## Use batch serialization format instead of line based delimiting. The
# ## batch format allows for the production of non line based output formats and
# ## may more efficiently encode metric groups.
# # use_batch_format = false
#
# ## The file will be rotated after the time interval specified. When set
# ## to 0 no time based rotation is performed.
# # rotation_interval = "0d"

@@ -657,6 +673,7 @@
# ##
# ## Multiple URLs can be specified for a single cluster, only ONE of the
# ## urls will be written to each interval.
# ## ex: urls = ["https://us-west-2-1.aws.cloud2.influxdata.com"]
# urls = ["http://127.0.0.1:9999"]
#
# ## Token for authentication.

@@ -1029,6 +1046,14 @@
# ## Address to listen on
# listen = ":9273"
#
# ## Metric version controls the mapping from Telegraf metrics into
# ## Prometheus format. When using the prometheus input, use the same value in
# ## both plugins to ensure metrics are round-tripped without modification.
# ##
# ## example: metric_version = 1; deprecated in 1.13
# ## metric_version = 2; recommended version
# # metric_version = 1
#
# ## Use HTTP Basic Authentication.
# # basic_username = "Foo"
# # basic_password = "Bar"
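
The round-trip advice above amounts to pinning both plugins to the same mapping; a sketch of a paired configuration using options from this file (the scrape URL is illustrative):

    [[inputs.prometheus]]
      urls = ["http://localhost:9100/metrics"]
      metric_version = 2

    [[outputs.prometheus_client]]
      listen = ":9273"
      metric_version = 2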

@@ -1292,6 +1317,18 @@
###############################################################################


# # Clone metrics and apply modifications.
# [[processors.clone]]
# ## All modifications on inputs and aggregators can be overridden:
# # name_override = "new_name"
# # name_prefix = "new_name_prefix"
# # name_suffix = "new_name_suffix"
#
# ## Tags to be added (all values must be strings)
# # [processors.clone.tags]
# # additional_tag = "tag_value"
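
As a concrete illustration, the sketch below emits a renamed, tagged copy of every cpu metric alongside the untouched original; the namepass filter and the names used are assumptions for the example:

    [[processors.clone]]
      namepass = ["cpu"]         # clone only cpu metrics (assumption)
      name_override = "cpu_copy" # the copy is renamed; the original passes through unchanged
      [processors.clone.tags]
        copy = "true"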


# # Convert values to another metric value type
# [[processors.converter]]
# ## Tags to convert

@@ -1557,6 +1594,7 @@
# [[aggregators.basicstats]]
# ## The period on which to flush & clear the aggregator.
# period = "30s"
#
# ## If true, the original metric will be dropped by the
# ## aggregator and will not get sent to the output plugins.
# drop_original = false

@@ -1607,6 +1645,11 @@
# # fields = ["io_time", "read_time", "write_time"]


# # Merge metrics into multifield metrics by series key
# [[aggregators.merge]]
# # no configuration


# # Keep the aggregate min/max of each metric passing through.
# [[aggregators.minmax]]
# ## General Aggregator Arguments:

@@ -1816,6 +1859,18 @@
# # insecure_skip_verify = false


# # Gather Azure Storage Queue metrics
# [[inputs.azure_storage_queue]]
# ## Required Azure Storage Account name
# account_name = "mystorageaccount"
#
# ## Required Azure Storage Account access key
# account_key = "storageaccountaccesskey"
#
# ## Set to false to disable peeking age of oldest message (executes faster)
# # peek_oldest_message_age = true


# # Read metrics of bcache from stats_total and dirty_data
# [[inputs.bcache]]
# ## Bcache sets path

@@ -2013,6 +2068,9 @@
# ## See http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_limits.html
# # ratelimit = 25
#
# ## Timeout for http requests made by the cloudwatch client.
# # timeout = "5s"
#
# ## Namespace-wide statistic filters. These allow fewer queries to be made to
# ## cloudwatch.
# # statistic_include = [ "average", "sum", "minimum", "maximum", "sample_count" ]

@@ -2202,6 +2260,9 @@
# ## Only collect metrics for these containers, collect all if empty
# container_names = []
#
# ## Set the source tag for the metrics to the container ID hostname, e.g. first 12 chars
# source_tag = false
#
# ## Containers to include and exclude. Globs accepted.
# ## Note that an empty array for both will include all containers
# container_name_include = []

@@ -2220,8 +2281,10 @@
# ## Whether to report for each container per-device blkio (8:0, 8:1...) and
# ## network (eth0, eth1, ...) stats or not
# perdevice = true
#
# ## Whether to report for each container total blkio and network stats or not
# total = false
#
# ## Which environment variables should we use as a tag
# # tag_env = ["JAVA_HOME", "HEAP_SIZE"]
#

@@ -2246,8 +2309,10 @@
# ##
# ## If no servers are specified, then localhost is used as the host.
# servers = ["localhost:24242"]
#
# ## Type is one of "user", "domain", "ip", or "global"
# type = "global"
#
# ## Wildcard matches like "*.com". An empty string "" is same as "*"
# ## If type = "ip" filters should be <IP/network>
# filters = [""]

@@ -2332,6 +2397,15 @@
# # insecure_skip_verify = false


# # Returns ethtool statistics for given interfaces
# [[inputs.ethtool]]
# ## List of interfaces to pull metrics for
# # interface_include = ["eth0"]
#
# ## List of interfaces to ignore when pulling metrics.
# # interface_exclude = ["eth1"]


# # Read metrics from one or more commands that can output to stdout
# [[inputs.exec]]
# ## Commands array

@@ -2389,6 +2463,10 @@
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
# data_format = "influx"
#
# ## Name a tag containing the name of the file the data was parsed from. Leave empty
# ## to disable.
# # file_tag = ""


# # Count files in a directory

@@ -2414,6 +2492,9 @@
# ## Only count regular files. Defaults to true.
# regular_only = true
#
# ## Follow all symlinks while walking the directory tree. Defaults to false.
# follow_symlinks = false
#
# ## Only count files that are at least this size. If size is
# ## a negative number, only count files that are smaller than the
# ## absolute value of size. Acceptable units are B, KiB, MiB, KB, ...

@@ -2438,6 +2519,7 @@
# ## See https://github.com/gobwas/glob for more examples
# ##
# files = ["/var/log/**.log"]
#
# ## If true, read the entire file and calculate an md5 checksum.
# md5 = false

@@ -2710,10 +2792,10 @@

# # Gather Icinga2 status
# [[inputs.icinga2]]
# ## Required Icinga2 server address (default: "https://localhost:5665")
# ## Required Icinga2 server address
# # server = "https://localhost:5665"
#
# ## Required Icinga2 object type ("services" or "hosts, default "services")
# ## Required Icinga2 object type ("services" or "hosts")
# # object_type = "services"
#
# ## Credentials for basic HTTP authentication

@@ -2743,6 +2825,10 @@
# "http://localhost:8086/debug/vars"
# ]
#
# ## Username and password to send using HTTP Basic Authentication.
# # username = ""
# # password = ""
#
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"

@@ -2841,7 +2927,7 @@

# # Read jobs and cluster metrics from Jenkins instances
# [[inputs.jenkins]]
# ## The Jenkins URL
# ## The Jenkins URL in the format "schema://host:port"
# url = "http://my-jenkins-instance:8080"
# # username = "admin"
# # password = "admin"

@@ -3062,6 +3148,8 @@
# # namespace = "default"
#
# ## Use bearer token for authorization. ('bearer_token' takes priority)
# ## If both of these are empty, we'll use the default serviceaccount:
# ## at: /run/secrets/kubernetes.io/serviceaccount/token
# # bearer_token = "/path/to/bearer/token"
# ## OR
# # bearer_token_string = "abc_123"

@@ -3093,6 +3181,8 @@
# url = "http://127.0.0.1:10255"
#
# ## Use bearer token for authorization. ('bearer_token' takes priority)
# ## If both of these are empty, we'll use the default serviceaccount:
# ## at: /run/secrets/kubernetes.io/serviceaccount/token
# # bearer_token = "/path/to/bearer/token"
# ## OR
# # bearer_token_string = "abc_123"

@@ -3223,8 +3313,10 @@
# [[inputs.mesos]]
# ## Timeout, in ms.
# timeout = 100
#
# ## A list of Mesos masters.
# masters = ["http://localhost:5050"]
#
# ## Master metrics groups to be collected, by default, all enabled.
# master_collections = [
# "resources",

@@ -3239,8 +3331,10 @@
# "registrar",
# "allocator",
# ]
#
# ## A list of Mesos slaves, default is []
# # slaves = []
#
# ## Slave metrics groups to be collected, by default, all enabled.
# # slave_collections = [
# # "resources",

@@ -3285,8 +3379,10 @@
#
# ## When true, collect per database stats
# # gather_perdb_stats = false
#
# ## When true, collect per collection stats
# # gather_col_stats = false
#
# ## List of db where collections stats are collected
# ## If empty, all db are concerned
# # col_stats_dbs = ["local"]

@@ -3349,55 +3445,56 @@
# ## <1.6: metric_version = 1 (or unset)
# metric_version = 2
#
# ## the limits for metrics from perf_events_statements
# perf_events_statements_digest_text_limit = 120
# perf_events_statements_limit = 250
# perf_events_statements_time_limit = 86400
# #
# ## if the list is empty, then metrics are gathered from all database tables
# table_schema_databases = []
# #
# # table_schema_databases = []
#
# ## gather metrics from INFORMATION_SCHEMA.TABLES for databases provided above list
# gather_table_schema = false
# #
# # gather_table_schema = false
#
# ## gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST
# gather_process_list = true
# #
# # gather_process_list = false
#
# ## gather user statistics from INFORMATION_SCHEMA.USER_STATISTICS
# gather_user_statistics = true
# #
# # gather_user_statistics = false
#
# ## gather auto_increment columns and max values from information schema
# gather_info_schema_auto_inc = true
# #
# # gather_info_schema_auto_inc = false
#
# ## gather metrics from INFORMATION_SCHEMA.INNODB_METRICS
# gather_innodb_metrics = true
# #
# # gather_innodb_metrics = false
#
# ## gather metrics from SHOW SLAVE STATUS command output
# gather_slave_status = true
# #
# # gather_slave_status = false
#
# ## gather metrics from SHOW BINARY LOGS command output
# gather_binary_logs = false
# #
# # gather_binary_logs = false
#
# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE
# gather_table_io_waits = false
# #
# # gather_table_io_waits = false
#
# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS
# gather_table_lock_waits = false
# #
# # gather_table_lock_waits = false
#
# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE
# gather_index_io_waits = false
# #
# # gather_index_io_waits = false
#
# ## gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS
# gather_event_waits = false
# #
# # gather_event_waits = false
#
# ## gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME
# gather_file_events_stats = false
# #
# # gather_file_events_stats = false
#
# ## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST
# gather_perf_events_statements = false
# #
# # gather_perf_events_statements = false
#
# ## the limits for metrics from perf_events_statements
# # perf_events_statements_digest_text_limit = 120
# # perf_events_statements_limit = 250
# # perf_events_statements_time_limit = 86400
#
# ## Some queries we may want to run less often (such as SHOW GLOBAL VARIABLES)
# interval_slow = "30m"
# ## example: interval_slow = "30m"
# # interval_slow = ""
#
# ## Optional TLS Config (will be used if tls=custom parameter specified in server uri)
# # tls_ca = "/etc/telegraf/ca.pem"

@@ -3672,6 +3769,12 @@
# ## City ID's to collect weather data from.
# city_id = ["5391959"]
#
# ## Language of the description field. Can be one of "ar", "bg",
# ## "ca", "cz", "de", "el", "en", "fa", "fi", "fr", "gl", "hr", "hu",
# ## "it", "ja", "kr", "la", "lt", "mk", "nl", "pl", "pt", "ro", "ru",
# ## "se", "sk", "sl", "es", "tr", "ua", "vi", "zh_cn", "zh_tw"
# # lang = "en"
#
# ## APIs to fetch; can contain "weather" or "forecast".
# fetch = ["weather", "forecast"]
#

@@ -3748,35 +3851,47 @@

# # Ping given url(s) and return statistics
# [[inputs.ping]]
# ## List of urls to ping
# ## Hosts to send ping packets to.
# urls = ["example.org"]
#
# ## Number of pings to send per collection (ping -c <COUNT>)
# # count = 1
#
# ## Interval, in s, at which to ping. 0 == default (ping -i <PING_INTERVAL>)
# # ping_interval = 1.0
#
# ## Per-ping timeout, in s. 0 == no timeout (ping -W <TIMEOUT>)
# # timeout = 1.0
#
# ## Total-ping deadline, in s. 0 == no deadline (ping -w <DEADLINE>)
# # deadline = 10
#
# ## Interface or source address to send ping from (ping -I[-S] <INTERFACE/SRC_ADDR>)
# # interface = ""
#
# ## How to ping. "native" doesn't have external dependencies, while "exec" depends on 'ping'.
# ## Method used for sending pings, can be either "exec" or "native". When set
# ## to "exec" the system's ping command will be executed. When set to "native"
# ## the plugin will send pings directly.
# ##
# ## While the default is "exec" for backwards compatibility, new deployments
# ## are encouraged to use the "native" method for improved compatibility and
# ## performance.
# # method = "exec"
#
# ## Specify the ping executable binary, default is "ping"
# # binary = "ping"
# ## Number of ping packets to send per interval. Corresponds to the "-c"
# ## option of the ping command.
# # count = 1
#
# ## Arguments for ping command. When arguments is not empty, system binary will be used and
# ## other options (ping_interval, timeout, etc) will be ignored.
# ## Time to wait between sending ping packets in seconds. Operates like the
# ## "-i" option of the ping command.
# # ping_interval = 1.0
#
# ## If set, the time to wait for a ping response in seconds. Operates like
# ## the "-W" option of the ping command.
# # timeout = 1.0
#
# ## If set, the total ping deadline, in seconds. Operates like the -w option
# ## of the ping command.
# # deadline = 10
#
# ## Interface or source address to send ping from. Operates like the -I or -S
# ## option of the ping command.
# # interface = ""
#
# ## Specify the ping executable binary.
# # binary = "ping"
#
# ## Arguments for ping command. When arguments is not empty, the command from
# ## the binary option will be used and other options (ping_interval, timeout,
# ## etc) will be ignored.
# # arguments = ["-c", "3"]
#
# ## Use only ipv6 addresses when resolving hostnames.
# ## Use only IPv6 addresses when resolving a hostname.
# # ipv6 = false
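
Following the recommendation above, a new deployment might opt into the native method explicitly; a minimal sketch (the target host is illustrative):

    [[inputs.ping]]
      urls = ["example.org"]
      ## Send pings in-process rather than shelling out to the system ping binary.
      method = "native"
      count = 3
      ping_interval = 1.0
      timeout = 1.0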

@@ -3895,6 +4010,15 @@
# ## Note that an empty array for both will include all queues
# queue_name_include = []
# queue_name_exclude = []
#
# ## Federation upstreams include and exclude when gathering the rabbitmq_federation measurement.
# ## If neither are specified, metrics for all federation upstreams are gathered.
# ## Federation link metrics will only be gathered for queues and exchanges
# ## whose non-federation metrics will be collected (e.g. a queue excluded
# ## by the 'queue_name_exclude' option will also be excluded from federation).
# ## Globs accepted.
# # federation_upstream_include = ["dataCentre-*"]
# # federation_upstream_exclude = []


# # Read raindrops stats (raindrops - real-time stats for preforking Rack servers)

@@ -4200,7 +4324,8 @@
# ## By default, the host is localhost, listening on default port, TCP 1433.
# ## For Windows, the user is the currently running AD user (SSO).
# ## See https://github.com/denisenkom/go-mssqldb for detailed connection
# ## parameters.
# ## parameters, in particular, tls connections can be created like so:
# ## "encrypt=true;certificate=<cert>;hostNameInCertificate=<SqlServer host fqdn>"
# # servers = [
# # "Server=192.168.1.10;Port=1433;User Id=<user>;Password=<pw>;app name=telegraf;log=1;",
# # ]

@@ -4229,6 +4354,7 @@
# ## - AzureDBResourceStats
# ## - AzureDBResourceGovernance
# ## - SqlRequests
# ## - ServerProperties
# exclude_query = [ 'Schedulers' ]

@@ -4312,6 +4438,11 @@
# # value = 'one_of("sda", "sdb")'


# # Get synproxy counter statistics from procfs
# [[inputs.synproxy]]
# # no configuration


# # Sysstat metrics collector
# [[inputs.sysstat]]
# ## Path to the sadc command.

@@ -4321,18 +4452,15 @@
# ## Arch: /usr/lib/sa/sadc
# ## RHEL/CentOS: /usr/lib64/sa/sadc
# sadc_path = "/usr/lib/sa/sadc" # required
# #
# #
#
# ## Path to the sadf command, if it is not in PATH
# # sadf_path = "/usr/bin/sadf"
# #
# #
#
# ## Activities is a list of activities, that are passed as argument to the
# ## sadc collector utility (e.g: DISK, SNMP etc...)
# ## The more activities that are added, the more data is collected.
# # activities = ["DISK"]
# #
# #
#
# ## Group metrics to measurements.
# ##
# ## If group is false each metric will be prefixed with a description

@@ -4340,8 +4468,7 @@
# ##
# ## If Group is true, corresponding metrics are grouped to a single measurement.
# # group = true
# #
# #
#
# ## Options for the sadf command. The values on the left represent the sadf
# ## options and the values on the right their description (which are used for
# ## grouping and prefixing metrics).

@@ -4365,8 +4492,7 @@
# -w = "task"
# # -H = "hugepages" # only available for newer linux distributions
# # "-I ALL" = "interrupts" # requires INT activity
# #
# #
#
# ## Device tags can be used to add additional tags for devices.
# ## For example the configuration below adds a tag vg with value rootvg for
# ## all metrics with sda devices.

@@ -4374,6 +4500,17 @@
# # vg = "rootvg"


# # Gather systemd units state
# [[inputs.systemd_units]]
# ## Set timeout for systemctl execution
# # timeout = "1s"
# #
# ## Filter for a specific unit type, default is "service", other possible
# ## values are "socket", "target", "device", "mount", "automount", "swap",
# ## "timer", "path", "slice" and "scope":
# # unittype = "service"


# # Reads metrics from a Teamspeak 3 Server via ServerQuery
# [[inputs.teamspeak]]
# ## Server address for Teamspeak 3 ServerQuery

@@ -4739,6 +4876,9 @@
# ## transport only.
# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
#
# ## Define (for certain nested telemetry measurements with embedded tags) which fields are tags
# # embedded_tags = ["Cisco-IOS-XR-qos-ma-oper:qos/interface-table/interface/input/service-policy-names/service-policy-instance/statistics/class-stats/class-name"]
#
# ## Define aliases to map telemetry encoding paths to simple measurement names
# [inputs.cisco_telemetry_mdt.aliases]
# ifstats = "ietf-interfaces:interfaces-state/interface/statistics"

@@ -4899,6 +5039,9 @@
# # docker_label_include = []
# # docker_label_exclude = []
#
# ## Set the source tag for the metrics to the container ID hostname, e.g. first 12 chars
# source_tag = false
#
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"

@@ -5144,12 +5287,16 @@
# [[inputs.kafka_consumer_legacy]]
# ## topic(s) to consume
# topics = ["telegraf"]
#
# ## an array of Zookeeper connection strings
# zookeeper_peers = ["localhost:2181"]
#
# ## Zookeeper Chroot
# zookeeper_chroot = ""
#
# ## the name of the consumer group
# consumer_group = "telegraf_metrics_consumers"
#
# ## Offset (must be either "oldest" or "newest")
# offset = "oldest"
#

@@ -5314,7 +5461,7 @@
# # max_undelivered_messages = 1000
#
# ## Persistent session disables clearing of the client session on connection.
# ## In order for this option to work you must also set client_id to identity
# ## In order for this option to work you must also set client_id to identify
# ## the client. To receive messages that arrived while the client is offline,
# ## also set the qos option to 1 or 2 and don't forget to also set the QoS when
# ## publishing.

@@ -5348,6 +5495,7 @@
#
# ## subject(s) to consume
# subjects = ["telegraf"]
#
# ## name a queue group
# queue_group = "telegraf_consumers"
#

@@ -5391,8 +5539,10 @@
# [[inputs.nsq_consumer]]
# ## Server option still works but is deprecated, we just prepend it to the nsqd array.
# # server = "localhost:4150"
#
# ## An array representing the NSQD TCP HTTP Endpoints
# nsqd = ["localhost:4150"]
#
# ## An array representing the NSQLookupd HTTP Endpoints
# nsqlookupd = ["localhost:4161"]
# topic = "telegraf"

@@ -5507,7 +5657,10 @@
# ## field is used to define custom tags (separated by commas)
# ## The optional "measurement" value can be used to override the default
# ## output measurement name ("postgresql").
# #
# ##
# ## The script option can be used to specify the .sql file path.
# ## If script and sqlquery options are specified at the same time, sqlquery will be used
# ##
# ## Structure :
# ## [[inputs.postgresql_extensible.query]]
# ## sqlquery string

@@ -5533,6 +5686,18 @@
# ## An array of urls to scrape metrics from.
# urls = ["http://localhost:9100/metrics"]
#
# ## Metric version controls the mapping from Prometheus metrics into
# ## Telegraf metrics. When using the prometheus_client output, use the same
# ## value in both plugins to ensure metrics are round-tripped without
# ## modification.
# ##
# ## example: metric_version = 1; deprecated in 1.13
# ## metric_version = 2; recommended version
# # metric_version = 1
#
# ## Url tag name (tag containing scraped url; optional, default is "url")
# # url_tag = "scrapeUrl"
#
# ## An array of Kubernetes services to scrape metrics from.
# # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"]
#

@@ -5560,7 +5725,7 @@
# # username = ""
# # password = ""
#
# ## Specify timeout duration for slower prometheus clients (default is 3s)
# ## Specify timeout duration for slower prometheus clients (default is 3s)
# # response_timeout = "3s"
#
# ## Optional TLS Config

@@ -5571,6 +5736,16 @@
# # insecure_skip_verify = false


# # Receive SNMP traps
# [[inputs.snmp_trap]]
# ## Transport, local address, and port to listen on. Transport must
# ## be "udp://". Omit local address to listen on all interfaces.
# ## example: "udp://127.0.0.1:1234"
# # service_address = "udp://:162"
# ## Timeout running snmptranslate command
# # timeout = "5s"


# # Generic socket listener capable of handling multiple socket types.
# [[inputs.socket_listener]]
# ## URL to listen on

@@ -5626,6 +5801,10 @@
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
# # data_format = "influx"
#
# ## Content encoding for message payloads, can be set to "gzip" to compress
# ## the payload, or "identity" to apply no encoding.
# # content_encoding = "identity"


# # Statsd UDP/TCP Server

@@ -5688,6 +5867,18 @@
# percentile_limit = 1000


# # Suricata stats plugin
# [[inputs.suricata]]
# ## Data sink for Suricata stats log
# # This is expected to be a filename of a
# # unix socket to be created for listening.
# source = "/var/run/suricata-stats.sock"
#
# # Delimiter for flattening field keys, e.g. subitem "alert" of "detect"
# # becomes "detect_alert" when delimiter is "_".
# delimiter = "_"


# # Accepts syslog messages following RFC5424 format with transports as per RFC5426, RFC5425, or RFC6587
# [[inputs.syslog]]
# ## Specify an ip or hostname with port - e.g., tcp://localhost:6514, tcp://10.0.0.1:6514

@@ -9,9 +9,9 @@
# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
# file would generate.
#
# Environment variables can be used anywhere in this config file, simply prepend
# them with $. For strings the variable must be within quotes (ie, "$STR_VAR"),
# for numbers and booleans they should be plain (ie, $INT_VAR, $BOOL_VAR)
# Environment variables can be used anywhere in this config file, simply surround
# them with ${}. For strings the variable must be within quotes (ie, "${STR_VAR}"),
# for numbers and booleans they should be plain (ie, ${INT_VAR}, ${BOOL_VAR})
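
For example, substitution can keep host- or secret-specific values out of the file; the variable names here are illustrative:

    [agent]
      ## Strings stay quoted; numbers and booleans stay bare.
      hostname = "${HOSTNAME_OVERRIDE}"
      metric_batch_size = ${BATCH_SIZE}
      debug = ${DEBUG_MODE}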


# Global tags can be specified here in key="value" format.

@@ -35,7 +35,9 @@
## This controls the size of writes that Telegraf sends to output plugins.
metric_batch_size = 1000

## Maximum number of unwritten metrics per output.
## Maximum number of unwritten metrics per output. Increasing this value
## allows for longer periods of output downtime without dropping metrics at the
## cost of higher maximum memory usage.
metric_buffer_limit = 10000

## Collection jitter is used to jitter the collection by a random amount.

@@ -66,7 +68,13 @@
## Log only error level messages.
# quiet = false

## Log file name, the empty string means to log to stderr.
## Log target controls the destination for logs and can be one of "file",
## "stderr" or, on Windows, "eventlog". When set to "file", the output file
## is determined by the "logfile" setting.
# logtarget = "file"

## Name of the file to be logged to when using the "file" logtarget. If set to
## the empty string then logs are written to stderr.
# logfile = ""

## The logfile will be rotated after the time interval specified. When set

@@ -89,9 +97,10 @@


###############################################################################
#                                  OUTPUTS                                    #
#                               OUTPUT PLUGINS                                #
###############################################################################


# Configuration for sending metrics to InfluxDB
[[outputs.influxdb]]
## The full HTTP or UDP URL for your InfluxDB instance.

@@ -103,8 +112,16 @@
# urls = ["http://127.0.0.1:8086"]

## The target database for metrics; will be created as needed.
## For UDP url endpoint database needs to be configured on server side.
# database = "telegraf"

## The value of this tag will be used to determine the database. If this
## tag is not set the 'database' option is used as the default.
# database_tag = ""

## If true, the database tag will not be added to the metric.
# exclude_database_tag = false

## If true, no CREATE DATABASE queries will be sent. Set to true when using
## Telegraf with a user without permissions to create databases or when the
## database already exists.
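
As a sketch of the tag-based routing described above: metrics carrying the routing tag land in the database named by its value, and everything else falls back to the 'database' option (the tag name "db" is an assumption for the example):

    [[outputs.influxdb]]
      urls = ["http://127.0.0.1:8086"]
      database = "telegraf"        # default when the routing tag is missing
      database_tag = "db"          # route by the value of the "db" tag
      exclude_database_tag = true  # drop the routing tag from written metrics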

@@ -161,6 +178,7 @@
# ##
# ## Multiple URLs can be specified for a single cluster, only ONE of the
# ## urls will be written to each interval.
# ## ex: urls = ["https://us-west-2-1.aws.cloud2.influxdata.com"]
# urls = ["http://127.0.0.1:9999"]
#
# ## Token for authentication.

@@ -206,10 +224,12 @@
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false


###############################################################################
#                                  INPUTS                                     #
#                               INPUT PLUGINS                                 #
###############################################################################


# Windows Performance Counters plugin.
# This is the recommended method of monitoring system metrics on Windows,
# as the regular system plugins (inputs.cpu, inputs.mem, etc.) rely on WMI,