Update example config
parent 407f0fe545
commit 589da9c481
@@ -151,6 +151,9 @@
# ## Telegraf tag to use as a routing key
# ## ie, if this tag exists, its value will be used as the routing key
# routing_tag = "host"
# ## Delivery Mode controls if a published message is persistent
# ## Valid options are "transient" and "persistent". default: "transient"
# delivery_mode = "transient"
#
# ## InfluxDB retention policy
# # retention_policy = "default"
@@ -253,8 +256,21 @@
# # %m - month (01..12)
# # %d - day of month (e.g., 01)
# # %H - hour (00..23)
# # %V - week of the year (ISO week) (01..53)
# ## Additionally, you can specify a tag name using the notation {{tag_name}}
# ## which will be used as part of the index name. If the tag does not exist,
# ## the default tag value will be used.
# # index_name = "telegraf-{{host}}-%Y.%m.%d"
# # default_tag_value = "none"
# index_name = "telegraf-%Y.%m.%d" # required.
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false
#
# ## Template Config
# ## Set to true if you want telegraf to manage its index template.
# ## If enabled it will create a recommended index template for telegraf indexes
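Editor's note: a minimal sketch of how the date and tag placeholders above combine into a daily, per-host index. The plugin header and the urls value fall outside this hunk and are assumptions; index_name, default_tag_value and the SSL keys come from the sample itself.

    [[outputs.elasticsearch]]
      ## Assumed endpoint for the sketch
      urls = ["http://localhost:9200"]
      ## Expands to e.g. "telegraf-web01-2017.11.21"; "none" is substituted
      ## when a metric carries no "host" tag
      index_name = "telegraf-{{host}}-%Y.%m.%d"
      default_tag_value = "none"
      ## Optional SSL config, verifying chain and host
      # ssl_ca = "/etc/telegraf/ca.pem"
      insecure_skip_verify = false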
@@ -580,6 +596,10 @@
#
# ## Interval to expire metrics and not deliver to prometheus, 0 == no expiration
# # expiration_interval = "60s"
#
# ## Collectors to enable, valid entries are "gocollector" and "process".
# ## If unset, both are enabled.
# collectors_exclude = ["gocollector", "process"]


# # Configuration for the Riemann server to send metrics to
@@ -653,6 +673,46 @@
# # data_format = "influx"


# # Configuration for Wavefront server to send metrics to
# [[outputs.wavefront]]
# ## DNS name of the wavefront proxy server
# host = "wavefront.example.com"
#
# ## Port that the Wavefront proxy server listens on
# port = 2878
#
# ## prefix for metrics keys
# #prefix = "my.specific.prefix."
#
# ## whether to use "value" for name of simple fields
# #simple_fields = false
#
# ## character to use between metric and field name. defaults to . (dot)
# #metric_separator = "."
#
# ## Convert metric name paths to use the metric_separator character
# ## When true (default) will convert all _ (underscore) characters in the final metric name
# #convert_paths = true
#
# ## Use Regex to sanitize metric and tag names from invalid characters
# ## Regex is more thorough, but significantly slower
# #use_regex = false
#
# ## point tags to use as the source name for Wavefront (if none found, host will be used)
# #source_override = ["hostname", "snmp_host", "node_host"]
#
# ## whether to convert boolean values to numeric values, with false -> 0.0 and true -> 1.0. default true
# #convert_bool = true
#
# ## Define a mapping, namespaced by metric prefix, from string values to numeric values
# ## The example below maps "green" -> 1.0, "yellow" -> 0.5, "red" -> 0.0 for
# ## any metrics beginning with "elasticsearch"
# #[[outputs.wavefront.string_to_number.elasticsearch]]
# # green = 1.0
# # yellow = 0.5
# # red = 0.0
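Editor's note: as a usage example, a sketch with only the options this hunk documents uncommented; all option names come from the sample above and the values are placeholders.

    [[outputs.wavefront]]
      host = "wavefront.example.com"
      port = 2878
      metric_separator = "."
      convert_paths = true
      ## Map string field values to numbers for any metric whose name
      ## starts with "elasticsearch"
      [[outputs.wavefront.string_to_number.elasticsearch]]
        green  = 1.0
        yellow = 0.5
        red    = 0.0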


###############################################################################
# PROCESSOR PLUGINS #
@@ -667,6 +727,16 @@
# AGGREGATOR PLUGINS #
###############################################################################

# # Keep the aggregate basicstats of each metric passing through.
# [[aggregators.basicstats]]
# ## General Aggregator Arguments:
# ## The period on which to flush & clear the aggregator.
# period = "30s"
# ## If true, the original metric will be dropped by the
# ## aggregator and will not get sent to the output plugins.
# drop_original = false
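Editor's note: a short sketch of how the aggregator is typically scoped. period and drop_original come from the sample above; the namepass filter is an assumption, shown only to illustrate restricting which metrics are aggregated.

    [[aggregators.basicstats]]
      ## Flush and clear the aggregate every 30 seconds
      period = "30s"
      ## Keep forwarding the raw metrics alongside the aggregates
      drop_original = false
      ## Assumed filter: only aggregate CPU metrics
      namepass = ["cpu"]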


# # Create aggregate histograms.
# [[aggregators.histogram]]
# ## The period in which to flush the aggregator.
@@ -825,6 +895,18 @@
# bcacheDevs = ["bcache0"]


# # Collect bond interface status, slaves statuses and failures count
# [[inputs.bond]]
# ## Sets 'proc' directory path
# ## If not specified, then default is /proc
# # host_proc = "/proc"
#
# ## By default, telegraf gathers stats for all bond interfaces
# ## Setting interfaces will restrict the stats to the specified
# ## bond interfaces.
# # bond_interfaces = ["bond0"]
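Editor's note: a minimal sketch restricting collection to a single bond interface; host_proc and bond_interfaces are the options documented above, and the values are placeholders.

    [[inputs.bond]]
      ## Read bonding state from an alternate proc mount (assumed path)
      host_proc = "/proc"
      ## Only gather stats for bond0
      bond_interfaces = ["bond0"]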


# # Read Cassandra metrics through Jolokia
# [[inputs.cassandra]]
# # This is the context root used to compose the jolokia url
@@ -932,7 +1014,7 @@
# ## Collection Delay (required - must account for metrics availability via CloudWatch API)
# delay = "5m"
#
# ## Recomended: use metric 'interval' that is a multiple of 'period' to avoid
# ## Recommended: use metric 'interval' that is a multiple of 'period' to avoid
# ## gaps or overlap in pulled data
# interval = "5m"
#
@@ -1003,7 +1085,7 @@
# ## http://admin:secret@couchbase-0.example.com:8091/
# ##
# ## If no servers are specified, then localhost is used as the host.
# ## If no protocol is specifed, HTTP is used.
# ## If no protocol is specified, HTTP is used.
# ## If no port is specified, 8091 is used.
# servers = ["http://localhost:8091"]
@@ -1015,6 +1097,50 @@
# hosts = ["http://localhost:8086/_stats"]


# # Input plugin for DC/OS metrics
# [[inputs.dcos]]
# ## The DC/OS cluster URL.
# cluster_url = "https://dcos-ee-master-1"
#
# ## The ID of the service account.
# service_account_id = "telegraf"
# ## The private key file for the service account.
# service_account_private_key = "/etc/telegraf/telegraf-sa-key.pem"
#
# ## Path containing login token. If set, will read on every gather.
# # token_file = "/home/dcos/.dcos/token"
#
# ## In all filter options if both include and exclude are empty all items
# ## will be collected. Arrays may contain glob patterns.
# ##
# ## Node IDs to collect metrics from. If a node is excluded, no metrics will
# ## be collected for its containers or apps.
# # node_include = []
# # node_exclude = []
# ## Container IDs to collect container metrics from.
# # container_include = []
# # container_exclude = []
# ## Container IDs to collect app metrics from.
# # app_include = []
# # app_exclude = []
#
# ## Maximum concurrent connections to the cluster.
# # max_connections = 10
# ## Maximum time to receive a response from cluster.
# # response_timeout = "20s"
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## If false, skip chain & host verification
# # insecure_skip_verify = true
#
# ## Recommended filtering to reduce series cardinality.
# # [inputs.dcos.tagdrop]
# # path = ["/var/lib/mesos/slave/slaves/*"]
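Editor's note: a condensed sketch of the settings above with the include/exclude filters left open and the recommended tagdrop applied; the cluster URL and key path are the placeholder values from the sample.

    [[inputs.dcos]]
      cluster_url = "https://dcos-ee-master-1"
      service_account_id = "telegraf"
      service_account_private_key = "/etc/telegraf/telegraf-sa-key.pem"
      max_connections = 10
      response_timeout = "20s"
      ## Drop the high-cardinality per-executor path tag
      [inputs.dcos.tagdrop]
        path = ["/var/lib/mesos/slave/slaves/*"]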


# # Read metrics from one or many disque servers
# [[inputs.disque]]
# ## An array of URI to gather stats about. Specify an ip or hostname
@@ -1059,6 +1185,9 @@
# ## To use environment variables (ie, docker-machine), set endpoint = "ENV"
# endpoint = "unix:///var/run/docker.sock"
#
# ## Set to true to collect Swarm metrics (desired_replicas, running_replicas)
# gather_services = false
#
# ## Only collect metrics for these containers, collect all if empty
# container_names = []
#
@@ -1124,10 +1253,21 @@
# ## Set cluster_health to true when you want to also obtain cluster health stats
# cluster_health = false
#
# ## Adjust cluster_health_level when you want to also obtain detailed health stats
# ## The options are
# ## - indices (default)
# ## - cluster
# # cluster_health_level = "indices"
#
# ## Set cluster_stats to true when you want to also obtain cluster stats from the
# ## Master node.
# cluster_stats = false
#
# ## node_stats is a list of sub-stats that you want to have gathered. Valid options
# ## are "indices", "os", "process", "jvm", "thread_pool", "fs", "transport", "http",
# ## "breakers". Per default, all stats are gathered.
# # node_stats = ["jvm", "http"]
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
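Editor's note: for illustration, a sketch that turns on cluster-wide health and trims node stats to the two sub-groups shown above; the servers line sits outside this hunk and is an assumption.

    [[inputs.elasticsearch]]
      ## Assumed endpoint for the sketch
      servers = ["http://localhost:9200"]
      cluster_health = true
      ## Report health per cluster rather than per index
      cluster_health_level = "cluster"
      cluster_stats = true
      ## Only gather the JVM and HTTP node stats
      node_stats = ["jvm", "http"]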
@@ -1408,7 +1548,7 @@
# ##
# # servers = ["USERID:PASSW0RD@lan(192.168.1.1)"]
#
# ## Recomended: use metric 'interval' that is a multiple of 'timeout' to avoid
# ## Recommended: use metric 'interval' that is a multiple of 'timeout' to avoid
# ## gaps or overlap in pulled data
# interval = "30s"
#
@@ -1876,16 +2016,16 @@
# bind_password = ""


# # A plugin to collect stats from OpenSMTPd
# # A plugin to collect stats from OpenSMTPd - a free implementation of the server-side SMTP protocol
# [[inputs.opensmtpd]]
# ## If running as a restricted user you can prepend sudo for additional access:
# #use_sudo = false
#
# ## The default location of the smtpctl binary can be overridden with:
# #binary = "/usr/sbin/smtpctl"
# binary = "/usr/sbin/smtpctl"
#
# ## The default timeout of 1s can be overriden with:
# #timeout = "1s"
# ## The default timeout of 1000ms can be overridden with (in milliseconds):
# timeout = 1000
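Editor's note: a minimal sketch for a non-root telegraf user, using only the options from the new sample; the sudoers rule itself lives outside Telegraf and is only described in the comment.

    [[inputs.opensmtpd]]
      ## Prepend sudo to the smtpctl invocation; requires a matching sudoers entry
      use_sudo = true
      binary = "/usr/sbin/smtpctl"
      ## Give up after one second (milliseconds, per the sample above)
      timeout = 1000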


# # Read metrics of passenger using passenger-status
@@ -1901,6 +2041,15 @@
# command = "passenger-status -v --show=xml"


# # Gather counters from PF
# [[inputs.pf]]
# ## PF requires root access on most systems.
# ## Setting 'use_sudo' to true will make use of sudo to run pfctl.
# ## Users must configure sudo to allow the telegraf user to run pfctl with no password.
# ## pfctl can be restricted to only the list command "pfctl -s info".
# use_sudo = false
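Editor's note: a sketch of the non-root setup the comments describe; the sudoers line in the comment is an assumed example of the rule that has to exist outside Telegraf.

    [[inputs.pf]]
      ## Run pfctl through sudo; needs a sudoers rule such as (assumed example):
      ##   telegraf ALL=(root) NOPASSWD: /sbin/pfctl -s info
      use_sudo = true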


# # Read metrics of phpfpm, via HTTP status page or socket
# [[inputs.phpfpm]]
# ## An array of addresses to gather stats about. Specify an ip or hostname
@@ -1942,6 +2091,13 @@
# # interface = ""


# # Measure postfix queue statistics
# [[inputs.postfix]]
# ## Postfix queue directory. If not provided, telegraf will try to use
# ## 'postconf -h queue_directory' to determine it.
# # queue_directory = "/var/spool/postfix"


# # Read metrics from one or many postgresql servers
# [[inputs.postgresql]]
# ## specify address via a url matching:
@@ -2044,6 +2200,10 @@
# # pattern = "nginx"
# ## user as argument for pgrep (ie, pgrep -u <user>)
# # user = "nginx"
# ## Systemd unit name
# # systemd_unit = "nginx.service"
# ## CGroup name or path
# # cgroup = "systemd/system.slice/nginx.service"
#
# ## override for process_name
# ## This is optional; default is sourced from /proc/<pid>/status
@@ -2191,6 +2351,40 @@
# # remove_numbers = true


# # Read metrics from storage devices supporting S.M.A.R.T.
# [[inputs.smart]]
# ## Optionally specify the path to the smartctl executable
# # path = "/usr/bin/smartctl"
# #
# ## On most platforms smartctl requires root access.
# ## Setting 'use_sudo' to true will make use of sudo to run smartctl.
# ## Sudo must be configured to allow the telegraf user to run smartctl
# ## without a password.
# # use_sudo = false
# #
# ## Skip checking disks in this power mode. Defaults to
# ## "standby" to not wake up disks that have stopped rotating.
# ## See --nocheck in the man pages for smartctl.
# ## smartctl version 5.41 and 5.42 have faulty detection of
# ## power mode and might require changing this value to
# ## "never" depending on your disks.
# # nocheck = "standby"
# #
# ## Gather detailed metrics for each SMART Attribute.
# ## Defaults to "false"
# ##
# # attributes = false
# #
# ## Optionally specify devices to exclude from reporting.
# # excludes = [ "/dev/pass6" ]
# #
# ## Optionally specify devices and device type, if unset
# ## a scan (smartctl --scan) for S.M.A.R.T. devices will be
# ## done and all found will be included except for those
# ## excluded in excludes.
# # devices = [ "/dev/ada0 -d atacam" ]
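Editor's note: a sketch that enables per-attribute metrics while keeping standby disks asleep; every option below is documented above, and the device entry is the sample's placeholder.

    [[inputs.smart]]
      path = "/usr/bin/smartctl"
      ## Needs a sudoers rule allowing telegraf to run smartctl without a password
      use_sudo = true
      ## Do not spin up disks that are in standby
      nocheck = "standby"
      ## Emit one metric per SMART attribute instead of the summary only
      attributes = true
      ## Placeholder device; omit to let "smartctl --scan" discover devices
      devices = [ "/dev/ada0 -d atacam" ]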


# # Retrieves SNMP values from remote agents
# [[inputs.snmp]]
# agents = [ "127.0.0.1:161" ]
@@ -2354,11 +2548,11 @@
# sub_tables=[".1.3.6.1.2.1.2.2.1.13", "bytes_recv", "bytes_send"]


# # Read metrics from Solr Server
# # Read stats from one or more Solr servers or cores
# [[inputs.solr]]
# ## specify a list of one or more Solr servers
# servers = ["http://localhost:8983"]
# ##
#
# ## specify a list of one or more Solr cores (default - all)
# # cores = ["main"]
@@ -2407,7 +2601,7 @@
# #
# #
# ## Options for the sadf command. The values on the left represent the sadf
# ## options and the values on the right their description (wich are used for
# ## options and the values on the right their description (which are used for
# ## grouping and prefixing metrics).
# ##
# ## Run 'sar -h' or 'man sar' to find out the supported options for your
@@ -2438,6 +2632,18 @@
# # vg = "rootvg"


# # Reads metrics from a Teamspeak 3 Server via ServerQuery
# [[inputs.teamspeak]]
# ## Server address for Teamspeak 3 ServerQuery
# # server = "127.0.0.1:10011"
# ## Username for ServerQuery
# username = "serverqueryuser"
# ## Password for ServerQuery
# password = "secret"
# ## Array of virtual servers
# # virtual_servers = [1]
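Editor's note: a minimal sketch querying two virtual servers; the option names are those shown above and the credentials are placeholders.

    [[inputs.teamspeak]]
      server = "127.0.0.1:10011"
      username = "serverqueryuser"
      password = "secret"
      ## Gather stats for virtual servers 1 and 2
      virtual_servers = [1, 2]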


# # Gather metrics from the Tomcat server status page.
# [[inputs.tomcat]]
# ## URL of the Tomcat server status
@@ -2472,6 +2678,21 @@
# pools = ["redis_pool", "mc_pool"]


# # A plugin to collect stats from Unbound - a validating, recursive, and caching DNS resolver
# [[inputs.unbound]]
# ## If running as a restricted user you can prepend sudo for additional access:
# #use_sudo = false
#
# ## The default location of the unbound-control binary can be overridden with:
# binary = "/usr/sbin/unbound-control"
#
# ## The default timeout of 1s can be overridden with:
# timeout = "1s"
#
# ## Use the builtin fielddrop/fieldpass telegraf filters in order to keep/remove specific fields
# fieldpass = ["total_*", "num_*", "time_up", "mem_*"]
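Editor's note: a sketch of the same section with sudo enabled and the field filter narrowed, to show how fieldpass interacts with the plugin; the field patterns are a subset of the ones listed above.

    [[inputs.unbound]]
      ## Prepend sudo to unbound-control; requires a matching sudoers entry
      use_sudo = true
      binary = "/usr/sbin/unbound-control"
      timeout = "1s"
      ## Keep only the totals and uptime counters
      fieldpass = ["total_*", "time_up"]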


# # A plugin to collect stats from Varnish HTTP Cache
# [[inputs.varnish]]
# ## If running as a restricted user you can prepend sudo for additional access:
@@ -2485,6 +2706,10 @@
# ## Glob matching can be used, ie, stats = ["MAIN.*"]
# ## stats may also be set to ["*"], which will collect all stats
# stats = ["MAIN.cache_hit", "MAIN.cache_miss", "MAIN.uptime"]
#
# ## Optional name for the varnish instance (or working directory) to query
# ## Usually appended after -n in varnish cli
# #name = instanceName


# # Read metrics of ZFS from arcstats, zfetchstats, vdev_cache_stats, and pools
@@ -2687,7 +2912,10 @@

# # Read metrics from MQTT topic(s)
# [[inputs.mqtt_consumer]]
# servers = ["localhost:1883"]
# ## MQTT broker URLs to be used. The format should be scheme://host:port,
# ## scheme can be tcp, ssl, or ws.
# servers = ["tcp://localhost:1883"]
#
# ## MQTT QoS, must be 0, 1, or 2
# qos = 0
# ## Connection timeout for initial connection in seconds
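Editor's note: the new servers format carries the scheme in the URL. A short sketch showing a TLS broker next to the plain-TCP default; the topics list, SSL variant and data_format line fall outside this hunk and are assumptions.

    [[inputs.mqtt_consumer]]
      ## Plain TCP broker, as in the sample above
      servers = ["tcp://localhost:1883"]
      ## Or an SSL broker (assumed host), paired with the usual ssl_ca/ssl_cert/ssl_key options
      # servers = ["ssl://mqtt.example.com:8883"]
      qos = 0
      ## Assumed topic filter for the sketch
      topics = ["telegraf/#"]
      data_format = "influx"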
@@ -2812,7 +3040,7 @@

# # Statsd UDP/TCP Server
# [[inputs.statsd]]
# ## Protocol, must be "tcp" or "udp" (default=udp)
# ## Protocol, must be "tcp", "udp", "udp4" or "udp6" (default=udp)
# protocol = "udp"
#
# ## MaxTCPConnection - applicable when protocol is set to tcp (default=250)
@@ -2899,19 +3127,6 @@
# # socket_listener plugin
# # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener

# # A plugin to collect stats from Unbound - a validating, recursive, and caching DNS resolver
# [[inputs.unbound]]
# ## If running as a restricted user you can prepend sudo for additional access:
# #use_sudo = false
#
# ## The default location of the unbound-control binary can be overridden with:
# binary = "/usr/sbin/unbound-control"
#
# # The default timeout of 1s can be overriden with:
# #timeout = "1s"
#
# # Use the builtin fielddrop/fieldpass telegraf filters in order to keep/remove specific fields
# fieldpass = ["total_*", "num_*","time_up", "mem_*"]

# # A Webhooks Event collector
# [[inputs.webhooks]]
@@ -2933,6 +3148,9 @@
#
# [inputs.webhooks.papertrail]
# path = "/papertrail"
#
# [inputs.webhooks.particle]
# path = "/particle"


# # This plugin implements the Zipkin http server to gather trace and timing data needed to troubleshoot latency problems in microservice architectures.
@@ -45,7 +45,7 @@ var sampleConfig = `
`

func (s *Unbound) Description() string {
	return "A plugin to collect stats from Unbound - a validating, recursive, and caching DNS resolver "
	return "A plugin to collect stats from Unbound - a validating, recursive, and caching DNS resolver"
}

// SampleConfig displays configuration instructions
@@ -64,7 +64,7 @@ func (wb *Webhooks) SampleConfig() string {
  [inputs.webhooks.papertrail]
  path = "/papertrail"

  [inputs.webhooks.particle]
  path = "/particle"
`