diff --git a/etc/telegraf.conf b/etc/telegraf.conf
index 0e740f5c8..43d647beb 100644
--- a/etc/telegraf.conf
+++ b/etc/telegraf.conf
@@ -1,19 +1,21 @@
-# Telegraf configuration
-
+# Telegraf Configuration
+#
 # Telegraf is entirely plugin driven. All metrics are gathered from the
 # declared inputs, and sent to the declared outputs.
-
+#
 # Plugins must be declared in here to be active.
 # To deactivate a plugin, comment out the name and any variables.
-
+#
 # Use 'telegraf -config telegraf.conf -test' to see what metrics a config
 # file would generate.
+
 
 # Global tags can be specified here in key="value" format.
 [global_tags]
   # dc = "us-east-1" # will tag all metrics with dc=us-east-1
   # rack = "1a"
 
+
 # Configuration for telegraf agent
 [agent]
   ## Default data collection interval for all inputs
@@ -48,10 +50,12 @@
   quiet = false
   ## Override default hostname, if empty use os.Hostname()
   hostname = ""
+  ## If set to true, do not set the "host" tag in the telegraf agent.
+  omit_hostname = false
 
 
 ###############################################################################
-#                                  OUTPUTS                                    #
+#                            OUTPUT PLUGINS                                   #
 ###############################################################################
 
 # Configuration for influxdb server to send metrics to
@@ -87,59 +91,1189 @@
   # insecure_skip_verify = false
 
 
+# # Configuration for Amon Server to send metrics to.
+# [[outputs.amon]]
+#   ## Amon Server Key
+#   server_key = "my-server-key" # required.
+#
+#   ## Amon Instance URL
+#   amon_instance = "https://youramoninstance" # required
+#
+#   ## Connection timeout.
+#   # timeout = "5s"
+
+
+# # Configuration for the AMQP server to send metrics to
+# [[outputs.amqp]]
+#   ## AMQP url
+#   url = "amqp://localhost:5672/influxdb"
+#   ## AMQP exchange
+#   exchange = "telegraf"
+#   ## Auth method. PLAIN and EXTERNAL are supported
+#   # auth_method = "PLAIN"
+#   ## Telegraf tag to use as a routing key
+#   ## ie, if this tag exists, its value will be used as the routing key
+#   routing_tag = "host"
+#
+#   ## InfluxDB retention policy
+#   # retention_policy = "default"
+#   ## InfluxDB database
+#   # database = "telegraf"
+#   ## InfluxDB precision
+#   # precision = "s"
+#
+#   ## Optional SSL Config
+#   # ssl_ca = "/etc/telegraf/ca.pem"
+#   # ssl_cert = "/etc/telegraf/cert.pem"
+#   # ssl_key = "/etc/telegraf/key.pem"
+#   ## Use SSL but skip chain & host verification
+#   # insecure_skip_verify = false
+#
+#   ## Data format to output.
+#   ## Each data format has its own unique set of configuration options, read
+#   ## more about them here:
+#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+#   data_format = "influx"
+
+
+# # Configuration for AWS CloudWatch output.
+# [[outputs.cloudwatch]]
+#   ## Amazon REGION
+#   region = 'us-east-1'
+#
+#   ## Namespace for the CloudWatch MetricDatums
+#   namespace = 'InfluxData/Telegraf'
+
+
+# # Configuration for DataDog API to send metrics to.
+# [[outputs.datadog]]
+#   ## Datadog API key
+#   apikey = "my-secret-key" # required.
+#
+#   ## Connection timeout.
+#   # timeout = "5s"
+
+
+# # Send telegraf metrics to file(s)
+# [[outputs.file]]
+#   ## Files to write to, "stdout" is a specially handled file.
+#   files = ["stdout", "/tmp/metrics.out"]
+#
+#   ## Data format to output.
+#   ## Each data format has its own unique set of configuration options, read
+#   ## more about them here:
+#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+#   data_format = "influx"
+
+
+# # Configuration for Graphite server to send metrics to
+# [[outputs.graphite]]
+#   ## TCP endpoint for your graphite instance.
+#   servers = ["localhost:2003"]
+#   ## Prefix metrics name
+#   prefix = ""
+#   ## timeout in seconds for the write connection to graphite
+#   timeout = 2
+
+
+# # Configuration for the Kafka server to send metrics to
+# [[outputs.kafka]]
+#   ## URLs of kafka brokers
+#   brokers = ["localhost:9092"]
+#   ## Kafka topic for producer messages
+#   topic = "telegraf"
+#   ## Telegraf tag to use as a routing key
+#   ## ie, if this tag exists, its value will be used as the routing key
+#   routing_tag = "host"
+#
+#   ## CompressionCodec represents the various compression codecs recognized by
+#   ## Kafka in messages.
+#   ##   0 : No compression
+#   ##   1 : Gzip compression
+#   ##   2 : Snappy compression
+#   compression_codec = 0
+#
+#   ## RequiredAcks is used in Produce Requests to tell the broker how many
+#   ## replica acknowledgements it must see before responding
+#   ##   0 : the producer never waits for an acknowledgement from the broker.
+#   ##       This option provides the lowest latency but the weakest durability
+#   ##       guarantees (some data will be lost when a server fails).
+#   ##   1 : the producer gets an acknowledgement after the leader replica has
+#   ##       received the data. This option provides better durability as the
+#   ##       client waits until the server acknowledges the request as successful
+#   ##       (only messages that were written to the now-dead leader but not yet
+#   ##       replicated will be lost).
+#   ##   -1: the producer gets an acknowledgement after all in-sync replicas have
+#   ##       received the data. This option provides the best durability; we
+#   ##       guarantee that no messages will be lost as long as at least one in
+#   ##       sync replica remains.
+#   required_acks = -1
+#
+#   ## The total number of times to retry sending a message
+#   max_retry = 3
+#
+#   ## Optional SSL Config
+#   # ssl_ca = "/etc/telegraf/ca.pem"
+#   # ssl_cert = "/etc/telegraf/cert.pem"
+#   # ssl_key = "/etc/telegraf/key.pem"
+#   ## Use SSL but skip chain & host verification
+#   # insecure_skip_verify = false
+#
+#   ## Data format to output.
+#   ## Each data format has its own unique set of configuration options, read
+#   ## more about them here:
+#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+#   data_format = "influx"
+
+
+# # Configuration for the AWS Kinesis output.
+# [[outputs.kinesis]]
+#   ## Amazon REGION of kinesis endpoint.
+#   region = "ap-southeast-2"
+#   ## Kinesis StreamName must exist prior to starting telegraf.
+#   streamname = "StreamName"
+#   ## PartitionKey as used for sharding data.
+#   partitionkey = "PartitionKey"
+#   ## format of the Data payload in the kinesis PutRecord, supported
+#   ## String and Custom.
+#   format = "string"
+#   ## debug will show upstream aws messages.
+#   debug = false
+
+
+# # Configuration for Librato API to send metrics to.
+# [[outputs.librato]]
+#   ## Librato API Docs
+#   ## http://dev.librato.com/v1/metrics-authentication
+#
+#   ## Librato API user
+#   api_user = "telegraf@influxdb.com" # required.
+#
+#   ## Librato API token
+#   api_token = "my-secret-token" # required.
+#
+#   ## Debug
+#   # debug = false
+#
+#   ## Tag Field to populate source attribute (optional)
+#   ## This is typically the _hostname_ from which the metric was obtained.
+#   source_tag = "host"
+#
+#   ## Connection timeout.
+#   # timeout = "5s"
+
+
+# # Configuration for MQTT server to send metrics to
+# [[outputs.mqtt]]
+#   servers = ["localhost:1883"] # required.
+#
+#   ## MQTT outputs send metrics to this topic format
+#   ##   "<topic_prefix>/<hostname>/<pluginname>/"
+#   ## ex: prefix/web01.example.com/mem
+#   topic_prefix = "telegraf"
+#
+#   ## username and password to connect to the MQTT server.
+#   # username = "telegraf"
+#   # password = "metricsmetricsmetricsmetrics"
+#
+#   ## Optional SSL Config
+#   # ssl_ca = "/etc/telegraf/ca.pem"
+#   # ssl_cert = "/etc/telegraf/cert.pem"
+#   # ssl_key = "/etc/telegraf/key.pem"
+#   ## Use SSL but skip chain & host verification
+#   # insecure_skip_verify = false
+#
+#   ## Data format to output.
+#   ## Each data format has its own unique set of configuration options, read
+#   ## more about them here:
+#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+#   data_format = "influx"
+
+
+# # Send telegraf measurements to NSQD
+# [[outputs.nsq]]
+#   ## Location of nsqd instance listening on TCP
+#   server = "localhost:4150"
+#   ## NSQ topic for producer messages
+#   topic = "telegraf"
+#
+#   ## Data format to output.
+#   ## Each data format has its own unique set of configuration options, read
+#   ## more about them here:
+#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+#   data_format = "influx"
+
+
+# # Configuration for OpenTSDB server to send metrics to
+# [[outputs.opentsdb]]
+#   ## prefix for metrics keys
+#   prefix = "my.specific.prefix."
+#
+#   ## Telnet Mode ##
+#   ## DNS name of the OpenTSDB server in telnet mode
+#   host = "opentsdb.example.com"
+#
+#   ## Port of the OpenTSDB server in telnet mode
+#   port = 4242
+#
+#   ## Debug true - Prints OpenTSDB communication
+#   debug = false
+
+
+# # Configuration for the Prometheus client to spawn
+# [[outputs.prometheus_client]]
+#   ## Address to listen on
+#   # listen = ":9126"
+
+
+# # Configuration for the Riemann server to send metrics to
+# [[outputs.riemann]]
+#   ## URL of server
+#   url = "localhost:5555"
+#   ## transport protocol to use, either tcp or udp
+#   transport = "tcp"
+#   ## separator to use between input name and field name in Riemann service name
+#   separator = " "
+
+
 
 ###############################################################################
-#                                   INPUTS                                    #
+#                            INPUT PLUGINS                                    #
 ###############################################################################
 
 # Read metrics about cpu usage
 [[inputs.cpu]]
-  # Whether to report per-cpu stats or not
+  ## Whether to report per-cpu stats or not
   percpu = true
-  # Whether to report total system cpu stats or not
+  ## Whether to report total system cpu stats or not
   totalcpu = true
-  # Comment this line if you want the raw CPU time metrics
+  ## Comment this line if you want the raw CPU time metrics
   fielddrop = ["time_*"]
 
+
 # Read metrics about disk usage by mount point
 [[inputs.disk]]
-  # By default, telegraf gather stats for all mountpoints.
-  # Setting mountpoints will restrict the stats to the specified mountpoints.
-  # mount_points=["/"]
+  ## By default, telegraf gathers stats for all mountpoints.
+  ## Setting mountpoints will restrict the stats to the specified mountpoints.
+  # mount_points = ["/"]
 
-  # Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually
-  # present on /run, /var/run, /dev/shm or /dev).
+  ## Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually
+  ## present on /run, /var/run, /dev/shm or /dev).
   ignore_fs = ["tmpfs", "devtmpfs"]
 
+
 # Read metrics about disk IO by device
 [[inputs.diskio]]
-  # By default, telegraf will gather stats for all devices including
-  # disk partitions.
-  # Setting devices will restrict the stats to the specified devices.
+  ## By default, telegraf will gather stats for all devices including
+  ## disk partitions.
+  ## Setting devices will restrict the stats to the specified devices.
   # devices = ["sda", "sdb"]
-  # Uncomment the following line if you do not need disk serial numbers.
+  ## Uncomment the following line if you do not need disk serial numbers.
   # skip_serial_number = true
 
-# Get kernel statistics from /proc/stat
-[[inputs.kernel]]
-  # no configuration
 
 # Read metrics about memory usage
 [[inputs.mem]]
   # no configuration
 
+
 # Get the number of processes and group them by status
 [[inputs.processes]]
   # no configuration
 
+
 # Read metrics about swap memory usage
 [[inputs.swap]]
   # no configuration
 
+
 # Read metrics about system load & uptime
 [[inputs.system]]
   # no configuration
 
 
+# # Read stats from an aerospike server
+# [[inputs.aerospike]]
+#   ## Aerospike servers to connect to (with port)
+#   ## This plugin will query all namespaces the aerospike
+#   ## server has configured and get stats for them.
+#   servers = ["localhost:3000"]
+
+
+# # Read Apache status information (mod_status)
+# [[inputs.apache]]
+#   ## An array of Apache status URI to gather stats.
+#   urls = ["http://localhost/server-status?auto"]
+
+
+# # Read metrics of bcache from stats_total and dirty_data
+# [[inputs.bcache]]
+#   ## Bcache sets path
+#   ## If not specified, then default is:
+#   bcachePath = "/sys/fs/bcache"
+#
+#   ## By default, telegraf gathers stats for all bcache devices
+#   ## Setting devices will restrict the stats to the specified
+#   ## bcache devices.
+#   bcacheDevs = ["bcache0"]
+
+
+# # Read metrics from one or many couchbase clusters
+# [[inputs.couchbase]]
+#   ## specify servers via a url matching:
+#   ##   [protocol://][:password]@address[:port]
+#   ##   e.g.
+#   ##     http://couchbase-0.example.com/
+#   ##     http://admin:secret@couchbase-0.example.com:8091/
+#   ##
+#   ## If no servers are specified, then localhost is used as the host.
+#   ## If no protocol is specified, HTTP is used.
+#   ## If no port is specified, 8091 is used.
+#   servers = ["http://localhost:8091"]
+
+
+# # Read CouchDB Stats from one or more servers
+# [[inputs.couchdb]]
+#   ## Works with CouchDB stats endpoints out of the box
+#   ## Multiple HOSTs from which to read CouchDB stats:
+#   hosts = ["http://localhost:8086/_stats"]
+
+
+# # Read metrics from one or many disque servers
+# [[inputs.disque]]
+#   ## An array of URI to gather stats about. Specify an ip or hostname
+#   ## with optional port and password.
+#   ## ie disque://localhost, disque://10.10.3.33:18832, 10.0.0.1:10000, etc.
+#   ## If no servers are specified, then localhost is used as the host.
+#   servers = ["localhost"]
+
+
+# # Query given DNS server and gives statistics
+# [[inputs.dns_query]]
+#   ## servers to query
+#   servers = ["8.8.8.8"] # required
+#
+#   ## Domains or subdomains to query. "."(root) is default
+#   domains = ["."] # optional
+#
+#   ## Query record type. Default is "A"
+#   ## Possible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV.
+#   record_type = "A" # optional
+#
+#   ## Dns server port. 53 is default
+#   port = 53 # optional
+#
+#   ## Query timeout in seconds. Default is 2 seconds
+#   timeout = 2 # optional
+
+
+# # Read metrics about docker containers
+# [[inputs.docker]]
+#   ## Docker Endpoint
+#   ##   To use TCP, set endpoint = "tcp://[ip]:[port]"
+#   ##   To use environment variables (ie, docker-machine), set endpoint = "ENV"
+#   endpoint = "unix:///var/run/docker.sock"
+#   ## Only collect metrics for these containers, collect all if empty
+#   container_names = []
+
+
+# # Read statistics from one or many dovecot servers
+# [[inputs.dovecot]]
+#   ## specify dovecot servers via an address:port list
+#   ##   e.g.
+#   ##     localhost:24242
+#   ##
+#   ## If no servers are specified, then localhost is used as the host.
+#   servers = ["localhost:24242"]
+#   ## Only collect metrics for these domains, collect all if empty
+#   domains = []
+
+
+# # Read stats from one or more Elasticsearch servers or clusters
+# [[inputs.elasticsearch]]
+#   ## specify a list of one or more Elasticsearch servers
+#   servers = ["http://localhost:9200"]
+#
+#   ## set local to false when you want to read the indices stats from all nodes
+#   ## within the cluster
+#   local = true
+#
+#   ## set cluster_health to true when you want to also obtain cluster level stats
+#   cluster_health = false
+
+
+# # Read metrics from one or more commands that can output to stdout
+# [[inputs.exec]]
+#   ## Commands array
+#   commands = ["/tmp/test.sh", "/usr/bin/mycollector --foo=bar"]
+#
+#   ## measurement name suffix (for separating different commands)
+#   name_suffix = "_mycollector"
+#
+#   ## Data format to consume.
+#   ## Each data format has its own unique set of configuration options, read
+#   ## more about them here:
+#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+#   data_format = "influx"
+
+
+# # Read metrics of haproxy, via socket or csv stats page
+# [[inputs.haproxy]]
+#   ## An array of addresses to gather stats about. Specify an ip or hostname
+#   ## with optional port. ie localhost, 10.10.3.33:1936, etc.
+#
+#   ## If no servers are specified, then 127.0.0.1:1936 is used
+#   servers = ["http://myhaproxy.com:1936", "http://anotherhaproxy.com:1936"]
+#   ## Or you can also use a local socket (not working yet)
+#   ## servers = ["socket://run/haproxy/admin.sock"]
+
+
+# # Read flattened metrics from one or more JSON HTTP endpoints
+# [[inputs.httpjson]]
+#   ## NOTE This plugin only reads numerical measurements, strings and booleans
+#   ## will be ignored.
+#
+#   ## a name for the service being polled
+#   name = "webserver_stats"
+#
+#   ## URL of each server in the service's cluster
+#   servers = [
+#     "http://localhost:9999/stats/",
+#     "http://localhost:9998/stats/",
+#   ]
+#
+#   ## HTTP method to use: GET or POST (case-sensitive)
+#   method = "GET"
+#
+#   ## List of tag names to extract from top-level of JSON server response
+#   # tag_keys = [
+#   #   "my_tag_1",
+#   #   "my_tag_2"
+#   # ]
+#
+#   ## HTTP parameters (all values must be strings)
+#   [inputs.httpjson.parameters]
+#     event_type = "cpu_spike"
+#     threshold = "0.75"
+#
+#   ## HTTP Header parameters (all values must be strings)
+#   # [inputs.httpjson.headers]
+#   #   X-Auth-Token = "my-xauth-token"
+#   #   apiVersion = "v1"
+#
+#   ## Optional SSL Config
+#   # ssl_ca = "/etc/telegraf/ca.pem"
+#   # ssl_cert = "/etc/telegraf/cert.pem"
+#   # ssl_key = "/etc/telegraf/key.pem"
+#   ## Use SSL but skip chain & host verification
+#   # insecure_skip_verify = false
+
+
+# # Read InfluxDB-formatted JSON metrics from one or more HTTP endpoints
+# [[inputs.influxdb]]
+#   ## Works with InfluxDB debug endpoints out of the box,
+#   ## but other services can use this format too.
+#   ## See the influxdb plugin's README for more details.
+#
+#   ## Multiple URLs from which to read InfluxDB-formatted JSON
+#   urls = [
+#     "http://localhost:8086/debug/vars"
+#   ]
+
+
+# # Read metrics from one or many bare metal servers
+# [[inputs.ipmi_sensor]]
+#   ## specify servers via a url matching:
+#   ##   [username[:password]@][protocol[(address)]]
+#   ##   e.g.
+#   ##     root:passwd@lan(127.0.0.1)
+#   ##
+#   servers = ["USERID:PASSW0RD@lan(192.168.1.1)"]
+
+
+# # Read JMX metrics through Jolokia
+# [[inputs.jolokia]]
+#   ## This is the context root used to compose the jolokia url
+#   context = "/jolokia/read"
+#
+#   ## List of servers exposing jolokia read service
+#   [[inputs.jolokia.servers]]
+#     name = "stable"
+#     host = "192.168.103.2"
+#     port = "8180"
+#     # username = "myuser"
+#     # password = "mypassword"
+#
+#   ## List of metrics collected on above servers
+#   ## Each metric consists of a name, a jmx path and either
+#   ## a pass or drop slice attribute.
+#   ## This collects all heap memory usage metrics.
+#   [[inputs.jolokia.metrics]]
+#     name = "heap_memory_usage"
+#     jmx = "/java.lang:type=Memory/HeapMemoryUsage"
+
+
+# # Read metrics from a LeoFS Server via SNMP
+# [[inputs.leofs]]
+#   ## An array of URI to gather stats about LeoFS.
+#   ## Specify an ip or hostname with port. ie 127.0.0.1:4020
+#   servers = ["127.0.0.1:4021"]
+
+
+# # Read metrics from local Lustre service on OST, MDS
+# [[inputs.lustre2]]
+#   ## An array of /proc globs to search for Lustre stats
+#   ## If not specified, the default will work on Lustre 2.5.x
+#   ##
+#   # ost_procfiles = [
+#   #   "/proc/fs/lustre/obdfilter/*/stats",
+#   #   "/proc/fs/lustre/osd-ldiskfs/*/stats"
+#   # ]
+#   # mds_procfiles = ["/proc/fs/lustre/mdt/*/md_stats"]
+
+
+# # Gathers metrics from the /3.0/reports MailChimp API
+# [[inputs.mailchimp]]
+#   ## MailChimp API key
+#   ## get from https://admin.mailchimp.com/account/api/
+#   api_key = "" # required
+#   ## Reports for campaigns sent more than days_old ago will not be collected.
+#   ## 0 means collect all.
+#   days_old = 0
+#   ## Campaign ID to get, if empty gets all campaigns, this option overrides days_old
+#   # campaign_id = ""
+
+
+# # Read metrics from one or many memcached servers
+# [[inputs.memcached]]
+#   ## An array of addresses to gather stats about. Specify an ip or hostname
+#   ## with optional port. ie localhost, 10.0.0.1:11211, etc.
+#   servers = ["localhost:11211"]
+#   # unix_sockets = ["/var/run/memcached.sock"]
+
+
+# # Telegraf plugin for gathering metrics from N Mesos masters
+# [[inputs.mesos]]
+#   # Timeout, in ms.
+#   timeout = 100
+#   # A list of Mesos masters, default value is localhost:5050.
+#   masters = ["localhost:5050"]
+#   # Metrics groups to be collected, by default, all enabled.
+#   master_collections = [
+#     "resources",
+#     "master",
+#     "system",
+#     "slaves",
+#     "frameworks",
+#     "messages",
+#     "evqueue",
+#     "registrar",
+#   ]
+
+
+# # Read metrics from one or many MongoDB servers
+# [[inputs.mongodb]]
+#   ## An array of URI to gather stats about. Specify an ip or hostname
+#   ## with optional port and password. ie,
+#   ##   mongodb://user:auth_key@10.10.3.30:27017,
+#   ##   mongodb://10.10.3.33:18832,
+#   ##   10.0.0.1:10000, etc.
+#   servers = ["127.0.0.1:27017"]
+
+
+# # Read metrics from one or many mysql servers
+# [[inputs.mysql]]
+#   ## specify servers via a url matching:
+#   ##   [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify]]
+#   ##   see https://github.com/go-sql-driver/mysql#dsn-data-source-name
+#   ##   e.g.
+#   ##     root:passwd@tcp(127.0.0.1:3306)/?tls=false
+#   ##     root@tcp(127.0.0.1:3306)/?tls=false
+#   ##
+#   ## If no servers are specified, then localhost is used as the host.
+#   servers = ["tcp(127.0.0.1:3306)/"]
+
+
+# # Read metrics about network interface usage
+# [[inputs.net]]
+#   ## By default, telegraf gathers stats from any up interface (excluding loopback)
+#   ## Setting interfaces will tell it to gather these explicit interfaces,
+#   ## regardless of status.
+#   ##
+#   # interfaces = ["eth0"]
+
+
+# # TCP or UDP 'ping' given url and collect response time in seconds
+# [[inputs.net_response]]
+#   ## Protocol, must be "tcp" or "udp"
+#   protocol = "tcp"
+#   ## Server address (default localhost)
+#   address = "github.com:80"
+#   ## Set timeout (default 1.0 seconds)
+#   timeout = 1.0
+#   ## Set read timeout (default 1.0 seconds)
+#   read_timeout = 1.0
+#   ## Optional string sent to the server
+#   # send = "ssh"
+#   ## Optional expected string in answer
+#   # expect = "ssh"
+
+
+# # Read TCP metrics such as established, time wait and sockets counts.
+# [[inputs.netstat]]
+#   # no configuration
+
+
+# # Read Nginx's basic status information (ngx_http_stub_status_module)
+# [[inputs.nginx]]
+#   ## An array of Nginx stub_status URI to gather stats.
+#   urls = ["http://localhost/status"]
+
+
+# # Read NSQ topic and channel statistics.
+# [[inputs.nsq]]
+#   ## An array of NSQD HTTP API endpoints
+#   endpoints = ["http://localhost:4151"]
+
+
+# # Get standard NTP query metrics, requires ntpq executable.
+# [[inputs.ntpq]]
+#   ## If false, set the -n ntpq flag. Can reduce metric gather time.
+#   dns_lookup = true
+
+
+# # Read metrics of passenger using passenger-status
+# [[inputs.passenger]]
+#   ## Path of passenger-status.
+#   ##
+#   ## The plugin gathers metrics by parsing the XML output of passenger-status
+#   ## More information about the tool:
+#   ##   https://www.phusionpassenger.com/library/admin/apache/overall_status_report.html
+#   ##
+#   ## If no path is specified, then the plugin simply executes passenger-status,
+#   ## hopefully it can be found in your PATH
+#   command = "passenger-status -v --show=xml"
+
+
+# # Read metrics of phpfpm, via HTTP status page or socket
+# [[inputs.phpfpm]]
+#   ## An array of addresses to gather stats about. Specify an ip or hostname
+#   ## with optional port and path
+#   ##
+#   ## Plugin can be configured in three modes (any can be used):
+#   ##   - http: the URL must start with http:// or https://, ie:
+#   ##       "http://localhost/status"
+#   ##       "http://192.168.130.1/status?full"
+#   ##
+#   ##   - unixsocket: path to fpm socket, ie:
+#   ##       "/var/run/php5-fpm.sock"
+#   ##     or using a custom fpm status path:
+#   ##       "/var/run/php5-fpm.sock:fpm-custom-status-path"
+#   ##
+#   ##   - fcgi: the URL must start with fcgi:// or cgi://, and port must be present, ie:
+#   ##       "fcgi://10.0.0.12:9000/status"
+#   ##       "cgi://10.0.10.12:9001/status"
+#   ##
+#   ## Example of multiple gathering from local socket and remote host
+#   ## urls = ["http://192.168.1.20/status", "/tmp/fpm.sock"]
+#   urls = ["http://localhost/status"]
+
+
+# # Ping given url(s) and return statistics
+# [[inputs.ping]]
+#   ## NOTE: this plugin forks the ping command. You may need to set capabilities
+#   ## via setcap cap_net_raw+p /bin/ping
+#
+#   ## urls to ping
+#   urls = ["www.google.com"] # required
+#   ## number of pings to send (ping -c <COUNT>)
+#   count = 1 # required
+#   ## interval, in s, at which to ping. 0 == default (ping -i <PING_INTERVAL>)
+#   ping_interval = 0.0
+#   ## ping timeout, in s. 0 == no timeout (ping -t <TIMEOUT>)
+#   timeout = 0.0
+#   ## interface to send ping from (ping -I <INTERFACE>)
+#   interface = ""
+
+
+# # Read metrics from one or many postgresql servers
+# [[inputs.postgresql]]
+#   ## specify address via a url matching:
+#   ##   postgres://[pqgotest[:password]]@localhost[/dbname]\
+#   ##       ?sslmode=[disable|verify-ca|verify-full]
+#   ## or a simple string:
+#   ##   host=localhost user=pqotest password=... sslmode=... dbname=app_production
+#   ##
+#   ## All connection parameters are optional.
+#   ##
+#   ## Without the dbname parameter, the driver will default to a database
+#   ## with the same name as the user. This dbname is just for instantiating a
+#   ## connection with the server and doesn't restrict the databases we are trying
+#   ## to grab metrics for.
+#   ##
+#   address = "host=localhost user=postgres sslmode=disable"
+#
+#   ## A list of databases to pull metrics about. If not specified, metrics for all
+#   ## databases are gathered.
+#   # databases = ["app_production", "testing"]
+
+
+# # Read metrics from one or many postgresql servers
+# [[inputs.postgresql_extensible]]
+#   ## specify address via a url matching:
+#   ##   postgres://[pqgotest[:password]]@localhost[/dbname]\
+#   ##       ?sslmode=[disable|verify-ca|verify-full]
+#   ## or a simple string:
+#   ##   host=localhost user=pqotest password=... sslmode=... dbname=app_production
+#   #
+#   ## All connection parameters are optional.  #
+#   ## Without the dbname parameter, the driver will default to a database
+#   ## with the same name as the user. This dbname is just for instantiating a
+#   ## connection with the server and doesn't restrict the databases we are trying
+#   ## to grab metrics for.
+#   #
+#   address = "host=localhost user=postgres sslmode=disable"
+#   ## A list of databases to pull metrics about. If not specified, metrics for all
+#   ## databases are gathered.
+#   ## databases = ["app_production", "testing"]
+#   #
+#   ## Define the toml config where the sql queries are stored
+#   ## New queries can be added, if the withdbname is set to true and there is no
+#   ## databases defined in the 'databases field', the sql query is ended by a
+#   ## 'is not null' in order to make the query succeed.
+#   ## Example :
+#   ## The sqlquery : "SELECT * FROM pg_stat_database where datname" becomes
+#   ## "SELECT * FROM pg_stat_database where datname IN ('postgres', 'pgbench')"
+#   ## because the databases variable was set to ['postgres', 'pgbench' ] and the
+#   ## withdbname was true. Be careful that if the withdbname is set to false you
+#   ## don't have to define the where clause (aka with the dbname); the tagvalue
+#   ## field is used to define custom tags (separated by commas)
+#   #
+#   ## Structure :
+#   ## [[inputs.postgresql_extensible.query]]
+#   ##   sqlquery string
+#   ##   version string
+#   ##   withdbname boolean
+#   ##   tagvalue string (comma separated)
+#   [[inputs.postgresql_extensible.query]]
+#     sqlquery="SELECT * FROM pg_stat_database"
+#     version=901
+#     withdbname=false
+#     tagvalue=""
+#   [[inputs.postgresql_extensible.query]]
+#     sqlquery="SELECT * FROM pg_stat_bgwriter"
+#     version=901
+#     withdbname=false
+#     tagvalue=""
+
+
+# # Read metrics from one or many PowerDNS servers
+# [[inputs.powerdns]]
+#   ## An array of sockets to gather stats about.
+#   ## Specify a path to unix socket.
+#   unix_sockets = ["/var/run/pdns.controlsocket"]
+
+
+# # Monitor process cpu and memory usage
+# [[inputs.procstat]]
+#   ## Must specify one of: pid_file, exe, or pattern
+#   ## PID file to monitor process
+#   pid_file = "/var/run/nginx.pid"
+#   ## executable name (ie, pgrep <exe>)
+#   # exe = "nginx"
+#   ## pattern as argument for pgrep (ie, pgrep -f <pattern>)
+#   # pattern = "nginx"
+#   ## user as argument for pgrep (ie, pgrep -u <user>)
+#   # user = "nginx"
+#
+#   ## Field name prefix
+#   prefix = ""
+
+
+# # Read metrics from one or many prometheus clients
+# [[inputs.prometheus]]
+#   ## An array of urls to scrape metrics from.
+#   urls = ["http://localhost:9100/metrics"]
+#
+#   ## Use SSL but skip chain & host verification
+#   # insecure_skip_verify = false
+#   ## Use bearer token for authorization
+#   # bearer_token = /path/to/bearer/token
+
+
+# # Reads last_run_summary.yaml file and converts to measurements
+# [[inputs.puppetagent]]
+#   ## Location of puppet last run summary file
+#   location = "/var/lib/puppet/state/last_run_summary.yaml"
+
+
+# # Read metrics from one or many RabbitMQ servers via the management API
+# [[inputs.rabbitmq]]
+#   url = "http://localhost:15672" # required
+#   # name = "rmq-server-1" # optional tag
+#   # username = "guest"
+#   # password = "guest"
+#
+#   ## A list of nodes to pull metrics about. If not specified, metrics for
+#   ## all nodes are gathered.
+#   # nodes = ["rabbit@node1", "rabbit@node2"]
+
+
+# # Read raindrops stats (raindrops - real-time stats for preforking Rack servers)
+# [[inputs.raindrops]]
+#   ## An array of raindrops middleware URI to gather stats.
+#   urls = ["http://localhost:8080/_raindrops"]
+
+
+# # Read metrics from one or many redis servers
+# [[inputs.redis]]
+#   ## specify servers via a url matching:
+#   ##   [protocol://][:password]@address[:port]
+#   ##   e.g.
+#   ##     tcp://localhost:6379
+#   ##     tcp://:password@192.168.99.100
+#   ##
+#   ## If no servers are specified, then localhost is used as the host.
+#   ## If no port is specified, 6379 is used
+#   servers = ["tcp://localhost:6379"]
+
+
+# # Read metrics from one or many RethinkDB servers
+# [[inputs.rethinkdb]]
+#   ## An array of URI to gather stats about. Specify an ip or hostname
+#   ## with optional port and password. ie,
+#   ##   rethinkdb://user:auth_key@10.10.3.30:28105,
+#   ##   rethinkdb://10.10.3.33:18832,
+#   ##   10.0.0.1:10000, etc.
+# servers = ["127.0.0.1:28015"] + + +# # Read metrics one or many Riak servers +# [[inputs.riak]] +# # Specify a list of one or more riak http servers +# servers = ["http://localhost:8098"] + + +# # Reads oids value from one or many snmp agents +# [[inputs.snmp]] +# ## Use 'oids.txt' file to translate oids to names +# ## To generate 'oids.txt' you need to run: +# ## snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt +# ## Or if you have an other MIB folder with custom MIBs +# ## snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt +# snmptranslate_file = "/tmp/oids.txt" +# [[inputs.snmp.host]] +# address = "192.168.2.2:161" +# # SNMP community +# community = "public" # default public +# # SNMP version (1, 2 or 3) +# # Version 3 not supported yet +# version = 2 # default 2 +# # SNMP response timeout +# timeout = 2.0 # default 2.0 +# # SNMP request retries +# retries = 2 # default 2 +# # Which get/bulk do you want to collect for this host +# collect = ["mybulk", "sysservices", "sysdescr"] +# # Simple list of OIDs to get, in addition to "collect" +# get_oids = [] +# +# [[inputs.snmp.host]] +# address = "192.168.2.3:161" +# community = "public" +# version = 2 +# timeout = 2.0 +# retries = 2 +# collect = ["mybulk"] +# get_oids = [ +# "ifNumber", +# ".1.3.6.1.2.1.1.3.0", +# ] +# +# [[inputs.snmp.get]] +# name = "ifnumber" +# oid = "ifNumber" +# +# [[inputs.snmp.get]] +# name = "interface_speed" +# oid = "ifSpeed" +# instance = "0" +# +# [[inputs.snmp.get]] +# name = "sysuptime" +# oid = ".1.3.6.1.2.1.1.3.0" +# unit = "second" +# +# [[inputs.snmp.bulk]] +# name = "mybulk" +# max_repetition = 127 +# oid = ".1.3.6.1.2.1.1" +# +# [[inputs.snmp.bulk]] +# name = "ifoutoctets" +# max_repetition = 127 +# oid = "ifOutOctets" +# +# [[inputs.snmp.host]] +# address = "192.168.2.13:161" +# #address = "127.0.0.1:161" +# community = "public" +# version = 2 +# timeout = 2.0 +# retries = 2 +# #collect = ["mybulk", "sysservices", "sysdescr", "systype"] +# collect = ["sysuptime" ] +# [[inputs.snmp.host.table]] +# name = "iftable3" +# include_instances = ["enp5s0", "eth1"] +# +# # SNMP TABLEs +# # table without mapping neither subtables +# [[inputs.snmp.table]] +# name = "iftable1" +# oid = ".1.3.6.1.2.1.31.1.1.1" +# +# # table without mapping but with subtables +# [[inputs.snmp.table]] +# name = "iftable2" +# oid = ".1.3.6.1.2.1.31.1.1.1" +# sub_tables = [".1.3.6.1.2.1.2.2.1.13"] +# +# # table with mapping but without subtables +# [[inputs.snmp.table]] +# name = "iftable3" +# oid = ".1.3.6.1.2.1.31.1.1.1" +# # if empty. get all instances +# mapping_table = ".1.3.6.1.2.1.31.1.1.1.1" +# # if empty, get all subtables +# +# # table with both mapping and subtables +# [[inputs.snmp.table]] +# name = "iftable4" +# oid = ".1.3.6.1.2.1.31.1.1.1" +# # if empty get all instances +# mapping_table = ".1.3.6.1.2.1.31.1.1.1.1" +# # if empty get all subtables +# # sub_tables could be not "real subtables" +# sub_tables=[".1.3.6.1.2.1.2.2.1.13", "bytes_recv", "bytes_send"] + + +# # Read metrics from Microsoft SQL Server +# [[inputs.sqlserver]] +# ## Specify instances to monitor with a list of connection strings. +# ## All connection parameters are optional. +# ## By default, the host is localhost, listening on default port, TCP 1433. +# ## for Windows, the user is the currently running AD user (SSO). +# ## See https://github.com/denisenkom/go-mssqldb for detailed connection +# ## parameters. 
+#   # servers = [
+#   #   "Server=192.168.1.10;Port=1433;User Id=<user>;Password=<pw>;app name=telegraf;log=1;",
+#   # ]
+
+
+# # Inserts sine and cosine waves for demonstration purposes
+# [[inputs.trig]]
+#   ## Set the amplitude
+#   amplitude = 10.0
+
+
+# # Read Twemproxy stats data
+# [[inputs.twemproxy]]
+#   ## Twemproxy stats address and port (no scheme)
+#   addr = "localhost:22222"
+#   ## Monitor pool name
+#   pools = ["redis_pool", "mc_pool"]
+
+
+# # Read metrics of ZFS from arcstats, zfetchstats and vdev_cache_stats
+# [[inputs.zfs]]
+#   ## ZFS kstat path
+#   ## If not specified, then default is:
+#   kstatPath = "/proc/spl/kstat/zfs"
+#
+#   ## By default, telegraf gathers all zfs stats
+#   ## If not specified, then default is:
+#   kstatMetrics = ["arcstats", "zfetchstats", "vdev_cache_stats"]
+#
+#   ## By default, don't gather zpool stats
+#   poolMetrics = false
+
+
+# # Reads 'mntr' stats from one or many zookeeper servers
+# [[inputs.zookeeper]]
+#   ## An array of addresses to gather stats about. Specify an ip or hostname
+#   ## with port. ie localhost:2181, 10.0.0.1:2181, etc.
+#
+#   ## If no servers are specified, then localhost is used as the host.
+#   ## If no port is specified, 2181 is used
+#   servers = [":2181"]
+
+
 
 ###############################################################################
-#                              SERVICE INPUTS                                 #
+#                            SERVICE INPUT PLUGINS                            #
 ###############################################################################
+
+# # Generic UDP listener
+# [[inputs.udp_listener]]
+#   ## Address and port to host UDP listener on
+#   service_address = ":8092"
+#
+#   ## Number of UDP messages allowed to queue up. Once filled, the
+#   ## UDP listener will start dropping packets.
+#   allowed_pending_messages = 10000
+#
+#   ## UDP packet size for the server to listen for. This will depend
+#   ## on the size of the packets that the client is sending, which is
+#   ## usually 1500 bytes, but can be as large as 65,535 bytes.
+#   udp_packet_size = 1500
+#
+#   ## Data format to consume.
+#   ## Each data format has its own unique set of configuration options, read
+#   ## more about them here:
+#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+#   data_format = "influx"
+
+
+# # A Github Webhook Event collector
+# [[inputs.github_webhooks]]
+#   ## Address and port to host Webhook listener on
+#   service_address = ":1618"
+
+
+# # Read metrics from Kafka topic(s)
+# [[inputs.kafka_consumer]]
+#   ## topic(s) to consume
+#   topics = ["telegraf"]
+#   ## an array of Zookeeper connection strings
+#   zookeeper_peers = ["localhost:2181"]
+#   ## Zookeeper Chroot
+#   zookeeper_chroot = "/"
+#   ## the name of the consumer group
+#   consumer_group = "telegraf_metrics_consumers"
+#   ## Offset (must be either "oldest" or "newest")
+#   offset = "oldest"
+#
+#   ## Data format to consume.
+#   ## Each data format has its own unique set of configuration options, read
+#   ## more about them here:
+#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+#   data_format = "influx"
+
+
+# # Read metrics from MQTT topic(s)
+# [[inputs.mqtt_consumer]]
+#   servers = ["localhost:1883"]
+#   ## MQTT QoS, must be 0, 1, or 2
+#   qos = 0
+#
+#   ## Topics to subscribe to
+#   topics = [
+#     "telegraf/host01/cpu",
+#     "telegraf/+/mem",
+#     "sensors/#",
+#   ]
+#
+#   # if true, messages that can't be delivered while the subscriber is offline
+#   # will be delivered when it comes back (such as on service restart).
+#   # NOTE: if true, client_id MUST be set
+#   persistent_session = false
+#   # If empty, a random client ID will be generated.
+#   client_id = ""
+#
+#   ## username and password to connect to the MQTT server.
+#   # username = "telegraf"
+#   # password = "metricsmetricsmetricsmetrics"
+#
+#   ## Optional SSL Config
+#   # ssl_ca = "/etc/telegraf/ca.pem"
+#   # ssl_cert = "/etc/telegraf/cert.pem"
+#   # ssl_key = "/etc/telegraf/key.pem"
+#   ## Use SSL but skip chain & host verification
+#   # insecure_skip_verify = false
+#
+#   ## Data format to consume.
+#   ## Each data format has its own unique set of configuration options, read
+#   ## more about them here:
+#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+#   data_format = "influx"
+
+
+# # Read metrics from NATS subject(s)
+# [[inputs.nats_consumer]]
+#   ## urls of NATS servers
+#   servers = ["nats://localhost:4222"]
+#   ## Use Transport Layer Security
+#   secure = false
+#   ## subject(s) to consume
+#   subjects = ["telegraf"]
+#   ## name a queue group
+#   queue_group = "telegraf_consumers"
+#
+#   ## Data format to consume.
+#   ## Each data format has its own unique set of configuration options, read
+#   ## more about them here:
+#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+#   data_format = "influx"
+
+
+# # Statsd Server
+# [[inputs.statsd]]
+#   ## Address and port to host UDP listener on
+#   service_address = ":8125"
+#   ## Delete gauges every interval (default=false)
+#   delete_gauges = false
+#   ## Delete counters every interval (default=false)
+#   delete_counters = false
+#   ## Delete sets every interval (default=false)
+#   delete_sets = false
+#   ## Delete timings & histograms every interval (default=true)
+#   delete_timings = true
+#   ## Percentiles to calculate for timing & histogram stats
+#   percentiles = [90]
+#
+#   ## separator to use between elements of a statsd metric
+#   metric_separator = "_"
+#
+#   ## Parses tags in the datadog statsd format
+#   ## http://docs.datadoghq.com/guides/dogstatsd/
+#   parse_data_dog_tags = false
+#
+#   ## Statsd data translation templates, more info can be read here:
+#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#graphite
+#   # templates = [
+#   #   "cpu.* measurement*"
+#   # ]
+#
+#   ## Number of UDP messages allowed to queue up. Once filled,
+#   ## the statsd server will start dropping packets
+#   allowed_pending_messages = 10000
+#
+#   ## Number of timing/histogram values to track per-measurement in the
+#   ## calculation of percentiles. Raising this limit increases the accuracy
+#   ## of percentiles but also increases the memory usage and cpu time.
+#   percentile_limit = 1000
+#
+#   ## UDP packet size for the server to listen for. This will depend on the size
+#   ## of the packets that the client is sending, which is usually 1500 bytes.
+#   udp_packet_size = 1500
+
+
+# # Generic TCP listener
+# [[inputs.tcp_listener]]
+#   ## Address and port to host TCP listener on
+#   service_address = ":8094"
+#
+#   ## Number of TCP messages allowed to queue up. Once filled, the
+#   ## TCP listener will start dropping packets.
+#   allowed_pending_messages = 10000
+#
+#   ## Maximum number of concurrent TCP connections to allow
+#   max_tcp_connections = 250
+#
+#   ## Data format to consume.
+#   ## Each data format has its own unique set of configuration options, read
+#   ## more about them here:
+#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+#   data_format = "influx"
+
diff --git a/internal/config/config.go b/internal/config/config.go
index b15c5e651..715fa777c 100644
--- a/internal/config/config.go
+++ b/internal/config/config.go
@@ -22,6 +22,15 @@ import (
     "github.com/influxdata/toml/ast"
 )
 
+var (
+    // Default input plugins
+    inputDefaults = []string{"cpu", "mem", "swap", "system", "kernel",
+        "processes", "disk", "diskio"}
+
+    // Default output plugins
+    outputDefaults = []string{"influxdb"}
+)
+
 // Config specifies the URL/user/password for the database that telegraf
 // will be logging to, as well as all the plugins that the user has
 // specified
@@ -135,21 +144,23 @@ func (c *Config) ListTags() string {
 }
 
 var header = `# Telegraf Configuration
-
+#
 # Telegraf is entirely plugin driven. All metrics are gathered from the
 # declared inputs, and sent to the declared outputs.
-
+#
 # Plugins must be declared in here to be active.
 # To deactivate a plugin, comment out the name and any variables.
-
+#
 # Use 'telegraf -config telegraf.conf -test' to see what metrics a config
 # file would generate.
+
 
 # Global tags can be specified here in key="value" format.
 [global_tags]
   # dc = "us-east-1" # will tag all metrics with dc=us-east-1
   # rack = "1a"
 
+
 # Configuration for telegraf agent
 [agent]
   ## Default data collection interval for all inputs
@@ -188,55 +199,72 @@ var header = `# Telegraf Configuration
   omit_hostname = false
 
 
-#
-# OUTPUTS:
-#
-
+###############################################################################
+#                            OUTPUT PLUGINS                                   #
+###############################################################################
 `
 
-var pluginHeader = `
-#
-# INPUTS:
-#
+var inputHeader = `
+
+###############################################################################
+#                            INPUT PLUGINS                                    #
+###############################################################################
 `
 
 var serviceInputHeader = `
-#
-# SERVICE INPUTS:
-#
+
+###############################################################################
+#                            SERVICE INPUT PLUGINS                            #
+###############################################################################
 `
 
 // PrintSampleConfig prints the sample config
-func PrintSampleConfig(pluginFilters []string, outputFilters []string) {
+func PrintSampleConfig(inputFilters []string, outputFilters []string) {
     fmt.Printf(header)
 
-    // Filter outputs
-    var onames []string
-    for oname := range outputs.Outputs {
-        if len(outputFilters) == 0 || sliceContains(oname, outputFilters) {
-            onames = append(onames, oname)
+    if len(outputFilters) != 0 {
+        printFilteredOutputs(outputFilters, false)
+    } else {
+        printFilteredOutputs(outputDefaults, false)
+        // Print non-default outputs, commented
+        var pnames []string
+        for pname := range outputs.Outputs {
+            if !sliceContains(pname, outputDefaults) {
+                pnames = append(pnames, pname)
+            }
         }
-    }
-    sort.Strings(onames)
-
-    // Print Outputs
-    for _, oname := range onames {
-        creator := outputs.Outputs[oname]
-        output := creator()
-        printConfig(oname, output, "outputs")
+        sort.Strings(pnames)
+        printFilteredOutputs(pnames, true)
    }
 
+    fmt.Printf(inputHeader)
+    if len(inputFilters) != 0 {
+        printFilteredInputs(inputFilters, false)
+    } else {
+        printFilteredInputs(inputDefaults, false)
+        // Print non-default inputs, commented
+        var pnames []string
+        for pname := range inputs.Inputs {
+            if !sliceContains(pname, inputDefaults) {
+                pnames = append(pnames, pname)
+            }
+        }
+        sort.Strings(pnames)
+        printFilteredInputs(pnames, true)
+    }
+}
+
+func printFilteredInputs(inputFilters []string, commented bool) {
     // Filter inputs
     var pnames []string
     for pname := range inputs.Inputs {
-        if len(pluginFilters) == 0 || sliceContains(pname, pluginFilters) {
+        if sliceContains(pname, inputFilters) {
            pnames = append(pnames, pname)
        }
    }
    sort.Strings(pnames)
 
    // Print Inputs
-    fmt.Printf(pluginHeader)
    servInputs := make(map[string]telegraf.ServiceInput)
    for _, pname := range pnames {
        creator := inputs.Inputs[pname]
@@ -248,13 +276,34 @@ func PrintSampleConfig(pluginFilters []string, outputFilters []string) {
            continue
        }
 
-        printConfig(pname, input, "inputs")
+        printConfig(pname, input, "inputs", commented)
    }
 
    // Print Service Inputs
+    if len(servInputs) == 0 {
+        return
+    }
    fmt.Printf(serviceInputHeader)
    for name, input := range servInputs {
-        printConfig(name, input, "inputs")
+        printConfig(name, input, "inputs", commented)
+    }
+}
+
+func printFilteredOutputs(outputFilters []string, commented bool) {
+    // Filter outputs
+    var onames []string
+    for oname := range outputs.Outputs {
+        if sliceContains(oname, outputFilters) {
+            onames = append(onames, oname)
+        }
+    }
+    sort.Strings(onames)
+
+    // Print Outputs
+    for _, oname := range onames {
+        creator := outputs.Outputs[oname]
+        output := creator()
+        printConfig(oname, output, "outputs", commented)
    }
 }
 
@@ -263,13 +312,26 @@ type printer interface {
    SampleConfig() string
 }
 
-func printConfig(name string, p printer, op string) {
-    fmt.Printf("\n# %s\n[[%s.%s]]", p.Description(), op, name)
+func printConfig(name string, p printer, op string, commented bool) {
+    comment := ""
+    if commented {
+        comment = "# "
+    }
+    fmt.Printf("\n%s# %s\n%s[[%s.%s]]", comment, p.Description(), comment,
+        op, name)
+
    config := p.SampleConfig()
    if config == "" {
-        fmt.Printf("\n  # no configuration\n")
+        fmt.Printf("\n%s  # no configuration\n\n", comment)
    } else {
-        fmt.Printf(config)
+        lines := strings.Split(config, "\n")
+        for i, line := range lines {
+            if i == 0 || i == len(lines)-1 {
+                fmt.Print("\n")
+                continue
+            }
+            fmt.Print(comment + line + "\n")
+        }
    }
 }
 
@@ -285,7 +347,7 @@ func sliceContains(name string, list []string) bool {
 // PrintInputConfig prints the config usage of a single input.
 func PrintInputConfig(name string) error {
    if creator, ok := inputs.Inputs[name]; ok {
-        printConfig(name, creator(), "inputs")
+        printConfig(name, creator(), "inputs", false)
    } else {
        return errors.New(fmt.Sprintf("Input %s not found", name))
    }
@@ -295,7 +357,7 @@
 // PrintOutputConfig prints the config usage of a single output.
 func PrintOutputConfig(name string) error {
    if creator, ok := outputs.Outputs[name]; ok {
-        printConfig(name, creator(), "outputs")
+        printConfig(name, creator(), "outputs", false)
    } else {
        return errors.New(fmt.Sprintf("Output %s not found", name))
    }
@@ -327,42 +389,42 @@ func (c *Config) LoadDirectory(path string) error {
 func (c *Config) LoadConfig(path string) error {
    tbl, err := config.ParseFile(path)
    if err != nil {
-        return err
+        return fmt.Errorf("Error parsing %s, %s", path, err)
    }
 
    for name, val := range tbl.Fields {
        subTable, ok := val.(*ast.Table)
        if !ok {
-            return errors.New("invalid configuration")
+            return fmt.Errorf("%s: invalid configuration", path)
        }
 
        switch name {
        case "agent":
            if err = config.UnmarshalTable(subTable, c.Agent); err != nil {
                log.Printf("Could not parse [agent] config\n")
-                return err
+                return fmt.Errorf("Error parsing %s, %s", path, err)
            }
        case "global_tags", "tags":
            if err = config.UnmarshalTable(subTable, c.Tags); err != nil {
                log.Printf("Could not parse [global_tags] config\n")
-                return err
+                return fmt.Errorf("Error parsing %s, %s", path, err)
            }
        case "outputs":
            for pluginName, pluginVal := range subTable.Fields {
                switch pluginSubTable := pluginVal.(type) {
                case *ast.Table:
                    if err = c.addOutput(pluginName, pluginSubTable); err != nil {
-                        return err
+                        return fmt.Errorf("Error parsing %s, %s", path, err)
                    }
                case []*ast.Table:
                    for _, t := range pluginSubTable {
                        if err = c.addOutput(pluginName, t); err != nil {
-                            return err
+                            return fmt.Errorf("Error parsing %s, %s", path, err)
                        }
                    }
                default:
-                    return fmt.Errorf("Unsupported config format: %s",
-                        pluginName)
+                    return fmt.Errorf("Unsupported config format: %s, file %s",
+                        pluginName, path)
                }
            }
        case "inputs", "plugins":
@@ -370,24 +432,24 @@
            for pluginName, pluginVal := range subTable.Fields {
                switch pluginSubTable := pluginVal.(type) {
                case *ast.Table:
                    if err = c.addInput(pluginName, pluginSubTable); err != nil {
-                        return err
+                        return fmt.Errorf("Error parsing %s, %s", path, err)
                    }
                case []*ast.Table:
                    for _, t := range pluginSubTable {
                        if err = c.addInput(pluginName, t); err != nil {
-                            return err
+                            return fmt.Errorf("Error parsing %s, %s", path, err)
                        }
                    }
                default:
-                    return fmt.Errorf("Unsupported config format: %s",
-                        pluginName)
+                    return fmt.Errorf("Unsupported config format: %s, file %s",
+                        pluginName, path)
                }
            }
        // Assume it's an input input for legacy config file support if no other
        // identifiers are present
        default:
            if err = c.addInput(name, subTable); err != nil {
-                return err
+                return fmt.Errorf("Error parsing %s, %s", path, err)
            }
        }
    }
diff --git a/plugins/inputs/disque/disque.go b/plugins/inputs/disque/disque.go
index 822e5924f..d726590b4 100644
--- a/plugins/inputs/disque/disque.go
+++ b/plugins/inputs/disque/disque.go
@@ -24,9 +24,8 @@ type Disque struct {
 
 var sampleConfig = `
   ## An array of URI to gather stats about. Specify an ip or hostname
-  ## with optional port and password. ie disque://localhost, disque://10.10.3.33:18832,
-  ## 10.0.0.1:10000, etc.
-
+  ## with optional port and password.
+  ## ie disque://localhost, disque://10.10.3.33:18832, 10.0.0.1:10000, etc.
  ## If no servers are specified, then localhost is used as the host.
  servers = ["localhost"]
 `
diff --git a/plugins/inputs/dns_query/dns_query.go b/plugins/inputs/dns_query/dns_query.go
index 397482a98..2231f2921 100644
--- a/plugins/inputs/dns_query/dns_query.go
+++ b/plugins/inputs/dns_query/dns_query.go
@@ -35,7 +35,8 @@ var sampleConfig = `
  ## Domains or subdomains to query. "."(root) is default
  domains = ["."] # optional
 
-  ## Query record type. Posible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV. Default is "NS"
+  ## Query record type. Default is "A"
+  ## Possible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV.
  record_type = "A" # optional
 
  ## Dns server port. 53 is default
diff --git a/plugins/inputs/exec/exec.go b/plugins/inputs/exec/exec.go
index 9fd9491ca..d2e09ccd0 100644
--- a/plugins/inputs/exec/exec.go
+++ b/plugins/inputs/exec/exec.go
@@ -22,7 +22,7 @@ const sampleConfig = `
  ## measurement name suffix (for separating different commands)
  name_suffix = "_mycollector"
 
-  ## Data format to consume. This can be "json", "influx", "graphite" or "nagios
+  ## Data format to consume.
  ## Each data format has it's own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
diff --git a/plugins/inputs/kafka_consumer/kafka_consumer.go b/plugins/inputs/kafka_consumer/kafka_consumer.go
index 07c87199f..a2cda43d6 100644
--- a/plugins/inputs/kafka_consumer/kafka_consumer.go
+++ b/plugins/inputs/kafka_consumer/kafka_consumer.go
@@ -56,7 +56,7 @@ var sampleConfig = `
  ## Offset (must be either "oldest" or "newest")
  offset = "oldest"
 
-  ## Data format to consume. This can be "json", "influx" or "graphite"
+  ## Data format to consume.
  ## Each data format has it's own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
diff --git a/plugins/inputs/mesos/mesos.go b/plugins/inputs/mesos/mesos.go
index ccb76daae..b096a20d9 100644
--- a/plugins/inputs/mesos/mesos.go
+++ b/plugins/inputs/mesos/mesos.go
@@ -34,7 +34,16 @@ var sampleConfig = `
  # A list of Mesos masters, default value is localhost:5050.
  masters = ["localhost:5050"]
  # Metrics groups to be collected, by default, all enabled.
-  master_collections = ["resources","master","system","slaves","frameworks","messages","evqueue","registrar"]
+  master_collections = [
+    "resources",
+    "master",
+    "system",
+    "slaves",
+    "frameworks",
+    "messages",
+    "evqueue",
+    "registrar",
+  ]
 `
 
 // SampleConfig returns a sample configuration block
diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer.go b/plugins/inputs/mqtt_consumer/mqtt_consumer.go
index 50a20740a..c64d2139b 100644
--- a/plugins/inputs/mqtt_consumer/mqtt_consumer.go
+++ b/plugins/inputs/mqtt_consumer/mqtt_consumer.go
@@ -78,7 +78,7 @@ var sampleConfig = `
  ## Use SSL but skip chain & host verification
  # insecure_skip_verify = false
 
-  ## Data format to consume. This can be "json", "influx" or "graphite"
+  ## Data format to consume.
  ## Each data format has it's own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
diff --git a/plugins/inputs/nats_consumer/nats_consumer.go b/plugins/inputs/nats_consumer/nats_consumer.go
index 235601100..232d5740f 100644
--- a/plugins/inputs/nats_consumer/nats_consumer.go
+++ b/plugins/inputs/nats_consumer/nats_consumer.go
@@ -55,7 +55,7 @@ var sampleConfig = `
  ## name a queue group
  queue_group = "telegraf_consumers"
 
-  ## Data format to consume. This can be "json", "influx" or "graphite"
+  ## Data format to consume.
  ## Each data format has it's own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
diff --git a/plugins/inputs/postgresql/postgresql.go b/plugins/inputs/postgresql/postgresql.go
index d8d0d1978..da8ee8001 100644
--- a/plugins/inputs/postgresql/postgresql.go
+++ b/plugins/inputs/postgresql/postgresql.go
@@ -26,7 +26,8 @@ var ignoredColumns = map[string]bool{"datid": true, "datname": true, "stats_rese
 
 var sampleConfig = `
  ## specify address via a url matching:
-  ##   postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full]
+  ##   postgres://[pqgotest[:password]]@localhost[/dbname]\
+  ##       ?sslmode=[disable|verify-ca|verify-full]
  ## or a simple string:
  ##   host=localhost user=pqotest password=... sslmode=... dbname=app_production
  ##
diff --git a/plugins/inputs/postgresql_extensible/postgresql_extensible.go b/plugins/inputs/postgresql_extensible/postgresql_extensible.go
index 67097db4b..4ebf752ff 100644
--- a/plugins/inputs/postgresql_extensible/postgresql_extensible.go
+++ b/plugins/inputs/postgresql_extensible/postgresql_extensible.go
@@ -38,38 +38,41 @@ type query []struct {
 var ignoredColumns = map[string]bool{"datid": true, "datname": true, "stats_reset": true}
 
 var sampleConfig = `
-  # specify address via a url matching:
-  #   postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full]
-  # or a simple string:
-  #   host=localhost user=pqotest password=... sslmode=... dbname=app_production
+  ## specify address via a url matching:
+  ##   postgres://[pqgotest[:password]]@localhost[/dbname]\
+  ##       ?sslmode=[disable|verify-ca|verify-full]
+  ## or a simple string:
+  ##   host=localhost user=pqotest password=... sslmode=... dbname=app_production
  #
-  # All connection parameters are optional.  #
-  # Without the dbname parameter, the driver will default to a database
-  # with the same name as the user. This dbname is just for instantiating a
-  # connection with the server and doesn't restrict the databases we are trying
-  # to grab metrics for.
+  ## All connection parameters are optional.  #
+  ## Without the dbname parameter, the driver will default to a database
+  ## with the same name as the user. This dbname is just for instantiating a
+  ## connection with the server and doesn't restrict the databases we are trying
+  ## to grab metrics for.
  #
  address = "host=localhost user=postgres sslmode=disable"
-  # A list of databases to pull metrics about. If not specified, metrics for all
-  # databases are gathered.
-  # databases = ["app_production", "testing"]
+  ## A list of databases to pull metrics about. If not specified, metrics for all
+  ## databases are gathered.
+  ## databases = ["app_production", "testing"]
  #
-  # Define the toml config where the sql queries are stored
-  # New queries can be added, if the withdbname is set to true and there is no databases defined
-  # in the 'databases field', the sql query is ended by a 'is not null' in order to make the query
-  # succeed.
-  # Example :
-  # The sqlquery : "SELECT * FROM pg_stat_database where datname" become "SELECT * FROM pg_stat_database where datname IN ('postgres', 'pgbench')"
-  # because the databases variable was set to ['postgres', 'pgbench' ] and the withdbname was true.
-  # Be careful that if the withdbname is set to false you d'ont have to define the where clause (aka with the dbname)
-  # the tagvalue field is used to define custom tags (separated by comas)
+  ## Define the toml config where the sql queries are stored
+  ## New queries can be added, if the withdbname is set to true and there is no
+  ## databases defined in the 'databases field', the sql query is ended by a
+  ## 'is not null' in order to make the query succeed.
+  ## Example :
+  ## The sqlquery : "SELECT * FROM pg_stat_database where datname" becomes
+  ## "SELECT * FROM pg_stat_database where datname IN ('postgres', 'pgbench')"
+  ## because the databases variable was set to ['postgres', 'pgbench' ] and the
+  ## withdbname was true. Be careful that if the withdbname is set to false you
+  ## don't have to define the where clause (aka with the dbname); the tagvalue
+  ## field is used to define custom tags (separated by commas)
  #
-  # Structure :
-  # [[inputs.postgresql_extensible.query]]
-  #   sqlquery string
-  #   version string
-  #   withdbname boolean
-  #   tagvalue string (coma separated)
+  ## Structure :
+  ## [[inputs.postgresql_extensible.query]]
+  ##   sqlquery string
+  ##   version string
+  ##   withdbname boolean
+  ##   tagvalue string (comma separated)
  [[inputs.postgresql_extensible.query]]
    sqlquery="SELECT * FROM pg_stat_database"
    version=901
    withdbname=false
diff --git a/plugins/inputs/prometheus/prometheus.go b/plugins/inputs/prometheus/prometheus.go
index 0281cc24a..460a79faf 100644
--- a/plugins/inputs/prometheus/prometheus.go
+++ b/plugins/inputs/prometheus/prometheus.go
@@ -26,10 +26,10 @@ var sampleConfig = `
  ## An array of urls to scrape metrics from.
  urls = ["http://localhost:9100/metrics"]
 
-  ### Use SSL but skip chain & host verification
-  # insecure_skip_verify = false
-  ### Use bearer token for authorization
-  # bearer_token = /path/to/bearer/token
+  ## Use SSL but skip chain & host verification
+  # insecure_skip_verify = false
+  ## Use bearer token for authorization
+  # bearer_token = /path/to/bearer/token
 `
 
 func (p *Prometheus) SampleConfig() string {
diff --git a/plugins/inputs/snmp/snmp.go b/plugins/inputs/snmp/snmp.go
index a56e53ff7..4c2de93c9 100644
--- a/plugins/inputs/snmp/snmp.go
+++ b/plugins/inputs/snmp/snmp.go
@@ -178,7 +178,6 @@ var sampleConfig = `
    max_repetition = 127
    oid = "ifOutOctets"
 
-
  [[inputs.snmp.host]]
    address = "192.168.2.13:161"
    #address = "127.0.0.1:161"
@@ -219,10 +218,8 @@ var sampleConfig = `
    # if empty get all instances
    mapping_table = ".1.3.6.1.2.1.31.1.1.1.1"
    # if empty get all subtables
-    # sub_tables could be not "real subtables"
+    # sub_tables do not have to be "real" subtables
    sub_tables=[".1.3.6.1.2.1.2.2.1.13", "bytes_recv", "bytes_send"]
-
-
 `
 
 // SampleConfig returns sample configuration message
diff --git a/plugins/inputs/tcp_listener/tcp_listener.go b/plugins/inputs/tcp_listener/tcp_listener.go
index dd239fedf..a1b991058 100644
--- a/plugins/inputs/tcp_listener/tcp_listener.go
+++ b/plugins/inputs/tcp_listener/tcp_listener.go
@@ -53,7 +53,7 @@ const sampleConfig = `
  ## Maximum number of concurrent TCP connections to allow
  max_tcp_connections = 250
 
-  ## Data format to consume. This can be "json", "influx" or "graphite"
+  ## Data format to consume.
diff --git a/plugins/inputs/prometheus/prometheus.go b/plugins/inputs/prometheus/prometheus.go
index 0281cc24a..460a79faf 100644
--- a/plugins/inputs/prometheus/prometheus.go
+++ b/plugins/inputs/prometheus/prometheus.go
@@ -26,10 +26,10 @@ var sampleConfig = `
   ## An array of urls to scrape metrics from.
   urls = ["http://localhost:9100/metrics"]
 
-  ### Use SSL but skip chain & host verification
-  # insecure_skip_verify = false
-  ### Use bearer token for authorization
-  # bearer_token = /path/to/bearer/token
+  ## Use SSL but skip chain & host verification
+  # insecure_skip_verify = false
+  ## Use bearer token for authorization
+  # bearer_token = /path/to/bearer/token
 `
 
 func (p *Prometheus) SampleConfig() string {
diff --git a/plugins/inputs/snmp/snmp.go b/plugins/inputs/snmp/snmp.go
index a56e53ff7..4c2de93c9 100644
--- a/plugins/inputs/snmp/snmp.go
+++ b/plugins/inputs/snmp/snmp.go
@@ -178,7 +178,6 @@ var sampleConfig = `
     max_repetition = 127
     oid = "ifOutOctets"
 
-
   [[inputs.snmp.host]]
     address = "192.168.2.13:161"
     #address = "127.0.0.1:161"
@@ -219,10 +218,8 @@ var sampleConfig = `
     # if empty get all instances
     mapping_table = ".1.3.6.1.2.1.31.1.1.1.1"
     # if empty get all subtables
-    # sub_tables could be not "real subtables"
+    # sub_tables entries need not be "real" subtables
     sub_tables=[".1.3.6.1.2.1.2.2.1.13", "bytes_recv", "bytes_send"]
-
-
 `
diff --git a/plugins/inputs/tcp_listener/tcp_listener.go b/plugins/inputs/tcp_listener/tcp_listener.go
index dd239fedf..a1b991058 100644
--- a/plugins/inputs/tcp_listener/tcp_listener.go
+++ b/plugins/inputs/tcp_listener/tcp_listener.go
@@ -53,7 +53,7 @@ const sampleConfig = `
   ## Maximum number of concurrent TCP connections to allow
   max_tcp_connections = 250
 
-  ## Data format to consume. This can be "json", "influx" or "graphite"
+  ## Data format to consume.
   ## Each data format has it's own unique set of configuration options, read
   ## more about them here:
   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
diff --git a/plugins/inputs/udp_listener/udp_listener.go b/plugins/inputs/udp_listener/udp_listener.go
index 9b0a65d6f..794f1791d 100644
--- a/plugins/inputs/udp_listener/udp_listener.go
+++ b/plugins/inputs/udp_listener/udp_listener.go
@@ -48,7 +48,7 @@ const sampleConfig = `
   ## usually 1500 bytes, but can be as large as 65,535 bytes.
   udp_packet_size = 1500
 
-  ## Data format to consume. This can be "json", "influx" or "graphite"
+  ## Data format to consume.
   ## Each data format has it's own unique set of configuration options, read
   ## more about them here:
   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
diff --git a/plugins/outputs/amqp/amqp.go b/plugins/outputs/amqp/amqp.go
index c9531b2a5..bf9353d6e 100644
--- a/plugins/outputs/amqp/amqp.go
+++ b/plugins/outputs/amqp/amqp.go
@@ -89,7 +89,7 @@ var sampleConfig = `
   ## Use SSL but skip chain & host verification
   # insecure_skip_verify = false
 
-  ## Data format to output. This can be "influx" or "graphite"
+  ## Data format to output.
   ## Each data format has it's own unique set of configuration options, read
   ## more about them here:
   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
diff --git a/plugins/outputs/file/file.go b/plugins/outputs/file/file.go
index 743c0f03f..1d47642b2 100644
--- a/plugins/outputs/file/file.go
+++ b/plugins/outputs/file/file.go
@@ -23,7 +23,7 @@ var sampleConfig = `
   ## Files to write to, "stdout" is a specially handled file.
   files = ["stdout", "/tmp/metrics.out"]
 
-  ## Data format to output. This can be "influx" or "graphite"
+  ## Data format to output.
   ## Each data format has it's own unique set of configuration options, read
   ## more about them here:
   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
diff --git a/plugins/outputs/kafka/kafka.go b/plugins/outputs/kafka/kafka.go
index 3cecfeeab..1fafa1353 100644
--- a/plugins/outputs/kafka/kafka.go
+++ b/plugins/outputs/kafka/kafka.go
@@ -59,16 +59,27 @@ var sampleConfig = `
   ## ie, if this tag exists, it's value will be used as the routing key
   routing_tag = "host"
 
-  ## CompressionCodec represents the various compression codecs recognized by Kafka in messages.
+  ## CompressionCodec represents the various compression codecs recognized by
+  ## Kafka in messages.
   ##  0 : No compression
   ##  1 : Gzip compression
   ##  2 : Snappy compression
   compression_codec = 0
 
-  ## RequiredAcks is used in Produce Requests to tell the broker how many replica acknowledgements it must see before responding
-  ##  0 : the producer never waits for an acknowledgement from the broker. This option provides the lowest latency but the weakest durability guarantees (some data will be lost when a server fails).
-  ##  1 : the producer gets an acknowledgement after the leader replica has received the data. This option provides better durability as the client waits until the server acknowledges the request as successful (only messages that were written to the now-dead leader but not yet replicated will be lost).
-  ##  -1 : the producer gets an acknowledgement after all in-sync replicas have received the data. This option provides the best durability, we guarantee that no messages will be lost as long as at least one in sync replica remains.
+  ## RequiredAcks is used in Produce Requests to tell the broker how many
+  ## replica acknowledgements it must see before responding
+  ##  0 : the producer never waits for an acknowledgement from the broker.
+  ##      This option provides the lowest latency but the weakest durability
+  ##      guarantees (some data will be lost when a server fails).
+  ##  1 : the producer gets an acknowledgement after the leader replica has
+  ##      received the data. This option provides better durability as the
+  ##      client waits until the server acknowledges the request as successful
+  ##      (only messages that were written to the now-dead leader but not yet
+  ##      replicated will be lost).
+  ##  -1: the producer gets an acknowledgement after all in-sync replicas have
+  ##      received the data. This option provides the best durability, we
+  ##      guarantee that no messages will be lost as long as at least one in
+  ##      sync replica remains.
   required_acks = -1
 
   ## The total number of times to retry sending a message
@@ -81,7 +92,7 @@ var sampleConfig = `
   ## Use SSL but skip chain & host verification
   # insecure_skip_verify = false
 
-  ## Data format to output. This can be "influx" or "graphite"
+  ## Data format to output.
   ## Each data format has it's own unique set of configuration options, read
   ## more about them here:
   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
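The required_acks values mirror the producer ack levels of the underlying Kafka client. A hedged sketch of how they map onto that client's constants, assuming the github.com/Shopify/sarama library telegraf's kafka output is built on; the switch below is an illustration, not telegraf's actual wiring:

// A hedged sketch mapping the required_acks config value onto sarama's
// producer ack constants; illustrative only.
package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func main() {
	requiredAcks := -1 // as in the sample config above
	cfg := sarama.NewConfig()
	switch requiredAcks {
	case 0:
		cfg.Producer.RequiredAcks = sarama.NoResponse // lowest latency, weakest durability
	case 1:
		cfg.Producer.RequiredAcks = sarama.WaitForLocal // leader ack only
	case -1:
		cfg.Producer.RequiredAcks = sarama.WaitForAll // wait for all in-sync replicas
	}
	fmt.Println(cfg.Producer.RequiredAcks) // -1
}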
diff --git a/plugins/outputs/mqtt/mqtt.go b/plugins/outputs/mqtt/mqtt.go
index f13500db9..c57ee8cd0 100644
--- a/plugins/outputs/mqtt/mqtt.go
+++ b/plugins/outputs/mqtt/mqtt.go
@@ -32,7 +32,7 @@ var sampleConfig = `
   ## Use SSL but skip chain & host verification
   # insecure_skip_verify = false
 
-  ## Data format to output. This can be "influx" or "graphite"
+  ## Data format to output.
   ## Each data format has it's own unique set of configuration options, read
   ## more about them here:
   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
diff --git a/plugins/outputs/nsq/nsq.go b/plugins/outputs/nsq/nsq.go
index 75b998484..fd4053222 100644
--- a/plugins/outputs/nsq/nsq.go
+++ b/plugins/outputs/nsq/nsq.go
@@ -24,7 +24,7 @@ var sampleConfig = `
   ## NSQ topic for producer messages
   topic = "telegraf"
 
-  ## Data format to output. This can be "influx" or "graphite"
+  ## Data format to output.
   ## Each data format has it's own unique set of configuration options, read
   ## more about them here:
   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
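For reference, the default "influx" data format these output plugins point to is InfluxDB line protocol. A minimal, hand-rolled illustration of its shape (telegraf's serializers produce this internally; the metric below is made up):

// A minimal illustration of the "influx" wire format (line protocol):
// measurement,tag_set field_set timestamp
package main

import (
	"fmt"
	"time"
)

func main() {
	line := fmt.Sprintf("cpu,host=server01 usage_idle=98.2 %d", time.Now().UnixNano())
	fmt.Println(line)
	// e.g. cpu,host=server01 usage_idle=98.2 1459814510000000000
}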