# Telegraf configuration

# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared plugins.

# Even if a plugin has no configuration, it must be declared in here
# to be active. Declaring a plugin means just specifying the name
# as a section with no variables. To deactivate a plugin, comment
# out the name and any variables.

# Use 'telegraf -config telegraf.toml -test' to see what metrics a config
# file would generate.

# One rule that plugins conform to is wherever a connection string
# can be passed, the values '' and 'localhost' are treated specially.
# They indicate to the plugin to use their own builtin configuration to
# connect to the local system.

# NOTE: The configuration has a few required parameters. They are marked
# with 'required'. Be sure to edit those to make this configuration work.

# Tags can also be specified via a normal map, but only one form at a time:
[tags]
# dc = "us-east-1"

# Configuration for telegraf agent
[agent]
# Default data collection interval for all plugins
interval = "10s"
# If utc = false, uses local time (utc is highly recommended)
utc = true
# Precision of writes, valid values are n, u, ms, s, m, and h
# note: using second precision greatly helps InfluxDB compression
precision = "s"
# run telegraf in debug mode
debug = false
# Override default hostname, if empty use os.Hostname()
hostname = ""

###############################################################################
#                                  OUTPUTS                                    #
###############################################################################

[outputs]

# Configuration for influxdb server to send metrics to
[[outputs.influxdb]]
# The full HTTP endpoint URL for your InfluxDB instance.
# Multiple urls can be specified for InfluxDB cluster support. The server to
# write to will be randomly chosen each interval.
urls = ["http://localhost:8086"] # required.
# The target database for metrics. This database must already exist.
database = "telegraf" # required.

# Connection timeout (for the connection with InfluxDB), formatted as a string.
# Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
# If not provided, will default to 0 (no timeout).
# timeout = "5s"
# username = "telegraf"
# password = "metricsmetricsmetricsmetrics"

# Set the user agent for the POSTs (can be useful for log differentiation)
# user_agent = "telegraf"
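# An additional commented example of the cluster support described above; the
# hostnames below are placeholders, not defaults. With several urls listed,
# the server written to is chosen at random each interval:
# urls = ["http://influxdb-1:8086", "http://influxdb-2:8086"]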
[[outputs.influxdb]]
urls = ["udp://localhost:8089"]
database = "udp-telegraf"

# Configuration for the Kafka server to send metrics to
[[outputs.kafka]]
# URLs of kafka brokers
brokers = ["localhost:9092"]
# Kafka topic for producer messages
topic = "telegraf"
# Telegraf tag to use as a routing key
# ie, if this tag exists, its value will be used as the routing key
routing_tag = "host"

###############################################################################
#                                  PLUGINS                                    #
###############################################################################

# Read Apache status information (mod_status)
[apache]
# An array of Apache status URIs to gather stats from.
urls = ["http://localhost/server-status?auto"]

# Read metrics about cpu usage
[cpu]
# Whether to report per-cpu stats or not
percpu = true
# Whether to report total system cpu stats or not
totalcpu = true
# Comment this line if you want the raw CPU time metrics
drop = ["cpu_time"]

# Read metrics about disk usage by mount point
[disk]
# no configuration

# Read metrics from one or many disque servers
[disque]
# An array of URIs to gather stats about. Specify an ip or hostname
# with optional port and password.
# ie disque://localhost, disque://10.10.3.33:18832, 10.0.0.1:10000, etc.
#
# If no servers are specified, then localhost is used as the host.
servers = ["localhost"]

# Read stats from one or more Elasticsearch servers or clusters
[elasticsearch]
# specify a list of one or more Elasticsearch servers
servers = ["http://localhost:9200"]
# set local to false when you want to read the indices stats from all nodes
# within the cluster
local = true

# Read flattened metrics from one or more commands that output JSON to stdout
[exec]
# specify commands via an array of tables
[[exec.commands]]
# the command to run
command = "/usr/bin/mycollector --foo=bar"
# name of the command (used as a prefix for measurements)
name = "mycollector"

# Read metrics of haproxy, via socket or csv stats page
[haproxy]
# An array of addresses to gather stats about. Specify an ip or hostname
# with optional port. ie localhost, 10.10.3.33:1936, etc.
#
# If no servers are specified, then default to 127.0.0.1:1936
servers = ["http://myhaproxy.com:1936", "http://anotherhaproxy.com:1936"]
# You can also use a local socket (not working yet)
# servers = ["socket:/run/haproxy/admin.sock"]

# Read flattened metrics from one or more JSON HTTP endpoints
[httpjson]
# Specify services via an array of tables
[[httpjson.services]]
# a name for the service being polled
name = "webserver_stats"

# URL of each server in the service's cluster
servers = [
  "http://localhost:9999/stats/",
  "http://localhost:9998/stats/",
]

# HTTP method to use (case-sensitive)
method = "GET"

# HTTP parameters (all values must be strings)
[httpjson.services.parameters]
event_type = "cpu_spike"
threshold = "0.75"

# Read metrics about disk IO by device
[io]
# no configuration

# read metrics from a Kafka topic
[kafka]
# topic to consume
topic = "topic_with_metrics"
# the name of the consumer group
consumerGroupName = "telegraf_metrics_consumers"
# an array of Zookeeper connection strings
zookeeperPeers = ["localhost:2181"]
# Batch size of points sent to InfluxDB
batchSize = 1000

# Read metrics from a LeoFS Server via SNMP
[leofs]
# An array of URIs to gather stats about LeoFS.
# Specify an ip or hostname with port. ie 127.0.0.1:4020
#
# If no servers are specified, then 127.0.0.1 is used as the host and 4020 as the port.
servers = ["127.0.0.1:4021"]

# Read metrics from local Lustre service on OST, MDS
[lustre2]
# An array of /proc globs to search for Lustre stats
# If not specified, the default will work on Lustre 2.5.x
#
# ost_procfiles = ["/proc/fs/lustre/obdfilter/*/stats", "/proc/fs/lustre/osd-ldiskfs/*/stats"]
# mds_procfiles = ["/proc/fs/lustre/mdt/*/md_stats"]

# Read metrics about memory usage
[mem]
# no configuration

# Read metrics from one or many memcached servers
[memcached]
# An array of addresses to gather stats about. Specify an ip or hostname
# with optional port. ie localhost, 10.0.0.1:11211, etc.
#
# If no servers are specified, then localhost is used as the host.
servers = ["localhost"]

# Read metrics from one or many MongoDB servers
[mongodb]
# An array of URIs to gather stats about. Specify an ip or hostname
# with optional port and password. ie mongodb://user:auth_key@10.10.3.30:27017,
# mongodb://10.10.3.33:18832, 10.0.0.1:10000, etc.
#
# If no servers are specified, then 127.0.0.1 is used as the host and 27017 as the port.
servers = ["127.0.0.1:27017"]

# Read metrics from one or many mysql servers
[mysql]
# specify servers via a url matching:
#   [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify]]
# e.g.
#   servers = ["root:root@http://10.0.0.18/?tls=false"]
#   servers = ["root:passwd@tcp(127.0.0.1:3306)/"]
#
# If no servers are specified, then localhost is used as the host.
servers = ["localhost"]
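# An additional commented example (the user, password, and address here are
# placeholders) that follows the DSN format documented above, with TLS
# certificate verification skipped:
# servers = ["telegraf:metricspass@tcp(10.0.0.18:3306)/?tls=skip-verify"]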
# Read metrics about network interface usage
[net]
# By default, telegraf gathers stats from any up interface (excluding loopback).
# Setting interfaces will tell it to gather these explicit interfaces,
# regardless of status.
#
# interfaces = ["eth0", ... ]

# Read Nginx's basic status information (ngx_http_stub_status_module)
[nginx]
# An array of Nginx stub_status URIs to gather stats from.
urls = ["http://localhost/status"]

# Ping given url(s) and return statistics
[ping]
# urls to ping
urls = ["www.google.com"] # required
# number of pings to send (ping -c)
count = 1 # required
# interval, in s, at which to ping. 0 == default (ping -i)
ping_interval = 0.0
# ping timeout, in s. 0 == no timeout (ping -t)
timeout = 0.0
# interface to send ping from (ping -I)
interface = ""

# Read metrics from one or many postgresql servers
[postgresql]
# specify servers via an array of tables
# (a fuller commented-out example is given at the end of this file)
[[postgresql.servers]]
# specify address via a url matching:
#   postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full]
# or a simple string:
#   host=localhost user=pqotest password=... sslmode=... dbname=app_production
#
# All connection parameters are optional. By default, the host is localhost
# and the user is the currently running user. For localhost, we default
# to sslmode=disable as well.
#
# Without the dbname parameter, the driver will default to a database
# with the same name as the user. This dbname is just for instantiating a
# connection with the server and doesn't restrict the databases we are trying
# to grab metrics for.
#
address = "sslmode=disable"
# A list of databases to pull metrics about. If not specified, metrics for all
# databases are gathered.
# databases = ["app_production", "blah_testing"]

# [[postgresql.servers]]
# address = "influx@remoteserver"

# Read metrics from one or many prometheus clients
[prometheus]
# An array of urls to scrape metrics from.
urls = ["http://localhost:9100/metrics"]

# Read metrics from one or many RabbitMQ servers via the management API
[rabbitmq]
# Specify servers via an array of tables
[[rabbitmq.servers]]
# name = "rmq-server-1" # optional tag
# url = "http://localhost:15672"
# username = "guest"
# password = "guest"
# A list of nodes to pull metrics about. If not specified, metrics for
# all nodes are gathered.
# nodes = ["rabbit@node1", "rabbit@node2"]

# Read metrics from one or many redis servers
[redis]
# An array of URIs to gather stats about. Specify an ip or hostname
# with optional port and password. ie redis://localhost, redis://10.10.3.33:18832,
# 10.0.0.1:10000, etc.
#
# If no servers are specified, then localhost is used as the host.
servers = ["localhost"]

# Read metrics from one or many RethinkDB servers
[rethinkdb]
# An array of URIs to gather stats about. Specify an ip or hostname
# with optional port and password. ie rethinkdb://user:auth_key@10.10.3.30:28105,
# rethinkdb://10.10.3.33:18832, 10.0.0.1:10000, etc.
#
# If no servers are specified, then 127.0.0.1 is used as the host and 28015 as the port.
servers = ["127.0.0.1:28015"]

# Read metrics about swap memory usage
[swap]
# no configuration

# Read metrics about system load & uptime
[system]
# no configuration
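# An additional commented-out example for the [postgresql] plugin above. The
# credentials and database names are placeholders, not defaults; the address
# uses the URL form documented in that section:
#
# [[postgresql.servers]]
#   address = "postgres://pqgotest:password@localhost/app_production?sslmode=verify-full"
#   databases = ["app_production", "blah_testing"]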