Merge remote-tracking branch 'upstream/master' into cherry-20160401-1

Conflicts:
	plugins/inputs/all/all.go
This commit is contained in:
Subhachandra Chandra 2016-04-07 16:59:53 -07:00
commit 44f35f1d35
44 changed files with 2680 additions and 405 deletions

View File

@ -1,4 +1,19 @@
## v0.12.0 [unreleased] ## v0.12.1 [unreleased]
### Features
- [#976](https://github.com/influxdata/telegraf/pull/976): Reduce allocations in the UDP and statsd inputs.
- [#979](https://github.com/influxdata/telegraf/pull/979): Reduce allocations in the TCP listener.
- [#935](https://github.com/influxdata/telegraf/pull/935): AWS Cloudwatch input plugin. Thanks @joshhardy & @ljosa!
- [#943](https://github.com/influxdata/telegraf/pull/943): http_response input plugin. Thanks @Lswith!
- [#939](https://github.com/influxdata/telegraf/pull/939): sysstat input plugin. Thanks @zbindenren!
### Bugfixes
- [#968](https://github.com/influxdata/telegraf/issues/968): Processes plugin gets unknown state when spaces are in (command name)
- [#969](https://github.com/influxdata/telegraf/pull/969): ipmi_sensors: allow : in password. Thanks @awaw!
- [#972](https://github.com/influxdata/telegraf/pull/972): dovecot: remove extra newline in dovecot command. Thanks @mrannanj!
- [#645](https://github.com/influxdata/telegraf/issues/645): docker plugin i/o error on closed pipe. Thanks @tripledes!
## v0.12.0 [2016-04-05]
### Features ### Features
- [#951](https://github.com/influxdata/telegraf/pull/951): Parse environment variables in the config file. - [#951](https://github.com/influxdata/telegraf/pull/951): Parse environment variables in the config file.
@ -28,6 +43,8 @@
- [#841](https://github.com/influxdata/telegraf/issues/841): Fix memcached unix socket panic. - [#841](https://github.com/influxdata/telegraf/issues/841): Fix memcached unix socket panic.
- [#873](https://github.com/influxdata/telegraf/issues/873): Fix SNMP plugin sometimes not returning metrics. Thanks @titiliambert! - [#873](https://github.com/influxdata/telegraf/issues/873): Fix SNMP plugin sometimes not returning metrics. Thanks @titiliambert!
- [#934](https://github.com/influxdata/telegraf/pull/934): phpfpm: Fix fcgi uri path. Thanks @rudenkovk! - [#934](https://github.com/influxdata/telegraf/pull/934): phpfpm: Fix fcgi uri path. Thanks @rudenkovk!
- [#805](https://github.com/influxdata/telegraf/issues/805): Kafka consumer stops gathering after i/o timeout.
- [#959](https://github.com/influxdata/telegraf/pull/959): reduce mongodb & prometheus collection timeouts. Thanks @PierreF!
## v0.11.1 [2016-03-17] ## v0.11.1 [2016-03-17]

View File

@ -114,7 +114,7 @@ creating the `Parser` object.
You should also add the following to your SampleConfig() return: You should also add the following to your SampleConfig() return:
```toml ```toml
## Data format to consume. This can be "json", "influx" or "graphite" ## Data format to consume.
## Each data format has its own unique set of configuration options, read ## Each data format has its own unique set of configuration options, read
## more about them here: ## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
@ -244,7 +244,7 @@ instantiating and creating the `Serializer` object.
You should also add the following to your SampleConfig() return: You should also add the following to your SampleConfig() return:
```toml ```toml
## Data format to output. This can be "influx" or "graphite" ## Data format to output.
## Each data format has its own unique set of configuration options, read ## Each data format has its own unique set of configuration options, read
## more about them here: ## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md

9
Godeps
View File

@ -9,10 +9,12 @@ github.com/couchbase/gomemcached a5ea6356f648fec6ab89add00edd09151455b4b2
github.com/couchbase/goutils 5823a0cbaaa9008406021dc5daf80125ea30bba6 github.com/couchbase/goutils 5823a0cbaaa9008406021dc5daf80125ea30bba6
github.com/dancannon/gorethink e7cac92ea2bc52638791a021f212145acfedb1fc github.com/dancannon/gorethink e7cac92ea2bc52638791a021f212145acfedb1fc
github.com/davecgh/go-spew 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d github.com/davecgh/go-spew 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d
github.com/docker/engine-api 8924d6900370b4c7e7984be5adc61f50a80d7537
github.com/docker/go-connections f549a9393d05688dff0992ef3efd8bbe6c628aeb
github.com/docker/go-units 5d2041e26a699eaca682e2ea41c8f891e1060444
github.com/eapache/go-resiliency b86b1ec0dd4209a588dc1285cdd471e73525c0b3 github.com/eapache/go-resiliency b86b1ec0dd4209a588dc1285cdd471e73525c0b3
github.com/eapache/queue ded5959c0d4e360646dc9e9908cff48666781367 github.com/eapache/queue ded5959c0d4e360646dc9e9908cff48666781367
github.com/eclipse/paho.mqtt.golang 4ab3e867810d1ec5f35157c59e965054dbf43a0d github.com/eclipse/paho.mqtt.golang 0f7a459f04f13a41b7ed752d47944528d4bf9a86
github.com/fsouza/go-dockerclient a49c8269a6899cae30da1f8a4b82e0ce945f9967
github.com/go-sql-driver/mysql 1fca743146605a172a266e1654e01e5cd5669bee github.com/go-sql-driver/mysql 1fca743146605a172a266e1654e01e5cd5669bee
github.com/golang/protobuf 552c7b9542c194800fd493123b3798ef0a832032 github.com/golang/protobuf 552c7b9542c194800fd493123b3798ef0a832032
github.com/golang/snappy 427fb6fc07997f43afa32f35e850833760e489a7 github.com/golang/snappy 427fb6fc07997f43afa32f35e850833760e489a7
@ -32,6 +34,7 @@ github.com/naoina/go-stringutil 6b638e95a32d0c1131db0e7fe83775cbea4a0d0b
github.com/nats-io/nats b13fc9d12b0b123ebc374e6b808c6228ae4234a3 github.com/nats-io/nats b13fc9d12b0b123ebc374e6b808c6228ae4234a3
github.com/nats-io/nuid 4f84f5f3b2786224e336af2e13dba0a0a80b76fa github.com/nats-io/nuid 4f84f5f3b2786224e336af2e13dba0a0a80b76fa
github.com/nsqio/go-nsq 0b80d6f05e15ca1930e0c5e1d540ed627e299980 github.com/nsqio/go-nsq 0b80d6f05e15ca1930e0c5e1d540ed627e299980
github.com/opencontainers/runc 89ab7f2ccc1e45ddf6485eaa802c35dcf321dfc8
github.com/prometheus/client_golang 18acf9993a863f4c4b40612e19cdd243e7c86831 github.com/prometheus/client_golang 18acf9993a863f4c4b40612e19cdd243e7c86831
github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6 github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
github.com/prometheus/common e8eabff8812b05acf522b45fdcd725a785188e37 github.com/prometheus/common e8eabff8812b05acf522b45fdcd725a785188e37
@ -41,7 +44,7 @@ github.com/shirou/gopsutil 1f32ce1bb380845be7f5d174ac641a2c592c0c42
github.com/soniah/gosnmp b1b4f885b12c5dcbd021c5cee1c904110de6db7d github.com/soniah/gosnmp b1b4f885b12c5dcbd021c5cee1c904110de6db7d
github.com/streadway/amqp b4f3ceab0337f013208d31348b578d83c0064744 github.com/streadway/amqp b4f3ceab0337f013208d31348b578d83c0064744
github.com/stretchr/testify 1f4a1643a57e798696635ea4c126e9127adb7d3c github.com/stretchr/testify 1f4a1643a57e798696635ea4c126e9127adb7d3c
github.com/wvanbergen/kafka 1a8639a45164fcc245d5c7b4bd3ccfbd1a0ffbf3 github.com/wvanbergen/kafka 46f9a1cf3f670edec492029fadded9c2d9e18866
github.com/wvanbergen/kazoo-go 0f768712ae6f76454f987c3356177e138df258f8 github.com/wvanbergen/kazoo-go 0f768712ae6f76454f987c3356177e138df258f8
github.com/zensqlmonitor/go-mssqldb ffe5510c6fa5e15e6d983210ab501c815b56b363 github.com/zensqlmonitor/go-mssqldb ffe5510c6fa5e15e6d983210ab501c815b56b363
golang.org/x/crypto 5dc8cb4b8a8eb076cbb5a06bc3b8682c15bdbbd3 golang.org/x/crypto 5dc8cb4b8a8eb076cbb5a06bc3b8682c15bdbbd3

View File

@ -1,3 +1,4 @@
github.com/Microsoft/go-winio 9f57cbbcbcb41dea496528872a4f0e37a4f7ae98
github.com/Shopify/sarama 8aadb476e66ca998f2f6bb3c993e9a2daa3666b9 github.com/Shopify/sarama 8aadb476e66ca998f2f6bb3c993e9a2daa3666b9
github.com/Sirupsen/logrus 219c8cb75c258c552e999735be6df753ffc7afdc github.com/Sirupsen/logrus 219c8cb75c258c552e999735be6df753ffc7afdc
github.com/StackExchange/wmi f3e2bae1e0cb5aef83e319133eabfee30013a4a5 github.com/StackExchange/wmi f3e2bae1e0cb5aef83e319133eabfee30013a4a5
@ -9,24 +10,24 @@ github.com/couchbase/go-couchbase cb664315a324d87d19c879d9cc67fda6be8c2ac1
github.com/couchbase/gomemcached a5ea6356f648fec6ab89add00edd09151455b4b2 github.com/couchbase/gomemcached a5ea6356f648fec6ab89add00edd09151455b4b2
github.com/couchbase/goutils 5823a0cbaaa9008406021dc5daf80125ea30bba6 github.com/couchbase/goutils 5823a0cbaaa9008406021dc5daf80125ea30bba6
github.com/dancannon/gorethink e7cac92ea2bc52638791a021f212145acfedb1fc github.com/dancannon/gorethink e7cac92ea2bc52638791a021f212145acfedb1fc
github.com/davecgh/go-spew fc32781af5e85e548d3f1abaf0fa3dbe8a72495c github.com/davecgh/go-spew 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d
github.com/docker/engine-api 8924d6900370b4c7e7984be5adc61f50a80d7537
github.com/docker/go-connections f549a9393d05688dff0992ef3efd8bbe6c628aeb
github.com/docker/go-units 5d2041e26a699eaca682e2ea41c8f891e1060444
github.com/eapache/go-resiliency b86b1ec0dd4209a588dc1285cdd471e73525c0b3 github.com/eapache/go-resiliency b86b1ec0dd4209a588dc1285cdd471e73525c0b3
github.com/eapache/queue ded5959c0d4e360646dc9e9908cff48666781367 github.com/eapache/queue ded5959c0d4e360646dc9e9908cff48666781367
github.com/eclipse/paho.mqtt.golang 4ab3e867810d1ec5f35157c59e965054dbf43a0d github.com/eclipse/paho.mqtt.golang 0f7a459f04f13a41b7ed752d47944528d4bf9a86
github.com/fsouza/go-dockerclient a49c8269a6899cae30da1f8a4b82e0ce945f9967
github.com/go-ini/ini 776aa739ce9373377cd16f526cdf06cb4c89b40f
github.com/go-ole/go-ole 50055884d646dd9434f16bbb5c9801749b9bafe4 github.com/go-ole/go-ole 50055884d646dd9434f16bbb5c9801749b9bafe4
github.com/go-sql-driver/mysql 1fca743146605a172a266e1654e01e5cd5669bee github.com/go-sql-driver/mysql 1fca743146605a172a266e1654e01e5cd5669bee
github.com/golang/protobuf 552c7b9542c194800fd493123b3798ef0a832032 github.com/golang/protobuf 552c7b9542c194800fd493123b3798ef0a832032
github.com/golang/snappy 5979233c5d6225d4a8e438cdd0b411888449ddab github.com/golang/snappy 427fb6fc07997f43afa32f35e850833760e489a7
github.com/gonuts/go-shellquote e842a11b24c6abfb3dd27af69a17f482e4b483c2 github.com/gonuts/go-shellquote e842a11b24c6abfb3dd27af69a17f482e4b483c2
github.com/gorilla/context 1ea25387ff6f684839d82767c1733ff4d4d15d0a github.com/gorilla/context 1ea25387ff6f684839d82767c1733ff4d4d15d0a
github.com/gorilla/mux c9e326e2bdec29039a3761c07bece13133863e1e github.com/gorilla/mux c9e326e2bdec29039a3761c07bece13133863e1e
github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478 github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478
github.com/influxdata/config b79f6829346b8d6e78ba73544b1e1038f1f1c9da github.com/influxdata/config b79f6829346b8d6e78ba73544b1e1038f1f1c9da
github.com/influxdata/influxdb c190778997f4154294e6160c41b90140641ac915 github.com/influxdata/influxdb e3fef5593c21644f2b43af55d6e17e70910b0e48
github.com/influxdata/toml af4df43894b16e3fd2b788d01bd27ad0776ef2d0 github.com/influxdata/toml af4df43894b16e3fd2b788d01bd27ad0776ef2d0
github.com/jmespath/go-jmespath 0b12d6b521d83fc7f755e7cfc1b1fbdd35a01a74
github.com/klauspost/crc32 19b0b332c9e4516a6370a0456e6182c3b5036720 github.com/klauspost/crc32 19b0b332c9e4516a6370a0456e6182c3b5036720
github.com/lib/pq e182dc4027e2ded4b19396d638610f2653295f36 github.com/lib/pq e182dc4027e2ded4b19396d638610f2653295f36
github.com/lxn/win 9a7734ea4db26bc593d52f6a8a957afdad39c5c1 github.com/lxn/win 9a7734ea4db26bc593d52f6a8a957afdad39c5c1
@ -37,7 +38,6 @@ github.com/naoina/go-stringutil 6b638e95a32d0c1131db0e7fe83775cbea4a0d0b
github.com/nats-io/nats b13fc9d12b0b123ebc374e6b808c6228ae4234a3 github.com/nats-io/nats b13fc9d12b0b123ebc374e6b808c6228ae4234a3
github.com/nats-io/nuid 4f84f5f3b2786224e336af2e13dba0a0a80b76fa github.com/nats-io/nuid 4f84f5f3b2786224e336af2e13dba0a0a80b76fa
github.com/nsqio/go-nsq 0b80d6f05e15ca1930e0c5e1d540ed627e299980 github.com/nsqio/go-nsq 0b80d6f05e15ca1930e0c5e1d540ed627e299980
github.com/pmezard/go-difflib 792786c7400a136282c1664665ae0a8db921c6c2
github.com/prometheus/client_golang 18acf9993a863f4c4b40612e19cdd243e7c86831 github.com/prometheus/client_golang 18acf9993a863f4c4b40612e19cdd243e7c86831
github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6 github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
github.com/prometheus/common e8eabff8812b05acf522b45fdcd725a785188e37 github.com/prometheus/common e8eabff8812b05acf522b45fdcd725a785188e37
@ -47,9 +47,8 @@ github.com/shirou/gopsutil 1f32ce1bb380845be7f5d174ac641a2c592c0c42
github.com/shirou/w32 ada3ba68f000aa1b58580e45c9d308fe0b7fc5c5 github.com/shirou/w32 ada3ba68f000aa1b58580e45c9d308fe0b7fc5c5
github.com/soniah/gosnmp b1b4f885b12c5dcbd021c5cee1c904110de6db7d github.com/soniah/gosnmp b1b4f885b12c5dcbd021c5cee1c904110de6db7d
github.com/streadway/amqp b4f3ceab0337f013208d31348b578d83c0064744 github.com/streadway/amqp b4f3ceab0337f013208d31348b578d83c0064744
github.com/stretchr/objx 1a9d0bb9f541897e62256577b352fdbc1fb4fd94
github.com/stretchr/testify 1f4a1643a57e798696635ea4c126e9127adb7d3c github.com/stretchr/testify 1f4a1643a57e798696635ea4c126e9127adb7d3c
github.com/wvanbergen/kafka 1a8639a45164fcc245d5c7b4bd3ccfbd1a0ffbf3 github.com/wvanbergen/kafka 46f9a1cf3f670edec492029fadded9c2d9e18866
github.com/wvanbergen/kazoo-go 0f768712ae6f76454f987c3356177e138df258f8 github.com/wvanbergen/kazoo-go 0f768712ae6f76454f987c3356177e138df258f8
github.com/zensqlmonitor/go-mssqldb ffe5510c6fa5e15e6d983210ab501c815b56b363 github.com/zensqlmonitor/go-mssqldb ffe5510c6fa5e15e6d983210ab501c815b56b363
golang.org/x/net 6acef71eb69611914f7a30939ea9f6e194c78172 golang.org/x/net 6acef71eb69611914f7a30939ea9f6e194c78172

167
README.md
View File

@ -20,12 +20,12 @@ new plugins.
### Linux deb and rpm Packages: ### Linux deb and rpm Packages:
Latest: Latest:
* http://get.influxdb.org/telegraf/telegraf_0.11.1-1_amd64.deb * http://get.influxdb.org/telegraf/telegraf_0.12.0-1_amd64.deb
* http://get.influxdb.org/telegraf/telegraf-0.11.1-1.x86_64.rpm * http://get.influxdb.org/telegraf/telegraf-0.12.0-1.x86_64.rpm
Latest (arm): Latest (arm):
* http://get.influxdb.org/telegraf/telegraf_0.11.1-1_armhf.deb * http://get.influxdb.org/telegraf/telegraf_0.12.0-1_armhf.deb
* http://get.influxdb.org/telegraf/telegraf-0.11.1-1.armhf.rpm * http://get.influxdb.org/telegraf/telegraf-0.12.0-1.armhf.rpm
##### Package Instructions: ##### Package Instructions:
@ -46,28 +46,28 @@ to use this repo to install & update telegraf.
### Linux tarballs: ### Linux tarballs:
Latest: Latest:
* http://get.influxdb.org/telegraf/telegraf-0.11.1-1_linux_amd64.tar.gz * http://get.influxdb.org/telegraf/telegraf-0.12.0-1_linux_amd64.tar.gz
* http://get.influxdb.org/telegraf/telegraf-0.11.1-1_linux_i386.tar.gz * http://get.influxdb.org/telegraf/telegraf-0.12.0-1_linux_i386.tar.gz
* http://get.influxdb.org/telegraf/telegraf-0.11.1-1_linux_armhf.tar.gz * http://get.influxdb.org/telegraf/telegraf-0.12.0-1_linux_armhf.tar.gz
##### tarball Instructions: ##### tarball Instructions:
To install the full directory structure with config file, run: To install the full directory structure with config file, run:
``` ```
sudo tar -C / -zxvf ./telegraf-0.11.1-1_linux_amd64.tar.gz sudo tar -C / -zxvf ./telegraf-0.12.0-1_linux_amd64.tar.gz
``` ```
To extract only the binary, run: To extract only the binary, run:
``` ```
tar -zxvf telegraf-0.11.1-1_linux_amd64.tar.gz --strip-components=3 ./usr/bin/telegraf tar -zxvf telegraf-0.12.0-1_linux_amd64.tar.gz --strip-components=3 ./usr/bin/telegraf
``` ```
### FreeBSD tarball: ### FreeBSD tarball:
Latest: Latest:
* http://get.influxdb.org/telegraf/telegraf-0.11.1-1_freebsd_amd64.tar.gz * http://get.influxdb.org/telegraf/telegraf-0.12.0-1_freebsd_amd64.tar.gz
##### tarball Instructions: ##### tarball Instructions:
@ -87,8 +87,8 @@ brew install telegraf
### Windows Binaries (EXPERIMENTAL) ### Windows Binaries (EXPERIMENTAL)
Latest: Latest:
* http://get.influxdb.org/telegraf/telegraf-0.11.1-1_windows_amd64.zip * http://get.influxdb.org/telegraf/telegraf-0.12.0-1_windows_amd64.zip
* http://get.influxdb.org/telegraf/telegraf-0.11.1-1_windows_i386.zip * http://get.influxdb.org/telegraf/telegraf-0.12.0-1_windows_i386.zip
### From Source: ### From Source:
@ -156,55 +156,58 @@ more information on each, please look at the directory of the same name in
Currently implemented sources: Currently implemented sources:
* aerospike * [aws cloudwatch](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/cloudwatch)
* apache * [aerospike](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/aerospike)
* bcache * [apache](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/apache)
* couchbase * [bcache](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/bcache)
* couchdb * [couchbase](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/couchbase)
* disque * [couchdb](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/couchdb)
* dns query time * [disque](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/disque)
* docker * [dns query time](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/dns_query)
* dovecot * [docker](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/docker)
* elasticsearch * [dovecot](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/dovecot)
* exec (generic executable plugin, support JSON, influx, graphite and nagios) * [elasticsearch](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/elasticsearch)
* haproxy * [exec](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/exec) (generic executable plugin, support JSON, influx, graphite and nagios)
* httpjson (generic JSON-emitting http service plugin) * [haproxy](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/haproxy)
* influxdb * [http_response](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/http_response)
* ipmi_sensor * [httpjson](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/httpjson) (generic JSON-emitting http service plugin)
* jolokia * [influxdb](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/influxdb)
* leofs * [ipmi_sensor](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/ipmi_sensor)
* lustre2 * [jolokia](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia)
* mailchimp * [leofs](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/leofs)
* memcached * [lustre2](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/lustre2)
* mesos * [mailchimp](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/mailchimp)
* mongodb * [memcached](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/memcached)
* mysql * [mesos](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/mesos)
* net_response * [mongodb](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/mongodb)
* nginx * [mysql](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/mysql)
* nsq * [net_response](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/net_response)
* ntpq * [nginx](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nginx)
* phpfpm * [nsq](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nsq)
* phusion passenger * [ntpq](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/ntpq)
* ping * [phpfpm](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/phpfpm)
* postgresql * [phusion passenger](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/passenger)
* postgresql_extensible * [ping](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/ping)
* powerdns * [postgresql](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/postgresql)
* procstat * [postgresql_extensible](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/postgresql_extensible)
* prometheus * [powerdns](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/powerdns)
* puppetagent * [procstat](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/procstat)
* rabbitmq * [prometheus](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/prometheus)
* raindrops * [puppetagent](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/puppetagent)
* redis * [rabbitmq](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/rabbitmq)
* rethinkdb * [raindrops](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/raindrops)
* riak * [redis](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/redis)
* sensors (only available if built from source) * [rethinkdb](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/rethinkdb)
* snmp * [riak](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/riak)
* sql server (microsoft) * [sensors](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/sensors) (only available if built from source)
* twemproxy * [snmp](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/snmp)
* zfs * [sql server](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/sqlserver) (microsoft)
* zookeeper * [twemproxy](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/twemproxy)
* win_perf_counters (windows performance counters) * [zfs](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/zfs)
* system * [zookeeper](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/zookeeper)
* [win_perf_counters](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/win_perf_counters) (windows performance counters)
* [sysstat](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/sysstat)
* [system](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/system)
* cpu * cpu
* mem * mem
* net * net
@ -217,33 +220,33 @@ Currently implemented sources:
Telegraf can also collect metrics via the following service plugins: Telegraf can also collect metrics via the following service plugins:
* statsd * [statsd](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/statsd)
* udp_listener * [udp_listener](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/udp_listener)
* tcp_listener * [tcp_listener](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/tcp_listener)
* mqtt_consumer * [mqtt_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/mqtt_consumer)
* kafka_consumer * [kafka_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/kafka_consumer)
* nats_consumer * [nats_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nats_consumer)
* github_webhooks * [github_webhooks](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/github_webhooks)
We'll be adding support for many more over the coming months. Read on if you We'll be adding support for many more over the coming months. Read on if you
want to add support for another service or third-party API. want to add support for another service or third-party API.
## Supported Output Plugins ## Supported Output Plugins
* influxdb * [influxdb](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/influxdb)
* amon * [amon](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/amon)
* amqp * [amqp](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/amqp)
* aws kinesis * [aws kinesis](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/kinesis)
* aws cloudwatch * [aws cloudwatch](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/cloudwatch)
* datadog * [datadog](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/datadog)
* graphite * [graphite](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/graphite)
* kafka * [kafka](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/kafka)
* librato * [librato](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/librato)
* mqtt * [mqtt](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/mqtt)
* nsq * [nsq](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/nsq)
* opentsdb * [opentsdb](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/opentsdb)
* prometheus * [prometheus](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/prometheus_client)
* riemann * [riemann](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/riemann)
## Contributing ## Contributing

View File

@ -2,10 +2,11 @@
Telegraf is able to parse the following input data formats into metrics: Telegraf is able to parse the following input data formats into metrics:
1. InfluxDB Line Protocol 1. [InfluxDB Line Protocol](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#influx)
1. JSON 1. [JSON](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#json)
1. Graphite 1. [Graphite](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#graphite)
1. Value, ie 45 or "booyah" 1. [Value](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#value), ie: 45 or "booyah"
1. [Nagios](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#nagios) (exec input only)
Telegraf metrics, like InfluxDB Telegraf metrics, like InfluxDB
[points](https://docs.influxdata.com/influxdb/v0.10/write_protocols/line/), [points](https://docs.influxdata.com/influxdb/v0.10/write_protocols/line/),
@ -38,7 +39,7 @@ example, in the exec plugin:
## measurement name suffix (for separating different commands) ## measurement name suffix (for separating different commands)
name_suffix = "_mycollector" name_suffix = "_mycollector"
## Data format to consume. This can be "json", "influx" or "graphite" ## Data format to consume.
## Each data format has its own unique set of configuration options, read ## Each data format has its own unique set of configuration options, read
## more about them here: ## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
@ -50,7 +51,7 @@ example, in the exec plugin:
Each data_format has an additional set of configuration options available, which Each data_format has an additional set of configuration options available, which
I'll go over below. I'll go over below.
## Influx: # Influx:
There are no additional configuration options for InfluxDB line-protocol. The There are no additional configuration options for InfluxDB line-protocol. The
metrics are parsed directly into Telegraf metrics. metrics are parsed directly into Telegraf metrics.
@ -65,14 +66,14 @@ metrics are parsed directly into Telegraf metrics.
## measurement name suffix (for separating different commands) ## measurement name suffix (for separating different commands)
name_suffix = "_mycollector" name_suffix = "_mycollector"
## Data format to consume. This can be "json", "influx" or "graphite" ## Data format to consume.
## Each data format has its own unique set of configuration options, read ## Each data format has its own unique set of configuration options, read
## more about them here: ## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "influx" data_format = "influx"
``` ```
## JSON: # JSON:
The JSON data format flattens JSON into metric _fields_. For example, this JSON: The JSON data format flattens JSON into metric _fields_. For example, this JSON:
@ -110,7 +111,7 @@ For example, if you had this configuration:
## measurement name suffix (for separating different commands) ## measurement name suffix (for separating different commands)
name_suffix = "_mycollector" name_suffix = "_mycollector"
## Data format to consume. This can be "json", "influx" or "graphite" ## Data format to consume.
## Each data format has its own unique set of configuration options, read ## Each data format has its own unique set of configuration options, read
## more about them here: ## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
@ -141,21 +142,20 @@ Your Telegraf metrics would get tagged with "my_tag_1"
exec_mycollector,my_tag_1=foo a=5,b_c=6 exec_mycollector,my_tag_1=foo a=5,b_c=6
``` ```
## Value: # Value:
The "value" data format translates single values into Telegraf metrics. This The "value" data format translates single values into Telegraf metrics. This
is done by assigning a measurement name (which can be overridden using the is done by assigning a measurement name and setting a single field ("value")
`name_override` config option), and setting a single field ("value") as the as the parsed metric.
parsed metric.
#### Value Configuration: #### Value Configuration:
You can tell Telegraf what type of metric to collect by using the `data_type` You **must** tell Telegraf what type of metric to collect by using the
configuration option. `data_type` configuration option.
It is also recommended that you set `name_override` to a measurement name that **Note:** It is also recommended that you set `name_override` to a measurement
makes sense for your metric, otherwise it will just be set to the name of the name that makes sense for your metric, otherwise it will just be set to the
plugin. name of the plugin.
```toml ```toml
[[inputs.exec]] [[inputs.exec]]
@ -165,15 +165,15 @@ plugin.
## override the default metric name of "exec" ## override the default metric name of "exec"
name_override = "entropy_available" name_override = "entropy_available"
## Data format to consume. This can be "json", "value", influx" or "graphite" ## Data format to consume.
## Each data format has its own unique set of configuration options, read ## Each data format has its own unique set of configuration options, read
## more about them here: ## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "value" data_format = "value"
data_type = "integer" data_type = "integer" # required
``` ```
## Graphite: # Graphite:
The Graphite data format translates graphite _dot_ buckets directly into The Graphite data format translates graphite _dot_ buckets directly into
telegraf measurement names, with a single value field, and without any tags. For telegraf measurement names, with a single value field, and without any tags. For
@ -301,7 +301,7 @@ There are many more options available,
## measurement name suffix (for separating different commands) ## measurement name suffix (for separating different commands)
name_suffix = "_mycollector" name_suffix = "_mycollector"
## Data format to consume. This can be "json", "influx" or "graphite" (line-protocol) ## Data format to consume.
## Each data format has its own unique set of configuration options, read ## Each data format has its own unique set of configuration options, read
## more about them here: ## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
@ -327,7 +327,7 @@ There are many more options available,
] ]
``` ```
## Nagios: # Nagios:
There are no additional configuration options for Nagios line-protocol. The There are no additional configuration options for Nagios line-protocol. The
metrics are parsed directly into Telegraf metrics. metrics are parsed directly into Telegraf metrics.
@ -344,7 +344,7 @@ Note: Nagios Input Data Formats is only supported in `exec` input plugin.
## measurement name suffix (for separating different commands) ## measurement name suffix (for separating different commands)
name_suffix = "_mycollector" name_suffix = "_mycollector"
## Data format to consume. This can be "json", "influx", "graphite" or "nagios" ## Data format to consume.
## Each data format has it's own unique set of configuration options, read ## Each data format has it's own unique set of configuration options, read
## more about them here: ## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md

View File

@ -29,7 +29,8 @@ config option, for example, in the `file` output plugin:
## Files to write to, "stdout" is a specially handled file. ## Files to write to, "stdout" is a specially handled file.
files = ["stdout"] files = ["stdout"]
## Data format to output. This can be "influx" or "graphite" ## Data format to output.
## Each data format has it's own unique set of configuration options, read ## Each data format has it's own unique set of configuration options, read
## more about them here: ## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
@ -53,7 +54,8 @@ metrics are serialized directly into InfluxDB line-protocol.
## Files to write to, "stdout" is a specially handled file. ## Files to write to, "stdout" is a specially handled file.
files = ["stdout", "/tmp/metrics.out"] files = ["stdout", "/tmp/metrics.out"]
## Data format to output. This can be "influx", "json" or "graphite" ## Data format to output.
## Each data format has it's own unique set of configuration options, read ## Each data format has it's own unique set of configuration options, read
## more about them here: ## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
@ -87,7 +89,8 @@ tars.cpu-total.us-east-1.cpu.usage_idle 98.09 1455320690
## Files to write to, "stdout" is a specially handled file. ## Files to write to, "stdout" is a specially handled file.
files = ["stdout", "/tmp/metrics.out"] files = ["stdout", "/tmp/metrics.out"]
## Data format to output. This can be "influx", "json" or "graphite" ## Data format to output.
## Each data format has it's own unique set of configuration options, read ## Each data format has it's own unique set of configuration options, read
## more about them here: ## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
@ -123,7 +126,8 @@ The Json data format serialized Telegraf metrics in json format. The format is:
## Files to write to, "stdout" is a specially handled file. ## Files to write to, "stdout" is a specially handled file.
files = ["stdout", "/tmp/metrics.out"] files = ["stdout", "/tmp/metrics.out"]
## Data format to output. This can be "influx", "json" or "graphite" ## Data format to output.
## Each data format has it's own unique set of configuration options, read ## Each data format has it's own unique set of configuration options, read
## more about them here: ## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md

36
docs/WINDOWS_SERVICE.md Normal file
View File

@ -0,0 +1,36 @@
# Running Telegraf as a Windows Service
If you have tried to install Go binaries as Windows Services with the **sc.exe**
tool you may have seen that the service errors and stops running after a while.
**NSSM** (the Non-Sucking Service Manager) is a tool that helps you in a
[number of scenarios](http://nssm.cc/scenarios) including running Go binaries
that were not specifically designed to run only in Windows platforms.
## NSSM Installation via Chocolatey
You can install [Chocolatey](https://chocolatey.org/) and [NSSM](http://nssm.cc/)
with these commands
```powershell
iex ((new-object net.webclient).DownloadString('https://chocolatey.org/install.ps1'))
choco install -y nssm
```
## Installing Telegraf as a Windows Service with NSSM
You can download the latest Telegraf Windows binaries (still Experimental at
the moment) from [the Telegraf Github repo](https://github.com/influxdata/telegraf).
Then you can create a C:\telegraf folder, unzip the binary there and modify the
**telegraf.conf** sample to configure the metrics you want to send to **InfluxDB**.
Once you have NSSM installed in your system, the process is quite straightforward.
You only need to type this command in your Windows shell
```powershell
nssm install Telegraf c:\telegraf\telegraf.exe -config c:\telegraf\telegraf.conf
```
And now your service will be installed in Windows and you will be able to start and
stop it gracefully

View File

@ -378,6 +378,11 @@
# skip_serial_number = true # skip_serial_number = true
# Get kernel statistics from /proc/stat
[[inputs.kernel]]
# no configuration
# Read metrics about memory usage # Read metrics about memory usage
[[inputs.mem]] [[inputs.mem]]
# no configuration # no configuration
@ -424,6 +429,36 @@
# bcacheDevs = ["bcache0"] # bcacheDevs = ["bcache0"]
# # Pull Metric Statistics from Amazon CloudWatch
# [[inputs.cloudwatch]]
# ## Amazon Region
# region = 'us-east-1'
#
# ## Requested CloudWatch aggregation Period (required - must be a multiple of 60s)
# period = '1m'
#
# ## Collection Delay (required - must account for metrics availability via CloudWatch API)
# delay = '1m'
#
# ## Recommended: use metric 'interval' that is a multiple of 'period' to avoid
# ## gaps or overlap in pulled data
# interval = '1m'
#
# ## Metric Statistic Namespace (required)
# namespace = 'AWS/ELB'
#
# ## Metrics to Pull (optional)
# ## Defaults to all Metrics in Namespace if nothing is provided
# ## Refreshes Namespace available metrics every 1h
# #[[inputs.cloudwatch.metrics]]
# # names = ['Latency', 'RequestCount']
# #
# # ## Dimension filters for Metric (optional)
# # [[inputs.cloudwatch.metrics.dimensions]]
# # name = 'LoadBalancerName'
# # value = 'p-example'
# # Read metrics from one or many couchbase clusters # # Read metrics from one or many couchbase clusters
# [[inputs.couchbase]] # [[inputs.couchbase]]
# ## specify servers via a url matching: # ## specify servers via a url matching:
@ -534,6 +569,25 @@
# ## servers = ["socket://run/haproxy/admin.sock"] # ## servers = ["socket://run/haproxy/admin.sock"]
# # HTTP/HTTPS request given an address a method and a timeout
# [[inputs.http_response]]
# ## Server address (default http://localhost)
# address = "http://github.com"
# ## Set response_timeout (default 5 seconds)
# response_timeout = 5
# ## HTTP Request Method
# method = "GET"
# ## Whether to follow redirects from the server (defaults to false)
# follow_redirects = true
# ## HTTP Request Headers (all values must be strings)
# # [inputs.http_response.headers]
# # Host = "github.com"
# ## Optional HTTP Request Body
# # body = '''
# # {'fake':'data'}
# # '''
# # Read flattened metrics from one or more JSON HTTP endpoints # # Read flattened metrics from one or more JSON HTTP endpoints
# [[inputs.httpjson]] # [[inputs.httpjson]]
# ## NOTE This plugin only reads numerical measurements, strings and booleans # ## NOTE This plugin only reads numerical measurements, strings and booleans
@ -894,6 +948,8 @@
# #
# ## Field name prefix # ## Field name prefix
# prefix = "" # prefix = ""
# ## comment this out if you want raw cpu_time stats
# fielddrop = ["cpu_time_*"]
# # Read metrics from one or many prometheus clients # # Read metrics from one or many prometheus clients
@ -1238,10 +1294,6 @@
# ## calculation of percentiles. Raising this limit increases the accuracy # ## calculation of percentiles. Raising this limit increases the accuracy
# ## of percentiles but also increases the memory usage and cpu time. # ## of percentiles but also increases the memory usage and cpu time.
# percentile_limit = 1000 # percentile_limit = 1000
#
# ## UDP packet size for the server to listen for. This will depend on the size
# ## of the packets that the client is sending, which is usually 1500 bytes.
# udp_packet_size = 1500
# # Generic TCP listener # # Generic TCP listener
@ -1272,11 +1324,6 @@
# ## UDP listener will start dropping packets. # ## UDP listener will start dropping packets.
# allowed_pending_messages = 10000 # allowed_pending_messages = 10000
# #
# ## UDP packet size for the server to listen for. This will depend
# ## on the size of the packets that the client is sending, which is
# ## usually 1500 bytes, but can be as large as 65,535 bytes.
# udp_packet_size = 1500
#
# ## Data format to consume. # ## Data format to consume.
# ## Each data format has it's own unique set of configuration options, read # ## Each data format has it's own unique set of configuration options, read
# ## more about them here: # ## more about them here:

View File

@ -5,6 +5,7 @@ import (
_ "github.com/influxdata/telegraf/plugins/inputs/apache" _ "github.com/influxdata/telegraf/plugins/inputs/apache"
_ "github.com/influxdata/telegraf/plugins/inputs/bcache" _ "github.com/influxdata/telegraf/plugins/inputs/bcache"
_ "github.com/influxdata/telegraf/plugins/inputs/cassandra" _ "github.com/influxdata/telegraf/plugins/inputs/cassandra"
_ "github.com/influxdata/telegraf/plugins/inputs/cloudwatch"
_ "github.com/influxdata/telegraf/plugins/inputs/couchbase" _ "github.com/influxdata/telegraf/plugins/inputs/couchbase"
_ "github.com/influxdata/telegraf/plugins/inputs/couchdb" _ "github.com/influxdata/telegraf/plugins/inputs/couchdb"
_ "github.com/influxdata/telegraf/plugins/inputs/disque" _ "github.com/influxdata/telegraf/plugins/inputs/disque"
@ -15,6 +16,7 @@ import (
_ "github.com/influxdata/telegraf/plugins/inputs/exec" _ "github.com/influxdata/telegraf/plugins/inputs/exec"
_ "github.com/influxdata/telegraf/plugins/inputs/github_webhooks" _ "github.com/influxdata/telegraf/plugins/inputs/github_webhooks"
_ "github.com/influxdata/telegraf/plugins/inputs/haproxy" _ "github.com/influxdata/telegraf/plugins/inputs/haproxy"
_ "github.com/influxdata/telegraf/plugins/inputs/http_response"
_ "github.com/influxdata/telegraf/plugins/inputs/httpjson" _ "github.com/influxdata/telegraf/plugins/inputs/httpjson"
_ "github.com/influxdata/telegraf/plugins/inputs/influxdb" _ "github.com/influxdata/telegraf/plugins/inputs/influxdb"
_ "github.com/influxdata/telegraf/plugins/inputs/ipmi_sensor" _ "github.com/influxdata/telegraf/plugins/inputs/ipmi_sensor"
@ -51,6 +53,7 @@ import (
_ "github.com/influxdata/telegraf/plugins/inputs/snmp" _ "github.com/influxdata/telegraf/plugins/inputs/snmp"
_ "github.com/influxdata/telegraf/plugins/inputs/sqlserver" _ "github.com/influxdata/telegraf/plugins/inputs/sqlserver"
_ "github.com/influxdata/telegraf/plugins/inputs/statsd" _ "github.com/influxdata/telegraf/plugins/inputs/statsd"
_ "github.com/influxdata/telegraf/plugins/inputs/sysstat"
_ "github.com/influxdata/telegraf/plugins/inputs/system" _ "github.com/influxdata/telegraf/plugins/inputs/system"
_ "github.com/influxdata/telegraf/plugins/inputs/tcp_listener" _ "github.com/influxdata/telegraf/plugins/inputs/tcp_listener"
_ "github.com/influxdata/telegraf/plugins/inputs/trig" _ "github.com/influxdata/telegraf/plugins/inputs/trig"

View File

@ -0,0 +1,86 @@
# Amazon CloudWatch Statistics Input
This plugin will pull Metric Statistics from Amazon CloudWatch.
### Amazon Authentication
This plugin uses a credential chain for Authentication with the CloudWatch
API endpoint. In the following order the plugin will attempt to authenticate.
1. [IAM Role](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html)
2. [Environment Variables](https://github.com/aws/aws-sdk-go/wiki/configuring-sdk#environment-variables)
3. [Shared Credentials](https://github.com/aws/aws-sdk-go/wiki/configuring-sdk#shared-credentials-file)
### Configuration:
```toml
[[inputs.cloudwatch]]
## Amazon Region (required)
region = 'us-east-1'
## Requested CloudWatch aggregation Period (required - must be a multiple of 60s)
period = '1m'
## Collection Delay (required - must account for metrics availability via CloudWatch API)
delay = '1m'
## Override global run interval (optional - defaults to global interval)
## Recommended: use metric 'interval' that is a multiple of 'period' to avoid
## gaps or overlap in pulled data
interval = '1m'
## Metric Statistic Namespace (required)
namespace = 'AWS/ELB'
## Metrics to Pull (optional)
## Defaults to all Metrics in Namespace if nothing is provided
## Refreshes Namespace available metrics every 1h
[[inputs.cloudwatch.metrics]]
names = ['Latency', 'RequestCount']
## Dimension filters for Metric (optional)
[[inputs.cloudwatch.metrics.dimensions]]
name = 'LoadBalancerName'
value = 'p-example'
```
#### Requirements and Terminology
Plugin Configuration utilizes [CloudWatch concepts](http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html) and access pattern to allow monitoring of any CloudWatch Metric.
- `region` must be a valid AWS [Region](http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#CloudWatchRegions) value
- `period` must be a valid CloudWatch [Period](http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#CloudWatchPeriods) value
- `namespace` must be a valid CloudWatch [Namespace](http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#Namespace) value
- `names` must be valid CloudWatch [Metric](http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#Metric) names
- `dimensions` must be valid CloudWatch [Dimension](http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#Dimension) name/value pairs
#### Restrictions and Limitations
- CloudWatch metrics are not available instantly via the CloudWatch API. You should adjust your collection `delay` to account for this lag in metrics availability based on your [monitoring subscription level](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-cloudwatch-new.html)
- CloudWatch API usage incurs cost - see [GetMetricStatistics Pricing](https://aws.amazon.com/cloudwatch/pricing/)
### Measurements & Fields:
Each CloudWatch Namespace monitored records a measurement with fields for each available Metric Statistic
Namespace and Metrics are represented in [snake case](https://en.wikipedia.org/wiki/Snake_case)
- cloudwatch_{namespace}
- {metric}_sum (metric Sum value)
- {metric}_average (metric Average value)
- {metric}_minimum (metric Minimum value)
- {metric}_maximum (metric Maximum value)
- {metric}_sample_count (metric SampleCount value)
### Tags:
Each measurement is tagged with the following identifiers to uniquely identify the associated metric
Tag Dimension names are represented in [snake case](https://en.wikipedia.org/wiki/Snake_case)
- All measurements have the following tags:
- region (CloudWatch Region)
- unit (CloudWatch Metric Unit)
- {dimension-name} (Cloudwatch Dimension value - one for each metric dimension)
### Example Output:
```
$ ./telegraf -config telegraf.conf -input-filter cloudwatch -test
> cloudwatch_aws_elb,load_balancer_name=p-example,region=us-east-1,unit=seconds latency_average=0.004810798017284538,latency_maximum=0.1100282669067383,latency_minimum=0.0006084442138671875,latency_sample_count=4029,latency_sum=19.382705211639404 1459542420000000000
```

View File

@ -0,0 +1,305 @@
package cloudwatch
import (
"fmt"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
"github.com/aws/aws-sdk-go/aws/ec2metadata"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/cloudwatch"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/inputs"
)
type (
	// CloudWatch pulls metric statistics from the Amazon CloudWatch API.
	// All toml-tagged fields are user-configurable; the remaining fields
	// are internal runtime state.
	CloudWatch struct {
		Region    string            `toml:"region"`    // AWS region, e.g. "us-east-1"
		Period    internal.Duration `toml:"period"`    // requested aggregation period
		Delay     internal.Duration `toml:"delay"`     // collection delay for CloudWatch availability lag
		Namespace string            `toml:"namespace"` // CloudWatch namespace, e.g. "AWS/ELB"
		Metrics   []*Metric         `toml:"metrics"`   // optional filter; nil means all metrics in the namespace

		client      cloudwatchClient // API client, created lazily on first Gather
		metricCache *MetricCache     // cached ListMetrics result, refreshed when stale
	}

	// Metric selects one or more metric names sharing a set of dimension filters.
	Metric struct {
		MetricNames []string     `toml:"names"`
		Dimensions  []*Dimension `toml:"dimensions"`
	}

	// Dimension is a single CloudWatch dimension name/value filter.
	Dimension struct {
		Name  string `toml:"name"`
		Value string `toml:"value"`
	}

	// MetricCache stores a namespace metric listing together with when it
	// was fetched and how long it stays valid (see IsValid).
	MetricCache struct {
		TTL     time.Duration
		Fetched time.Time
		Metrics []*cloudwatch.Metric
	}

	// cloudwatchClient is the subset of the CloudWatch API this plugin
	// uses, declared as an interface so tests can substitute a mock.
	cloudwatchClient interface {
		ListMetrics(*cloudwatch.ListMetricsInput) (*cloudwatch.ListMetricsOutput, error)
		GetMetricStatistics(*cloudwatch.GetMetricStatisticsInput) (*cloudwatch.GetMetricStatisticsOutput, error)
	}
)
// SampleConfig returns the example configuration snippet shown for this
// plugin by `telegraf -sample-config`.
func (c *CloudWatch) SampleConfig() string {
	return `
  ## Amazon Region
  region = 'us-east-1'

  ## Requested CloudWatch aggregation Period (required - must be a multiple of 60s)
  period = '1m'

  ## Collection Delay (required - must account for metrics availability via CloudWatch API)
  delay = '1m'

  ## Recomended: use metric 'interval' that is a multiple of 'period' to avoid
  ## gaps or overlap in pulled data
  interval = '1m'

  ## Metric Statistic Namespace (required)
  namespace = 'AWS/ELB'

  ## Metrics to Pull (optional)
  ## Defaults to all Metrics in Namespace if nothing is provided
  ## Refreshes Namespace available metrics every 1h
  #[[inputs.cloudwatch.metrics]]
  #  names = ['Latency', 'RequestCount']
  #
  #  ## Dimension filters for Metric (optional)
  #  [[inputs.cloudwatch.metrics.dimensions]]
  #    name = 'LoadBalancerName'
  #    value = 'p-example'
`
}
// Description returns the one-line plugin summary shown in telegraf's
// plugin listing.
func (c *CloudWatch) Description() string {
	const summary = "Pull Metric Statistics from Amazon CloudWatch"
	return summary
}
// Gather collects CloudWatch statistics for the configured namespace and
// feeds them into the accumulator. When an explicit metric filter is
// configured it is used verbatim; otherwise the (cached) full metric list
// for the namespace is fetched from the API. Metrics are queried
// concurrently with bounded parallelism, and the first error encountered
// is returned.
func (c *CloudWatch) Gather(acc telegraf.Accumulator) error {
	if c.client == nil {
		c.initializeCloudWatch()
	}

	var selected []*cloudwatch.Metric

	if c.Metrics != nil {
		// Expand the user-supplied filter into concrete API metric specs.
		selected = []*cloudwatch.Metric{}
		for _, m := range c.Metrics {
			dims := make([]*cloudwatch.Dimension, len(m.Dimensions))
			for i, d := range m.Dimensions {
				dims[i] = &cloudwatch.Dimension{
					Name:  aws.String(d.Name),
					Value: aws.String(d.Value),
				}
			}
			for _, metricName := range m.MetricNames {
				selected = append(selected, &cloudwatch.Metric{
					Namespace:  aws.String(c.Namespace),
					MetricName: aws.String(metricName),
					Dimensions: dims,
				})
			}
		}
	} else {
		fetched, err := c.fetchNamespaceMetrics()
		if err != nil {
			return err
		}
		selected = fetched
	}

	total := len(selected)
	// Buffered so every worker can report without blocking, even if the
	// caller returns early on the first error.
	results := make(chan error, total)
	reference := time.Now()

	// limit concurrency or we can easily exhaust user connection limit
	tokens := make(chan byte, 64)

	for _, m := range selected {
		tokens <- 0x1
		go c.gatherMetric(acc, m, reference, tokens, results)
	}

	for i := 0; i < total; i++ {
		if err := <-results; err != nil {
			return err
		}
	}
	return nil
}
// init registers the cloudwatch plugin with telegraf's input registry so it
// can be enabled from the configuration file.
func init() {
	inputs.Add("cloudwatch", func() telegraf.Input {
		return &CloudWatch{}
	})
}
// initializeCloudWatch builds the CloudWatch API client for the configured
// region using a credential chain, tried in order:
//  1. EC2 instance IAM role
//  2. environment variables
//  3. shared credentials file
// It currently always returns nil; the error return is kept for call-site
// symmetry.
func (c *CloudWatch) initializeCloudWatch() error {
	config := &aws.Config{
		Region: aws.String(c.Region),
		Credentials: credentials.NewChainCredentials(
			[]credentials.Provider{
				&ec2rolecreds.EC2RoleProvider{Client: ec2metadata.New(session.New())},
				&credentials.EnvProvider{},
				&credentials.SharedCredentialsProvider{},
			}),
	}

	c.client = cloudwatch.New(session.New(config))
	return nil
}
// fetchNamespaceMetrics returns every metric available in the configured
// CloudWatch namespace, paging through the ListMetrics API until NextToken
// is exhausted. Results are cached for one hour (matching the sample-config
// note "Refreshes Namespace available metrics every 1h") to limit API
// usage; a still-valid cache short-circuits the API call entirely.
func (c *CloudWatch) fetchNamespaceMetrics() (metrics []*cloudwatch.Metric, err error) {
	if c.metricCache != nil && c.metricCache.IsValid() {
		metrics = c.metricCache.Metrics
		return
	}

	metrics = []*cloudwatch.Metric{}

	var token *string
	for more := true; more; {
		params := &cloudwatch.ListMetricsInput{
			Namespace:  aws.String(c.Namespace),
			Dimensions: []*cloudwatch.DimensionFilter{},
			NextToken:  token,
			MetricName: nil,
		}

		resp, err := c.client.ListMetrics(params)
		if err != nil {
			return nil, err
		}

		metrics = append(metrics, resp.Metrics...)

		token = resp.NextToken
		more = token != nil
	}

	// BUG FIX: this previously did `time.ParseDuration("1hr")` — "1hr" is
	// not a valid Go duration (the hour unit is "h"), and the ignored error
	// left the TTL at zero, so the cache never validated and ListMetrics
	// was re-fetched on every collection interval.
	c.metricCache = &MetricCache{
		Metrics: metrics,
		Fetched: time.Now(),
		TTL:     time.Hour,
	}

	return
}
// gatherMetric fetches statistics for a single metric over the current
// window and converts each returned datapoint into telegraf fields/tags.
// Protocol with the caller (Gather): exactly one value is sent on errChan
// (nil on success) so completions can be counted, and one token is drained
// from semaphore when the goroutine finishes, releasing a concurrency slot.
func (c *CloudWatch) gatherMetric(acc telegraf.Accumulator, metric *cloudwatch.Metric, now time.Time, semaphore chan byte, errChan chan error) {
	params := c.getStatisticsInput(metric, now)
	resp, err := c.client.GetMetricStatistics(params)
	if err != nil {
		errChan <- err
		<-semaphore
		return
	}

	for _, point := range resp.Datapoints {
		// Tag with region, unit, and every dimension of the metric.
		tags := map[string]string{
			"region": c.Region,
			"unit":   snakeCase(*point.Unit),
		}

		for _, d := range metric.Dimensions {
			tags[snakeCase(*d.Name)] = *d.Value
		}

		// record field for each statistic; CloudWatch omits statistics it
		// did not compute, hence the nil checks.
		fields := map[string]interface{}{}

		if point.Average != nil {
			fields[formatField(*metric.MetricName, cloudwatch.StatisticAverage)] = *point.Average
		}
		if point.Maximum != nil {
			fields[formatField(*metric.MetricName, cloudwatch.StatisticMaximum)] = *point.Maximum
		}
		if point.Minimum != nil {
			fields[formatField(*metric.MetricName, cloudwatch.StatisticMinimum)] = *point.Minimum
		}
		if point.SampleCount != nil {
			fields[formatField(*metric.MetricName, cloudwatch.StatisticSampleCount)] = *point.SampleCount
		}
		if point.Sum != nil {
			fields[formatField(*metric.MetricName, cloudwatch.StatisticSum)] = *point.Sum
		}

		acc.AddFields(formatMeasurement(c.Namespace), fields, tags, *point.Timestamp)
	}

	errChan <- nil
	<-semaphore
}
/*
* Formatting helpers
*/
func formatField(metricName string, statistic string) string {
return fmt.Sprintf("%s_%s", snakeCase(metricName), snakeCase(statistic))
}
// formatMeasurement derives the telegraf measurement name from a CloudWatch
// namespace, e.g. "AWS/ELB" -> "cloudwatch_aws_elb".
func formatMeasurement(namespace string) string {
	sanitized := snakeCase(strings.Replace(namespace, "/", "_", -1))
	return "cloudwatch_" + sanitized
}
// snakeCase converts s via internal.SnakeCase and collapses the doubled
// underscores that conversion can produce.
func snakeCase(s string) string {
	return strings.Replace(internal.SnakeCase(s), "__", "_", -1)
}
// getStatisticsInput maps a CloudWatch metric onto a GetMetricStatisticsInput
// covering one aggregation Period that ends Delay before the supplied time,
// requesting all five standard statistics.
func (c *CloudWatch) getStatisticsInput(metric *cloudwatch.Metric, now time.Time) *cloudwatch.GetMetricStatisticsInput {
	windowEnd := now.Add(-c.Delay.Duration)
	windowStart := windowEnd.Add(-c.Period.Duration)

	return &cloudwatch.GetMetricStatisticsInput{
		StartTime:  aws.Time(windowStart),
		EndTime:    aws.Time(windowEnd),
		MetricName: metric.MetricName,
		Namespace:  metric.Namespace,
		Period:     aws.Int64(int64(c.Period.Duration.Seconds())),
		Dimensions: metric.Dimensions,
		Statistics: []*string{
			aws.String(cloudwatch.StatisticAverage),
			aws.String(cloudwatch.StatisticMaximum),
			aws.String(cloudwatch.StatisticMinimum),
			aws.String(cloudwatch.StatisticSum),
			aws.String(cloudwatch.StatisticSampleCount),
		},
	}
}
// IsValid reports whether the cache holds a metric listing that has not yet
// outlived its TTL.
func (c *MetricCache) IsValid() bool {
	if c.Metrics == nil {
		return false
	}
	return time.Since(c.Fetched) < c.TTL
}

View File

@ -0,0 +1,131 @@
package cloudwatch
import (
"testing"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/cloudwatch"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
)
// mockCloudWatchClient implements cloudwatchClient with canned responses so
// the plugin can be exercised without contacting the real AWS API.
type mockCloudWatchClient struct{}

// ListMetrics always returns a single "Latency" metric in the requested
// namespace with one LoadBalancerName=p-example dimension. NextToken is
// left nil so callers observe exactly one page.
func (m *mockCloudWatchClient) ListMetrics(params *cloudwatch.ListMetricsInput) (*cloudwatch.ListMetricsOutput, error) {
	metric := &cloudwatch.Metric{
		Namespace:  params.Namespace,
		MetricName: aws.String("Latency"),
		Dimensions: []*cloudwatch.Dimension{
			&cloudwatch.Dimension{
				Name:  aws.String("LoadBalancerName"),
				Value: aws.String("p-example"),
			},
		},
	}

	result := &cloudwatch.ListMetricsOutput{
		Metrics: []*cloudwatch.Metric{metric},
	}
	return result, nil
}
// GetMetricStatistics returns one fixed datapoint with all five statistics
// populated and unit "Seconds", timestamped with the request's EndTime so
// tests can predict the emitted metric time.
func (m *mockCloudWatchClient) GetMetricStatistics(params *cloudwatch.GetMetricStatisticsInput) (*cloudwatch.GetMetricStatisticsOutput, error) {
	dataPoint := &cloudwatch.Datapoint{
		Timestamp:   params.EndTime,
		Minimum:     aws.Float64(0.1),
		Maximum:     aws.Float64(0.3),
		Average:     aws.Float64(0.2),
		Sum:         aws.Float64(123),
		SampleCount: aws.Float64(100),
		Unit:        aws.String("Seconds"),
	}
	result := &cloudwatch.GetMetricStatisticsOutput{
		Label:      aws.String("Latency"),
		Datapoints: []*cloudwatch.Datapoint{dataPoint},
	}
	return result, nil
}
// TestGather wires the plugin to the mock client and verifies the single
// mocked datapoint is flattened into the expected measurement name
// (cloudwatch_aws_elb), snake_cased statistic fields, and tags.
func TestGather(t *testing.T) {
	duration, _ := time.ParseDuration("1m")
	internalDuration := internal.Duration{
		Duration: duration,
	}
	c := &CloudWatch{
		Region:    "us-east-1",
		Namespace: "AWS/ELB",
		Delay:     internalDuration,
		Period:    internalDuration,
	}

	var acc testutil.Accumulator
	c.client = &mockCloudWatchClient{}

	c.Gather(&acc)

	// Expected fields mirror the mock's fixed datapoint values.
	fields := map[string]interface{}{}
	fields["latency_minimum"] = 0.1
	fields["latency_maximum"] = 0.3
	fields["latency_average"] = 0.2
	fields["latency_sum"] = 123.0
	fields["latency_sample_count"] = 100.0

	tags := map[string]string{}
	tags["unit"] = "seconds"
	tags["region"] = "us-east-1"
	tags["load_balancer_name"] = "p-example"

	assert.True(t, acc.HasMeasurement("cloudwatch_aws_elb"))
	acc.AssertContainsTaggedFields(t, "cloudwatch_aws_elb", fields, tags)
}
// TestGenerateStatisticsInputParams checks that getStatisticsInput shifts
// the query window back by Delay, makes it Period long, requests all five
// statistics, and converts Period to seconds.
func TestGenerateStatisticsInputParams(t *testing.T) {
	d := &cloudwatch.Dimension{
		Name:  aws.String("LoadBalancerName"),
		Value: aws.String("p-example"),
	}

	m := &cloudwatch.Metric{
		MetricName: aws.String("Latency"),
		Dimensions: []*cloudwatch.Dimension{d},
	}

	duration, _ := time.ParseDuration("1m")
	internalDuration := internal.Duration{
		Duration: duration,
	}

	c := &CloudWatch{
		Namespace: "AWS/ELB",
		Delay:     internalDuration,
		Period:    internalDuration,
	}

	c.initializeCloudWatch()

	now := time.Now()

	params := c.getStatisticsInput(m, now)

	// Window ends Delay before "now" and starts Period before that.
	assert.EqualValues(t, *params.EndTime, now.Add(-c.Delay.Duration))
	assert.EqualValues(t, *params.StartTime, now.Add(-c.Period.Duration).Add(-c.Delay.Duration))
	assert.Len(t, params.Dimensions, 1)
	assert.Len(t, params.Statistics, 5)
	assert.EqualValues(t, *params.Period, 60)
}
// TestMetricsCacheTimeout verifies a freshly-filled cache reports valid and
// flips to invalid once its TTL has elapsed.
func TestMetricsCacheTimeout(t *testing.T) {
	const ttl = 5 * time.Millisecond
	cache := &MetricCache{
		Metrics: []*cloudwatch.Metric{},
		Fetched: time.Now(),
		TTL:     ttl,
	}
	assert.True(t, cache.IsValid(), "cache should be valid right after fetch")
	// time.Sleep guarantees at least ttl elapses, so IsValid must be false.
	time.Sleep(ttl)
	assert.False(t, cache.IsValid(), "cache should expire once TTL elapses")
}

View File

@ -5,11 +5,11 @@ docker containers. You can read Docker's documentation for their remote API
[here](https://docs.docker.com/engine/reference/api/docker_remote_api_v1.20/#get-container-stats-based-on-resource-usage) [here](https://docs.docker.com/engine/reference/api/docker_remote_api_v1.20/#get-container-stats-based-on-resource-usage)
The docker plugin uses the excellent The docker plugin uses the excellent
[fsouza go-dockerclient](https://github.com/fsouza/go-dockerclient) library to [docker engine-api](https://github.com/docker/engine-api) library to
gather stats. Documentation for the library can be found gather stats. Documentation for the library can be found
[here](https://godoc.org/github.com/fsouza/go-dockerclient) and documentation [here](https://godoc.org/github.com/docker/engine-api) and documentation
for the stat structure can be found for the stat structure can be found
[here](https://godoc.org/github.com/fsouza/go-dockerclient#Stats) [here](https://godoc.org/github.com/docker/engine-api/types#Stats)
### Configuration: ### Configuration:

View File

@ -3,6 +3,7 @@ package system
import ( import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"io"
"log" "log"
"regexp" "regexp"
"strconv" "strconv"
@ -10,12 +11,15 @@ import (
"sync" "sync"
"time" "time"
"golang.org/x/net/context"
"github.com/docker/engine-api/client"
"github.com/docker/engine-api/types"
"github.com/influxdata/telegraf" "github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/inputs"
"github.com/fsouza/go-dockerclient"
) )
// Docker object
type Docker struct { type Docker struct {
Endpoint string Endpoint string
ContainerNames []string ContainerNames []string
@ -23,14 +27,14 @@ type Docker struct {
client DockerClient client DockerClient
} }
// DockerClient interface, useful for testing
type DockerClient interface { type DockerClient interface {
// Docker Client wrapper Info(ctx context.Context) (types.Info, error)
// Useful for test ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error)
Info() (*docker.Env, error) ContainerStats(ctx context.Context, containerID string, stream bool) (io.ReadCloser, error)
ListContainers(opts docker.ListContainersOptions) ([]docker.APIContainers, error)
Stats(opts docker.StatsOptions) error
} }
// KB, MB, GB, TB, PB...human friendly
const ( const (
KB = 1000 KB = 1000
MB = 1000 * KB MB = 1000 * KB
@ -52,28 +56,32 @@ var sampleConfig = `
container_names = [] container_names = []
` `
// Description returns input description
func (d *Docker) Description() string { func (d *Docker) Description() string {
return "Read metrics about docker containers" return "Read metrics about docker containers"
} }
// SampleConfig prints sampleConfig
func (d *Docker) SampleConfig() string { return sampleConfig } func (d *Docker) SampleConfig() string { return sampleConfig }
// Gather starts stats collection
func (d *Docker) Gather(acc telegraf.Accumulator) error { func (d *Docker) Gather(acc telegraf.Accumulator) error {
if d.client == nil { if d.client == nil {
var c *docker.Client var c *client.Client
var err error var err error
defaultHeaders := map[string]string{"User-Agent": "engine-api-cli-1.0"}
if d.Endpoint == "ENV" { if d.Endpoint == "ENV" {
c, err = docker.NewClientFromEnv() c, err = client.NewEnvClient()
if err != nil { if err != nil {
return err return err
} }
} else if d.Endpoint == "" { } else if d.Endpoint == "" {
c, err = docker.NewClient("unix:///var/run/docker.sock") c, err = client.NewClient("unix:///var/run/docker.sock", "", nil, defaultHeaders)
if err != nil { if err != nil {
return err return err
} }
} else { } else {
c, err = docker.NewClient(d.Endpoint) c, err = client.NewClient(d.Endpoint, "", nil, defaultHeaders)
if err != nil { if err != nil {
return err return err
} }
@ -88,8 +96,8 @@ func (d *Docker) Gather(acc telegraf.Accumulator) error {
} }
// List containers // List containers
opts := docker.ListContainersOptions{} opts := types.ContainerListOptions{}
containers, err := d.client.ListContainers(opts) containers, err := d.client.ContainerList(context.Background(), opts)
if err != nil { if err != nil {
return err return err
} }
@ -99,7 +107,7 @@ func (d *Docker) Gather(acc telegraf.Accumulator) error {
wg.Add(len(containers)) wg.Add(len(containers))
for _, container := range containers { for _, container := range containers {
go func(c docker.APIContainers) { go func(c types.Container) {
defer wg.Done() defer wg.Done()
err := d.gatherContainer(c, acc) err := d.gatherContainer(c, acc)
if err != nil { if err != nil {
@ -114,23 +122,22 @@ func (d *Docker) Gather(acc telegraf.Accumulator) error {
func (d *Docker) gatherInfo(acc telegraf.Accumulator) error { func (d *Docker) gatherInfo(acc telegraf.Accumulator) error {
// Init vars // Init vars
var driverStatus [][]string
dataFields := make(map[string]interface{}) dataFields := make(map[string]interface{})
metadataFields := make(map[string]interface{}) metadataFields := make(map[string]interface{})
now := time.Now() now := time.Now()
// Get info from docker daemon // Get info from docker daemon
info, err := d.client.Info() info, err := d.client.Info(context.Background())
if err != nil { if err != nil {
return err return err
} }
fields := map[string]interface{}{ fields := map[string]interface{}{
"n_cpus": info.GetInt64("NCPU"), "n_cpus": info.NCPU,
"n_used_file_descriptors": info.GetInt64("NFd"), "n_used_file_descriptors": info.NFd,
"n_containers": info.GetInt64("Containers"), "n_containers": info.Containers,
"n_images": info.GetInt64("Images"), "n_images": info.Images,
"n_goroutines": info.GetInt64("NGoroutines"), "n_goroutines": info.NGoroutines,
"n_listener_events": info.GetInt64("NEventsListener"), "n_listener_events": info.NEventsListener,
} }
// Add metrics // Add metrics
acc.AddFields("docker", acc.AddFields("docker",
@ -138,13 +145,11 @@ func (d *Docker) gatherInfo(acc telegraf.Accumulator) error {
nil, nil,
now) now)
acc.AddFields("docker", acc.AddFields("docker",
map[string]interface{}{"memory_total": info.GetInt64("MemTotal")}, map[string]interface{}{"memory_total": info.MemTotal},
map[string]string{"unit": "bytes"}, map[string]string{"unit": "bytes"},
now) now)
// Get storage metrics // Get storage metrics
driverStatusRaw := []byte(info.Get("DriverStatus")) for _, rawData := range info.DriverStatus {
json.Unmarshal(driverStatusRaw, &driverStatus)
for _, rawData := range driverStatus {
// Try to convert string to int (bytes) // Try to convert string to int (bytes)
value, err := parseSize(rawData[1]) value, err := parseSize(rawData[1])
if err != nil { if err != nil {
@ -159,12 +164,12 @@ func (d *Docker) gatherInfo(acc telegraf.Accumulator) error {
now) now)
} else if strings.HasPrefix(name, "data_space_") { } else if strings.HasPrefix(name, "data_space_") {
// data space // data space
field_name := strings.TrimPrefix(name, "data_space_") fieldName := strings.TrimPrefix(name, "data_space_")
dataFields[field_name] = value dataFields[fieldName] = value
} else if strings.HasPrefix(name, "metadata_space_") { } else if strings.HasPrefix(name, "metadata_space_") {
// metadata space // metadata space
field_name := strings.TrimPrefix(name, "metadata_space_") fieldName := strings.TrimPrefix(name, "metadata_space_")
metadataFields[field_name] = value metadataFields[fieldName] = value
} }
} }
if len(dataFields) > 0 { if len(dataFields) > 0 {
@ -183,9 +188,10 @@ func (d *Docker) gatherInfo(acc telegraf.Accumulator) error {
} }
func (d *Docker) gatherContainer( func (d *Docker) gatherContainer(
container docker.APIContainers, container types.Container,
acc telegraf.Accumulator, acc telegraf.Accumulator,
) error { ) error {
var v *types.StatsJSON
// Parse container name // Parse container name
cname := "unknown" cname := "unknown"
if len(container.Names) > 0 { if len(container.Names) > 0 {
@ -204,28 +210,14 @@ func (d *Docker) gatherContainer(
} }
} }
statChan := make(chan *docker.Stats) r, err := d.client.ContainerStats(context.Background(), container.ID, false)
done := make(chan bool)
statOpts := docker.StatsOptions{
Stream: false,
ID: container.ID,
Stats: statChan,
Done: done,
Timeout: time.Duration(time.Second * 5),
}
go func() {
err := d.client.Stats(statOpts)
if err != nil { if err != nil {
log.Printf("Error getting docker stats: %s\n", err.Error()) log.Printf("Error getting docker stats: %s\n", err.Error())
} }
}() defer r.Close()
dec := json.NewDecoder(r)
stat := <-statChan if err = dec.Decode(&v); err != nil {
close(done) log.Printf("Error decoding: %s\n", err.Error())
if stat == nil {
return nil
} }
// Add labels to tags // Add labels to tags
@ -233,13 +225,13 @@ func (d *Docker) gatherContainer(
tags[k] = v tags[k] = v
} }
gatherContainerStats(stat, acc, tags) gatherContainerStats(v, acc, tags)
return nil return nil
} }
func gatherContainerStats( func gatherContainerStats(
stat *docker.Stats, stat *types.StatsJSON,
acc telegraf.Accumulator, acc telegraf.Accumulator,
tags map[string]string, tags map[string]string,
) { ) {
@ -250,35 +242,35 @@ func gatherContainerStats(
"usage": stat.MemoryStats.Usage, "usage": stat.MemoryStats.Usage,
"fail_count": stat.MemoryStats.Failcnt, "fail_count": stat.MemoryStats.Failcnt,
"limit": stat.MemoryStats.Limit, "limit": stat.MemoryStats.Limit,
"total_pgmafault": stat.MemoryStats.Stats.TotalPgmafault, "total_pgmafault": stat.MemoryStats.Stats["total_pgmajfault"],
"cache": stat.MemoryStats.Stats.Cache, "cache": stat.MemoryStats.Stats["cache"],
"mapped_file": stat.MemoryStats.Stats.MappedFile, "mapped_file": stat.MemoryStats.Stats["mapped_file"],
"total_inactive_file": stat.MemoryStats.Stats.TotalInactiveFile, "total_inactive_file": stat.MemoryStats.Stats["total_inactive_file"],
"pgpgout": stat.MemoryStats.Stats.Pgpgout, "pgpgout": stat.MemoryStats.Stats["pagpgout"],
"rss": stat.MemoryStats.Stats.Rss, "rss": stat.MemoryStats.Stats["rss"],
"total_mapped_file": stat.MemoryStats.Stats.TotalMappedFile, "total_mapped_file": stat.MemoryStats.Stats["total_mapped_file"],
"writeback": stat.MemoryStats.Stats.Writeback, "writeback": stat.MemoryStats.Stats["writeback"],
"unevictable": stat.MemoryStats.Stats.Unevictable, "unevictable": stat.MemoryStats.Stats["unevictable"],
"pgpgin": stat.MemoryStats.Stats.Pgpgin, "pgpgin": stat.MemoryStats.Stats["pgpgin"],
"total_unevictable": stat.MemoryStats.Stats.TotalUnevictable, "total_unevictable": stat.MemoryStats.Stats["total_unevictable"],
"pgmajfault": stat.MemoryStats.Stats.Pgmajfault, "pgmajfault": stat.MemoryStats.Stats["pgmajfault"],
"total_rss": stat.MemoryStats.Stats.TotalRss, "total_rss": stat.MemoryStats.Stats["total_rss"],
"total_rss_huge": stat.MemoryStats.Stats.TotalRssHuge, "total_rss_huge": stat.MemoryStats.Stats["total_rss_huge"],
"total_writeback": stat.MemoryStats.Stats.TotalWriteback, "total_writeback": stat.MemoryStats.Stats["total_write_back"],
"total_inactive_anon": stat.MemoryStats.Stats.TotalInactiveAnon, "total_inactive_anon": stat.MemoryStats.Stats["total_inactive_anon"],
"rss_huge": stat.MemoryStats.Stats.RssHuge, "rss_huge": stat.MemoryStats.Stats["rss_huge"],
"hierarchical_memory_limit": stat.MemoryStats.Stats.HierarchicalMemoryLimit, "hierarchical_memory_limit": stat.MemoryStats.Stats["hierarchical_memory_limit"],
"total_pgfault": stat.MemoryStats.Stats.TotalPgfault, "total_pgfault": stat.MemoryStats.Stats["total_pgfault"],
"total_active_file": stat.MemoryStats.Stats.TotalActiveFile, "total_active_file": stat.MemoryStats.Stats["total_active_file"],
"active_anon": stat.MemoryStats.Stats.ActiveAnon, "active_anon": stat.MemoryStats.Stats["active_anon"],
"total_active_anon": stat.MemoryStats.Stats.TotalActiveAnon, "total_active_anon": stat.MemoryStats.Stats["total_active_anon"],
"total_pgpgout": stat.MemoryStats.Stats.TotalPgpgout, "total_pgpgout": stat.MemoryStats.Stats["total_pgpgout"],
"total_cache": stat.MemoryStats.Stats.TotalCache, "total_cache": stat.MemoryStats.Stats["total_cache"],
"inactive_anon": stat.MemoryStats.Stats.InactiveAnon, "inactive_anon": stat.MemoryStats.Stats["inactive_anon"],
"active_file": stat.MemoryStats.Stats.ActiveFile, "active_file": stat.MemoryStats.Stats["active_file"],
"pgfault": stat.MemoryStats.Stats.Pgfault, "pgfault": stat.MemoryStats.Stats["pgfault"],
"inactive_file": stat.MemoryStats.Stats.InactiveFile, "inactive_file": stat.MemoryStats.Stats["inactive_file"],
"total_pgpgin": stat.MemoryStats.Stats.TotalPgpgin, "total_pgpgin": stat.MemoryStats.Stats["total_pgpgin"],
"usage_percent": calculateMemPercent(stat), "usage_percent": calculateMemPercent(stat),
} }
acc.AddFields("docker_mem", memfields, tags, now) acc.AddFields("docker_mem", memfields, tags, now)
@ -287,7 +279,7 @@ func gatherContainerStats(
"usage_total": stat.CPUStats.CPUUsage.TotalUsage, "usage_total": stat.CPUStats.CPUUsage.TotalUsage,
"usage_in_usermode": stat.CPUStats.CPUUsage.UsageInUsermode, "usage_in_usermode": stat.CPUStats.CPUUsage.UsageInUsermode,
"usage_in_kernelmode": stat.CPUStats.CPUUsage.UsageInKernelmode, "usage_in_kernelmode": stat.CPUStats.CPUUsage.UsageInKernelmode,
"usage_system": stat.CPUStats.SystemCPUUsage, "usage_system": stat.CPUStats.SystemUsage,
"throttling_periods": stat.CPUStats.ThrottlingData.Periods, "throttling_periods": stat.CPUStats.ThrottlingData.Periods,
"throttling_throttled_periods": stat.CPUStats.ThrottlingData.ThrottledPeriods, "throttling_throttled_periods": stat.CPUStats.ThrottlingData.ThrottledPeriods,
"throttling_throttled_time": stat.CPUStats.ThrottlingData.ThrottledTime, "throttling_throttled_time": stat.CPUStats.ThrottlingData.ThrottledTime,
@ -323,7 +315,7 @@ func gatherContainerStats(
gatherBlockIOMetrics(stat, acc, tags, now) gatherBlockIOMetrics(stat, acc, tags, now)
} }
func calculateMemPercent(stat *docker.Stats) float64 { func calculateMemPercent(stat *types.StatsJSON) float64 {
var memPercent = 0.0 var memPercent = 0.0
if stat.MemoryStats.Limit > 0 { if stat.MemoryStats.Limit > 0 {
memPercent = float64(stat.MemoryStats.Usage) / float64(stat.MemoryStats.Limit) * 100.0 memPercent = float64(stat.MemoryStats.Usage) / float64(stat.MemoryStats.Limit) * 100.0
@ -331,11 +323,11 @@ func calculateMemPercent(stat *docker.Stats) float64 {
return memPercent return memPercent
} }
func calculateCPUPercent(stat *docker.Stats) float64 { func calculateCPUPercent(stat *types.StatsJSON) float64 {
var cpuPercent = 0.0 var cpuPercent = 0.0
// calculate the change for the cpu and system usage of the container in between readings // calculate the change for the cpu and system usage of the container in between readings
cpuDelta := float64(stat.CPUStats.CPUUsage.TotalUsage) - float64(stat.PreCPUStats.CPUUsage.TotalUsage) cpuDelta := float64(stat.CPUStats.CPUUsage.TotalUsage) - float64(stat.PreCPUStats.CPUUsage.TotalUsage)
systemDelta := float64(stat.CPUStats.SystemCPUUsage) - float64(stat.PreCPUStats.SystemCPUUsage) systemDelta := float64(stat.CPUStats.SystemUsage) - float64(stat.PreCPUStats.SystemUsage)
if systemDelta > 0.0 && cpuDelta > 0.0 { if systemDelta > 0.0 && cpuDelta > 0.0 {
cpuPercent = (cpuDelta / systemDelta) * float64(len(stat.CPUStats.CPUUsage.PercpuUsage)) * 100.0 cpuPercent = (cpuDelta / systemDelta) * float64(len(stat.CPUStats.CPUUsage.PercpuUsage)) * 100.0
@ -344,7 +336,7 @@ func calculateCPUPercent(stat *docker.Stats) float64 {
} }
func gatherBlockIOMetrics( func gatherBlockIOMetrics(
stat *docker.Stats, stat *types.StatsJSON,
acc telegraf.Accumulator, acc telegraf.Accumulator,
tags map[string]string, tags map[string]string,
now time.Time, now time.Time,
@ -353,7 +345,7 @@ func gatherBlockIOMetrics(
// Make a map of devices to their block io stats // Make a map of devices to their block io stats
deviceStatMap := make(map[string]map[string]interface{}) deviceStatMap := make(map[string]map[string]interface{})
for _, metric := range blkioStats.IOServiceBytesRecursive { for _, metric := range blkioStats.IoServiceBytesRecursive {
device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor) device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
_, ok := deviceStatMap[device] _, ok := deviceStatMap[device]
if !ok { if !ok {
@ -364,7 +356,7 @@ func gatherBlockIOMetrics(
deviceStatMap[device][field] = metric.Value deviceStatMap[device][field] = metric.Value
} }
for _, metric := range blkioStats.IOServicedRecursive { for _, metric := range blkioStats.IoServicedRecursive {
device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor) device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
_, ok := deviceStatMap[device] _, ok := deviceStatMap[device]
if !ok { if !ok {
@ -375,40 +367,38 @@ func gatherBlockIOMetrics(
deviceStatMap[device][field] = metric.Value deviceStatMap[device][field] = metric.Value
} }
for _, metric := range blkioStats.IOQueueRecursive { for _, metric := range blkioStats.IoQueuedRecursive {
device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor) device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
field := fmt.Sprintf("io_queue_recursive_%s", strings.ToLower(metric.Op)) field := fmt.Sprintf("io_queue_recursive_%s", strings.ToLower(metric.Op))
deviceStatMap[device][field] = metric.Value deviceStatMap[device][field] = metric.Value
} }
for _, metric := range blkioStats.IOServiceTimeRecursive { for _, metric := range blkioStats.IoServiceTimeRecursive {
device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor) device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
field := fmt.Sprintf("io_service_time_recursive_%s", strings.ToLower(metric.Op)) field := fmt.Sprintf("io_service_time_recursive_%s", strings.ToLower(metric.Op))
deviceStatMap[device][field] = metric.Value deviceStatMap[device][field] = metric.Value
} }
for _, metric := range blkioStats.IOWaitTimeRecursive { for _, metric := range blkioStats.IoWaitTimeRecursive {
device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor) device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
field := fmt.Sprintf("io_wait_time_%s", strings.ToLower(metric.Op)) field := fmt.Sprintf("io_wait_time_%s", strings.ToLower(metric.Op))
deviceStatMap[device][field] = metric.Value deviceStatMap[device][field] = metric.Value
} }
for _, metric := range blkioStats.IOMergedRecursive { for _, metric := range blkioStats.IoMergedRecursive {
device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor) device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
field := fmt.Sprintf("io_merged_recursive_%s", strings.ToLower(metric.Op)) field := fmt.Sprintf("io_merged_recursive_%s", strings.ToLower(metric.Op))
deviceStatMap[device][field] = metric.Value deviceStatMap[device][field] = metric.Value
} }
for _, metric := range blkioStats.IOTimeRecursive { for _, metric := range blkioStats.IoTimeRecursive {
device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor) device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
field := fmt.Sprintf("io_time_recursive_%s", strings.ToLower(metric.Op)) deviceStatMap[device]["io_time_recursive"] = metric.Value
deviceStatMap[device][field] = metric.Value
} }
for _, metric := range blkioStats.SectorsRecursive { for _, metric := range blkioStats.SectorsRecursive {
device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor) device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
field := fmt.Sprintf("sectors_recursive_%s", strings.ToLower(metric.Op)) deviceStatMap[device]["sectors_recursive"] = metric.Value
deviceStatMap[device][field] = metric.Value
} }
for device, fields := range deviceStatMap { for device, fields := range deviceStatMap {

View File

@ -1,13 +1,18 @@
package system package system
import ( import (
"encoding/json" "io"
"io/ioutil"
"strings"
"testing" "testing"
"time" "time"
"golang.org/x/net/context"
"github.com/docker/engine-api/types"
"github.com/docker/engine-api/types/registry"
"github.com/influxdata/telegraf/testutil" "github.com/influxdata/telegraf/testutil"
"github.com/fsouza/go-dockerclient"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
@ -114,58 +119,58 @@ func TestDockerGatherContainerStats(t *testing.T) {
acc.AssertContainsTaggedFields(t, "docker_cpu", cpu1fields, cputags) acc.AssertContainsTaggedFields(t, "docker_cpu", cpu1fields, cputags)
} }
func testStats() *docker.Stats { func testStats() *types.StatsJSON {
stats := &docker.Stats{ stats := &types.StatsJSON{}
Read: time.Now(), stats.Read = time.Now()
Networks: make(map[string]docker.NetworkStats), stats.Networks = make(map[string]types.NetworkStats)
}
stats.CPUStats.CPUUsage.PercpuUsage = []uint64{1, 1002} stats.CPUStats.CPUUsage.PercpuUsage = []uint64{1, 1002}
stats.CPUStats.CPUUsage.UsageInUsermode = 100 stats.CPUStats.CPUUsage.UsageInUsermode = 100
stats.CPUStats.CPUUsage.TotalUsage = 500 stats.CPUStats.CPUUsage.TotalUsage = 500
stats.CPUStats.CPUUsage.UsageInKernelmode = 200 stats.CPUStats.CPUUsage.UsageInKernelmode = 200
stats.CPUStats.SystemCPUUsage = 100 stats.CPUStats.SystemUsage = 100
stats.CPUStats.ThrottlingData.Periods = 1 stats.CPUStats.ThrottlingData.Periods = 1
stats.PreCPUStats.CPUUsage.TotalUsage = 400 stats.PreCPUStats.CPUUsage.TotalUsage = 400
stats.PreCPUStats.SystemCPUUsage = 50 stats.PreCPUStats.SystemUsage = 50
stats.MemoryStats.Stats.TotalPgmafault = 0 stats.MemoryStats.Stats = make(map[string]uint64)
stats.MemoryStats.Stats.Cache = 0 stats.MemoryStats.Stats["total_pgmajfault"] = 0
stats.MemoryStats.Stats.MappedFile = 0 stats.MemoryStats.Stats["cache"] = 0
stats.MemoryStats.Stats.TotalInactiveFile = 0 stats.MemoryStats.Stats["mapped_file"] = 0
stats.MemoryStats.Stats.Pgpgout = 0 stats.MemoryStats.Stats["total_inactive_file"] = 0
stats.MemoryStats.Stats.Rss = 0 stats.MemoryStats.Stats["pagpgout"] = 0
stats.MemoryStats.Stats.TotalMappedFile = 0 stats.MemoryStats.Stats["rss"] = 0
stats.MemoryStats.Stats.Writeback = 0 stats.MemoryStats.Stats["total_mapped_file"] = 0
stats.MemoryStats.Stats.Unevictable = 0 stats.MemoryStats.Stats["writeback"] = 0
stats.MemoryStats.Stats.Pgpgin = 0 stats.MemoryStats.Stats["unevictable"] = 0
stats.MemoryStats.Stats.TotalUnevictable = 0 stats.MemoryStats.Stats["pgpgin"] = 0
stats.MemoryStats.Stats.Pgmajfault = 0 stats.MemoryStats.Stats["total_unevictable"] = 0
stats.MemoryStats.Stats.TotalRss = 44 stats.MemoryStats.Stats["pgmajfault"] = 0
stats.MemoryStats.Stats.TotalRssHuge = 444 stats.MemoryStats.Stats["total_rss"] = 44
stats.MemoryStats.Stats.TotalWriteback = 55 stats.MemoryStats.Stats["total_rss_huge"] = 444
stats.MemoryStats.Stats.TotalInactiveAnon = 0 stats.MemoryStats.Stats["total_write_back"] = 55
stats.MemoryStats.Stats.RssHuge = 0 stats.MemoryStats.Stats["total_inactive_anon"] = 0
stats.MemoryStats.Stats.HierarchicalMemoryLimit = 0 stats.MemoryStats.Stats["rss_huge"] = 0
stats.MemoryStats.Stats.TotalPgfault = 0 stats.MemoryStats.Stats["hierarchical_memory_limit"] = 0
stats.MemoryStats.Stats.TotalActiveFile = 0 stats.MemoryStats.Stats["total_pgfault"] = 0
stats.MemoryStats.Stats.ActiveAnon = 0 stats.MemoryStats.Stats["total_active_file"] = 0
stats.MemoryStats.Stats.TotalActiveAnon = 0 stats.MemoryStats.Stats["active_anon"] = 0
stats.MemoryStats.Stats.TotalPgpgout = 0 stats.MemoryStats.Stats["total_active_anon"] = 0
stats.MemoryStats.Stats.TotalCache = 0 stats.MemoryStats.Stats["total_pgpgout"] = 0
stats.MemoryStats.Stats.InactiveAnon = 0 stats.MemoryStats.Stats["total_cache"] = 0
stats.MemoryStats.Stats.ActiveFile = 1 stats.MemoryStats.Stats["inactive_anon"] = 0
stats.MemoryStats.Stats.Pgfault = 2 stats.MemoryStats.Stats["active_file"] = 1
stats.MemoryStats.Stats.InactiveFile = 3 stats.MemoryStats.Stats["pgfault"] = 2
stats.MemoryStats.Stats.TotalPgpgin = 4 stats.MemoryStats.Stats["inactive_file"] = 3
stats.MemoryStats.Stats["total_pgpgin"] = 4
stats.MemoryStats.MaxUsage = 1001 stats.MemoryStats.MaxUsage = 1001
stats.MemoryStats.Usage = 1111 stats.MemoryStats.Usage = 1111
stats.MemoryStats.Failcnt = 1 stats.MemoryStats.Failcnt = 1
stats.MemoryStats.Limit = 2000 stats.MemoryStats.Limit = 2000
stats.Networks["eth0"] = docker.NetworkStats{ stats.Networks["eth0"] = types.NetworkStats{
RxDropped: 1, RxDropped: 1,
RxBytes: 2, RxBytes: 2,
RxErrors: 3, RxErrors: 3,
@ -176,23 +181,23 @@ func testStats() *docker.Stats {
TxBytes: 4, TxBytes: 4,
} }
sbr := docker.BlkioStatsEntry{ sbr := types.BlkioStatEntry{
Major: 6, Major: 6,
Minor: 0, Minor: 0,
Op: "read", Op: "read",
Value: 100, Value: 100,
} }
sr := docker.BlkioStatsEntry{ sr := types.BlkioStatEntry{
Major: 6, Major: 6,
Minor: 0, Minor: 0,
Op: "write", Op: "write",
Value: 101, Value: 101,
} }
stats.BlkioStats.IOServiceBytesRecursive = append( stats.BlkioStats.IoServiceBytesRecursive = append(
stats.BlkioStats.IOServiceBytesRecursive, sbr) stats.BlkioStats.IoServiceBytesRecursive, sbr)
stats.BlkioStats.IOServicedRecursive = append( stats.BlkioStats.IoServicedRecursive = append(
stats.BlkioStats.IOServicedRecursive, sr) stats.BlkioStats.IoServicedRecursive, sr)
return stats return stats
} }
@ -200,35 +205,78 @@ func testStats() *docker.Stats {
type FakeDockerClient struct { type FakeDockerClient struct {
} }
func (d FakeDockerClient) Info() (*docker.Env, error) { func (d FakeDockerClient) Info(ctx context.Context) (types.Info, error) {
env := docker.Env{"Containers=108", "OomKillDisable=false", "SystemTime=2016-02-24T00:55:09.15073105-05:00", "NEventsListener=0", "ID=5WQQ:TFWR:FDNG:OKQ3:37Y4:FJWG:QIKK:623T:R3ME:QTKB:A7F7:OLHD", "Debug=false", "LoggingDriver=json-file", "KernelVersion=4.3.0-1-amd64", "IndexServerAddress=https://index.docker.io/v1/", "MemTotal=3840757760", "Images=199", "CpuCfsQuota=true", "Name=absol", "SwapLimit=false", "IPv4Forwarding=true", "ExecutionDriver=native-0.2", "InitSha1=23a51f3c916d2b5a3bbb31caf301fd2d14edd518", "ExperimentalBuild=false", "CpuCfsPeriod=true", "RegistryConfig={\"IndexConfigs\":{\"docker.io\":{\"Mirrors\":null,\"Name\":\"docker.io\",\"Official\":true,\"Secure\":true}},\"InsecureRegistryCIDRs\":[\"127.0.0.0/8\"],\"Mirrors\":null}", "OperatingSystem=Linux Mint LMDE (containerized)", "BridgeNfIptables=true", "HttpsProxy=", "Labels=null", "MemoryLimit=false", "DriverStatus=[[\"Pool Name\",\"docker-8:1-1182287-pool\"],[\"Pool Blocksize\",\"65.54 kB\"],[\"Backing Filesystem\",\"extfs\"],[\"Data file\",\"/dev/loop0\"],[\"Metadata file\",\"/dev/loop1\"],[\"Data Space Used\",\"17.3 GB\"],[\"Data Space Total\",\"107.4 GB\"],[\"Data Space Available\",\"36.53 GB\"],[\"Metadata Space Used\",\"20.97 MB\"],[\"Metadata Space Total\",\"2.147 GB\"],[\"Metadata Space Available\",\"2.127 GB\"],[\"Udev Sync Supported\",\"true\"],[\"Deferred Removal Enabled\",\"false\"],[\"Data loop file\",\"/var/lib/docker/devicemapper/devicemapper/data\"],[\"Metadata loop file\",\"/var/lib/docker/devicemapper/devicemapper/metadata\"],[\"Library Version\",\"1.02.115 (2016-01-25)\"]]", "NFd=19", "HttpProxy=", "Driver=devicemapper", "NGoroutines=39", "InitPath=/usr/lib/docker.io/dockerinit", "NCPU=4", "DockerRootDir=/var/lib/docker", "NoProxy=", "BridgeNfIp6tables=true"} env := types.Info{
return &env, nil Containers: 108,
OomKillDisable: false,
SystemTime: "2016-02-24T00:55:09.15073105-05:00",
NEventsListener: 0,
ID: "5WQQ:TFWR:FDNG:OKQ3:37Y4:FJWG:QIKK:623T:R3ME:QTKB:A7F7:OLHD",
Debug: false,
LoggingDriver: "json-file",
KernelVersion: "4.3.0-1-amd64",
IndexServerAddress: "https://index.docker.io/v1/",
MemTotal: 3840757760,
Images: 199,
CPUCfsQuota: true,
Name: "absol",
SwapLimit: false,
IPv4Forwarding: true,
ExecutionDriver: "native-0.2",
ExperimentalBuild: false,
CPUCfsPeriod: true,
RegistryConfig: &registry.ServiceConfig{
IndexConfigs: map[string]*registry.IndexInfo{
"docker.io": {
Name: "docker.io",
Mirrors: []string{},
Official: true,
Secure: true,
},
}, InsecureRegistryCIDRs: []*registry.NetIPNet{{IP: []byte{127, 0, 0, 0}, Mask: []byte{255, 0, 0, 0}}}, Mirrors: []string{}},
OperatingSystem: "Linux Mint LMDE (containerized)",
BridgeNfIptables: true,
HTTPSProxy: "",
Labels: []string{},
MemoryLimit: false,
DriverStatus: [][2]string{{"Pool Name", "docker-8:1-1182287-pool"}, {"Pool Blocksize", "65.54 kB"}, {"Backing Filesystem", "extfs"}, {"Data file", "/dev/loop0"}, {"Metadata file", "/dev/loop1"}, {"Data Space Used", "17.3 GB"}, {"Data Space Total", "107.4 GB"}, {"Data Space Available", "36.53 GB"}, {"Metadata Space Used", "20.97 MB"}, {"Metadata Space Total", "2.147 GB"}, {"Metadata Space Available", "2.127 GB"}, {"Udev Sync Supported", "true"}, {"Deferred Removal Enabled", "false"}, {"Data loop file", "/var/lib/docker/devicemapper/devicemapper/data"}, {"Metadata loop file", "/var/lib/docker/devicemapper/devicemapper/metadata"}, {"Library Version", "1.02.115 (2016-01-25)"}},
NFd: 19,
HTTPProxy: "",
Driver: "devicemapper",
NGoroutines: 39,
NCPU: 4,
DockerRootDir: "/var/lib/docker",
NoProxy: "",
BridgeNfIP6tables: true,
}
return env, nil
} }
func (d FakeDockerClient) ListContainers(opts docker.ListContainersOptions) ([]docker.APIContainers, error) { func (d FakeDockerClient) ContainerList(octx context.Context, options types.ContainerListOptions) ([]types.Container, error) {
container1 := docker.APIContainers{ container1 := types.Container{
ID: "e2173b9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296b7dfb", ID: "e2173b9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296b7dfb",
Names: []string{"/etcd"},
Image: "quay.io/coreos/etcd:v2.2.2", Image: "quay.io/coreos/etcd:v2.2.2",
Command: "/etcd -name etcd0 -advertise-client-urls http://localhost:2379 -listen-client-urls http://0.0.0.0:2379", Command: "/etcd -name etcd0 -advertise-client-urls http://localhost:2379 -listen-client-urls http://0.0.0.0:2379",
Created: 1455941930, Created: 1455941930,
Status: "Up 4 hours", Status: "Up 4 hours",
Ports: []docker.APIPort{ Ports: []types.Port{
docker.APIPort{ types.Port{
PrivatePort: 7001, PrivatePort: 7001,
PublicPort: 0, PublicPort: 0,
Type: "tcp", Type: "tcp",
}, },
docker.APIPort{ types.Port{
PrivatePort: 4001, PrivatePort: 4001,
PublicPort: 0, PublicPort: 0,
Type: "tcp", Type: "tcp",
}, },
docker.APIPort{ types.Port{
PrivatePort: 2380, PrivatePort: 2380,
PublicPort: 0, PublicPort: 0,
Type: "tcp", Type: "tcp",
}, },
docker.APIPort{ types.Port{
PrivatePort: 2379, PrivatePort: 2379,
PublicPort: 2379, PublicPort: 2379,
Type: "tcp", Type: "tcp",
@ -237,31 +285,31 @@ func (d FakeDockerClient) ListContainers(opts docker.ListContainersOptions) ([]d
}, },
SizeRw: 0, SizeRw: 0,
SizeRootFs: 0, SizeRootFs: 0,
Names: []string{"/etcd"},
} }
container2 := docker.APIContainers{ container2 := types.Container{
ID: "b7dfbb9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296e2173", ID: "b7dfbb9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296e2173",
Names: []string{"/etcd2"},
Image: "quay.io/coreos/etcd:v2.2.2", Image: "quay.io/coreos/etcd:v2.2.2",
Command: "/etcd -name etcd2 -advertise-client-urls http://localhost:2379 -listen-client-urls http://0.0.0.0:2379", Command: "/etcd -name etcd2 -advertise-client-urls http://localhost:2379 -listen-client-urls http://0.0.0.0:2379",
Created: 1455941933, Created: 1455941933,
Status: "Up 4 hours", Status: "Up 4 hours",
Ports: []docker.APIPort{ Ports: []types.Port{
docker.APIPort{ types.Port{
PrivatePort: 7002, PrivatePort: 7002,
PublicPort: 0, PublicPort: 0,
Type: "tcp", Type: "tcp",
}, },
docker.APIPort{ types.Port{
PrivatePort: 4002, PrivatePort: 4002,
PublicPort: 0, PublicPort: 0,
Type: "tcp", Type: "tcp",
}, },
docker.APIPort{ types.Port{
PrivatePort: 2381, PrivatePort: 2381,
PublicPort: 0, PublicPort: 0,
Type: "tcp", Type: "tcp",
}, },
docker.APIPort{ types.Port{
PrivatePort: 2382, PrivatePort: 2382,
PublicPort: 2382, PublicPort: 2382,
Type: "tcp", Type: "tcp",
@ -270,21 +318,19 @@ func (d FakeDockerClient) ListContainers(opts docker.ListContainersOptions) ([]d
}, },
SizeRw: 0, SizeRw: 0,
SizeRootFs: 0, SizeRootFs: 0,
Names: []string{"/etcd2"},
} }
containers := []docker.APIContainers{container1, container2} containers := []types.Container{container1, container2}
return containers, nil return containers, nil
//#{e6a96c84ca91a5258b7cb752579fb68826b68b49ff957487695cd4d13c343b44 titilambert/snmpsim /bin/sh -c 'snmpsimd --agent-udpv4-endpoint=0.0.0.0:31161 --process-user=root --process-group=user' 1455724831 Up 4 hours [{31161 31161 udp 0.0.0.0}] 0 0 [/snmp] map[]}]2016/02/24 01:05:01 Gathered metrics, (3s interval), from 1 inputs in 1.233836656s //#{e6a96c84ca91a5258b7cb752579fb68826b68b49ff957487695cd4d13c343b44 titilambert/snmpsim /bin/sh -c 'snmpsimd --agent-udpv4-endpoint=0.0.0.0:31161 --process-user=root --process-group=user' 1455724831 Up 4 hours [{31161 31161 udp 0.0.0.0}] 0 0 [/snmp] map[]}]2016/02/24 01:05:01 Gathered metrics, (3s interval), from 1 inputs in 1.233836656s
} }
func (d FakeDockerClient) Stats(opts docker.StatsOptions) error { func (d FakeDockerClient) ContainerStats(ctx context.Context, containerID string, stream bool) (io.ReadCloser, error) {
var stat io.ReadCloser
jsonStat := `{"read":"2016-02-24T11:42:27.472459608-05:00","memory_stats":{"stats":{},"limit":18935443456},"blkio_stats":{"io_service_bytes_recursive":[{"major":252,"minor":1,"op":"Read","value":753664},{"major":252,"minor":1,"op":"Write"},{"major":252,"minor":1,"op":"Sync"},{"major":252,"minor":1,"op":"Async","value":753664},{"major":252,"minor":1,"op":"Total","value":753664}],"io_serviced_recursive":[{"major":252,"minor":1,"op":"Read","value":26},{"major":252,"minor":1,"op":"Write"},{"major":252,"minor":1,"op":"Sync"},{"major":252,"minor":1,"op":"Async","value":26},{"major":252,"minor":1,"op":"Total","value":26}]},"cpu_stats":{"cpu_usage":{"percpu_usage":[17871,4959158,1646137,1231652,11829401,244656,369972,0],"usage_in_usermode":10000000,"total_usage":20298847},"system_cpu_usage":24052607520000000,"throttling_data":{}},"precpu_stats":{"cpu_usage":{"percpu_usage":[17871,4959158,1646137,1231652,11829401,244656,369972,0],"usage_in_usermode":10000000,"total_usage":20298847},"system_cpu_usage":24052599550000000,"throttling_data":{}}}` jsonStat := 
`{"read":"2016-02-24T11:42:27.472459608-05:00","memory_stats":{"stats":{},"limit":18935443456},"blkio_stats":{"io_service_bytes_recursive":[{"major":252,"minor":1,"op":"Read","value":753664},{"major":252,"minor":1,"op":"Write"},{"major":252,"minor":1,"op":"Sync"},{"major":252,"minor":1,"op":"Async","value":753664},{"major":252,"minor":1,"op":"Total","value":753664}],"io_serviced_recursive":[{"major":252,"minor":1,"op":"Read","value":26},{"major":252,"minor":1,"op":"Write"},{"major":252,"minor":1,"op":"Sync"},{"major":252,"minor":1,"op":"Async","value":26},{"major":252,"minor":1,"op":"Total","value":26}]},"cpu_stats":{"cpu_usage":{"percpu_usage":[17871,4959158,1646137,1231652,11829401,244656,369972,0],"usage_in_usermode":10000000,"total_usage":20298847},"system_cpu_usage":24052607520000000,"throttling_data":{}},"precpu_stats":{"cpu_usage":{"percpu_usage":[17871,4959158,1646137,1231652,11829401,244656,369972,0],"usage_in_usermode":10000000,"total_usage":20298847},"system_cpu_usage":24052599550000000,"throttling_data":{}}}`
var stat docker.Stats stat = ioutil.NopCloser(strings.NewReader(jsonStat))
json.Unmarshal([]byte(jsonStat), &stat) return stat, nil
opts.Stats <- &stat
return nil
} }
func TestDockerGatherInfo(t *testing.T) { func TestDockerGatherInfo(t *testing.T) {
@ -299,12 +345,12 @@ func TestDockerGatherInfo(t *testing.T) {
acc.AssertContainsTaggedFields(t, acc.AssertContainsTaggedFields(t,
"docker", "docker",
map[string]interface{}{ map[string]interface{}{
"n_listener_events": int64(0), "n_listener_events": int(0),
"n_cpus": int64(4), "n_cpus": int(4),
"n_used_file_descriptors": int64(19), "n_used_file_descriptors": int(19),
"n_containers": int64(108), "n_containers": int(108),
"n_images": int64(199), "n_images": int(199),
"n_goroutines": int64(39), "n_goroutines": int(39),
}, },
map[string]string{}, map[string]string{},
) )

View File

@ -85,7 +85,7 @@ func (d *Dovecot) gatherServer(addr string, acc telegraf.Accumulator, doms map[s
// Extend connection // Extend connection
c.SetDeadline(time.Now().Add(defaultTimeout)) c.SetDeadline(time.Now().Add(defaultTimeout))
c.Write([]byte("EXPORT\tdomain\n\n")) c.Write([]byte("EXPORT\tdomain\n"))
var buf bytes.Buffer var buf bytes.Buffer
io.Copy(&buf, c) io.Copy(&buf, c)
// buf := bufio.NewReader(c) // buf := bufio.NewReader(c)

View File

@ -2,13 +2,6 @@
Please also see: [Telegraf Input Data Formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md) Please also see: [Telegraf Input Data Formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md)
The exec input plugin can execute arbitrary commands which output:
* JSON [javascript object notation](http://www.json.org/)
* InfluxDB [line-protocol](https://docs.influxdata.com/influxdb/v0.10/write_protocols/line/)
* Graphite [graphite-protocol](http://graphite.readthedocs.org/en/latest/feeding-carbon.html)
### Example 1 - JSON ### Example 1 - JSON
#### Configuration #### Configuration
@ -22,7 +15,7 @@ are configured for ```[[inputs.exec]]``` in JSON format.
# Shell/commands array # Shell/commands array
commands = ["/tmp/test.sh", "/tmp/test2.sh"] commands = ["/tmp/test.sh", "/tmp/test2.sh"]
# Data format to consume. This can be "json", "influx" or "graphite" (line-protocol) # Data format to consume.
# NOTE json only reads numerical measurements, strings and booleans are ignored. # NOTE json only reads numerical measurements, strings and booleans are ignored.
data_format = "json" data_format = "json"
@ -94,7 +87,7 @@ in influx line-protocol format.
# command = "/usr/bin/line_protocol_collector" # command = "/usr/bin/line_protocol_collector"
commands = ["/usr/bin/line_protocol_collector","/tmp/test2.sh"] commands = ["/usr/bin/line_protocol_collector","/tmp/test2.sh"]
# Data format to consume. This can be "json" or "influx" (line-protocol) # Data format to consume.
# NOTE json only reads numerical measurements, strings and booleans are ignored. # NOTE json only reads numerical measurements, strings and booleans are ignored.
data_format = "influx" data_format = "influx"
``` ```
@ -121,8 +114,8 @@ Each line must end in \n, just as the Influx line protocol does.
We can also change the data_format to "graphite" to use the metrics collecting scripts such as (compatible with graphite): We can also change the data_format to "graphite" to use the metrics collecting scripts such as (compatible with graphite):
* Nagios [Mertics Plugins] (https://exchange.nagios.org/directory/Plugins) * Nagios [Metrics Plugins](https://exchange.nagios.org/directory/Plugins)
* Sensu [Mertics Plugins] (https://github.com/sensu-plugins) * Sensu [Metrics Plugins](https://github.com/sensu-plugins)
In this example a script called /tmp/test.sh and a script called /tmp/test2.sh are configured for [[inputs.exec]] in graphite format. In this example a script called /tmp/test.sh and a script called /tmp/test2.sh are configured for [[inputs.exec]] in graphite format.
@ -133,7 +126,7 @@ In this example a script called /tmp/test.sh and a script called /tmp/test2.sh a
# Shell/commands array # Shell/commands array
commands = ["/tmp/test.sh","/tmp/test2.sh"] commands = ["/tmp/test.sh","/tmp/test2.sh"]
# Data format to consume. This can be "json", "influx" or "graphite" (line-protocol) # Data format to consume.
# NOTE json only reads numerical measurements, strings and booleans are ignored. # NOTE json only reads numerical measurements, strings and booleans are ignored.
data_format = "graphite" data_format = "graphite"
@ -186,5 +179,5 @@ sensu.metric.net.server0.eth0.rx_dropped 0 1444234982
The templates configuration will be used to parse the graphite metrics to support influxdb/opentsdb tagging store engines. The templates configuration will be used to parse the graphite metrics to support influxdb/opentsdb tagging store engines.
More detail information about templates, please refer to [The graphite Input] (https://github.com/influxdata/influxdb/blob/master/services/graphite/README.md) More detail information about templates, please refer to [The graphite Input](https://github.com/influxdata/influxdb/blob/master/services/graphite/README.md)

View File

@ -0,0 +1,44 @@
# HTTP Response Input Plugin
This input plugin will test HTTP/HTTPS connections.
### Configuration:
```
# Check the response status of an HTTP/HTTPS endpoint
[[inputs.http_response]]
## Server address (default http://localhost)
address = "http://github.com"
## Set response_timeout (default 5 seconds)
response_timeout = 5
## HTTP Request Method
method = "GET"
## HTTP Request Headers
[inputs.http_response.headers]
Host = "github.com"
## Whether to follow redirects from the server (defaults to false)
follow_redirects = true
## Optional HTTP Request Body
body = '''
{'fake':'data'}
'''
```
### Measurements & Fields:
- http_response
- response_time (float, seconds)
- http_response_code (int) #The code received
### Tags:
- All measurements have the following tags:
- server
- method
### Example Output:
```
$ ./telegraf -config telegraf.conf -input-filter http_response -test
http_response,method=GET,server=http://www.github.com http_response_code=200i,response_time=6.223266528 1459419354977857955
```

View File

@ -0,0 +1,154 @@
package http_response
import (
"errors"
"io"
"net/http"
"net/url"
"strings"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
)
// HTTPResponse struct
type HTTPResponse struct {
	Address         string            // URL to request (default "http://localhost")
	Body            string            // optional request body to send
	Method          string            // HTTP method to use (default "GET")
	ResponseTimeout int               // client timeout in seconds (default 5)
	Headers         map[string]string // extra request headers
	FollowRedirects bool              // follow redirects instead of reporting the first response
}
// Description returns the plugin Description
func (h *HTTPResponse) Description() string {
	const description = "HTTP/HTTPS request given an address a method and a timeout"
	return description
}
// sampleConfig is the example TOML snippet returned by SampleConfig.
var sampleConfig = `
  ## Server address (default http://localhost)
  address = "http://github.com"
  ## Set response_timeout (default 5 seconds)
  response_timeout = 5
  ## HTTP Request Method
  method = "GET"
  ## Whether to follow redirects from the server (defaults to false)
  follow_redirects = true
  ## HTTP Request Headers (all values must be strings)
  # [inputs.http_response.headers]
  #   Host = "github.com"
  ## Optional HTTP Request Body
  # body = '''
  # {'fake':'data'}
  # '''
`
// SampleConfig returns the plugin SampleConfig
func (h *HTTPResponse) SampleConfig() string {
	cfg := sampleConfig
	return cfg
}
// ErrRedirectAttempted indicates that a redirect occurred
var ErrRedirectAttempted = errors.New("redirect")
// CreateHttpClient creates an http client which will timeout at the specified
// timeout period and can follow redirects if specified
func CreateHttpClient(followRedirects bool, ResponseTimeout time.Duration) *http.Client {
client := &http.Client{
Timeout: time.Second * ResponseTimeout,
}
if followRedirects == false {
client.CheckRedirect = func(req *http.Request, via []*http.Request) error {
return ErrRedirectAttempted
}
}
return client
}
// CreateHeaders converts a map of header name/value strings into an
// http.Header object.
func CreateHeaders(headers map[string]string) http.Header {
	result := make(http.Header, len(headers))
	for name, value := range headers {
		result.Add(name, value)
	}
	return result
}
// HTTPGather performs the configured request and returns the gathered
// fields (response_time in seconds and http_response_code), or an error
// if the request could not be completed.
func (h *HTTPResponse) HTTPGather() (map[string]interface{}, error) {
	// Prepare fields
	fields := make(map[string]interface{})
	client := CreateHttpClient(h.FollowRedirects, time.Duration(h.ResponseTimeout))

	var body io.Reader
	if h.Body != "" {
		body = strings.NewReader(h.Body)
	}
	request, err := http.NewRequest(h.Method, h.Address, body)
	if err != nil {
		return nil, err
	}
	request.Header = CreateHeaders(h.Headers)

	// Start Timer
	start := time.Now()
	resp, err := client.Do(request)
	if err != nil {
		if h.FollowRedirects {
			return nil, err
		}
		// With redirects disabled, the client aborts the redirect with
		// ErrRedirectAttempted (wrapped in a *url.Error) while still
		// returning the response; treat that case as success.
		if urlError, ok := err.(*url.Error); ok &&
			urlError.Err == ErrRedirectAttempted {
			err = nil
		} else {
			return nil, err
		}
	}
	// Close the body so the underlying connection can be reused; the
	// original code leaked it.
	defer resp.Body.Close()

	fields["response_time"] = time.Since(start).Seconds()
	fields["http_response_code"] = resp.StatusCode
	return fields, nil
}
// Gather applies default values, validates the target URL, collects all
// metric fields via HTTPGather, and reports them to the accumulator.
func (h *HTTPResponse) Gather(acc telegraf.Accumulator) error {
	// Fill in defaults for anything the user left unset.
	if h.ResponseTimeout < 1 {
		h.ResponseTimeout = 5
	}
	if h.Method == "" {
		h.Method = "GET"
	}
	if h.Address == "" {
		h.Address = "http://localhost"
	}

	// Only http and https targets are supported.
	parsed, err := url.Parse(h.Address)
	if err != nil {
		return err
	}
	switch parsed.Scheme {
	case "http", "https":
	default:
		return errors.New("Only http and https are supported")
	}

	fields, err := h.HTTPGather()
	if err != nil {
		return err
	}

	acc.AddFields("http_response", fields, map[string]string{
		"server": h.Address,
		"method": h.Method,
	})
	return nil
}
// Register the plugin so it can be configured as [[inputs.http_response]].
func init() {
	inputs.Add("http_response", func() telegraf.Input {
		return &HTTPResponse{}
	})
}

View File

@ -0,0 +1,241 @@
package http_response
import (
"fmt"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"io/ioutil"
"net/http"
"net/http/httptest"
"testing"
"time"
)
// TestCreateHeaders verifies that a map of header strings is converted
// into an equivalent http.Header.
func TestCreateHeaders(t *testing.T) {
	input := map[string]string{
		"Accept":        "text/plain",
		"Content-Type":  "application/json",
		"Cache-Control": "no-cache",
	}

	expected := make(http.Header)
	expected.Add("Accept", "text/plain")
	expected.Add("Content-Type", "application/json")
	expected.Add("Cache-Control", "no-cache")

	assert.Equal(t, expected, CreateHeaders(input))
}
func setUpTestMux() http.Handler {
mux := http.NewServeMux()
mux.HandleFunc("/redirect", func(w http.ResponseWriter, req *http.Request) {
http.Redirect(w, req, "/good", http.StatusMovedPermanently)
})
mux.HandleFunc("/good", func(w http.ResponseWriter, req *http.Request) {
fmt.Fprintf(w, "hit the good page!")
})
mux.HandleFunc("/badredirect", func(w http.ResponseWriter, req *http.Request) {
http.Redirect(w, req, "/badredirect", http.StatusMovedPermanently)
})
mux.HandleFunc("/mustbepostmethod", func(w http.ResponseWriter, req *http.Request) {
if req.Method != "POST" {
http.Error(w, "method wasn't post", http.StatusMethodNotAllowed)
return
}
fmt.Fprintf(w, "used post correctly!")
})
mux.HandleFunc("/musthaveabody", func(w http.ResponseWriter, req *http.Request) {
body, err := ioutil.ReadAll(req.Body)
req.Body.Close()
if err != nil {
http.Error(w, "couldn't read request body", http.StatusBadRequest)
return
}
if string(body) == "" {
http.Error(w, "body was empty", http.StatusBadRequest)
return
}
fmt.Fprintf(w, "sent a body!")
})
mux.HandleFunc("/twosecondnap", func(w http.ResponseWriter, req *http.Request) {
time.Sleep(time.Second * 2)
return
})
return mux
}
// TestFields checks that a plain successful request produces both
// expected fields: an http_response_code of 200 and a response_time.
func TestFields(t *testing.T) {
	mux := setUpTestMux()
	ts := httptest.NewServer(mux)
	defer ts.Close()

	h := &HTTPResponse{
		Address:         ts.URL + "/good",
		Body:            "{ 'test': 'data'}",
		Method:          "GET",
		ResponseTimeout: 20,
		Headers: map[string]string{
			"Content-Type": "application/json",
		},
		FollowRedirects: true,
	}
	fields, err := h.HTTPGather()
	require.NoError(t, err)
	assert.NotEmpty(t, fields)
	if assert.NotNil(t, fields["http_response_code"]) {
		assert.Equal(t, http.StatusOK, fields["http_response_code"])
	}
	// response_time depends on wall-clock timing, so only check presence.
	assert.NotNil(t, fields["response_time"])
}
// TestRedirects exercises redirect handling with FollowRedirects=true:
// a single redirect is followed to the 200 page, while a redirect loop
// eventually makes the client give up and return an error.
func TestRedirects(t *testing.T) {
	mux := setUpTestMux()
	ts := httptest.NewServer(mux)
	defer ts.Close()

	// /redirect issues one redirect to /good, which the client follows.
	h := &HTTPResponse{
		Address:         ts.URL + "/redirect",
		Body:            "{ 'test': 'data'}",
		Method:          "GET",
		ResponseTimeout: 20,
		Headers: map[string]string{
			"Content-Type": "application/json",
		},
		FollowRedirects: true,
	}
	fields, err := h.HTTPGather()
	require.NoError(t, err)
	assert.NotEmpty(t, fields)
	if assert.NotNil(t, fields["http_response_code"]) {
		assert.Equal(t, http.StatusOK, fields["http_response_code"])
	}

	// /badredirect redirects to itself forever; the request must fail.
	h = &HTTPResponse{
		Address:         ts.URL + "/badredirect",
		Body:            "{ 'test': 'data'}",
		Method:          "GET",
		ResponseTimeout: 20,
		Headers: map[string]string{
			"Content-Type": "application/json",
		},
		FollowRedirects: true,
	}
	fields, err = h.HTTPGather()
	require.Error(t, err)
}
// TestMethod verifies that the configured HTTP method is used verbatim:
// POST is accepted by the POST-only endpoint, GET is rejected with 405,
// and a lowercase method string is passed through unchanged.
func TestMethod(t *testing.T) {
	mux := setUpTestMux()
	ts := httptest.NewServer(mux)
	defer ts.Close()

	// POST against the POST-only endpoint succeeds.
	h := &HTTPResponse{
		Address:         ts.URL + "/mustbepostmethod",
		Body:            "{ 'test': 'data'}",
		Method:          "POST",
		ResponseTimeout: 20,
		Headers: map[string]string{
			"Content-Type": "application/json",
		},
		FollowRedirects: true,
	}
	fields, err := h.HTTPGather()
	require.NoError(t, err)
	assert.NotEmpty(t, fields)
	if assert.NotNil(t, fields["http_response_code"]) {
		assert.Equal(t, http.StatusOK, fields["http_response_code"])
	}

	// GET is rejected by the endpoint; 405 is still gathered as a field.
	h = &HTTPResponse{
		Address:         ts.URL + "/mustbepostmethod",
		Body:            "{ 'test': 'data'}",
		Method:          "GET",
		ResponseTimeout: 20,
		Headers: map[string]string{
			"Content-Type": "application/json",
		},
		FollowRedirects: true,
	}
	fields, err = h.HTTPGather()
	require.NoError(t, err)
	assert.NotEmpty(t, fields)
	if assert.NotNil(t, fields["http_response_code"]) {
		assert.Equal(t, http.StatusMethodNotAllowed, fields["http_response_code"])
	}

	//check that lowercase methods work correctly
	h = &HTTPResponse{
		Address:         ts.URL + "/mustbepostmethod",
		Body:            "{ 'test': 'data'}",
		Method:          "head",
		ResponseTimeout: 20,
		Headers: map[string]string{
			"Content-Type": "application/json",
		},
		FollowRedirects: true,
	}
	fields, err = h.HTTPGather()
	require.NoError(t, err)
	assert.NotEmpty(t, fields)
	if assert.NotNil(t, fields["http_response_code"]) {
		assert.Equal(t, http.StatusMethodNotAllowed, fields["http_response_code"])
	}
}
// TestBody verifies that a configured request body is actually sent
// (accepted by the body-requiring endpoint) and that omitting it
// yields a 400 from that endpoint.
func TestBody(t *testing.T) {
	mux := setUpTestMux()
	ts := httptest.NewServer(mux)
	defer ts.Close()

	// With a body the endpoint answers 200.
	h := &HTTPResponse{
		Address:         ts.URL + "/musthaveabody",
		Body:            "{ 'test': 'data'}",
		Method:          "GET",
		ResponseTimeout: 20,
		Headers: map[string]string{
			"Content-Type": "application/json",
		},
		FollowRedirects: true,
	}
	fields, err := h.HTTPGather()
	require.NoError(t, err)
	assert.NotEmpty(t, fields)
	if assert.NotNil(t, fields["http_response_code"]) {
		assert.Equal(t, http.StatusOK, fields["http_response_code"])
	}

	// Without a body the endpoint answers 400.
	h = &HTTPResponse{
		Address:         ts.URL + "/musthaveabody",
		Method:          "GET",
		ResponseTimeout: 20,
		Headers: map[string]string{
			"Content-Type": "application/json",
		},
		FollowRedirects: true,
	}
	fields, err = h.HTTPGather()
	require.NoError(t, err)
	assert.NotEmpty(t, fields)
	if assert.NotNil(t, fields["http_response_code"]) {
		assert.Equal(t, http.StatusBadRequest, fields["http_response_code"])
	}
}
// TestTimeout checks that a response slower than ResponseTimeout
// (the endpoint sleeps 2s, the timeout is 1s) surfaces as an error.
func TestTimeout(t *testing.T) {
	mux := setUpTestMux()
	ts := httptest.NewServer(mux)
	defer ts.Close()

	h := &HTTPResponse{
		Address:         ts.URL + "/twosecondnap",
		Body:            "{ 'test': 'data'}",
		Method:          "GET",
		ResponseTimeout: 1,
		Headers: map[string]string{
			"Content-Type": "application/json",
		},
		FollowRedirects: true,
	}
	_, err := h.HTTPGather()
	require.Error(t, err)
}

View File

@ -28,7 +28,7 @@ func NewConnection(server string) *Connection {
if inx1 > 0 { if inx1 > 0 {
security := server[0:inx1] security := server[0:inx1]
connstr = server[inx1+1 : len(server)] connstr = server[inx1+1 : len(server)]
up := strings.Split(security, ":") up := strings.SplitN(security, ":", 2)
conn.Username = up[0] conn.Username = up[0]
conn.Password = up[1] conn.Password = up[1]
} }

View File

@ -1,16 +1,28 @@
# Telegraf plugin: Jolokia # Telegraf plugin: Jolokia
#### Plugin arguments: #### Configuration
- **context** string: Context root used of jolokia url
- **servers** []Server: List of servers ```toml
+ **name** string: Server's logical name [[inputs.jolokia]]
+ **host** string: Server's ip address or hostname ## This is the context root used to compose the jolokia url
+ **port** string: Server's listening port context = "/jolokia/read"
- **metrics** []Metric
+ **name** string: Name of the measure ## List of servers exposing jolokia read service
+ **jmx** string: Jmx path that identifies mbeans attributes [[inputs.jolokia.servers]]
+ **pass** []string: Attributes to retain when collecting values name = "stable"
+ **drop** []string: Attributes to drop when collecting values host = "192.168.103.2"
port = "8180"
# username = "myuser"
# password = "mypassword"
## List of metrics collected on above servers
## Each metric consists of a name, a jmx path and either
## a pass or drop slice attribute.
## This collect all heap memory usage metrics.
[[inputs.jolokia.metrics]]
name = "heap_memory_usage"
jmx = "/java.lang:type=Memory/HeapMemoryUsage"
```
#### Description #### Description
@ -21,31 +33,3 @@ See: https://jolokia.org/
# Measurements: # Measurements:
Jolokia plugin produces one measure for each metric configured, adding Server's `name`, `host` and `port` as tags. Jolokia plugin produces one measure for each metric configured, adding Server's `name`, `host` and `port` as tags.
Given a configuration like:
```ini
[jolokia]
[[jolokia.servers]]
name = "as-service-1"
host = "127.0.0.1"
port = "8080"
[[jolokia.servers]]
name = "as-service-2"
host = "127.0.0.1"
port = "8180"
[[jolokia.metrics]]
name = "heap_memory_usage"
jmx = "/java.lang:type=Memory/HeapMemoryUsage"
pass = ["used", "max"]
```
The collected metrics will be:
```
jolokia_heap_memory_usage name=as-service-1,host=127.0.0.1,port=8080 used=xxx,max=yyy
jolokia_heap_memory_usage name=as-service-2,host=127.0.0.1,port=8180 used=vvv,max=zzz
```

View File

@ -22,7 +22,8 @@ from the same topic in parallel.
## Offset (must be either "oldest" or "newest") ## Offset (must be either "oldest" or "newest")
offset = "oldest" offset = "oldest"
## Data format to consume. This can be "json", "influx" or "graphite" ## Data format to consume.
## Each data format has it's own unique set of configuration options, read ## Each data format has it's own unique set of configuration options, read
## more about them here: ## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md

View File

@ -103,7 +103,7 @@ func (m *MongoDB) gatherServer(server *Server, acc telegraf.Accumulator) error {
dialAddrs[0], err.Error()) dialAddrs[0], err.Error())
} }
dialInfo.Direct = true dialInfo.Direct = true
dialInfo.Timeout = time.Duration(10) * time.Second dialInfo.Timeout = 5 * time.Second
if m.Ssl.Enabled { if m.Ssl.Enabled {
tlsConfig := &tls.Config{} tlsConfig := &tls.Config{}

View File

@ -43,7 +43,7 @@ func testSetup(m *testing.M) {
log.Fatalf("Unable to parse URL (%s), %s\n", dialAddrs[0], err.Error()) log.Fatalf("Unable to parse URL (%s), %s\n", dialAddrs[0], err.Error())
} }
dialInfo.Direct = true dialInfo.Direct = true
dialInfo.Timeout = time.Duration(10) * time.Second dialInfo.Timeout = 5 * time.Second
sess, err := mgo.DialWithInfo(dialInfo) sess, err := mgo.DialWithInfo(dialInfo)
if err != nil { if err != nil {
log.Fatalf("Unable to connect to MongoDB, %s\n", err.Error()) log.Fatalf("Unable to connect to MongoDB, %s\n", err.Error())

View File

@ -35,7 +35,7 @@ The plugin expects messages in the
## Use SSL but skip chain & host verification ## Use SSL but skip chain & host verification
# insecure_skip_verify = false # insecure_skip_verify = false
## Data format to consume. This can be "json", "influx" or "graphite" ## Data format to consume.
## Each data format has it's own unique set of configuration options, read ## Each data format has it's own unique set of configuration options, read
## more about them here: ## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md

View File

@ -23,7 +23,8 @@ from a NATS cluster in parallel.
## Maximum number of metrics to buffer between collection intervals ## Maximum number of metrics to buffer between collection intervals
metric_buffer = 100000 metric_buffer = 100000
## Data format to consume. This can be "json", "influx" or "graphite" ## Data format to consume.
## Each data format has it's own unique set of configuration options, read ## Each data format has it's own unique set of configuration options, read
## more about them here: ## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md

View File

@ -43,6 +43,8 @@ var sampleConfig = `
## Field name prefix ## Field name prefix
prefix = "" prefix = ""
## comment this out if you want raw cpu_time stats
fielddrop = ["cpu_time_*"]
` `
func (_ *Procstat) SampleConfig() string { func (_ *Procstat) SampleConfig() string {

View File

@ -80,10 +80,10 @@ func (p *Prometheus) gatherURL(url string, acc telegraf.Accumulator) error {
var rt http.RoundTripper = &http.Transport{ var rt http.RoundTripper = &http.Transport{
Dial: (&net.Dialer{ Dial: (&net.Dialer{
Timeout: 10 * time.Second, Timeout: 5 * time.Second,
KeepAlive: 30 * time.Second, KeepAlive: 30 * time.Second,
}).Dial, }).Dial,
TLSHandshakeTimeout: 10 * time.Second, TLSHandshakeTimeout: 5 * time.Second,
TLSClientConfig: &tls.Config{ TLSClientConfig: &tls.Config{
InsecureSkipVerify: p.InsecureSkipVerify, InsecureSkipVerify: p.InsecureSkipVerify,
}, },

View File

@ -62,7 +62,7 @@
Using this configuration: Using this configuration:
``` ```
[[inputs.nginx]] [[inputs.redis]]
## specify servers via a url matching: ## specify servers via a url matching:
## [protocol://][:password]@address[:port] ## [protocol://][:password]@address[:port]
## e.g. ## e.g.

View File

@ -18,7 +18,9 @@ import (
) )
const ( const (
UDP_PACKET_SIZE int = 1500 // UDP packet limit, see
// https://en.wikipedia.org/wiki/User_Datagram_Protocol#Packet_structure
UDP_PACKET_SIZE int = 65507
defaultFieldName = "value" defaultFieldName = "value"
@ -157,10 +159,6 @@ const sampleConfig = `
## calculation of percentiles. Raising this limit increases the accuracy ## calculation of percentiles. Raising this limit increases the accuracy
## of percentiles but also increases the memory usage and cpu time. ## of percentiles but also increases the memory usage and cpu time.
percentile_limit = 1000 percentile_limit = 1000
## UDP packet size for the server to listen for. This will depend on the size
## of the packets that the client is sending, which is usually 1500 bytes.
udp_packet_size = 1500
` `
func (_ *Statsd) SampleConfig() string { func (_ *Statsd) SampleConfig() string {
@ -274,12 +272,12 @@ func (s *Statsd) udpListen() error {
} }
log.Println("Statsd listener listening on: ", s.listener.LocalAddr().String()) log.Println("Statsd listener listening on: ", s.listener.LocalAddr().String())
buf := make([]byte, s.UDPPacketSize)
for { for {
select { select {
case <-s.done: case <-s.done:
return nil return nil
default: default:
buf := make([]byte, s.UDPPacketSize)
n, _, err := s.listener.ReadFromUDP(buf) n, _, err := s.listener.ReadFromUDP(buf)
if err != nil && !strings.Contains(err.Error(), "closed network") { if err != nil && !strings.Contains(err.Error(), "closed network") {
log.Printf("ERROR READ: %s\n", err.Error()) log.Printf("ERROR READ: %s\n", err.Error())
@ -300,11 +298,12 @@ func (s *Statsd) udpListen() error {
// single statsd metric into a struct. // single statsd metric into a struct.
func (s *Statsd) parser() error { func (s *Statsd) parser() error {
defer s.wg.Done() defer s.wg.Done()
var packet []byte
for { for {
select { select {
case <-s.done: case <-s.done:
return nil return nil
case packet := <-s.in: case packet = <-s.in:
lines := strings.Split(string(packet), "\n") lines := strings.Split(string(packet), "\n")
for _, line := range lines { for _, line := range lines {
line = strings.TrimSpace(line) line = strings.TrimSpace(line)
@ -631,7 +630,7 @@ func (s *Statsd) Stop() {
func init() { func init() {
inputs.Add("statsd", func() telegraf.Input { inputs.Add("statsd", func() telegraf.Input {
return &Statsd{ return &Statsd{
ConvertNames: true, MetricSeparator: "_",
UDPPacketSize: UDP_PACKET_SIZE, UDPPacketSize: UDP_PACKET_SIZE,
} }
}) })

View File

@ -0,0 +1,447 @@
# sysstat Input Plugin
Collect [sysstat](https://github.com/sysstat/sysstat) metrics - requires the sysstat
package installed.
This plugin collects system metrics with the sysstat collector utility `sadc` and parses
the created binary data file with the `sadf` utility.
### Configuration:
```toml
# Sysstat metrics collector
[[inputs.sysstat]]
## Path to the sadc command.
#
## On Debian and Arch Linux the default path is /usr/lib/sa/sadc whereas
## on RHEL and CentOS the default path is /usr/lib64/sa/sadc
sadc_path = "/usr/lib/sa/sadc" # required
#
#
## Path to the sadf command, if it is not in PATH
# sadf_path = "/usr/bin/sadf"
#
#
## Activities is a list of activities, that are passed as argument to the
## sadc collector utility (e.g: DISK, SNMP etc...)
## The more activities that are added, the more data is collected.
# activities = ["DISK"]
#
#
## Group metrics to measurements.
##
## If group is false each metric will be prefixed with a description
## and represents itself a measurement.
##
## If Group is true, corresponding metrics are grouped to a single measurement.
# group = true
#
#
## Options for the sadf command. The values on the left represent the sadf options and
## the values on the right their description (which are used for grouping and prefixing metrics).
##
## Run 'sar -h' or 'man sar' to find out the supported options for your sysstat version.
[inputs.sysstat.options]
-C = "cpu"
-B = "paging"
-b = "io"
-d = "disk" # requires DISK activity
"-n ALL" = "network"
"-P ALL" = "per_cpu"
-q = "queue"
-R = "mem"
-r = "mem_util"
-S = "swap_util"
-u = "cpu_util"
-v = "inode"
-W = "swap"
-w = "task"
# -H = "hugepages" # only available for newer linux distributions
# "-I ALL" = "interrupts" # requires INT activity
#
#
## Device tags can be used to add additional tags for devices. For example the configuration below
## adds a tag vg with value rootvg for all metrics with sda devices.
# [[inputs.sysstat.device_tags.sda]]
# vg = "rootvg"
```
### Measurements & Fields:
#### If group=true
- cpu
- pct_idle (float)
- pct_iowait (float)
- pct_nice (float)
- pct_steal (float)
- pct_system (float)
- pct_user (float)
- disk
- avgqu-sz (float)
- avgrq-sz (float)
- await (float)
- pct_util (float)
- rd_sec_pers (float)
- svctm (float)
- tps (float)
And much more, depending on the options you configure.
#### If group=false
- cpu_pct_idle
- value (float)
- cpu_pct_iowait
- value (float)
- cpu_pct_nice
- value (float)
- cpu_pct_steal
- value (float)
- cpu_pct_system
- value (float)
- cpu_pct_user
- value (float)
- disk_avgqu-sz
- value (float)
- disk_avgrq-sz
- value (float)
- disk_await
- value (float)
- disk_pct_util
- value (float)
- disk_rd_sec_per_s
- value (float)
- disk_svctm
- value (float)
- disk_tps
- value (float)
And much more, depending on the options you configure.
### Tags:
- All measurements have the following tags:
- device
And more if you define some `device_tags`.
### Example Output:
With the configuration below:
```toml
[[inputs.sysstat]]
sadc_path = "/usr/lib/sa/sadc" # required
activities = ["DISK", "SNMP", "INT"]
group = true
[inputs.sysstat.options]
-C = "cpu"
-B = "paging"
-b = "io"
-d = "disk" # requires DISK activity
-H = "hugepages"
"-I ALL" = "interrupts" # requires INT activity
"-n ALL" = "network"
"-P ALL" = "per_cpu"
-q = "queue"
-R = "mem"
"-r ALL" = "mem_util"
-S = "swap_util"
-u = "cpu_util"
-v = "inode"
-W = "swap"
-w = "task"
[[inputs.sysstat.device_tags.sda]]
vg = "rootvg"
```
you get the following output:
```
$ telegraf -config telegraf.conf -input-filter sysstat -test
* Plugin: sysstat, Collection 1
> cpu_util,device=all pct_idle=98.85,pct_iowait=0,pct_nice=0.38,pct_steal=0,pct_system=0.64,pct_user=0.13 1459255626657883725
> swap pswpin_per_s=0,pswpout_per_s=0 1459255626658387650
> per_cpu,device=cpu1 pct_idle=98.98,pct_iowait=0,pct_nice=0.26,pct_steal=0,pct_system=0.51,pct_user=0.26 1459255626659630437
> per_cpu,device=all pct_idle=98.85,pct_iowait=0,pct_nice=0.38,pct_steal=0,pct_system=0.64,pct_user=0.13 1459255626659670744
> per_cpu,device=cpu0 pct_idle=98.73,pct_iowait=0,pct_nice=0.76,pct_steal=0,pct_system=0.51,pct_user=0 1459255626659697515
> hugepages kbhugfree=0,kbhugused=0,pct_hugused=0 1459255626660057517
> network,device=lo coll_per_s=0,pct_ifutil=0,rxcmp_per_s=0,rxdrop_per_s=0,rxerr_per_s=0,rxfifo_per_s=0,rxfram_per_s=0,rxkB_per_s=0.81,rxmcst_per_s=0,rxpck_per_s=16,txcarr_per_s=0,txcmp_per_s=0,txdrop_per_s=0,txerr_per_s=0,txfifo_per_s=0,txkB_per_s=0.81,txpck_per_s=16 1459255626661197666
> network access_per_s=0,active_per_s=0,asmf_per_s=0,asmok_per_s=0,asmrq_per_s=0,atmptf_per_s=0,badcall_per_s=0,call_per_s=0,estres_per_s=0,fragcrt_per_s=0,fragf_per_s=0,fragok_per_s=0,fwddgm_per_s=0,getatt_per_s=0,hit_per_s=0,iadrerr_per_s=0,iadrmk_per_s=0,iadrmkr_per_s=0,idel_per_s=16,idgm_per_s=0,idgmerr_per_s=0,idisc_per_s=0,idstunr_per_s=0,iech_per_s=0,iechr_per_s=0,ierr_per_s=0,ihdrerr_per_s=0,imsg_per_s=0,ip-frag=0,iparmpb_per_s=0,irec_per_s=16,iredir_per_s=0,iseg_per_s=16,isegerr_per_s=0,isrcq_per_s=0,itm_per_s=0,itmex_per_s=0,itmr_per_s=0,iukwnpr_per_s=0,miss_per_s=0,noport_per_s=0,oadrmk_per_s=0,oadrmkr_per_s=0,odgm_per_s=0,odisc_per_s=0,odstunr_per_s=0,oech_per_s=0,oechr_per_s=0,oerr_per_s=0,omsg_per_s=0,onort_per_s=0,oparmpb_per_s=0,oredir_per_s=0,orq_per_s=16,orsts_per_s=0,oseg_per_s=16,osrcq_per_s=0,otm_per_s=0,otmex_per_s=0,otmr_per_s=0,packet_per_s=0,passive_per_s=0,rawsck=0,read_per_s=0,retrans_per_s=0,saccess_per_s=0,scall_per_s=0,sgetatt_per_s=0,sread_per_s=0,swrite_per_s=0,tcp-tw=7,tcp_per_s=0,tcpsck=1543,totsck=4052,udp_per_s=0,udpsck=2,write_per_s=0 1459255626661381788
> network,device=ens33 coll_per_s=0,pct_ifutil=0,rxcmp_per_s=0,rxdrop_per_s=0,rxerr_per_s=0,rxfifo_per_s=0,rxfram_per_s=0,rxkB_per_s=0,rxmcst_per_s=0,rxpck_per_s=0,txcarr_per_s=0,txcmp_per_s=0,txdrop_per_s=0,txerr_per_s=0,txfifo_per_s=0,txkB_per_s=0,txpck_per_s=0 1459255626661533072
> disk,device=sda,vg=rootvg avgqu-sz=0.01,avgrq-sz=8.5,await=3.31,pct_util=0.1,rd_sec_per_s=0,svctm=0.25,tps=4,wr_sec_per_s=34 1459255626663974389
> queue blocked=0,ldavg-1=1.61,ldavg-15=1.34,ldavg-5=1.67,plist-sz=1415,runq-sz=0 1459255626664159054
> paging fault_per_s=0.25,majflt_per_s=0,pct_vmeff=0,pgfree_per_s=19,pgpgin_per_s=0,pgpgout_per_s=17,pgscand_per_s=0,pgscank_per_s=0,pgsteal_per_s=0 1459255626664304249
> mem_util kbactive=2206568,kbanonpg=1472208,kbbuffers=118020,kbcached=1035252,kbcommit=8717200,kbdirty=156,kbinact=418912,kbkstack=24672,kbmemfree=1744868,kbmemused=3610272,kbpgtbl=87116,kbslab=233804,kbvmused=0,pct_commit=136.13,pct_memused=67.42 1459255626664554981
> io bread_per_s=0,bwrtn_per_s=34,rtps=0,tps=4,wtps=4 1459255626664596198
> inode dentunusd=235039,file-nr=17120,inode-nr=94505,pty-nr=14 1459255626664663693
> interrupts,device=i000 intr_per_s=0 1459255626664800109
> interrupts,device=i003 intr_per_s=0 1459255626665255145
> interrupts,device=i004 intr_per_s=0 1459255626665281776
> interrupts,device=i006 intr_per_s=0 1459255626665297416
> interrupts,device=i007 intr_per_s=0 1459255626665321008
> interrupts,device=i010 intr_per_s=0 1459255626665339413
> interrupts,device=i012 intr_per_s=0 1459255626665361510
> interrupts,device=i013 intr_per_s=0 1459255626665381327
> interrupts,device=i015 intr_per_s=1 1459255626665397313
> interrupts,device=i001 intr_per_s=0.25 1459255626665412985
> interrupts,device=i002 intr_per_s=0 1459255626665430475
> interrupts,device=i005 intr_per_s=0 1459255626665453944
> interrupts,device=i008 intr_per_s=0 1459255626665470650
> interrupts,device=i011 intr_per_s=0 1459255626665486069
> interrupts,device=i009 intr_per_s=0 1459255626665502913
> interrupts,device=i014 intr_per_s=0 1459255626665518152
> task cswch_per_s=722.25,proc_per_s=0 1459255626665849646
> cpu,device=all pct_idle=98.85,pct_iowait=0,pct_nice=0.38,pct_steal=0,pct_system=0.64,pct_user=0.13 1459255626666639715
> mem bufpg_per_s=0,campg_per_s=1.75,frmpg_per_s=-8.25 1459255626666770205
> swap_util kbswpcad=0,kbswpfree=1048572,kbswpused=0,pct_swpcad=0,pct_swpused=0 1459255626667313276
```
If you change the group value to false like below:
```toml
[[inputs.sysstat]]
sadc_path = "/usr/lib/sa/sadc" # required
activities = ["DISK", "SNMP", "INT"]
group = false
[inputs.sysstat.options]
-C = "cpu"
-B = "paging"
-b = "io"
-d = "disk" # requires DISK activity
-H = "hugepages"
"-I ALL" = "interrupts" # requires INT activity
"-n ALL" = "network"
"-P ALL" = "per_cpu"
-q = "queue"
-R = "mem"
"-r ALL" = "mem_util"
-S = "swap_util"
-u = "cpu_util"
-v = "inode"
-W = "swap"
-w = "task"
[[inputs.sysstat.device_tags.sda]]
vg = "rootvg"
```
you get the following output:
```
$ telegraf -config telegraf.conf -input-filter sysstat -test
* Plugin: sysstat, Collection 1
> io_tps value=0.5 1459255780126025822
> io_rtps value=0 1459255780126025822
> io_wtps value=0.5 1459255780126025822
> io_bread_per_s value=0 1459255780126025822
> io_bwrtn_per_s value=38 1459255780126025822
> cpu_util_pct_user,device=all value=39.07 1459255780126025822
> cpu_util_pct_nice,device=all value=0 1459255780126025822
> cpu_util_pct_system,device=all value=47.94 1459255780126025822
> cpu_util_pct_iowait,device=all value=0 1459255780126025822
> cpu_util_pct_steal,device=all value=0 1459255780126025822
> cpu_util_pct_idle,device=all value=12.98 1459255780126025822
> swap_pswpin_per_s value=0 1459255780126025822
> cpu_pct_user,device=all value=39.07 1459255780126025822
> cpu_pct_nice,device=all value=0 1459255780126025822
> cpu_pct_system,device=all value=47.94 1459255780126025822
> cpu_pct_iowait,device=all value=0 1459255780126025822
> cpu_pct_steal,device=all value=0 1459255780126025822
> cpu_pct_idle,device=all value=12.98 1459255780126025822
> per_cpu_pct_user,device=all value=39.07 1459255780126025822
> per_cpu_pct_nice,device=all value=0 1459255780126025822
> per_cpu_pct_system,device=all value=47.94 1459255780126025822
> per_cpu_pct_iowait,device=all value=0 1459255780126025822
> per_cpu_pct_steal,device=all value=0 1459255780126025822
> per_cpu_pct_idle,device=all value=12.98 1459255780126025822
> per_cpu_pct_user,device=cpu0 value=33.5 1459255780126025822
> per_cpu_pct_nice,device=cpu0 value=0 1459255780126025822
> per_cpu_pct_system,device=cpu0 value=65.25 1459255780126025822
> per_cpu_pct_iowait,device=cpu0 value=0 1459255780126025822
> per_cpu_pct_steal,device=cpu0 value=0 1459255780126025822
> per_cpu_pct_idle,device=cpu0 value=1.25 1459255780126025822
> per_cpu_pct_user,device=cpu1 value=44.85 1459255780126025822
> per_cpu_pct_nice,device=cpu1 value=0 1459255780126025822
> per_cpu_pct_system,device=cpu1 value=29.55 1459255780126025822
> per_cpu_pct_iowait,device=cpu1 value=0 1459255780126025822
> per_cpu_pct_steal,device=cpu1 value=0 1459255780126025822
> per_cpu_pct_idle,device=cpu1 value=25.59 1459255780126025822
> hugepages_kbhugfree value=0 1459255780126025822
> hugepages_kbhugused value=0 1459255780126025822
> hugepages_pct_hugused value=0 1459255780126025822
> interrupts_intr_per_s,device=i000 value=0 1459255780126025822
> inode_dentunusd value=252876 1459255780126025822
> mem_util_kbmemfree value=1613612 1459255780126025822
> disk_tps,device=sda,vg=rootvg value=0.5 1459255780126025822
> swap_pswpout_per_s value=0 1459255780126025822
> network_rxpck_per_s,device=ens33 value=0 1459255780126025822
> queue_runq-sz value=4 1459255780126025822
> task_proc_per_s value=0 1459255780126025822
> task_cswch_per_s value=2019 1459255780126025822
> mem_frmpg_per_s value=0 1459255780126025822
> mem_bufpg_per_s value=0.5 1459255780126025822
> mem_campg_per_s value=1.25 1459255780126025822
> interrupts_intr_per_s,device=i001 value=0 1459255780126025822
> inode_file-nr value=19104 1459255780126025822
> mem_util_kbmemused value=3741528 1459255780126025822
> disk_rd_sec_per_s,device=sda,vg=rootvg value=0 1459255780126025822
> network_txpck_per_s,device=ens33 value=0 1459255780126025822
> queue_plist-sz value=1512 1459255780126025822
> paging_pgpgin_per_s value=0 1459255780126025822
> paging_pgpgout_per_s value=19 1459255780126025822
> paging_fault_per_s value=0.25 1459255780126025822
> paging_majflt_per_s value=0 1459255780126025822
> paging_pgfree_per_s value=34.25 1459255780126025822
> paging_pgscank_per_s value=0 1459255780126025822
> paging_pgscand_per_s value=0 1459255780126025822
> paging_pgsteal_per_s value=0 1459255780126025822
> paging_pct_vmeff value=0 1459255780126025822
> interrupts_intr_per_s,device=i002 value=0 1459255780126025822
> interrupts_intr_per_s,device=i003 value=0 1459255780126025822
> interrupts_intr_per_s,device=i004 value=0 1459255780126025822
> interrupts_intr_per_s,device=i005 value=0 1459255780126025822
> interrupts_intr_per_s,device=i006 value=0 1459255780126025822
> interrupts_intr_per_s,device=i007 value=0 1459255780126025822
> interrupts_intr_per_s,device=i008 value=0 1459255780126025822
> interrupts_intr_per_s,device=i009 value=0 1459255780126025822
> interrupts_intr_per_s,device=i010 value=0 1459255780126025822
> interrupts_intr_per_s,device=i011 value=0 1459255780126025822
> interrupts_intr_per_s,device=i012 value=0 1459255780126025822
> interrupts_intr_per_s,device=i013 value=0 1459255780126025822
> interrupts_intr_per_s,device=i014 value=0 1459255780126025822
> interrupts_intr_per_s,device=i015 value=1 1459255780126025822
> inode_inode-nr value=94709 1459255780126025822
> inode_pty-nr value=14 1459255780126025822
> mem_util_pct_memused value=69.87 1459255780126025822
> mem_util_kbbuffers value=118252 1459255780126025822
> mem_util_kbcached value=1045240 1459255780126025822
> mem_util_kbcommit value=9628152 1459255780126025822
> mem_util_pct_commit value=150.35 1459255780126025822
> mem_util_kbactive value=2303752 1459255780126025822
> mem_util_kbinact value=428340 1459255780126025822
> mem_util_kbdirty value=104 1459255780126025822
> mem_util_kbanonpg value=1568676 1459255780126025822
> mem_util_kbslab value=240032 1459255780126025822
> mem_util_kbkstack value=26224 1459255780126025822
> mem_util_kbpgtbl value=98056 1459255780126025822
> mem_util_kbvmused value=0 1459255780126025822
> disk_wr_sec_per_s,device=sda,vg=rootvg value=38 1459255780126025822
> disk_avgrq-sz,device=sda,vg=rootvg value=76 1459255780126025822
> disk_avgqu-sz,device=sda,vg=rootvg value=0 1459255780126025822
> disk_await,device=sda,vg=rootvg value=2 1459255780126025822
> disk_svctm,device=sda,vg=rootvg value=2 1459255780126025822
> disk_pct_util,device=sda,vg=rootvg value=0.1 1459255780126025822
> network_rxkB_per_s,device=ens33 value=0 1459255780126025822
> network_txkB_per_s,device=ens33 value=0 1459255780126025822
> network_rxcmp_per_s,device=ens33 value=0 1459255780126025822
> network_txcmp_per_s,device=ens33 value=0 1459255780126025822
> network_rxmcst_per_s,device=ens33 value=0 1459255780126025822
> network_pct_ifutil,device=ens33 value=0 1459255780126025822
> network_rxpck_per_s,device=lo value=10.75 1459255780126025822
> network_txpck_per_s,device=lo value=10.75 1459255780126025822
> network_rxkB_per_s,device=lo value=0.77 1459255780126025822
> network_txkB_per_s,device=lo value=0.77 1459255780126025822
> network_rxcmp_per_s,device=lo value=0 1459255780126025822
> network_txcmp_per_s,device=lo value=0 1459255780126025822
> network_rxmcst_per_s,device=lo value=0 1459255780126025822
> network_pct_ifutil,device=lo value=0 1459255780126025822
> network_rxerr_per_s,device=ens33 value=0 1459255780126025822
> network_txerr_per_s,device=ens33 value=0 1459255780126025822
> network_coll_per_s,device=ens33 value=0 1459255780126025822
> network_rxdrop_per_s,device=ens33 value=0 1459255780126025822
> network_txdrop_per_s,device=ens33 value=0 1459255780126025822
> network_txcarr_per_s,device=ens33 value=0 1459255780126025822
> network_rxfram_per_s,device=ens33 value=0 1459255780126025822
> network_rxfifo_per_s,device=ens33 value=0 1459255780126025822
> network_txfifo_per_s,device=ens33 value=0 1459255780126025822
> network_rxerr_per_s,device=lo value=0 1459255780126025822
> network_txerr_per_s,device=lo value=0 1459255780126025822
> network_coll_per_s,device=lo value=0 1459255780126025822
> network_rxdrop_per_s,device=lo value=0 1459255780126025822
> network_txdrop_per_s,device=lo value=0 1459255780126025822
> network_txcarr_per_s,device=lo value=0 1459255780126025822
> network_rxfram_per_s,device=lo value=0 1459255780126025822
> network_rxfifo_per_s,device=lo value=0 1459255780126025822
> network_txfifo_per_s,device=lo value=0 1459255780126025822
> network_call_per_s value=0 1459255780126025822
> network_retrans_per_s value=0 1459255780126025822
> network_read_per_s value=0 1459255780126025822
> network_write_per_s value=0 1459255780126025822
> network_access_per_s value=0 1459255780126025822
> network_getatt_per_s value=0 1459255780126025822
> network_scall_per_s value=0 1459255780126025822
> network_badcall_per_s value=0 1459255780126025822
> network_packet_per_s value=0 1459255780126025822
> network_udp_per_s value=0 1459255780126025822
> network_tcp_per_s value=0 1459255780126025822
> network_hit_per_s value=0 1459255780126025822
> network_miss_per_s value=0 1459255780126025822
> network_sread_per_s value=0 1459255780126025822
> network_swrite_per_s value=0 1459255780126025822
> network_saccess_per_s value=0 1459255780126025822
> network_sgetatt_per_s value=0 1459255780126025822
> network_totsck value=4234 1459255780126025822
> network_tcpsck value=1637 1459255780126025822
> network_udpsck value=2 1459255780126025822
> network_rawsck value=0 1459255780126025822
> network_ip-frag value=0 1459255780126025822
> network_tcp-tw value=4 1459255780126025822
> network_irec_per_s value=10.75 1459255780126025822
> network_fwddgm_per_s value=0 1459255780126025822
> network_idel_per_s value=10.75 1459255780126025822
> network_orq_per_s value=10.75 1459255780126025822
> network_asmrq_per_s value=0 1459255780126025822
> network_asmok_per_s value=0 1459255780126025822
> network_fragok_per_s value=0 1459255780126025822
> network_fragcrt_per_s value=0 1459255780126025822
> network_ihdrerr_per_s value=0 1459255780126025822
> network_iadrerr_per_s value=0 1459255780126025822
> network_iukwnpr_per_s value=0 1459255780126025822
> network_idisc_per_s value=0 1459255780126025822
> network_odisc_per_s value=0 1459255780126025822
> network_onort_per_s value=0 1459255780126025822
> network_asmf_per_s value=0 1459255780126025822
> network_fragf_per_s value=0 1459255780126025822
> network_imsg_per_s value=0 1459255780126025822
> network_omsg_per_s value=0 1459255780126025822
> network_iech_per_s value=0 1459255780126025822
> network_iechr_per_s value=0 1459255780126025822
> network_oech_per_s value=0 1459255780126025822
> network_oechr_per_s value=0 1459255780126025822
> network_itm_per_s value=0 1459255780126025822
> network_itmr_per_s value=0 1459255780126025822
> network_otm_per_s value=0 1459255780126025822
> network_otmr_per_s value=0 1459255780126025822
> network_iadrmk_per_s value=0 1459255780126025822
> network_iadrmkr_per_s value=0 1459255780126025822
> network_oadrmk_per_s value=0 1459255780126025822
> network_oadrmkr_per_s value=0 1459255780126025822
> network_ierr_per_s value=0 1459255780126025822
> network_oerr_per_s value=0 1459255780126025822
> network_idstunr_per_s value=0 1459255780126025822
> network_odstunr_per_s value=0 1459255780126025822
> network_itmex_per_s value=0 1459255780126025822
> network_otmex_per_s value=0 1459255780126025822
> network_iparmpb_per_s value=0 1459255780126025822
> network_oparmpb_per_s value=0 1459255780126025822
> network_isrcq_per_s value=0 1459255780126025822
> network_osrcq_per_s value=0 1459255780126025822
> network_iredir_per_s value=0 1459255780126025822
> network_oredir_per_s value=0 1459255780126025822
> network_active_per_s value=0 1459255780126025822
> network_passive_per_s value=0 1459255780126025822
> network_iseg_per_s value=10.75 1459255780126025822
> network_oseg_per_s value=9.5 1459255780126025822
> network_atmptf_per_s value=0 1459255780126025822
> network_estres_per_s value=0 1459255780126025822
> network_retrans_per_s value=1.5 1459255780126025822
> network_isegerr_per_s value=0.25 1459255780126025822
> network_orsts_per_s value=0 1459255780126025822
> network_idgm_per_s value=0 1459255780126025822
> network_odgm_per_s value=0 1459255780126025822
> network_noport_per_s value=0 1459255780126025822
> network_idgmerr_per_s value=0 1459255780126025822
> queue_ldavg-1 value=2.1 1459255780126025822
> queue_ldavg-5 value=1.82 1459255780126025822
> queue_ldavg-15 value=1.44 1459255780126025822
> queue_blocked value=0 1459255780126025822
> swap_util_kbswpfree value=1048572 1459255780126025822
> swap_util_kbswpused value=0 1459255780126025822
> swap_util_pct_swpused value=0 1459255780126025822
> swap_util_kbswpcad value=0 1459255780126025822
> swap_util_pct_swpcad value=0 1459255780126025822
```

View File

@ -0,0 +1,324 @@
// +build linux
package sysstat
import (
"bufio"
"encoding/csv"
"errors"
"fmt"
"io"
"os"
"os/exec"
"path"
"strconv"
"strings"
"sync"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
)
var (
	// firstTimestamp records when the first Gather call happened so that
	// the second call can measure the telegraf collection interval.
	firstTimestamp time.Time
	execCommand    = exec.Command // execCommand is used to mock commands in tests.
	// dfltActivities is the default activity list passed to sadc ("-S DISK").
	dfltActivities = []string{"DISK"}
)

const parseInterval = 1 // parseInterval is the interval (in seconds) where the parsing of the binary file takes place.
// Sysstat is a telegraf input that gathers system metrics by running the
// sysstat collector utility (sadc) and extracting the results with sadf.
type Sysstat struct {
	// Sadc represents the path to the sadc collector utility.
	Sadc string `toml:"sadc_path"`
	// Sadf represents the path to the sadf cmd.
	Sadf string `toml:"sadf_path"`
	// Activities is a list of activities that are passed as argument to the
	// collector utility (e.g: DISK, SNMP etc...)
	// The more activities that are added, the more data is collected.
	Activities []string
	// Options is a map of options.
	//
	// The key represents the actual option that the Sadf command is called with and
	// the value represents the description for that option.
	//
	// For example, if you have the following options map:
	// map[string]string{"-C": "cpu", "-d": "disk"}
	// The Sadf command is run with the options -C and -d to extract cpu and
	// disk metrics from the collected binary file.
	//
	// If Group is false (see below), each metric will be prefixed with the corresponding description
	// and represents itself a measurement.
	//
	// If Group is true, metrics are grouped to a single measurement with the corresponding description as name.
	Options map[string]string
	// Group determines if metrics are grouped or not.
	Group bool
	// DeviceTags adds the possibility to add additional tags for devices.
	DeviceTags map[string][]map[string]string `toml:"device_tags"`
	// tmpFile is the path of the binary file sadc writes to; it is removed
	// at the end of each Gather run.
	tmpFile string
	// interval is the telegraf collection interval in seconds, derived from
	// the elapsed time between the first two Gather calls.
	interval int
}
// Description returns a one-line description of the sysstat input plugin.
func (*Sysstat) Description() string {
	return "Sysstat metrics collector"
}
// sampleConfig documents every option of the sysstat input in TOML form.
// Fix: "wich" -> "which" in the options description (user-facing text).
var sampleConfig = `
## Path to the sadc command.
#
## Common Defaults:
## Debian/Ubuntu: /usr/lib/sysstat/sadc
## Arch: /usr/lib/sa/sadc
## RHEL/CentOS: /usr/lib64/sa/sadc
sadc_path = "/usr/lib/sa/sadc" # required
#
#
## Path to the sadf command, if it is not in PATH
# sadf_path = "/usr/bin/sadf"
#
#
## Activities is a list of activities, that are passed as argument to the
## sadc collector utility (e.g: DISK, SNMP etc...)
## The more activities that are added, the more data is collected.
# activities = ["DISK"]
#
#
## Group metrics to measurements.
##
## If group is false each metric will be prefixed with a description
## and represents itself a measurement.
##
## If Group is true, corresponding metrics are grouped to a single measurement.
# group = true
#
#
## Options for the sadf command. The values on the left represent the sadf options and
## the values on the right their description (which are used for grouping and prefixing metrics).
##
## Run 'sar -h' or 'man sar' to find out the supported options for your sysstat version.
[inputs.sysstat.options]
-C = "cpu"
-B = "paging"
-b = "io"
-d = "disk" # requires DISK activity
"-n ALL" = "network"
"-P ALL" = "per_cpu"
-q = "queue"
-R = "mem"
-r = "mem_util"
-S = "swap_util"
-u = "cpu_util"
-v = "inode"
-W = "swap"
-w = "task"
# -H = "hugepages" # only available for newer linux distributions
# "-I ALL" = "interrupts" # requires INT activity
#
#
## Device tags can be used to add additional tags for devices. For example the configuration below
## adds a tag vg with value rootvg for all metrics with sda devices.
# [[inputs.sysstat.device_tags.sda]]
# vg = "rootvg"
`
// SampleConfig returns the sample TOML configuration for the plugin.
func (*Sysstat) SampleConfig() string {
	return sampleConfig
}
// Gather runs one collection cycle: it records metrics with sadc (collect)
// and then parses the resulting binary file with sadf, one goroutine per
// configured sadf option, adding the metrics to acc.
//
// The telegraf collection interval is unknown at startup; it is measured as
// the elapsed time between the first and second Gather call.
// NOTE(review): firstTimestamp is package-level mutable state shared by all
// instances — confirm telegraf never calls Gather concurrently here.
func (s *Sysstat) Gather(acc telegraf.Accumulator) error {
	if s.interval == 0 {
		if firstTimestamp.IsZero() {
			// First run: remember when it happened so the next run can
			// derive the interval.
			firstTimestamp = time.Now()
		} else {
			s.interval = int(time.Since(firstTimestamp).Seconds())
		}
	}
	// Metrics are stamped with the (estimated) end of the collection window.
	ts := time.Now().Add(time.Duration(s.interval) * time.Second)
	if err := s.collect(); err != nil {
		return err
	}
	var wg sync.WaitGroup
	// Buffered so no goroutine blocks on send; each sends at most one error,
	// so len(s.Options)*2 is more capacity than strictly needed.
	errorChannel := make(chan error, len(s.Options)*2)
	for option := range s.Options {
		wg.Add(1)
		go func(acc telegraf.Accumulator, option string) {
			defer wg.Done()
			if err := s.parse(acc, option, ts); err != nil {
				errorChannel <- err
			}
		}(acc, option)
	}
	wg.Wait()
	close(errorChannel)

	// Drain all goroutine errors into one combined error value.
	errorStrings := []string{}
	for err := range errorChannel {
		errorStrings = append(errorStrings, err.Error())
	}

	// Clean up the temporary binary file written by sadc, if present.
	if _, err := os.Stat(s.tmpFile); err == nil {
		if err := os.Remove(s.tmpFile); err != nil {
			errorStrings = append(errorStrings, err.Error())
		}
	}

	if len(errorStrings) == 0 {
		return nil
	}
	return errors.New(strings.Join(errorStrings, "\n"))
}
// collect collects sysstat data with the collector utility sadc. It runs the following command:
// Sadc -S <Activity1> -S <Activity2> ... <collectInterval> 2 tmpFile
// The above command collects system metrics during <collectInterval> and saves it in binary form to tmpFile.
func (s *Sysstat) collect() error {
	options := []string{}
	for _, act := range s.Activities {
		options = append(options, "-S", act)
	}
	// NOTE(review): the temp file name is predictable and only second-granular;
	// two instances started within the same second would share a file.
	// Consider ioutil.TempFile for a unique, safe name.
	s.tmpFile = path.Join("/tmp", fmt.Sprintf("sysstat-%d", time.Now().Unix()))
	collectInterval := s.interval - parseInterval // collectInterval has to be smaller than the telegraf data collection interval
	if collectInterval < 0 { // If true, interval is not defined yet and Gather is run for the first time.
		collectInterval = 1 // In that case we only collect for 1 second.
	}
	// "2" asks sadc for two records; the second record closes the interval.
	options = append(options, strconv.Itoa(collectInterval), "2", s.tmpFile)
	cmd := execCommand(s.Sadc, options...)
	out, err := cmd.CombinedOutput()
	if err != nil {
		return fmt.Errorf("failed to run command %s: %s", strings.Join(cmd.Args, " "), string(out))
	}
	return nil
}
// parse runs Sadf on the previously saved tmpFile:
// Sadf -p -- -p <option> tmpFile
// and parses the output to add it to the telegraf.Accumulator acc.
func (s *Sysstat) parse(acc telegraf.Accumulator, option string, ts time.Time) error {
	cmd := execCommand(s.Sadf, s.sadfOptions(option)...)
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return err
	}
	if err := cmd.Start(); err != nil {
		return fmt.Errorf("running command '%s' failed: %s", strings.Join(cmd.Args, " "), err)
	}
	r := bufio.NewReader(stdout)
	// NOTE: this local deliberately shadows the encoding/csv package name.
	csv := csv.NewReader(r)
	csv.Comma = '\t'
	csv.FieldsPerRecord = 6
	var measurement string
	// groupData to accumulate data when Group=true
	type groupData struct {
		tags   map[string]string
		fields map[string]interface{}
	}
	m := make(map[string]groupData)
	for {
		record, err := csv.Read()
		if err == io.EOF {
			break
		}
		if err != nil {
			return err
		}
		// Record layout (see the mocked sadf output in the tests):
		// host, interval, timestamp, device, field name, value.
		device := record[3]
		value, err := strconv.ParseFloat(record[5], 64)
		if err != nil {
			return err
		}
		tags := map[string]string{}
		if device != "-" {
			// "-" marks a metric with no associated device; only real
			// devices get a device tag plus any configured DeviceTags.
			tags["device"] = device
			if addTags, ok := s.DeviceTags[device]; ok {
				for _, tag := range addTags {
					for k, v := range tag {
						tags[k] = v
					}
				}
			}
		}
		if s.Group {
			// Grouped mode: accumulate all fields of one device into a
			// single point, emitted after the read loop.
			measurement = s.Options[option]
			if _, ok := m[device]; !ok {
				m[device] = groupData{
					fields: make(map[string]interface{}),
					tags:   make(map[string]string),
				}
			}
			g, _ := m[device]
			if len(g.tags) == 0 {
				for k, v := range tags {
					g.tags[k] = v
				}
			}
			g.fields[escape(record[4])] = value
		} else {
			// Ungrouped mode: every field becomes its own measurement,
			// prefixed with the option's description.
			measurement = s.Options[option] + "_" + escape(record[4])
			fields := map[string]interface{}{
				"value": value,
			}
			acc.AddFields(measurement, fields, tags, ts)
		}
	}
	if s.Group {
		// Flush one accumulated point per device under the option's name.
		for _, v := range m {
			acc.AddFields(measurement, v.fields, v.tags, ts)
		}
	}
	if err := cmd.Wait(); err != nil {
		return fmt.Errorf("command %s failed with %s", strings.Join(cmd.Args, " "), err)
	}
	return nil
}
// sadfOptions builds the argument list for the sadf utility, i.e.
// "-p -- -p <fields of activityOption> <tmpFile>".
func (s *Sysstat) sadfOptions(activityOption string) []string {
	args := append([]string{"-p", "--", "-p"}, strings.Split(activityOption, " ")...)
	return append(args, s.tmpFile)
}
// fieldEscaper rewrites characters that are not usable in field names:
// "%" becomes "pct_" and "/" becomes "_per_". It is built once at package
// level instead of on every escape call (strings.NewReplacer allocates).
var fieldEscaper = strings.NewReplacer(
	`%`, "pct_",
	`/`, "_per_",
)

// escape removes % and / chars in field names
func escape(dirty string) string {
	return fieldEscaper.Replace(dirty)
}
func init() {
s := Sysstat{
Group: true,
Activities: dfltActivities,
}
sadf, _ := exec.LookPath("sadf")
if len(sadf) > 0 {
s.Sadf = sadf
}
inputs.Add("sysstat", func() telegraf.Input {
return &s
})
}

View File

@ -0,0 +1,41 @@
// +build !race
// +build linux
package sysstat
import (
"os/exec"
"testing"
"time"
"github.com/influxdata/telegraf/testutil"
)
// TestInterval verifies that the correct interval is created. It is not
// run with -race option, because in that scenario interval between the two
// Gather calls is greater than wantedInterval.
func TestInterval(t *testing.T) {
	// overwriting exec commands with mock commands
	execCommand = fakeExecCommand
	defer func() { execCommand = exec.Command }()
	var acc testutil.Accumulator

	// Reset the shared fixture so the interval is re-measured from scratch.
	s.interval = 0
	wantedInterval := 3

	// First Gather only records firstTimestamp; interval stays 0.
	err := s.Gather(&acc)
	if err != nil {
		t.Fatal(err)
	}

	// The second Gather derives the interval from the elapsed time.
	time.Sleep(time.Duration(wantedInterval) * time.Second)
	err = s.Gather(&acc)
	if err != nil {
		t.Fatal(err)
	}
	if s.interval != wantedInterval {
		t.Errorf("wrong interval: got %d, want %d", s.interval, wantedInterval)
	}
}

View File

@ -0,0 +1,3 @@
// +build !linux
package sysstat

View File

@ -0,0 +1,306 @@
// +build linux
package sysstat
import (
"fmt"
"os"
"os/exec"
"path"
"testing"
"github.com/influxdata/telegraf/testutil"
)
// s is the shared Sysstat fixture used by the tests in this package.
// The Options keys are given without a leading dash: the mocked sadf
// helper (TestHelperProcess) selects its canned output by looking the
// option string up verbatim in mockData via args[3].
var s = Sysstat{
	interval:   10,
	Sadc:       "/usr/lib/sa/sadc",
	Sadf:       "/usr/bin/sadf",
	Group:      false,
	Activities: []string{"DISK", "SNMP"},
	Options: map[string]string{
		"C": "cpu",
		"d": "disk",
	},
	// sda metrics additionally get the tag vg=rootvg.
	DeviceTags: map[string][]map[string]string{
		"sda": {
			{
				"vg": "rootvg",
			},
		},
	},
}
// TestGather runs a full ungrouped Gather against the mocked sadc/sadf
// commands and checks that every cpu and disk metric arrives with the
// expected value and tags.
func TestGather(t *testing.T) {
	// overwriting exec commands with mock commands
	execCommand = fakeExecCommand
	defer func() { execCommand = exec.Command }()

	var acc testutil.Accumulator
	if err := s.Gather(&acc); err != nil {
		t.Fatal(err)
	}

	cpuTags := map[string]string{"device": "all"}
	diskTags := map[string]string{"device": "sda", "vg": "rootvg"}

	// Expected values keyed by measurement name (ungrouped mode prefixes
	// each field with the option description).
	cpuExpected := map[string]float64{
		"cpu_pct_user":   0.65,
		"cpu_pct_nice":   0.0,
		"cpu_pct_system": 0.10,
		"cpu_pct_iowait": 0.15,
		"cpu_pct_steal":  0.0,
		"cpu_pct_idle":   99.1,
	}
	diskExpected := map[string]float64{
		"disk_tps":          0.00,
		"disk_rd_sec_per_s": 0.00,
		"disk_wr_sec_per_s": 0.00,
		"disk_avgrq-sz":     0.00,
		"disk_avgqu-sz":     0.00,
		"disk_await":        0.00,
		"disk_svctm":        0.00,
		"disk_pct_util":     0.00,
	}

	for measurement, value := range cpuExpected {
		acc.AssertContainsTaggedFields(t, measurement, map[string]interface{}{"value": value}, cpuTags)
	}
	for measurement, value := range diskExpected {
		acc.AssertContainsTaggedFields(t, measurement, map[string]interface{}{"value": value}, diskTags)
	}
}
// TestGatherGrouped verifies that with Group enabled all fields of one
// device are merged into a single measurement named after the option
// description.
func TestGatherGrouped(t *testing.T) {
	s.Group = true
	// Fix: restore the shared fixture so tests running after this one are
	// not silently switched into grouped mode (test-order dependence).
	defer func() { s.Group = false }()
	// overwriting exec commands with mock commands
	execCommand = fakeExecCommand
	defer func() { execCommand = exec.Command }()
	var acc testutil.Accumulator
	err := s.Gather(&acc)
	if err != nil {
		t.Fatal(err)
	}
	var tests = []struct {
		measurement string
		fields      map[string]interface{}
		tags        map[string]string
	}{
		{
			"cpu",
			map[string]interface{}{
				"pct_user":   0.65,
				"pct_nice":   0.0,
				"pct_system": 0.10,
				"pct_iowait": 0.15,
				"pct_steal":  0.0,
				"pct_idle":   99.1,
			},
			map[string]string{"device": "all"},
		},
		{
			"disk",
			map[string]interface{}{
				"tps":          0.00,
				"rd_sec_per_s": 0.00,
				"wr_sec_per_s": 0.00,
				"avgrq-sz":     0.00,
				"avgqu-sz":     0.00,
				"await":        0.00,
				"svctm":        0.00,
				"pct_util":     0.00,
			},
			map[string]string{"device": "sda", "vg": "rootvg"},
		},
		{
			"disk",
			map[string]interface{}{
				"tps":          2.01,
				"rd_sec_per_s": 1.0,
				"wr_sec_per_s": 0.00,
				"avgrq-sz":     0.30,
				"avgqu-sz":     0.60,
				"await":        0.70,
				"svctm":        0.20,
				"pct_util":     0.30,
			},
			map[string]string{"device": "sdb"},
		},
	}
	for _, test := range tests {
		acc.AssertContainsTaggedFields(t, test.measurement, test.fields, test.tags)
	}
}
// TestEscape checks that % and / are rewritten to pct_ and _per_ in
// field names.
func TestEscape(t *testing.T) {
	cases := map[string]string{
		"%util":   "pct_util",
		"bread/s": "bread_per_s",
		"%nice":   "pct_nice",
	}
	for input, want := range cases {
		if got := escape(input); got != want {
			t.Errorf("wrong escape, got %s, wanted %s", got, want)
		}
	}
}
// fakeExecCommand stands in for exec.Command in tests: it re-invokes the
// current test binary restricted to TestHelperProcess, which then emits
// canned sadf output based on the forwarded arguments.
func fakeExecCommand(command string, args ...string) *exec.Cmd {
	helperArgs := append([]string{"-test.run=TestHelperProcess", "--", command}, args...)
	cmd := exec.Command(os.Args[0], helperArgs...)
	cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"}
	return cmd
}
// TestHelperProcess isn't a real test. It's used to mock exec.Command
// For example, if you run:
// GO_WANT_HELPER_PROCESS=1 go test -test.run=TestHelperProcess -- sadf -p -- -p -C tmpFile
// it returns mockData["C"] output.
func TestHelperProcess(t *testing.T) {
	if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
		// Invoked as a regular test: do nothing.
		return
	}
	// Canned sadf -p output, keyed by the activity option string that
	// appears as args[3] of the mocked command line below.
	mockData := map[string]string{
		"C": `dell-xps 5 2016-03-25 16:18:10 UTC all %user 0.65
dell-xps 5 2016-03-25 16:18:10 UTC all %nice 0.00
dell-xps 5 2016-03-25 16:18:10 UTC all %system 0.10
dell-xps 5 2016-03-25 16:18:10 UTC all %iowait 0.15
dell-xps 5 2016-03-25 16:18:10 UTC all %steal 0.00
dell-xps 5 2016-03-25 16:18:10 UTC all %idle 99.10
`,
		"d": `dell-xps 5 2016-03-25 16:18:10 UTC sda tps 0.00
dell-xps 5 2016-03-25 16:18:10 UTC sda rd_sec/s 0.00
dell-xps 5 2016-03-25 16:18:10 UTC sda wr_sec/s 0.00
dell-xps 5 2016-03-25 16:18:10 UTC sda avgrq-sz 0.00
dell-xps 5 2016-03-25 16:18:10 UTC sda avgqu-sz 0.00
dell-xps 5 2016-03-25 16:18:10 UTC sda await 0.00
dell-xps 5 2016-03-25 16:18:10 UTC sda svctm 0.00
dell-xps 5 2016-03-25 16:18:10 UTC sda %util 0.00
dell-xps 5 2016-03-25 16:18:10 UTC sdb tps 2.01
dell-xps 5 2016-03-25 16:18:10 UTC sdb rd_sec/s 1.00
dell-xps 5 2016-03-25 16:18:10 UTC sdb wr_sec/s 0.00
dell-xps 5 2016-03-25 16:18:10 UTC sdb avgrq-sz 0.30
dell-xps 5 2016-03-25 16:18:10 UTC sdb avgqu-sz 0.60
dell-xps 5 2016-03-25 16:18:10 UTC sdb await 0.70
dell-xps 5 2016-03-25 16:18:10 UTC sdb svctm 0.20
dell-xps 5 2016-03-25 16:18:10 UTC sdb %util 0.30
`,
	}
	args := os.Args
	// Previous arguments are tests stuff, that looks like :
	// /tmp/go-build970079519/…/_test/integration.test -test.run=TestHelperProcess --
	cmd, args := args[3], args[4:]
	// Handle the case where args[0] is dir:...
	switch path.Base(cmd) {
	case "sadf":
		// args here are: -p -- -p <option> <tmpFile>; args[3] is the option.
		fmt.Fprint(os.Stdout, mockData[args[3]])
	default:
	}
	// some code here to check arguments perhaps?
	os.Exit(0)
}

View File

@ -0,0 +1,27 @@
// +build !linux
package system
import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
)
// Kernel is a no-op stand-in for the kernel input on non-linux platforms
// (the real implementation reads the linux-specific /proc/stat).
type Kernel struct {
}

// Description returns a one-line description of the plugin.
func (k *Kernel) Description() string {
	return "Get kernel statistics from /proc/stat"
}

// SampleConfig returns an empty config; there is nothing to configure here.
func (k *Kernel) SampleConfig() string { return "" }

// Gather is a no-op on non-linux platforms.
func (k *Kernel) Gather(acc telegraf.Accumulator) error {
	return nil
}

func init() {
	inputs.Add("kernel", func() telegraf.Input {
		return &Kernel{}
	})
}

View File

@ -144,11 +144,18 @@ func (p *Processes) gatherFromProc(fields map[string]interface{}) error {
continue continue
} }
// Parse out data after (<cmd name>)
i := bytes.LastIndex(data, []byte(")"))
if i == -1 {
continue
}
data = data[i+2:]
stats := bytes.Fields(data) stats := bytes.Fields(data)
if len(stats) < 3 { if len(stats) < 3 {
return fmt.Errorf("Something is terribly wrong with %s", statFile) return fmt.Errorf("Something is terribly wrong with %s", statFile)
} }
switch stats[2][0] { switch stats[0][0] {
case 'R': case 'R':
fields["running"] = fields["running"].(int64) + int64(1) fields["running"] = fields["running"].(int64) + int64(1)
case 'S': case 'S':
@ -163,11 +170,11 @@ func (p *Processes) gatherFromProc(fields map[string]interface{}) error {
fields["paging"] = fields["paging"].(int64) + int64(1) fields["paging"] = fields["paging"].(int64) + int64(1)
default: default:
log.Printf("processes: Unknown state [ %s ] in file %s", log.Printf("processes: Unknown state [ %s ] in file %s",
string(stats[2][0]), statFile) string(stats[0][0]), statFile)
} }
fields["total"] = fields["total"].(int64) + int64(1) fields["total"] = fields["total"].(int64) + int64(1)
threads, err := strconv.Atoi(string(stats[19])) threads, err := strconv.Atoi(string(stats[17]))
if err != nil { if err != nil {
log.Printf("processes: Error parsing thread count: %s", err) log.Printf("processes: Error parsing thread count: %s", err)
continue continue

View File

@ -82,6 +82,28 @@ func TestFromProcFiles(t *testing.T) {
acc.AssertContainsTaggedFields(t, "processes", fields, map[string]string{}) acc.AssertContainsTaggedFields(t, "processes", fields, map[string]string{})
} }
func TestFromProcFilesWithSpaceInCmd(t *testing.T) {
if runtime.GOOS != "linux" {
t.Skip("This test only runs on linux")
}
tester := tester{}
processes := &Processes{
readProcFile: tester.testProcFile2,
forceProc: true,
}
var acc testutil.Accumulator
err := processes.Gather(&acc)
require.NoError(t, err)
fields := getEmptyFields()
fields["sleeping"] = tester.calls
fields["total_threads"] = tester.calls * 2
fields["total"] = tester.calls
acc.AssertContainsTaggedFields(t, "processes", fields, map[string]string{})
}
func testExecPS() ([]byte, error) { func testExecPS() ([]byte, error) {
return []byte(testPSOut), nil return []byte(testPSOut), nil
} }
@ -96,6 +118,11 @@ func (t *tester) testProcFile(_ string) ([]byte, error) {
return []byte(fmt.Sprintf(testProcStat, "S", "2")), nil return []byte(fmt.Sprintf(testProcStat, "S", "2")), nil
} }
func (t *tester) testProcFile2(_ string) ([]byte, error) {
t.calls++
return []byte(fmt.Sprintf(testProcStat2, "S", "2")), nil
}
func testExecPSError() ([]byte, error) { func testExecPSError() ([]byte, error) {
return []byte(testPSOut), fmt.Errorf("ERROR!") return []byte(testPSOut), fmt.Errorf("ERROR!")
} }
@ -149,3 +176,6 @@ S+
const testProcStat = `10 (rcuob/0) %s 2 0 0 0 -1 2129984 0 0 0 0 0 0 0 0 20 0 %s 0 11 0 0 18446744073709551615 0 0 0 0 0 0 0 2147483647 0 18446744073709551615 0 0 17 0 0 0 0 0 0 0 0 0 0 0 0 0 0 const testProcStat = `10 (rcuob/0) %s 2 0 0 0 -1 2129984 0 0 0 0 0 0 0 0 20 0 %s 0 11 0 0 18446744073709551615 0 0 0 0 0 0 0 2147483647 0 18446744073709551615 0 0 17 0 0 0 0 0 0 0 0 0 0 0 0 0 0
` `
const testProcStat2 = `10 (rcuob 0) %s 2 0 0 0 -1 2129984 0 0 0 0 0 0 0 0 20 0 %s 0 11 0 0 18446744073709551615 0 0 0 0 0 0 0 2147483647 0 18446744073709551615 0 0 17 0 0 0 0 0 0 0 0 0 0 0 0 0 0
`

View File

@ -22,7 +22,8 @@ This is a sample configuration for the plugin.
## Maximum number of concurrent TCP connections to allow ## Maximum number of concurrent TCP connections to allow
max_tcp_connections = 250 max_tcp_connections = 250
## Data format to consume. This can be "json", "influx" or "graphite" ## Data format to consume.
## Each data format has it's own unique set of configuration options, read ## Each data format has it's own unique set of configuration options, read
## more about them here: ## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md

View File

@ -39,7 +39,7 @@ type TcpListener struct {
acc telegraf.Accumulator acc telegraf.Accumulator
} }
var dropwarn = "ERROR: Message queue full. Discarding line [%s] " + var dropwarn = "ERROR: Message queue full. Discarding metric. " +
"You may want to increase allowed_pending_messages in the config\n" "You may want to increase allowed_pending_messages in the config\n"
const sampleConfig = ` const sampleConfig = `
@ -202,11 +202,10 @@ func (t *TcpListener) handler(conn *net.TCPConn, id string) {
if !scanner.Scan() { if !scanner.Scan() {
return return
} }
buf := scanner.Bytes()
select { select {
case t.in <- buf: case t.in <- scanner.Bytes():
default: default:
log.Printf(dropwarn, string(buf)) log.Printf(dropwarn)
} }
} }
} }
@ -215,11 +214,12 @@ func (t *TcpListener) handler(conn *net.TCPConn, id string) {
// tcpParser parses the incoming tcp byte packets // tcpParser parses the incoming tcp byte packets
func (t *TcpListener) tcpParser() error { func (t *TcpListener) tcpParser() error {
defer t.wg.Done() defer t.wg.Done()
var packet []byte
for { for {
select { select {
case <-t.done: case <-t.done:
return nil return nil
case packet := <-t.in: case packet = <-t.in:
if len(packet) == 0 { if len(packet) == 0 {
continue continue
} }

View File

@ -23,7 +23,8 @@ This is a sample configuration for the plugin.
## usually 1500 bytes. ## usually 1500 bytes.
udp_packet_size = 1500 udp_packet_size = 1500
## Data format to consume. This can be "json", "influx" or "graphite" ## Data format to consume.
## Each data format has it's own unique set of configuration options, read ## Each data format has it's own unique set of configuration options, read
## more about them here: ## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md

View File

@ -30,7 +30,9 @@ type UdpListener struct {
listener *net.UDPConn listener *net.UDPConn
} }
const UDP_PACKET_SIZE int = 1500 // UDP packet limit, see
// https://en.wikipedia.org/wiki/User_Datagram_Protocol#Packet_structure
const UDP_PACKET_SIZE int = 65507
var dropwarn = "ERROR: Message queue full. Discarding line [%s] " + var dropwarn = "ERROR: Message queue full. Discarding line [%s] " +
"You may want to increase allowed_pending_messages in the config\n" "You may want to increase allowed_pending_messages in the config\n"
@ -43,11 +45,6 @@ const sampleConfig = `
## UDP listener will start dropping packets. ## UDP listener will start dropping packets.
allowed_pending_messages = 10000 allowed_pending_messages = 10000
## UDP packet size for the server to listen for. This will depend
## on the size of the packets that the client is sending, which is
## usually 1500 bytes, but can be as large as 65,535 bytes.
udp_packet_size = 1500
## Data format to consume. ## Data format to consume.
## Each data format has it's own unique set of configuration options, read ## Each data format has it's own unique set of configuration options, read
## more about them here: ## more about them here:
@ -107,12 +104,12 @@ func (u *UdpListener) udpListen() error {
} }
log.Println("UDP server listening on: ", u.listener.LocalAddr().String()) log.Println("UDP server listening on: ", u.listener.LocalAddr().String())
buf := make([]byte, u.UDPPacketSize)
for { for {
select { select {
case <-u.done: case <-u.done:
return nil return nil
default: default:
buf := make([]byte, u.UDPPacketSize)
n, _, err := u.listener.ReadFromUDP(buf) n, _, err := u.listener.ReadFromUDP(buf)
if err != nil && !strings.Contains(err.Error(), "closed network") { if err != nil && !strings.Contains(err.Error(), "closed network") {
log.Printf("ERROR: %s\n", err.Error()) log.Printf("ERROR: %s\n", err.Error())
@ -130,11 +127,13 @@ func (u *UdpListener) udpListen() error {
func (u *UdpListener) udpParser() error { func (u *UdpListener) udpParser() error {
defer u.wg.Done() defer u.wg.Done()
var packet []byte
for { for {
select { select {
case <-u.done: case <-u.done:
return nil return nil
case packet := <-u.in: case packet = <-u.in:
metrics, err := u.parser.Parse(packet) metrics, err := u.parser.Parse(packet)
if err == nil { if err == nil {
u.storeMetrics(metrics) u.storeMetrics(metrics)