diff --git a/CHANGELOG.md b/CHANGELOG.md
index fa7f9607a..63611e3e4 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,14 +1,45 @@
## v0.10.1 [unreleased]
+### Release Notes:
+
+- Telegraf now keeps a fixed-length buffer of metrics per output. The buffer
+defaults to 10,000 metrics and is adjustable; it is cleared whenever a write
+to that output succeeds (a sketch follows this list).
+- The docker plugin has been significantly overhauled to add more metrics
+and to support docker-machine (including OS X).
+[See the readme](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/docker/README.md)
+for the latest measurements, fields, and tags. You can also now specify the
+docker endpoint to gather metrics from.
+
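+A minimal sketch of that buffering behavior (hypothetical `Point` and
+`OutputBuffer` types for illustration only, and assuming oldest-first
+eviction, which the release note does not specify):
+
+```go
+package main
+
+import "fmt"
+
+type Point struct {
+	Name  string
+	Value float64
+}
+
+// OutputBuffer holds at most limit points for a single output.
+type OutputBuffer struct {
+	limit  int
+	points []Point
+}
+
+// Add appends a point, evicting the oldest one when the buffer is full.
+func (b *OutputBuffer) Add(p Point) {
+	if len(b.points) >= b.limit {
+		b.points = b.points[1:] // drop the oldest point
+	}
+	b.points = append(b.points, p)
+}
+
+// Flush writes the buffered points and clears the buffer only on success.
+func (b *OutputBuffer) Flush(write func([]Point) error) error {
+	if err := write(b.points); err != nil {
+		return err // points stay buffered for the next flush
+	}
+	b.points = b.points[:0]
+	return nil
+}
+
+func main() {
+	buf := &OutputBuffer{limit: 3}
+	for i := 0; i < 5; i++ {
+		buf.Add(Point{Name: "m", Value: float64(i)})
+	}
+	// Only the 3 most recent points survive; a successful write clears them.
+	_ = buf.Flush(func(pts []Point) error {
+		fmt.Println("writing", len(pts), "points") // writing 3 points
+		return nil
+	})
+}
+```
+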
### Features
-- [#509](https://github.com/influxdb/telegraf/pull/509): Flatten JSON arrays with indices. Thanks @psilva261!
+- [#509](https://github.com/influxdata/telegraf/pull/509): Flatten JSON arrays with indices. Thanks @psilva261!
- [#512](https://github.com/influxdata/telegraf/pull/512): Python 3 build script, add lsof dep to package. Thanks @Ormod!
- [#475](https://github.com/influxdata/telegraf/pull/475): Add response time to httpjson plugin. Thanks @titilambert!
- [#519](https://github.com/influxdata/telegraf/pull/519): Added a sensors input based on lm-sensors. Thanks @md14454!
+- [#467](https://github.com/influxdata/telegraf/issues/467): Add option to disable statsd measurement name conversion.
+- [#534](https://github.com/influxdata/telegraf/pull/534): NSQ input plugin. Thanks @allingeek!
+- [#494](https://github.com/influxdata/telegraf/pull/494): Graphite output plugin. Thanks @titilambert!
+- AMQP SSL support. Thanks @ekini!
+- [#539](https://github.com/influxdata/telegraf/pull/539): Reload config on SIGHUP. Thanks @titilambert!
+- [#522](https://github.com/influxdata/telegraf/pull/522): Phusion passenger input plugin. Thanks @kureikain!
+- [#541](https://github.com/influxdata/telegraf/pull/541): Kafka output TLS cert support. Thanks @Ormod!
+- [#551](https://github.com/influxdata/telegraf/pull/551): Statsd UDP read packet size now defaults to 1500 bytes, and is configurable.
+- [#552](https://github.com/influxdata/telegraf/pull/552): Support for collection interval jittering.
+- [#484](https://github.com/influxdata/telegraf/issues/484): Include usage percent with procstat metrics.
+- [#553](https://github.com/influxdata/telegraf/pull/553): Amazon CloudWatch output. Thanks @skwong2!
+- [#503](https://github.com/influxdata/telegraf/pull/503): Support docker endpoint configuration.
+- [#563](https://github.com/influxdata/telegraf/pull/563): Docker plugin overhaul.
+- [#285](https://github.com/influxdata/telegraf/issues/285): Fixed-size buffer of points.
+- [#546](https://github.com/influxdata/telegraf/pull/546): SNMP Input plugin. Thanks @titilambert!
### Bugfixes
-- [#506](https://github.com/influxdb/telegraf/pull/506): Ping input doesn't return response time metric when timeout. Thanks @titilambert!
-- [#508](https://github.com/influxdb/telegraf/pull/508): Fix prometheus cardinality issue with the `net` plugin
+- [#506](https://github.com/influxdata/telegraf/pull/506): Ping input doesn't return response time metric when timeout. Thanks @titilambert!
+- [#508](https://github.com/influxdata/telegraf/pull/508): Fix prometheus cardinality issue with the `net` plugin
+- [#499](https://github.com/influxdata/telegraf/issues/499) & [#502](https://github.com/influxdata/telegraf/issues/502): php fpm unix socket and other fixes, thanks @kureikain!
+- [#543](https://github.com/influxdata/telegraf/issues/543): Statsd packet size sometimes truncated.
+- [#440](https://github.com/influxdata/telegraf/issues/440): Don't query filtered devices for disk stats.
+- [#463](https://github.com/influxdata/telegraf/issues/463): Docker plugin not working on AWS Linux
+- [#568](https://github.com/influxdata/telegraf/issues/568): Multiple output race condition.
## v0.10.0 [2016-01-12]
@@ -51,29 +82,29 @@ configurations overwritten by the upgrade. There is a backup stored at
## v0.2.5 [unreleased]
### Features
-- [#427](https://github.com/influxdb/telegraf/pull/427): zfs plugin: pool stats added. Thanks @allenpetersen!
-- [#428](https://github.com/influxdb/telegraf/pull/428): Amazon Kinesis output. Thanks @jimmystewpot!
-- [#449](https://github.com/influxdb/telegraf/pull/449): influxdb plugin, thanks @mark-rushakoff
+- [#427](https://github.com/influxdata/telegraf/pull/427): zfs plugin: pool stats added. Thanks @allenpetersen!
+- [#428](https://github.com/influxdata/telegraf/pull/428): Amazon Kinesis output. Thanks @jimmystewpot!
+- [#449](https://github.com/influxdata/telegraf/pull/449): influxdb plugin, thanks @mark-rushakoff
### Bugfixes
-- [#430](https://github.com/influxdb/telegraf/issues/430): Network statistics removed in elasticsearch 2.1. Thanks @jipperinbham!
-- [#452](https://github.com/influxdb/telegraf/issues/452): Elasticsearch open file handles error. Thanks @jipperinbham!
+- [#430](https://github.com/influxdata/telegraf/issues/430): Network statistics removed in elasticsearch 2.1. Thanks @jipperinbham!
+- [#452](https://github.com/influxdata/telegraf/issues/452): Elasticsearch open file handles error. Thanks @jipperinbham!
## v0.2.4 [2015-12-08]
### Features
-- [#412](https://github.com/influxdb/telegraf/pull/412): Additional memcached stats. Thanks @mgresser!
-- [#410](https://github.com/influxdb/telegraf/pull/410): Additional redis metrics. Thanks @vlaadbrain!
-- [#414](https://github.com/influxdb/telegraf/issues/414): Jolokia plugin auth parameters
-- [#415](https://github.com/influxdb/telegraf/issues/415): memcached plugin: support unix sockets
-- [#418](https://github.com/influxdb/telegraf/pull/418): memcached plugin additional unit tests.
-- [#408](https://github.com/influxdb/telegraf/pull/408): MailChimp plugin.
-- [#382](https://github.com/influxdb/telegraf/pull/382): Add system wide network protocol stats to `net` plugin.
-- [#401](https://github.com/influxdb/telegraf/pull/401): Support pass/drop/tagpass/tagdrop for outputs. Thanks @oldmantaiter!
+- [#412](https://github.com/influxdata/telegraf/pull/412): Additional memcached stats. Thanks @mgresser!
+- [#410](https://github.com/influxdata/telegraf/pull/410): Additional redis metrics. Thanks @vlaadbrain!
+- [#414](https://github.com/influxdata/telegraf/issues/414): Jolokia plugin auth parameters
+- [#415](https://github.com/influxdata/telegraf/issues/415): memcached plugin: support unix sockets
+- [#418](https://github.com/influxdata/telegraf/pull/418): memcached plugin additional unit tests.
+- [#408](https://github.com/influxdata/telegraf/pull/408): MailChimp plugin.
+- [#382](https://github.com/influxdata/telegraf/pull/382): Add system wide network protocol stats to `net` plugin.
+- [#401](https://github.com/influxdata/telegraf/pull/401): Support pass/drop/tagpass/tagdrop for outputs. Thanks @oldmantaiter!
### Bugfixes
-- [#405](https://github.com/influxdb/telegraf/issues/405): Prometheus output cardinality issue
-- [#388](https://github.com/influxdb/telegraf/issues/388): Fix collection hangup when cpu times decrement.
+- [#405](https://github.com/influxdata/telegraf/issues/405): Prometheus output cardinality issue
+- [#388](https://github.com/influxdata/telegraf/issues/388): Fix collection hangup when cpu times decrement.
## v0.2.3 [2015-11-30]
@@ -102,15 +133,15 @@ same type can be specified, like this:
- Aerospike plugin: tag changed from `host` -> `aerospike_host`
### Features
-- [#379](https://github.com/influxdb/telegraf/pull/379): Riemann output, thanks @allenj!
-- [#375](https://github.com/influxdb/telegraf/pull/375): kafka_consumer service plugin.
-- [#392](https://github.com/influxdb/telegraf/pull/392): Procstat plugin can now accept pgrep -f pattern, thanks @ecarreras!
-- [#383](https://github.com/influxdb/telegraf/pull/383): Specify plugins as a list.
-- [#354](https://github.com/influxdb/telegraf/pull/354): Add ability to specify multiple metrics in one statsd line. Thanks @MerlinDMC!
+- [#379](https://github.com/influxdata/telegraf/pull/379): Riemann output, thanks @allenj!
+- [#375](https://github.com/influxdata/telegraf/pull/375): kafka_consumer service plugin.
+- [#392](https://github.com/influxdata/telegraf/pull/392): Procstat plugin can now accept pgrep -f pattern, thanks @ecarreras!
+- [#383](https://github.com/influxdata/telegraf/pull/383): Specify plugins as a list.
+- [#354](https://github.com/influxdata/telegraf/pull/354): Add ability to specify multiple metrics in one statsd line. Thanks @MerlinDMC!
### Bugfixes
-- [#371](https://github.com/influxdb/telegraf/issues/371): Kafka consumer plugin not functioning.
-- [#389](https://github.com/influxdb/telegraf/issues/389): NaN value panic
+- [#371](https://github.com/influxdata/telegraf/issues/371): Kafka consumer plugin not functioning.
+- [#389](https://github.com/influxdata/telegraf/issues/389): NaN value panic
## v0.2.2 [2015-11-18]
@@ -119,7 +150,7 @@ same type can be specified, like this:
lists of servers/URLs. 0.2.2 is being released solely to fix that bug
### Bugfixes
-- [#377](https://github.com/influxdb/telegraf/pull/377): Fix for duplicate slices in inputs.
+- [#377](https://github.com/influxdata/telegraf/pull/377): Fix for duplicate slices in inputs.
## v0.2.1 [2015-11-16]
@@ -136,22 +167,22 @@ changed to just run docker commands in the Makefile. See `make docker-run` and
same type.
### Features
-- [#325](https://github.com/influxdb/telegraf/pull/325): NSQ output. Thanks @jrxFive!
-- [#318](https://github.com/influxdb/telegraf/pull/318): Prometheus output. Thanks @oldmantaiter!
-- [#338](https://github.com/influxdb/telegraf/pull/338): Restart Telegraf on package upgrade. Thanks @linsomniac!
-- [#337](https://github.com/influxdb/telegraf/pull/337): Jolokia plugin, thanks @saiello!
-- [#350](https://github.com/influxdb/telegraf/pull/350): Amon output.
-- [#365](https://github.com/influxdb/telegraf/pull/365): Twemproxy plugin by @codeb2cc
-- [#317](https://github.com/influxdb/telegraf/issues/317): ZFS plugin, thanks @cornerot!
-- [#364](https://github.com/influxdb/telegraf/pull/364): Support InfluxDB UDP output.
-- [#370](https://github.com/influxdb/telegraf/pull/370): Support specifying multiple outputs, as lists.
-- [#372](https://github.com/influxdb/telegraf/pull/372): Remove gosigar and update go-dockerclient for FreeBSD support. Thanks @MerlinDMC!
+- [#325](https://github.com/influxdata/telegraf/pull/325): NSQ output. Thanks @jrxFive!
+- [#318](https://github.com/influxdata/telegraf/pull/318): Prometheus output. Thanks @oldmantaiter!
+- [#338](https://github.com/influxdata/telegraf/pull/338): Restart Telegraf on package upgrade. Thanks @linsomniac!
+- [#337](https://github.com/influxdata/telegraf/pull/337): Jolokia plugin, thanks @saiello!
+- [#350](https://github.com/influxdata/telegraf/pull/350): Amon output.
+- [#365](https://github.com/influxdata/telegraf/pull/365): Twemproxy plugin by @codeb2cc
+- [#317](https://github.com/influxdata/telegraf/issues/317): ZFS plugin, thanks @cornerot!
+- [#364](https://github.com/influxdata/telegraf/pull/364): Support InfluxDB UDP output.
+- [#370](https://github.com/influxdata/telegraf/pull/370): Support specifying multiple outputs, as lists.
+- [#372](https://github.com/influxdata/telegraf/pull/372): Remove gosigar and update go-dockerclient for FreeBSD support. Thanks @MerlinDMC!
### Bugfixes
-- [#331](https://github.com/influxdb/telegraf/pull/331): Dont overwrite host tag in redis plugin.
-- [#336](https://github.com/influxdb/telegraf/pull/336): Mongodb plugin should take 2 measurements.
-- [#351](https://github.com/influxdb/telegraf/issues/317): Fix continual "CREATE DATABASE" in writes
-- [#360](https://github.com/influxdb/telegraf/pull/360): Apply prefix before ShouldPass check. Thanks @sotfo!
+- [#331](https://github.com/influxdata/telegraf/pull/331): Don't overwrite host tag in redis plugin.
+- [#336](https://github.com/influxdata/telegraf/pull/336): Mongodb plugin should take 2 measurements.
+- [#351](https://github.com/influxdata/telegraf/issues/317): Fix continual "CREATE DATABASE" in writes
+- [#360](https://github.com/influxdata/telegraf/pull/360): Apply prefix before ShouldPass check. Thanks @sotfo!
## v0.2.0 [2015-10-27]
@@ -172,38 +203,38 @@ be controlled via the `round_interval` and `flush_jitter` config options.
- Telegraf will now retry metric flushes twice
### Features
-- [#205](https://github.com/influxdb/telegraf/issues/205): Include per-db redis keyspace info
-- [#226](https://github.com/influxdb/telegraf/pull/226): Add timestamps to points in Kafka/AMQP outputs. Thanks @ekini
-- [#90](https://github.com/influxdb/telegraf/issues/90): Add Docker labels to tags in docker plugin
-- [#223](https://github.com/influxdb/telegraf/pull/223): Add port tag to nginx plugin. Thanks @neezgee!
-- [#227](https://github.com/influxdb/telegraf/pull/227): Add command intervals to exec plugin. Thanks @jpalay!
-- [#241](https://github.com/influxdb/telegraf/pull/241): MQTT Output. Thanks @shirou!
+- [#205](https://github.com/influxdata/telegraf/issues/205): Include per-db redis keyspace info
+- [#226](https://github.com/influxdata/telegraf/pull/226): Add timestamps to points in Kafka/AMQP outputs. Thanks @ekini
+- [#90](https://github.com/influxdata/telegraf/issues/90): Add Docker labels to tags in docker plugin
+- [#223](https://github.com/influxdata/telegraf/pull/223): Add port tag to nginx plugin. Thanks @neezgee!
+- [#227](https://github.com/influxdata/telegraf/pull/227): Add command intervals to exec plugin. Thanks @jpalay!
+- [#241](https://github.com/influxdata/telegraf/pull/241): MQTT Output. Thanks @shirou!
- Memory plugin: cached and buffered measurements re-added
- Logging: additional logging for each collection interval, track the number
of metrics collected and from how many inputs.
-- [#240](https://github.com/influxdb/telegraf/pull/240): procstat plugin, thanks @ranjib!
-- [#244](https://github.com/influxdb/telegraf/pull/244): netstat plugin, thanks @shirou!
-- [#262](https://github.com/influxdb/telegraf/pull/262): zookeeper plugin, thanks @jrxFive!
-- [#237](https://github.com/influxdb/telegraf/pull/237): statsd service plugin, thanks @sparrc
-- [#273](https://github.com/influxdb/telegraf/pull/273): puppet agent plugin, thats @jrxFive!
-- [#280](https://github.com/influxdb/telegraf/issues/280): Use InfluxDB client v2.
-- [#281](https://github.com/influxdb/telegraf/issues/281): Eliminate need to deep copy Batch Points.
-- [#286](https://github.com/influxdb/telegraf/issues/286): bcache plugin, thanks @cornerot!
-- [#287](https://github.com/influxdb/telegraf/issues/287): Batch AMQP output, thanks @ekini!
-- [#301](https://github.com/influxdb/telegraf/issues/301): Collect on even intervals
-- [#298](https://github.com/influxdb/telegraf/pull/298): Support retrying output writes
-- [#300](https://github.com/influxdb/telegraf/issues/300): aerospike plugin. Thanks @oldmantaiter!
-- [#322](https://github.com/influxdb/telegraf/issues/322): Librato output. Thanks @jipperinbham!
+- [#240](https://github.com/influxdata/telegraf/pull/240): procstat plugin, thanks @ranjib!
+- [#244](https://github.com/influxdata/telegraf/pull/244): netstat plugin, thanks @shirou!
+- [#262](https://github.com/influxdata/telegraf/pull/262): zookeeper plugin, thanks @jrxFive!
+- [#237](https://github.com/influxdata/telegraf/pull/237): statsd service plugin, thanks @sparrc
+- [#273](https://github.com/influxdata/telegraf/pull/273): puppet agent plugin, thanks @jrxFive!
+- [#280](https://github.com/influxdata/telegraf/issues/280): Use InfluxDB client v2.
+- [#281](https://github.com/influxdata/telegraf/issues/281): Eliminate need to deep copy Batch Points.
+- [#286](https://github.com/influxdata/telegraf/issues/286): bcache plugin, thanks @cornerot!
+- [#287](https://github.com/influxdata/telegraf/issues/287): Batch AMQP output, thanks @ekini!
+- [#301](https://github.com/influxdata/telegraf/issues/301): Collect on even intervals
+- [#298](https://github.com/influxdata/telegraf/pull/298): Support retrying output writes
+- [#300](https://github.com/influxdata/telegraf/issues/300): aerospike plugin. Thanks @oldmantaiter!
+- [#322](https://github.com/influxdata/telegraf/issues/322): Librato output. Thanks @jipperinbham!
### Bugfixes
-- [#228](https://github.com/influxdb/telegraf/pull/228): New version of package will replace old one. Thanks @ekini!
-- [#232](https://github.com/influxdb/telegraf/pull/232): Fix bashism run during deb package installation. Thanks @yankcrime!
-- [#261](https://github.com/influxdb/telegraf/issues/260): RabbitMQ panics if wrong credentials given. Thanks @ekini!
-- [#245](https://github.com/influxdb/telegraf/issues/245): Document Exec plugin example. Thanks @ekini!
-- [#264](https://github.com/influxdb/telegraf/issues/264): logrotate config file fixes. Thanks @linsomniac!
-- [#290](https://github.com/influxdb/telegraf/issues/290): Fix some plugins sending their values as strings.
-- [#289](https://github.com/influxdb/telegraf/issues/289): Fix accumulator panic on nil tags.
-- [#302](https://github.com/influxdb/telegraf/issues/302): Fix `[tags]` getting applied, thanks @gotyaoi!
+- [#228](https://github.com/influxdata/telegraf/pull/228): New version of package will replace old one. Thanks @ekini!
+- [#232](https://github.com/influxdata/telegraf/pull/232): Fix bashism run during deb package installation. Thanks @yankcrime!
+- [#261](https://github.com/influxdata/telegraf/issues/260): RabbitMQ panics if wrong credentials given. Thanks @ekini!
+- [#245](https://github.com/influxdata/telegraf/issues/245): Document Exec plugin example. Thanks @ekini!
+- [#264](https://github.com/influxdata/telegraf/issues/264): logrotate config file fixes. Thanks @linsomniac!
+- [#290](https://github.com/influxdata/telegraf/issues/290): Fix some plugins sending their values as strings.
+- [#289](https://github.com/influxdata/telegraf/issues/289): Fix accumulator panic on nil tags.
+- [#302](https://github.com/influxdata/telegraf/issues/302): Fix `[tags]` getting applied, thanks @gotyaoi!
## v0.1.9 [2015-09-22]
@@ -229,27 +260,27 @@ have been renamed for consistency. Some measurements have also been removed from
re-added in a "verbose" mode if there is demand for it.
### Features
-- [#143](https://github.com/influxdb/telegraf/issues/143): InfluxDB clustering support
-- [#181](https://github.com/influxdb/telegraf/issues/181): Makefile GOBIN support. Thanks @Vye!
-- [#203](https://github.com/influxdb/telegraf/pull/200): AMQP output. Thanks @ekini!
-- [#182](https://github.com/influxdb/telegraf/pull/182): OpenTSDB output. Thanks @rplessl!
-- [#187](https://github.com/influxdb/telegraf/pull/187): Retry output sink connections on startup.
-- [#220](https://github.com/influxdb/telegraf/pull/220): Add port tag to apache plugin. Thanks @neezgee!
-- [#217](https://github.com/influxdb/telegraf/pull/217): Add filtering for output sinks
+- [#143](https://github.com/influxdata/telegraf/issues/143): InfluxDB clustering support
+- [#181](https://github.com/influxdata/telegraf/issues/181): Makefile GOBIN support. Thanks @Vye!
+- [#203](https://github.com/influxdata/telegraf/pull/200): AMQP output. Thanks @ekini!
+- [#182](https://github.com/influxdata/telegraf/pull/182): OpenTSDB output. Thanks @rplessl!
+- [#187](https://github.com/influxdata/telegraf/pull/187): Retry output sink connections on startup.
+- [#220](https://github.com/influxdata/telegraf/pull/220): Add port tag to apache plugin. Thanks @neezgee!
+- [#217](https://github.com/influxdata/telegraf/pull/217): Add filtering for output sinks
and filtering when specifying a config file.
### Bugfixes
-- [#170](https://github.com/influxdb/telegraf/issues/170): Systemd support
-- [#175](https://github.com/influxdb/telegraf/issues/175): Set write precision before gathering metrics
-- [#178](https://github.com/influxdb/telegraf/issues/178): redis plugin, multiple server thread hang bug
+- [#170](https://github.com/influxdata/telegraf/issues/170): Systemd support
+- [#175](https://github.com/influxdata/telegraf/issues/175): Set write precision before gathering metrics
+- [#178](https://github.com/influxdata/telegraf/issues/178): redis plugin, multiple server thread hang bug
- Fix net plugin on darwin
-- [#84](https://github.com/influxdb/telegraf/issues/84): Fix docker plugin on CentOS. Thanks @neezgee!
-- [#189](https://github.com/influxdb/telegraf/pull/189): Fix mem_used_perc. Thanks @mced!
-- [#192](https://github.com/influxdb/telegraf/issues/192): Increase compatibility of postgresql plugin. Now supports versions 8.1+
-- [#203](https://github.com/influxdb/telegraf/issues/203): EL5 rpm support. Thanks @ekini!
-- [#206](https://github.com/influxdb/telegraf/issues/206): CPU steal/guest values wrong on linux.
-- [#212](https://github.com/influxdb/telegraf/issues/212): Add hashbang to postinstall script. Thanks @ekini!
-- [#212](https://github.com/influxdb/telegraf/issues/212): Fix makefile warning. Thanks @ekini!
+- [#84](https://github.com/influxdata/telegraf/issues/84): Fix docker plugin on CentOS. Thanks @neezgee!
+- [#189](https://github.com/influxdata/telegraf/pull/189): Fix mem_used_perc. Thanks @mced!
+- [#192](https://github.com/influxdata/telegraf/issues/192): Increase compatibility of postgresql plugin. Now supports versions 8.1+
+- [#203](https://github.com/influxdata/telegraf/issues/203): EL5 rpm support. Thanks @ekini!
+- [#206](https://github.com/influxdata/telegraf/issues/206): CPU steal/guest values wrong on linux.
+- [#212](https://github.com/influxdata/telegraf/issues/212): Add hashbang to postinstall script. Thanks @ekini!
+- [#212](https://github.com/influxdata/telegraf/issues/212): Fix makefile warning. Thanks @ekini!
## v0.1.8 [2015-09-04]
@@ -258,106 +289,106 @@ and filtering when specifying a config file.
- Now using Go 1.5 to build telegraf
### Features
-- [#150](https://github.com/influxdb/telegraf/pull/150): Add Host Uptime metric to system plugin
-- [#158](https://github.com/influxdb/telegraf/pull/158): Apache Plugin. Thanks @KPACHbIuLLIAnO4
-- [#159](https://github.com/influxdb/telegraf/pull/159): Use second precision for InfluxDB writes
-- [#165](https://github.com/influxdb/telegraf/pull/165): Add additional metrics to mysql plugin. Thanks @nickscript0
-- [#162](https://github.com/influxdb/telegraf/pull/162): Write UTC by default, provide option
-- [#166](https://github.com/influxdb/telegraf/pull/166): Upload binaries to S3
-- [#169](https://github.com/influxdb/telegraf/pull/169): Ping plugin
+- [#150](https://github.com/influxdata/telegraf/pull/150): Add Host Uptime metric to system plugin
+- [#158](https://github.com/influxdata/telegraf/pull/158): Apache Plugin. Thanks @KPACHbIuLLIAnO4
+- [#159](https://github.com/influxdata/telegraf/pull/159): Use second precision for InfluxDB writes
+- [#165](https://github.com/influxdata/telegraf/pull/165): Add additional metrics to mysql plugin. Thanks @nickscript0
+- [#162](https://github.com/influxdata/telegraf/pull/162): Write UTC by default, provide option
+- [#166](https://github.com/influxdata/telegraf/pull/166): Upload binaries to S3
+- [#169](https://github.com/influxdata/telegraf/pull/169): Ping plugin
### Bugfixes
## v0.1.7 [2015-08-28]
### Features
-- [#38](https://github.com/influxdb/telegraf/pull/38): Kafka output producer.
-- [#133](https://github.com/influxdb/telegraf/pull/133): Add plugin.Gather error logging. Thanks @nickscript0!
-- [#136](https://github.com/influxdb/telegraf/issues/136): Add a -usage flag for printing usage of a single plugin.
-- [#137](https://github.com/influxdb/telegraf/issues/137): Memcached: fix when a value contains a space
-- [#138](https://github.com/influxdb/telegraf/issues/138): MySQL server address tag.
-- [#142](https://github.com/influxdb/telegraf/pull/142): Add Description and SampleConfig funcs to output interface
+- [#38](https://github.com/influxdata/telegraf/pull/38): Kafka output producer.
+- [#133](https://github.com/influxdata/telegraf/pull/133): Add plugin.Gather error logging. Thanks @nickscript0!
+- [#136](https://github.com/influxdata/telegraf/issues/136): Add a -usage flag for printing usage of a single plugin.
+- [#137](https://github.com/influxdata/telegraf/issues/137): Memcached: fix when a value contains a space
+- [#138](https://github.com/influxdata/telegraf/issues/138): MySQL server address tag.
+- [#142](https://github.com/influxdata/telegraf/pull/142): Add Description and SampleConfig funcs to output interface
- Indent the toml config file for readability
### Bugfixes
-- [#128](https://github.com/influxdb/telegraf/issues/128): system_load measurement missing.
-- [#129](https://github.com/influxdb/telegraf/issues/129): Latest pkg url fix.
-- [#131](https://github.com/influxdb/telegraf/issues/131): Fix memory reporting on linux & darwin. Thanks @subhachandrachandra!
-- [#140](https://github.com/influxdb/telegraf/issues/140): Memory plugin prec->perc typo fix. Thanks @brunoqc!
+- [#128](https://github.com/influxdata/telegraf/issues/128): system_load measurement missing.
+- [#129](https://github.com/influxdata/telegraf/issues/129): Latest pkg url fix.
+- [#131](https://github.com/influxdata/telegraf/issues/131): Fix memory reporting on linux & darwin. Thanks @subhachandrachandra!
+- [#140](https://github.com/influxdata/telegraf/issues/140): Memory plugin prec->perc typo fix. Thanks @brunoqc!
## v0.1.6 [2015-08-20]
### Features
-- [#112](https://github.com/influxdb/telegraf/pull/112): Datadog output. Thanks @jipperinbham!
-- [#116](https://github.com/influxdb/telegraf/pull/116): Use godep to vendor all dependencies
-- [#120](https://github.com/influxdb/telegraf/pull/120): Httpjson plugin. Thanks @jpalay & @alvaromorales!
+- [#112](https://github.com/influxdata/telegraf/pull/112): Datadog output. Thanks @jipperinbham!
+- [#116](https://github.com/influxdata/telegraf/pull/116): Use godep to vendor all dependencies
+- [#120](https://github.com/influxdata/telegraf/pull/120): Httpjson plugin. Thanks @jpalay & @alvaromorales!
### Bugfixes
-- [#113](https://github.com/influxdb/telegraf/issues/113): Update README with Telegraf/InfluxDB compatibility
-- [#118](https://github.com/influxdb/telegraf/pull/118): Fix for disk usage stats in Windows. Thanks @srfraser!
-- [#122](https://github.com/influxdb/telegraf/issues/122): Fix for DiskUsage segv fault. Thanks @srfraser!
-- [#126](https://github.com/influxdb/telegraf/issues/126): Nginx plugin not catching net.SplitHostPort error
+- [#113](https://github.com/influxdata/telegraf/issues/113): Update README with Telegraf/InfluxDB compatibility
+- [#118](https://github.com/influxdata/telegraf/pull/118): Fix for disk usage stats in Windows. Thanks @srfraser!
+- [#122](https://github.com/influxdata/telegraf/issues/122): Fix for DiskUsage segv fault. Thanks @srfraser!
+- [#126](https://github.com/influxdata/telegraf/issues/126): Nginx plugin not catching net.SplitHostPort error
## v0.1.5 [2015-08-13]
### Features
-- [#54](https://github.com/influxdb/telegraf/pull/54): MongoDB plugin. Thanks @jipperinbham!
-- [#55](https://github.com/influxdb/telegraf/pull/55): Elasticsearch plugin. Thanks @brocaar!
-- [#71](https://github.com/influxdb/telegraf/pull/71): HAProxy plugin. Thanks @kureikain!
-- [#72](https://github.com/influxdb/telegraf/pull/72): Adding TokuDB metrics to MySQL. Thanks vadimtk!
-- [#73](https://github.com/influxdb/telegraf/pull/73): RabbitMQ plugin. Thanks @ianunruh!
-- [#77](https://github.com/influxdb/telegraf/issues/77): Automatically create database.
-- [#79](https://github.com/influxdb/telegraf/pull/56): Nginx plugin. Thanks @codeb2cc!
-- [#86](https://github.com/influxdb/telegraf/pull/86): Lustre2 plugin. Thanks srfraser!
-- [#91](https://github.com/influxdb/telegraf/pull/91): Unit testing
-- [#92](https://github.com/influxdb/telegraf/pull/92): Exec plugin. Thanks @alvaromorales!
-- [#98](https://github.com/influxdb/telegraf/pull/98): LeoFS plugin. Thanks @mocchira!
-- [#103](https://github.com/influxdb/telegraf/pull/103): Filter by metric tags. Thanks @srfraser!
-- [#106](https://github.com/influxdb/telegraf/pull/106): Options to filter plugins on startup. Thanks @zepouet!
-- [#107](https://github.com/influxdb/telegraf/pull/107): Multiple outputs beyong influxdb. Thanks @jipperinbham!
-- [#108](https://github.com/influxdb/telegraf/issues/108): Support setting per-CPU and total-CPU gathering.
-- [#111](https://github.com/influxdb/telegraf/pull/111): Report CPU Usage in cpu plugin. Thanks @jpalay!
+- [#54](https://github.com/influxdata/telegraf/pull/54): MongoDB plugin. Thanks @jipperinbham!
+- [#55](https://github.com/influxdata/telegraf/pull/55): Elasticsearch plugin. Thanks @brocaar!
+- [#71](https://github.com/influxdata/telegraf/pull/71): HAProxy plugin. Thanks @kureikain!
+- [#72](https://github.com/influxdata/telegraf/pull/72): Adding TokuDB metrics to MySQL. Thanks @vadimtk!
+- [#73](https://github.com/influxdata/telegraf/pull/73): RabbitMQ plugin. Thanks @ianunruh!
+- [#77](https://github.com/influxdata/telegraf/issues/77): Automatically create database.
+- [#79](https://github.com/influxdata/telegraf/pull/56): Nginx plugin. Thanks @codeb2cc!
+- [#86](https://github.com/influxdata/telegraf/pull/86): Lustre2 plugin. Thanks @srfraser!
+- [#91](https://github.com/influxdata/telegraf/pull/91): Unit testing
+- [#92](https://github.com/influxdata/telegraf/pull/92): Exec plugin. Thanks @alvaromorales!
+- [#98](https://github.com/influxdata/telegraf/pull/98): LeoFS plugin. Thanks @mocchira!
+- [#103](https://github.com/influxdata/telegraf/pull/103): Filter by metric tags. Thanks @srfraser!
+- [#106](https://github.com/influxdata/telegraf/pull/106): Options to filter plugins on startup. Thanks @zepouet!
+- [#107](https://github.com/influxdata/telegraf/pull/107): Multiple outputs beyond influxdb. Thanks @jipperinbham!
+- [#108](https://github.com/influxdata/telegraf/issues/108): Support setting per-CPU and total-CPU gathering.
+- [#111](https://github.com/influxdata/telegraf/pull/111): Report CPU Usage in cpu plugin. Thanks @jpalay!
### Bugfixes
-- [#85](https://github.com/influxdb/telegraf/pull/85): Fix GetLocalHost testutil function for mac users
-- [#89](https://github.com/influxdb/telegraf/pull/89): go fmt fixes
-- [#94](https://github.com/influxdb/telegraf/pull/94): Fix for issue #93, explicitly call sarama.v1 -> sarama
-- [#101](https://github.com/influxdb/telegraf/issues/101): switch back from master branch if building locally
-- [#99](https://github.com/influxdb/telegraf/issues/99): update integer output to new InfluxDB line protocol format
+- [#85](https://github.com/influxdata/telegraf/pull/85): Fix GetLocalHost testutil function for mac users
+- [#89](https://github.com/influxdata/telegraf/pull/89): go fmt fixes
+- [#94](https://github.com/influxdata/telegraf/pull/94): Fix for issue #93, explicitly call sarama.v1 -> sarama
+- [#101](https://github.com/influxdata/telegraf/issues/101): switch back from master branch if building locally
+- [#99](https://github.com/influxdata/telegraf/issues/99): update integer output to new InfluxDB line protocol format
## v0.1.4 [2015-07-09]
### Features
-- [#56](https://github.com/influxdb/telegraf/pull/56): Update README for Kafka plugin. Thanks @EmilS!
+- [#56](https://github.com/influxdata/telegraf/pull/56): Update README for Kafka plugin. Thanks @EmilS!
### Bugfixes
-- [#50](https://github.com/influxdb/telegraf/pull/50): Fix init.sh script to use telegraf directory. Thanks @jseriff!
-- [#52](https://github.com/influxdb/telegraf/pull/52): Update CHANGELOG to reference updated directory. Thanks @benfb!
+- [#50](https://github.com/influxdata/telegraf/pull/50): Fix init.sh script to use telegraf directory. Thanks @jseriff!
+- [#52](https://github.com/influxdata/telegraf/pull/52): Update CHANGELOG to reference updated directory. Thanks @benfb!
## v0.1.3 [2015-07-05]
### Features
-- [#35](https://github.com/influxdb/telegraf/pull/35): Add Kafka plugin. Thanks @EmilS!
-- [#47](https://github.com/influxdb/telegraf/pull/47): Add RethinkDB plugin. Thanks @jipperinbham!
+- [#35](https://github.com/influxdata/telegraf/pull/35): Add Kafka plugin. Thanks @EmilS!
+- [#47](https://github.com/influxdata/telegraf/pull/47): Add RethinkDB plugin. Thanks @jipperinbham!
### Bugfixes
-- [#45](https://github.com/influxdb/telegraf/pull/45): Skip disk tags that don't have a value. Thanks @jhofeditz!
-- [#43](https://github.com/influxdb/telegraf/pull/43): Fix bug in MySQL plugin. Thanks @marcosnils!
+- [#45](https://github.com/influxdata/telegraf/pull/45): Skip disk tags that don't have a value. Thanks @jhofeditz!
+- [#43](https://github.com/influxdata/telegraf/pull/43): Fix bug in MySQL plugin. Thanks @marcosnils!
## v0.1.2 [2015-07-01]
### Features
-- [#12](https://github.com/influxdb/telegraf/pull/12): Add Linux/ARM to the list of built binaries. Thanks @voxxit!
-- [#14](https://github.com/influxdb/telegraf/pull/14): Clarify the S3 buckets that Telegraf is pushed to.
-- [#16](https://github.com/influxdb/telegraf/pull/16): Convert Redis to use URI, support Redis AUTH. Thanks @jipperinbham!
-- [#21](https://github.com/influxdb/telegraf/pull/21): Add memcached plugin. Thanks @Yukki!
+- [#12](https://github.com/influxdata/telegraf/pull/12): Add Linux/ARM to the list of built binaries. Thanks @voxxit!
+- [#14](https://github.com/influxdata/telegraf/pull/14): Clarify the S3 buckets that Telegraf is pushed to.
+- [#16](https://github.com/influxdata/telegraf/pull/16): Convert Redis to use URI, support Redis AUTH. Thanks @jipperinbham!
+- [#21](https://github.com/influxdata/telegraf/pull/21): Add memcached plugin. Thanks @Yukki!
### Bugfixes
-- [#13](https://github.com/influxdb/telegraf/pull/13): Fix the packaging script.
-- [#19](https://github.com/influxdb/telegraf/pull/19): Add host name to metric tags. Thanks @sherifzain!
-- [#20](https://github.com/influxdb/telegraf/pull/20): Fix race condition with accumulator mutex. Thanks @nkatsaros!
-- [#23](https://github.com/influxdb/telegraf/pull/23): Change name of folder for packages. Thanks @colinrymer!
-- [#32](https://github.com/influxdb/telegraf/pull/32): Fix spelling of memoory -> memory. Thanks @tylernisonoff!
+- [#13](https://github.com/influxdata/telegraf/pull/13): Fix the packaging script.
+- [#19](https://github.com/influxdata/telegraf/pull/19): Add host name to metric tags. Thanks @sherifzain!
+- [#20](https://github.com/influxdata/telegraf/pull/20): Fix race condition with accumulator mutex. Thanks @nkatsaros!
+- [#23](https://github.com/influxdata/telegraf/pull/23): Change name of folder for packages. Thanks @colinrymer!
+- [#32](https://github.com/influxdata/telegraf/pull/32): Fix spelling of memoory -> memory. Thanks @tylernisonoff!
## v0.1.1 [2015-06-19]
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index a47ad2f17..5e1b406d4 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -1,8 +1,24 @@
+## Steps for Contributing:
+
+1. [Sign the CLA](https://github.com/influxdata/telegraf/blob/master/CONTRIBUTING.md#sign-the-cla)
+1. Write your input or output plugin (see below for details)
+1. Add your plugin to `plugins/inputs/all/all.go` or `plugins/outputs/all/all.go`
+1. If your plugin requires a new Go package,
+[add it](https://github.com/influxdata/telegraf/blob/master/CONTRIBUTING.md#adding-a-dependency)
+
## Sign the CLA
Before we can merge a pull request, you will need to sign the CLA,
which can be found [on our website](http://influxdb.com/community/cla.html).
+## Adding a dependency
+
+Assuming you can already build the project, run these in the telegraf directory:
+
+1. `go get github.com/sparrc/gdm`
+1. `gdm restore`
+1. `gdm save`
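+
+The `gdm save` step records the new package and its revision in the
+`Godeps` file (the same file updated in this change).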
+
## Input Plugins
This section is for developers who want to create new collection inputs.
@@ -19,7 +35,7 @@ and submit new inputs.
* Input Plugins should call `inputs.Add` in their `init` function to register themselves.
See below for a quick example.
* Input Plugins must be added to the
-`github.com/influxdb/telegraf/plugins/inputs/all/all.go` file.
+`github.com/influxdata/telegraf/plugins/inputs/all/all.go` file.
* The `SampleConfig` function should return valid toml that describes how the
plugin can be configured. This is included in `telegraf -sample-config`.
* The `Description` function should say in one line what this plugin does.
@@ -75,7 +91,7 @@ package simple
// simple.go
-import "github.com/influxdb/telegraf/plugins/inputs"
+import "github.com/influxdata/telegraf/plugins/inputs"
type Simple struct {
Ok bool
@@ -147,7 +163,7 @@ similar constructs.
* Outputs should call `outputs.Add` in their `init` function to register themselves.
See below for a quick example.
* To be available within Telegraf itself, plugins must add themselves to the
-`github.com/influxdb/telegraf/plugins/outputs/all/all.go` file.
+`github.com/influxdata/telegraf/plugins/outputs/all/all.go` file.
* The `SampleConfig` function should return valid toml that describes how the
output can be configured. This is included in `telegraf -sample-config`.
* The `Description` function should say in one line what this output does.
@@ -171,7 +187,7 @@ package simpleoutput
// simpleoutput.go
-import "github.com/influxdb/telegraf/plugins/outputs"
+import "github.com/influxdata/telegraf/plugins/outputs"
type Simple struct {
Ok bool
@@ -252,7 +268,7 @@ which would take some time to replicate.
To overcome this situation we've decided to use docker containers to provide a
fast and reproducible environment to test those services which require it.
For other situations
-(i.e: https://github.com/influxdb/telegraf/blob/master/plugins/redis/redis_test.go)
+(e.g. https://github.com/influxdata/telegraf/blob/master/plugins/redis/redis_test.go)
a simple mock will suffice.
To execute Telegraf tests follow these simple steps:
diff --git a/Godeps b/Godeps
index 1b427674a..2baca6b31 100644
--- a/Godeps
+++ b/Godeps
@@ -1,9 +1,9 @@
git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git dbd8d5c40a582eb9adacde36b47932b3a3ad0034
github.com/Shopify/sarama d37c73f2b2bce85f7fa16b6a550d26c5372892ef
-github.com/Sirupsen/logrus 446d1c146faa8ed3f4218f056fcd165f6bcfda81
+github.com/Sirupsen/logrus f7f79f729e0fbe2fcc061db48a9ba0263f588252
github.com/amir/raidman 6a8e089bbe32e6b907feae5ba688841974b3c339
github.com/armon/go-metrics 345426c77237ece5dab0e1605c3e4b35c3f54757
-github.com/aws/aws-sdk-go c4c1a1a2a076858fe18b2be674d833c796c45b09
+github.com/aws/aws-sdk-go 3ad0b07b44c22c21c734d1094981540b7a11e942
github.com/beorn7/perks b965b613227fddccbfffe13eae360ed3fa822f8d
github.com/boltdb/bolt 6465994716bf6400605746e79224cf1e7ed68725
github.com/cenkalti/backoff 4dc77674aceaabba2c7e3da25d4c823edfb73f99
@@ -11,18 +11,20 @@ github.com/dancannon/gorethink ff457cac6a529d9749d841a733d76e8305cba3c8
github.com/davecgh/go-spew 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d
github.com/eapache/go-resiliency b86b1ec0dd4209a588dc1285cdd471e73525c0b3
github.com/eapache/queue ded5959c0d4e360646dc9e9908cff48666781367
-github.com/fsouza/go-dockerclient 2fb7694010aa553998ed513dc8805ab00708077a
+github.com/fsouza/go-dockerclient 6fb38e6bb3d544d7eb5b55fd396cd4e6850802d8
github.com/go-ini/ini afbd495e5aaea13597b5e14fe514ddeaa4d76fc3
-github.com/go-sql-driver/mysql 6fd058ce0d6b7ee43174e80d5a3e7f483c4dfbe5
+github.com/go-sql-driver/mysql 72ea5d0b32a04c67710bf63e97095d82aea5f352
github.com/gogo/protobuf c57e439bad574c2e0877ff18d514badcfced004d
github.com/golang/protobuf 2402d76f3d41f928c7902a765dfc872356dd3aad
github.com/golang/snappy 723cc1e459b8eea2dea4583200fd60757d40097a
github.com/gonuts/go-shellquote e842a11b24c6abfb3dd27af69a17f482e4b483c2
github.com/hailocab/go-hostpool 50839ee41f32bfca8d03a183031aa634b2dc1c64
github.com/hashicorp/go-msgpack fa3f63826f7c23912c15263591e65d54d080b458
-github.com/hashicorp/raft d136cd15dfb7876fd7c89cad1995bc4f19ceb294
+github.com/hashicorp/raft b95f335efee1992886864389183ebda0c0a5d0f6
github.com/hashicorp/raft-boltdb d1e82c1ec3f15ee991f7cc7ffd5b67ff6f5bbaee
-github.com/influxdb/influxdb db84a6ed76353905432ff8bd91527c68b3ea1be6
+github.com/influxdata/config bae7cb98197d842374d3b8403905924094930f24
+github.com/influxdata/influxdb 0e0f85a0c1fd1788ae4f9145531b02c539cfa5b5
+github.com/influxdb/influxdb 0e0f85a0c1fd1788ae4f9145531b02c539cfa5b5
github.com/jmespath/go-jmespath c01cf91b011868172fdcd9f41838e80c9d716264
github.com/klauspost/crc32 999f3125931f6557b991b2f8472172bdfa578d38
github.com/lib/pq 8ad2b298cadd691a77015666a5372eae5dbfac8f
@@ -39,14 +41,16 @@ github.com/prometheus/common 0a3005bb37bc411040083a55372e77c405f6464c
github.com/prometheus/procfs 406e5b7bfd8201a36e2bb5f7bdae0b03380c2ce8
github.com/samuel/go-zookeeper 218e9c81c0dd8b3b18172b2bbfad92cc7d6db55f
github.com/shirou/gopsutil 8850f58d7035653e1ab90711481954c8ca1b9813
+github.com/soniah/gosnmp b1b4f885b12c5dcbd021c5cee1c904110de6db7d
github.com/streadway/amqp b4f3ceab0337f013208d31348b578d83c0064744
github.com/stretchr/objx 1a9d0bb9f541897e62256577b352fdbc1fb4fd94
github.com/stretchr/testify f390dcf405f7b83c997eac1b06768bb9f44dec18
github.com/wvanbergen/kafka 1a8639a45164fcc245d5c7b4bd3ccfbd1a0ffbf3
github.com/wvanbergen/kazoo-go 0f768712ae6f76454f987c3356177e138df258f8
golang.org/x/crypto 3760e016850398b85094c4c99e955b8c3dea5711
-golang.org/x/net 99ca920b6037ef77af8a11297150f7f0d8f4ef80
+golang.org/x/net 72aa00c6241a8013dc9b040abb45f57edbe73945
+golang.org/x/text cf4986612c83df6c55578ba198316d1684a9a287
gopkg.in/dancannon/gorethink.v1 e2cef022d0495329dfb0635991de76efcab5cf50
gopkg.in/fatih/pool.v2 cba550ebf9bce999a02e963296d4bc7a486cb715
-gopkg.in/mgo.v2 e30de8ac9ae3b30df7065f766c71f88bba7d4e49
+gopkg.in/mgo.v2 03c9f3ee4c14c8e51ee521a6a7d0425658dd6f64
gopkg.in/yaml.v2 f7716cbe52baa25d2e9b0d0da546fcf909fc16b4
diff --git a/Makefile b/Makefile
index b9de93ffb..9e62cd900 100644
--- a/Makefile
+++ b/Makefile
@@ -21,21 +21,8 @@ dev: prepare
"-X main.Version=$(VERSION)" \
./cmd/telegraf/telegraf.go
-# Build linux 64-bit, 32-bit and arm architectures
-build-linux-bins: prepare
- GOARCH=amd64 GOOS=linux go build -o telegraf_linux_amd64 \
- -ldflags "-X main.Version=$(VERSION)" \
- ./cmd/telegraf/telegraf.go
- GOARCH=386 GOOS=linux go build -o telegraf_linux_386 \
- -ldflags "-X main.Version=$(VERSION)" \
- ./cmd/telegraf/telegraf.go
- GOARCH=arm GOOS=linux go build -o telegraf_linux_arm \
- -ldflags "-X main.Version=$(VERSION)" \
- ./cmd/telegraf/telegraf.go
-
# Get dependencies and use gdm to checkout changesets
prepare:
- go get ./...
go get github.com/sparrc/gdm
gdm restore
@@ -65,6 +52,7 @@ endif
docker run --name nsq -p "4150:4150" -d nsqio/nsq /nsqd
docker run --name mqtt -p "1883:1883" -d ncarlier/mqtt
docker run --name riemann -p "5555:5555" -d blalor/riemann
+ docker run --name snmp -p "31161:31161/udp" -d titilambert/snmpsim
# Run docker containers necessary for CircleCI unit tests
docker-run-circle:
@@ -78,11 +66,12 @@ docker-run-circle:
docker run --name nsq -p "4150:4150" -d nsqio/nsq /nsqd
docker run --name mqtt -p "1883:1883" -d ncarlier/mqtt
docker run --name riemann -p "5555:5555" -d blalor/riemann
+ docker run --name snmp -p "31161:31161/udp" -d titilambert/snmpsim
# Kill all docker containers, ignore errors
docker-kill:
- -docker kill nsq aerospike redis opentsdb rabbitmq postgres memcached mysql kafka mqtt riemann
- -docker rm nsq aerospike redis opentsdb rabbitmq postgres memcached mysql kafka mqtt riemann
+ -docker kill nsq aerospike redis opentsdb rabbitmq postgres memcached mysql kafka mqtt riemann snmp
+ -docker rm nsq aerospike redis opentsdb rabbitmq postgres memcached mysql kafka mqtt riemann snmp
# Run full unit tests using docker containers (includes setup and teardown)
test: docker-kill docker-run
diff --git a/README.md b/README.md
index 31a7590fb..a088620cc 100644
--- a/README.md
+++ b/README.md
@@ -73,19 +73,19 @@ brew install telegraf
Telegraf manages dependencies via [gdm](https://github.com/sparrc/gdm),
which gets installed via the Makefile
-if you don't have it already. You also must build with golang version 1.4+.
+if you don't have it already. You must also build with Go version 1.5+.
1. [Install Go](https://golang.org/doc/install)
2. [Setup your GOPATH](https://golang.org/doc/code.html#GOPATH)
-3. Run `go get github.com/influxdb/telegraf`
-4. Run `cd $GOPATH/src/github.com/influxdb/telegraf`
+3. Run `go get github.com/influxdata/telegraf`
+4. Run `cd $GOPATH/src/github.com/influxdata/telegraf`
5. Run `make`
### How to use it:
```console
$ telegraf -help
-Telegraf, The plugin-driven server agent for reporting metrics into InfluxDB
+Telegraf, The plugin-driven server agent for collecting and reporting metrics.
Usage:
@@ -100,6 +100,8 @@ The flags are:
-input-filter filter the input plugins to enable, separator is :
-output-filter filter the output plugins to enable, separator is :
-usage print usage for a plugin, ie, 'telegraf -usage mysql'
+ -debug print metrics as they're generated to stdout
+ -quiet run in quiet mode
-version print the version to stdout
Examples:
@@ -137,6 +139,7 @@ Currently implemented sources:
* apache
* bcache
* disque
+* docker
* elasticsearch
* exec (generic JSON-emitting executable plugin)
* haproxy
@@ -150,7 +153,9 @@ Currently implemented sources:
* mongodb
* mysql
* nginx
+* nsq
* phpfpm
+* phusion passenger
* ping
* postgresql
* procstat
@@ -163,6 +168,7 @@ Currently implemented sources:
* zfs
* zookeeper
* sensors
+* snmp
* system
* cpu
* mem
@@ -185,9 +191,11 @@ want to add support for another service or third-party API.
* influxdb
* amon
* amqp
+* aws kinesis
+* aws cloudwatch
* datadog
+* graphite
* kafka
-* amazon kinesis
* librato
* mqtt
* nsq
diff --git a/accumulator.go b/accumulator.go
index 429f3a42c..83f61ae99 100644
--- a/accumulator.go
+++ b/accumulator.go
@@ -7,9 +7,9 @@ import (
"sync"
"time"
- "github.com/influxdb/telegraf/internal/config"
+ "github.com/influxdata/telegraf/internal/models"
- "github.com/influxdb/influxdb/client/v2"
+ "github.com/influxdata/influxdb/client/v2"
)
type Accumulator interface {
@@ -29,7 +29,7 @@ type Accumulator interface {
}
func NewAccumulator(
- inputConfig *config.InputConfig,
+ inputConfig *models.InputConfig,
points chan *client.Point,
) Accumulator {
acc := accumulator{}
@@ -47,7 +47,7 @@ type accumulator struct {
debug bool
- inputConfig *config.InputConfig
+ inputConfig *models.InputConfig
prefix string
}
diff --git a/agent.go b/agent.go
index 1af2a1f7c..d0f82145e 100644
--- a/agent.go
+++ b/agent.go
@@ -1,19 +1,21 @@
package telegraf
import (
- "crypto/rand"
+ cryptorand "crypto/rand"
"fmt"
"log"
"math/big"
+ "math/rand"
"os"
"sync"
"time"
- "github.com/influxdb/telegraf/internal/config"
- "github.com/influxdb/telegraf/plugins/inputs"
- "github.com/influxdb/telegraf/plugins/outputs"
+ "github.com/influxdata/telegraf/internal/config"
+ "github.com/influxdata/telegraf/internal/models"
+ "github.com/influxdata/telegraf/plugins/inputs"
+ "github.com/influxdata/telegraf/plugins/outputs"
- "github.com/influxdb/influxdb/client/v2"
+ "github.com/influxdata/influxdb/client/v2"
)
// Agent runs telegraf and collects data based on the given config
@@ -58,7 +60,7 @@ func (a *Agent) Connect() error {
}
err := o.Output.Connect()
if err != nil {
- log.Printf("Failed to connect to output %s, retrying in 15s\n", o.Name)
+ log.Printf("Failed to connect to output %s, retrying in 15s, error was '%s' \n", o.Name, err)
time.Sleep(15 * time.Second)
err = o.Output.Connect()
if err != nil {
@@ -92,6 +94,7 @@ func (a *Agent) gatherParallel(pointChan chan *client.Point) error {
start := time.Now()
counter := 0
+ jitter := a.Config.Agent.CollectionJitter.Duration.Nanoseconds()
for _, input := range a.Config.Inputs {
if input.Config.Interval != 0 {
continue
@@ -99,14 +102,24 @@ func (a *Agent) gatherParallel(pointChan chan *client.Point) error {
wg.Add(1)
counter++
- go func(input *config.RunningInput) {
+ go func(input *models.RunningInput) {
defer wg.Done()
acc := NewAccumulator(input.Config, pointChan)
acc.SetDebug(a.Config.Agent.Debug)
- // acc.SetPrefix(input.Name + "_")
acc.SetDefaultTags(a.Config.Tags)
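+			// Collection jitter (#552): sleep a random fraction of the
+			// configured jitter window so inputs don't all gather at once.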
+ if jitter != 0 {
+ nanoSleep := rand.Int63n(jitter)
+ d, err := time.ParseDuration(fmt.Sprintf("%dns", nanoSleep))
+ if err != nil {
+ log.Printf("Jittering collection interval failed for plugin %s",
+ input.Name)
+ } else {
+ time.Sleep(d)
+ }
+ }
+
if err := input.Input.Gather(acc); err != nil {
log.Printf("Error in input [%s]: %s", input.Name, err)
}
@@ -121,8 +134,10 @@ func (a *Agent) gatherParallel(pointChan chan *client.Point) error {
wg.Wait()
elapsed := time.Since(start)
- log.Printf("Gathered metrics, (%s interval), from %d inputs in %s\n",
- a.Config.Agent.Interval.Duration, counter, elapsed)
+ if !a.Config.Agent.Quiet {
+ log.Printf("Gathered metrics, (%s interval), from %d inputs in %s\n",
+ a.Config.Agent.Interval.Duration, counter, elapsed)
+ }
return nil
}
@@ -130,7 +145,7 @@ func (a *Agent) gatherParallel(pointChan chan *client.Point) error {
// reporting interval.
func (a *Agent) gatherSeparate(
shutdown chan struct{},
- input *config.RunningInput,
+ input *models.RunningInput,
pointChan chan *client.Point,
) error {
ticker := time.NewTicker(input.Config.Interval)
@@ -141,7 +156,6 @@ func (a *Agent) gatherSeparate(
acc := NewAccumulator(input.Config, pointChan)
acc.SetDebug(a.Config.Agent.Debug)
- // acc.SetPrefix(input.Name + "_")
acc.SetDefaultTags(a.Config.Tags)
if err := input.Input.Gather(acc); err != nil {
@@ -149,8 +163,10 @@ func (a *Agent) gatherSeparate(
}
elapsed := time.Since(start)
- log.Printf("Gathered metrics, (separate %s interval), from %s in %s\n",
- input.Config.Interval, input.Name, elapsed)
+ if !a.Config.Agent.Quiet {
+ log.Printf("Gathered metrics, (separate %s interval), from %s in %s\n",
+ input.Config.Interval, input.Name, elapsed)
+ }
if outerr != nil {
return outerr
@@ -187,7 +203,6 @@ func (a *Agent) Test() error {
for _, input := range a.Config.Inputs {
acc := NewAccumulator(input.Config, pointChan)
acc.SetDebug(true)
- // acc.SetPrefix(input.Name + "_")
fmt.Printf("* Plugin: %s, Collection 1\n", input.Name)
if input.Config.Interval != 0 {
@@ -201,7 +216,7 @@ func (a *Agent) Test() error {
// Special instructions for some inputs. cpu, for example, needs to be
// run twice in order to return cpu usage percentages.
switch input.Name {
- case "cpu", "mongodb":
+ case "cpu", "mongodb", "procstat":
time.Sleep(500 * time.Millisecond)
fmt.Printf("* Plugin: %s, Collection 2\n", input.Name)
if err := input.Input.Gather(acc); err != nil {
@@ -213,91 +228,45 @@ func (a *Agent) Test() error {
return nil
}
-// writeOutput writes a list of points to a single output, with retries.
-// Optionally takes a `done` channel to indicate that it is done writing.
-func (a *Agent) writeOutput(
- points []*client.Point,
- ro *config.RunningOutput,
- shutdown chan struct{},
- wg *sync.WaitGroup,
-) {
- defer wg.Done()
- if len(points) == 0 {
- return
- }
- retry := 0
- retries := a.Config.Agent.FlushRetries
- start := time.Now()
-
- for {
- filtered := ro.FilterPoints(points)
- err := ro.Output.Write(filtered)
- if err == nil {
- // Write successful
- elapsed := time.Since(start)
- log.Printf("Flushed %d metrics to output %s in %s\n",
- len(filtered), ro.Name, elapsed)
- return
- }
-
- select {
- case <-shutdown:
- return
- default:
- if retry >= retries {
- // No more retries
- msg := "FATAL: Write to output [%s] failed %d times, dropping" +
- " %d metrics\n"
- log.Printf(msg, ro.Name, retries+1, len(points))
- return
- } else if err != nil {
- // Sleep for a retry
- log.Printf("Error in output [%s]: %s, retrying in %s",
- ro.Name, err.Error(), a.Config.Agent.FlushInterval.Duration)
- time.Sleep(a.Config.Agent.FlushInterval.Duration)
- }
- }
-
- retry++
- }
-}
-
// flush writes a list of points to all configured outputs
-func (a *Agent) flush(
- points []*client.Point,
- shutdown chan struct{},
- wait bool,
-) {
+func (a *Agent) flush() {
var wg sync.WaitGroup
+
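+	// Each output now drains its own fixed-size buffer; write to all of
+	// them concurrently. On failure the error is logged and the points
+	// stay buffered until a write succeeds.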
+ wg.Add(len(a.Config.Outputs))
for _, o := range a.Config.Outputs {
- wg.Add(1)
- go a.writeOutput(points, o, shutdown, &wg)
- }
- if wait {
- wg.Wait()
+ go func(output *models.RunningOutput) {
+ defer wg.Done()
+ err := output.Write()
+ if err != nil {
+ log.Printf("Error writing to output [%s]: %s\n",
+ output.Name, err.Error())
+ }
+ }(o)
}
+
+ wg.Wait()
}
// flusher monitors the points input channel and flushes on the minimum interval
func (a *Agent) flusher(shutdown chan struct{}, pointChan chan *client.Point) error {
// Inelegant, but this sleep is to allow the Gather threads to run, so that
// the flusher will flush after metrics are collected.
- time.Sleep(time.Millisecond * 100)
+ time.Sleep(time.Millisecond * 200)
ticker := time.NewTicker(a.Config.Agent.FlushInterval.Duration)
- points := make([]*client.Point, 0)
for {
select {
case <-shutdown:
log.Println("Hang on, flushing any cached points before shutdown")
- a.flush(points, shutdown, true)
+ a.flush()
return nil
case <-ticker.C:
- a.flush(points, shutdown, false)
- points = make([]*client.Point, 0)
+ a.flush()
case pt := <-pointChan:
- points = append(points, pt)
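+			// Incoming points are now buffered per output (in each output's
+			// fixed-size buffer) rather than in one shared slice.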
+ for _, o := range a.Config.Outputs {
+ o.AddPoint(pt)
+ }
}
}
}
@@ -309,7 +278,7 @@ func jitterInterval(ininterval, injitter time.Duration) time.Duration {
outinterval := ininterval
if injitter.Nanoseconds() != 0 {
maxjitter := big.NewInt(injitter.Nanoseconds())
- if j, err := rand.Int(rand.Reader, maxjitter); err == nil {
+ if j, err := cryptorand.Int(cryptorand.Reader, maxjitter); err == nil {
jitter = j.Int64()
}
outinterval = time.Duration(jitter + ininterval.Nanoseconds())
@@ -327,12 +296,13 @@ func jitterInterval(ininterval, injitter time.Duration) time.Duration {
func (a *Agent) Run(shutdown chan struct{}) error {
var wg sync.WaitGroup
- a.Config.Agent.FlushInterval.Duration = jitterInterval(a.Config.Agent.FlushInterval.Duration,
+ a.Config.Agent.FlushInterval.Duration = jitterInterval(
+ a.Config.Agent.FlushInterval.Duration,
a.Config.Agent.FlushJitter.Duration)
- log.Printf("Agent Config: Interval:%s, Debug:%#v, Hostname:%#v, "+
- "Flush Interval:%s\n",
- a.Config.Agent.Interval.Duration, a.Config.Agent.Debug,
+ log.Printf("Agent Config: Interval:%s, Debug:%#v, Quiet:%#v, Hostname:%#v, "+
+ "Flush Interval:%s \n",
+ a.Config.Agent.Interval.Duration, a.Config.Agent.Debug, a.Config.Agent.Quiet,
a.Config.Agent.Hostname, a.Config.Agent.FlushInterval.Duration)
// channel shared between all input threads for accumulating points
@@ -371,7 +341,7 @@ func (a *Agent) Run(shutdown chan struct{}) error {
// configured. Default intervals are handled below with gatherParallel
if input.Config.Interval != 0 {
wg.Add(1)
- go func(input *config.RunningInput) {
+ go func(input *models.RunningInput) {
defer wg.Done()
if err := a.gatherSeparate(shutdown, input, pointChan); err != nil {
log.Printf(err.Error())
diff --git a/agent_test.go b/agent_test.go
index 1cb020c7b..3420e665a 100644
--- a/agent_test.go
+++ b/agent_test.go
@@ -5,12 +5,12 @@ import (
"testing"
"time"
- "github.com/influxdb/telegraf/internal/config"
+ "github.com/influxdata/telegraf/internal/config"
// needing to load the plugins
- _ "github.com/influxdb/telegraf/plugins/inputs/all"
+ _ "github.com/influxdata/telegraf/plugins/inputs/all"
// needing to load the outputs
- _ "github.com/influxdb/telegraf/plugins/outputs/all"
+ _ "github.com/influxdata/telegraf/plugins/outputs/all"
)
func TestAgent_LoadPlugin(t *testing.T) {
diff --git a/cmd/telegraf/telegraf.go b/cmd/telegraf/telegraf.go
index 21e89ce04..72fb9fdcf 100644
--- a/cmd/telegraf/telegraf.go
+++ b/cmd/telegraf/telegraf.go
@@ -7,15 +7,18 @@ import (
"os"
"os/signal"
"strings"
+ "syscall"
- "github.com/influxdb/telegraf"
- "github.com/influxdb/telegraf/internal/config"
- _ "github.com/influxdb/telegraf/plugins/inputs/all"
- _ "github.com/influxdb/telegraf/plugins/outputs/all"
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/internal/config"
+ _ "github.com/influxdata/telegraf/plugins/inputs/all"
+ _ "github.com/influxdata/telegraf/plugins/outputs/all"
)
var fDebug = flag.Bool("debug", false,
"show metrics as they're generated to stdout")
+var fQuiet = flag.Bool("quiet", false,
+ "run in quiet mode")
var fTest = flag.Bool("test", false, "gather metrics, print them out, and exit")
var fConfig = flag.String("config", "", "configuration file to load")
var fConfigDirectory = flag.String("config-directory", "",
@@ -25,14 +28,14 @@ var fSampleConfig = flag.Bool("sample-config", false,
"print out full sample configuration")
var fPidfile = flag.String("pidfile", "", "file to write our pid to")
var fInputFilters = flag.String("input-filter", "",
- "filter the plugins to enable, separator is :")
+ "filter the inputs to enable, separator is :")
var fOutputFilters = flag.String("output-filter", "",
"filter the outputs to enable, separator is :")
var fUsage = flag.String("usage", "",
"print usage for a plugin, ie, 'telegraf -usage mysql'")
var fInputFiltersLegacy = flag.String("filter", "",
- "filter the plugins to enable, separator is :")
+ "filter the inputs to enable, separator is :")
var fOutputFiltersLegacy = flag.String("outputfilter", "",
"filter the outputs to enable, separator is :")
var fConfigDirectoryLegacy = flag.String("configdirectory", "",
@@ -57,6 +60,8 @@ The flags are:
-input-filter filter the input plugins to enable, separator is :
-output-filter filter the output plugins to enable, separator is :
-usage print usage for a plugin, ie, 'telegraf -usage mysql'
+ -debug print metrics as they're generated to stdout
+ -quiet run in quiet mode
-version print the version to stdout
Examples:
@@ -78,139 +83,156 @@ Examples:
`
func main() {
- flag.Usage = usageExit
- flag.Parse()
+ reload := make(chan bool, 1)
+ reload <- true
+ for <-reload {
+ reload <- false
+ flag.Usage = usageExit
+ flag.Parse()
- if flag.NFlag() == 0 {
- usageExit()
- }
+ if flag.NFlag() == 0 {
+ usageExit()
+ }
- var inputFilters []string
- if *fInputFiltersLegacy != "" {
- inputFilter := strings.TrimSpace(*fInputFiltersLegacy)
- inputFilters = strings.Split(":"+inputFilter+":", ":")
- }
- if *fInputFilters != "" {
- inputFilter := strings.TrimSpace(*fInputFilters)
- inputFilters = strings.Split(":"+inputFilter+":", ":")
- }
+ var inputFilters []string
+ if *fInputFiltersLegacy != "" {
+ inputFilter := strings.TrimSpace(*fInputFiltersLegacy)
+ inputFilters = strings.Split(":"+inputFilter+":", ":")
+ }
+ if *fInputFilters != "" {
+ inputFilter := strings.TrimSpace(*fInputFilters)
+ inputFilters = strings.Split(":"+inputFilter+":", ":")
+ }
- var outputFilters []string
- if *fOutputFiltersLegacy != "" {
- outputFilter := strings.TrimSpace(*fOutputFiltersLegacy)
- outputFilters = strings.Split(":"+outputFilter+":", ":")
- }
- if *fOutputFilters != "" {
- outputFilter := strings.TrimSpace(*fOutputFilters)
- outputFilters = strings.Split(":"+outputFilter+":", ":")
- }
+ var outputFilters []string
+ if *fOutputFiltersLegacy != "" {
+ outputFilter := strings.TrimSpace(*fOutputFiltersLegacy)
+ outputFilters = strings.Split(":"+outputFilter+":", ":")
+ }
+ if *fOutputFilters != "" {
+ outputFilter := strings.TrimSpace(*fOutputFilters)
+ outputFilters = strings.Split(":"+outputFilter+":", ":")
+ }
- if *fVersion {
- v := fmt.Sprintf("Telegraf - Version %s", Version)
- fmt.Println(v)
- return
- }
+ if *fVersion {
+ v := fmt.Sprintf("Telegraf - Version %s", Version)
+ fmt.Println(v)
+ return
+ }
- if *fSampleConfig {
- config.PrintSampleConfig(inputFilters, outputFilters)
- return
- }
+ if *fSampleConfig {
+ config.PrintSampleConfig(inputFilters, outputFilters)
+ return
+ }
- if *fUsage != "" {
- if err := config.PrintInputConfig(*fUsage); err != nil {
- if err2 := config.PrintOutputConfig(*fUsage); err2 != nil {
- log.Fatalf("%s and %s", err, err2)
+ if *fUsage != "" {
+ if err := config.PrintInputConfig(*fUsage); err != nil {
+ if err2 := config.PrintOutputConfig(*fUsage); err2 != nil {
+ log.Fatalf("%s and %s", err, err2)
+ }
+ }
+ return
+ }
+
+ var (
+ c *config.Config
+ err error
+ )
+
+ if *fConfig != "" {
+ c = config.NewConfig()
+ c.OutputFilters = outputFilters
+ c.InputFilters = inputFilters
+ err = c.LoadConfig(*fConfig)
+ if err != nil {
+ log.Fatal(err)
+ }
+ } else {
+ fmt.Println("Usage: Telegraf")
+ flag.PrintDefaults()
+ return
+ }
+
+ if *fConfigDirectoryLegacy != "" {
+ err = c.LoadDirectory(*fConfigDirectoryLegacy)
+ if err != nil {
+ log.Fatal(err)
}
}
- return
- }
- var (
- c *config.Config
- err error
- )
+ if *fConfigDirectory != "" {
+ err = c.LoadDirectory(*fConfigDirectory)
+ if err != nil {
+ log.Fatal(err)
+ }
+ }
+ if len(c.Outputs) == 0 {
+ log.Fatalf("Error: no outputs found, did you provide a valid config file?")
+ }
+ if len(c.Inputs) == 0 {
+ log.Fatalf("Error: no inputs found, did you provide a valid config file?")
+ }
- if *fConfig != "" {
- c = config.NewConfig()
- c.OutputFilters = outputFilters
- c.InputFilters = inputFilters
- err = c.LoadConfig(*fConfig)
+ ag, err := telegraf.NewAgent(c)
if err != nil {
log.Fatal(err)
}
- } else {
- fmt.Println("Usage: Telegraf")
- flag.PrintDefaults()
- return
- }
- if *fConfigDirectoryLegacy != "" {
- err = c.LoadDirectory(*fConfigDirectoryLegacy)
+ if *fDebug {
+ ag.Config.Agent.Debug = true
+ }
+
+ if *fQuiet {
+ ag.Config.Agent.Quiet = true
+ }
+
+ if *fTest {
+ err = ag.Test()
+ if err != nil {
+ log.Fatal(err)
+ }
+ return
+ }
+
+ err = ag.Connect()
if err != nil {
log.Fatal(err)
}
- }
- if *fConfigDirectory != "" {
- err = c.LoadDirectory(*fConfigDirectory)
- if err != nil {
- log.Fatal(err)
- }
- }
- if len(c.Outputs) == 0 {
- log.Fatalf("Error: no outputs found, did you provide a valid config file?")
- }
- if len(c.Inputs) == 0 {
- log.Fatalf("Error: no plugins found, did you provide a valid config file?")
- }
+ shutdown := make(chan struct{})
+ signals := make(chan os.Signal, 1)
+ signal.Notify(signals, os.Interrupt, syscall.SIGHUP)
+ go func() {
+ sig := <-signals
+ if sig == os.Interrupt {
+ close(shutdown)
+ }
+ if sig == syscall.SIGHUP {
+ log.Printf("Reloading Telegraf config\n")
+ <-reload
+ reload <- true
+ close(shutdown)
+ }
+ }()
- ag, err := telegraf.NewAgent(c)
- if err != nil {
- log.Fatal(err)
- }
+ log.Printf("Starting Telegraf (version %s)\n", Version)
+ log.Printf("Loaded outputs: %s", strings.Join(c.OutputNames(), " "))
+ log.Printf("Loaded inputs: %s", strings.Join(c.InputNames(), " "))
+ log.Printf("Tags enabled: %s", c.ListTags())
- if *fDebug {
- ag.Config.Agent.Debug = true
- }
+ if *fPidfile != "" {
+ f, err := os.Create(*fPidfile)
+ if err != nil {
+ log.Fatalf("Unable to create pidfile: %s", err)
+ }
- if *fTest {
- err = ag.Test()
- if err != nil {
- log.Fatal(err)
- }
- return
- }
+ fmt.Fprintf(f, "%d\n", os.Getpid())
- err = ag.Connect()
- if err != nil {
- log.Fatal(err)
- }
-
- shutdown := make(chan struct{})
- signals := make(chan os.Signal)
- signal.Notify(signals, os.Interrupt)
- go func() {
- <-signals
- close(shutdown)
- }()
-
- log.Printf("Starting Telegraf (version %s)\n", Version)
- log.Printf("Loaded outputs: %s", strings.Join(c.OutputNames(), " "))
- log.Printf("Loaded plugins: %s", strings.Join(c.InputNames(), " "))
- log.Printf("Tags enabled: %s", c.ListTags())
-
- if *fPidfile != "" {
- f, err := os.Create(*fPidfile)
- if err != nil {
- log.Fatalf("Unable to create pidfile: %s", err)
+ f.Close()
}
- fmt.Fprintf(f, "%d\n", os.Getpid())
-
- f.Close()
+ ag.Run(shutdown)
}
-
- ag.Run(shutdown)
}
func usageExit() {
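> Note on the new main loop above: the buffered `reload` channel is what lets a SIGHUP tear the agent down and start it again with freshly loaded config. A stripped-down sketch of the same pattern, with a bare `<-shutdown` standing in for `ag.Run(shutdown)` (hypothetical demo code, not part of the diff):

```go
package main

import (
	"log"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	reload := make(chan bool, 1)
	reload <- true
	for <-reload { // true on the first pass and after each SIGHUP
		reload <- false // default: exit after this run

		shutdown := make(chan struct{})
		signals := make(chan os.Signal, 1)
		signal.Notify(signals, os.Interrupt, syscall.SIGHUP)
		go func() {
			sig := <-signals
			if sig == syscall.SIGHUP {
				log.Println("reloading")
				<-reload       // drop the false queued above...
				reload <- true // ...and re-arm the outer loop
			}
			close(shutdown)
		}()

		<-shutdown // stands in for ag.Run(shutdown)
		log.Println("run loop exited")
	}
}
```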
diff --git a/etc/telegraf.conf b/etc/telegraf.conf
index 9df2e93d5..9871ae7bc 100644
--- a/etc/telegraf.conf
+++ b/etc/telegraf.conf
@@ -90,7 +90,7 @@
[[inputs.disk]]
# By default, telegraf gather stats for all mountpoints.
# Setting mountpoints will restrict the stats to the specified mountpoints.
- # Mountpoints=["/"]
+ # mount_points=["/"]
# Read metrics about disk IO by device
[[inputs.diskio]]
diff --git a/internal/config/config.go b/internal/config/config.go
index 6c3d17750..5f97f8350 100644
--- a/internal/config/config.go
+++ b/internal/config/config.go
@@ -10,14 +10,13 @@ import (
"strings"
"time"
- "github.com/influxdb/telegraf/internal"
- "github.com/influxdb/telegraf/plugins/inputs"
- "github.com/influxdb/telegraf/plugins/outputs"
+ "github.com/influxdata/telegraf/internal"
+ "github.com/influxdata/telegraf/internal/models"
+ "github.com/influxdata/telegraf/plugins/inputs"
+ "github.com/influxdata/telegraf/plugins/outputs"
- "github.com/naoina/toml"
+ "github.com/influxdata/config"
"github.com/naoina/toml/ast"
-
- "github.com/influxdb/influxdb/client/v2"
)
// Config specifies the URL/user/password for the database that telegraf
@@ -29,8 +28,8 @@ type Config struct {
OutputFilters []string
Agent *AgentConfig
- Inputs []*RunningInput
- Outputs []*RunningOutput
+ Inputs []*models.RunningInput
+ Outputs []*models.RunningOutput
}
func NewConfig() *Config {
@@ -40,13 +39,12 @@ func NewConfig() *Config {
Interval: internal.Duration{Duration: 10 * time.Second},
RoundInterval: true,
FlushInterval: internal.Duration{Duration: 10 * time.Second},
- FlushRetries: 2,
FlushJitter: internal.Duration{Duration: 5 * time.Second},
},
Tags: make(map[string]string),
- Inputs: make([]*RunningInput, 0),
- Outputs: make([]*RunningOutput, 0),
+ Inputs: make([]*models.RunningInput, 0),
+ Outputs: make([]*models.RunningOutput, 0),
InputFilters: make([]string, 0),
OutputFilters: make([]string, 0),
}
@@ -61,149 +59,40 @@ type AgentConfig struct {
// ie, if Interval=10s then always collect on :00, :10, :20, etc.
RoundInterval bool
+ // CollectionJitter is used to jitter the collection by a random amount.
+ // Each plugin will sleep for a random time within jitter before collecting.
+ // This can be used to avoid many plugins querying things like sysfs at the
+ // same time, which can have a measurable effect on the system.
+ CollectionJitter internal.Duration
+
// Interval at which to flush data
FlushInterval internal.Duration
- // FlushRetries is the number of times to retry each data flush
- FlushRetries int
-
- // FlushJitter tells
+ // FlushJitter jitters the flush interval by a random amount.
+ // This is primarily to avoid large write spikes for users running a large
+ // number of telegraf instances.
+ // ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
FlushJitter internal.Duration
+ // MetricBufferLimit is the max number of metrics that each output plugin
+ // will cache. The buffer is cleared when a successful write occurs. When
+ // full, the oldest metrics will be overwritten.
+ MetricBufferLimit int
+
// TODO(cam): Remove UTC and Precision parameters, they are no longer
// valid for the agent config. Leaving them here for now for backwards-
// compatibility
UTC bool `toml:"utc"`
Precision string
- // Option for running in debug mode
- Debug bool
+ // Debug is the option for running in debug mode
+ Debug bool
+
+ // Quiet is the option for running in quiet mode
+ Quiet bool
Hostname string
}
-// TagFilter is the name of a tag, and the values on which to filter
-type TagFilter struct {
- Name string
- Filter []string
-}
-
-type RunningOutput struct {
- Name string
- Output outputs.Output
- Config *OutputConfig
-}
-
-type RunningInput struct {
- Name string
- Input inputs.Input
- Config *InputConfig
-}
-
-// Filter containing drop/pass and tagdrop/tagpass rules
-type Filter struct {
- Drop []string
- Pass []string
-
- TagDrop []TagFilter
- TagPass []TagFilter
-
- IsActive bool
-}
-
-// InputConfig containing a name, interval, and filter
-type InputConfig struct {
- Name string
- NameOverride string
- MeasurementPrefix string
- MeasurementSuffix string
- Tags map[string]string
- Filter Filter
- Interval time.Duration
-}
-
-// OutputConfig containing name and filter
-type OutputConfig struct {
- Name string
- Filter Filter
-}
-
-// Filter returns filtered slice of client.Points based on whether filters
-// are active for this RunningOutput.
-func (ro *RunningOutput) FilterPoints(points []*client.Point) []*client.Point {
- if !ro.Config.Filter.IsActive {
- return points
- }
-
- var filteredPoints []*client.Point
- for i := range points {
- if !ro.Config.Filter.ShouldPass(points[i].Name()) || !ro.Config.Filter.ShouldTagsPass(points[i].Tags()) {
- continue
- }
- filteredPoints = append(filteredPoints, points[i])
- }
- return filteredPoints
-}
-
-// ShouldPass returns true if the metric should pass, false if should drop
-// based on the drop/pass filter parameters
-func (f Filter) ShouldPass(fieldkey string) bool {
- if f.Pass != nil {
- for _, pat := range f.Pass {
- // TODO remove HasPrefix check, leaving it for now for legacy support.
- // Cam, 2015-12-07
- if strings.HasPrefix(fieldkey, pat) || internal.Glob(pat, fieldkey) {
- return true
- }
- }
- return false
- }
-
- if f.Drop != nil {
- for _, pat := range f.Drop {
- // TODO remove HasPrefix check, leaving it for now for legacy support.
- // Cam, 2015-12-07
- if strings.HasPrefix(fieldkey, pat) || internal.Glob(pat, fieldkey) {
- return false
- }
- }
-
- return true
- }
- return true
-}
-
-// ShouldTagsPass returns true if the metric should pass, false if should drop
-// based on the tagdrop/tagpass filter parameters
-func (f Filter) ShouldTagsPass(tags map[string]string) bool {
- if f.TagPass != nil {
- for _, pat := range f.TagPass {
- if tagval, ok := tags[pat.Name]; ok {
- for _, filter := range pat.Filter {
- if internal.Glob(filter, tagval) {
- return true
- }
- }
- }
- }
- return false
- }
-
- if f.TagDrop != nil {
- for _, pat := range f.TagDrop {
- if tagval, ok := tags[pat.Name]; ok {
- for _, filter := range pat.Filter {
- if internal.Glob(filter, tagval) {
- return false
- }
- }
- }
- }
- return true
- }
-
- return true
-}
-
// Inputs returns a list of strings of the configured inputs.
func (c *Config) InputNames() []string {
var name []string
@@ -239,24 +128,14 @@ func (c *Config) ListTags() string {
var header = `# Telegraf configuration
# Telegraf is entirely plugin driven. All metrics are gathered from the
-# declared inputs.
+# declared inputs, and sent to the declared outputs.
-# Even if a plugin has no configuration, it must be declared in here
-# to be active. Declaring a plugin means just specifying the name
-# as a section with no variables. To deactivate a plugin, comment
-# out the name and any variables.
+# Plugins must be declared in here to be active.
+# To deactivate a plugin, comment out the name and any variables.
-# Use 'telegraf -config telegraf.toml -test' to see what metrics a config
+# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
# file would generate.
-# One rule that plugins conform to is wherever a connection string
-# can be passed, the values '' and 'localhost' are treated specially.
-# They indicate to the plugin to use their own builtin configuration to
-# connect to the local system.
-
-# NOTE: The configuration has a few required parameters. They are marked
-# with 'required'. Be sure to edit those to make this configuration work.
-
# Tags can also be specified via a normal map, but only one form at a time:
[tags]
# dc = "us-east-1"
@@ -269,6 +148,16 @@ var header = `# Telegraf configuration
# ie, if interval="10s" then always collect on :00, :10, :20, etc.
round_interval = true
+ # Telegraf will cache metric_buffer_limit metrics for each output, and will
+ # flush this buffer on a successful write.
+ metric_buffer_limit = 10000
+
+ # Collection jitter is used to jitter the collection by a random amount.
+ # Each plugin will sleep for a random time within jitter before collecting.
+ # This can be used to avoid many plugins querying things like sysfs at the
+ # same time, which can have a measurable effect on the system.
+ collection_jitter = "0s"
+
# Default data flushing interval for all outputs. You should not set this below
# interval. Maximum flush_interval will be flush_interval + flush_jitter
flush_interval = "10s"
@@ -279,6 +168,8 @@ var header = `# Telegraf configuration
# Run telegraf in debug mode
debug = false
+ # Run telegraf in quiet mode
+ quiet = false
# Override default hostname, if empty use os.Hostname()
hostname = ""
@@ -423,12 +314,7 @@ func (c *Config) LoadDirectory(path string) error {
// LoadConfig loads the given config file and applies it to c
func (c *Config) LoadConfig(path string) error {
- data, err := ioutil.ReadFile(path)
- if err != nil {
- return err
- }
-
- tbl, err := toml.Parse(data)
+ tbl, err := config.ParseFile(path)
if err != nil {
return err
}
@@ -441,12 +327,12 @@ func (c *Config) LoadConfig(path string) error {
switch name {
case "agent":
- if err = toml.UnmarshalTable(subTable, c.Agent); err != nil {
+ if err = config.UnmarshalTable(subTable, c.Agent); err != nil {
log.Printf("Could not parse [agent] config\n")
return err
}
case "tags":
- if err = toml.UnmarshalTable(subTable, c.Tags); err != nil {
+ if err = config.UnmarshalTable(subTable, c.Tags); err != nil {
log.Printf("Could not parse [tags] config\n")
return err
}
@@ -512,15 +398,15 @@ func (c *Config) addOutput(name string, table *ast.Table) error {
return err
}
- if err := toml.UnmarshalTable(table, output); err != nil {
+ if err := config.UnmarshalTable(table, output); err != nil {
return err
}
- ro := &RunningOutput{
- Name: name,
- Output: output,
- Config: outputConfig,
+ ro := models.NewRunningOutput(name, output, outputConfig)
+ if c.Agent.MetricBufferLimit > 0 {
+ ro.PointBufferLimit = c.Agent.MetricBufferLimit
}
+ ro.Quiet = c.Agent.Quiet
c.Outputs = append(c.Outputs, ro)
return nil
}
@@ -545,11 +431,11 @@ func (c *Config) addInput(name string, table *ast.Table) error {
return err
}
- if err := toml.UnmarshalTable(table, input); err != nil {
+ if err := config.UnmarshalTable(table, input); err != nil {
return err
}
- rp := &RunningInput{
+ rp := &models.RunningInput{
Name: name,
Input: input,
Config: pluginConfig,
@@ -559,10 +445,10 @@ func (c *Config) addInput(name string, table *ast.Table) error {
}
// buildFilter builds a Filter (tagpass/tagdrop/pass/drop) to
-// be inserted into the OutputConfig/InputConfig to be used for prefix
+// be inserted into the models.OutputConfig/models.InputConfig to be used for prefix
// filtering on tags and measurements
-func buildFilter(tbl *ast.Table) Filter {
- f := Filter{}
+func buildFilter(tbl *ast.Table) models.Filter {
+ f := models.Filter{}
if node, ok := tbl.Fields["pass"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
@@ -594,7 +480,7 @@ func buildFilter(tbl *ast.Table) Filter {
if subtbl, ok := node.(*ast.Table); ok {
for name, val := range subtbl.Fields {
if kv, ok := val.(*ast.KeyValue); ok {
- tagfilter := &TagFilter{Name: name}
+ tagfilter := &models.TagFilter{Name: name}
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
@@ -613,7 +499,7 @@ func buildFilter(tbl *ast.Table) Filter {
if subtbl, ok := node.(*ast.Table); ok {
for name, val := range subtbl.Fields {
if kv, ok := val.(*ast.KeyValue); ok {
- tagfilter := &TagFilter{Name: name}
+ tagfilter := &models.TagFilter{Name: name}
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
@@ -637,9 +523,9 @@ func buildFilter(tbl *ast.Table) Filter {
// buildInput parses input specific items from the ast.Table,
// builds the filter and returns a
-// InputConfig to be inserted into RunningInput
-func buildInput(name string, tbl *ast.Table) (*InputConfig, error) {
- cp := &InputConfig{Name: name}
+// models.InputConfig to be inserted into models.RunningInput
+func buildInput(name string, tbl *ast.Table) (*models.InputConfig, error) {
+ cp := &models.InputConfig{Name: name}
if node, ok := tbl.Fields["interval"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
@@ -680,7 +566,7 @@ func buildInput(name string, tbl *ast.Table) (*InputConfig, error) {
cp.Tags = make(map[string]string)
if node, ok := tbl.Fields["tags"]; ok {
if subtbl, ok := node.(*ast.Table); ok {
- if err := toml.UnmarshalTable(subtbl, cp.Tags); err != nil {
+ if err := config.UnmarshalTable(subtbl, cp.Tags); err != nil {
log.Printf("Could not parse tags for input %s\n", name)
}
}
@@ -696,10 +582,10 @@ func buildInput(name string, tbl *ast.Table) (*InputConfig, error) {
}
// buildOutput parses output specific items from the ast.Table, builds the filter and returns an
-// OutputConfig to be inserted into RunningInput
+// models.OutputConfig to be inserted into models.RunningOutput
// Note: error exists in the return for future calls that might require error
-func buildOutput(name string, tbl *ast.Table) (*OutputConfig, error) {
- oc := &OutputConfig{
+func buildOutput(name string, tbl *ast.Table) (*models.OutputConfig, error) {
+ oc := &models.OutputConfig{
Name: name,
Filter: buildFilter(tbl),
}
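> Note on the config.go changes above: `buildFilter` now produces the relocated `models.Filter` instead of the old local type. A hedged sketch of the mapping, with an illustrative plugin table in the comments (the TOML key names follow the `tbl.Fields` lookups shown above; `IsActive` bookkeeping is simplified):

```go
package main

import (
	"fmt"

	"github.com/influxdata/telegraf/internal/models"
)

func main() {
	// Roughly what buildFilter produces for a plugin table containing:
	//   pass = ["cpu_*"]
	//   [inputs.<name>.tagpass]
	//     cpu = ["cpu-total"]
	f := models.Filter{
		Pass: []string{"cpu_*"},
		TagPass: []models.TagFilter{
			{Name: "cpu", Filter: []string{"cpu-total"}},
		},
		IsActive: true,
	}

	fmt.Println(f.ShouldPass("cpu_usage_idle")) // true: matches cpu_*
	fmt.Println(f.ShouldPass("mem_free"))       // false: a pass list is set
	// false: tagpass only allows cpu-total
	fmt.Println(f.ShouldTagsPass(map[string]string{"cpu": "cpu0"}))
}
```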
diff --git a/internal/config/config_test.go b/internal/config/config_test.go
index c8ed79bdf..92f45ad0a 100644
--- a/internal/config/config_test.go
+++ b/internal/config/config_test.go
@@ -4,10 +4,11 @@ import (
"testing"
"time"
- "github.com/influxdb/telegraf/plugins/inputs"
- "github.com/influxdb/telegraf/plugins/inputs/exec"
- "github.com/influxdb/telegraf/plugins/inputs/memcached"
- "github.com/influxdb/telegraf/plugins/inputs/procstat"
+ "github.com/influxdata/telegraf/internal/models"
+ "github.com/influxdata/telegraf/plugins/inputs"
+ "github.com/influxdata/telegraf/plugins/inputs/exec"
+ "github.com/influxdata/telegraf/plugins/inputs/memcached"
+ "github.com/influxdata/telegraf/plugins/inputs/procstat"
"github.com/stretchr/testify/assert"
)
@@ -18,19 +19,19 @@ func TestConfig_LoadSingleInput(t *testing.T) {
memcached := inputs.Inputs["memcached"]().(*memcached.Memcached)
memcached.Servers = []string{"localhost"}
- mConfig := &InputConfig{
+ mConfig := &models.InputConfig{
Name: "memcached",
- Filter: Filter{
+ Filter: models.Filter{
Drop: []string{"other", "stuff"},
Pass: []string{"some", "strings"},
- TagDrop: []TagFilter{
- TagFilter{
+ TagDrop: []models.TagFilter{
+ models.TagFilter{
Name: "badtag",
Filter: []string{"othertag"},
},
},
- TagPass: []TagFilter{
- TagFilter{
+ TagPass: []models.TagFilter{
+ models.TagFilter{
Name: "goodtag",
Filter: []string{"mytag"},
},
@@ -61,19 +62,19 @@ func TestConfig_LoadDirectory(t *testing.T) {
memcached := inputs.Inputs["memcached"]().(*memcached.Memcached)
memcached.Servers = []string{"localhost"}
- mConfig := &InputConfig{
+ mConfig := &models.InputConfig{
Name: "memcached",
- Filter: Filter{
+ Filter: models.Filter{
Drop: []string{"other", "stuff"},
Pass: []string{"some", "strings"},
- TagDrop: []TagFilter{
- TagFilter{
+ TagDrop: []models.TagFilter{
+ models.TagFilter{
Name: "badtag",
Filter: []string{"othertag"},
},
},
- TagPass: []TagFilter{
- TagFilter{
+ TagPass: []models.TagFilter{
+ models.TagFilter{
Name: "goodtag",
Filter: []string{"mytag"},
},
@@ -91,7 +92,7 @@ func TestConfig_LoadDirectory(t *testing.T) {
ex := inputs.Inputs["exec"]().(*exec.Exec)
ex.Command = "/usr/bin/myothercollector --foo=bar"
- eConfig := &InputConfig{
+ eConfig := &models.InputConfig{
Name: "exec",
MeasurementSuffix: "_myothercollector",
}
@@ -110,7 +111,7 @@ func TestConfig_LoadDirectory(t *testing.T) {
pstat := inputs.Inputs["procstat"]().(*procstat.Procstat)
pstat.PidFile = "/var/run/grafana-server.pid"
- pConfig := &InputConfig{Name: "procstat"}
+ pConfig := &models.InputConfig{Name: "procstat"}
pConfig.Tags = make(map[string]string)
assert.Equal(t, pstat, c.Inputs[3].Input,
@@ -118,175 +119,3 @@ func TestConfig_LoadDirectory(t *testing.T) {
assert.Equal(t, pConfig, c.Inputs[3].Config,
"Merged Testdata did not produce correct procstat metadata.")
}
-
-func TestFilter_Empty(t *testing.T) {
- f := Filter{}
-
- measurements := []string{
- "foo",
- "bar",
- "barfoo",
- "foo_bar",
- "foo.bar",
- "foo-bar",
- "supercalifradjulisticexpialidocious",
- }
-
- for _, measurement := range measurements {
- if !f.ShouldPass(measurement) {
- t.Errorf("Expected measurement %s to pass", measurement)
- }
- }
-}
-
-func TestFilter_Pass(t *testing.T) {
- f := Filter{
- Pass: []string{"foo*", "cpu_usage_idle"},
- }
-
- passes := []string{
- "foo",
- "foo_bar",
- "foo.bar",
- "foo-bar",
- "cpu_usage_idle",
- }
-
- drops := []string{
- "bar",
- "barfoo",
- "bar_foo",
- "cpu_usage_busy",
- }
-
- for _, measurement := range passes {
- if !f.ShouldPass(measurement) {
- t.Errorf("Expected measurement %s to pass", measurement)
- }
- }
-
- for _, measurement := range drops {
- if f.ShouldPass(measurement) {
- t.Errorf("Expected measurement %s to drop", measurement)
- }
- }
-}
-
-func TestFilter_Drop(t *testing.T) {
- f := Filter{
- Drop: []string{"foo*", "cpu_usage_idle"},
- }
-
- drops := []string{
- "foo",
- "foo_bar",
- "foo.bar",
- "foo-bar",
- "cpu_usage_idle",
- }
-
- passes := []string{
- "bar",
- "barfoo",
- "bar_foo",
- "cpu_usage_busy",
- }
-
- for _, measurement := range passes {
- if !f.ShouldPass(measurement) {
- t.Errorf("Expected measurement %s to pass", measurement)
- }
- }
-
- for _, measurement := range drops {
- if f.ShouldPass(measurement) {
- t.Errorf("Expected measurement %s to drop", measurement)
- }
- }
-}
-
-func TestFilter_TagPass(t *testing.T) {
- filters := []TagFilter{
- TagFilter{
- Name: "cpu",
- Filter: []string{"cpu-*"},
- },
- TagFilter{
- Name: "mem",
- Filter: []string{"mem_free"},
- }}
- f := Filter{
- TagPass: filters,
- }
-
- passes := []map[string]string{
- {"cpu": "cpu-total"},
- {"cpu": "cpu-0"},
- {"cpu": "cpu-1"},
- {"cpu": "cpu-2"},
- {"mem": "mem_free"},
- }
-
- drops := []map[string]string{
- {"cpu": "cputotal"},
- {"cpu": "cpu0"},
- {"cpu": "cpu1"},
- {"cpu": "cpu2"},
- {"mem": "mem_used"},
- }
-
- for _, tags := range passes {
- if !f.ShouldTagsPass(tags) {
- t.Errorf("Expected tags %v to pass", tags)
- }
- }
-
- for _, tags := range drops {
- if f.ShouldTagsPass(tags) {
- t.Errorf("Expected tags %v to drop", tags)
- }
- }
-}
-
-func TestFilter_TagDrop(t *testing.T) {
- filters := []TagFilter{
- TagFilter{
- Name: "cpu",
- Filter: []string{"cpu-*"},
- },
- TagFilter{
- Name: "mem",
- Filter: []string{"mem_free"},
- }}
- f := Filter{
- TagDrop: filters,
- }
-
- drops := []map[string]string{
- {"cpu": "cpu-total"},
- {"cpu": "cpu-0"},
- {"cpu": "cpu-1"},
- {"cpu": "cpu-2"},
- {"mem": "mem_free"},
- }
-
- passes := []map[string]string{
- {"cpu": "cputotal"},
- {"cpu": "cpu0"},
- {"cpu": "cpu1"},
- {"cpu": "cpu2"},
- {"mem": "mem_used"},
- }
-
- for _, tags := range passes {
- if !f.ShouldTagsPass(tags) {
- t.Errorf("Expected tags %v to pass", tags)
- }
- }
-
- for _, tags := range drops {
- if f.ShouldTagsPass(tags) {
- t.Errorf("Expected tags %v to drop", tags)
- }
- }
-}
diff --git a/internal/models/filter.go b/internal/models/filter.go
new file mode 100644
index 000000000..3f171ccac
--- /dev/null
+++ b/internal/models/filter.go
@@ -0,0 +1,92 @@
+package models
+
+import (
+ "strings"
+
+ "github.com/influxdata/influxdb/client/v2"
+ "github.com/influxdata/telegraf/internal"
+)
+
+// TagFilter is the name of a tag, and the values on which to filter
+type TagFilter struct {
+ Name string
+ Filter []string
+}
+
+// Filter containing drop/pass and tagdrop/tagpass rules
+type Filter struct {
+ Drop []string
+ Pass []string
+
+ TagDrop []TagFilter
+ TagPass []TagFilter
+
+ IsActive bool
+}
+
+func (f Filter) ShouldPointPass(point *client.Point) bool {
+ return f.ShouldPass(point.Name()) && f.ShouldTagsPass(point.Tags())
+}
+
+// ShouldPass returns true if the metric should pass, false if should drop
+// based on the drop/pass filter parameters
+func (f Filter) ShouldPass(key string) bool {
+ if f.Pass != nil {
+ for _, pat := range f.Pass {
+ // TODO remove HasPrefix check, leaving it for now for legacy support.
+ // Cam, 2015-12-07
+ if strings.HasPrefix(key, pat) || internal.Glob(pat, key) {
+ return true
+ }
+ }
+ return false
+ }
+
+ if f.Drop != nil {
+ for _, pat := range f.Drop {
+ // TODO remove HasPrefix check, leaving it for now for legacy support.
+ // Cam, 2015-12-07
+ if strings.HasPrefix(key, pat) || internal.Glob(pat, key) {
+ return false
+ }
+ }
+
+ return true
+ }
+ return true
+}
+
+// ShouldTagsPass returns true if the metric should pass, false if should drop
+// based on the tagdrop/tagpass filter parameters
+func (f Filter) ShouldTagsPass(tags map[string]string) bool {
+ if f.TagPass != nil {
+ for _, pat := range f.TagPass {
+ if tagval, ok := tags[pat.Name]; ok {
+ for _, filter := range pat.Filter {
+ if internal.Glob(filter, tagval) {
+ return true
+ }
+ }
+ }
+ }
+ return false
+ }
+
+ if f.TagDrop != nil {
+ for _, pat := range f.TagDrop {
+ if tagval, ok := tags[pat.Name]; ok {
+ for _, filter := range pat.Filter {
+ if internal.Glob(filter, tagval) {
+ return false
+ }
+ }
+ }
+ }
+ return true
+ }
+
+ return true
+}
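> Note: `ShouldPointPass` is new in this file; it chains the measurement and tag checks so `RunningOutput` can filter whole points. A small usage sketch, assuming the influxdb client/v2 package already vendored by this repo:

```go
package main

import (
	"fmt"
	"time"

	"github.com/influxdata/influxdb/client/v2"
	"github.com/influxdata/telegraf/internal/models"
)

func main() {
	f := models.Filter{
		Drop:     []string{"mem_*"},
		IsActive: true,
	}

	pt, err := client.NewPoint(
		"mem_free",
		map[string]string{"host": "localhost"},
		map[string]interface{}{"value": 42},
		time.Now(),
	)
	if err != nil {
		panic(err)
	}

	// false: the measurement name matches the drop list
	fmt.Println(f.ShouldPointPass(pt))
}
```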
diff --git a/internal/models/filter_test.go b/internal/models/filter_test.go
new file mode 100644
index 000000000..9e962e420
--- /dev/null
+++ b/internal/models/filter_test.go
@@ -0,0 +1,177 @@
+package models
+
+import (
+ "testing"
+)
+
+func TestFilter_Empty(t *testing.T) {
+ f := Filter{}
+
+ measurements := []string{
+ "foo",
+ "bar",
+ "barfoo",
+ "foo_bar",
+ "foo.bar",
+ "foo-bar",
+ "supercalifradjulisticexpialidocious",
+ }
+
+ for _, measurement := range measurements {
+ if !f.ShouldPass(measurement) {
+ t.Errorf("Expected measurement %s to pass", measurement)
+ }
+ }
+}
+
+func TestFilter_Pass(t *testing.T) {
+ f := Filter{
+ Pass: []string{"foo*", "cpu_usage_idle"},
+ }
+
+ passes := []string{
+ "foo",
+ "foo_bar",
+ "foo.bar",
+ "foo-bar",
+ "cpu_usage_idle",
+ }
+
+ drops := []string{
+ "bar",
+ "barfoo",
+ "bar_foo",
+ "cpu_usage_busy",
+ }
+
+ for _, measurement := range passes {
+ if !f.ShouldPass(measurement) {
+ t.Errorf("Expected measurement %s to pass", measurement)
+ }
+ }
+
+ for _, measurement := range drops {
+ if f.ShouldPass(measurement) {
+ t.Errorf("Expected measurement %s to drop", measurement)
+ }
+ }
+}
+
+func TestFilter_Drop(t *testing.T) {
+ f := Filter{
+ Drop: []string{"foo*", "cpu_usage_idle"},
+ }
+
+ drops := []string{
+ "foo",
+ "foo_bar",
+ "foo.bar",
+ "foo-bar",
+ "cpu_usage_idle",
+ }
+
+ passes := []string{
+ "bar",
+ "barfoo",
+ "bar_foo",
+ "cpu_usage_busy",
+ }
+
+ for _, measurement := range passes {
+ if !f.ShouldPass(measurement) {
+ t.Errorf("Expected measurement %s to pass", measurement)
+ }
+ }
+
+ for _, measurement := range drops {
+ if f.ShouldPass(measurement) {
+ t.Errorf("Expected measurement %s to drop", measurement)
+ }
+ }
+}
+
+func TestFilter_TagPass(t *testing.T) {
+ filters := []TagFilter{
+ TagFilter{
+ Name: "cpu",
+ Filter: []string{"cpu-*"},
+ },
+ TagFilter{
+ Name: "mem",
+ Filter: []string{"mem_free"},
+ }}
+ f := Filter{
+ TagPass: filters,
+ }
+
+ passes := []map[string]string{
+ {"cpu": "cpu-total"},
+ {"cpu": "cpu-0"},
+ {"cpu": "cpu-1"},
+ {"cpu": "cpu-2"},
+ {"mem": "mem_free"},
+ }
+
+ drops := []map[string]string{
+ {"cpu": "cputotal"},
+ {"cpu": "cpu0"},
+ {"cpu": "cpu1"},
+ {"cpu": "cpu2"},
+ {"mem": "mem_used"},
+ }
+
+ for _, tags := range passes {
+ if !f.ShouldTagsPass(tags) {
+ t.Errorf("Expected tags %v to pass", tags)
+ }
+ }
+
+ for _, tags := range drops {
+ if f.ShouldTagsPass(tags) {
+ t.Errorf("Expected tags %v to drop", tags)
+ }
+ }
+}
+
+func TestFilter_TagDrop(t *testing.T) {
+ filters := []TagFilter{
+ TagFilter{
+ Name: "cpu",
+ Filter: []string{"cpu-*"},
+ },
+ TagFilter{
+ Name: "mem",
+ Filter: []string{"mem_free"},
+ }}
+ f := Filter{
+ TagDrop: filters,
+ }
+
+ drops := []map[string]string{
+ {"cpu": "cpu-total"},
+ {"cpu": "cpu-0"},
+ {"cpu": "cpu-1"},
+ {"cpu": "cpu-2"},
+ {"mem": "mem_free"},
+ }
+
+ passes := []map[string]string{
+ {"cpu": "cputotal"},
+ {"cpu": "cpu0"},
+ {"cpu": "cpu1"},
+ {"cpu": "cpu2"},
+ {"mem": "mem_used"},
+ }
+
+ for _, tags := range passes {
+ if !f.ShouldTagsPass(tags) {
+ t.Errorf("Expected tags %v to pass", tags)
+ }
+ }
+
+ for _, tags := range drops {
+ if f.ShouldTagsPass(tags) {
+ t.Errorf("Expected tags %v to drop", tags)
+ }
+ }
+}
diff --git a/internal/models/running_input.go b/internal/models/running_input.go
new file mode 100644
index 000000000..17c0d2129
--- /dev/null
+++ b/internal/models/running_input.go
@@ -0,0 +1,24 @@
+package models
+
+import (
+ "time"
+
+ "github.com/influxdata/telegraf/plugins/inputs"
+)
+
+type RunningInput struct {
+ Name string
+ Input inputs.Input
+ Config *InputConfig
+}
+
+// InputConfig containing a name, interval, and filter
+type InputConfig struct {
+ Name string
+ NameOverride string
+ MeasurementPrefix string
+ MeasurementSuffix string
+ Tags map[string]string
+ Filter Filter
+ Interval time.Duration
+}
diff --git a/internal/models/running_output.go b/internal/models/running_output.go
new file mode 100644
index 000000000..196ebdc8a
--- /dev/null
+++ b/internal/models/running_output.go
@@ -0,0 +1,77 @@
+package models
+
+import (
+ "log"
+ "time"
+
+ "github.com/influxdata/telegraf/plugins/outputs"
+
+ "github.com/influxdata/influxdb/client/v2"
+)
+
+const DEFAULT_POINT_BUFFER_LIMIT = 10000
+
+type RunningOutput struct {
+ Name string
+ Output outputs.Output
+ Config *OutputConfig
+ Quiet bool
+ PointBufferLimit int
+
+ points []*client.Point
+ overwriteCounter int
+}
+
+func NewRunningOutput(
+ name string,
+ output outputs.Output,
+ conf *OutputConfig,
+) *RunningOutput {
+ ro := &RunningOutput{
+ Name: name,
+ points: make([]*client.Point, 0),
+ Output: output,
+ Config: conf,
+ PointBufferLimit: DEFAULT_POINT_BUFFER_LIMIT,
+ }
+ return ro
+}
+
+func (ro *RunningOutput) AddPoint(point *client.Point) {
+ if ro.Config.Filter.IsActive {
+ if !ro.Config.Filter.ShouldPointPass(point) {
+ return
+ }
+ }
+
+ if len(ro.points) < ro.PointBufferLimit {
+ ro.points = append(ro.points, point)
+ } else {
+ if ro.overwriteCounter == len(ro.points) {
+ ro.overwriteCounter = 0
+ }
+ ro.points[ro.overwriteCounter] = point
+ ro.overwriteCounter++
+ }
+}
+
+func (ro *RunningOutput) Write() error {
+ start := time.Now()
+ err := ro.Output.Write(ro.points)
+ elapsed := time.Since(start)
+ if err == nil {
+ if !ro.Quiet {
+ log.Printf("Wrote %d metrics to output %s in %s\n",
+ len(ro.points), ro.Name, elapsed)
+ }
+ ro.points = make([]*client.Point, 0)
+ ro.overwriteCounter = 0
+ }
+ return err
+}
+
+// OutputConfig containing name and filter
+type OutputConfig struct {
+ Name string
+ Filter Filter
+}
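> Note on the buffer policy above: once `PointBufferLimit` is reached, `AddPoint` overwrites the oldest entries in place until a successful write clears the buffer. A dependency-free model with ints standing in for `client.Point` (a sketch, not the production type):

```go
package main

import "fmt"

// buffer models RunningOutput's point buffer: append until the limit,
// then overwrite the oldest entries in place. A successful write (not
// modeled here) would clear the slice and reset the counter.
type buffer struct {
	limit            int
	points           []int
	overwriteCounter int
}

func (b *buffer) add(p int) {
	if len(b.points) < b.limit {
		b.points = append(b.points, p)
		return
	}
	if b.overwriteCounter == len(b.points) {
		b.overwriteCounter = 0
	}
	b.points[b.overwriteCounter] = p
	b.overwriteCounter++
}

func main() {
	b := &buffer{limit: 3}
	for p := 1; p <= 5; p++ {
		b.add(p)
	}
	fmt.Println(b.points) // [4 5 3]: 4 and 5 replaced the two oldest points
}
```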
diff --git a/plugins/inputs/aerospike/aerospike.go b/plugins/inputs/aerospike/aerospike.go
index 5f847ebfa..aa015a4c0 100644
--- a/plugins/inputs/aerospike/aerospike.go
+++ b/plugins/inputs/aerospike/aerospike.go
@@ -4,7 +4,7 @@ import (
"bytes"
"encoding/binary"
"fmt"
- "github.com/influxdb/telegraf/plugins/inputs"
+ "github.com/influxdata/telegraf/plugins/inputs"
"net"
"strconv"
"strings"
diff --git a/plugins/inputs/aerospike/aerospike_test.go b/plugins/inputs/aerospike/aerospike_test.go
index 3f4d909a2..74b70eb1d 100644
--- a/plugins/inputs/aerospike/aerospike_test.go
+++ b/plugins/inputs/aerospike/aerospike_test.go
@@ -4,7 +4,7 @@ import (
"reflect"
"testing"
- "github.com/influxdb/telegraf/testutil"
+ "github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go
index 03f0f8aef..a83d788c8 100644
--- a/plugins/inputs/all/all.go
+++ b/plugins/inputs/all/all.go
@@ -1,39 +1,43 @@
package all
import (
- _ "github.com/influxdb/telegraf/plugins/inputs/aerospike"
- _ "github.com/influxdb/telegraf/plugins/inputs/apache"
- _ "github.com/influxdb/telegraf/plugins/inputs/bcache"
- _ "github.com/influxdb/telegraf/plugins/inputs/disque"
- _ "github.com/influxdb/telegraf/plugins/inputs/elasticsearch"
- _ "github.com/influxdb/telegraf/plugins/inputs/exec"
- _ "github.com/influxdb/telegraf/plugins/inputs/haproxy"
- _ "github.com/influxdb/telegraf/plugins/inputs/httpjson"
- _ "github.com/influxdb/telegraf/plugins/inputs/influxdb"
- _ "github.com/influxdb/telegraf/plugins/inputs/jolokia"
- _ "github.com/influxdb/telegraf/plugins/inputs/kafka_consumer"
- _ "github.com/influxdb/telegraf/plugins/inputs/leofs"
- _ "github.com/influxdb/telegraf/plugins/inputs/lustre2"
- _ "github.com/influxdb/telegraf/plugins/inputs/mailchimp"
- _ "github.com/influxdb/telegraf/plugins/inputs/memcached"
- _ "github.com/influxdb/telegraf/plugins/inputs/mongodb"
- _ "github.com/influxdb/telegraf/plugins/inputs/mysql"
- _ "github.com/influxdb/telegraf/plugins/inputs/nginx"
- _ "github.com/influxdb/telegraf/plugins/inputs/phpfpm"
- _ "github.com/influxdb/telegraf/plugins/inputs/ping"
- _ "github.com/influxdb/telegraf/plugins/inputs/postgresql"
- _ "github.com/influxdb/telegraf/plugins/inputs/procstat"
- _ "github.com/influxdb/telegraf/plugins/inputs/prometheus"
- _ "github.com/influxdb/telegraf/plugins/inputs/puppetagent"
- _ "github.com/influxdb/telegraf/plugins/inputs/rabbitmq"
- _ "github.com/influxdb/telegraf/plugins/inputs/redis"
- _ "github.com/influxdb/telegraf/plugins/inputs/rethinkdb"
- _ "github.com/influxdb/telegraf/plugins/inputs/sensors"
- _ "github.com/influxdb/telegraf/plugins/inputs/sqlserver"
- _ "github.com/influxdb/telegraf/plugins/inputs/statsd"
- _ "github.com/influxdb/telegraf/plugins/inputs/system"
- _ "github.com/influxdb/telegraf/plugins/inputs/trig"
- _ "github.com/influxdb/telegraf/plugins/inputs/twemproxy"
- _ "github.com/influxdb/telegraf/plugins/inputs/zfs"
- _ "github.com/influxdb/telegraf/plugins/inputs/zookeeper"
+ _ "github.com/influxdata/telegraf/plugins/inputs/aerospike"
+ _ "github.com/influxdata/telegraf/plugins/inputs/apache"
+ _ "github.com/influxdata/telegraf/plugins/inputs/bcache"
+ _ "github.com/influxdata/telegraf/plugins/inputs/disque"
+ _ "github.com/influxdata/telegraf/plugins/inputs/docker"
+ _ "github.com/influxdata/telegraf/plugins/inputs/elasticsearch"
+ _ "github.com/influxdata/telegraf/plugins/inputs/exec"
+ _ "github.com/influxdata/telegraf/plugins/inputs/haproxy"
+ _ "github.com/influxdata/telegraf/plugins/inputs/httpjson"
+ _ "github.com/influxdata/telegraf/plugins/inputs/influxdb"
+ _ "github.com/influxdata/telegraf/plugins/inputs/jolokia"
+ _ "github.com/influxdata/telegraf/plugins/inputs/kafka_consumer"
+ _ "github.com/influxdata/telegraf/plugins/inputs/leofs"
+ _ "github.com/influxdata/telegraf/plugins/inputs/lustre2"
+ _ "github.com/influxdata/telegraf/plugins/inputs/mailchimp"
+ _ "github.com/influxdata/telegraf/plugins/inputs/memcached"
+ _ "github.com/influxdata/telegraf/plugins/inputs/mongodb"
+ _ "github.com/influxdata/telegraf/plugins/inputs/mysql"
+ _ "github.com/influxdata/telegraf/plugins/inputs/nginx"
+ _ "github.com/influxdata/telegraf/plugins/inputs/nsq"
+ _ "github.com/influxdata/telegraf/plugins/inputs/passenger"
+ _ "github.com/influxdata/telegraf/plugins/inputs/phpfpm"
+ _ "github.com/influxdata/telegraf/plugins/inputs/ping"
+ _ "github.com/influxdata/telegraf/plugins/inputs/postgresql"
+ _ "github.com/influxdata/telegraf/plugins/inputs/procstat"
+ _ "github.com/influxdata/telegraf/plugins/inputs/prometheus"
+ _ "github.com/influxdata/telegraf/plugins/inputs/puppetagent"
+ _ "github.com/influxdata/telegraf/plugins/inputs/rabbitmq"
+ _ "github.com/influxdata/telegraf/plugins/inputs/redis"
+ _ "github.com/influxdata/telegraf/plugins/inputs/rethinkdb"
+ _ "github.com/influxdata/telegraf/plugins/inputs/sensors"
+ _ "github.com/influxdata/telegraf/plugins/inputs/snmp"
+ _ "github.com/influxdata/telegraf/plugins/inputs/sqlserver"
+ _ "github.com/influxdata/telegraf/plugins/inputs/statsd"
+ _ "github.com/influxdata/telegraf/plugins/inputs/system"
+ _ "github.com/influxdata/telegraf/plugins/inputs/trig"
+ _ "github.com/influxdata/telegraf/plugins/inputs/twemproxy"
+ _ "github.com/influxdata/telegraf/plugins/inputs/zfs"
+ _ "github.com/influxdata/telegraf/plugins/inputs/zookeeper"
)
diff --git a/plugins/inputs/apache/apache.go b/plugins/inputs/apache/apache.go
index f48bac336..317a635d3 100644
--- a/plugins/inputs/apache/apache.go
+++ b/plugins/inputs/apache/apache.go
@@ -11,7 +11,7 @@ import (
"sync"
"time"
- "github.com/influxdb/telegraf/plugins/inputs"
+ "github.com/influxdata/telegraf/plugins/inputs"
)
type Apache struct {
diff --git a/plugins/inputs/apache/apache_test.go b/plugins/inputs/apache/apache_test.go
index 16c319974..8eed61ca6 100644
--- a/plugins/inputs/apache/apache_test.go
+++ b/plugins/inputs/apache/apache_test.go
@@ -6,7 +6,7 @@ import (
"net/http/httptest"
"testing"
- "github.com/influxdb/telegraf/testutil"
+ "github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/require"
)
diff --git a/plugins/inputs/bcache/bcache.go b/plugins/inputs/bcache/bcache.go
index 146849eef..b6d6eb130 100644
--- a/plugins/inputs/bcache/bcache.go
+++ b/plugins/inputs/bcache/bcache.go
@@ -8,7 +8,7 @@ import (
"strconv"
"strings"
- "github.com/influxdb/telegraf/plugins/inputs"
+ "github.com/influxdata/telegraf/plugins/inputs"
)
type Bcache struct {
diff --git a/plugins/inputs/bcache/bcache_test.go b/plugins/inputs/bcache/bcache_test.go
index 0f34d016b..bd191528f 100644
--- a/plugins/inputs/bcache/bcache_test.go
+++ b/plugins/inputs/bcache/bcache_test.go
@@ -5,7 +5,7 @@ import (
"os"
"testing"
- "github.com/influxdb/telegraf/testutil"
+ "github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/require"
)
diff --git a/plugins/inputs/disque/disque.go b/plugins/inputs/disque/disque.go
index 334fdd554..364e78fbc 100644
--- a/plugins/inputs/disque/disque.go
+++ b/plugins/inputs/disque/disque.go
@@ -10,7 +10,7 @@ import (
"strings"
"sync"
- "github.com/influxdb/telegraf/plugins/inputs"
+ "github.com/influxdata/telegraf/plugins/inputs"
)
type Disque struct {
diff --git a/plugins/inputs/disque/disque_test.go b/plugins/inputs/disque/disque_test.go
index 91c7dc979..f060e9568 100644
--- a/plugins/inputs/disque/disque_test.go
+++ b/plugins/inputs/disque/disque_test.go
@@ -6,7 +6,7 @@ import (
"net"
"testing"
- "github.com/influxdb/telegraf/testutil"
+ "github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/require"
)
diff --git a/plugins/inputs/docker/README.md b/plugins/inputs/docker/README.md
new file mode 100644
index 000000000..fa662ca80
--- /dev/null
+++ b/plugins/inputs/docker/README.md
@@ -0,0 +1,148 @@
+# Docker Input Plugin
+
+The docker plugin uses the docker remote API to gather metrics on running
+docker containers. You can read Docker's documentation for their remote API
+[here](https://docs.docker.com/engine/reference/api/docker_remote_api_v1.20/#get-container-stats-based-on-resource-usage).
+
+The docker plugin uses the excellent
+[fsouza go-dockerclient](https://github.com/fsouza/go-dockerclient) library to
+gather stats. Documentation for the library can be found
+[here](https://godoc.org/github.com/fsouza/go-dockerclient) and documentation
+for the stat structure can be found
+[here](https://godoc.org/github.com/fsouza/go-dockerclient#Stats).
+
+### Configuration:
+
+```
+# Read metrics about docker containers
+[[inputs.docker]]
+ # Docker Endpoint
+ # To use TCP, set endpoint = "tcp://[ip]:[port]"
+ # To use environment variables (ie, docker-machine), set endpoint = "ENV"
+ endpoint = "unix:///var/run/docker.sock"
+ # Only collect metrics for these containers, collect all if empty
+ container_names = []
+```
+
+### Measurements & Fields:
+
+Wherever possible, the field names below are kept identical to those in the
+JSON response returned by the docker API.
+
+Note that the docker_cpu metric may appear multiple times per collection, based
+on the availability of per-cpu stats on your system.
+
+- docker_mem
+ - total_pgmafault
+ - cache
+ - mapped_file
+ - total_inactive_file
+ - pgpgout
+ - rss
+ - total_mapped_file
+ - writeback
+ - unevictable
+ - pgpgin
+ - total_unevictable
+ - pgmajfault
+ - total_rss
+ - total_rss_huge
+ - total_writeback
+ - total_inactive_anon
+ - rss_huge
+ - hierarchical_memory_limit
+ - total_pgfault
+ - total_active_file
+ - active_anon
+ - total_active_anon
+ - total_pgpgout
+ - total_cache
+ - inactive_anon
+ - active_file
+ - pgfault
+ - inactive_file
+ - total_pgpgin
+ - max_usage
+ - usage
+ - fail_count
+ - limit
+- docker_cpu
+ - throttling_periods
+ - throttling_throttled_periods
+ - throttling_throttled_time
+ - usage_in_kernelmode
+ - usage_in_usermode
+ - usage_system
+ - usage_total
+- docker_net
+ - rx_dropped
+ - rx_bytes
+ - rx_errors
+ - tx_packets
+ - tx_dropped
+ - rx_packets
+ - tx_errors
+ - tx_bytes
+- docker_blkio
+ - io_service_bytes_recursive_async
+ - io_service_bytes_recursive_read
+ - io_service_bytes_recursive_sync
+ - io_service_bytes_recursive_total
+ - io_service_bytes_recursive_write
+ - io_serviced_recursive_async
+ - io_serviced_recursive_read
+ - io_serviced_recursive_sync
+ - io_serviced_recursive_total
+ - io_serviced_recursive_write
+
+### Tags:
+
+- All stats have the following tags:
+ - cont_id (container ID)
+ - cont_image (container image)
+ - cont_name (container name)
+- docker_cpu specific:
+ - cpu
+- docker_net specific:
+ - network
+- docker_blkio specific:
+ - device
+
+### Example Output:
+
+```
+% ./telegraf -config ~/ws/telegraf.conf -input-filter docker -test
+* Plugin: docker, Collection 1
+> docker_mem,cont_id=5705ba8ed8fb47527410653d60a8bb2f3af5e62372297c419022a3cc6d45d848,\
+cont_image=spotify/kafka,cont_name=kafka \
+active_anon=52568064i,active_file=6926336i,cache=12038144i,fail_count=0i,\
+hierarchical_memory_limit=9223372036854771712i,inactive_anon=52707328i,\
+inactive_file=5111808i,limit=1044578304i,mapped_file=10301440i,\
+max_usage=140656640i,pgfault=63762i,pgmajfault=2837i,pgpgin=73355i,\
+pgpgout=45736i,rss=105275392i,rss_huge=4194304i,total_active_anon=52568064i,\
+total_active_file=6926336i,total_cache=12038144i,total_inactive_anon=52707328i,\
+total_inactive_file=5111808i,total_mapped_file=10301440i,total_pgfault=63762i,\
+total_pgmafault=0i,total_pgpgin=73355i,total_pgpgout=45736i,\
+total_rss=105275392i,total_rss_huge=4194304i,total_unevictable=0i,\
+total_writeback=0i,unevictable=0i,usage=117440512i,writeback=0i 1453409536840126713
+> docker_cpu,cont_id=5705ba8ed8fb47527410653d60a8bb2f3af5e62372297c419022a3cc6d45d848,\
+cont_image=spotify/kafka,cont_name=kafka,cpu=cpu-total \
+throttling_periods=0i,throttling_throttled_periods=0i,\
+throttling_throttled_time=0i,usage_in_kernelmode=440000000i,\
+usage_in_usermode=2290000000i,usage_system=84795360000000i,\
+usage_total=6628208865i 1453409536840126713
+> docker_cpu,cont_id=5705ba8ed8fb47527410653d60a8bb2f3af5e62372297c419022a3cc6d45d848,\
+cont_image=spotify/kafka,cont_name=kafka,cpu=cpu0 \
+usage_total=6628208865i 1453409536840126713
+> docker_net,cont_id=5705ba8ed8fb47527410653d60a8bb2f3af5e62372297c419022a3cc6d45d848,\
+cont_image=spotify/kafka,cont_name=kafka,network=eth0 \
+rx_bytes=7468i,rx_dropped=0i,rx_errors=0i,rx_packets=94i,tx_bytes=946i,\
+tx_dropped=0i,tx_errors=0i,tx_packets=13i 1453409536840126713
+> docker_blkio,cont_id=5705ba8ed8fb47527410653d60a8bb2f3af5e62372297c419022a3cc6d45d848,\
+cont_image=spotify/kafka,cont_name=kafka,device=8:0 \
+io_service_bytes_recursive_async=80216064i,io_service_bytes_recursive_read=79925248i,\
+io_service_bytes_recursive_sync=77824i,io_service_bytes_recursive_total=80293888i,\
+io_service_bytes_recursive_write=368640i,io_serviced_recursive_async=6562i,\
+io_serviced_recursive_read=6492i,io_serviced_recursive_sync=37i,\
+io_serviced_recursive_total=6599i,io_serviced_recursive_write=107i 1453409536840126713
+```
diff --git a/plugins/inputs/docker/docker.go b/plugins/inputs/docker/docker.go
new file mode 100644
index 000000000..0e96dd176
--- /dev/null
+++ b/plugins/inputs/docker/docker.go
@@ -0,0 +1,309 @@
+package system
+
+import (
+ "fmt"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/influxdata/telegraf/plugins/inputs"
+
+ "github.com/fsouza/go-dockerclient"
+)
+
+type Docker struct {
+ Endpoint string
+ ContainerNames []string
+
+ client *docker.Client
+}
+
+var sampleConfig = `
+ # Docker Endpoint
+ # To use TCP, set endpoint = "tcp://[ip]:[port]"
+ # To use environment variables (ie, docker-machine), set endpoint = "ENV"
+ endpoint = "unix:///var/run/docker.sock"
+ # Only collect metrics for these containers, collect all if empty
+ container_names = []
+`
+
+func (d *Docker) Description() string {
+ return "Read metrics about docker containers"
+}
+
+func (d *Docker) SampleConfig() string { return sampleConfig }
+
+func (d *Docker) Gather(acc inputs.Accumulator) error {
+ if d.client == nil {
+ var c *docker.Client
+ var err error
+ if d.Endpoint == "ENV" {
+ c, err = docker.NewClientFromEnv()
+ if err != nil {
+ return err
+ }
+ } else if d.Endpoint == "" {
+ c, err = docker.NewClient("unix:///var/run/docker.sock")
+ if err != nil {
+ return err
+ }
+ } else {
+ c, err = docker.NewClient(d.Endpoint)
+ if err != nil {
+ return err
+ }
+ }
+ d.client = c
+ }
+
+ opts := docker.ListContainersOptions{}
+ containers, err := d.client.ListContainers(opts)
+ if err != nil {
+ return err
+ }
+
+ var wg sync.WaitGroup
+ wg.Add(len(containers))
+ for _, container := range containers {
+ go func(c docker.APIContainers) {
+ defer wg.Done()
+ err := d.gatherContainer(c, acc)
+ if err != nil {
+ fmt.Println(err.Error())
+ }
+ }(container)
+ }
+ wg.Wait()
+
+ return nil
+}
+
+func (d *Docker) gatherContainer(
+ container docker.APIContainers,
+ acc inputs.Accumulator,
+) error {
+ // Parse container name
+ cname := "unknown"
+ if len(container.Names) > 0 {
+ // Not sure what to do with other names, just take the first.
+ cname = strings.TrimPrefix(container.Names[0], "/")
+ }
+
+ tags := map[string]string{
+ "cont_id": container.ID,
+ "cont_name": cname,
+ "cont_image": container.Image,
+ }
+ if len(d.ContainerNames) > 0 {
+ if !sliceContains(cname, d.ContainerNames) {
+ return nil
+ }
+ }
+
+ statChan := make(chan *docker.Stats)
+ done := make(chan bool)
+ statOpts := docker.StatsOptions{
+ Stream: false,
+ ID: container.ID,
+ Stats: statChan,
+ Done: done,
+ Timeout: time.Duration(time.Second * 5),
+ }
+
+ go func() {
+ if err := d.client.Stats(statOpts); err != nil {
+ fmt.Println(err.Error())
+ }
+ }()
+
+ stat := <-statChan
+ close(done)
+ // Stats closes statChan on failure, which yields a nil stat here
+ if stat == nil {
+ return nil
+ }
+
+ // Add labels to tags
+ for k, v := range container.Labels {
+ tags[k] = v
+ }
+
+ gatherContainerStats(stat, acc, tags)
+
+ return nil
+}
+
+func gatherContainerStats(
+ stat *docker.Stats,
+ acc inputs.Accumulator,
+ tags map[string]string,
+) {
+ now := stat.Read
+
+ memfields := map[string]interface{}{
+ "max_usage": stat.MemoryStats.MaxUsage,
+ "usage": stat.MemoryStats.Usage,
+ "fail_count": stat.MemoryStats.Failcnt,
+ "limit": stat.MemoryStats.Limit,
+ "total_pgmafault": stat.MemoryStats.Stats.TotalPgmafault,
+ "cache": stat.MemoryStats.Stats.Cache,
+ "mapped_file": stat.MemoryStats.Stats.MappedFile,
+ "total_inactive_file": stat.MemoryStats.Stats.TotalInactiveFile,
+ "pgpgout": stat.MemoryStats.Stats.Pgpgout,
+ "rss": stat.MemoryStats.Stats.Rss,
+ "total_mapped_file": stat.MemoryStats.Stats.TotalMappedFile,
+ "writeback": stat.MemoryStats.Stats.Writeback,
+ "unevictable": stat.MemoryStats.Stats.Unevictable,
+ "pgpgin": stat.MemoryStats.Stats.Pgpgin,
+ "total_unevictable": stat.MemoryStats.Stats.TotalUnevictable,
+ "pgmajfault": stat.MemoryStats.Stats.Pgmajfault,
+ "total_rss": stat.MemoryStats.Stats.TotalRss,
+ "total_rss_huge": stat.MemoryStats.Stats.TotalRssHuge,
+ "total_writeback": stat.MemoryStats.Stats.TotalWriteback,
+ "total_inactive_anon": stat.MemoryStats.Stats.TotalInactiveAnon,
+ "rss_huge": stat.MemoryStats.Stats.RssHuge,
+ "hierarchical_memory_limit": stat.MemoryStats.Stats.HierarchicalMemoryLimit,
+ "total_pgfault": stat.MemoryStats.Stats.TotalPgfault,
+ "total_active_file": stat.MemoryStats.Stats.TotalActiveFile,
+ "active_anon": stat.MemoryStats.Stats.ActiveAnon,
+ "total_active_anon": stat.MemoryStats.Stats.TotalActiveAnon,
+ "total_pgpgout": stat.MemoryStats.Stats.TotalPgpgout,
+ "total_cache": stat.MemoryStats.Stats.TotalCache,
+ "inactive_anon": stat.MemoryStats.Stats.InactiveAnon,
+ "active_file": stat.MemoryStats.Stats.ActiveFile,
+ "pgfault": stat.MemoryStats.Stats.Pgfault,
+ "inactive_file": stat.MemoryStats.Stats.InactiveFile,
+ "total_pgpgin": stat.MemoryStats.Stats.TotalPgpgin,
+ }
+ acc.AddFields("docker_mem", memfields, tags, now)
+
+ cpufields := map[string]interface{}{
+ "usage_total": stat.CPUStats.CPUUsage.TotalUsage,
+ "usage_in_usermode": stat.CPUStats.CPUUsage.UsageInUsermode,
+ "usage_in_kernelmode": stat.CPUStats.CPUUsage.UsageInKernelmode,
+ "usage_system": stat.CPUStats.SystemCPUUsage,
+ "throttling_periods": stat.CPUStats.ThrottlingData.Periods,
+ "throttling_throttled_periods": stat.CPUStats.ThrottlingData.ThrottledPeriods,
+ "throttling_throttled_time": stat.CPUStats.ThrottlingData.ThrottledTime,
+ }
+ cputags := copyTags(tags)
+ cputags["cpu"] = "cpu-total"
+ acc.AddFields("docker_cpu", cpufields, cputags, now)
+
+ for i, percpu := range stat.CPUStats.CPUUsage.PercpuUsage {
+ percputags := copyTags(tags)
+ percputags["cpu"] = fmt.Sprintf("cpu%d", i)
+ acc.AddFields("docker_cpu", map[string]interface{}{"usage_total": percpu}, percputags, now)
+ }
+
+ for network, netstats := range stat.Networks {
+ netfields := map[string]interface{}{
+ "rx_dropped": netstats.RxDropped,
+ "rx_bytes": netstats.RxBytes,
+ "rx_errors": netstats.RxErrors,
+ "tx_packets": netstats.TxPackets,
+ "tx_dropped": netstats.TxDropped,
+ "rx_packets": netstats.RxPackets,
+ "tx_errors": netstats.TxErrors,
+ "tx_bytes": netstats.TxBytes,
+ }
+ // Create a new network tag dictionary for the "network" tag
+ nettags := copyTags(tags)
+ nettags["network"] = network
+ acc.AddFields("docker_net", netfields, nettags, now)
+ }
+
+ gatherBlockIOMetrics(stat, acc, tags, now)
+}
+
+func gatherBlockIOMetrics(
+ stat *docker.Stats,
+ acc inputs.Accumulator,
+ tags map[string]string,
+ now time.Time,
+) {
+ blkioStats := stat.BlkioStats
+ // Make a map of devices to their block io stats
+ deviceStatMap := make(map[string]map[string]interface{})
+
+ for _, metric := range blkioStats.IOServiceBytesRecursive {
+ device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
+ _, ok := deviceStatMap[device]
+ if !ok {
+ deviceStatMap[device] = make(map[string]interface{})
+ }
+
+ field := fmt.Sprintf("io_service_bytes_recursive_%s", strings.ToLower(metric.Op))
+ deviceStatMap[device][field] = metric.Value
+ }
+
+ for _, metric := range blkioStats.IOServicedRecursive {
+ device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
+ _, ok := deviceStatMap[device]
+ if !ok {
+ deviceStatMap[device] = make(map[string]interface{})
+ }
+
+ field := fmt.Sprintf("io_serviced_recursive_%s", strings.ToLower(metric.Op))
+ deviceStatMap[device][field] = metric.Value
+ }
+
+ for _, metric := range blkioStats.IOQueueRecursive {
+ device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
+ // earlier loops may not have seen this device; init its map to
+ // avoid assigning into a nil map
+ if _, ok := deviceStatMap[device]; !ok {
+ deviceStatMap[device] = make(map[string]interface{})
+ }
+ field := fmt.Sprintf("io_queue_recursive_%s", strings.ToLower(metric.Op))
+ deviceStatMap[device][field] = metric.Value
+ }
+
+ for _, metric := range blkioStats.IOServiceTimeRecursive {
+ device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
+ if _, ok := deviceStatMap[device]; !ok {
+ deviceStatMap[device] = make(map[string]interface{})
+ }
+ field := fmt.Sprintf("io_service_time_recursive_%s", strings.ToLower(metric.Op))
+ deviceStatMap[device][field] = metric.Value
+ }
+
+ for _, metric := range blkioStats.IOWaitTimeRecursive {
+ device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
+ if _, ok := deviceStatMap[device]; !ok {
+ deviceStatMap[device] = make(map[string]interface{})
+ }
+ field := fmt.Sprintf("io_wait_time_%s", strings.ToLower(metric.Op))
+ deviceStatMap[device][field] = metric.Value
+ }
+
+ for _, metric := range blkioStats.IOMergedRecursive {
+ device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
+ if _, ok := deviceStatMap[device]; !ok {
+ deviceStatMap[device] = make(map[string]interface{})
+ }
+ field := fmt.Sprintf("io_merged_recursive_%s", strings.ToLower(metric.Op))
+ deviceStatMap[device][field] = metric.Value
+ }
+
+ for _, metric := range blkioStats.IOTimeRecursive {
+ device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
+ if _, ok := deviceStatMap[device]; !ok {
+ deviceStatMap[device] = make(map[string]interface{})
+ }
+ field := fmt.Sprintf("io_time_recursive_%s", strings.ToLower(metric.Op))
+ deviceStatMap[device][field] = metric.Value
+ }
+
+ for _, metric := range blkioStats.SectorsRecursive {
+ device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
+ if _, ok := deviceStatMap[device]; !ok {
+ deviceStatMap[device] = make(map[string]interface{})
+ }
+ field := fmt.Sprintf("sectors_recursive_%s", strings.ToLower(metric.Op))
+ deviceStatMap[device][field] = metric.Value
+ }
+
+ for device, fields := range deviceStatMap {
+ iotags := copyTags(tags)
+ iotags["device"] = device
+ acc.AddFields("docker_blkio", fields, iotags, now)
+ }
+}
+
+func copyTags(in map[string]string) map[string]string {
+ out := make(map[string]string)
+ for k, v := range in {
+ out[k] = v
+ }
+ return out
+}
+
+func sliceContains(in string, sl []string) bool {
+ for _, str := range sl {
+ if str == in {
+ return true
+ }
+ }
+ return false
+}
+
+func init() {
+ inputs.Add("docker", func() inputs.Input {
+ return &Docker{}
+ })
+}
diff --git a/plugins/inputs/docker/docker_test.go b/plugins/inputs/docker/docker_test.go
new file mode 100644
index 000000000..9b85d1029
--- /dev/null
+++ b/plugins/inputs/docker/docker_test.go
@@ -0,0 +1,190 @@
+package system
+
+import (
+ "testing"
+ "time"
+
+ "github.com/influxdata/telegraf/testutil"
+
+ "github.com/fsouza/go-dockerclient"
+)
+
+func TestDockerGatherContainerStats(t *testing.T) {
+ var acc testutil.Accumulator
+ stats := testStats()
+
+ tags := map[string]string{
+ "cont_id": "foobarbaz",
+ "cont_name": "redis",
+ "cont_image": "redis/image",
+ }
+ gatherContainerStats(stats, &acc, tags)
+
+ // test docker_net measurement
+ netfields := map[string]interface{}{
+ "rx_dropped": uint64(1),
+ "rx_bytes": uint64(2),
+ "rx_errors": uint64(3),
+ "tx_packets": uint64(4),
+ "tx_dropped": uint64(1),
+ "rx_packets": uint64(2),
+ "tx_errors": uint64(3),
+ "tx_bytes": uint64(4),
+ }
+ nettags := copyTags(tags)
+ nettags["network"] = "eth0"
+ acc.AssertContainsTaggedFields(t, "docker_net", netfields, nettags)
+
+ // test docker_blkio measurement
+ blkiotags := copyTags(tags)
+ blkiotags["device"] = "6:0"
+ blkiofields := map[string]interface{}{
+ "io_service_bytes_recursive_read": uint64(100),
+ "io_serviced_recursive_write": uint64(101),
+ }
+ acc.AssertContainsTaggedFields(t, "docker_blkio", blkiofields, blkiotags)
+
+ // test docker_mem measurement
+ memfields := map[string]interface{}{
+ "max_usage": uint64(1001),
+ "usage": uint64(1111),
+ "fail_count": uint64(1),
+ "limit": uint64(20),
+ "total_pgmafault": uint64(0),
+ "cache": uint64(0),
+ "mapped_file": uint64(0),
+ "total_inactive_file": uint64(0),
+ "pgpgout": uint64(0),
+ "rss": uint64(0),
+ "total_mapped_file": uint64(0),
+ "writeback": uint64(0),
+ "unevictable": uint64(0),
+ "pgpgin": uint64(0),
+ "total_unevictable": uint64(0),
+ "pgmajfault": uint64(0),
+ "total_rss": uint64(44),
+ "total_rss_huge": uint64(444),
+ "total_writeback": uint64(55),
+ "total_inactive_anon": uint64(0),
+ "rss_huge": uint64(0),
+ "hierarchical_memory_limit": uint64(0),
+ "total_pgfault": uint64(0),
+ "total_active_file": uint64(0),
+ "active_anon": uint64(0),
+ "total_active_anon": uint64(0),
+ "total_pgpgout": uint64(0),
+ "total_cache": uint64(0),
+ "inactive_anon": uint64(0),
+ "active_file": uint64(1),
+ "pgfault": uint64(2),
+ "inactive_file": uint64(3),
+ "total_pgpgin": uint64(4),
+ }
+ acc.AssertContainsTaggedFields(t, "docker_mem", memfields, tags)
+
+ // test docker_cpu measurement
+ cputags := copyTags(tags)
+ cputags["cpu"] = "cpu-total"
+ cpufields := map[string]interface{}{
+ "usage_total": uint64(500),
+ "usage_in_usermode": uint64(100),
+ "usage_in_kernelmode": uint64(200),
+ "usage_system": uint64(100),
+ "throttling_periods": uint64(1),
+ "throttling_throttled_periods": uint64(0),
+ "throttling_throttled_time": uint64(0),
+ }
+ acc.AssertContainsTaggedFields(t, "docker_cpu", cpufields, cputags)
+
+ cputags["cpu"] = "cpu0"
+ cpu0fields := map[string]interface{}{
+ "usage_total": uint64(1),
+ }
+ acc.AssertContainsTaggedFields(t, "docker_cpu", cpu0fields, cputags)
+
+ cputags["cpu"] = "cpu1"
+ cpu1fields := map[string]interface{}{
+ "usage_total": uint64(1002),
+ }
+ acc.AssertContainsTaggedFields(t, "docker_cpu", cpu1fields, cputags)
+}
+
+func testStats() *docker.Stats {
+ stats := &docker.Stats{
+ Read: time.Now(),
+ Networks: make(map[string]docker.NetworkStats),
+ }
+
+ stats.CPUStats.CPUUsage.PercpuUsage = []uint64{1, 1002}
+ stats.CPUStats.CPUUsage.UsageInUsermode = 100
+ stats.CPUStats.CPUUsage.TotalUsage = 500
+ stats.CPUStats.CPUUsage.UsageInKernelmode = 200
+ stats.CPUStats.SystemCPUUsage = 100
+ stats.CPUStats.ThrottlingData.Periods = 1
+
+ stats.MemoryStats.Stats.TotalPgmafault = 0
+ stats.MemoryStats.Stats.Cache = 0
+ stats.MemoryStats.Stats.MappedFile = 0
+ stats.MemoryStats.Stats.TotalInactiveFile = 0
+ stats.MemoryStats.Stats.Pgpgout = 0
+ stats.MemoryStats.Stats.Rss = 0
+ stats.MemoryStats.Stats.TotalMappedFile = 0
+ stats.MemoryStats.Stats.Writeback = 0
+ stats.MemoryStats.Stats.Unevictable = 0
+ stats.MemoryStats.Stats.Pgpgin = 0
+ stats.MemoryStats.Stats.TotalUnevictable = 0
+ stats.MemoryStats.Stats.Pgmajfault = 0
+ stats.MemoryStats.Stats.TotalRss = 44
+ stats.MemoryStats.Stats.TotalRssHuge = 444
+ stats.MemoryStats.Stats.TotalWriteback = 55
+ stats.MemoryStats.Stats.TotalInactiveAnon = 0
+ stats.MemoryStats.Stats.RssHuge = 0
+ stats.MemoryStats.Stats.HierarchicalMemoryLimit = 0
+ stats.MemoryStats.Stats.TotalPgfault = 0
+ stats.MemoryStats.Stats.TotalActiveFile = 0
+ stats.MemoryStats.Stats.ActiveAnon = 0
+ stats.MemoryStats.Stats.TotalActiveAnon = 0
+ stats.MemoryStats.Stats.TotalPgpgout = 0
+ stats.MemoryStats.Stats.TotalCache = 0
+ stats.MemoryStats.Stats.InactiveAnon = 0
+ stats.MemoryStats.Stats.ActiveFile = 1
+ stats.MemoryStats.Stats.Pgfault = 2
+ stats.MemoryStats.Stats.InactiveFile = 3
+ stats.MemoryStats.Stats.TotalPgpgin = 4
+
+ stats.MemoryStats.MaxUsage = 1001
+ stats.MemoryStats.Usage = 1111
+ stats.MemoryStats.Failcnt = 1
+ stats.MemoryStats.Limit = 20
+
+ stats.Networks["eth0"] = docker.NetworkStats{
+ RxDropped: 1,
+ RxBytes: 2,
+ RxErrors: 3,
+ TxPackets: 4,
+ TxDropped: 1,
+ RxPackets: 2,
+ TxErrors: 3,
+ TxBytes: 4,
+ }
+
+ sbr := docker.BlkioStatsEntry{
+ Major: 6,
+ Minor: 0,
+ Op: "read",
+ Value: 100,
+ }
+ sr := docker.BlkioStatsEntry{
+ Major: 6,
+ Minor: 0,
+ Op: "write",
+ Value: 101,
+ }
+
+ stats.BlkioStats.IOServiceBytesRecursive = append(
+ stats.BlkioStats.IOServiceBytesRecursive, sbr)
+ stats.BlkioStats.IOServicedRecursive = append(
+ stats.BlkioStats.IOServicedRecursive, sr)
+
+ return stats
+}
diff --git a/plugins/inputs/elasticsearch/elasticsearch.go b/plugins/inputs/elasticsearch/elasticsearch.go
index f8185a053..304e0e3d7 100644
--- a/plugins/inputs/elasticsearch/elasticsearch.go
+++ b/plugins/inputs/elasticsearch/elasticsearch.go
@@ -2,12 +2,15 @@ package elasticsearch
import (
"encoding/json"
+ "errors"
"fmt"
"net/http"
+ "strings"
+ "sync"
"time"
- "github.com/influxdb/telegraf/internal"
- "github.com/influxdb/telegraf/plugins/inputs"
+ "github.com/influxdata/telegraf/internal"
+ "github.com/influxdata/telegraf/plugins/inputs"
)
const statsPath = "/_nodes/stats"
@@ -93,21 +96,41 @@ func (e *Elasticsearch) Description() string {
// Gather reads the stats from Elasticsearch and writes it to the
// Accumulator.
func (e *Elasticsearch) Gather(acc inputs.Accumulator) error {
+ errChan := make(chan error, len(e.Servers))
+ var wg sync.WaitGroup
+ wg.Add(len(e.Servers))
+
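+ // fan out: poll each server in its own goroutine, sending any
+ // errors to the buffered errChan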
for _, serv := range e.Servers {
- var url string
- if e.Local {
- url = serv + statsPathLocal
- } else {
- url = serv + statsPath
- }
- if err := e.gatherNodeStats(url, acc); err != nil {
- return err
- }
- if e.ClusterHealth {
- e.gatherClusterStats(fmt.Sprintf("%s/_cluster/health?level=indices", serv), acc)
- }
+ go func(s string, acc inputs.Accumulator) {
+ defer wg.Done()
+ var url string
+ if e.Local {
+ url = s + statsPathLocal
+ } else {
+ url = s + statsPath
+ }
+ if err := e.gatherNodeStats(url, acc); err != nil {
+ errChan <- err
+ return
+ }
+ if e.ClusterHealth {
+ e.gatherClusterStats(fmt.Sprintf("%s/_cluster/health?level=indices", s), acc)
+ }
+ }(serv, acc)
}
- return nil
+
+ wg.Wait()
+ close(errChan)
+ // Get all errors and return them as one giant error
+ errStrings := []string{}
+ for err := range errChan {
+ errStrings = append(errStrings, err.Error())
+ }
+
+ if len(errStrings) == 0 {
+ return nil
+ }
+ return errors.New(strings.Join(errStrings, "\n"))
}
func (e *Elasticsearch) gatherNodeStats(url string, acc inputs.Accumulator) error {
diff --git a/plugins/inputs/elasticsearch/elasticsearch_test.go b/plugins/inputs/elasticsearch/elasticsearch_test.go
index 62c3cb8fd..f94d3f9ac 100644
--- a/plugins/inputs/elasticsearch/elasticsearch_test.go
+++ b/plugins/inputs/elasticsearch/elasticsearch_test.go
@@ -6,7 +6,7 @@ import (
"strings"
"testing"
- "github.com/influxdb/telegraf/testutil"
+ "github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/require"
)
diff --git a/plugins/inputs/exec/README.md b/plugins/inputs/exec/README.md
index 29203f9a9..bd78f0b3c 100644
--- a/plugins/inputs/exec/README.md
+++ b/plugins/inputs/exec/README.md
@@ -10,7 +10,7 @@ setup the exec plugin with:
[[inputs.exec]]
command = "/usr/bin/mycollector --output=json"
name_suffix = "_mycollector"
- interval = 10
+ interval = "10s"
```
The name suffix is appended to exec as "exec_name_suffix" to identify the input stream.
@@ -27,14 +27,19 @@ Let's say that we have a command with the name_suffix "_mycollector", which give
{
"a": 0.5,
"b": {
- "c": "some text",
- "d": 0.1,
- "e": 5
+ "c": 0.1,
+ "d": 5
}
}
```
The collected metrics will be stored as field values under the same measurement "exec_mycollector":
```
- exec_mycollector a=0.5,b_c="some text",b_d=0.1,b_e=5 1452815002357578567
+ exec_mycollector a=0.5,b_c=0.1,b_d=5 1452815002357578567
+```
+
+Other options for modifying the measurement names are:
+```
+name_override = "newname"
+name_prefix = "prefix_"
```
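+
+For example, to replace the measurement name entirely (a sketch; the name
+"mycollector" is only an illustration):
+
+```
+[[inputs.exec]]
+  command = "/usr/bin/mycollector --output=json"
+  name_override = "mycollector"
+```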
diff --git a/plugins/inputs/exec/exec.go b/plugins/inputs/exec/exec.go
index b3c1001f8..603ba1464 100644
--- a/plugins/inputs/exec/exec.go
+++ b/plugins/inputs/exec/exec.go
@@ -8,8 +8,8 @@ import (
"github.com/gonuts/go-shellquote"
- "github.com/influxdb/telegraf/internal"
- "github.com/influxdb/telegraf/plugins/inputs"
+ "github.com/influxdata/telegraf/internal"
+ "github.com/influxdata/telegraf/plugins/inputs"
)
const sampleConfig = `
diff --git a/plugins/inputs/exec/exec_test.go b/plugins/inputs/exec/exec_test.go
index 64fd69fce..8bf47c1d0 100644
--- a/plugins/inputs/exec/exec_test.go
+++ b/plugins/inputs/exec/exec_test.go
@@ -4,7 +4,7 @@ import (
"fmt"
"testing"
- "github.com/influxdb/telegraf/testutil"
+ "github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
diff --git a/plugins/inputs/haproxy/haproxy.go b/plugins/inputs/haproxy/haproxy.go
index 23b92fc26..c2e334424 100644
--- a/plugins/inputs/haproxy/haproxy.go
+++ b/plugins/inputs/haproxy/haproxy.go
@@ -3,7 +3,7 @@ package haproxy
import (
"encoding/csv"
"fmt"
- "github.com/influxdb/telegraf/plugins/inputs"
+ "github.com/influxdata/telegraf/plugins/inputs"
"io"
"net/http"
"net/url"
diff --git a/plugins/inputs/haproxy/haproxy_test.go b/plugins/inputs/haproxy/haproxy_test.go
index e514bc7ad..7b86f2b50 100644
--- a/plugins/inputs/haproxy/haproxy_test.go
+++ b/plugins/inputs/haproxy/haproxy_test.go
@@ -5,7 +5,7 @@ import (
"strings"
"testing"
- "github.com/influxdb/telegraf/testutil"
+ "github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"net/http"
diff --git a/plugins/inputs/httpjson/httpjson.go b/plugins/inputs/httpjson/httpjson.go
index 5763fd6fa..b90a02e5b 100644
--- a/plugins/inputs/httpjson/httpjson.go
+++ b/plugins/inputs/httpjson/httpjson.go
@@ -11,8 +11,8 @@ import (
"sync"
"time"
- "github.com/influxdb/telegraf/internal"
- "github.com/influxdb/telegraf/plugins/inputs"
+ "github.com/influxdata/telegraf/internal"
+ "github.com/influxdata/telegraf/plugins/inputs"
)
type HttpJson struct {
diff --git a/plugins/inputs/httpjson/httpjson_test.go b/plugins/inputs/httpjson/httpjson_test.go
index 3f14290ff..0ea5e9e42 100644
--- a/plugins/inputs/httpjson/httpjson_test.go
+++ b/plugins/inputs/httpjson/httpjson_test.go
@@ -6,7 +6,7 @@ import (
"strings"
"testing"
- "github.com/influxdb/telegraf/testutil"
+ "github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
diff --git a/plugins/inputs/influxdb/influxdb.go b/plugins/inputs/influxdb/influxdb.go
index cf5742e1d..311f6ba0c 100644
--- a/plugins/inputs/influxdb/influxdb.go
+++ b/plugins/inputs/influxdb/influxdb.go
@@ -8,7 +8,7 @@ import (
"strings"
"sync"
- "github.com/influxdb/telegraf/plugins/inputs"
+ "github.com/influxdata/telegraf/plugins/inputs"
)
type InfluxDB struct {
@@ -130,7 +130,7 @@ func (i *InfluxDB) gatherURL(
p.Tags["url"] = url
acc.AddFields(
- p.Name,
+ "influxdb_"+p.Name,
p.Values,
p.Tags,
)
diff --git a/plugins/inputs/influxdb/influxdb_test.go b/plugins/inputs/influxdb/influxdb_test.go
index 0e02cc6bd..ef6c1a97a 100644
--- a/plugins/inputs/influxdb/influxdb_test.go
+++ b/plugins/inputs/influxdb/influxdb_test.go
@@ -5,8 +5,8 @@ import (
"net/http/httptest"
"testing"
- "github.com/influxdb/telegraf/plugins/inputs/influxdb"
- "github.com/influxdb/telegraf/testutil"
+ "github.com/influxdata/telegraf/plugins/inputs/influxdb"
+ "github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/require"
)
@@ -84,7 +84,7 @@ func TestBasic(t *testing.T) {
"id": "ex1",
"url": fakeServer.URL + "/endpoint",
}
- acc.AssertContainsTaggedFields(t, "foo", fields, tags)
+ acc.AssertContainsTaggedFields(t, "influxdb_foo", fields, tags)
fields = map[string]interface{}{
"x": "x",
@@ -93,5 +93,5 @@ func TestBasic(t *testing.T) {
"id": "ex2",
"url": fakeServer.URL + "/endpoint",
}
- acc.AssertContainsTaggedFields(t, "bar", fields, tags)
+ acc.AssertContainsTaggedFields(t, "influxdb_bar", fields, tags)
}
diff --git a/plugins/inputs/jolokia/jolokia.go b/plugins/inputs/jolokia/jolokia.go
index 36811bd27..7579ecb4a 100644
--- a/plugins/inputs/jolokia/jolokia.go
+++ b/plugins/inputs/jolokia/jolokia.go
@@ -8,7 +8,7 @@ import (
"net/http"
"net/url"
- "github.com/influxdb/telegraf/plugins/inputs"
+ "github.com/influxdata/telegraf/plugins/inputs"
)
type Server struct {
diff --git a/plugins/inputs/jolokia/jolokia_test.go b/plugins/inputs/jolokia/jolokia_test.go
index d29b8a810..63b47ebff 100644
--- a/plugins/inputs/jolokia/jolokia_test.go
+++ b/plugins/inputs/jolokia/jolokia_test.go
@@ -7,7 +7,7 @@ import (
"strings"
"testing"
- "github.com/influxdb/telegraf/testutil"
+ "github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
_ "github.com/stretchr/testify/require"
)
diff --git a/plugins/inputs/kafka_consumer/kafka_consumer.go b/plugins/inputs/kafka_consumer/kafka_consumer.go
index f3558e2e5..a0f1d3d11 100644
--- a/plugins/inputs/kafka_consumer/kafka_consumer.go
+++ b/plugins/inputs/kafka_consumer/kafka_consumer.go
@@ -5,8 +5,8 @@ import (
"strings"
"sync"
- "github.com/influxdb/influxdb/models"
- "github.com/influxdb/telegraf/plugins/inputs"
+ "github.com/influxdata/influxdb/models"
+ "github.com/influxdata/telegraf/plugins/inputs"
"github.com/Shopify/sarama"
"github.com/wvanbergen/kafka/consumergroup"
diff --git a/plugins/inputs/kafka_consumer/kafka_consumer_integration_test.go b/plugins/inputs/kafka_consumer/kafka_consumer_integration_test.go
index 9f554d9ab..0611467ff 100644
--- a/plugins/inputs/kafka_consumer/kafka_consumer_integration_test.go
+++ b/plugins/inputs/kafka_consumer/kafka_consumer_integration_test.go
@@ -6,7 +6,7 @@ import (
"time"
"github.com/Shopify/sarama"
- "github.com/influxdb/telegraf/testutil"
+ "github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
diff --git a/plugins/inputs/kafka_consumer/kafka_consumer_test.go b/plugins/inputs/kafka_consumer/kafka_consumer_test.go
index dcd38f6c4..560e130c0 100644
--- a/plugins/inputs/kafka_consumer/kafka_consumer_test.go
+++ b/plugins/inputs/kafka_consumer/kafka_consumer_test.go
@@ -4,8 +4,8 @@ import (
"testing"
"time"
- "github.com/influxdb/influxdb/models"
- "github.com/influxdb/telegraf/testutil"
+ "github.com/influxdata/influxdb/models"
+ "github.com/influxdata/telegraf/testutil"
"github.com/Shopify/sarama"
"github.com/stretchr/testify/assert"
diff --git a/plugins/inputs/leofs/leofs.go b/plugins/inputs/leofs/leofs.go
index c65db5f37..f4dd314b7 100644
--- a/plugins/inputs/leofs/leofs.go
+++ b/plugins/inputs/leofs/leofs.go
@@ -3,7 +3,7 @@ package leofs
import (
"bufio"
"fmt"
- "github.com/influxdb/telegraf/plugins/inputs"
+ "github.com/influxdata/telegraf/plugins/inputs"
"net/url"
"os/exec"
"strconv"
diff --git a/plugins/inputs/leofs/leofs_test.go b/plugins/inputs/leofs/leofs_test.go
index 48a82a18a..292cd15d0 100644
--- a/plugins/inputs/leofs/leofs_test.go
+++ b/plugins/inputs/leofs/leofs_test.go
@@ -1,7 +1,7 @@
package leofs
import (
- "github.com/influxdb/telegraf/testutil"
+ "github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"io/ioutil"
diff --git a/plugins/inputs/lustre2/lustre2.go b/plugins/inputs/lustre2/lustre2.go
index 90222af79..d6266de73 100644
--- a/plugins/inputs/lustre2/lustre2.go
+++ b/plugins/inputs/lustre2/lustre2.go
@@ -13,8 +13,8 @@ import (
"strconv"
"strings"
- "github.com/influxdb/telegraf/internal"
- "github.com/influxdb/telegraf/plugins/inputs"
+ "github.com/influxdata/telegraf/internal"
+ "github.com/influxdata/telegraf/plugins/inputs"
)
// Lustre proc files can change between versions, so we want to future-proof
diff --git a/plugins/inputs/lustre2/lustre2_test.go b/plugins/inputs/lustre2/lustre2_test.go
index cea98fa1e..9e560df2c 100644
--- a/plugins/inputs/lustre2/lustre2_test.go
+++ b/plugins/inputs/lustre2/lustre2_test.go
@@ -5,7 +5,7 @@ import (
"os"
"testing"
- "github.com/influxdb/telegraf/testutil"
+ "github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/require"
)
diff --git a/plugins/inputs/mailchimp/mailchimp.go b/plugins/inputs/mailchimp/mailchimp.go
index 4b148a95c..284ac61e1 100644
--- a/plugins/inputs/mailchimp/mailchimp.go
+++ b/plugins/inputs/mailchimp/mailchimp.go
@@ -4,7 +4,7 @@ import (
"fmt"
"time"
- "github.com/influxdb/telegraf/plugins/inputs"
+ "github.com/influxdata/telegraf/plugins/inputs"
)
type MailChimp struct {
diff --git a/plugins/inputs/mailchimp/mailchimp_test.go b/plugins/inputs/mailchimp/mailchimp_test.go
index 5e5394581..0c4dab56d 100644
--- a/plugins/inputs/mailchimp/mailchimp_test.go
+++ b/plugins/inputs/mailchimp/mailchimp_test.go
@@ -7,7 +7,7 @@ import (
"net/url"
"testing"
- "github.com/influxdb/telegraf/testutil"
+ "github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/require"
)
diff --git a/plugins/inputs/memcached/memcached.go b/plugins/inputs/memcached/memcached.go
index 1d9ee9547..078f05aa3 100644
--- a/plugins/inputs/memcached/memcached.go
+++ b/plugins/inputs/memcached/memcached.go
@@ -8,7 +8,7 @@ import (
"strconv"
"time"
- "github.com/influxdb/telegraf/plugins/inputs"
+ "github.com/influxdata/telegraf/plugins/inputs"
)
// Memcached is a memcached plugin
diff --git a/plugins/inputs/memcached/memcached_test.go b/plugins/inputs/memcached/memcached_test.go
index 6e2f8452a..210adffdb 100644
--- a/plugins/inputs/memcached/memcached_test.go
+++ b/plugins/inputs/memcached/memcached_test.go
@@ -5,7 +5,7 @@ import (
"strings"
"testing"
- "github.com/influxdb/telegraf/testutil"
+ "github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
diff --git a/plugins/inputs/mongodb/mongodb.go b/plugins/inputs/mongodb/mongodb.go
index 4cb3ffee5..ce73c3a14 100644
--- a/plugins/inputs/mongodb/mongodb.go
+++ b/plugins/inputs/mongodb/mongodb.go
@@ -9,7 +9,7 @@ import (
"sync"
"time"
- "github.com/influxdb/telegraf/plugins/inputs"
+ "github.com/influxdata/telegraf/plugins/inputs"
"gopkg.in/mgo.v2"
)
diff --git a/plugins/inputs/mongodb/mongodb_data.go b/plugins/inputs/mongodb/mongodb_data.go
index 15f8c479b..c0c68c330 100644
--- a/plugins/inputs/mongodb/mongodb_data.go
+++ b/plugins/inputs/mongodb/mongodb_data.go
@@ -5,7 +5,7 @@ import (
"reflect"
"strconv"
- "github.com/influxdb/telegraf/plugins/inputs"
+ "github.com/influxdata/telegraf/plugins/inputs"
)
type MongodbData struct {
diff --git a/plugins/inputs/mongodb/mongodb_data_test.go b/plugins/inputs/mongodb/mongodb_data_test.go
index 5d24a7a09..3166ab018 100644
--- a/plugins/inputs/mongodb/mongodb_data_test.go
+++ b/plugins/inputs/mongodb/mongodb_data_test.go
@@ -4,7 +4,7 @@ import (
"testing"
"time"
- "github.com/influxdb/telegraf/testutil"
+ "github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
)
diff --git a/plugins/inputs/mongodb/mongodb_server.go b/plugins/inputs/mongodb/mongodb_server.go
index 795cf97d7..87552f906 100644
--- a/plugins/inputs/mongodb/mongodb_server.go
+++ b/plugins/inputs/mongodb/mongodb_server.go
@@ -4,7 +4,7 @@ import (
"net/url"
"time"
- "github.com/influxdb/telegraf/plugins/inputs"
+ "github.com/influxdata/telegraf/plugins/inputs"
"gopkg.in/mgo.v2"
"gopkg.in/mgo.v2/bson"
)
diff --git a/plugins/inputs/mongodb/mongodb_server_test.go b/plugins/inputs/mongodb/mongodb_server_test.go
index ec536bbef..52869724c 100644
--- a/plugins/inputs/mongodb/mongodb_server_test.go
+++ b/plugins/inputs/mongodb/mongodb_server_test.go
@@ -6,7 +6,7 @@ import (
"testing"
"time"
- "github.com/influxdb/telegraf/testutil"
+ "github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
diff --git a/plugins/inputs/mysql/mysql.go b/plugins/inputs/mysql/mysql.go
index f9126b5ea..7434a282a 100644
--- a/plugins/inputs/mysql/mysql.go
+++ b/plugins/inputs/mysql/mysql.go
@@ -6,7 +6,7 @@ import (
"strings"
_ "github.com/go-sql-driver/mysql"
- "github.com/influxdb/telegraf/plugins/inputs"
+ "github.com/influxdata/telegraf/plugins/inputs"
)
type Mysql struct {
diff --git a/plugins/inputs/mysql/mysql_test.go b/plugins/inputs/mysql/mysql_test.go
index 2362002bc..855e8ba52 100644
--- a/plugins/inputs/mysql/mysql_test.go
+++ b/plugins/inputs/mysql/mysql_test.go
@@ -4,7 +4,7 @@ import (
"fmt"
"testing"
- "github.com/influxdb/telegraf/testutil"
+ "github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
diff --git a/plugins/inputs/nginx/nginx.go b/plugins/inputs/nginx/nginx.go
index 18e3244f7..6ea665b7e 100644
--- a/plugins/inputs/nginx/nginx.go
+++ b/plugins/inputs/nginx/nginx.go
@@ -11,7 +11,7 @@ import (
"sync"
"time"
- "github.com/influxdb/telegraf/plugins/inputs"
+ "github.com/influxdata/telegraf/plugins/inputs"
)
type Nginx struct {
diff --git a/plugins/inputs/nginx/nginx_test.go b/plugins/inputs/nginx/nginx_test.go
index 9d694bc26..895e3e583 100644
--- a/plugins/inputs/nginx/nginx_test.go
+++ b/plugins/inputs/nginx/nginx_test.go
@@ -8,7 +8,7 @@ import (
"net/url"
"testing"
- "github.com/influxdb/telegraf/testutil"
+ "github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
diff --git a/plugins/inputs/nsq/nsq.go b/plugins/inputs/nsq/nsq.go
new file mode 100644
index 000000000..9b680a0db
--- /dev/null
+++ b/plugins/inputs/nsq/nsq.go
@@ -0,0 +1,271 @@
+// The MIT License (MIT)
+//
+// Copyright (c) 2015 Jeff Nickoloff (jeff@allingeek.com)
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package nsq
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "net/url"
+ "strconv"
+ "sync"
+ "time"
+
+ "github.com/influxdata/telegraf/plugins/inputs"
+)
+
+// Might add Lookupd endpoints for cluster discovery
+type NSQ struct {
+ Endpoints []string
+}
+
+var sampleConfig = `
+ # An array of NSQD HTTP API endpoints
+ endpoints = ["http://localhost:4151"]
+`
+
+const (
+ requestPattern = `%s/stats?format=json`
+)
+
+func init() {
+ inputs.Add("nsq", func() inputs.Input {
+ return &NSQ{}
+ })
+}
+
+func (n *NSQ) SampleConfig() string {
+ return sampleConfig
+}
+
+func (n *NSQ) Description() string {
+ return "Read NSQ topic and channel statistics."
+}
+
+func (n *NSQ) Gather(acc inputs.Accumulator) error {
+ var wg sync.WaitGroup
+ var mu sync.Mutex
+ var outerr error
+
+ for _, e := range n.Endpoints {
+ wg.Add(1)
+ go func(e string) {
+ defer wg.Done()
+ if err := n.gatherEndpoint(e, acc); err != nil {
+ // endpoints are polled concurrently, so guard outerr;
+ // only the last error encountered is returned
+ mu.Lock()
+ outerr = err
+ mu.Unlock()
+ }
+ }(e)
+ }
+
+ wg.Wait()
+
+ return outerr
+}
+
+var tr = &http.Transport{
+ ResponseHeaderTimeout: 3 * time.Second,
+}
+
+var client = &http.Client{Transport: tr}
+
+func (n *NSQ) gatherEndpoint(e string, acc inputs.Accumulator) error {
+ u, err := buildURL(e)
+ if err != nil {
+ return err
+ }
+ r, err := client.Get(u.String())
+ if err != nil {
+ return fmt.Errorf("Error while polling %s: %s", u.String(), err)
+ }
+ defer r.Body.Close()
+
+ if r.StatusCode != http.StatusOK {
+ return fmt.Errorf("%s returned HTTP status %s", u.String(), r.Status)
+ }
+
+ s := &NSQStats{}
+ err = json.NewDecoder(r.Body).Decode(s)
+ if err != nil {
+ return fmt.Errorf(`Error parsing response: %s`, err)
+ }
+
+ tags := map[string]string{
+ `server_host`: u.Host,
+ `server_version`: s.Data.Version,
+ }
+
+ fields := make(map[string]interface{})
+ if s.Data.Health == `OK` {
+ fields["server_count"] = int64(1)
+ } else {
+ fields["server_count"] = int64(0)
+ }
+ fields["topic_count"] = int64(len(s.Data.Topics))
+
+ acc.AddFields("nsq_server", fields, tags)
+ for _, t := range s.Data.Topics {
+ topicStats(t, acc, u.Host, s.Data.Version)
+ }
+
+ return nil
+}
+
+func buildURL(e string) (*url.URL, error) {
+ u := fmt.Sprintf(requestPattern, e)
+ addr, err := url.Parse(u)
+ if err != nil {
+ return nil, fmt.Errorf("Unable to parse address '%s': %s", u, err)
+ }
+ return addr, nil
+}
+
+func topicStats(t TopicStats, acc inputs.Accumulator, host, version string) {
+ // per-topic totals, tagged with server host/version and topic name
+ tags := map[string]string{
+ "server_host": host,
+ "server_version": version,
+ "topic": t.Name,
+ }
+
+ fields := map[string]interface{}{
+ "depth": t.Depth,
+ "backend_depth": t.BackendDepth,
+ "message_count": t.MessageCount,
+ "channel_count": int64(len(t.Channels)),
+ }
+ acc.AddFields("nsq_topic", fields, tags)
+
+ for _, c := range t.Channels {
+ channelStats(c, acc, host, version, t.Name)
+ }
+}
+
+func channelStats(c ChannelStats, acc inputs.Accumulator, host, version, topic string) {
+ tags := map[string]string{
+ "server_host": host,
+ "server_version": version,
+ "topic": topic,
+ "channel": c.Name,
+ }
+
+ fields := map[string]interface{}{
+ "depth": c.Depth,
+ "backend_depth": c.BackendDepth,
+ "inflight_count": c.InFlightCount,
+ "deferred_count": c.DeferredCount,
+ "message_count": c.MessageCount,
+ "requeue_count": c.RequeueCount,
+ "timeout_count": c.TimeoutCount,
+ "client_count": int64(len(c.Clients)),
+ }
+
+ acc.AddFields("nsq_channel", fields, tags)
+ for _, cl := range c.Clients {
+ clientStats(cl, acc, host, version, topic, c.Name)
+ }
+}
+
+func clientStats(c ClientStats, acc inputs.Accumulator, host, version, topic, channel string) {
+ tags := map[string]string{
+ "server_host": host,
+ "server_version": version,
+ "topic": topic,
+ "channel": channel,
+ "client_name": c.Name,
+ "client_id": c.ID,
+ "client_hostname": c.Hostname,
+ "client_version": c.Version,
+ "client_address": c.RemoteAddress,
+ "client_user_agent": c.UserAgent,
+ "client_tls": strconv.FormatBool(c.TLS),
+ "client_snappy": strconv.FormatBool(c.Snappy),
+ "client_deflate": strconv.FormatBool(c.Deflate),
+ }
+
+ fields := map[string]interface{}{
+ "ready_count": c.ReadyCount,
+ "inflight_count": c.InFlightCount,
+ "message_count": c.MessageCount,
+ "finish_count": c.FinishCount,
+ "requeue_count": c.RequeueCount,
+ }
+ acc.AddFields("nsq_client", fields, tags)
+}
+
+type NSQStats struct {
+ Code int64 `json:"status_code"`
+ Txt string `json:"status_txt"`
+ Data NSQStatsData `json:"data"`
+}
+
+type NSQStatsData struct {
+ Version string `json:"version"`
+ Health string `json:"health"`
+ StartTime int64 `json:"start_time"`
+ Topics []TopicStats `json:"topics"`
+}
+
+// e2e_processing_latency is not modeled
+type TopicStats struct {
+ Name string `json:"topic_name"`
+ Depth int64 `json:"depth"`
+ BackendDepth int64 `json:"backend_depth"`
+ MessageCount int64 `json:"message_count"`
+ Paused bool `json:"paused"`
+ Channels []ChannelStats `json:"channels"`
+}
+
+// e2e_processing_latency is not modeled
+type ChannelStats struct {
+ Name string `json:"channel_name"`
+ Depth int64 `json:"depth"`
+ BackendDepth int64 `json:"backend_depth"`
+ InFlightCount int64 `json:"in_flight_count"`
+ DeferredCount int64 `json:"deferred_count"`
+ MessageCount int64 `json:"message_count"`
+ RequeueCount int64 `json:"requeue_count"`
+ TimeoutCount int64 `json:"timeout_count"`
+ Paused bool `json:"paused"`
+ Clients []ClientStats `json:"clients"`
+}
+
+type ClientStats struct {
+ Name string `json:"name"`
+ ID string `json:"client_id"`
+ Hostname string `json:"hostname"`
+ Version string `json:"version"`
+ RemoteAddress string `json:"remote_address"`
+ State int64 `json:"state"`
+ ReadyCount int64 `json:"ready_count"`
+ InFlightCount int64 `json:"in_flight_count"`
+ MessageCount int64 `json:"message_count"`
+ FinishCount int64 `json:"finish_count"`
+ RequeueCount int64 `json:"requeue_count"`
+ ConnectTime int64 `json:"connect_ts"`
+ SampleRate int64 `json:"sample_rate"`
+ Deflate bool `json:"deflate"`
+ Snappy bool `json:"snappy"`
+ UserAgent string `json:"user_agent"`
+ TLS bool `json:"tls"`
+ TLSCipherSuite string `json:"tls_cipher_suite"`
+ TLSVersion string `json:"tls_version"`
+ TLSNegotiatedProtocol string `json:"tls_negotiated_protocol"`
+ TLSNegotiatedProtocolIsMutual bool `json:"tls_negotiated_protocol_is_mutual"`
+}
diff --git a/plugins/inputs/nsq/nsq_test.go b/plugins/inputs/nsq/nsq_test.go
new file mode 100644
index 000000000..23fd19a42
--- /dev/null
+++ b/plugins/inputs/nsq/nsq_test.go
@@ -0,0 +1,273 @@
+package nsq
+
+import (
+ "fmt"
+ "net/http"
+ "net/http/httptest"
+ "net/url"
+ "testing"
+
+ "github.com/influxdata/telegraf/testutil"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestNSQStats(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusOK)
+ fmt.Fprintln(w, response)
+ }))
+ defer ts.Close()
+
+ n := &NSQ{
+ Endpoints: []string{ts.URL},
+ }
+
+ var acc testutil.Accumulator
+ err := n.Gather(&acc)
+ require.NoError(t, err)
+
+ u, err := url.Parse(ts.URL)
+ require.NoError(t, err)
+ host := u.Host
+
+ // validate each expected measurement against the accumulator
+ tests := []struct {
+ m string
+ f map[string]interface{}
+ g map[string]string
+ }{
+ {
+ "nsq_server",
+ map[string]interface{}{
+ "server_count": int64(1),
+ "topic_count": int64(2),
+ },
+ map[string]string{
+ "server_host": host,
+ "server_version": "0.3.6",
+ },
+ },
+ {
+ "nsq_topic",
+ map[string]interface{}{
+ "depth": int64(12),
+ "backend_depth": int64(13),
+ "message_count": int64(14),
+ "channel_count": int64(1),
+ },
+ map[string]string{
+ "server_host": host,
+ "server_version": "0.3.6",
+ "topic": "t1"},
+ },
+ {
+ "nsq_channel",
+ map[string]interface{}{
+ "depth": int64(0),
+ "backend_depth": int64(1),
+ "inflight_count": int64(2),
+ "deferred_count": int64(3),
+ "message_count": int64(4),
+ "requeue_count": int64(5),
+ "timeout_count": int64(6),
+ "client_count": int64(1),
+ },
+ map[string]string{
+ "server_host": host,
+ "server_version": "0.3.6",
+ "topic": "t1",
+ "channel": "c1",
+ },
+ },
+ {
+ "nsq_client",
+ map[string]interface{}{
+ "ready_count": int64(200),
+ "inflight_count": int64(7),
+ "message_count": int64(8),
+ "finish_count": int64(9),
+ "requeue_count": int64(10),
+ },
+ map[string]string{"server_host": host, "server_version": "0.3.6",
+ "topic": "t1", "channel": "c1", "client_name": "373a715cd990",
+ "client_id": "373a715cd990", "client_hostname": "373a715cd990",
+ "client_version": "V2", "client_address": "172.17.0.11:35560",
+ "client_tls": "false", "client_snappy": "false",
+ "client_deflate": "false",
+ "client_user_agent": "nsq_to_nsq/0.3.6 go-nsq/1.0.5"},
+ },
+ {
+ "nsq_topic",
+ map[string]interface{}{
+ "depth": int64(28),
+ "backend_depth": int64(29),
+ "message_count": int64(30),
+ "channel_count": int64(1),
+ },
+ map[string]string{
+ "server_host": host,
+ "server_version": "0.3.6",
+ "topic": "t2"},
+ },
+ {
+ "nsq_channel",
+ map[string]interface{}{
+ "depth": int64(15),
+ "backend_depth": int64(16),
+ "inflight_count": int64(17),
+ "deferred_count": int64(18),
+ "message_count": int64(19),
+ "requeue_count": int64(20),
+ "timeout_count": int64(21),
+ "client_count": int64(1),
+ },
+ map[string]string{
+ "server_host": host,
+ "server_version": "0.3.6",
+ "topic": "t2",
+ "channel": "c2",
+ },
+ },
+ {
+ "nsq_client",
+ map[string]interface{}{
+ "ready_count": int64(22),
+ "inflight_count": int64(23),
+ "message_count": int64(24),
+ "finish_count": int64(25),
+ "requeue_count": int64(26),
+ },
+ map[string]string{"server_host": host, "server_version": "0.3.6",
+ "topic": "t2", "channel": "c2", "client_name": "377569bd462b",
+ "client_id": "377569bd462b", "client_hostname": "377569bd462b",
+ "client_version": "V2", "client_address": "172.17.0.8:48145",
+ "client_user_agent": "go-nsq/1.0.5", "client_tls": "true",
+ "client_snappy": "true", "client_deflate": "true"},
+ },
+ }
+
+ for _, test := range tests {
+ acc.AssertContainsTaggedFields(t, test.m, test.f, test.g)
+ }
+}
+
+var response = `
+{
+ "status_code": 200,
+ "status_txt": "OK",
+ "data": {
+ "version": "0.3.6",
+ "health": "OK",
+ "start_time": 1452021674,
+ "topics": [
+ {
+ "topic_name": "t1",
+ "channels": [
+ {
+ "channel_name": "c1",
+ "depth": 0,
+ "backend_depth": 1,
+ "in_flight_count": 2,
+ "deferred_count": 3,
+ "message_count": 4,
+ "requeue_count": 5,
+ "timeout_count": 6,
+ "clients": [
+ {
+ "name": "373a715cd990",
+ "client_id": "373a715cd990",
+ "hostname": "373a715cd990",
+ "version": "V2",
+ "remote_address": "172.17.0.11:35560",
+ "state": 3,
+ "ready_count": 200,
+ "in_flight_count": 7,
+ "message_count": 8,
+ "finish_count": 9,
+ "requeue_count": 10,
+ "connect_ts": 1452021675,
+ "sample_rate": 11,
+ "deflate": false,
+ "snappy": false,
+ "user_agent": "nsq_to_nsq\/0.3.6 go-nsq\/1.0.5",
+ "tls": false,
+ "tls_cipher_suite": "",
+ "tls_version": "",
+ "tls_negotiated_protocol": "",
+ "tls_negotiated_protocol_is_mutual": false
+ }
+ ],
+ "paused": false,
+ "e2e_processing_latency": {
+ "count": 0,
+ "percentiles": null
+ }
+ }
+ ],
+ "depth": 12,
+ "backend_depth": 13,
+ "message_count": 14,
+ "paused": false,
+ "e2e_processing_latency": {
+ "count": 0,
+ "percentiles": null
+ }
+ },
+ {
+ "topic_name": "t2",
+ "channels": [
+ {
+ "channel_name": "c2",
+ "depth": 15,
+ "backend_depth": 16,
+ "in_flight_count": 17,
+ "deferred_count": 18,
+ "message_count": 19,
+ "requeue_count": 20,
+ "timeout_count": 21,
+ "clients": [
+ {
+ "name": "377569bd462b",
+ "client_id": "377569bd462b",
+ "hostname": "377569bd462b",
+ "version": "V2",
+ "remote_address": "172.17.0.8:48145",
+ "state": 3,
+ "ready_count": 22,
+ "in_flight_count": 23,
+ "message_count": 24,
+ "finish_count": 25,
+ "requeue_count": 26,
+ "connect_ts": 1452021678,
+ "sample_rate": 27,
+ "deflate": true,
+ "snappy": true,
+ "user_agent": "go-nsq\/1.0.5",
+ "tls": true,
+ "tls_cipher_suite": "",
+ "tls_version": "",
+ "tls_negotiated_protocol": "",
+ "tls_negotiated_protocol_is_mutual": false
+ }
+ ],
+ "paused": false,
+ "e2e_processing_latency": {
+ "count": 0,
+ "percentiles": null
+ }
+ }
+ ],
+ "depth": 28,
+ "backend_depth": 29,
+ "message_count": 30,
+ "paused": false,
+ "e2e_processing_latency": {
+ "count": 0,
+ "percentiles": null
+ }
+ }
+ ]
+ }
+}
+`
diff --git a/plugins/inputs/passenger/README.md b/plugins/inputs/passenger/README.md
new file mode 100644
index 000000000..64e39729b
--- /dev/null
+++ b/plugins/inputs/passenger/README.md
@@ -0,0 +1,138 @@
+# Telegraf plugin: passenger
+
+Get Phusion Passenger stats using the command line utility
+`passenger-status`.
+
+# Measurements
+
+Meta:
+
+- tags:
+
+ * name
+ * passenger_version
+ * pid
+ * code_revision
+
+Measurement names:
+
+- passenger:
+
+ * Tags: `passenger_version`
+ * Fields:
+
+ - process_count
+ - max
+ - capacity_used
+ - get_wait_list_size
+
+- passenger_supergroup:
+
+ * Tags: `name`
+ * Fields:
+
+ - get_wait_list_size
+ - capacity_used
+
+- passenger_group:
+
+ * Tags:
+
+ - name
+ - app_root
+ - app_type
+
+ * Fields:
+
+ - get_wait_list_size
+ - capacity_used
+ - processes_being_spawned
+
+- passenger_process:
+
+ * Tags:
+
+ - group_name
+ - app_root
+ - supergroup_name
+ - pid
+ - code_revision
+ - life_status
+ - process_group_id
+
+ * Fields:
+
+ - concurrency
+ - sessions
+ - busyness
+ - processed
+ - spawner_creation_time
+ - spawn_start_time
+ - spawn_end_time
+ - last_used
+ - uptime
+ - cpu
+ - rss
+ - pss
+ - private_dirty
+ - swap
+ - real_memory
+ - vmsize
+
+# Example output
+
+Using this configuration:
+
+```
+[[inputs.passenger]]
+ # Path of passenger-status.
+ #
+ # The plugin gathers metrics by parsing the XML output of passenger-status.
+ # More information about the tool:
+ # https://www.phusionpassenger.com/library/admin/apache/overall_status_report.html
+ #
+ #
+ # If no path is specified, the plugin simply executes passenger-status,
+ # hoping it can be found in your PATH.
+ command = "passenger-status -v --show=xml"
+```
+
+When run with:
+
+```
+./telegraf -config telegraf.conf -test -input-filter passenger
+```
+
+It produces:
+
+```
+> passenger,passenger_version=5.0.17 capacity_used=23i,get_wait_list_size=0i,max=23i,process_count=23i 1452984112799414257
+> passenger_supergroup,name=/var/app/current/public capacity_used=23i,get_wait_list_size=0i 1452984112799496977
+> passenger_group,app_root=/var/app/current,app_type=rack,name=/var/app/current/public capacity_used=23i,get_wait_list_size=0i,processes_being_spawned=0i 1452984112799527021
+> passenger_process,app_root=/var/app/current,code_revision=899ac7f,group_name=/var/app/current/public,life_status=ALIVE,pid=11553,process_group_id=13608,supergroup_name=/var/app/current/public busyness=0i,concurrency=1i,cpu=58i,last_used=1452747071764940i,private_dirty=314900i,processed=951i,pss=319391i,real_memory=314900i,rss=418548i,sessions=0i,spawn_end_time=1452746845013365i,spawn_start_time=1452746844946982i,spawner_creation_time=1452746835922747i,swap=0i,uptime=226i,vmsize=1563580i 1452984112799571490
+> passenger_process,app_root=/var/app/current,code_revision=899ac7f,group_name=/var/app/current/public,life_status=ALIVE,pid=11563,process_group_id=13608,supergroup_name=/var/app/current/public busyness=2147483647i,concurrency=1i,cpu=47i,last_used=1452747071709179i,private_dirty=309240i,processed=756i,pss=314036i,real_memory=309240i,rss=418296i,sessions=1i,spawn_end_time=1452746845172460i,spawn_start_time=1452746845136882i,spawner_creation_time=1452746835922747i,swap=0i,uptime=226i,vmsize=1563608i 1452984112799638581
+```
+
+# Note
+
+You have to ensure that the `passenger-status` command can be run by the
+telegraf user. Depending on how you install and configure passenger, this
+may be an issue. If you are using passenger standalone, or compiled it
+yourself, this is straightforward. However, if you are using the gem with
+`rvm`, it may be harder to get right.
+
+With `rvm`, for example, you can use this command:
+
+```
+~/.rvm/bin/rvm default do passenger-status -v --show=xml
+```
+
+You can use `&&` and `;` in the command to chain the more complicated shell
+invocations needed to get passenger-status, such as loading the rvm shell
+and sourcing the path:
+```
+command = "source .rvm/scripts/rvm && passenger-status -v --show=xml"
+```
+
+In any case, ensure that the command runs under the `telegraf` user and
+that it produces XML output.
diff --git a/plugins/inputs/passenger/passenger.go b/plugins/inputs/passenger/passenger.go
new file mode 100644
index 000000000..c5b049b7c
--- /dev/null
+++ b/plugins/inputs/passenger/passenger.go
@@ -0,0 +1,250 @@
+package passenger
+
+import (
+ "bytes"
+ "encoding/xml"
+ "fmt"
+ "os/exec"
+ "strconv"
+ "strings"
+
+ "github.com/influxdata/telegraf/plugins/inputs"
+ "golang.org/x/net/html/charset"
+)
+
+type passenger struct {
+ Command string
+}
+
+func (p *passenger) parseCommand() (string, []string) {
+ // strings.Split always returns at least one element, so this also
+ // handles a bare command with no arguments
+ arguments := strings.Split(p.Command, " ")
+ return arguments[0], arguments[1:]
+}
+
+type info struct {
+ Passenger_version string `xml:"passenger_version"`
+ Process_count int `xml:"process_count"`
+ Capacity_used int `xml:"capacity_used"`
+ Get_wait_list_size int `xml:"get_wait_list_size"`
+ Max int `xml:"max"`
+ Supergroups struct {
+ Supergroup []struct {
+ Name string `xml:"name"`
+ Get_wait_list_size int `xml:"get_wait_list_size"`
+ Capacity_used int `xml:"capacity_used"`
+ Group []struct {
+ Name string `xml:"name"`
+ AppRoot string `xml:"app_root"`
+ AppType string `xml:"app_type"`
+ Enabled_process_count int `xml:"enabled_process_count"`
+ Disabling_process_count int `xml:"disabling_process_count"`
+ Disabled_process_count int `xml:"disabled_process_count"`
+ Capacity_used int `xml:"capacity_used"`
+ Get_wait_list_size int `xml:"get_wait_list_size"`
+ Processes_being_spawned int `xml:"processes_being_spawned"`
+ Processes struct {
+ Process []*process `xml:"process"`
+ } `xml:"processes"`
+ } `xml:"group"`
+ } `xml:"supergroup"`
+ } `xml:"supergroups"`
+}
+
+type process struct {
+ Pid int `xml:"pid"`
+ Concurrency int `xml:"concurrency"`
+ Sessions int `xml:"sessions"`
+ Busyness int `xml:"busyness"`
+ Processed int `xml:"processed"`
+ Spawner_creation_time int64 `xml:"spawner_creation_time"`
+ Spawn_start_time int64 `xml:"spawn_start_time"`
+ Spawn_end_time int64 `xml:"spawn_end_time"`
+ Last_used int64 `xml:"last_used"`
+ Uptime string `xml:"uptime"`
+ Code_revision string `xml:"code_revision"`
+ Life_status string `xml:"life_status"`
+ Enabled string `xml:"enabled"`
+ Has_metrics bool `xml:"has_metrics"`
+ Cpu int64 `xml:"cpu"`
+ Rss int64 `xml:"rss"`
+ Pss int64 `xml:"pss"`
+ Private_dirty int64 `xml:"private_dirty"`
+ Swap int64 `xml:"swap"`
+ Real_memory int64 `xml:"real_memory"`
+ Vmsize int64 `xml:"vmsize"`
+ Process_group_id string `xml:"process_group_id"`
+}
+
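+// getUptime converts an uptime string such as "3m 46s" into seconds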
+func (p *process) getUptime() int64 {
+ if p.Uptime == "" {
+ return 0
+ }
+
+ timeSlice := strings.Split(p.Uptime, " ")
+ var uptime int64
+ for _, v := range timeSlice {
+ switch {
+ case strings.HasSuffix(v, "d"):
+ iValue := strings.TrimSuffix(v, "d")
+ value, err := strconv.ParseInt(iValue, 10, 64)
+ if err == nil {
+ uptime += value * (24 * 60 * 60)
+ }
+ case strings.HasSuffix(v, "h"):
+ iValue := strings.TrimSuffix(v, "y")
+ value, err := strconv.ParseInt(iValue, 10, 64)
+ if err == nil {
+ uptime += value * (60 * 60)
+ }
+ case strings.HasSuffix(v, "m"):
+ iValue := strings.TrimSuffix(v, "m")
+ value, err := strconv.ParseInt(iValue, 10, 64)
+ if err == nil {
+ uptime += value * 60
+ }
+ case strings.HasSuffix(v, "s"):
+ iValue := strings.TrimSuffix(v, "s")
+ value, err := strconv.ParseInt(iValue, 10, 64)
+ if err == nil {
+ uptime += value
+ }
+ }
+ }
+
+ return uptime
+}
+
+var sampleConfig = `
+ # Path of passenger-status.
+ #
+ # The plugin gathers metrics by parsing the XML output of passenger-status.
+ # More information about the tool:
+ # https://www.phusionpassenger.com/library/admin/apache/overall_status_report.html
+ #
+ #
+ # If no path is specified, the plugin simply executes passenger-status,
+ # hoping it can be found in your PATH.
+ command = "passenger-status -v --show=xml"
+`
+
+func (r *passenger) SampleConfig() string {
+ return sampleConfig
+}
+
+func (r *passenger) Description() string {
+ return "Read metrics of passenger using passenger-status"
+}
+
+func (g *passenger) Gather(acc inputs.Accumulator) error {
+ if g.Command == "" {
+ g.Command = "passenger-status -v --show=xml"
+ }
+
+ cmd, args := g.parseCommand()
+ out, err := exec.Command(cmd, args...).Output()
+
+ if err != nil {
+ return err
+ }
+
+ if err = importMetric(out, acc); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func importMetric(stat []byte, acc inputs.Accumulator) error {
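+ // decode the XML status report (honoring its declared charset), then
+ // walk the supergroup -> group -> process hierarchy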
+ var p info
+
+ decoder := xml.NewDecoder(bytes.NewReader(stat))
+ decoder.CharsetReader = charset.NewReaderLabel
+ if err := decoder.Decode(&p); err != nil {
+ return fmt.Errorf("Cannot parse input with error: %v\n", err)
+ }
+
+ tags := map[string]string{
+ "passenger_version": p.Passenger_version,
+ }
+ fields := map[string]interface{}{
+ "process_count": p.Process_count,
+ "max": p.Max,
+ "capacity_used": p.Capacity_used,
+ "get_wait_list_size": p.Get_wait_list_size,
+ }
+ acc.AddFields("passenger", fields, tags)
+
+ for _, sg := range p.Supergroups.Supergroup {
+ tags := map[string]string{
+ "name": sg.Name,
+ }
+ fields := map[string]interface{}{
+ "get_wait_list_size": sg.Get_wait_list_size,
+ "capacity_used": sg.Capacity_used,
+ }
+ acc.AddFields("passenger_supergroup", fields, tags)
+
+ for _, group := range sg.Group {
+ tags := map[string]string{
+ "name": group.Name,
+ "app_root": group.AppRoot,
+ "app_type": group.AppType,
+ }
+ fields := map[string]interface{}{
+ "get_wait_list_size": group.Get_wait_list_size,
+ "capacity_used": group.Capacity_used,
+ "processes_being_spawned": group.Processes_being_spawned,
+ }
+ acc.AddFields("passenger_group", fields, tags)
+
+ for _, process := range group.Processes.Process {
+ tags := map[string]string{
+ "group_name": group.Name,
+ "app_root": group.AppRoot,
+ "supergroup_name": sg.Name,
+ "pid": fmt.Sprintf("%d", process.Pid),
+ "code_revision": process.Code_revision,
+ "life_status": process.Life_status,
+ "process_group_id": process.Process_group_id,
+ }
+ fields := map[string]interface{}{
+ "concurrency": process.Concurrency,
+ "sessions": process.Sessions,
+ "busyness": process.Busyness,
+ "processed": process.Processed,
+ "spawner_creation_time": process.Spawner_creation_time,
+ "spawn_start_time": process.Spawn_start_time,
+ "spawn_end_time": process.Spawn_end_time,
+ "last_used": process.Last_used,
+ "uptime": process.getUptime(),
+ "cpu": process.Cpu,
+ "rss": process.Rss,
+ "pss": process.Pss,
+ "private_dirty": process.Private_dirty,
+ "swap": process.Swap,
+ "real_memory": process.Real_memory,
+ "vmsize": process.Vmsize,
+ }
+ acc.AddFields("passenger_process", fields, tags)
+ }
+ }
+ }
+
+ return nil
+}
+
+func init() {
+ inputs.Add("passenger", func() inputs.Input {
+ return &passenger{}
+ })
+}
diff --git a/plugins/inputs/passenger/passenger_test.go b/plugins/inputs/passenger/passenger_test.go
new file mode 100644
index 000000000..6124a968e
--- /dev/null
+++ b/plugins/inputs/passenger/passenger_test.go
@@ -0,0 +1,301 @@
+package passenger
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "testing"
+
+ "github.com/influxdata/telegraf/testutil"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func fakePassengerStatus(stat string) {
+ content := fmt.Sprintf("#!/bin/sh\ncat << EOF\n%s\nEOF", stat)
+ ioutil.WriteFile("/tmp/passenger-status", []byte(content), 0700)
+}
+
+func teardown() {
+ os.Remove("/tmp/passenger-status")
+}
+
+func Test_Invalid_Passenger_Status_Cli(t *testing.T) {
+ r := &passenger{
+ Command: "an-invalid-command passenger-status",
+ }
+
+ var acc testutil.Accumulator
+
+ err := r.Gather(&acc)
+ require.Error(t, err)
+ assert.Equal(t, err.Error(), `exec: "an-invalid-command": executable file not found in $PATH`)
+}
+
+func Test_Invalid_Xml(t *testing.T) {
+ fakePassengerStatus("invalid xml")
+ defer teardown()
+
+ r := &passenger{
+ Command: "/tmp/passenger-status",
+ }
+
+ var acc testutil.Accumulator
+
+ err := r.Gather(&acc)
+ require.Error(t, err)
+ assert.Equal(t, err.Error(), "Cannot parse input with error: EOF\n")
+}
+
+// We test this by ensuring that the error message matches the default cli path
+func Test_Default_Config_Load_Default_Command(t *testing.T) {
+ fakePassengerStatus("invalid xml")
+ defer teardown()
+
+ r := &passenger{}
+
+ var acc testutil.Accumulator
+
+ err := r.Gather(&acc)
+ require.Error(t, err)
+ assert.Equal(t, err.Error(), "exec: \"passenger-status\": executable file not found in $PATH")
+}
+
+func TestPassengerGenerateMetric(t *testing.T) {
+ fakePassengerStatus(sampleStat)
+ defer teardown()
+
+ // gather metrics from the fake passenger-status script
+ r := &passenger{
+ Command: "/tmp/passenger-status",
+ }
+
+ var acc testutil.Accumulator
+
+ err := r.Gather(&acc)
+ require.NoError(t, err)
+
+ tags := map[string]string{
+ "passenger_version": "5.0.17",
+ }
+ fields := map[string]interface{}{
+ "process_count": 23,
+ "max": 23,
+ "capacity_used": 23,
+ "get_wait_list_size": 3,
+ }
+ acc.AssertContainsTaggedFields(t, "passenger", fields, tags)
+
+ tags = map[string]string{
+ "name": "/var/app/current/public",
+ "app_root": "/var/app/current",
+ "app_type": "rack",
+ }
+ fields = map[string]interface{}{
+ "processes_being_spawned": 2,
+ "capacity_used": 23,
+ "get_wait_list_size": 3,
+ }
+ acc.AssertContainsTaggedFields(t, "passenger_group", fields, tags)
+
+ tags = map[string]string{
+ "name": "/var/app/current/public",
+ }
+
+ fields = map[string]interface{}{
+ "capacity_used": 23,
+ "get_wait_list_size": 3,
+ }
+ acc.AssertContainsTaggedFields(t, "passenger_supergroup", fields, tags)
+
+ tags = map[string]string{
+ "app_root": "/var/app/current",
+ "group_name": "/var/app/current/public",
+ "supergroup_name": "/var/app/current/public",
+ "pid": "11553",
+ "code_revision": "899ac7f",
+ "life_status": "ALIVE",
+ "process_group_id": "13608",
+ }
+ fields = map[string]interface{}{
+ "concurrency": 1,
+ "sessions": 0,
+ "busyness": 0,
+ "processed": 951,
+ "spawner_creation_time": int64(1452746835922747),
+ "spawn_start_time": int64(1452746844946982),
+ "spawn_end_time": int64(1452746845013365),
+ "last_used": int64(1452747071764940),
+ "uptime": int64(226), // in seconds of 3m 46s
+ "cpu": int64(58),
+ "rss": int64(418548),
+ "pss": int64(319391),
+ "private_dirty": int64(314900),
+ "swap": int64(0),
+ "real_memory": int64(314900),
+ "vmsize": int64(1563580),
+ }
+ acc.AssertContainsTaggedFields(t, "passenger_process", fields, tags)
+}
+
+var sampleStat = `
+
+
+
+ 5.0.17
+ 1
+ 23
+ 23
+ 23
+ 3
+
+
+
+ /var/app/current/public
+ READY
+ 3
+ 23
+ foo
+
+ /var/app/current/public
+ /var/app/current/public
+ /var/app/current
+ rack
+ production
+ QQUrbCVYxbJYpfgyDOwJ
+ 23
+ 0
+ 0
+ 23
+ 3
+ 0
+ 2
+ foo
+ foo
+ ALIVE
+ axcoto
+ 1001
+ axcoto
+ 1001
+
+ /var/app/current
+ /var/app/current/public
+ rack
+ /var/app/.rvm/gems/ruby-2.2.0-p645/gems/passenger-5.0.17/helper-scripts/rack-loader.rb
+ config.ru
+ Passenger RubyApp
+ 3
+ 90000
+ production
+ /
+ smart
+ nobody
+ nogroup
+ /var/app/.rvm/gems/ruby-2.2.0-p645/wrappers/ruby
+ python
+ node
+ unix:/tmp/passenger.eKFdvdC/agents.s/ust_router
+ logging
+ foo
+ false
+ false
+ foo
+ 22
+ 0
+ 300
+ 1
+
+
+
+ 11553
+ 378579907
+ 17173df-PoNT3J9HCf
+ 1
+ 0
+ 0
+ 951
+ 1452746835922747
+ 1452746844946982
+ 1452746845013365
+ 1452747071764940
+ 0s ago
+ 3m 46s
+ 899ac7f
+ ALIVE
+ ENABLED
+ true
+ 58
+ 418548
+ 319391
+ 314900
+ 0
+ 314900
+ 1563580
+ 13608
+ Passenger RubyApp: /var/app/current/public
+
+
+ main
+ unix:/tmp/passenger.eKFdvdC/apps.s/ruby.UWF6zkRJ71aoMXPxpknpWVfC1POFqgWZzbEsdz5v0G46cSSMxJ3GHLFhJaUrK2I
+ session
+ 1
+ 0
+
+
+ http
+ tcp://127.0.0.1:49888
+ http
+ 1
+ 0
+
+
+
+
+ 11563
+ 1549681201
+ 17173df-pX5iJOipd8
+ 1
+ 1
+ 2147483647
+ 756
+ 1452746835922747
+ 1452746845136882
+ 1452746845172460
+ 1452747071709179
+ 0s ago
+ 3m 46s
+ 899ac7f
+ ALIVE
+ ENABLED
+ true
+ 47
+ 418296
+ 314036
+ 309240
+ 0
+ 309240
+ 1563608
+ 13608
+ Passenger RubyApp: /var/app/current/public
+
+
+ main
+ unix:/tmp/passenger.eKFdvdC/apps.s/ruby.PVCh7TmvCi9knqhba2vG5qXrlHGEIwhGrxnUvRbIAD6SPz9m0G7YlJ8HEsREHY3
+ session
+ 1
+ 1
+
+
+ http
+ tcp://127.0.0.1:52783
+ http
+ 1
+ 0
+
+
+
+
+
+
+
+`
diff --git a/plugins/inputs/phpfpm/README.md b/plugins/inputs/phpfpm/README.md
index c2a42523a..b853b7fd7 100644
--- a/plugins/inputs/phpfpm/README.md
+++ b/plugins/inputs/phpfpm/README.md
@@ -6,10 +6,14 @@ Get phpfpm stat using either HTTP status page or fpm socket.
Meta:
-- tags: `url= pool=poolname`
+- tags: `pool=poolname`
Measurement names:
+- phpfpm
+
+Measurement fields:
+
- accepted_conn
- listen_queue
- max_listen_queue
@@ -50,36 +54,12 @@ It produces:
```
* Plugin: phpfpm, Collection 1
-> [url="10.0.0.12" pool="www"] phpfpm_idle_processes value=1
-> [url="10.0.0.12" pool="www"] phpfpm_total_processes value=2
-> [url="10.0.0.12" pool="www"] phpfpm_max_children_reached value=0
-> [url="10.0.0.12" pool="www"] phpfpm_max_listen_queue value=0
-> [url="10.0.0.12" pool="www"] phpfpm_listen_queue value=0
-> [url="10.0.0.12" pool="www"] phpfpm_listen_queue_len value=0
-> [url="10.0.0.12" pool="www"] phpfpm_active_processes value=1
-> [url="10.0.0.12" pool="www"] phpfpm_max_active_processes value=2
-> [url="10.0.0.12" pool="www"] phpfpm_slow_requests value=0
-> [url="10.0.0.12" pool="www"] phpfpm_accepted_conn value=305
-
-> [url="localhost" pool="www2"] phpfpm_max_children_reached value=0
-> [url="localhost" pool="www2"] phpfpm_slow_requests value=0
-> [url="localhost" pool="www2"] phpfpm_max_listen_queue value=0
-> [url="localhost" pool="www2"] phpfpm_active_processes value=1
-> [url="localhost" pool="www2"] phpfpm_listen_queue_len value=0
-> [url="localhost" pool="www2"] phpfpm_idle_processes value=1
-> [url="localhost" pool="www2"] phpfpm_total_processes value=2
-> [url="localhost" pool="www2"] phpfpm_max_active_processes value=2
-> [url="localhost" pool="www2"] phpfpm_accepted_conn value=306
-> [url="localhost" pool="www2"] phpfpm_listen_queue value=0
-
-> [url="10.0.0.12:9000" pool="www3"] phpfpm_max_children_reached value=0
-> [url="10.0.0.12:9000" pool="www3"] phpfpm_slow_requests value=1
-> [url="10.0.0.12:9000" pool="www3"] phpfpm_max_listen_queue value=0
-> [url="10.0.0.12:9000" pool="www3"] phpfpm_active_processes value=1
-> [url="10.0.0.12:9000" pool="www3"] phpfpm_listen_queue_len value=0
-> [url="10.0.0.12:9000" pool="www3"] phpfpm_idle_processes value=2
-> [url="10.0.0.12:9000" pool="www3"] phpfpm_total_processes value=2
-> [url="10.0.0.12:9000" pool="www3"] phpfpm_max_active_processes value=2
-> [url="10.0.0.12:9000" pool="www3"] phpfpm_accepted_conn value=307
-> [url="10.0.0.12:9000" pool="www3"] phpfpm_listen_queue value=0
+> phpfpm,pool=www accepted_conn=13i,active_processes=2i,idle_processes=1i,listen_queue=0i,listen_queue_len=0i,max_active_processes=2i,max_children_reached=0i,max_listen_queue=0i,slow_requests=0i,total_processes=3i 1453011293083331187
+> phpfpm,pool=www2 accepted_conn=12i,active_processes=1i,idle_processes=2i,listen_queue=0i,listen_queue_len=0i,max_active_processes=2i,max_children_reached=0i,max_listen_queue=0i,slow_requests=0i,total_processes=3i 1453011293083691422
+> phpfpm,pool=www3 accepted_conn=11i,active_processes=1i,idle_processes=2i,listen_queue=0i,listen_queue_len=0i,max_active_processes=2i,max_children_reached=0i,max_listen_queue=0i,slow_requests=0i,total_processes=3i 1453011293083691658
```
+
+## Note
+
+When using `unixsocket`, you have to ensure that telegraf runs on the same
+host and that the socket path is accessible to the telegraf user.
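+
+For example, a minimal socket-based config (a sketch assuming the default
+fpm socket path) might look like:
+
+```
+[[inputs.phpfpm]]
+  urls = ["/var/run/php5-fpm.sock"]
+```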
diff --git a/plugins/inputs/phpfpm/phpfpm.go b/plugins/inputs/phpfpm/phpfpm.go
index ceffc673e..0166f7bea 100644
--- a/plugins/inputs/phpfpm/phpfpm.go
+++ b/plugins/inputs/phpfpm/phpfpm.go
@@ -7,11 +7,12 @@ import (
"io"
"net/http"
"net/url"
+ "os"
"strconv"
"strings"
"sync"
- "github.com/influxdb/telegraf/plugins/inputs"
+ "github.com/influxdata/telegraf/plugins/inputs"
)
const (
@@ -40,20 +41,25 @@ type phpfpm struct {
var sampleConfig = `
# An array of addresses to gather stats about. Specify an ip or hostname
- # with optional port and path.
+ # with optional port and path
#
- # Plugin can be configured in three modes (both can be used):
- # - http: the URL must start with http:// or https://, ex:
+ # Plugin can be configured in three modes (any of them can be used):
+ # - http: the URL must start with http:// or https://, e.g.:
# "http://localhost/status"
# "http://192.168.130.1/status?full"
- # - unixsocket: path to fpm socket, ex:
+ #
+ # - unixsocket: path to fpm socket, e.g.:
# "/var/run/php5-fpm.sock"
- # "192.168.10.10:/var/run/php5-fpm-www2.sock"
- # - fcgi: the URL mush start with fcgi:// or cgi://, and port must present, ex:
+ # or using a custom fpm status path:
+ # "/var/run/php5-fpm.sock:fpm-custom-status-path"
+ #
+ # - fcgi: the URL must start with fcgi:// or cgi://, and the port must be present, e.g.:
# "fcgi://10.0.0.12:9000/status"
# "cgi://10.0.10.12:9001/status"
#
- # If no servers are specified, then default to 127.0.0.1/server-status
+ # Example of gathering from both a local socket and a remote host:
+ # urls = ["http://192.168.1.20/status", "/tmp/fpm.sock"]
+ # If no servers are specified, then default to http://127.0.0.1/status
urls = ["http://localhost/status"]
`
@@ -62,7 +68,7 @@ func (r *phpfpm) SampleConfig() string {
}
func (r *phpfpm) Description() string {
- return "Read metrics of phpfpm, via HTTP status page or socket(pending)"
+ return "Read metrics of phpfpm, via HTTP status page or socket"
}
// Reads stats from all configured servers accumulates stats.
@@ -89,71 +95,96 @@ func (g *phpfpm) Gather(acc inputs.Accumulator) error {
return outerr
}
-// Request status page to get stat raw data
+// Request status page to get stat raw data and import it
func (g *phpfpm) gatherServer(addr string, acc inputs.Accumulator) error {
if g.client == nil {
-
client := &http.Client{}
g.client = client
}
if strings.HasPrefix(addr, "http://") || strings.HasPrefix(addr, "https://") {
+ return g.gatherHttp(addr, acc)
+ }
+
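+ // anything else is a FastCGI target: either a fcgi:// / cgi:// URL
+ // or a local unix socket path (optionally with a custom status path)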
+ var (
+ fcgi *conn
+ socketPath string
+ statusPath string
+ )
+
+ if strings.HasPrefix(addr, "fcgi://") || strings.HasPrefix(addr, "cgi://") {
u, err := url.Parse(addr)
if err != nil {
return fmt.Errorf("Unable parse server address '%s': %s", addr, err)
}
-
- req, err := http.NewRequest("GET", fmt.Sprintf("%s://%s%s", u.Scheme,
- u.Host, u.Path), nil)
- res, err := g.client.Do(req)
- if err != nil {
- return fmt.Errorf("Unable to connect to phpfpm status page '%s': %v",
- addr, err)
- }
-
- if res.StatusCode != 200 {
- return fmt.Errorf("Unable to get valid stat result from '%s': %v",
- addr, err)
- }
-
- importMetric(res.Body, acc, u.Host)
+ socketAddr := strings.Split(u.Host, ":")
+ fcgiIp := socketAddr[0]
+ fcgiPort, _ := strconv.Atoi(socketAddr[1])
+ fcgi, _ = NewClient(fcgiIp, fcgiPort)
} else {
- var (
- fcgi *FCGIClient
- fcgiAddr string
- )
- if strings.HasPrefix(addr, "fcgi://") || strings.HasPrefix(addr, "cgi://") {
- u, err := url.Parse(addr)
- if err != nil {
- return fmt.Errorf("Unable parse server address '%s': %s", addr, err)
- }
- socketAddr := strings.Split(u.Host, ":")
- fcgiIp := socketAddr[0]
- fcgiPort, _ := strconv.Atoi(socketAddr[1])
- fcgiAddr = u.Host
- fcgi, _ = NewClient(fcgiIp, fcgiPort)
+ socketAddr := strings.Split(addr, ":")
+ if len(socketAddr) >= 2 {
+ socketPath = socketAddr[0]
+ statusPath = socketAddr[1]
} else {
- socketAddr := strings.Split(addr, ":")
- fcgiAddr = socketAddr[0]
- fcgi, _ = NewClient("unix", socketAddr[1])
- }
- resOut, resErr, err := fcgi.Request(map[string]string{
- "SCRIPT_NAME": "/status",
- "SCRIPT_FILENAME": "status",
- "REQUEST_METHOD": "GET",
- }, "")
-
- if len(resErr) == 0 && err == nil {
- importMetric(bytes.NewReader(resOut), acc, fcgiAddr)
+ socketPath = socketAddr[0]
+ statusPath = "status"
}
+ if _, err := os.Stat(socketPath); os.IsNotExist(err) {
+ return fmt.Errorf("Socket doesn't exist '%s': %s", socketPath, err)
+ }
+ fcgi, _ = NewClient("unix", socketPath)
+ }
+ return g.gatherFcgi(fcgi, statusPath, acc)
+}
+
+// Gather stats using the fcgi protocol
+func (g *phpfpm) gatherFcgi(fcgi *conn, statusPath string, acc inputs.Accumulator) error {
+ fpmOutput, fpmErr, err := fcgi.Request(map[string]string{
+ "SCRIPT_NAME": "/" + statusPath,
+ "SCRIPT_FILENAME": statusPath,
+ "REQUEST_METHOD": "GET",
+ "CONTENT_LENGTH": "0",
+ "SERVER_PROTOCOL": "HTTP/1.0",
+ "SERVER_SOFTWARE": "go / fcgiclient ",
+ "REMOTE_ADDR": "127.0.0.1",
+ }, "/"+statusPath)
+
+ if len(fpmErr) == 0 && err == nil {
+ importMetric(bytes.NewReader(fpmOutput), acc)
+ return nil
+ } else {
+ return fmt.Errorf("Unable parse phpfpm status. Error: %v %v", string(fpmErr), err)
+ }
+}
+
+// Gather stats using the http protocol
+func (g *phpfpm) gatherHttp(addr string, acc inputs.Accumulator) error {
+ u, err := url.Parse(addr)
+ if err != nil {
+ return fmt.Errorf("Unable parse server address '%s': %s", addr, err)
}
+ req, err := http.NewRequest("GET", fmt.Sprintf("%s://%s%s", u.Scheme,
+ u.Host, u.Path), nil)
+ res, err := g.client.Do(req)
+ if err != nil {
+ return fmt.Errorf("Unable to connect to phpfpm status page '%s': %v",
+ addr, err)
+ }
+
+ if res.StatusCode != 200 {
+ return fmt.Errorf("Unable to get valid stat result from '%s': %v",
+ addr, err)
+ }
+
+ importMetric(res.Body, acc)
return nil
}
-// Import HTTP stat data into Telegraf system
-func importMetric(r io.Reader, acc inputs.Accumulator, host string) (poolStat, error) {
+// Import stat data into Telegraf system
+func importMetric(r io.Reader, acc inputs.Accumulator) (poolStat, error) {
stats := make(poolStat)
var currentPool string
@@ -195,7 +226,6 @@ func importMetric(r io.Reader, acc inputs.Accumulator, host string) (poolStat, e
// Finally, we push the pool metric
for pool := range stats {
tags := map[string]string{
- "url": host,
"pool": pool,
}
fields := make(map[string]interface{})
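The rewritten `gatherServer` above dispatches purely on the shape of each
configured address; a standalone sketch of that convention (illustrative
helper, not part of the patch):

```
package main

import (
	"fmt"
	"strings"
)

// classify reports which gathering mode an address selects,
// mirroring the checks in gatherServer.
func classify(addr string) string {
	switch {
	case strings.HasPrefix(addr, "http://"), strings.HasPrefix(addr, "https://"):
		return "http status page"
	case strings.HasPrefix(addr, "fcgi://"), strings.HasPrefix(addr, "cgi://"):
		return "fcgi over tcp"
	default:
		return "unix socket, optional \":status-path\" suffix"
	}
}

func main() {
	for _, addr := range []string{
		"http://192.168.1.20/status",
		"fcgi://10.0.0.12:9000/status",
		"/var/run/php5-fpm.sock:fpm-custom-status-path",
	} {
		fmt.Printf("%-46s -> %s\n", addr, classify(addr))
	}
}
```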
diff --git a/plugins/inputs/phpfpm/phpfpm_fcgi.go b/plugins/inputs/phpfpm/phpfpm_fcgi.go
index 65f4c789b..03aac7634 100644
--- a/plugins/inputs/phpfpm/phpfpm_fcgi.go
+++ b/plugins/inputs/phpfpm/phpfpm_fcgi.go
@@ -1,13 +1,14 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package fcgi implements the FastCGI protocol.
+// Currently only the responder role is supported.
+// The protocol is defined at http://www.fastcgi.com/drupal/node/6?q=node/22
package phpfpm
-// FastCGI client to request via socket
-
-// Copyright 2012 Junqing Tan and The Go Authors
-// Use of this source code is governed by a BSD-style
-// Part of source code is from Go fcgi package
-
-// Fix bug: Can't recive more than 1 record untill FCGI_END_REQUEST 2012-09-15
-// By: wofeiwo
+// This file defines the raw protocol and some utilities used by the child and
+// the host.
import (
"bufio"
@@ -15,70 +16,84 @@ import (
"encoding/binary"
"errors"
"io"
+ "sync"
+
"net"
"strconv"
- "sync"
+
+ "strings"
)
-const FCGI_LISTENSOCK_FILENO uint8 = 0
-const FCGI_HEADER_LEN uint8 = 8
-const VERSION_1 uint8 = 1
-const FCGI_NULL_REQUEST_ID uint8 = 0
-const FCGI_KEEP_CONN uint8 = 1
+// recType is a record type, as defined by
+// http://www.fastcgi.com/devkit/doc/fcgi-spec.html#S8
+type recType uint8
const (
- FCGI_BEGIN_REQUEST uint8 = iota + 1
- FCGI_ABORT_REQUEST
- FCGI_END_REQUEST
- FCGI_PARAMS
- FCGI_STDIN
- FCGI_STDOUT
- FCGI_STDERR
- FCGI_DATA
- FCGI_GET_VALUES
- FCGI_GET_VALUES_RESULT
- FCGI_UNKNOWN_TYPE
- FCGI_MAXTYPE = FCGI_UNKNOWN_TYPE
+ typeBeginRequest recType = 1
+ typeAbortRequest recType = 2
+ typeEndRequest recType = 3
+ typeParams recType = 4
+ typeStdin recType = 5
+ typeStdout recType = 6
+ typeStderr recType = 7
+ typeData recType = 8
+ typeGetValues recType = 9
+ typeGetValuesResult recType = 10
+ typeUnknownType recType = 11
)
-const (
- FCGI_RESPONDER uint8 = iota + 1
- FCGI_AUTHORIZER
- FCGI_FILTER
-)
+// keep the connection between web-server and responder open after request
+const flagKeepConn = 1
const (
- FCGI_REQUEST_COMPLETE uint8 = iota
- FCGI_CANT_MPX_CONN
- FCGI_OVERLOADED
- FCGI_UNKNOWN_ROLE
-)
-
-const (
- FCGI_MAX_CONNS string = "MAX_CONNS"
- FCGI_MAX_REQS string = "MAX_REQS"
- FCGI_MPXS_CONNS string = "MPXS_CONNS"
-)
-
-const (
- maxWrite = 6553500 // maximum record body
+ maxWrite = 65535 // maximum record body
maxPad = 255
)
+const (
+ roleResponder = iota + 1 // only Responders are implemented.
+ roleAuthorizer
+ roleFilter
+)
+
+const (
+ statusRequestComplete = iota
+ statusCantMultiplex
+ statusOverloaded
+ statusUnknownRole
+)
+
+const headerLen = 8
+
type header struct {
Version uint8
- Type uint8
+ Type recType
Id uint16
ContentLength uint16
PaddingLength uint8
Reserved uint8
}
+type beginRequest struct {
+ role uint16
+ flags uint8
+ reserved [5]uint8
+}
+
+func (br *beginRequest) read(content []byte) error {
+ if len(content) != 8 {
+ return errors.New("fcgi: invalid begin request record")
+ }
+ br.role = binary.BigEndian.Uint16(content)
+ br.flags = content[2]
+ return nil
+}
+
// for padding so we don't have to allocate all the time
// not synchronized because we don't care what the contents are
var pad [maxPad]byte
-func (h *header) init(recType uint8, reqId uint16, contentLength int) {
+func (h *header) init(recType recType, reqId uint16, contentLength int) {
h.Version = 1
h.Type = recType
h.Id = reqId
@@ -86,6 +101,26 @@ func (h *header) init(recType uint8, reqId uint16, contentLength int) {
h.PaddingLength = uint8(-contentLength & 7)
}
+// conn sends records over rwc
+type conn struct {
+ mutex sync.Mutex
+ rwc io.ReadWriteCloser
+
+ // to avoid allocations
+ buf bytes.Buffer
+ h header
+}
+
+func newConn(rwc io.ReadWriteCloser) *conn {
+ return &conn{rwc: rwc}
+}
+
+func (c *conn) Close() error {
+ c.mutex.Lock()
+ defer c.mutex.Unlock()
+ return c.rwc.Close()
+}
+
type record struct {
h header
buf [maxWrite + maxPad]byte
@@ -109,69 +144,39 @@ func (r *record) content() []byte {
return r.buf[:r.h.ContentLength]
}
-type FCGIClient struct {
- mutex sync.Mutex
- rwc io.ReadWriteCloser
- h header
- buf bytes.Buffer
- keepAlive bool
-}
-
-func NewClient(h string, args ...interface{}) (fcgi *FCGIClient, err error) {
- var conn net.Conn
- if len(args) != 1 {
- err = errors.New("fcgi: not enough params")
- return
- }
- switch args[0].(type) {
- case int:
- addr := h + ":" + strconv.FormatInt(int64(args[0].(int)), 10)
- conn, err = net.Dial("tcp", addr)
- case string:
- laddr := net.UnixAddr{Name: args[0].(string), Net: h}
- conn, err = net.DialUnix(h, nil, &laddr)
- default:
- err = errors.New("fcgi: we only accept int (port) or string (socket) params.")
- }
- fcgi = &FCGIClient{
- rwc: conn,
- keepAlive: false,
- }
- return
-}
-
-func (client *FCGIClient) writeRecord(recType uint8, reqId uint16, content []byte) (err error) {
- client.mutex.Lock()
- defer client.mutex.Unlock()
- client.buf.Reset()
- client.h.init(recType, reqId, len(content))
- if err := binary.Write(&client.buf, binary.BigEndian, client.h); err != nil {
+// writeRecord writes and sends a single record.
+func (c *conn) writeRecord(recType recType, reqId uint16, b []byte) error {
+ c.mutex.Lock()
+ defer c.mutex.Unlock()
+ c.buf.Reset()
+ c.h.init(recType, reqId, len(b))
+ if err := binary.Write(&c.buf, binary.BigEndian, c.h); err != nil {
return err
}
- if _, err := client.buf.Write(content); err != nil {
+ if _, err := c.buf.Write(b); err != nil {
return err
}
- if _, err := client.buf.Write(pad[:client.h.PaddingLength]); err != nil {
+ if _, err := c.buf.Write(pad[:c.h.PaddingLength]); err != nil {
return err
}
- _, err = client.rwc.Write(client.buf.Bytes())
+ _, err := c.rwc.Write(c.buf.Bytes())
return err
}
-func (client *FCGIClient) writeBeginRequest(reqId uint16, role uint16, flags uint8) error {
+func (c *conn) writeBeginRequest(reqId uint16, role uint16, flags uint8) error {
b := [8]byte{byte(role >> 8), byte(role), flags}
- return client.writeRecord(FCGI_BEGIN_REQUEST, reqId, b[:])
+ return c.writeRecord(typeBeginRequest, reqId, b[:])
}
-func (client *FCGIClient) writeEndRequest(reqId uint16, appStatus int, protocolStatus uint8) error {
+func (c *conn) writeEndRequest(reqId uint16, appStatus int, protocolStatus uint8) error {
b := make([]byte, 8)
binary.BigEndian.PutUint32(b, uint32(appStatus))
b[4] = protocolStatus
- return client.writeRecord(FCGI_END_REQUEST, reqId, b)
+ return c.writeRecord(typeEndRequest, reqId, b)
}
-func (client *FCGIClient) writePairs(recType uint8, reqId uint16, pairs map[string]string) error {
- w := newWriter(client, recType, reqId)
+func (c *conn) writePairs(recType recType, reqId uint16, pairs map[string]string) error {
+ w := newWriter(c, recType, reqId)
b := make([]byte, 8)
for k, v := range pairs {
n := encodeSize(b, uint32(len(k)))
@@ -238,7 +243,7 @@ func (w *bufWriter) Close() error {
return w.closer.Close()
}
-func newWriter(c *FCGIClient, recType uint8, reqId uint16) *bufWriter {
+func newWriter(c *conn, recType recType, reqId uint16) *bufWriter {
s := &streamWriter{c: c, recType: recType, reqId: reqId}
w := bufio.NewWriterSize(s, maxWrite)
return &bufWriter{s, w}
@@ -247,8 +252,8 @@ func newWriter(c *FCGIClient, recType uint8, reqId uint16) *bufWriter {
// streamWriter abstracts out the separation of a stream into discrete records.
// It only writes maxWrite bytes at a time.
type streamWriter struct {
- c *FCGIClient
- recType uint8
+ c *conn
+ recType recType
reqId uint16
}
@@ -273,22 +278,44 @@ func (w *streamWriter) Close() error {
return w.c.writeRecord(w.recType, w.reqId, nil)
}
-func (client *FCGIClient) Request(env map[string]string, reqStr string) (retout []byte, reterr []byte, err error) {
+func NewClient(h string, args ...interface{}) (fcgi *conn, err error) {
+ var con net.Conn
+ if len(args) != 1 {
+ err = errors.New("fcgi: not enough params")
+ return
+ }
+ switch args[0].(type) {
+ case int:
+ addr := h + ":" + strconv.FormatInt(int64(args[0].(int)), 10)
+ con, err = net.Dial("tcp", addr)
+ case string:
+ laddr := net.UnixAddr{Name: args[0].(string), Net: h}
+ con, err = net.DialUnix(h, nil, &laddr)
+ default:
+ err = errors.New("fcgi: we only accept int (port) or string (socket) params.")
+ }
+ fcgi = &conn{
+ rwc: con,
+ }
+ return
+}
- var reqId uint16 = 1
+func (client *conn) Request(env map[string]string, requestData string) (retout []byte, reterr []byte, err error) {
defer client.rwc.Close()
+ var reqId uint16 = 1
- err = client.writeBeginRequest(reqId, uint16(FCGI_RESPONDER), 0)
+ err = client.writeBeginRequest(reqId, uint16(roleResponder), 0)
if err != nil {
return
}
- err = client.writePairs(FCGI_PARAMS, reqId, env)
+
+ err = client.writePairs(typeParams, reqId, env)
if err != nil {
return
}
- if len(reqStr) > 0 {
- err = client.writeRecord(FCGI_STDIN, reqId, []byte(reqStr))
- if err != nil {
+
+ if len(requestData) > 0 {
+ if err = client.writeRecord(typeStdin, reqId, []byte(requestData)); err != nil {
return
}
}
@@ -297,23 +324,25 @@ func (client *FCGIClient) Request(env map[string]string, reqStr string) (retout
var err1 error
// receive until EOF or FCGI_END_REQUEST
+READ_LOOP:
for {
err1 = rec.read(client.rwc)
- if err1 != nil {
+ if err1 != nil && strings.Contains(err1.Error(), "use of closed network connection") {
if err1 != io.EOF {
err = err1
}
break
}
+
switch {
- case rec.h.Type == FCGI_STDOUT:
+ case rec.h.Type == typeStdout:
retout = append(retout, rec.content()...)
- case rec.h.Type == FCGI_STDERR:
+ case rec.h.Type == typeStderr:
reterr = append(reterr, rec.content()...)
- case rec.h.Type == FCGI_END_REQUEST:
+ case rec.h.Type == typeEndRequest:
fallthrough
default:
- break
+ break READ_LOOP
}
}
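Usage-wise, the client above reduces to `NewClient` plus a single `Request`
per gather; note also the new `READ_LOOP` label, which makes the final
`break` leave the read loop on `typeEndRequest` rather than merely the
enclosing `switch`. A sketch of driving it from inside the same package
(hypothetical helper, addresses illustrative):

```
package phpfpm

// fetchStatus is a hypothetical helper showing how gatherFcgi drives the
// client above; NewClient also accepts ("unix", "/path/to.sock").
func fetchStatus() ([]byte, error) {
	fcgiConn, err := NewClient("10.0.0.12", 9000)
	if err != nil {
		return nil, err
	}
	// Request writes BEGIN_REQUEST and the params, then collects STDOUT
	// and STDERR records until END_REQUEST; it closes the connection
	// itself, so no explicit Close is needed here.
	out, _, err := fcgiConn.Request(map[string]string{
		"SCRIPT_NAME":     "/status",
		"SCRIPT_FILENAME": "status",
		"REQUEST_METHOD":  "GET",
	}, "")
	return out, err
}
```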
diff --git a/plugins/inputs/phpfpm/phpfpm_test.go b/plugins/inputs/phpfpm/phpfpm_test.go
index 2f34372bf..c965e5a13 100644
--- a/plugins/inputs/phpfpm/phpfpm_test.go
+++ b/plugins/inputs/phpfpm/phpfpm_test.go
@@ -1,24 +1,34 @@
package phpfpm
import (
+ "crypto/rand"
+ "encoding/binary"
"fmt"
+ "net"
+ "net/http"
+ "net/http/fcgi"
+ "net/http/httptest"
"testing"
- "github.com/influxdb/telegraf/testutil"
+ "github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
- "net/http"
- "net/http/httptest"
)
-func TestPhpFpmGeneratesMetrics(t *testing.T) {
- //We create a fake server to return test data
- ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- fmt.Fprint(w, outputSample)
- }))
+type statServer struct{}
+
+// We create a fake server to return test data
+func (s statServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Content-Type", "text/plain")
+ w.Header().Set("Content-Length", fmt.Sprint(len(outputSample)))
+ fmt.Fprint(w, outputSample)
+}
+
+func TestPhpFpmGeneratesMetrics_From_Http(t *testing.T) {
+ sv := statServer{}
+ ts := httptest.NewServer(sv)
defer ts.Close()
- //Now we tested again above server, with our authentication data
r := &phpfpm{
Urls: []string{ts.URL},
}
@@ -29,7 +39,134 @@ func TestPhpFpmGeneratesMetrics(t *testing.T) {
require.NoError(t, err)
tags := map[string]string{
- "url": ts.Listener.Addr().String(),
+ "pool": "www",
+ }
+
+ fields := map[string]interface{}{
+ "accepted_conn": int64(3),
+ "listen_queue": int64(1),
+ "max_listen_queue": int64(0),
+ "listen_queue_len": int64(0),
+ "idle_processes": int64(1),
+ "active_processes": int64(1),
+ "total_processes": int64(2),
+ "max_active_processes": int64(1),
+ "max_children_reached": int64(2),
+ "slow_requests": int64(1),
+ }
+
+ acc.AssertContainsTaggedFields(t, "phpfpm", fields, tags)
+}
+
+func TestPhpFpmGeneratesMetrics_From_Fcgi(t *testing.T) {
+ // Let OS find an available port
+ tcp, err := net.Listen("tcp", "127.0.0.1:0")
+ if err != nil {
+ t.Fatal("Cannot initalize test server")
+ }
+ defer tcp.Close()
+
+ s := statServer{}
+ go fcgi.Serve(tcp, s)
+
+ // Now we test against the above server
+ r := &phpfpm{
+ Urls: []string{"fcgi://" + tcp.Addr().String() + "/status"},
+ }
+
+ var acc testutil.Accumulator
+ err = r.Gather(&acc)
+ require.NoError(t, err)
+
+ tags := map[string]string{
+ "pool": "www",
+ }
+
+ fields := map[string]interface{}{
+ "accepted_conn": int64(3),
+ "listen_queue": int64(1),
+ "max_listen_queue": int64(0),
+ "listen_queue_len": int64(0),
+ "idle_processes": int64(1),
+ "active_processes": int64(1),
+ "total_processes": int64(2),
+ "max_active_processes": int64(1),
+ "max_children_reached": int64(2),
+ "slow_requests": int64(1),
+ }
+
+ acc.AssertContainsTaggedFields(t, "phpfpm", fields, tags)
+}
+
+func TestPhpFpmGeneratesMetrics_From_Socket(t *testing.T) {
+ // Create a socket in /tmp because we always have write permission there, and
+ // if removing the socket fails, /tmp is cleared on system restart, so we
+ // don't leave junk files around.
+ var randomNumber int64
+ binary.Read(rand.Reader, binary.LittleEndian, &randomNumber)
+ tcp, err := net.Listen("unix", fmt.Sprintf("/tmp/test-fpm%d.sock", randomNumber))
+ if err != nil {
+ t.Fatal("Cannot initalize server on port ")
+ }
+
+ defer tcp.Close()
+ s := statServer{}
+ go fcgi.Serve(tcp, s)
+
+ r := &phpfpm{
+ Urls: []string{tcp.Addr().String()},
+ }
+
+ var acc testutil.Accumulator
+
+ err = r.Gather(&acc)
+ require.NoError(t, err)
+
+ tags := map[string]string{
+ "pool": "www",
+ }
+
+ fields := map[string]interface{}{
+ "accepted_conn": int64(3),
+ "listen_queue": int64(1),
+ "max_listen_queue": int64(0),
+ "listen_queue_len": int64(0),
+ "idle_processes": int64(1),
+ "active_processes": int64(1),
+ "total_processes": int64(2),
+ "max_active_processes": int64(1),
+ "max_children_reached": int64(2),
+ "slow_requests": int64(1),
+ }
+
+ acc.AssertContainsTaggedFields(t, "phpfpm", fields, tags)
+}
+
+func TestPhpFpmGeneratesMetrics_From_Socket_Custom_Status_Path(t *testing.T) {
+ // Create a socket in /tmp because we always have write permission there, and
+ // if removing the socket fails, /tmp is cleared on system restart, so we
+ // don't leave junk files around.
+ var randomNumber int64
+ binary.Read(rand.Reader, binary.LittleEndian, &randomNumber)
+ tcp, err := net.Listen("unix", fmt.Sprintf("/tmp/test-fpm%d.sock", randomNumber))
+ if err != nil {
+ t.Fatal("Cannot initalize server on port ")
+ }
+
+ defer tcp.Close()
+ s := statServer{}
+ go fcgi.Serve(tcp, s)
+
+ r := &phpfpm{
+ Urls: []string{tcp.Addr().String() + ":custom-status-path"},
+ }
+
+ var acc testutil.Accumulator
+
+ err = r.Gather(&acc)
+ require.NoError(t, err)
+
+ tags := map[string]string{
"pool": "www",
}
@@ -51,7 +188,7 @@ func TestPhpFpmGeneratesMetrics(t *testing.T) {
//When not passing server config, we default to localhost
//We just want to make sure we did request stat from localhost
-func TestHaproxyDefaultGetFromLocalhost(t *testing.T) {
+func TestPhpFpmDefaultGetFromLocalhost(t *testing.T) {
r := &phpfpm{}
var acc testutil.Accumulator
@@ -61,6 +198,31 @@ func TestHaproxyDefaultGetFromLocalhost(t *testing.T) {
assert.Contains(t, err.Error(), "127.0.0.1/status")
}
+func TestPhpFpmGeneratesMetrics_Throw_Error_When_Fpm_Status_Is_Not_Responding(t *testing.T) {
+ r := &phpfpm{
+ Urls: []string{"http://aninvalidone"},
+ }
+
+ var acc testutil.Accumulator
+
+ err := r.Gather(&acc)
+ require.Error(t, err)
+ assert.Contains(t, err.Error(), `Unable to connect to phpfpm status page 'http://aninvalidone': Get http://aninvalidone: dial tcp: lookup aninvalidone`)
+}
+
+func TestPhpFpmGeneratesMetrics_Throw_Error_When_Socket_Path_Is_Invalid(t *testing.T) {
+ r := &phpfpm{
+ Urls: []string{"/tmp/invalid.sock"},
+ }
+
+ var acc testutil.Accumulator
+
+ err := r.Gather(&acc)
+ require.Error(t, err)
+ assert.Equal(t, `Socket doesn't exist '/tmp/invalid.sock': stat /tmp/invalid.sock: no such file or directory`, err.Error())
+
+}
+
const outputSample = `
pool: www
process manager: dynamic
diff --git a/plugins/inputs/ping/ping.go b/plugins/inputs/ping/ping.go
index ff7cebb99..aa1d5bf36 100644
--- a/plugins/inputs/ping/ping.go
+++ b/plugins/inputs/ping/ping.go
@@ -7,7 +7,7 @@ import (
"strings"
"sync"
- "github.com/influxdb/telegraf/plugins/inputs"
+ "github.com/influxdata/telegraf/plugins/inputs"
)
// HostPinger is a function that runs the "ping" function using a list of
diff --git a/plugins/inputs/ping/ping_test.go b/plugins/inputs/ping/ping_test.go
index b98a08be8..be603a49c 100644
--- a/plugins/inputs/ping/ping_test.go
+++ b/plugins/inputs/ping/ping_test.go
@@ -6,7 +6,7 @@ import (
"sort"
"testing"
- "github.com/influxdb/telegraf/testutil"
+ "github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
)
diff --git a/plugins/inputs/postgresql/postgresql.go b/plugins/inputs/postgresql/postgresql.go
index c356cea77..3398f5ac0 100644
--- a/plugins/inputs/postgresql/postgresql.go
+++ b/plugins/inputs/postgresql/postgresql.go
@@ -6,7 +6,7 @@ import (
"fmt"
"strings"
- "github.com/influxdb/telegraf/plugins/inputs"
+ "github.com/influxdata/telegraf/plugins/inputs"
_ "github.com/lib/pq"
)
diff --git a/plugins/inputs/postgresql/postgresql_test.go b/plugins/inputs/postgresql/postgresql_test.go
index 0f4ff5579..8baae39a6 100644
--- a/plugins/inputs/postgresql/postgresql_test.go
+++ b/plugins/inputs/postgresql/postgresql_test.go
@@ -4,7 +4,7 @@ import (
"fmt"
"testing"
- "github.com/influxdb/telegraf/testutil"
+ "github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
diff --git a/plugins/inputs/procstat/README.md b/plugins/inputs/procstat/README.md
index d2322ab1f..0c37af509 100644
--- a/plugins/inputs/procstat/README.md
+++ b/plugins/inputs/procstat/README.md
@@ -16,25 +16,19 @@ individual process specific measurements.
Example:
```
- [procstat]
+[[inputs.procstat]]
+ exe = "influxd"
+ prefix = "influxd"
- [[procstat.specifications]]
- exe = "influxd"
- prefix = "influxd"
-
- [[procstat.specifications]]
- pid_file = "/var/run/lxc/dnsmasq.pid"
+[[inputs.procstat]]
+ pid_file = "/var/run/lxc/dnsmasq.pid"
```
The above configuration would result in output like:
```
-[...]
-> [name="dnsmasq" pid="44979"] procstat_cpu_user value=0.14
-> [name="dnsmasq" pid="44979"] procstat_cpu_system value=0.07
-[...]
-> [name="influxd" pid="34337"] procstat_influxd_cpu_user value=25.43
-> [name="influxd" pid="34337"] procstat_influxd_cpu_system value=21.82
+> procstat,name="dnsmasq",pid="44979" cpu_user=0.14,cpu_system=0.07
+> procstat,name="influxd",pid="34337" influxd_cpu_user=25.43,influxd_cpu_system=21.82
```
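Processes can also be matched by a full command-line `pattern` (handed to
`pgrep -f` by the plugin) instead of `exe` or `pid_file`; a hypothetical
example:

```
[[inputs.procstat]]
  pattern = "nginx: worker"
  prefix = "nginx"
```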
# Measurements
diff --git a/plugins/inputs/procstat/procstat.go b/plugins/inputs/procstat/procstat.go
index 5e596d6d8..fd8158ec7 100644
--- a/plugins/inputs/procstat/procstat.go
+++ b/plugins/inputs/procstat/procstat.go
@@ -10,7 +10,7 @@ import (
"github.com/shirou/gopsutil/process"
- "github.com/influxdb/telegraf/plugins/inputs"
+ "github.com/influxdata/telegraf/plugins/inputs"
)
type Procstat struct {
@@ -18,10 +18,14 @@ type Procstat struct {
Exe string
Pattern string
Prefix string
+
+ pidmap map[int32]*process.Process
}
func NewProcstat() *Procstat {
- return &Procstat{}
+ return &Procstat{
+ pidmap: make(map[int32]*process.Process),
+ }
}
var sampleConfig = `
@@ -46,12 +50,12 @@ func (_ *Procstat) Description() string {
}
func (p *Procstat) Gather(acc inputs.Accumulator) error {
- procs, err := p.createProcesses()
+ err := p.createProcesses()
if err != nil {
log.Printf("Error: procstat getting process, exe: [%s] pidfile: [%s] pattern: [%s] %s",
p.Exe, p.PidFile, p.Pattern, err.Error())
} else {
- for _, proc := range procs {
+ for _, proc := range p.pidmap {
p := NewSpecProcessor(p.Prefix, acc, proc)
p.pushMetrics()
}
@@ -60,8 +64,7 @@ func (p *Procstat) Gather(acc inputs.Accumulator) error {
return nil
}
-func (p *Procstat) createProcesses() ([]*process.Process, error) {
- var out []*process.Process
+func (p *Procstat) createProcesses() error {
var errstring string
var outerr error
@@ -71,11 +74,14 @@ func (p *Procstat) createProcesses() ([]*process.Process, error) {
}
for _, pid := range pids {
- p, err := process.NewProcess(int32(pid))
- if err == nil {
- out = append(out, p)
- } else {
- errstring += err.Error() + " "
+ _, ok := p.pidmap[pid]
+ if !ok {
+ proc, err := process.NewProcess(pid)
+ if err == nil {
+ p.pidmap[pid] = proc
+ } else {
+ errstring += err.Error() + " "
+ }
}
}
@@ -83,7 +89,7 @@ func (p *Procstat) createProcesses() ([]*process.Process, error) {
outerr = fmt.Errorf("%s", errstring)
}
- return out, outerr
+ return outerr
}
func (p *Procstat) getAllPids() ([]int32, error) {
@@ -123,9 +129,13 @@ func pidsFromFile(file string) ([]int32, error) {
func pidsFromExe(exe string) ([]int32, error) {
var out []int32
var outerr error
- pgrep, err := exec.Command("pgrep", exe).Output()
+ bin, err := exec.LookPath("pgrep")
if err != nil {
- return out, fmt.Errorf("Failed to execute pgrep. Error: '%s'", err)
+ return out, fmt.Errorf("Couldn't find pgrep binary: %s", err)
+ }
+ pgrep, err := exec.Command(bin, exe).Output()
+ if err != nil {
+ return out, fmt.Errorf("Failed to execute %s. Error: '%s'", bin, err)
} else {
pids := strings.Fields(string(pgrep))
for _, pid := range pids {
@@ -143,9 +153,13 @@ func pidsFromExe(exe string) ([]int32, error) {
func pidsFromPattern(pattern string) ([]int32, error) {
var out []int32
var outerr error
- pgrep, err := exec.Command("pgrep", "-f", pattern).Output()
+ bin, err := exec.LookPath("pgrep")
if err != nil {
- return out, fmt.Errorf("Failed to execute pgrep. Error: '%s'", err)
+ return out, fmt.Errorf("Couldn't find pgrep binary: %s", err)
+ }
+ pgrep, err := exec.Command(bin, "-f", pattern).Output()
+ if err != nil {
+ return out, fmt.Errorf("Failed to execute %s. Error: '%s'", bin, err)
} else {
pids := strings.Fields(string(pgrep))
for _, pid := range pids {
diff --git a/plugins/inputs/procstat/procstat_test.go b/plugins/inputs/procstat/procstat_test.go
index 6ec6834ca..bf5790f67 100644
--- a/plugins/inputs/procstat/procstat_test.go
+++ b/plugins/inputs/procstat/procstat_test.go
@@ -6,10 +6,11 @@ import (
"strconv"
"testing"
+ "github.com/shirou/gopsutil/process"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
- "github.com/influxdb/telegraf/testutil"
+ "github.com/influxdata/telegraf/testutil"
)
func TestGather(t *testing.T) {
@@ -23,6 +24,7 @@ func TestGather(t *testing.T) {
p := Procstat{
PidFile: file.Name(),
Prefix: "foo",
+ pidmap: make(map[int32]*process.Process),
}
p.Gather(&acc)
assert.True(t, acc.HasFloatField("procstat", "foo_cpu_time_user"))
diff --git a/plugins/inputs/procstat/spec_processor.go b/plugins/inputs/procstat/spec_processor.go
index 9c7e53826..b66572f2e 100644
--- a/plugins/inputs/procstat/spec_processor.go
+++ b/plugins/inputs/procstat/spec_processor.go
@@ -2,11 +2,11 @@ package procstat
import (
"fmt"
- "log"
+ "time"
"github.com/shirou/gopsutil/process"
- "github.com/influxdb/telegraf/plugins/inputs"
+ "github.com/influxdata/telegraf/plugins/inputs"
)
type SpecProcessor struct {
@@ -40,7 +40,7 @@ func NewSpecProcessor(
tags := make(map[string]string)
tags["pid"] = fmt.Sprintf("%v", p.Pid)
if name, err := p.Name(); err == nil {
- tags["name"] = name
+ tags["process_name"] = name
}
return &SpecProcessor{
Prefix: prefix,
@@ -52,21 +52,11 @@ func NewSpecProcessor(
}
func (p *SpecProcessor) pushMetrics() {
- if err := p.pushFDStats(); err != nil {
- log.Printf("procstat, fd stats not available: %s", err.Error())
- }
- if err := p.pushCtxStats(); err != nil {
- log.Printf("procstat, ctx stats not available: %s", err.Error())
- }
- if err := p.pushIOStats(); err != nil {
- log.Printf("procstat, io stats not available: %s", err.Error())
- }
- if err := p.pushCPUStats(); err != nil {
- log.Printf("procstat, cpu stats not available: %s", err.Error())
- }
- if err := p.pushMemoryStats(); err != nil {
- log.Printf("procstat, mem stats not available: %s", err.Error())
- }
+ p.pushFDStats()
+ p.pushCtxStats()
+ p.pushIOStats()
+ p.pushCPUStats()
+ p.pushMemoryStats()
p.flush()
}
@@ -113,10 +103,18 @@ func (p *SpecProcessor) pushCPUStats() error {
p.add("cpu_time_iowait", cpu_time.Iowait)
p.add("cpu_time_irq", cpu_time.Irq)
p.add("cpu_time_soft_irq", cpu_time.Softirq)
- p.add("cpu_time_soft_steal", cpu_time.Steal)
- p.add("cpu_time_soft_stolen", cpu_time.Stolen)
- p.add("cpu_time_soft_guest", cpu_time.Guest)
- p.add("cpu_time_soft_guest_nice", cpu_time.GuestNice)
+ p.add("cpu_time_steal", cpu_time.Steal)
+ p.add("cpu_time_stolen", cpu_time.Stolen)
+ p.add("cpu_time_guest", cpu_time.Guest)
+ p.add("cpu_time_guest_nice", cpu_time.GuestNice)
+
+ cpu_perc, err := p.proc.CPUPercent(time.Duration(0))
+ if err != nil {
+ return err
+ } else if cpu_perc == 0 {
+ return nil
+ }
+ p.add("cpu_usage", cpu_perc)
return nil
}
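The new `cpu_usage` field comes from `CPUPercent`, and it is the reason
procstat now caches `*process.Process` handles in `pidmap`: with a zero
interval, gopsutil (as used here) diffs against CPU times cached on the
handle, so the first call has no baseline, which is why a zero value is
silently skipped above. A standalone sketch under those assumptions:

```
package main

import (
	"fmt"
	"os"
	"time"

	"github.com/shirou/gopsutil/process"
)

func main() {
	// Reuse one handle across samples, as procstat's pidmap now does; a
	// fresh handle each interval would have no baseline to diff against.
	proc, err := process.NewProcess(int32(os.Getpid()))
	if err != nil {
		panic(err)
	}
	proc.CPUPercent(0) // first call: establishes the baseline, reports 0
	time.Sleep(time.Second)
	pct, _ := proc.CPUPercent(0) // usage since the previous call
	fmt.Printf("cpu_usage: %.2f%%\n", pct)
}
```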
diff --git a/plugins/inputs/prometheus/prometheus.go b/plugins/inputs/prometheus/prometheus.go
index 758788b8d..e6374b8d6 100644
--- a/plugins/inputs/prometheus/prometheus.go
+++ b/plugins/inputs/prometheus/prometheus.go
@@ -3,7 +3,7 @@ package prometheus
import (
"errors"
"fmt"
- "github.com/influxdb/telegraf/plugins/inputs"
+ "github.com/influxdata/telegraf/plugins/inputs"
"github.com/prometheus/common/expfmt"
"github.com/prometheus/common/model"
"io"
diff --git a/plugins/inputs/prometheus/prometheus_test.go b/plugins/inputs/prometheus/prometheus_test.go
index 901fe2da2..2009cbb11 100644
--- a/plugins/inputs/prometheus/prometheus_test.go
+++ b/plugins/inputs/prometheus/prometheus_test.go
@@ -6,7 +6,7 @@ import (
"net/http/httptest"
"testing"
- "github.com/influxdb/telegraf/testutil"
+ "github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
diff --git a/plugins/inputs/puppetagent/puppetagent.go b/plugins/inputs/puppetagent/puppetagent.go
index d0bedae9e..eee9186b3 100644
--- a/plugins/inputs/puppetagent/puppetagent.go
+++ b/plugins/inputs/puppetagent/puppetagent.go
@@ -8,7 +8,7 @@ import (
"reflect"
"strings"
- "github.com/influxdb/telegraf/plugins/inputs"
+ "github.com/influxdata/telegraf/plugins/inputs"
)
// PuppetAgent is a PuppetAgent plugin
diff --git a/plugins/inputs/puppetagent/puppetagent_test.go b/plugins/inputs/puppetagent/puppetagent_test.go
index 1d854ab46..d1470bc27 100644
--- a/plugins/inputs/puppetagent/puppetagent_test.go
+++ b/plugins/inputs/puppetagent/puppetagent_test.go
@@ -1,7 +1,7 @@
package puppetagent
import (
- "github.com/influxdb/telegraf/testutil"
+ "github.com/influxdata/telegraf/testutil"
"testing"
)
diff --git a/plugins/inputs/rabbitmq/rabbitmq.go b/plugins/inputs/rabbitmq/rabbitmq.go
index fc95af494..103484e78 100644
--- a/plugins/inputs/rabbitmq/rabbitmq.go
+++ b/plugins/inputs/rabbitmq/rabbitmq.go
@@ -7,7 +7,7 @@ import (
"strconv"
"time"
- "github.com/influxdb/telegraf/plugins/inputs"
+ "github.com/influxdata/telegraf/plugins/inputs"
)
const DefaultUsername = "guest"
@@ -57,9 +57,14 @@ type ObjectTotals struct {
}
type QueueTotals struct {
- Messages int64
- MessagesReady int64 `json:"messages_ready"`
- MessagesUnacknowledged int64 `json:"messages_unacknowledged"`
+ Messages int64
+ MessagesReady int64 `json:"messages_ready"`
+ MessagesUnacknowledged int64 `json:"messages_unacknowledged"`
+ MessageBytes int64 `json:"message_bytes"`
+ MessageBytesReady int64 `json:"message_bytes_ready"`
+ MessageBytesUnacknowledged int64 `json:"message_bytes_unacknowledged"`
+ MessageRam int64 `json:"message_bytes_ram"`
+ MessagePersistent int64 `json:"message_bytes_persistent"`
}
type Queue struct {
@@ -270,6 +275,11 @@ func gatherQueues(r *RabbitMQ, acc inputs.Accumulator, errChan chan error) {
"consumer_utilisation": queue.ConsumerUtilisation,
"memory": queue.Memory,
// messages information
+ "message_bytes": queue.MessageBytes,
+ "message_bytes_ready": queue.MessageBytesReady,
+ "message_bytes_unacked": queue.MessageBytesUnacknowledged,
+ "message_bytes_ram": queue.MessageRam,
+ "message_bytes_persist": queue.MessagePersistent,
"messages": queue.Messages,
"messages_ready": queue.MessagesReady,
"messages_unack": queue.MessagesUnacknowledged,
diff --git a/plugins/inputs/rabbitmq/rabbitmq_test.go b/plugins/inputs/rabbitmq/rabbitmq_test.go
index 12b7aee70..4bdc980db 100644
--- a/plugins/inputs/rabbitmq/rabbitmq_test.go
+++ b/plugins/inputs/rabbitmq/rabbitmq_test.go
@@ -6,7 +6,7 @@ import (
"net/http/httptest"
"testing"
- "github.com/influxdb/telegraf/testutil"
+ "github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
diff --git a/plugins/inputs/redis/redis.go b/plugins/inputs/redis/redis.go
index c9e98e886..735aa2052 100644
--- a/plugins/inputs/redis/redis.go
+++ b/plugins/inputs/redis/redis.go
@@ -10,7 +10,7 @@ import (
"strings"
"sync"
- "github.com/influxdb/telegraf/plugins/inputs"
+ "github.com/influxdata/telegraf/plugins/inputs"
)
type Redis struct {
diff --git a/plugins/inputs/redis/redis_test.go b/plugins/inputs/redis/redis_test.go
index ec0cf998c..612595cdb 100644
--- a/plugins/inputs/redis/redis_test.go
+++ b/plugins/inputs/redis/redis_test.go
@@ -6,7 +6,7 @@ import (
"strings"
"testing"
- "github.com/influxdb/telegraf/testutil"
+ "github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/require"
)
diff --git a/plugins/inputs/rethinkdb/rethinkdb.go b/plugins/inputs/rethinkdb/rethinkdb.go
index 17873f1ce..1f28dab25 100644
--- a/plugins/inputs/rethinkdb/rethinkdb.go
+++ b/plugins/inputs/rethinkdb/rethinkdb.go
@@ -5,7 +5,7 @@ import (
"net/url"
"sync"
- "github.com/influxdb/telegraf/plugins/inputs"
+ "github.com/influxdata/telegraf/plugins/inputs"
"gopkg.in/dancannon/gorethink.v1"
)
diff --git a/plugins/inputs/rethinkdb/rethinkdb_data.go b/plugins/inputs/rethinkdb/rethinkdb_data.go
index 3ea429d82..8093fa5ba 100644
--- a/plugins/inputs/rethinkdb/rethinkdb_data.go
+++ b/plugins/inputs/rethinkdb/rethinkdb_data.go
@@ -4,7 +4,7 @@ import (
"reflect"
"time"
- "github.com/influxdb/telegraf/plugins/inputs"
+ "github.com/influxdata/telegraf/plugins/inputs"
)
type serverStatus struct {
diff --git a/plugins/inputs/rethinkdb/rethinkdb_data_test.go b/plugins/inputs/rethinkdb/rethinkdb_data_test.go
index 3441370a3..6159016c0 100644
--- a/plugins/inputs/rethinkdb/rethinkdb_data_test.go
+++ b/plugins/inputs/rethinkdb/rethinkdb_data_test.go
@@ -3,7 +3,7 @@ package rethinkdb
import (
"testing"
- "github.com/influxdb/telegraf/testutil"
+ "github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
)
diff --git a/plugins/inputs/rethinkdb/rethinkdb_server.go b/plugins/inputs/rethinkdb/rethinkdb_server.go
index 4af916a4d..6ca7a3af1 100644
--- a/plugins/inputs/rethinkdb/rethinkdb_server.go
+++ b/plugins/inputs/rethinkdb/rethinkdb_server.go
@@ -9,7 +9,7 @@ import (
"strconv"
"strings"
- "github.com/influxdb/telegraf/plugins/inputs"
+ "github.com/influxdata/telegraf/plugins/inputs"
"gopkg.in/dancannon/gorethink.v1"
)
diff --git a/plugins/inputs/rethinkdb/rethinkdb_server_test.go b/plugins/inputs/rethinkdb/rethinkdb_server_test.go
index 21ab0dbbd..c4b644222 100644
--- a/plugins/inputs/rethinkdb/rethinkdb_server_test.go
+++ b/plugins/inputs/rethinkdb/rethinkdb_server_test.go
@@ -5,7 +5,7 @@ package rethinkdb
import (
"testing"
- "github.com/influxdb/telegraf/testutil"
+ "github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
diff --git a/plugins/inputs/sensors/sensors.go b/plugins/inputs/sensors/sensors.go
index 926003b83..81001abd8 100644
--- a/plugins/inputs/sensors/sensors.go
+++ b/plugins/inputs/sensors/sensors.go
@@ -1,4 +1,4 @@
-// +build linux
+// +build linux,sensors
package sensors
@@ -7,7 +7,7 @@ import (
"github.com/md14454/gosensors"
- "github.com/influxdb/telegraf/plugins/inputs"
+ "github.com/influxdata/telegraf/plugins/inputs"
)
type Sensors struct {
diff --git a/plugins/inputs/sensors/sensors_nocompile.go b/plugins/inputs/sensors/sensors_nocompile.go
new file mode 100644
index 000000000..5c38a437b
--- /dev/null
+++ b/plugins/inputs/sensors/sensors_nocompile.go
@@ -0,0 +1,3 @@
+// +build !linux !sensors
+
+package sensors
diff --git a/plugins/inputs/sensors/sensors_notlinux.go b/plugins/inputs/sensors/sensors_notlinux.go
deleted file mode 100644
index 62a621159..000000000
--- a/plugins/inputs/sensors/sensors_notlinux.go
+++ /dev/null
@@ -1,3 +0,0 @@
-// +build !linux
-
-package sensors
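With the new build tags, the sensors input is opt-in at compile time as well
as OS-specific: `// +build linux,sensors` requires both conditions to hold,
and the `sensors_nocompile.go` stub keeps every other build compiling.
Presumably this means standard builds no longer pull in the
lm-sensors-backed gosensors dependency unless compiled with something like
`go build -tags sensors`.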
diff --git a/plugins/inputs/snmp/snmp.go b/plugins/inputs/snmp/snmp.go
new file mode 100644
index 000000000..bebb54bdc
--- /dev/null
+++ b/plugins/inputs/snmp/snmp.go
@@ -0,0 +1,473 @@
+package snmp
+
+import (
+ "io/ioutil"
+ "log"
+ "net"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/influxdata/telegraf/plugins/inputs"
+
+ "github.com/soniah/gosnmp"
+)
+
+// Snmp is a snmp plugin
+type Snmp struct {
+ Host []Host
+ Get []Data
+ Bulk []Data
+ SnmptranslateFile string
+}
+
+type Host struct {
+ Address string
+ Community string
+ // SNMP version. Default 2
+ Version int
+ // SNMP timeout, in seconds. 0 means no timeout
+ Timeout float64
+ // SNMP retries
+ Retries int
+ // Data to collect (list of Data names)
+ Collect []string
+ // easy get oids
+ GetOids []string
+ // Oids
+ getOids []Data
+ bulkOids []Data
+}
+
+type Data struct {
+ Name string
+ // OID (could be numbers or name)
+ Oid string
+ // Unit
+ Unit string
+ // SNMP getbulk max repetition
+ MaxRepetition uint8 `toml:"max_repetition"`
+ // SNMP Instance (default 0)
+ // (only used with GET request and if
+ // OID is a name from snmptranslate file)
+ Instance string
+ // OID (only number) (used for computation)
+ rawOid string
+}
+
+type Node struct {
+ id string
+ name string
+ subnodes map[string]Node
+}
+
+var initNode = Node{
+ id: "1",
+ name: "",
+ subnodes: make(map[string]Node),
+}
+
+var NameToOid = make(map[string]string)
+
+var sampleConfig = `
+ # Use 'oids.txt' file to translate oids to names
+ # To generate 'oids.txt' you need to run:
+ # snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt
+ # Or if you have another MIB folder with custom MIBs
+ # snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt
+ snmptranslate_file = "/tmp/oids.txt"
+ [[inputs.snmp.host]]
+ address = "192.168.2.2:161"
+ # SNMP community
+ community = "public" # default public
+ # SNMP version (1, 2 or 3)
+ # Version 3 not supported yet
+ version = 2 # default 2
+ # SNMP response timeout
+ timeout = 2.0 # default 2.0
+ # SNMP request retries
+ retries = 2 # default 2
+ # Which get/bulk do you want to collect for this host
+ collect = ["mybulk", "sysservices", "sysdescr"]
+ # Simple list of OIDs to get, in addition to "collect"
+ get_oids = []
+
+ [[inputs.snmp.host]]
+ address = "192.168.2.3:161"
+ community = "public"
+ version = 2
+ timeout = 2.0
+ retries = 2
+ collect = ["mybulk"]
+ get_oids = [
+ "ifNumber",
+ ".1.3.6.1.2.1.1.3.0",
+ ]
+
+ [[inputs.snmp.get]]
+ name = "ifnumber"
+ oid = "ifNumber"
+
+ [[inputs.snmp.get]]
+ name = "interface_speed"
+ oid = "ifSpeed"
+ instance = 0
+
+ [[inputs.snmp.get]]
+ name = "sysuptime"
+ oid = ".1.3.6.1.2.1.1.3.0"
+ unit = "second"
+
+ [[inputs.snmp.bulk]]
+ name = "mybulk"
+ max_repetition = 127
+ oid = ".1.3.6.1.2.1.1"
+
+ [[inputs.snmp.bulk]]
+ name = "ifoutoctets"
+ max_repetition = 127
+ oid = "ifOutOctets"
+`
+
+// SampleConfig returns sample configuration message
+func (s *Snmp) SampleConfig() string {
+ return sampleConfig
+}
+
+// Description returns description of the Snmp plugin
+func (s *Snmp) Description() string {
+ return `Reads OID values from one or many snmp agents`
+}
+
+func fillnode(parentNode Node, oid_name string, ids []string) {
+ // ids = ["1", "3", "6", ...]
+ id, ids := ids[0], ids[1:]
+ node, ok := parentNode.subnodes[id]
+ if ok == false {
+ node = Node{
+ id: id,
+ name: "",
+ subnodes: make(map[string]Node),
+ }
+ if len(ids) == 0 {
+ node.name = oid_name
+ }
+ parentNode.subnodes[id] = node
+ }
+ if len(ids) > 0 {
+ fillnode(node, oid_name, ids)
+ }
+}
+
+func findnodename(node Node, ids []string) (string, string) {
+ // ids = ["1", "3", "6", ...]
+ if len(ids) == 1 {
+ return node.name, ids[0]
+ }
+ id, ids := ids[0], ids[1:]
+ // Get node
+ subnode, ok := node.subnodes[id]
+ if ok {
+ return findnodename(subnode, ids)
+ }
+ // We have reached the deepest known node
+ // Get the node name
+ if node.name != "" && len(ids) == 0 && id == "0" {
+ // node with instance 0
+ return node.name, "0"
+ } else if node.name != "" && len(ids) == 0 && id != "0" {
+ // node with an instance
+ return node.name, string(id)
+ } else if node.name != "" && len(ids) > 0 {
+ // node with subinstances
+ return node.name, strings.Join(ids, ".")
+ }
+ // return an empty node name
+ return node.name, ""
+}
+
+func (s *Snmp) Gather(acc inputs.Accumulator) error {
+ // Create oid tree
+ if s.SnmptranslateFile != "" && len(initNode.subnodes) == 0 {
+ data, err := ioutil.ReadFile(s.SnmptranslateFile)
+ if err != nil {
+ log.Printf("Reading SNMPtranslate file error: %s", err)
+ return err
+ } else {
+ for _, line := range strings.Split(string(data), "\n") {
+ oidsRegEx := regexp.MustCompile(`([^\t]*)\t*([^\t]*)`)
+ oids := oidsRegEx.FindStringSubmatch(string(line))
+ if oids[2] != "" {
+ oid_name := oids[1]
+ oid := oids[2]
+ fillnode(initNode, oid_name, strings.Split(string(oid), "."))
+ NameToOid[oid_name] = oid
+ }
+ }
+ }
+ }
+ // Fetching data
+ for _, host := range s.Host {
+ // Set default args
+ if len(host.Address) == 0 {
+ host.Address = "127.0.0.1:161"
+ }
+ if host.Community == "" {
+ host.Community = "public"
+ }
+ if host.Timeout <= 0 {
+ host.Timeout = 2.0
+ }
+ if host.Retries <= 0 {
+ host.Retries = 2
+ }
+ // Prepare host
+ // Get Easy GET oids
+ for _, oidstring := range host.GetOids {
+ oid := Data{}
+ if val, ok := NameToOid[oidstring]; ok {
+ // TODO should we add the 0 instance ?
+ oid.Name = oidstring
+ oid.Oid = val
+ oid.rawOid = "." + val + ".0"
+ } else {
+ oid.Name = oidstring
+ oid.Oid = oidstring
+ if string(oidstring[:1]) != "." {
+ oid.rawOid = "." + oidstring
+ } else {
+ oid.rawOid = oidstring
+ }
+ }
+ host.getOids = append(host.getOids, oid)
+ }
+
+ for _, oid_name := range host.Collect {
+ // Get GET oids
+ for _, oid := range s.Get {
+ if oid.Name == oid_name {
+ if val, ok := NameToOid[oid.Oid]; ok {
+ // TODO should we add the 0 instance ?
+ if oid.Instance != "" {
+ oid.rawOid = "." + val + "." + oid.Instance
+ } else {
+ oid.rawOid = "." + val + ".0"
+ }
+ } else {
+ oid.rawOid = oid.Oid
+ }
+ host.getOids = append(host.getOids, oid)
+ }
+ }
+ // Get GETBULK oids
+ for _, oid := range s.Bulk {
+ if oid.Name == oid_name {
+ if val, ok := NameToOid[oid.Oid]; ok {
+ oid.rawOid = "." + val
+ } else {
+ oid.rawOid = oid.Oid
+ }
+ host.bulkOids = append(host.bulkOids, oid)
+ }
+ }
+ }
+ // Launch Get requests
+ if err := host.SNMPGet(acc); err != nil {
+ return err
+ }
+ if err := host.SNMPBulk(acc); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (h *Host) SNMPGet(acc inputs.Accumulator) error {
+ // Get snmp client
+ snmpClient, err := h.GetSNMPClient()
+ if err != nil {
+ return err
+ }
+ // Disconnect when done
+ defer snmpClient.Conn.Close()
+ // Prepare OIDs
+ oidsList := make(map[string]Data)
+ for _, oid := range h.getOids {
+ oidsList[oid.rawOid] = oid
+ }
+ oidsNameList := make([]string, 0, len(oidsList))
+ for _, oid := range oidsList {
+ oidsNameList = append(oidsNameList, oid.rawOid)
+ }
+
+ // gosnmp.MAX_OIDS == 60
+ // TODO use gosnmp.MAX_OIDS instead of the hard-coded value
+ max_oids := 60
+ // limit to 60 (MAX_OIDS) oids per request
+ for i := 0; i < len(oidsList); i = i + max_oids {
+ // Launch request
+ max_index := i + max_oids
+ if i+max_oids > len(oidsList) {
+ max_index = len(oidsList)
+ }
+ result, err3 := snmpClient.Get(oidsNameList[i:max_index]) // Get() accepts up to g.MAX_OIDS
+ if err3 != nil {
+ return err3
+ }
+ // Handle response
+ _, err = h.HandleResponse(oidsList, result, acc)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (h *Host) SNMPBulk(acc inputs.Accumulator) error {
+ // Get snmp client
+ snmpClient, err := h.GetSNMPClient()
+ if err != nil {
+ return err
+ }
+ // Disconnect when done
+ defer snmpClient.Conn.Close()
+ // Prepare OIDs
+ oidsList := make(map[string]Data)
+ for _, oid := range h.bulkOids {
+ oidsList[oid.rawOid] = oid
+ }
+ oidsNameList := make([]string, 0, len(oidsList))
+ for _, oid := range oidsList {
+ oidsNameList = append(oidsNameList, oid.rawOid)
+ }
+ // TODO Trying to make requests with more than one OID
+ // to reduce the number of requests
+ for _, oid := range oidsNameList {
+ oid_asked := oid
+ need_more_requests := true
+ // Set max repetition
+ maxRepetition := oidsList[oid].MaxRepetition
+ if maxRepetition <= 0 {
+ maxRepetition = 32
+ }
+ // Launch requests
+ for need_more_requests {
+ // Launch request
+ result, err3 := snmpClient.GetBulk([]string{oid}, 0, maxRepetition)
+ if err3 != nil {
+ return err3
+ }
+ // Handle response
+ last_oid, err := h.HandleResponse(oidsList, result, acc)
+ if err != nil {
+ return err
+ }
+ // Determine if we need more requests
+ if strings.HasPrefix(last_oid, oid_asked) {
+ need_more_requests = true
+ oid = last_oid
+ } else {
+ need_more_requests = false
+ }
+ }
+ }
+ return nil
+}
+
+func (h *Host) GetSNMPClient() (*gosnmp.GoSNMP, error) {
+ // Prepare Version
+ var version gosnmp.SnmpVersion
+ if h.Version == 1 {
+ version = gosnmp.Version1
+ } else if h.Version == 3 {
+ version = gosnmp.Version3
+ } else {
+ version = gosnmp.Version2c
+ }
+ // Prepare host and port
+ host, port_str, err := net.SplitHostPort(h.Address)
+ if err != nil {
+ port_str = string("161")
+ }
+ // convert port_str to port in uint16
+ port_64, err := strconv.ParseUint(port_str, 10, 16)
+ port := uint16(port_64)
+ // Get SNMP client
+ snmpClient := &gosnmp.GoSNMP{
+ Target: host,
+ Port: port,
+ Community: h.Community,
+ Version: version,
+ Timeout: time.Duration(h.Timeout) * time.Second,
+ Retries: h.Retries,
+ }
+ // Connection
+ err2 := snmpClient.Connect()
+ if err2 != nil {
+ return nil, err2
+ }
+ // Return snmpClient
+ return snmpClient, nil
+}
+
+func (h *Host) HandleResponse(oids map[string]Data, result *gosnmp.SnmpPacket, acc inputs.Accumulator) (string, error) {
+ var lastOid string
+ for _, variable := range result.Variables {
+ lastOid = variable.Name
+ // Only process oids that were requested
+ for oid_key, oid := range oids {
+ if strings.HasPrefix(variable.Name, oid_key) {
+ switch variable.Type {
+ // handle Metrics
+ case gosnmp.Boolean, gosnmp.Integer, gosnmp.Counter32, gosnmp.Gauge32,
+ gosnmp.TimeTicks, gosnmp.Counter64, gosnmp.Uinteger32:
+ // Prepare tags
+ tags := make(map[string]string)
+ if oid.Unit != "" {
+ tags["unit"] = oid.Unit
+ }
+ // Get name and instance
+ var oid_name string
+ var instance string
+ // Get oid name and instance from the translate file
+ oid_name, instance = findnodename(initNode,
+ strings.Split(string(variable.Name[1:]), "."))
+
+ if instance != "" {
+ tags["instance"] = instance
+ }
+
+ // Set name
+ var field_name string
+ if oid_name != "" {
+ // Set fieldname as oid name from translate file
+ field_name = oid_name
+ } else {
+ // Set fieldname as oid name from inputs.snmp.get section
+ // Because the result oid is equal to inputs.snmp.get section
+ field_name = oid.Name
+ }
+ tags["host"], _, _ = net.SplitHostPort(h.Address)
+ fields := make(map[string]interface{})
+ fields[string(field_name)] = variable.Value
+
+ acc.AddFields(field_name, fields, tags)
+ case gosnmp.NoSuchObject, gosnmp.NoSuchInstance:
+ // Oid not found
+ log.Printf("[snmp input] Oid not found: %s", oid_key)
+ default:
+ // ignore other data types
+ }
+ break
+ }
+ }
+ }
+ return lastOid, nil
+}
+
+func init() {
+ inputs.Add("snmp", func() inputs.Input {
+ return &Snmp{}
+ })
+}
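`fillnode` builds a tree from the snmptranslate file and `findnodename`
walks it to turn a result OID back into a field name plus an `instance`
tag; for example, with the `testdata/oids.txt` added below,
`.1.3.6.1.2.1.2.1.0` resolves to name `ifNumber` and instance `0`, which is
what the tests assert. A standalone sketch of the lookup (simplified,
illustrative names):

```
package main

import (
	"fmt"
	"strings"
)

// node mirrors the plugin's Node: one arc of the OID tree.
type node struct {
	name     string
	subnodes map[string]*node
}

// insert registers a name under its dotted OID, like fillnode.
func insert(root *node, name, oid string) {
	cur := root
	for _, id := range strings.Split(oid, ".") {
		next, ok := cur.subnodes[id]
		if !ok {
			next = &node{subnodes: map[string]*node{}}
			cur.subnodes[id] = next
		}
		cur = next
	}
	cur.name = name
}

// resolve walks as deep as it can, like findnodename: the unmatched
// remainder of the OID becomes the instance.
func resolve(root *node, oid string) (name, instance string) {
	cur := root
	ids := strings.Split(strings.TrimPrefix(oid, "."), ".")
	for i, id := range ids {
		next, ok := cur.subnodes[id]
		if !ok {
			return cur.name, strings.Join(ids[i:], ".")
		}
		cur = next
	}
	return cur.name, ""
}

func main() {
	root := &node{subnodes: map[string]*node{}}
	insert(root, "ifNumber", "1.3.6.1.2.1.2.1")
	fmt.Println(resolve(root, ".1.3.6.1.2.1.2.1.0")) // ifNumber 0
}
```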
diff --git a/plugins/inputs/snmp/snmp_test.go b/plugins/inputs/snmp/snmp_test.go
new file mode 100644
index 000000000..594a70217
--- /dev/null
+++ b/plugins/inputs/snmp/snmp_test.go
@@ -0,0 +1,459 @@
+package snmp
+
+import (
+ "testing"
+
+ "github.com/influxdata/telegraf/testutil"
+
+ // "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestSNMPErrorGet1(t *testing.T) {
+ get1 := Data{
+ Name: "oid1",
+ Unit: "octets",
+ Oid: ".1.3.6.1.2.1.2.2.1.16.1",
+ }
+ h := Host{
+ Collect: []string{"oid1"},
+ }
+ s := Snmp{
+ SnmptranslateFile: "bad_oid.txt",
+ Host: []Host{h},
+ Get: []Data{get1},
+ }
+
+ var acc testutil.Accumulator
+ err := s.Gather(&acc)
+ require.Error(t, err)
+}
+
+func TestSNMPErrorGet2(t *testing.T) {
+ get1 := Data{
+ Name: "oid1",
+ Unit: "octets",
+ Oid: ".1.3.6.1.2.1.2.2.1.16.1",
+ }
+ h := Host{
+ Collect: []string{"oid1"},
+ }
+ s := Snmp{
+ Host: []Host{h},
+ Get: []Data{get1},
+ }
+
+ var acc testutil.Accumulator
+ err := s.Gather(&acc)
+ require.Error(t, err)
+}
+
+func TestSNMPErrorBulk(t *testing.T) {
+ bulk1 := Data{
+ Name: "oid1",
+ Unit: "octets",
+ Oid: ".1.3.6.1.2.1.2.2.1.16",
+ }
+ h := Host{
+ Address: "127.0.0.1",
+ Collect: []string{"oid1"},
+ }
+ s := Snmp{
+ Host: []Host{h},
+ Bulk: []Data{bulk1},
+ }
+
+ var acc testutil.Accumulator
+ err := s.Gather(&acc)
+ require.Error(t, err)
+}
+
+func TestSNMPGet1(t *testing.T) {
+ get1 := Data{
+ Name: "oid1",
+ Unit: "octets",
+ Oid: ".1.3.6.1.2.1.2.2.1.16.1",
+ }
+ h := Host{
+ Address: "127.0.0.1:31161",
+ Community: "telegraf",
+ Version: 2,
+ Timeout: 2.0,
+ Retries: 2,
+ Collect: []string{"oid1"},
+ }
+ s := Snmp{
+ Host: []Host{h},
+ Get: []Data{get1},
+ }
+
+ var acc testutil.Accumulator
+ err := s.Gather(&acc)
+ require.NoError(t, err)
+
+ acc.AssertContainsTaggedFields(t,
+ "oid1",
+ map[string]interface{}{
+ "oid1": uint(543846),
+ },
+ map[string]string{
+ "unit": "octets",
+ "host": "127.0.0.1",
+ },
+ )
+}
+
+func TestSNMPGet2(t *testing.T) {
+ get1 := Data{
+ Name: "oid1",
+ Oid: "ifNumber",
+ }
+ h := Host{
+ Address: "127.0.0.1:31161",
+ Community: "telegraf",
+ Version: 2,
+ Timeout: 2.0,
+ Retries: 2,
+ Collect: []string{"oid1"},
+ }
+ s := Snmp{
+ SnmptranslateFile: "./testdata/oids.txt",
+ Host: []Host{h},
+ Get: []Data{get1},
+ }
+
+ var acc testutil.Accumulator
+ err := s.Gather(&acc)
+ require.NoError(t, err)
+
+ acc.AssertContainsTaggedFields(t,
+ "ifNumber",
+ map[string]interface{}{
+ "ifNumber": int(4),
+ },
+ map[string]string{
+ "instance": "0",
+ "host": "127.0.0.1",
+ },
+ )
+}
+
+func TestSNMPGet3(t *testing.T) {
+ get1 := Data{
+ Name: "oid1",
+ Unit: "octets",
+ Oid: "ifSpeed",
+ Instance: "1",
+ }
+ h := Host{
+ Address: "127.0.0.1:31161",
+ Community: "telegraf",
+ Version: 2,
+ Timeout: 2.0,
+ Retries: 2,
+ Collect: []string{"oid1"},
+ }
+ s := Snmp{
+ SnmptranslateFile: "./testdata/oids.txt",
+ Host: []Host{h},
+ Get: []Data{get1},
+ }
+
+ var acc testutil.Accumulator
+ err := s.Gather(&acc)
+ require.NoError(t, err)
+
+ acc.AssertContainsTaggedFields(t,
+ "ifSpeed",
+ map[string]interface{}{
+ "ifSpeed": uint(10000000),
+ },
+ map[string]string{
+ "unit": "octets",
+ "instance": "1",
+ "host": "127.0.0.1",
+ },
+ )
+}
+
+func TestSNMPEasyGet4(t *testing.T) {
+ get1 := Data{
+ Name: "oid1",
+ Unit: "octets",
+ Oid: "ifSpeed",
+ Instance: "1",
+ }
+ h := Host{
+ Address: "127.0.0.1:31161",
+ Community: "telegraf",
+ Version: 2,
+ Timeout: 2.0,
+ Retries: 2,
+ Collect: []string{"oid1"},
+ GetOids: []string{"ifNumber"},
+ }
+ s := Snmp{
+ SnmptranslateFile: "./testdata/oids.txt",
+ Host: []Host{h},
+ Get: []Data{get1},
+ }
+
+ var acc testutil.Accumulator
+ err := s.Gather(&acc)
+ require.NoError(t, err)
+
+ acc.AssertContainsTaggedFields(t,
+ "ifSpeed",
+ map[string]interface{}{
+ "ifSpeed": uint(10000000),
+ },
+ map[string]string{
+ "unit": "octets",
+ "instance": "1",
+ "host": "127.0.0.1",
+ },
+ )
+
+ acc.AssertContainsTaggedFields(t,
+ "ifNumber",
+ map[string]interface{}{
+ "ifNumber": int(4),
+ },
+ map[string]string{
+ "instance": "0",
+ "host": "127.0.0.1",
+ },
+ )
+}
+
+func TestSNMPEasyGet5(t *testing.T) {
+ get1 := Data{
+ Name: "oid1",
+ Unit: "octets",
+ Oid: "ifSpeed",
+ Instance: "1",
+ }
+ h := Host{
+ Address: "127.0.0.1:31161",
+ Community: "telegraf",
+ Version: 2,
+ Timeout: 2.0,
+ Retries: 2,
+ Collect: []string{"oid1"},
+ GetOids: []string{".1.3.6.1.2.1.2.1.0"},
+ }
+ s := Snmp{
+ SnmptranslateFile: "./testdata/oids.txt",
+ Host: []Host{h},
+ Get: []Data{get1},
+ }
+
+ var acc testutil.Accumulator
+ err := s.Gather(&acc)
+ require.NoError(t, err)
+
+ acc.AssertContainsTaggedFields(t,
+ "ifSpeed",
+ map[string]interface{}{
+ "ifSpeed": uint(10000000),
+ },
+ map[string]string{
+ "unit": "octets",
+ "instance": "1",
+ "host": "127.0.0.1",
+ },
+ )
+
+ acc.AssertContainsTaggedFields(t,
+ "ifNumber",
+ map[string]interface{}{
+ "ifNumber": int(4),
+ },
+ map[string]string{
+ "instance": "0",
+ "host": "127.0.0.1",
+ },
+ )
+}
+
+func TestSNMPEasyGet6(t *testing.T) {
+ h := Host{
+ Address: "127.0.0.1:31161",
+ Community: "telegraf",
+ Version: 2,
+ Timeout: 2.0,
+ Retries: 2,
+ GetOids: []string{"1.3.6.1.2.1.2.1.0"},
+ }
+ s := Snmp{
+ SnmptranslateFile: "./testdata/oids.txt",
+ Host: []Host{h},
+ }
+
+ var acc testutil.Accumulator
+ err := s.Gather(&acc)
+ require.NoError(t, err)
+
+ acc.AssertContainsTaggedFields(t,
+ "ifNumber",
+ map[string]interface{}{
+ "ifNumber": int(4),
+ },
+ map[string]string{
+ "instance": "0",
+ "host": "127.0.0.1",
+ },
+ )
+}
+
+func TestSNMPBulk1(t *testing.T) {
+ bulk1 := Data{
+ Name: "oid1",
+ Unit: "octets",
+ Oid: ".1.3.6.1.2.1.2.2.1.16",
+ MaxRepetition: 2,
+ }
+ h := Host{
+ Address: "127.0.0.1:31161",
+ Community: "telegraf",
+ Version: 2,
+ Timeout: 2.0,
+ Retries: 2,
+ Collect: []string{"oid1"},
+ }
+ s := Snmp{
+ SnmptranslateFile: "./testdata/oids.txt",
+ Host: []Host{h},
+ Bulk: []Data{bulk1},
+ }
+
+ var acc testutil.Accumulator
+ err := s.Gather(&acc)
+ require.NoError(t, err)
+
+ acc.AssertContainsTaggedFields(t,
+ "ifOutOctets",
+ map[string]interface{}{
+ "ifOutOctets": uint(543846),
+ },
+ map[string]string{
+ "unit": "octets",
+ "instance": "1",
+ "host": "127.0.0.1",
+ },
+ )
+
+ acc.AssertContainsTaggedFields(t,
+ "ifOutOctets",
+ map[string]interface{}{
+ "ifOutOctets": uint(26475179),
+ },
+ map[string]string{
+ "unit": "octets",
+ "instance": "2",
+ "host": "127.0.0.1",
+ },
+ )
+
+ acc.AssertContainsTaggedFields(t,
+ "ifOutOctets",
+ map[string]interface{}{
+ "ifOutOctets": uint(108963968),
+ },
+ map[string]string{
+ "unit": "octets",
+ "instance": "3",
+ "host": "127.0.0.1",
+ },
+ )
+
+ acc.AssertContainsTaggedFields(t,
+ "ifOutOctets",
+ map[string]interface{}{
+ "ifOutOctets": uint(12991453),
+ },
+ map[string]string{
+ "unit": "octets",
+ "instance": "36",
+ "host": "127.0.0.1",
+ },
+ )
+}
+
+// TODO: find out why Circle CI dies with the following error
+// when this test is active:
+//   bash scripts/circle-test.sh died unexpectedly
+// Maybe the test takes too long?
+func dTestSNMPBulk2(t *testing.T) {
+ bulk1 := Data{
+ Name: "oid1",
+ Unit: "octets",
+ Oid: "ifOutOctets",
+ MaxRepetition: 2,
+ }
+ h := Host{
+ Address: "127.0.0.1:31161",
+ Community: "telegraf",
+ Version: 2,
+ Timeout: 2.0,
+ Retries: 2,
+ Collect: []string{"oid1"},
+ }
+ s := Snmp{
+ SnmptranslateFile: "./testdata/oids.txt",
+ Host: []Host{h},
+ Bulk: []Data{bulk1},
+ }
+
+ var acc testutil.Accumulator
+ err := s.Gather(&acc)
+ require.NoError(t, err)
+
+ acc.AssertContainsTaggedFields(t,
+ "ifOutOctets",
+ map[string]interface{}{
+ "ifOutOctets": uint(543846),
+ },
+ map[string]string{
+ "unit": "octets",
+ "instance": "1",
+ "host": "127.0.0.1",
+ },
+ )
+
+ acc.AssertContainsTaggedFields(t,
+ "ifOutOctets",
+ map[string]interface{}{
+ "ifOutOctets": uint(26475179),
+ },
+ map[string]string{
+ "unit": "octets",
+ "instance": "2",
+ "host": "127.0.0.1",
+ },
+ )
+
+ acc.AssertContainsTaggedFields(t,
+ "ifOutOctets",
+ map[string]interface{}{
+ "ifOutOctets": uint(108963968),
+ },
+ map[string]string{
+ "unit": "octets",
+ "instance": "3",
+ "host": "127.0.0.1",
+ },
+ )
+
+ acc.AssertContainsTaggedFields(t,
+ "ifOutOctets",
+ map[string]interface{}{
+ "ifOutOctets": uint(12991453),
+ },
+ map[string]string{
+ "unit": "octets",
+ "instance": "36",
+ "host": "127.0.0.1",
+ },
+ )
+}
diff --git a/plugins/inputs/snmp/testdata/oids.txt b/plugins/inputs/snmp/testdata/oids.txt
new file mode 100644
index 000000000..1a351be90
--- /dev/null
+++ b/plugins/inputs/snmp/testdata/oids.txt
@@ -0,0 +1,32 @@
+org 1.3
+dod 1.3.6
+internet 1.3.6.1
+directory 1.3.6.1.1
+mgmt 1.3.6.1.2
+mib-2 1.3.6.1.2.1
+interfaces 1.3.6.1.2.1.2
+ifNumber 1.3.6.1.2.1.2.1
+ifTable 1.3.6.1.2.1.2.2
+ifEntry 1.3.6.1.2.1.2.2.1
+ifIndex 1.3.6.1.2.1.2.2.1.1
+ifDescr 1.3.6.1.2.1.2.2.1.2
+ifType 1.3.6.1.2.1.2.2.1.3
+ifMtu 1.3.6.1.2.1.2.2.1.4
+ifSpeed 1.3.6.1.2.1.2.2.1.5
+ifPhysAddress 1.3.6.1.2.1.2.2.1.6
+ifAdminStatus 1.3.6.1.2.1.2.2.1.7
+ifOperStatus 1.3.6.1.2.1.2.2.1.8
+ifLastChange 1.3.6.1.2.1.2.2.1.9
+ifInOctets 1.3.6.1.2.1.2.2.1.10
+ifInUcastPkts 1.3.6.1.2.1.2.2.1.11
+ifInNUcastPkts 1.3.6.1.2.1.2.2.1.12
+ifInDiscards 1.3.6.1.2.1.2.2.1.13
+ifInErrors 1.3.6.1.2.1.2.2.1.14
+ifInUnknownProtos 1.3.6.1.2.1.2.2.1.15
+ifOutOctets 1.3.6.1.2.1.2.2.1.16
+ifOutUcastPkts 1.3.6.1.2.1.2.2.1.17
+ifOutNUcastPkts 1.3.6.1.2.1.2.2.1.18
+ifOutDiscards 1.3.6.1.2.1.2.2.1.19
+ifOutErrors 1.3.6.1.2.1.2.2.1.20
+ifOutQLen 1.3.6.1.2.1.2.2.1.21
+ifSpecific 1.3.6.1.2.1.2.2.1.22
diff --git a/plugins/inputs/statsd/README.md b/plugins/inputs/statsd/README.md
index 76255f3b0..49b8ff842 100644
--- a/plugins/inputs/statsd/README.md
+++ b/plugins/inputs/statsd/README.md
@@ -157,4 +157,4 @@ mem.cached.localhost:256|g
```
There are many more options available,
-[More details can be found here](https://github.com/influxdb/influxdb/tree/master/services/graphite#templates)
+[More details can be found here](https://github.com/influxdata/influxdb/tree/master/services/graphite#templates)
diff --git a/plugins/inputs/statsd/statsd.go b/plugins/inputs/statsd/statsd.go
index d210b55fa..6b7a427b7 100644
--- a/plugins/inputs/statsd/statsd.go
+++ b/plugins/inputs/statsd/statsd.go
@@ -10,11 +10,13 @@ import (
"strings"
"sync"
- "github.com/influxdb/influxdb/services/graphite"
+ "github.com/influxdata/influxdb/services/graphite"
- "github.com/influxdb/telegraf/plugins/inputs"
+ "github.com/influxdata/telegraf/plugins/inputs"
)
+const UDP_PACKET_SIZE int = 1500
+
var dropwarn = "ERROR: Message queue full. Discarding line [%s] " +
"You may want to increase allowed_pending_messages in the config\n"
@@ -35,11 +37,16 @@ type Statsd struct {
DeleteCounters bool
DeleteSets bool
DeleteTimings bool
+ ConvertNames bool
+
+ // UDPPacketSize is the size of the read packets for the server listening
+ // for statsd UDP packets. This will default to 1500 bytes.
+ UDPPacketSize int `toml:"udp_packet_size"`
sync.Mutex
- // Channel for all incoming statsd messages
- in chan string
+ // Channel for all incoming statsd packets
+ in chan []byte
done chan struct{}
// Cache gauges, counters & sets so they can be aggregated as they arrive
@@ -57,12 +64,15 @@ func NewStatsd() *Statsd {
// Make data structures
s.done = make(chan struct{})
- s.in = make(chan string, s.AllowedPendingMessages)
+ s.in = make(chan []byte, s.AllowedPendingMessages)
s.gauges = make(map[string]cachedgauge)
s.counters = make(map[string]cachedcounter)
s.sets = make(map[string]cachedset)
s.timings = make(map[string]cachedtimings)
+ s.ConvertNames = true
+ s.UDPPacketSize = UDP_PACKET_SIZE
+
return &s
}
@@ -121,6 +131,9 @@ const sampleConfig = `
# Percentiles to calculate for timing & histogram stats
percentiles = [90]
+ # convert measurement names: "." to "_" and "-" to "__"
+ convert_names = true
+
# templates = [
# "cpu.* measurement*"
# ]
@@ -133,6 +146,10 @@ const sampleConfig = `
# calculation of percentiles. Raising this limit increases the accuracy
# of percentiles but also increases the memory usage and cpu time.
percentile_limit = 1000
+
+ # UDP packet size for the server to listen for. This will depend on the size
+ # of the packets that the client is sending, which is usually 1500 bytes.
+ udp_packet_size = 1500
`
func (_ *Statsd) SampleConfig() string {
@@ -185,7 +202,7 @@ func (s *Statsd) Gather(acc inputs.Accumulator) error {
func (s *Statsd) Start() error {
// Make data structures
s.done = make(chan struct{})
- s.in = make(chan string, s.AllowedPendingMessages)
+ s.in = make(chan []byte, s.AllowedPendingMessages)
s.gauges = make(map[string]cachedgauge)
s.counters = make(map[string]cachedcounter)
s.sets = make(map[string]cachedset)
@@ -214,36 +231,37 @@ func (s *Statsd) udpListen() error {
case <-s.done:
return nil
default:
- buf := make([]byte, 1024)
+ buf := make([]byte, s.UDPPacketSize)
n, _, err := listener.ReadFromUDP(buf)
if err != nil {
log.Printf("ERROR: %s\n", err.Error())
}
- lines := strings.Split(string(buf[:n]), "\n")
- for _, line := range lines {
- line = strings.TrimSpace(line)
- if line != "" {
- select {
- case s.in <- line:
- default:
- log.Printf(dropwarn, line)
- }
- }
+ select {
+ case s.in <- buf[:n]:
+ default:
+ log.Printf(dropwarn, string(buf[:n]))
}
}
}
}
-// parser monitors the s.in channel, if there is a line ready, it parses the
-// statsd string into a usable metric struct and aggregates the value
+// parser monitors the s.in channel, if there is a packet ready, it parses the
+// packet into statsd strings and then calls parseStatsdLine, which parses a
+// single statsd metric into a struct.
func (s *Statsd) parser() error {
for {
select {
case <-s.done:
return nil
- case line := <-s.in:
- s.parseStatsdLine(line)
+ case packet := <-s.in:
+ lines := strings.Split(string(packet), "\n")
+ for _, line := range lines {
+ line = strings.TrimSpace(line)
+ if line != "" {
+ s.parseStatsdLine(line)
+ }
+ }
}
}
}
@@ -319,10 +337,15 @@ func (s *Statsd) parseStatsdLine(line string) error {
}
m.floatvalue = v
case "c", "s":
+ var v int64
v, err := strconv.ParseInt(pipesplit[0], 10, 64)
if err != nil {
- log.Printf("Error: parsing value to int64: %s\n", line)
- return errors.New("Error Parsing statsd line")
+ v2, err2 := strconv.ParseFloat(pipesplit[0], 64)
+ if err2 != nil {
+ log.Printf("Error: parsing value to int64: %s\n", line)
+ return errors.New("Error Parsing statsd line")
+ }
+ v = int64(v2)
}
// If a sample rate is given with a counter, divide value by the rate
if m.samplerate != 0 && m.mtype == "c" {
@@ -389,8 +412,10 @@ func (s *Statsd) parseName(bucket string) (string, map[string]string) {
if err == nil {
name, tags, _, _ = p.ApplyTemplate(name)
}
- name = strings.Replace(name, ".", "_", -1)
- name = strings.Replace(name, "-", "__", -1)
+ if s.ConvertNames {
+ name = strings.Replace(name, ".", "_", -1)
+ name = strings.Replace(name, "-", "__", -1)
+ }
return name, tags
}
@@ -491,6 +516,9 @@ func (s *Statsd) Stop() {
func init() {
inputs.Add("statsd", func() inputs.Input {
- return &Statsd{}
+ return &Statsd{
+ ConvertNames: true,
+ UDPPacketSize: UDP_PACKET_SIZE,
+ }
})
}
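
Note on the counter/set change above: values such as `5.0|c` were previously rejected outright; the parser now falls back to float parsing and truncates. A minimal standalone sketch of that fallback (the function and variable names here are illustrative, not part of the plugin):

```go
package main

import (
	"fmt"
	"strconv"
)

// parseCounterValue tries int64 first, then falls back to float64 and
// truncates, mirroring the "c"/"s" branch of parseStatsdLine above.
func parseCounterValue(raw string) (int64, error) {
	v, err := strconv.ParseInt(raw, 10, 64)
	if err != nil {
		f, err2 := strconv.ParseFloat(raw, 64)
		if err2 != nil {
			return 0, fmt.Errorf("could not parse %q as a counter value", raw)
		}
		v = int64(f)
	}
	return v, nil
}

func main() {
	for _, raw := range []string{"100", "5.0", "abc"} {
		v, err := parseCounterValue(raw)
		fmt.Println(raw, "->", v, err)
	}
}
```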
diff --git a/plugins/inputs/statsd/statsd_test.go b/plugins/inputs/statsd/statsd_test.go
index 4a97728f2..6fc1f6933 100644
--- a/plugins/inputs/statsd/statsd_test.go
+++ b/plugins/inputs/statsd/statsd_test.go
@@ -5,7 +5,7 @@ import (
"fmt"
"testing"
- "github.com/influxdb/telegraf/testutil"
+ "github.com/influxdata/telegraf/testutil"
)
// Invalid lines should return an error
@@ -303,6 +303,64 @@ func TestParse_Tags(t *testing.T) {
}
}
+// Test that statsd buckets are parsed to measurement names properly
+func TestParseName(t *testing.T) {
+ s := NewStatsd()
+
+ tests := []struct {
+ in_name string
+ out_name string
+ }{
+ {
+ "foobar",
+ "foobar",
+ },
+ {
+ "foo.bar",
+ "foo_bar",
+ },
+ {
+ "foo.bar-baz",
+ "foo_bar__baz",
+ },
+ }
+
+ for _, test := range tests {
+ name, _ := s.parseName(test.in_name)
+ if name != test.out_name {
+ t.Errorf("Expected: %s, got %s", test.out_name, name)
+ }
+ }
+
+ // Test with ConvertNames = false
+ s.ConvertNames = false
+
+ tests = []struct {
+ in_name string
+ out_name string
+ }{
+ {
+ "foobar",
+ "foobar",
+ },
+ {
+ "foo.bar",
+ "foo.bar",
+ },
+ {
+ "foo.bar-baz",
+ "foo.bar-baz",
+ },
+ }
+
+ for _, test := range tests {
+ name, _ := s.parseName(test.in_name)
+ if name != test.out_name {
+ t.Errorf("Expected: %s, got %s", test.out_name, name)
+ }
+ }
+}
+
// Test that measurements with the same name, but different tags, are treated
// as different outputs
func TestParse_MeasurementsWithSameName(t *testing.T) {
diff --git a/plugins/inputs/system/cpu.go b/plugins/inputs/system/cpu.go
index 298df20bb..95c854b2c 100644
--- a/plugins/inputs/system/cpu.go
+++ b/plugins/inputs/system/cpu.go
@@ -4,7 +4,7 @@ import (
"fmt"
"time"
- "github.com/influxdb/telegraf/plugins/inputs"
+ "github.com/influxdata/telegraf/plugins/inputs"
"github.com/shirou/gopsutil/cpu"
)
diff --git a/plugins/inputs/system/cpu_test.go b/plugins/inputs/system/cpu_test.go
index c85734adc..77d90e2a5 100644
--- a/plugins/inputs/system/cpu_test.go
+++ b/plugins/inputs/system/cpu_test.go
@@ -4,7 +4,7 @@ import (
"fmt"
"testing"
- "github.com/influxdb/telegraf/testutil"
+ "github.com/influxdata/telegraf/testutil"
"github.com/shirou/gopsutil/cpu"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
diff --git a/plugins/inputs/system/disk.go b/plugins/inputs/system/disk.go
index 5d1553dd4..c6b23492b 100644
--- a/plugins/inputs/system/disk.go
+++ b/plugins/inputs/system/disk.go
@@ -3,13 +3,16 @@ package system
import (
"fmt"
- "github.com/influxdb/telegraf/plugins/inputs"
+ "github.com/influxdata/telegraf/plugins/inputs"
)
type DiskStats struct {
ps PS
+ // Legacy support
Mountpoints []string
+
+ MountPoints []string
}
func (_ *DiskStats) Description() string {
@@ -19,7 +22,7 @@ func (_ *DiskStats) Description() string {
var diskSampleConfig = `
# By default, telegraf gather stats for all mountpoints.
# Setting mountpoints will restrict the stats to the specified mountpoints.
- # Mountpoints=["/"]
+ # mount_points = ["/"]
`
func (_ *DiskStats) SampleConfig() string {
@@ -27,25 +30,17 @@ func (_ *DiskStats) SampleConfig() string {
}
func (s *DiskStats) Gather(acc inputs.Accumulator) error {
- disks, err := s.ps.DiskUsage()
+ // Legacy support:
+ if len(s.Mountpoints) != 0 {
+ s.MountPoints = s.Mountpoints
+ }
+
+ disks, err := s.ps.DiskUsage(s.MountPoints)
if err != nil {
return fmt.Errorf("error getting disk usage info: %s", err)
}
- var restrictMpoints bool
- mPoints := make(map[string]bool)
- if len(s.Mountpoints) != 0 {
- restrictMpoints = true
- for _, mp := range s.Mountpoints {
- mPoints[mp] = true
- }
- }
-
for _, du := range disks {
- _, member := mPoints[du.Path]
- if restrictMpoints && !member {
- continue
- }
tags := map[string]string{
"path": du.Path,
"fstype": du.Fstype,
diff --git a/plugins/inputs/system/disk_test.go b/plugins/inputs/system/disk_test.go
index 6ea110fef..ec4182cb3 100644
--- a/plugins/inputs/system/disk_test.go
+++ b/plugins/inputs/system/disk_test.go
@@ -3,7 +3,7 @@ package system
import (
"testing"
- "github.com/influxdb/telegraf/testutil"
+ "github.com/influxdata/telegraf/testutil"
"github.com/shirou/gopsutil/disk"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -15,7 +15,7 @@ func TestDiskStats(t *testing.T) {
var acc testutil.Accumulator
var err error
- du := []*disk.DiskUsageStat{
+ duAll := []*disk.DiskUsageStat{
{
Path: "/",
Fstype: "ext4",
@@ -33,8 +33,20 @@ func TestDiskStats(t *testing.T) {
InodesFree: 468,
},
}
+ duFiltered := []*disk.DiskUsageStat{
+ {
+ Path: "/",
+ Fstype: "ext4",
+ Total: 128,
+ Free: 23,
+ InodesTotal: 1234,
+ InodesFree: 234,
+ },
+ }
- mps.On("DiskUsage").Return(du, nil)
+ mps.On("DiskUsage", []string(nil)).Return(duAll, nil)
+ mps.On("DiskUsage", []string{"/", "/dev"}).Return(duFiltered, nil)
+ mps.On("DiskUsage", []string{"/", "/home"}).Return(duAll, nil)
err = (&DiskStats{ps: &mps}).Gather(&acc)
require.NoError(t, err)
@@ -53,32 +65,32 @@ func TestDiskStats(t *testing.T) {
}
fields1 := map[string]interface{}{
- "total": uint64(128), //tags1)
- "used": uint64(105), //tags1)
- "free": uint64(23), //tags1)
- "inodes_total": uint64(1234), //tags1)
- "inodes_free": uint64(234), //tags1)
- "inodes_used": uint64(1000), //tags1)
+ "total": uint64(128),
+ "used": uint64(105),
+ "free": uint64(23),
+ "inodes_total": uint64(1234),
+ "inodes_free": uint64(234),
+ "inodes_used": uint64(1000),
}
fields2 := map[string]interface{}{
- "total": uint64(256), //tags2)
- "used": uint64(210), //tags2)
- "free": uint64(46), //tags2)
- "inodes_total": uint64(2468), //tags2)
- "inodes_free": uint64(468), //tags2)
- "inodes_used": uint64(2000), //tags2)
+ "total": uint64(256),
+ "used": uint64(210),
+ "free": uint64(46),
+ "inodes_total": uint64(2468),
+ "inodes_free": uint64(468),
+ "inodes_used": uint64(2000),
}
acc.AssertContainsTaggedFields(t, "disk", fields1, tags1)
acc.AssertContainsTaggedFields(t, "disk", fields2, tags2)
// We expect 6 more DiskPoints to show up with an explicit match on "/"
- // and /home not matching the /dev in Mountpoints
- err = (&DiskStats{ps: &mps, Mountpoints: []string{"/", "/dev"}}).Gather(&acc)
+ // and /home not matching the /dev in MountPoints
+ err = (&DiskStats{ps: &mps, MountPoints: []string{"/", "/dev"}}).Gather(&acc)
assert.Equal(t, expectedAllDiskPoints+6, acc.NFields())
- // We should see all the diskpoints as Mountpoints includes both
+ // We should see all the diskpoints as MountPoints includes both
// / and /home
- err = (&DiskStats{ps: &mps, Mountpoints: []string{"/", "/home"}}).Gather(&acc)
+ err = (&DiskStats{ps: &mps, MountPoints: []string{"/", "/home"}}).Gather(&acc)
assert.Equal(t, 2*expectedAllDiskPoints+6, acc.NFields())
}
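
The mock setup above works because testify matches `On(...)` expectations by argument value, so the same method can return different canned data per filter. A minimal sketch of that pattern (the types and return values here are illustrative):

```go
package main

import (
	"fmt"

	"github.com/stretchr/testify/mock"
)

type MockPS struct{ mock.Mock }

func (m *MockPS) DiskUsage(filter []string) []string {
	// Called records the invocation and looks up the matching expectation
	// by comparing arguments, just as mock_PS.go does above.
	return m.Called(filter).Get(0).([]string)
}

func main() {
	var mps MockPS
	// Same method, different canned returns keyed on the argument value.
	mps.On("DiskUsage", []string(nil)).Return([]string{"/", "/home"})
	mps.On("DiskUsage", []string{"/", "/dev"}).Return([]string{"/"})

	fmt.Println(mps.DiskUsage(nil))                    // [/ /home]
	fmt.Println(mps.DiskUsage([]string{"/", "/dev"})) // [/]
}
```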
diff --git a/plugins/inputs/system/docker.go b/plugins/inputs/system/docker.go
deleted file mode 100644
index 3a77fad5f..000000000
--- a/plugins/inputs/system/docker.go
+++ /dev/null
@@ -1,89 +0,0 @@
-// +build linux
-
-package system
-
-import (
- "fmt"
-
- "github.com/influxdb/telegraf/plugins/inputs"
-)
-
-type DockerStats struct {
- ps PS
-}
-
-func (_ *DockerStats) Description() string {
- return "Read metrics about docker containers"
-}
-
-func (_ *DockerStats) SampleConfig() string { return "" }
-
-func (s *DockerStats) Gather(acc inputs.Accumulator) error {
- containers, err := s.ps.DockerStat()
- if err != nil {
- return fmt.Errorf("error getting docker info: %s", err)
- }
-
- for _, cont := range containers {
- tags := map[string]string{
- "id": cont.Id,
- "name": cont.Name,
- "command": cont.Command,
- }
- for k, v := range cont.Labels {
- tags[k] = v
- }
-
- cts := cont.CPU
-
- fields := map[string]interface{}{
- "user": cts.User,
- "system": cts.System,
- "idle": cts.Idle,
- "nice": cts.Nice,
- "iowait": cts.Iowait,
- "irq": cts.Irq,
- "softirq": cts.Softirq,
- "steal": cts.Steal,
- "guest": cts.Guest,
- "guest_nice": cts.GuestNice,
-
- "cache": cont.Mem.Cache,
- "rss": cont.Mem.RSS,
- "rss_huge": cont.Mem.RSSHuge,
- "mapped_file": cont.Mem.MappedFile,
- "swap_in": cont.Mem.Pgpgin,
- "swap_out": cont.Mem.Pgpgout,
- "page_fault": cont.Mem.Pgfault,
- "page_major_fault": cont.Mem.Pgmajfault,
- "inactive_anon": cont.Mem.InactiveAnon,
- "active_anon": cont.Mem.ActiveAnon,
- "inactive_file": cont.Mem.InactiveFile,
- "active_file": cont.Mem.ActiveFile,
- "unevictable": cont.Mem.Unevictable,
- "memory_limit": cont.Mem.HierarchicalMemoryLimit,
- "total_cache": cont.Mem.TotalCache,
- "total_rss": cont.Mem.TotalRSS,
- "total_rss_huge": cont.Mem.TotalRSSHuge,
- "total_mapped_file": cont.Mem.TotalMappedFile,
- "total_swap_in": cont.Mem.TotalPgpgIn,
- "total_swap_out": cont.Mem.TotalPgpgOut,
- "total_page_fault": cont.Mem.TotalPgFault,
- "total_page_major_fault": cont.Mem.TotalPgMajFault,
- "total_inactive_anon": cont.Mem.TotalInactiveAnon,
- "total_active_anon": cont.Mem.TotalActiveAnon,
- "total_inactive_file": cont.Mem.TotalInactiveFile,
- "total_active_file": cont.Mem.TotalActiveFile,
- "total_unevictable": cont.Mem.TotalUnevictable,
- }
- acc.AddFields("docker", fields, tags)
- }
-
- return nil
-}
-
-func init() {
- inputs.Add("docker", func() inputs.Input {
- return &DockerStats{ps: &systemPS{}}
- })
-}
diff --git a/plugins/inputs/system/docker_test.go b/plugins/inputs/system/docker_test.go
deleted file mode 100644
index 9ed06dd3e..000000000
--- a/plugins/inputs/system/docker_test.go
+++ /dev/null
@@ -1,119 +0,0 @@
-// +build linux
-
-package system
-
-import (
- "testing"
-
- "github.com/influxdb/telegraf/testutil"
- "github.com/shirou/gopsutil/cpu"
- "github.com/shirou/gopsutil/docker"
-
- "github.com/stretchr/testify/require"
-)
-
-func TestDockerStats_GenerateStats(t *testing.T) {
- var mps MockPS
- var acc testutil.Accumulator
-
- ds := &DockerContainerStat{
- Name: "blah",
- CPU: &cpu.CPUTimesStat{
- CPU: "all",
- User: 3.1,
- System: 8.2,
- Idle: 80.1,
- Nice: 1.3,
- Iowait: 0.2,
- Irq: 0.1,
- Softirq: 0.11,
- Steal: 0.0001,
- Guest: 8.1,
- GuestNice: 0.324,
- },
- Mem: &docker.CgroupMemStat{
- ContainerID: "blah",
- Cache: 1,
- RSS: 2,
- RSSHuge: 3,
- MappedFile: 4,
- Pgpgin: 5,
- Pgpgout: 6,
- Pgfault: 7,
- Pgmajfault: 8,
- InactiveAnon: 9,
- ActiveAnon: 10,
- InactiveFile: 11,
- ActiveFile: 12,
- Unevictable: 13,
- HierarchicalMemoryLimit: 14,
- TotalCache: 15,
- TotalRSS: 16,
- TotalRSSHuge: 17,
- TotalMappedFile: 18,
- TotalPgpgIn: 19,
- TotalPgpgOut: 20,
- TotalPgFault: 21,
- TotalPgMajFault: 22,
- TotalInactiveAnon: 23,
- TotalActiveAnon: 24,
- TotalInactiveFile: 25,
- TotalActiveFile: 26,
- TotalUnevictable: 27,
- },
- }
-
- mps.On("DockerStat").Return([]*DockerContainerStat{ds}, nil)
-
- err := (&DockerStats{&mps}).Gather(&acc)
- require.NoError(t, err)
-
- dockertags := map[string]string{
- "name": "blah",
- "id": "",
- "command": "",
- }
-
- fields := map[string]interface{}{
- "user": 3.1,
- "system": 8.2,
- "idle": 80.1,
- "nice": 1.3,
- "iowait": 0.2,
- "irq": 0.1,
- "softirq": 0.11,
- "steal": 0.0001,
- "guest": 8.1,
- "guest_nice": 0.324,
-
- "cache": uint64(1),
- "rss": uint64(2),
- "rss_huge": uint64(3),
- "mapped_file": uint64(4),
- "swap_in": uint64(5),
- "swap_out": uint64(6),
- "page_fault": uint64(7),
- "page_major_fault": uint64(8),
- "inactive_anon": uint64(9),
- "active_anon": uint64(10),
- "inactive_file": uint64(11),
- "active_file": uint64(12),
- "unevictable": uint64(13),
- "memory_limit": uint64(14),
- "total_cache": uint64(15),
- "total_rss": uint64(16),
- "total_rss_huge": uint64(17),
- "total_mapped_file": uint64(18),
- "total_swap_in": uint64(19),
- "total_swap_out": uint64(20),
- "total_page_fault": uint64(21),
- "total_page_major_fault": uint64(22),
- "total_inactive_anon": uint64(23),
- "total_active_anon": uint64(24),
- "total_inactive_file": uint64(25),
- "total_active_file": uint64(26),
- "total_unevictable": uint64(27),
- }
-
- acc.AssertContainsTaggedFields(t, "docker", fields, dockertags)
-}
diff --git a/plugins/inputs/system/memory.go b/plugins/inputs/system/memory.go
index f58a8cd92..32a2f2b09 100644
--- a/plugins/inputs/system/memory.go
+++ b/plugins/inputs/system/memory.go
@@ -3,7 +3,7 @@ package system
import (
"fmt"
- "github.com/influxdb/telegraf/plugins/inputs"
+ "github.com/influxdata/telegraf/plugins/inputs"
)
type MemStats struct {
diff --git a/plugins/inputs/system/memory_test.go b/plugins/inputs/system/memory_test.go
index bf461e2e2..0a85bc869 100644
--- a/plugins/inputs/system/memory_test.go
+++ b/plugins/inputs/system/memory_test.go
@@ -3,7 +3,7 @@ package system
import (
"testing"
- "github.com/influxdb/telegraf/testutil"
+ "github.com/influxdata/telegraf/testutil"
"github.com/shirou/gopsutil/mem"
"github.com/stretchr/testify/require"
)
diff --git a/plugins/inputs/system/mock_PS.go b/plugins/inputs/system/mock_PS.go
index 6e8bfe224..6e9a5f93e 100644
--- a/plugins/inputs/system/mock_PS.go
+++ b/plugins/inputs/system/mock_PS.go
@@ -33,8 +33,8 @@ func (m *MockPS) CPUTimes(perCPU, totalCPU bool) ([]cpu.CPUTimesStat, error) {
return r0, r1
}
-func (m *MockPS) DiskUsage() ([]*disk.DiskUsageStat, error) {
- ret := m.Called()
+func (m *MockPS) DiskUsage(mountPointFilter []string) ([]*disk.DiskUsageStat, error) {
+ ret := m.Called(mountPointFilter)
r0 := ret.Get(0).([]*disk.DiskUsageStat)
r1 := ret.Error(1)
@@ -87,15 +87,6 @@ func (m *MockPS) SwapStat() (*mem.SwapMemoryStat, error) {
return r0, r1
}
-func (m *MockPS) DockerStat() ([]*DockerContainerStat, error) {
- ret := m.Called()
-
- r0 := ret.Get(0).([]*DockerContainerStat)
- r1 := ret.Error(1)
-
- return r0, r1
-}
-
func (m *MockPS) NetConnections() ([]net.NetConnectionStat, error) {
ret := m.Called()
diff --git a/plugins/inputs/system/net.go b/plugins/inputs/system/net.go
index 42f0d5854..7f71f5200 100644
--- a/plugins/inputs/system/net.go
+++ b/plugins/inputs/system/net.go
@@ -5,7 +5,7 @@ import (
"net"
"strings"
- "github.com/influxdb/telegraf/plugins/inputs"
+ "github.com/influxdata/telegraf/plugins/inputs"
)
type NetIOStats struct {
diff --git a/plugins/inputs/system/net_test.go b/plugins/inputs/system/net_test.go
index 3ec2cb990..3297acf07 100644
--- a/plugins/inputs/system/net_test.go
+++ b/plugins/inputs/system/net_test.go
@@ -4,7 +4,7 @@ import (
"syscall"
"testing"
- "github.com/influxdb/telegraf/testutil"
+ "github.com/influxdata/telegraf/testutil"
"github.com/shirou/gopsutil/net"
"github.com/stretchr/testify/require"
)
diff --git a/plugins/inputs/system/netstat.go b/plugins/inputs/system/netstat.go
index 71f2a0da6..0fe704ee0 100644
--- a/plugins/inputs/system/netstat.go
+++ b/plugins/inputs/system/netstat.go
@@ -4,7 +4,7 @@ import (
"fmt"
"syscall"
- "github.com/influxdb/telegraf/plugins/inputs"
+ "github.com/influxdata/telegraf/plugins/inputs"
)
type NetStats struct {
diff --git a/plugins/inputs/system/ps.go b/plugins/inputs/system/ps.go
index 966747718..98c9b8b31 100644
--- a/plugins/inputs/system/ps.go
+++ b/plugins/inputs/system/ps.go
@@ -1,39 +1,25 @@
package system
import (
- gonet "net"
"os"
- "strings"
- "github.com/influxdb/telegraf/internal"
- "github.com/influxdb/telegraf/plugins/inputs"
+ "github.com/influxdata/telegraf/internal"
+ "github.com/influxdata/telegraf/plugins/inputs"
- dc "github.com/fsouza/go-dockerclient"
"github.com/shirou/gopsutil/cpu"
"github.com/shirou/gopsutil/disk"
- "github.com/shirou/gopsutil/docker"
"github.com/shirou/gopsutil/mem"
"github.com/shirou/gopsutil/net"
)
-type DockerContainerStat struct {
- Id string
- Name string
- Command string
- Labels map[string]string
- CPU *cpu.CPUTimesStat
- Mem *docker.CgroupMemStat
-}
-
type PS interface {
CPUTimes(perCPU, totalCPU bool) ([]cpu.CPUTimesStat, error)
- DiskUsage() ([]*disk.DiskUsageStat, error)
+ DiskUsage(mountPointFilter []string) ([]*disk.DiskUsageStat, error)
NetIO() ([]net.NetIOCountersStat, error)
NetProto() ([]net.NetProtoCountersStat, error)
DiskIO() (map[string]disk.DiskIOCountersStat, error)
VMStat() (*mem.VirtualMemoryStat, error)
SwapStat() (*mem.SwapMemoryStat, error)
- DockerStat() ([]*DockerContainerStat, error)
NetConnections() ([]net.NetConnectionStat, error)
}
@@ -44,9 +30,7 @@ func add(acc inputs.Accumulator,
}
}
-type systemPS struct {
- dockerClient *dc.Client
-}
+type systemPS struct{}
func (s *systemPS) CPUTimes(perCPU, totalCPU bool) ([]cpu.CPUTimesStat, error) {
var cpuTimes []cpu.CPUTimesStat
@@ -67,15 +51,31 @@ func (s *systemPS) CPUTimes(perCPU, totalCPU bool) ([]cpu.CPUTimesStat, error) {
return cpuTimes, nil
}
-func (s *systemPS) DiskUsage() ([]*disk.DiskUsageStat, error) {
+func (s *systemPS) DiskUsage(
+ mountPointFilter []string,
+) ([]*disk.DiskUsageStat, error) {
parts, err := disk.DiskPartitions(true)
if err != nil {
return nil, err
}
+ // Make a "set" out of the filter slice
+ filterSet := make(map[string]bool)
+ for _, filter := range mountPointFilter {
+ filterSet[filter] = true
+ }
+
var usage []*disk.DiskUsageStat
for _, p := range parts {
+ if len(mountPointFilter) > 0 {
+ // If the mount point is not a member of the filter set,
+ // don't gather info on it.
+ _, ok := filterSet[p.Mountpoint]
+ if !ok {
+ continue
+ }
+ }
if _, err := os.Stat(p.Mountpoint); err == nil {
du, err := disk.DiskUsage(p.Mountpoint)
if err != nil {
@@ -117,52 +117,3 @@ func (s *systemPS) VMStat() (*mem.VirtualMemoryStat, error) {
func (s *systemPS) SwapStat() (*mem.SwapMemoryStat, error) {
return mem.SwapMemory()
}
-
-func (s *systemPS) DockerStat() ([]*DockerContainerStat, error) {
- if s.dockerClient == nil {
- c, err := dc.NewClient("unix:///var/run/docker.sock")
- if err != nil {
- return nil, err
- }
-
- s.dockerClient = c
- }
-
- opts := dc.ListContainersOptions{}
-
- containers, err := s.dockerClient.ListContainers(opts)
- if err != nil {
- if _, ok := err.(*gonet.OpError); ok {
- return nil, nil
- }
-
- return nil, err
- }
-
- var stats []*DockerContainerStat
-
- for _, container := range containers {
- ctu, err := docker.CgroupCPUDocker(container.ID)
- if err != nil {
- return nil, err
- }
-
- mem, err := docker.CgroupMemDocker(container.ID)
- if err != nil {
- return nil, err
- }
-
- name := strings.Join(container.Names, " ")
-
- stats = append(stats, &DockerContainerStat{
- Id: container.ID,
- Name: name,
- Command: container.Command,
- Labels: container.Labels,
- CPU: ctu,
- Mem: mem,
- })
- }
-
- return stats, nil
-}
diff --git a/plugins/inputs/system/system.go b/plugins/inputs/system/system.go
index 813ab84f5..4a0a76d48 100644
--- a/plugins/inputs/system/system.go
+++ b/plugins/inputs/system/system.go
@@ -8,7 +8,7 @@ import (
"github.com/shirou/gopsutil/host"
"github.com/shirou/gopsutil/load"
- "github.com/influxdb/telegraf/plugins/inputs"
+ "github.com/influxdata/telegraf/plugins/inputs"
)
type SystemStats struct{}
diff --git a/plugins/inputs/trig/trig.go b/plugins/inputs/trig/trig.go
index 13c44e247..604f9734a 100644
--- a/plugins/inputs/trig/trig.go
+++ b/plugins/inputs/trig/trig.go
@@ -3,7 +3,7 @@ package trig
import (
"math"
- "github.com/influxdb/telegraf/plugins/inputs"
+ "github.com/influxdata/telegraf/plugins/inputs"
)
type Trig struct {
diff --git a/plugins/inputs/trig/trig_test.go b/plugins/inputs/trig/trig_test.go
index 82605b0a5..1471edbea 100644
--- a/plugins/inputs/trig/trig_test.go
+++ b/plugins/inputs/trig/trig_test.go
@@ -4,7 +4,7 @@ import (
"math"
"testing"
- "github.com/influxdb/telegraf/testutil"
+ "github.com/influxdata/telegraf/testutil"
)
func TestTrig(t *testing.T) {
diff --git a/plugins/inputs/twemproxy/twemproxy.go b/plugins/inputs/twemproxy/twemproxy.go
index 95c9d0ba0..6dcce8058 100644
--- a/plugins/inputs/twemproxy/twemproxy.go
+++ b/plugins/inputs/twemproxy/twemproxy.go
@@ -7,7 +7,7 @@ import (
"net"
"time"
- "github.com/influxdb/telegraf/plugins/inputs"
+ "github.com/influxdata/telegraf/plugins/inputs"
)
type Twemproxy struct {
diff --git a/plugins/inputs/twemproxy/twemproxy_test.go b/plugins/inputs/twemproxy/twemproxy_test.go
index 60209d1a1..dd79048e0 100644
--- a/plugins/inputs/twemproxy/twemproxy_test.go
+++ b/plugins/inputs/twemproxy/twemproxy_test.go
@@ -5,7 +5,7 @@ import (
"net"
"testing"
- "github.com/influxdb/telegraf/testutil"
+ "github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/require"
)
diff --git a/plugins/inputs/zfs/zfs.go b/plugins/inputs/zfs/zfs.go
index 109b261f8..13f2d9806 100644
--- a/plugins/inputs/zfs/zfs.go
+++ b/plugins/inputs/zfs/zfs.go
@@ -6,8 +6,8 @@ import (
"strconv"
"strings"
- "github.com/influxdb/telegraf/internal"
- "github.com/influxdb/telegraf/plugins/inputs"
+ "github.com/influxdata/telegraf/internal"
+ "github.com/influxdata/telegraf/plugins/inputs"
)
type Zfs struct {
diff --git a/plugins/inputs/zfs/zfs_test.go b/plugins/inputs/zfs/zfs_test.go
index 9530084d0..e40d91c02 100644
--- a/plugins/inputs/zfs/zfs_test.go
+++ b/plugins/inputs/zfs/zfs_test.go
@@ -5,7 +5,7 @@ import (
"os"
"testing"
- "github.com/influxdb/telegraf/testutil"
+ "github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/require"
)
diff --git a/plugins/inputs/zookeeper/zookeeper.go b/plugins/inputs/zookeeper/zookeeper.go
index 93a07840d..c2940f5e3 100644
--- a/plugins/inputs/zookeeper/zookeeper.go
+++ b/plugins/inputs/zookeeper/zookeeper.go
@@ -10,7 +10,7 @@ import (
"strings"
"time"
- "github.com/influxdb/telegraf/plugins/inputs"
+ "github.com/influxdata/telegraf/plugins/inputs"
)
// Zookeeper is a zookeeper plugin
diff --git a/plugins/inputs/zookeeper/zookeeper_test.go b/plugins/inputs/zookeeper/zookeeper_test.go
index 354382ecc..bc02ffb9d 100644
--- a/plugins/inputs/zookeeper/zookeeper_test.go
+++ b/plugins/inputs/zookeeper/zookeeper_test.go
@@ -3,7 +3,7 @@ package zookeeper
import (
"testing"
- "github.com/influxdb/telegraf/testutil"
+ "github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
diff --git a/plugins/outputs/all/all.go b/plugins/outputs/all/all.go
index 8a0d24f94..ac8357c90 100644
--- a/plugins/outputs/all/all.go
+++ b/plugins/outputs/all/all.go
@@ -1,16 +1,18 @@
package all
import (
- _ "github.com/influxdb/telegraf/plugins/outputs/amon"
- _ "github.com/influxdb/telegraf/plugins/outputs/amqp"
- _ "github.com/influxdb/telegraf/plugins/outputs/datadog"
- _ "github.com/influxdb/telegraf/plugins/outputs/influxdb"
- _ "github.com/influxdb/telegraf/plugins/outputs/kafka"
- _ "github.com/influxdb/telegraf/plugins/outputs/kinesis"
- _ "github.com/influxdb/telegraf/plugins/outputs/librato"
- _ "github.com/influxdb/telegraf/plugins/outputs/mqtt"
- _ "github.com/influxdb/telegraf/plugins/outputs/nsq"
- _ "github.com/influxdb/telegraf/plugins/outputs/opentsdb"
- _ "github.com/influxdb/telegraf/plugins/outputs/prometheus_client"
- _ "github.com/influxdb/telegraf/plugins/outputs/riemann"
+ _ "github.com/influxdata/telegraf/plugins/outputs/amon"
+ _ "github.com/influxdata/telegraf/plugins/outputs/amqp"
+ _ "github.com/influxdata/telegraf/plugins/outputs/cloudwatch"
+ _ "github.com/influxdata/telegraf/plugins/outputs/datadog"
+ _ "github.com/influxdata/telegraf/plugins/outputs/graphite"
+ _ "github.com/influxdata/telegraf/plugins/outputs/influxdb"
+ _ "github.com/influxdata/telegraf/plugins/outputs/kafka"
+ _ "github.com/influxdata/telegraf/plugins/outputs/kinesis"
+ _ "github.com/influxdata/telegraf/plugins/outputs/librato"
+ _ "github.com/influxdata/telegraf/plugins/outputs/mqtt"
+ _ "github.com/influxdata/telegraf/plugins/outputs/nsq"
+ _ "github.com/influxdata/telegraf/plugins/outputs/opentsdb"
+ _ "github.com/influxdata/telegraf/plugins/outputs/prometheus_client"
+ _ "github.com/influxdata/telegraf/plugins/outputs/riemann"
)
diff --git a/plugins/outputs/amon/amon.go b/plugins/outputs/amon/amon.go
index d8fceb035..e9f2c9f30 100644
--- a/plugins/outputs/amon/amon.go
+++ b/plugins/outputs/amon/amon.go
@@ -8,9 +8,9 @@ import (
"net/http"
"strings"
- "github.com/influxdb/influxdb/client/v2"
- "github.com/influxdb/telegraf/internal"
- "github.com/influxdb/telegraf/plugins/outputs"
+ "github.com/influxdata/influxdb/client/v2"
+ "github.com/influxdata/telegraf/internal"
+ "github.com/influxdata/telegraf/plugins/outputs"
)
type Amon struct {
diff --git a/plugins/outputs/amon/amon_test.go b/plugins/outputs/amon/amon_test.go
index cfe4e9f23..b725bab9e 100644
--- a/plugins/outputs/amon/amon_test.go
+++ b/plugins/outputs/amon/amon_test.go
@@ -6,9 +6,9 @@ import (
"testing"
"time"
- "github.com/influxdb/telegraf/testutil"
+ "github.com/influxdata/telegraf/testutil"
- "github.com/influxdb/influxdb/client/v2"
+ "github.com/influxdata/influxdb/client/v2"
)
func TestBuildPoint(t *testing.T) {
diff --git a/plugins/outputs/amqp/amqp.go b/plugins/outputs/amqp/amqp.go
index 6f0e0fde3..bdbf47b86 100644
--- a/plugins/outputs/amqp/amqp.go
+++ b/plugins/outputs/amqp/amqp.go
@@ -2,13 +2,16 @@ package amqp
import (
"bytes"
+ "crypto/tls"
+ "crypto/x509"
"fmt"
+ "io/ioutil"
"log"
"sync"
"time"
- "github.com/influxdb/influxdb/client/v2"
- "github.com/influxdb/telegraf/plugins/outputs"
+ "github.com/influxdata/influxdb/client/v2"
+ "github.com/influxdata/telegraf/plugins/outputs"
"github.com/streadway/amqp"
)
@@ -17,6 +20,12 @@ type AMQP struct {
URL string
// AMQP exchange
Exchange string
+ // path to CA file
+ SslCa string
+ // path to host cert file
+ SslCert string
+ // path to cert key file
+ SslKey string
// Routing Key Tag
RoutingTag string `toml:"routing_tag"`
// InfluxDB database
@@ -46,6 +55,11 @@ var sampleConfig = `
# ie, if this tag exists, it's value will be used as the routing key
routing_tag = "host"
+ # Use ssl
+ #ssl_ca = "/etc/telegraf/ca.pem"
+ #ssl_cert = "/etc/telegraf/cert.pem"
+ #ssl_key = "/etc/telegraf/key.pem"
+
# InfluxDB retention policy
#retention_policy = "default"
# InfluxDB database
@@ -64,7 +78,32 @@ func (q *AMQP) Connect() error {
"retention_policy": q.RetentionPolicy,
}
- connection, err := amqp.Dial(q.URL)
+ var connection *amqp.Connection
+ var err error
+ if q.SslCert != "" && q.SslKey != "" {
+ // make new tls config
+ cfg := new(tls.Config)
+ if q.SslCa != "" {
+ // create ca pool
+ cfg.RootCAs = x509.NewCertPool()
+
+ // add self-signed cert
+ if ca, err := ioutil.ReadFile(q.SslCa); err == nil {
+ cfg.RootCAs.AppendCertsFromPEM(ca)
+ } else {
+ log.Println(err)
+ }
+ }
+ if cert, err := tls.LoadX509KeyPair(q.SslCert, q.SslKey); err == nil {
+ cfg.Certificates = append(cfg.Certificates, cert)
+ } else {
+ log.Println(err)
+ }
+ connection, err = amqp.DialTLS(q.URL, cfg)
+
+ } else {
+ connection, err = amqp.Dial(q.URL)
+ }
if err != nil {
return err
}
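
A condensed, standalone sketch of the TLS setup introduced above, with errors returned rather than logged; the file paths are the illustrative ones from the sample config:

```go
package main

import (
	"crypto/tls"
	"crypto/x509"
	"io/ioutil"
	"log"
)

// loadTLS builds an optional CA pool plus a client key pair, mirroring the
// Connect change above.
func loadTLS(caFile, certFile, keyFile string) (*tls.Config, error) {
	cfg := &tls.Config{}
	if caFile != "" {
		ca, err := ioutil.ReadFile(caFile)
		if err != nil {
			return nil, err
		}
		cfg.RootCAs = x509.NewCertPool()
		cfg.RootCAs.AppendCertsFromPEM(ca)
	}
	cert, err := tls.LoadX509KeyPair(certFile, keyFile)
	if err != nil {
		return nil, err
	}
	cfg.Certificates = append(cfg.Certificates, cert)
	return cfg, nil
}

func main() {
	cfg, err := loadTLS("/etc/telegraf/ca.pem", "/etc/telegraf/cert.pem", "/etc/telegraf/key.pem")
	if err != nil {
		log.Fatal(err)
	}
	_ = cfg // pass to amqp.DialTLS(url, cfg)
}
```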
diff --git a/plugins/outputs/amqp/amqp_test.go b/plugins/outputs/amqp/amqp_test.go
index 4c6a9a8d3..a65634cab 100644
--- a/plugins/outputs/amqp/amqp_test.go
+++ b/plugins/outputs/amqp/amqp_test.go
@@ -3,7 +3,7 @@ package amqp
import (
"testing"
- "github.com/influxdb/telegraf/testutil"
+ "github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/require"
)
diff --git a/plugins/outputs/cloudwatch/README.md b/plugins/outputs/cloudwatch/README.md
new file mode 100644
index 000000000..853d038c3
--- /dev/null
+++ b/plugins/outputs/cloudwatch/README.md
@@ -0,0 +1,33 @@
+## Amazon CloudWatch Output for Telegraf
+
+This plugin will send points to Amazon CloudWatch.
+
+## Amazon Authentication
+
+This plugin uses a credential chain for authentication with the CloudWatch
+API endpoint. The plugin will attempt to authenticate in the following order:
+1. [IAM Role](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html)
+2. [Environment Variables](https://github.com/aws/aws-sdk-go/wiki/configuring-sdk)
+3. [Shared Credentials](https://github.com/aws/aws-sdk-go/wiki/configuring-sdk)
+
+## Config
+
+For this output plugin to function correctly, the following variables
+must be configured.
+
+* region
+* namespace
+
+### region
+
+The region is the Amazon region that you wish to connect to.
+Examples include but are not limited to:
+* us-west-1
+* us-west-2
+* us-east-1
+* ap-southeast-1
+* ap-southeast-2
+
+### namespace
+
+The namespace used for AWS CloudWatch metrics.
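
A minimal sketch of constructing the output directly in Go; the region and namespace values are illustrative, and in practice Telegraf populates the struct from the TOML config:

```go
package main

import (
	"log"

	"github.com/influxdata/telegraf/plugins/outputs/cloudwatch"
)

func main() {
	c := &cloudwatch.CloudWatch{
		Region:    "us-west-2",
		Namespace: "InfluxData/Telegraf",
	}
	// Connect issues a read-only ListMetrics call to verify credentials.
	if err := c.Connect(); err != nil {
		log.Fatal(err)
	}
	defer c.Close()
}
```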
diff --git a/plugins/outputs/cloudwatch/cloudwatch.go b/plugins/outputs/cloudwatch/cloudwatch.go
new file mode 100644
index 000000000..1e20836da
--- /dev/null
+++ b/plugins/outputs/cloudwatch/cloudwatch.go
@@ -0,0 +1,236 @@
+package cloudwatch
+
+import (
+ "log"
+ "math"
+ "sort"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
+ "github.com/aws/aws-sdk-go/aws/ec2metadata"
+ "github.com/aws/aws-sdk-go/aws/session"
+ "github.com/aws/aws-sdk-go/service/cloudwatch"
+
+ "github.com/influxdata/influxdb/client/v2"
+ "github.com/influxdata/telegraf/plugins/outputs"
+)
+
+type CloudWatch struct {
+ Region string // AWS Region
+ Namespace string // CloudWatch Metrics Namespace
+ svc *cloudwatch.CloudWatch
+}
+
+var sampleConfig = `
+ # Amazon REGION
+ region = 'us-east-1'
+
+ # Namespace for the CloudWatch MetricDatums
+ namespace = 'InfluxData/Telegraf'
+`
+
+func (c *CloudWatch) SampleConfig() string {
+ return sampleConfig
+}
+
+func (c *CloudWatch) Description() string {
+ return "Configuration for AWS CloudWatch output."
+}
+
+func (c *CloudWatch) Connect() error {
+ Config := &aws.Config{
+ Region: aws.String(c.Region),
+ Credentials: credentials.NewChainCredentials(
+ []credentials.Provider{
+ &ec2rolecreds.EC2RoleProvider{Client: ec2metadata.New(session.New())},
+ &credentials.EnvProvider{},
+ &credentials.SharedCredentialsProvider{},
+ }),
+ }
+
+ svc := cloudwatch.New(session.New(Config))
+
+ params := &cloudwatch.ListMetricsInput{
+ Namespace: aws.String(c.Namespace),
+ }
+
+ _, err := svc.ListMetrics(params) // Try a read-only call to test connection.
+
+ if err != nil {
+ log.Printf("cloudwatch: Error in ListMetrics API call : %+v \n", err.Error())
+ }
+
+ c.svc = svc
+
+ return err
+}
+
+func (c *CloudWatch) Close() error {
+ return nil
+}
+
+func (c *CloudWatch) Write(points []*client.Point) error {
+ for _, pt := range points {
+ err := c.WriteSinglePoint(pt)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// Write data for a single point. A point can have many fields and one field
+// is equal to one MetricDatum. There is a limit on how many MetricDatums a
+// request can have so we process one Point at a time.
+func (c *CloudWatch) WriteSinglePoint(point *client.Point) error {
+ datums := BuildMetricDatum(point)
+
+ const maxDatumsPerCall = 20 // PutMetricData only supports up to 20 data points per call
+
+ for _, partition := range PartitionDatums(maxDatumsPerCall, datums) {
+ err := c.WriteToCloudWatch(partition)
+
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (c *CloudWatch) WriteToCloudWatch(datums []*cloudwatch.MetricDatum) error {
+ params := &cloudwatch.PutMetricDataInput{
+ MetricData: datums,
+ Namespace: aws.String(c.Namespace),
+ }
+
+ _, err := c.svc.PutMetricData(params)
+
+ if err != nil {
+ log.Printf("CloudWatch: Unable to write to CloudWatch : %+v \n", err.Error())
+ }
+
+ return err
+}
+
+// Partition the MetricDatums into smaller slices of a max size so that they are under the limit
+// for the AWS API calls.
+func PartitionDatums(size int, datums []*cloudwatch.MetricDatum) [][]*cloudwatch.MetricDatum {
+
+ numberOfPartitions := len(datums) / size
+ if len(datums)%size != 0 {
+ numberOfPartitions += 1
+ }
+
+ partitions := make([][]*cloudwatch.MetricDatum, numberOfPartitions)
+
+ for i := 0; i < numberOfPartitions; i++ {
+ start := size * i
+ end := size * (i + 1)
+ if end > len(datums) {
+ end = len(datums)
+ }
+
+ partitions[i] = datums[start:end]
+ }
+
+ return partitions
+}
+
+// Make a MetricDatum for each field in a Point. Only fields with values that can be
+// converted to float64 are supported. Unsupported fields are skipped.
+func BuildMetricDatum(point *client.Point) []*cloudwatch.MetricDatum {
+ datums := make([]*cloudwatch.MetricDatum, len(point.Fields()))
+ i := 0
+
+ var value float64
+
+ for k, v := range point.Fields() {
+ switch t := v.(type) {
+ case int:
+ value = float64(t)
+ case int32:
+ value = float64(t)
+ case int64:
+ value = float64(t)
+ case float64:
+ value = t
+ case bool:
+ if t {
+ value = 1
+ } else {
+ value = 0
+ }
+ case time.Time:
+ value = float64(t.Unix())
+ default:
+ // Skip unsupported type.
+ datums = datums[:len(datums)-1]
+ continue
+ }
+
+ datums[i] = &cloudwatch.MetricDatum{
+ MetricName: aws.String(strings.Join([]string{point.Name(), k}, "_")),
+ Value: aws.Float64(value),
+ Dimensions: BuildDimensions(point.Tags()),
+ Timestamp: aws.Time(point.Time()),
+ }
+
+ i += 1
+ }
+
+ return datums
+}
+
+// Make a list of Dimensions by using a Point's tags. CloudWatch supports up to
+// 10 dimensions per metric so we only keep up to the first 10 alphabetically.
+// This always includes the "host" tag if it exists.
+func BuildDimensions(ptTags map[string]string) []*cloudwatch.Dimension {
+
+ const MaxDimensions = 10
+ dimensions := make([]*cloudwatch.Dimension, int(math.Min(float64(len(ptTags)), MaxDimensions)))
+
+ i := 0
+
+ // This is pretty ugly but we always want to include the "host" tag if it exists.
+ if host, ok := ptTags["host"]; ok {
+ dimensions[i] = &cloudwatch.Dimension{
+ Name: aws.String("host"),
+ Value: aws.String(host),
+ }
+ i += 1
+ }
+
+ var keys []string
+ for k := range ptTags {
+ if k != "host" {
+ keys = append(keys, k)
+ }
+ }
+ sort.Strings(keys)
+
+ for _, k := range keys {
+ if i >= MaxDimensions {
+ break
+ }
+
+ dimensions[i] = &cloudwatch.Dimension{
+ Name: aws.String(k),
+ Value: aws.String(ptTags[k]),
+ }
+
+ i += 1
+ }
+
+ return dimensions
+}
+
+func init() {
+ outputs.Add("cloudwatch", func() outputs.Output {
+ return &CloudWatch{}
+ })
+}
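
`PartitionDatums` keeps each `PutMetricData` call under the 20-datum API limit. A tiny standalone sketch of the partition arithmetic, showing that, for example, 45 datums split into 20 + 20 + 5 (the helper name is illustrative):

```go
package main

import "fmt"

// partitionSizes reports the sizes PartitionDatums would produce for n
// datums with the given partition size.
func partitionSizes(n, size int) []int {
	parts := n / size
	if n%size != 0 {
		parts++
	}
	sizes := make([]int, parts)
	for i := range sizes {
		s := size
		if r := n - i*size; r < size {
			s = r
		}
		sizes[i] = s
	}
	return sizes
}

func main() {
	fmt.Println(partitionSizes(45, 20)) // [20 20 5]
}
```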
diff --git a/plugins/outputs/cloudwatch/cloudwatch_test.go b/plugins/outputs/cloudwatch/cloudwatch_test.go
new file mode 100644
index 000000000..2041e14fd
--- /dev/null
+++ b/plugins/outputs/cloudwatch/cloudwatch_test.go
@@ -0,0 +1,88 @@
+package cloudwatch
+
+import (
+ "sort"
+ "testing"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/service/cloudwatch"
+
+ "github.com/influxdata/influxdb/client/v2"
+ "github.com/influxdata/telegraf/testutil"
+
+ "github.com/stretchr/testify/assert"
+)
+
+// Test that each tag becomes one dimension
+func TestBuildDimensions(t *testing.T) {
+ const MaxDimensions = 10
+
+ assert := assert.New(t)
+
+ testPoint := testutil.TestPoint(1)
+ dimensions := BuildDimensions(testPoint.Tags())
+
+ tagKeys := make([]string, len(testPoint.Tags()))
+ i := 0
+ for k, _ := range testPoint.Tags() {
+ tagKeys[i] = k
+ i += 1
+ }
+
+ sort.Strings(tagKeys)
+
+ if len(testPoint.Tags()) >= MaxDimensions {
+ assert.Equal(MaxDimensions, len(dimensions), "Number of dimensions should be capped at MaxDimensions")
+ } else {
+ assert.Equal(len(testPoint.Tags()), len(dimensions), "Number of dimensions should be equal to number of tags")
+ }
+
+ for i, key := range tagKeys {
+ if i >= 10 {
+ break
+ }
+ assert.Equal(key, *dimensions[i].Name, "Key should be equal")
+ assert.Equal(testPoint.Tags()[key], *dimensions[i].Value, "Value should be equal")
+ }
+}
+
+// Test that points with valid values have a MetricDatum created, whereas invalid ones do not.
+// Skips the "time.Time" type, as something is converting the value to a string.
+func TestBuildMetricDatums(t *testing.T) {
+ assert := assert.New(t)
+
+ validPoints := []*client.Point{
+ testutil.TestPoint(1),
+ testutil.TestPoint(int32(1)),
+ testutil.TestPoint(int64(1)),
+ testutil.TestPoint(float64(1)),
+ testutil.TestPoint(true),
+ }
+
+ for _, point := range validPoints {
+ datums := BuildMetricDatum(point)
+ assert.Equal(1, len(datums), "Valid type should create a Datum")
+ }
+
+ nonValidPoint := testutil.TestPoint("Foo")
+
+ assert.Equal(0, len(BuildMetricDatum(nonValidPoint)), "Invalid type should not create a Datum")
+}
+
+func TestPartitionDatums(t *testing.T) {
+
+ assert := assert.New(t)
+
+ testDatum := cloudwatch.MetricDatum{
+ MetricName: aws.String("Foo"),
+ Value: aws.Float64(1),
+ }
+
+ oneDatum := []*cloudwatch.MetricDatum{&testDatum}
+ twoDatum := []*cloudwatch.MetricDatum{&testDatum, &testDatum}
+ threeDatum := []*cloudwatch.MetricDatum{&testDatum, &testDatum, &testDatum}
+
+ assert.Equal([][]*cloudwatch.MetricDatum{oneDatum}, PartitionDatums(2, oneDatum))
+ assert.Equal([][]*cloudwatch.MetricDatum{twoDatum}, PartitionDatums(2, twoDatum))
+ assert.Equal([][]*cloudwatch.MetricDatum{twoDatum, oneDatum}, PartitionDatums(2, threeDatum))
+}
diff --git a/plugins/outputs/datadog/datadog.go b/plugins/outputs/datadog/datadog.go
index 4231b1f28..7d6539789 100644
--- a/plugins/outputs/datadog/datadog.go
+++ b/plugins/outputs/datadog/datadog.go
@@ -10,9 +10,9 @@ import (
"sort"
"strings"
- "github.com/influxdb/influxdb/client/v2"
- "github.com/influxdb/telegraf/internal"
- "github.com/influxdb/telegraf/plugins/outputs"
+ "github.com/influxdata/influxdb/client/v2"
+ "github.com/influxdata/telegraf/internal"
+ "github.com/influxdata/telegraf/plugins/outputs"
)
type Datadog struct {
diff --git a/plugins/outputs/datadog/datadog_test.go b/plugins/outputs/datadog/datadog_test.go
index fe0b7c1fe..968a8e9c8 100644
--- a/plugins/outputs/datadog/datadog_test.go
+++ b/plugins/outputs/datadog/datadog_test.go
@@ -9,9 +9,9 @@ import (
"testing"
"time"
- "github.com/influxdb/telegraf/testutil"
+ "github.com/influxdata/telegraf/testutil"
- "github.com/influxdb/influxdb/client/v2"
+ "github.com/influxdata/influxdb/client/v2"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
diff --git a/plugins/outputs/graphite/README.md b/plugins/outputs/graphite/README.md
new file mode 100644
index 000000000..48313a886
--- /dev/null
+++ b/plugins/outputs/graphite/README.md
@@ -0,0 +1,13 @@
+# Graphite Output Plugin
+
+This plugin writes to [Graphite](http://graphite.readthedocs.org/en/latest/index.html) via raw TCP.
+
+Parameters:
+
+ Servers []string
+ Prefix string
+ Timeout int
+
+* `servers`: List of strings, e.g. ["mygraphiteserver:2003"].
+* `prefix`: String used to prefix all sent metrics.
+* `timeout`: Connection timeout in seconds.
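
A minimal sketch of the same configuration expressed directly against the plugin struct; the values are illustrative, and Telegraf normally fills these fields from TOML:

```go
package main

import (
	"log"

	"github.com/influxdata/telegraf/plugins/outputs/graphite"
)

func main() {
	// Defaults in the plugin source are localhost:2003 and a 2 second timeout.
	g := &graphite.Graphite{
		Servers: []string{"mygraphiteserver:2003"},
		Prefix:  "telegraf",
		Timeout: 2,
	}
	if err := g.Connect(); err != nil {
		log.Fatal(err)
	}
	defer g.Close()
}
```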
diff --git a/plugins/outputs/graphite/graphite.go b/plugins/outputs/graphite/graphite.go
new file mode 100644
index 000000000..f9781041f
--- /dev/null
+++ b/plugins/outputs/graphite/graphite.go
@@ -0,0 +1,134 @@
+package graphite
+
+import (
+ "errors"
+ "fmt"
+ "github.com/influxdata/influxdb/client/v2"
+ "github.com/influxdata/telegraf/plugins/outputs"
+ "log"
+ "math/rand"
+ "net"
+ "strings"
+ "time"
+)
+
+type Graphite struct {
+ // URL is only for backwards compatibility
+ Servers []string
+ Prefix string
+ Timeout int
+ conns []net.Conn
+}
+
+var sampleConfig = `
+ # TCP endpoint for your graphite instance.
+ servers = ["localhost:2003"]
+ # Prefix metrics name
+ prefix = ""
+ # timeout in seconds for the write connection to graphite
+ timeout = 2
+`
+
+func (g *Graphite) Connect() error {
+ // Set default values
+ if g.Timeout <= 0 {
+ g.Timeout = 2
+ }
+ if len(g.Servers) == 0 {
+ g.Servers = append(g.Servers, "localhost:2003")
+ }
+ // Get Connections
+ var conns []net.Conn
+ for _, server := range g.Servers {
+ conn, err := net.DialTimeout("tcp", server, time.Duration(g.Timeout)*time.Second)
+ if err == nil {
+ conns = append(conns, conn)
+ }
+ }
+ g.conns = conns
+ return nil
+}
+
+func (g *Graphite) Close() error {
+ // Closing all connections
+ for _, conn := range g.conns {
+ conn.Close()
+ }
+ return nil
+}
+
+func (g *Graphite) SampleConfig() string {
+ return sampleConfig
+}
+
+func (g *Graphite) Description() string {
+ return "Configuration for Graphite server to send metrics to"
+}
+
+// Choose a random server in the cluster to write to until a successful write
+// occurs, logging each unsuccessful attempt. If all servers fail, return an error.
+func (g *Graphite) Write(points []*client.Point) error {
+ // Prepare data
+ var bp []string
+ for _, point := range points {
+ // Get name
+ name := point.Name()
+ // Convert UnixNano to Unix timestamps
+ timestamp := point.UnixNano() / 1000000000
+
+ for field_name, value := range point.Fields() {
+ // Convert value
+ value_str := fmt.Sprintf("%#v", value)
+ // Write graphite point
+ var graphitePoint string
+ if name == field_name {
+ graphitePoint = fmt.Sprintf("%s.%s %s %d\n",
+ strings.Replace(point.Tags()["host"], ".", "_", -1),
+ strings.Replace(name, ".", "_", -1),
+ value_str,
+ timestamp)
+ } else {
+ graphitePoint = fmt.Sprintf("%s.%s.%s %s %d\n",
+ strings.Replace(point.Tags()["host"], ".", "_", -1),
+ strings.Replace(name, ".", "_", -1),
+ strings.Replace(field_name, ".", "_", -1),
+ value_str,
+ timestamp)
+ }
+ if g.Prefix != "" {
+ graphitePoint = fmt.Sprintf("%s.%s", g.Prefix, graphitePoint)
+ }
+ bp = append(bp, graphitePoint)
+ //fmt.Printf(graphitePoint)
+ }
+ }
+ graphitePoints := strings.Join(bp, "")
+
+ // This will get set to nil if a successful write occurs
+ err := errors.New("Could not write to any Graphite server in cluster\n")
+
+ // Send data to a random server
+ p := rand.Perm(len(g.conns))
+ for _, n := range p {
+ if _, e := fmt.Fprintf(g.conns[n], graphitePoints); e != nil {
+ // Error
+ log.Println("ERROR: " + err.Error())
+ // Let's try the next one
+ } else {
+ // Success
+ err = nil
+ break
+ }
+ }
+ // try to reconnect
+ if err != nil {
+ g.Connect()
+ }
+ return err
+}
+
+func init() {
+ outputs.Add("graphite", func() outputs.Output {
+ return &Graphite{}
+ })
+}
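
The wire format produced by `Write` is `host.measurement[.field] value timestamp`, with dots in the host and names replaced by underscores, and the field name appended only when it differs from the measurement name. A standalone sketch of that formatting (the helper name is illustrative):

```go
package main

import (
	"fmt"
	"strings"
)

// graphiteLine mirrors the formatting in Write above.
func graphiteLine(prefix, host, name, field string, value interface{}, ts int64) string {
	host = strings.Replace(host, ".", "_", -1)
	name = strings.Replace(name, ".", "_", -1)
	var line string
	if name == field {
		line = fmt.Sprintf("%s.%s %#v %d\n", host, name, value, ts)
	} else {
		line = fmt.Sprintf("%s.%s.%s %#v %d\n", host, name,
			strings.Replace(field, ".", "_", -1), value, ts)
	}
	if prefix != "" {
		line = prefix + "." + line
	}
	return line
}

func main() {
	fmt.Print(graphiteLine("my.prefix", "192.168.0.1", "mymeasurement", "value", 3.14, 1289430000))
	// my.prefix.192_168_0_1.mymeasurement.value 3.14 1289430000
}
```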
diff --git a/plugins/outputs/graphite/graphite_test.go b/plugins/outputs/graphite/graphite_test.go
new file mode 100644
index 000000000..be4cc2472
--- /dev/null
+++ b/plugins/outputs/graphite/graphite_test.go
@@ -0,0 +1,104 @@
+package graphite
+
+import (
+ "bufio"
+ "net"
+ "net/textproto"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/influxdata/influxdb/client/v2"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestGraphiteError(t *testing.T) {
+ // Init plugin
+ g := Graphite{
+ Servers: []string{"127.0.0.1:2003", "127.0.0.1:12003"},
+ Prefix: "my.prefix",
+ }
+ // Init points
+ pt1, _ := client.NewPoint(
+ "mymeasurement",
+ map[string]string{"host": "192.168.0.1"},
+ map[string]interface{}{"mymeasurement": float64(3.14)},
+ time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC),
+ )
+ // Prepare point list
+ var points []*client.Point
+ points = append(points, pt1)
+ // Error
+ err1 := g.Connect()
+ require.NoError(t, err1)
+ err2 := g.Write(points)
+ require.Error(t, err2)
+ assert.Equal(t, "Could not write to any Graphite server in cluster\n", err2.Error())
+}
+
+func TestGraphiteOK(t *testing.T) {
+ var wg sync.WaitGroup
+ // Init plugin
+ g := Graphite{
+ Prefix: "my.prefix",
+ }
+ // Init points
+ pt1, _ := client.NewPoint(
+ "mymeasurement",
+ map[string]string{"host": "192.168.0.1"},
+ map[string]interface{}{"mymeasurement": float64(3.14)},
+ time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC),
+ )
+ pt2, _ := client.NewPoint(
+ "mymeasurement",
+ map[string]string{"host": "192.168.0.1"},
+ map[string]interface{}{"value": float64(3.14)},
+ time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC),
+ )
+ pt3, _ := client.NewPoint(
+ "my_measurement",
+ map[string]string{"host": "192.168.0.1"},
+ map[string]interface{}{"value": float64(3.14)},
+ time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC),
+ )
+ // Prepare point list
+ var points []*client.Point
+ points = append(points, pt1)
+ points = append(points, pt2)
+ points = append(points, pt3)
+ // Start TCP server
+ wg.Add(1)
+ go TCPServer(t, &wg)
+ wg.Wait()
+ // Connect
+ wg.Add(1)
+ err1 := g.Connect()
+ wg.Wait()
+ require.NoError(t, err1)
+ // Send Data
+ err2 := g.Write(points)
+ require.NoError(t, err2)
+ wg.Add(1)
+ // Wait for the TCP server
+ wg.Wait()
+ g.Close()
+}
+
+func TCPServer(t *testing.T, wg *sync.WaitGroup) {
+ tcpServer, _ := net.Listen("tcp", "127.0.0.1:2003")
+ wg.Done()
+ conn, _ := tcpServer.Accept()
+ wg.Done()
+ reader := bufio.NewReader(conn)
+ tp := textproto.NewReader(reader)
+ data1, _ := tp.ReadLine()
+ assert.Equal(t, "my.prefix.192_168_0_1.mymeasurement 3.14 1289430000", data1)
+ data2, _ := tp.ReadLine()
+ assert.Equal(t, "my.prefix.192_168_0_1.mymeasurement.value 3.14 1289430000", data2)
+ data3, _ := tp.ReadLine()
+ assert.Equal(t, "my.prefix.192_168_0_1.my_measurement.value 3.14 1289430000", data3)
+ conn.Close()
+ wg.Done()
+}
diff --git a/plugins/outputs/influxdb/influxdb.go b/plugins/outputs/influxdb/influxdb.go
index f6b79b009..f45f020b6 100644
--- a/plugins/outputs/influxdb/influxdb.go
+++ b/plugins/outputs/influxdb/influxdb.go
@@ -9,9 +9,9 @@ import (
"strings"
"time"
- "github.com/influxdb/influxdb/client/v2"
- "github.com/influxdb/telegraf/internal"
- "github.com/influxdb/telegraf/plugins/outputs"
+ "github.com/influxdata/influxdb/client/v2"
+ "github.com/influxdata/telegraf/internal"
+ "github.com/influxdata/telegraf/plugins/outputs"
)
type InfluxDB struct {
diff --git a/plugins/outputs/influxdb/influxdb_test.go b/plugins/outputs/influxdb/influxdb_test.go
index cf1d7d9b3..5da0c056f 100644
--- a/plugins/outputs/influxdb/influxdb_test.go
+++ b/plugins/outputs/influxdb/influxdb_test.go
@@ -6,7 +6,7 @@ import (
"net/http/httptest"
"testing"
- "github.com/influxdb/telegraf/testutil"
+ "github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/require"
)
diff --git a/plugins/outputs/kafka/kafka.go b/plugins/outputs/kafka/kafka.go
index 8e53cc511..b16347c92 100644
--- a/plugins/outputs/kafka/kafka.go
+++ b/plugins/outputs/kafka/kafka.go
@@ -1,12 +1,14 @@
package kafka
import (
+ "crypto/tls"
+ "crypto/x509"
"errors"
"fmt"
-
"github.com/Shopify/sarama"
- "github.com/influxdb/influxdb/client/v2"
- "github.com/influxdb/telegraf/plugins/outputs"
+ "github.com/influxdata/influxdb/client/v2"
+ "github.com/influxdata/telegraf/plugins/outputs"
+ "io/ioutil"
)
type Kafka struct {
@@ -16,8 +18,17 @@ type Kafka struct {
Topic string
// Routing Key Tag
RoutingTag string `toml:"routing_tag"`
+ // TLS client certificate
+ Certificate string
+ // TLS client key
+ Key string
+ // TLS certificate authority
+ CA string
+ // Verify SSL certificate chain
+ VerifySsl bool
- producer sarama.SyncProducer
+ tlsConfig tls.Config
+ producer sarama.SyncProducer
}
var sampleConfig = `
@@ -28,10 +39,60 @@ var sampleConfig = `
# Telegraf tag to use as a routing key
# ie, if this tag exists, it's value will be used as the routing key
routing_tag = "host"
+
+ # Optional TLS configuration:
+ # Client certificate
+ certificate = ""
+ # Client key
+ key = ""
+ # Certificate authority file
+ ca = ""
+ # Verify SSL certificate chain
+ verify_ssl = false
`
+func createTlsConfiguration(k *Kafka) (t *tls.Config, err error) {
+ if k.Certificate != "" && k.Key != "" && k.CA != "" {
+ cert, err := tls.LoadX509KeyPair(k.Certificate, k.Key)
+ if err != nil {
+ return nil, errors.New(fmt.Sprintf("Cout not load Kafka TLS client key/certificate: %s",
+ err))
+ }
+
+ caCert, err := ioutil.ReadFile(k.CA)
+ if err != nil {
+ return nil, errors.New(fmt.Sprintf("Cout not load Kafka TLS CA: %s",
+ err))
+ }
+
+ caCertPool := x509.NewCertPool()
+ caCertPool.AppendCertsFromPEM(caCert)
+
+ t = &tls.Config{
+ Certificates: []tls.Certificate{cert},
+ RootCAs: caCertPool,
+ InsecureSkipVerify: !k.VerifySsl, // verify_ssl = true means verify the chain
+ }
+ }
+ // will be nil by default if nothing is provided
+ return t, nil
+}
+
func (k *Kafka) Connect() error {
- producer, err := sarama.NewSyncProducer(k.Brokers, nil)
+ config := sarama.NewConfig()
+ config.Producer.RequiredAcks = sarama.WaitForAll // Wait for all in-sync replicas to ack the message
+ config.Producer.Retry.Max = 10 // Retry up to 10 times to produce the message
+ tlsConfig, err := createTlsConfiguration(k)
+ if err != nil {
+ return err
+ }
+
+ if tlsConfig != nil {
+ config.Net.TLS.Config = tlsConfig
+ config.Net.TLS.Enable = true
+ }
+
+ producer, err := sarama.NewSyncProducer(k.Brokers, config)
if err != nil {
return err
}
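
A condensed sketch of the producer setup introduced above. Note that recent sarama versions also require `Producer.Return.Successes = true` for a `SyncProducer`, which this sketch sets; the broker address is illustrative:

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	config := sarama.NewConfig()
	// Mirror the Connect change above: wait for all in-sync replicas to
	// ack, and retry up to 10 times to produce the message.
	config.Producer.RequiredAcks = sarama.WaitForAll
	config.Producer.Retry.Max = 10
	// Required by sarama's SyncProducer in recent versions.
	config.Producer.Return.Successes = true

	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, config)
	if err != nil {
		log.Fatal(err)
	}
	defer producer.Close()
}
```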
diff --git a/plugins/outputs/kafka/kafka_test.go b/plugins/outputs/kafka/kafka_test.go
index 2c1734857..2af343778 100644
--- a/plugins/outputs/kafka/kafka_test.go
+++ b/plugins/outputs/kafka/kafka_test.go
@@ -3,7 +3,7 @@ package kafka
import (
"testing"
- "github.com/influxdb/telegraf/testutil"
+ "github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/require"
)
diff --git a/plugins/outputs/kinesis/kinesis.go b/plugins/outputs/kinesis/kinesis.go
index 11e26fdf9..23ca03c5e 100644
--- a/plugins/outputs/kinesis/kinesis.go
+++ b/plugins/outputs/kinesis/kinesis.go
@@ -1,7 +1,6 @@
package kinesis
import (
- "errors"
"fmt"
"log"
"os"
@@ -15,8 +14,8 @@ import (
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/kinesis"
- "github.com/influxdb/influxdb/client/v2"
- "github.com/influxdb/telegraf/plugins/outputs"
+ "github.com/influxdata/influxdb/client/v2"
+ "github.com/influxdata/telegraf/plugins/outputs"
)
type KinesisOutput struct {
@@ -101,7 +100,7 @@ func (k *KinesisOutput) Connect() error {
}
func (k *KinesisOutput) Close() error {
- return errors.New("Error")
+ return nil
}
func FormatMetric(k *KinesisOutput, point *client.Point) (string, error) {
diff --git a/plugins/outputs/kinesis/kinesis_test.go b/plugins/outputs/kinesis/kinesis_test.go
index 4c667c860..76eb6ebca 100644
--- a/plugins/outputs/kinesis/kinesis_test.go
+++ b/plugins/outputs/kinesis/kinesis_test.go
@@ -1,7 +1,7 @@
package kinesis
import (
- "github.com/influxdb/telegraf/testutil"
+ "github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/require"
"testing"
)
diff --git a/plugins/outputs/librato/librato.go b/plugins/outputs/librato/librato.go
index 75aecb756..6afcb4542 100644
--- a/plugins/outputs/librato/librato.go
+++ b/plugins/outputs/librato/librato.go
@@ -7,9 +7,9 @@ import (
"log"
"net/http"
- "github.com/influxdb/influxdb/client/v2"
- "github.com/influxdb/telegraf/internal"
- "github.com/influxdb/telegraf/plugins/outputs"
+ "github.com/influxdata/influxdb/client/v2"
+ "github.com/influxdata/telegraf/internal"
+ "github.com/influxdata/telegraf/plugins/outputs"
)
type Librato struct {
diff --git a/plugins/outputs/librato/librato_test.go b/plugins/outputs/librato/librato_test.go
index 129352027..25418baa5 100644
--- a/plugins/outputs/librato/librato_test.go
+++ b/plugins/outputs/librato/librato_test.go
@@ -9,9 +9,9 @@ import (
"testing"
"time"
- "github.com/influxdb/telegraf/testutil"
+ "github.com/influxdata/telegraf/testutil"
- "github.com/influxdb/influxdb/client/v2"
+ "github.com/influxdata/influxdb/client/v2"
"github.com/stretchr/testify/require"
)
diff --git a/plugins/outputs/mqtt/mqtt.go b/plugins/outputs/mqtt/mqtt.go
index a70b2e575..7c47cf741 100644
--- a/plugins/outputs/mqtt/mqtt.go
+++ b/plugins/outputs/mqtt/mqtt.go
@@ -10,9 +10,9 @@ import (
"sync"
paho "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git"
- "github.com/influxdb/influxdb/client/v2"
- "github.com/influxdb/telegraf/internal"
- "github.com/influxdb/telegraf/plugins/outputs"
+ "github.com/influxdata/influxdb/client/v2"
+ "github.com/influxdata/telegraf/internal"
+ "github.com/influxdata/telegraf/plugins/outputs"
)
const MaxClientIdLen = 8
diff --git a/plugins/outputs/mqtt/mqtt_test.go b/plugins/outputs/mqtt/mqtt_test.go
index 0922b83ed..f25f4497f 100644
--- a/plugins/outputs/mqtt/mqtt_test.go
+++ b/plugins/outputs/mqtt/mqtt_test.go
@@ -3,7 +3,7 @@ package mqtt
import (
"testing"
- "github.com/influxdb/telegraf/testutil"
+ "github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/require"
)
diff --git a/plugins/outputs/nsq/nsq.go b/plugins/outputs/nsq/nsq.go
index db58670a2..79818ec5c 100644
--- a/plugins/outputs/nsq/nsq.go
+++ b/plugins/outputs/nsq/nsq.go
@@ -2,8 +2,8 @@ package nsq
import (
"fmt"
- "github.com/influxdb/influxdb/client/v2"
- "github.com/influxdb/telegraf/plugins/outputs"
+ "github.com/influxdata/influxdb/client/v2"
+ "github.com/influxdata/telegraf/plugins/outputs"
"github.com/nsqio/go-nsq"
)
diff --git a/plugins/outputs/nsq/nsq_test.go b/plugins/outputs/nsq/nsq_test.go
index 4448383f4..b2d703a70 100644
--- a/plugins/outputs/nsq/nsq_test.go
+++ b/plugins/outputs/nsq/nsq_test.go
@@ -3,7 +3,7 @@ package nsq
import (
"testing"
- "github.com/influxdb/telegraf/testutil"
+ "github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/require"
)
diff --git a/plugins/outputs/opentsdb/opentsdb.go b/plugins/outputs/opentsdb/opentsdb.go
index 236385d71..6e9f3e26a 100644
--- a/plugins/outputs/opentsdb/opentsdb.go
+++ b/plugins/outputs/opentsdb/opentsdb.go
@@ -8,8 +8,8 @@ import (
"strings"
"time"
- "github.com/influxdb/influxdb/client/v2"
- "github.com/influxdb/telegraf/plugins/outputs"
+ "github.com/influxdata/influxdb/client/v2"
+ "github.com/influxdata/telegraf/plugins/outputs"
)
type OpenTSDB struct {
diff --git a/plugins/outputs/opentsdb/opentsdb_test.go b/plugins/outputs/opentsdb/opentsdb_test.go
index f75bd7205..92df3fb52 100644
--- a/plugins/outputs/opentsdb/opentsdb_test.go
+++ b/plugins/outputs/opentsdb/opentsdb_test.go
@@ -4,7 +4,7 @@ import (
"reflect"
"testing"
- "github.com/influxdb/telegraf/testutil"
+ "github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/require"
)
diff --git a/plugins/outputs/prometheus_client/prometheus_client.go b/plugins/outputs/prometheus_client/prometheus_client.go
index 1fbf9056a..4e429722a 100644
--- a/plugins/outputs/prometheus_client/prometheus_client.go
+++ b/plugins/outputs/prometheus_client/prometheus_client.go
@@ -5,8 +5,8 @@ import (
"log"
"net/http"
- "github.com/influxdb/influxdb/client/v2"
- "github.com/influxdb/telegraf/plugins/outputs"
+ "github.com/influxdata/influxdb/client/v2"
+ "github.com/influxdata/telegraf/plugins/outputs"
"github.com/prometheus/client_golang/prometheus"
)
diff --git a/plugins/outputs/prometheus_client/prometheus_client_test.go b/plugins/outputs/prometheus_client/prometheus_client_test.go
index dc353486c..73163ee1d 100644
--- a/plugins/outputs/prometheus_client/prometheus_client_test.go
+++ b/plugins/outputs/prometheus_client/prometheus_client_test.go
@@ -5,9 +5,9 @@ import (
"github.com/stretchr/testify/require"
- "github.com/influxdb/influxdb/client/v2"
- "github.com/influxdb/telegraf/plugins/inputs/prometheus"
- "github.com/influxdb/telegraf/testutil"
+ "github.com/influxdata/influxdb/client/v2"
+ "github.com/influxdata/telegraf/plugins/inputs/prometheus"
+ "github.com/influxdata/telegraf/testutil"
)
var pTesting *PrometheusClient
diff --git a/plugins/outputs/registry.go b/plugins/outputs/registry.go
index d7ea30492..d4c6ba1e5 100644
--- a/plugins/outputs/registry.go
+++ b/plugins/outputs/registry.go
@@ -1,7 +1,7 @@
package outputs
import (
- "github.com/influxdb/influxdb/client/v2"
+ "github.com/influxdata/influxdb/client/v2"
)
type Output interface {
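The registry.go hunk cuts off at the interface declaration. For orientation, here is a plausible shape of the full `Output` contract at this point in the codebase: `Connect` and `Close` appear in the kinesis hunks above, `Write` against `client.Point` is implied by `FormatMetric`, and the `Description`/`SampleConfig` methods are assumptions from the usual plugin-registration pattern rather than anything visible in this diff:

```go
package outputs

import (
	"github.com/influxdata/influxdb/client/v2"
)

// Output is the contract each output plugin touched in this patch
// implements. The method set is a hedged reconstruction; see note above.
type Output interface {
	Connect() error                     // establish the downstream connection
	Close() error                       // release it (the kinesis fix makes this a clean no-op)
	Description() string                // one-line plugin description (assumed)
	SampleConfig() string               // example config stanza (assumed)
	Write(points []*client.Point) error // flush a batch of buffered points
}
```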
diff --git a/plugins/outputs/riemann/riemann.go b/plugins/outputs/riemann/riemann.go
index afbde0051..c1b22ec46 100644
--- a/plugins/outputs/riemann/riemann.go
+++ b/plugins/outputs/riemann/riemann.go
@@ -6,8 +6,8 @@ import (
"os"
"github.com/amir/raidman"
- "github.com/influxdb/influxdb/client/v2"
- "github.com/influxdb/telegraf/plugins/outputs"
+ "github.com/influxdata/influxdb/client/v2"
+ "github.com/influxdata/telegraf/plugins/outputs"
)
type Riemann struct {
diff --git a/plugins/outputs/riemann/riemann_test.go b/plugins/outputs/riemann/riemann_test.go
index 31e9478b1..8b3f27ac0 100644
--- a/plugins/outputs/riemann/riemann_test.go
+++ b/plugins/outputs/riemann/riemann_test.go
@@ -3,7 +3,7 @@ package riemann
import (
"testing"
- "github.com/influxdb/telegraf/testutil"
+ "github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/require"
)
diff --git a/scripts/Vagrantfile b/scripts/Vagrantfile
index 3c0199bdb..a04450d6d 100644
--- a/scripts/Vagrantfile
+++ b/scripts/Vagrantfile
@@ -7,7 +7,7 @@ VAGRANTFILE_API_VERSION = "2"
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
config.vm.box = "ubuntu/trusty64"
- config.vm.synced_folder "..", "/home/vagrant/go/src/github.com/influxdb/telegraf",
+ config.vm.synced_folder "..", "/home/vagrant/go/src/github.com/influxdata/telegraf",
type: "rsync",
rsync__args: ["--verbose", "--archive", "--delete", "-z", "--safe-links"],
rsync__exclude: ["./telegraf", ".vagrant/"]
@@ -26,7 +26,7 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
gvm use go1.4.2 --default
echo "export PATH=$PATH:$GOPATH/bin" >> "$HOME/.bashrc"
echo 'export GOPATH=/home/vagrant/go' >> "$HOME/.bashrc"
- cd "$HOME/go/src/github.com/influxdb/telegraf" &&\
+ cd "$HOME/go/src/github.com/influxdata/telegraf" &&\
rm -rf Godeps/_workspace/pkg &&\
GOPATH="$HOME/go" make
SHELL
diff --git a/scripts/circle-test.sh b/scripts/circle-test.sh
index 96319bf72..bbad51506 100755
--- a/scripts/circle-test.sh
+++ b/scripts/circle-test.sh
@@ -34,7 +34,7 @@ export GOPATH=$BUILD_DIR
# Turning off GOGC speeds up build times
export GOGC=off
export PATH=$GOPATH/bin:$PATH
-exit_if_fail mkdir -p $GOPATH/src/github.com/influxdb
+exit_if_fail mkdir -p $GOPATH/src/github.com/influxdata
# Dump some test config to the log.
echo "Test configuration"
@@ -44,8 +44,8 @@ echo "\$GOPATH: $GOPATH"
echo "\$CIRCLE_BRANCH: $CIRCLE_BRANCH"
# Move the checked-out source to a better location
-exit_if_fail mv $HOME/telegraf $GOPATH/src/github.com/influxdb
-exit_if_fail cd $GOPATH/src/github.com/influxdb/telegraf
+exit_if_fail mv $HOME/telegraf $GOPATH/src/github.com/influxdata
+exit_if_fail cd $GOPATH/src/github.com/influxdata/telegraf
# Verify that go fmt has been run
check_go_fmt
diff --git a/scripts/package.sh b/scripts/package.sh
deleted file mode 100755
index fbbf39eb8..000000000
--- a/scripts/package.sh
+++ /dev/null
@@ -1,365 +0,0 @@
-#!/usr/bin/env bash
-
-###########################################################################
-# Packaging script which creates debian and RPM packages. It optionally
-# tags the repo with the given version.
-#
-# Requirements: GOPATH must be set. 'fpm' must be on the path, and the AWS
-# CLI tools must also be installed.
-#
-# https://github.com/jordansissel/fpm
-# http://aws.amazon.com/cli/
-#
-# Packaging process: to package a build, simply execute:
-#
-# package.sh
-#
-# The script will automatically determine the version number from git using
-# `git describe --always --tags`
-#
-# AWS upload: the script will also offer to upload the packages to S3. If
-# this option is selected, the credentials should be present in the file
-# ~/aws.conf. The contents should be of the form:
-#
-# [default]
-# aws_access_key_id=
-# aws_secret_access_key=
-# region = us-east-1
-#
-# Trim the leading spaces when creating the file. The script will exit if
-# S3 upload is requested, but this file does not exist.
-
-AWS_FILE=~/aws.conf
-
-INSTALL_ROOT_DIR=/opt/telegraf
-TELEGRAF_LOG_DIR=/var/log/telegraf
-CONFIG_ROOT_DIR=/etc/opt/telegraf
-CONFIG_D_DIR=/etc/opt/telegraf/telegraf.d
-LOGROTATE_DIR=/etc/logrotate.d
-
-SAMPLE_CONFIGURATION=etc/telegraf.conf
-LOGROTATE_CONFIGURATION=etc/logrotate.d/telegraf
-INITD_SCRIPT=scripts/init.sh
-SYSTEMD_SCRIPT=scripts/telegraf.service
-
-TMP_WORK_DIR=`mktemp -d`
-POST_INSTALL_PATH=`mktemp`
-ARCH=`uname -i`
-LICENSE=MIT
-URL=influxdb.com
-MAINTAINER=support@influxdb.com
-VENDOR=InfluxDB
-DESCRIPTION="InfluxDB Telegraf agent"
-PKG_DEPS=(coreutils)
-GO_VERSION="go1.5"
-GOPATH_INSTALL=
-BINS=(
- telegraf
- )
-
-###########################################################################
-# Helper functions.
-
-# usage prints simple usage information.
-usage() {
- echo -e "$0\n"
- cleanup_exit $1
-}
-
-# make_dir_tree creates the directory structure within the packages.
-make_dir_tree() {
- work_dir=$1
- version=$2
- mkdir -p $work_dir/$INSTALL_ROOT_DIR/versions/$version/scripts
- if [ $? -ne 0 ]; then
- echo "Failed to create installation directory -- aborting."
- cleanup_exit 1
- fi
- mkdir -p $work_dir/$CONFIG_ROOT_DIR
- if [ $? -ne 0 ]; then
- echo "Failed to create configuration directory -- aborting."
- cleanup_exit 1
- fi
- mkdir -p $work_dir/$CONFIG_D_DIR
- if [ $? -ne 0 ]; then
- echo "Failed to create configuration subdirectory -- aborting."
- cleanup_exit 1
- fi
- mkdir -p $work_dir/$LOGROTATE_DIR
- if [ $? -ne 0 ]; then
- echo "Failed to create logrotate directory -- aborting."
- cleanup_exit 1
- fi
-
-}
-
-# cleanup_exit removes all resources created during the process and exits with
-# the supplied returned code.
-cleanup_exit() {
- rm -r $TMP_WORK_DIR
- rm $POST_INSTALL_PATH
- exit $1
-}
-
-# check_gopath sanity checks the value of the GOPATH env variable, and determines
-# the path where build artifacts are installed. GOPATH may be a colon-delimited
-# list of directories.
-check_gopath() {
- [ -z "$GOPATH" ] && echo "GOPATH is not set." && cleanup_exit 1
- GOPATH_INSTALL=`echo $GOPATH | cut -d ':' -f 1`
- [ ! -d "$GOPATH_INSTALL" ] && echo "GOPATH_INSTALL is not a directory." && cleanup_exit 1
- echo "GOPATH ($GOPATH) looks sane, using $GOPATH_INSTALL for installation."
-}
-
-# check_clean_tree ensures that no source file is locally modified.
-check_clean_tree() {
- modified=$(git ls-files --modified | wc -l)
- if [ $modified -ne 0 ]; then
- echo "The source tree is not clean -- aborting."
- cleanup_exit 1
- fi
- echo "Git tree is clean."
-}
-
-# do_build builds the code. The version and commit must be passed in.
-do_build() {
- version=$1
- commit=`git rev-parse HEAD`
- if [ $? -ne 0 ]; then
- echo "Unable to retrieve current commit -- aborting"
- cleanup_exit 1
- fi
-
- for b in ${BINS[*]}; do
- rm -f $GOPATH_INSTALL/bin/$b
- done
-
- gdm restore
- go install -ldflags="-X main.Version $version" ./...
- if [ $? -ne 0 ]; then
- echo "Build failed, unable to create package -- aborting"
- cleanup_exit 1
- fi
- echo "Build completed successfully."
-}
-
-# generate_postinstall_script creates the post-install script for the
-# package. It must be passed the version.
-generate_postinstall_script() {
- version=$1
- cat << EOF >$POST_INSTALL_PATH
-#!/bin/sh
-rm -f $INSTALL_ROOT_DIR/telegraf
-rm -f $INSTALL_ROOT_DIR/init.sh
-ln -sfn $INSTALL_ROOT_DIR/versions/$version/telegraf $INSTALL_ROOT_DIR/telegraf
-
-if ! id telegraf >/dev/null 2>&1; then
- useradd --help 2>&1| grep -- --system > /dev/null 2>&1
- old_useradd=\$?
- if [ \$old_useradd -eq 0 ]
- then
- useradd --system -U -M telegraf
- else
- groupadd telegraf && useradd -M -g telegraf telegraf
- fi
-fi
-
-# Systemd
-if which systemctl > /dev/null 2>&1 ; then
- cp $INSTALL_ROOT_DIR/versions/$version/scripts/telegraf.service \
- /lib/systemd/system/telegraf.service
- systemctl enable telegraf
-
- # restart on upgrade of package
- if [ "$#" -eq 2 ]; then
- systemctl restart telegraf
- fi
-
-# Sysv
-else
- ln -sfn $INSTALL_ROOT_DIR/versions/$version/scripts/init.sh \
- $INSTALL_ROOT_DIR/init.sh
- rm -f /etc/init.d/telegraf
- ln -sfn $INSTALL_ROOT_DIR/init.sh /etc/init.d/telegraf
- chmod +x /etc/init.d/telegraf
- # update-rc.d sysv service:
- if which update-rc.d > /dev/null 2>&1 ; then
- update-rc.d -f telegraf remove
- update-rc.d telegraf defaults
- # CentOS-style sysv:
- else
- chkconfig --add telegraf
- fi
-
- # restart on upgrade of package
- if [ "$#" -eq 2 ]; then
- /etc/init.d/telegraf restart
- fi
-
- mkdir -p $TELEGRAF_LOG_DIR
- chown -R -L telegraf:telegraf $TELEGRAF_LOG_DIR
-fi
-
-chown -R -L telegraf:telegraf $INSTALL_ROOT_DIR
-chmod -R a+rX $INSTALL_ROOT_DIR
-
-EOF
- echo "Post-install script created successfully at $POST_INSTALL_PATH"
-}
-
-###########################################################################
-# Start the packaging process.
-
-if [ "$1" == "-h" ]; then
- usage 0
-elif [ "$1" == "" ]; then
- VERSION=`git describe --always --tags | tr -d v`
-else
- VERSION="$1"
-fi
-
-cd `git rev-parse --show-toplevel`
-echo -e "\nStarting package process, version: $VERSION\n"
-
-check_gopath
-do_build $VERSION
-make_dir_tree $TMP_WORK_DIR $VERSION
-
-###########################################################################
-# Copy the assets to the installation directories.
-
-for b in ${BINS[*]}; do
- cp $GOPATH_INSTALL/bin/$b $TMP_WORK_DIR/$INSTALL_ROOT_DIR/versions/$VERSION
- if [ $? -ne 0 ]; then
- echo "Failed to copy binaries to packaging directory -- aborting."
- cleanup_exit 1
- fi
-done
-
-echo "${BINS[*]} copied to $TMP_WORK_DIR/$INSTALL_ROOT_DIR/versions/$VERSION"
-
-cp $INITD_SCRIPT $TMP_WORK_DIR/$INSTALL_ROOT_DIR/versions/$VERSION/scripts
-if [ $? -ne 0 ]; then
- echo "Failed to copy init.d script to packaging directory -- aborting."
- cleanup_exit 1
-fi
-echo "$INITD_SCRIPT copied to $TMP_WORK_DIR/$INSTALL_ROOT_DIR/versions/$VERSION/scripts"
-
-cp $SYSTEMD_SCRIPT $TMP_WORK_DIR/$INSTALL_ROOT_DIR/versions/$VERSION/scripts
-if [ $? -ne 0 ]; then
- echo "Failed to copy systemd file to packaging directory -- aborting."
- cleanup_exit 1
-fi
-echo "$SYSTEMD_SCRIPT copied to $TMP_WORK_DIR/$INSTALL_ROOT_DIR/versions/$VERSION/scripts"
-
-cp $SAMPLE_CONFIGURATION $TMP_WORK_DIR/$CONFIG_ROOT_DIR/telegraf.conf
-if [ $? -ne 0 ]; then
- echo "Failed to copy $SAMPLE_CONFIGURATION to packaging directory -- aborting."
- cleanup_exit 1
-fi
-
-cp $LOGROTATE_CONFIGURATION $TMP_WORK_DIR/$LOGROTATE_DIR/telegraf
-if [ $? -ne 0 ]; then
- echo "Failed to copy $LOGROTATE_CONFIGURATION to packaging directory -- aborting."
- cleanup_exit 1
-fi
-
-generate_postinstall_script $VERSION
-
-###########################################################################
-# Create the actual packages.
-
-if [ "$CIRCLE_BRANCH" == "" ]; then
- echo -n "Commence creation of $ARCH packages, version $VERSION? [Y/n] "
- read response
- response=`echo $response | tr 'A-Z' 'a-z'`
- if [ "x$response" == "xn" ]; then
- echo "Packaging aborted."
- cleanup_exit 1
- fi
-fi
-
-if [ $ARCH == "i386" ]; then
- rpm_package=telegraf-$VERSION-1.i686.rpm
- debian_package=telegraf_${VERSION}_i686.deb
- deb_args="-a i686"
- rpm_args="setarch i686"
-elif [ $ARCH == "arm" ]; then
- rpm_package=telegraf-$VERSION-1.armel.rpm
- debian_package=telegraf_${VERSION}_armel.deb
-else
- rpm_package=telegraf-$VERSION-1.x86_64.rpm
- debian_package=telegraf_${VERSION}_amd64.deb
-fi
-
-COMMON_FPM_ARGS="-C $TMP_WORK_DIR --vendor $VENDOR --url $URL --license $LICENSE \
- --maintainer $MAINTAINER --after-install $POST_INSTALL_PATH \
- --name telegraf --provides telegraf --version $VERSION --config-files $CONFIG_ROOT_DIR ."
-$rpm_args fpm -s dir -t rpm --description "$DESCRIPTION" $COMMON_FPM_ARGS
-if [ $? -ne 0 ]; then
- echo "Failed to create RPM package -- aborting."
- cleanup_exit 1
-fi
-echo "RPM package created successfully."
-
-fpm -s dir -t deb $deb_args --description "$DESCRIPTION" $COMMON_FPM_ARGS
-if [ $? -ne 0 ]; then
- echo "Failed to create Debian package -- aborting."
- cleanup_exit 1
-fi
-echo "Debian package created successfully."
-
-###########################################################################
-# Offer to publish the packages.
-
-if [ "$CIRCLE_BRANCH" == "" ]; then
- echo -n "Publish packages to S3? [y/N] "
- read response
- response=`echo $response | tr 'A-Z' 'a-z'`
- if [ "x$response" == "xy" ]; then
- echo "Publishing packages to S3."
- if [ ! -e "$AWS_FILE" ]; then
- echo "$AWS_FILE does not exist -- aborting."
- cleanup_exit 1
- fi
-
- # Upload .deb and .rpm packages
- for filepath in `ls *.{deb,rpm}`; do
- echo "Uploading $filepath to S3"
- filename=`basename $filepath`
- echo "Uploading $filename to s3://get.influxdb.org/telegraf/$filename"
- AWS_CONFIG_FILE=$AWS_FILE aws s3 cp $filepath \
- s3://get.influxdb.org/telegraf/$filename \
- --acl public-read --region us-east-1
- if [ $? -ne 0 ]; then
- echo "Upload failed -- aborting".
- cleanup_exit 1
- fi
- rm $filepath
- done
-
- # Make and upload linux amd64, 386, and arm
- make build-linux-bins
- for b in `ls telegraf_*`; do
- zippedbin=${b}_${VERSION}.tar.gz
- # Zip the binary
- tar -zcf $TMP_WORK_DIR/$zippedbin ./$b
- echo "Uploading binary: $zippedbin to S3"
- AWS_CONFIG_FILE=$AWS_FILE aws s3 cp $TMP_WORK_DIR/$zippedbin \
- s3://get.influxdb.org/telegraf/$zippedbin \
- --acl public-read --region us-east-1
- if [ $? -ne 0 ]; then
- echo "Binary upload failed -- aborting".
- cleanup_exit 1
- fi
- done
- else
- echo "Not publishing packages to S3."
- fi
-fi
-
-###########################################################################
-# All done.
-
-echo -e "\nPackaging process complete."
-cleanup_exit 0
diff --git a/scripts/telegraf.service b/scripts/telegraf.service
index d92f3072c..6f4450402 100644
--- a/scripts/telegraf.service
+++ b/scripts/telegraf.service
@@ -1,6 +1,6 @@
[Unit]
Description=The plugin-driven server agent for reporting metrics into InfluxDB
-Documentation=https://github.com/influxdb/telegraf
+Documentation=https://github.com/influxdata/telegraf
After=network.target
[Service]
diff --git a/testutil/testutil.go b/testutil/testutil.go
index 581220299..436b57361 100644
--- a/testutil/testutil.go
+++ b/testutil/testutil.go
@@ -6,7 +6,7 @@ import (
"os"
"time"
- "github.com/influxdb/influxdb/client/v2"
+ "github.com/influxdata/influxdb/client/v2"
)
var localhost = "localhost"