diff --git a/.gitignore b/.gitignore
index d432f6865..7d27d694e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,3 +2,5 @@ tivan
.vagrant
/telegraf
.idea
+*~
+*#
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 7f562c3ea..28b47fe20 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,29 +1,104 @@
+## v0.10.1 [unreleased]
+
+### Release Notes:
+
+- The docker plugin has been significantly overhauled to add more metrics
+and allow for docker-machine (including OS X) support.
+[See the readme](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/docker/README.md)
+for the latest measurements, fields, and tags. There is also now support for
+specifying a docker endpoint to get metrics from.
+
+### Features
+- [#509](https://github.com/influxdata/telegraf/pull/509): Flatten JSON arrays with indices. Thanks @psilva261!
+- [#512](https://github.com/influxdata/telegraf/pull/512): Python 3 build script, add lsof dep to package. Thanks @Ormod!
+- [#475](https://github.com/influxdata/telegraf/pull/475): Add response time to httpjson plugin. Thanks @titilambert!
+- [#519](https://github.com/influxdata/telegraf/pull/519): Added a sensors input based on lm-sensors. Thanks @md14454!
+- [#467](https://github.com/influxdata/telegraf/issues/467): Add option to disable statsd measurement name conversion.
+- [#534](https://github.com/influxdata/telegraf/pull/534): NSQ input plugin. Thanks @allingeek!
+- [#494](https://github.com/influxdata/telegraf/pull/494): Graphite output plugin. Thanks @titilambert!
+- AMQP SSL support. Thanks @ekini!
+- [#539](https://github.com/influxdata/telegraf/pull/539): Reload config on SIGHUP. Thanks @titilambert!
+- [#522](https://github.com/influxdata/telegraf/pull/522): Phusion passenger input plugin. Thanks @kureikain!
+- [#541](https://github.com/influxdata/telegraf/pull/541): Kafka output TLS cert support. Thanks @Ormod!
+- [#551](https://github.com/influxdata/telegraf/pull/551): Statsd UDP read packet size now defaults to 1500 bytes, and is configurable.
+- [#552](https://github.com/influxdata/telegraf/pull/552): Support for collection interval jittering.
+- [#484](https://github.com/influxdata/telegraf/issues/484): Include usage percent with procstat metrics.
+- [#553](https://github.com/influxdata/telegraf/pull/553): Amazon CloudWatch output. Thanks @skwong2!
+- [#503](https://github.com/influxdata/telegraf/pull/503): Support docker endpoint configuration.
+- [#563](https://github.com/influxdata/telegraf/pull/563): Docker plugin overhaul.
+
+### Bugfixes
+- [#506](https://github.com/influxdata/telegraf/pull/506): Ping input doesn't return a response time metric when it times out. Thanks @titilambert!
+- [#508](https://github.com/influxdata/telegraf/pull/508): Fix prometheus cardinality issue with the `net` plugin.
+- [#499](https://github.com/influxdata/telegraf/issues/499) & [#502](https://github.com/influxdata/telegraf/issues/502): php fpm unix socket and other fixes, thanks @kureikain!
+- [#543](https://github.com/influxdata/telegraf/issues/543): Statsd packet size sometimes truncated.
+- [#440](https://github.com/influxdata/telegraf/issues/440): Don't query filtered devices for disk stats.
+- [#463](https://github.com/influxdata/telegraf/issues/463): Docker plugin not working on AWS Linux.
+
+## v0.10.0 [2016-01-12]
+
+### Release Notes
+- Linux packages have been taken out of `opt`; the binary is now in `/usr/bin`
+and configuration files are in `/etc/telegraf`.
+- **breaking change** `plugins` have been renamed to `inputs`.
This was done because +`plugins` is too generic, as there are now also "output plugins", and will likely +be "aggregator plugins" and "filter plugins" in the future. Additionally, +`inputs/` and `outputs/` directories have been placed in the root-level `plugins/` +directory. +- **breaking change** the `io` plugin has been renamed `diskio` +- **breaking change** plugin measurements aggregated into a single measurement. +- **breaking change** `jolokia` plugin: must use global tag/drop/pass parameters +for configuration. +- **breaking change** `twemproxy` plugin: `prefix` option removed. +- **breaking change** `procstat` cpu measurements are now prepended with `cpu_time_` +instead of only `cpu_` +- **breaking change** some command-line flags have been renamed to separate words. +`-configdirectory` -> `-config-directory`, `-filter` -> `-input-filter`, +`-outputfilter` -> `-output-filter` +- The prometheus plugin schema has not been changed (measurements have not been +aggregated). + +### Packaging change note: + +RHEL/CentOS users upgrading from 0.2.x to 0.10.0 will probably have their +configurations overwritten by the upgrade. There is a backup stored at +/etc/telegraf/telegraf.conf.$(date +%s).backup. + +### Features +- Plugin measurements aggregated into a single measurement. +- Added ability to specify per-plugin tags +- Added ability to specify per-plugin measurement suffix and prefix. +(`name_prefix` and `name_suffix`) +- Added ability to override base plugin measurement name. (`name_override`) + +### Bugfixes + ## v0.2.5 [unreleased] ### Features -- [#427](https://github.com/influxdb/telegraf/pull/427): zfs plugin: pool stats added. Thanks @allenpetersen! -- [#428](https://github.com/influxdb/telegraf/pull/428): Amazon Kinesis output. Thanks @jimmystewpot! -- [#449](https://github.com/influxdb/telegraf/pull/449): influxdb plugin, thanks @mark-rushakoff +- [#427](https://github.com/influxdata/telegraf/pull/427): zfs plugin: pool stats added. Thanks @allenpetersen! +- [#428](https://github.com/influxdata/telegraf/pull/428): Amazon Kinesis output. Thanks @jimmystewpot! +- [#449](https://github.com/influxdata/telegraf/pull/449): influxdb plugin, thanks @mark-rushakoff ### Bugfixes -- [#430](https://github.com/influxdb/telegraf/issues/430): Network statistics removed in elasticsearch 2.1. Thanks @jipperinbham! -- [#452](https://github.com/influxdb/telegraf/issues/452): Elasticsearch open file handles error. Thanks @jipperinbham! +- [#430](https://github.com/influxdata/telegraf/issues/430): Network statistics removed in elasticsearch 2.1. Thanks @jipperinbham! +- [#452](https://github.com/influxdata/telegraf/issues/452): Elasticsearch open file handles error. Thanks @jipperinbham! ## v0.2.4 [2015-12-08] ### Features -- [#412](https://github.com/influxdb/telegraf/pull/412): Additional memcached stats. Thanks @mgresser! -- [#410](https://github.com/influxdb/telegraf/pull/410): Additional redis metrics. Thanks @vlaadbrain! -- [#414](https://github.com/influxdb/telegraf/issues/414): Jolokia plugin auth parameters -- [#415](https://github.com/influxdb/telegraf/issues/415): memcached plugin: support unix sockets -- [#418](https://github.com/influxdb/telegraf/pull/418): memcached plugin additional unit tests. -- [#408](https://github.com/influxdb/telegraf/pull/408): MailChimp plugin. -- [#382](https://github.com/influxdb/telegraf/pull/382): Add system wide network protocol stats to `net` plugin. -- [#401](https://github.com/influxdb/telegraf/pull/401): Support pass/drop/tagpass/tagdrop for outputs. 
Thanks @oldmantaiter! +- [#412](https://github.com/influxdata/telegraf/pull/412): Additional memcached stats. Thanks @mgresser! +- [#410](https://github.com/influxdata/telegraf/pull/410): Additional redis metrics. Thanks @vlaadbrain! +- [#414](https://github.com/influxdata/telegraf/issues/414): Jolokia plugin auth parameters +- [#415](https://github.com/influxdata/telegraf/issues/415): memcached plugin: support unix sockets +- [#418](https://github.com/influxdata/telegraf/pull/418): memcached plugin additional unit tests. +- [#408](https://github.com/influxdata/telegraf/pull/408): MailChimp plugin. +- [#382](https://github.com/influxdata/telegraf/pull/382): Add system wide network protocol stats to `net` plugin. +- [#401](https://github.com/influxdata/telegraf/pull/401): Support pass/drop/tagpass/tagdrop for outputs. Thanks @oldmantaiter! ### Bugfixes -- [#405](https://github.com/influxdb/telegraf/issues/405): Prometheus output cardinality issue -- [#388](https://github.com/influxdb/telegraf/issues/388): Fix collection hangup when cpu times decrement. +- [#405](https://github.com/influxdata/telegraf/issues/405): Prometheus output cardinality issue +- [#388](https://github.com/influxdata/telegraf/issues/388): Fix collection hangup when cpu times decrement. ## v0.2.3 [2015-11-30] @@ -38,11 +113,11 @@ functional. same type can be specified, like this: ``` -[[plugins.cpu]] +[[inputs.cpu]] percpu = false totalcpu = true -[[plugins.cpu]] +[[inputs.cpu]] percpu = true totalcpu = false drop = ["cpu_time"] @@ -52,15 +127,15 @@ same type can be specified, like this: - Aerospike plugin: tag changed from `host` -> `aerospike_host` ### Features -- [#379](https://github.com/influxdb/telegraf/pull/379): Riemann output, thanks @allenj! -- [#375](https://github.com/influxdb/telegraf/pull/375): kafka_consumer service plugin. -- [#392](https://github.com/influxdb/telegraf/pull/392): Procstat plugin can now accept pgrep -f pattern, thanks @ecarreras! -- [#383](https://github.com/influxdb/telegraf/pull/383): Specify plugins as a list. -- [#354](https://github.com/influxdb/telegraf/pull/354): Add ability to specify multiple metrics in one statsd line. Thanks @MerlinDMC! +- [#379](https://github.com/influxdata/telegraf/pull/379): Riemann output, thanks @allenj! +- [#375](https://github.com/influxdata/telegraf/pull/375): kafka_consumer service plugin. +- [#392](https://github.com/influxdata/telegraf/pull/392): Procstat plugin can now accept pgrep -f pattern, thanks @ecarreras! +- [#383](https://github.com/influxdata/telegraf/pull/383): Specify plugins as a list. +- [#354](https://github.com/influxdata/telegraf/pull/354): Add ability to specify multiple metrics in one statsd line. Thanks @MerlinDMC! ### Bugfixes -- [#371](https://github.com/influxdb/telegraf/issues/371): Kafka consumer plugin not functioning. -- [#389](https://github.com/influxdb/telegraf/issues/389): NaN value panic +- [#371](https://github.com/influxdata/telegraf/issues/371): Kafka consumer plugin not functioning. +- [#389](https://github.com/influxdata/telegraf/issues/389): NaN value panic ## v0.2.2 [2015-11-18] @@ -69,7 +144,7 @@ same type can be specified, like this: lists of servers/URLs. 0.2.2 is being released solely to fix that bug ### Bugfixes -- [#377](https://github.com/influxdb/telegraf/pull/377): Fix for duplicate slices in plugins. +- [#377](https://github.com/influxdata/telegraf/pull/377): Fix for duplicate slices in inputs. ## v0.2.1 [2015-11-16] @@ -86,22 +161,22 @@ changed to just run docker commands in the Makefile. 
See `make docker-run` and
same type.

### Features
-- [#325](https://github.com/influxdb/telegraf/pull/325): NSQ output. Thanks @jrxFive!
-- [#318](https://github.com/influxdb/telegraf/pull/318): Prometheus output. Thanks @oldmantaiter!
-- [#338](https://github.com/influxdb/telegraf/pull/338): Restart Telegraf on package upgrade. Thanks @linsomniac!
-- [#337](https://github.com/influxdb/telegraf/pull/337): Jolokia plugin, thanks @saiello!
-- [#350](https://github.com/influxdb/telegraf/pull/350): Amon output.
-- [#365](https://github.com/influxdb/telegraf/pull/365): Twemproxy plugin by @codeb2cc
-- [#317](https://github.com/influxdb/telegraf/issues/317): ZFS plugin, thanks @cornerot!
-- [#364](https://github.com/influxdb/telegraf/pull/364): Support InfluxDB UDP output.
-- [#370](https://github.com/influxdb/telegraf/pull/370): Support specifying multiple outputs, as lists.
-- [#372](https://github.com/influxdb/telegraf/pull/372): Remove gosigar and update go-dockerclient for FreeBSD support. Thanks @MerlinDMC!
+- [#325](https://github.com/influxdata/telegraf/pull/325): NSQ output. Thanks @jrxFive!
+- [#318](https://github.com/influxdata/telegraf/pull/318): Prometheus output. Thanks @oldmantaiter!
+- [#338](https://github.com/influxdata/telegraf/pull/338): Restart Telegraf on package upgrade. Thanks @linsomniac!
+- [#337](https://github.com/influxdata/telegraf/pull/337): Jolokia plugin, thanks @saiello!
+- [#350](https://github.com/influxdata/telegraf/pull/350): Amon output.
+- [#365](https://github.com/influxdata/telegraf/pull/365): Twemproxy plugin by @codeb2cc
+- [#317](https://github.com/influxdata/telegraf/issues/317): ZFS plugin, thanks @cornerot!
+- [#364](https://github.com/influxdata/telegraf/pull/364): Support InfluxDB UDP output.
+- [#370](https://github.com/influxdata/telegraf/pull/370): Support specifying multiple outputs, as lists.
+- [#372](https://github.com/influxdata/telegraf/pull/372): Remove gosigar and update go-dockerclient for FreeBSD support. Thanks @MerlinDMC!

### Bugfixes
-- [#331](https://github.com/influxdb/telegraf/pull/331): Dont overwrite host tag in redis plugin.
-- [#336](https://github.com/influxdb/telegraf/pull/336): Mongodb plugin should take 2 measurements.
-- [#351](https://github.com/influxdb/telegraf/issues/317): Fix continual "CREATE DATABASE" in writes
-- [#360](https://github.com/influxdb/telegraf/pull/360): Apply prefix before ShouldPass check. Thanks @sotfo!
+- [#331](https://github.com/influxdata/telegraf/pull/331): Don't overwrite host tag in redis plugin.
+- [#336](https://github.com/influxdata/telegraf/pull/336): Mongodb plugin should take 2 measurements.
+- [#351](https://github.com/influxdata/telegraf/issues/317): Fix continual "CREATE DATABASE" in writes
+- [#360](https://github.com/influxdata/telegraf/pull/360): Apply prefix before ShouldPass check. Thanks @sotfo!

## v0.2.0 [2015-10-27]

@@ -122,38 +197,38 @@ be controlled via the `round_interval` and `flush_jitter` config options.
- Telegraf will now retry metric flushes twice

### Features
-- [#205](https://github.com/influxdb/telegraf/issues/205): Include per-db redis keyspace info
-- [#226](https://github.com/influxdb/telegraf/pull/226): Add timestamps to points in Kafka/AMQP outputs. Thanks @ekini
-- [#90](https://github.com/influxdb/telegraf/issues/90): Add Docker labels to tags in docker plugin
-- [#223](https://github.com/influxdb/telegraf/pull/223): Add port tag to nginx plugin. Thanks @neezgee!
-- [#227](https://github.com/influxdb/telegraf/pull/227): Add command intervals to exec plugin. Thanks @jpalay!
-- [#241](https://github.com/influxdb/telegraf/pull/241): MQTT Output. Thanks @shirou!
+- [#205](https://github.com/influxdata/telegraf/issues/205): Include per-db redis keyspace info
+- [#226](https://github.com/influxdata/telegraf/pull/226): Add timestamps to points in Kafka/AMQP outputs. Thanks @ekini
+- [#90](https://github.com/influxdata/telegraf/issues/90): Add Docker labels to tags in docker plugin
+- [#223](https://github.com/influxdata/telegraf/pull/223): Add port tag to nginx plugin. Thanks @neezgee!
+- [#227](https://github.com/influxdata/telegraf/pull/227): Add command intervals to exec plugin. Thanks @jpalay!
+- [#241](https://github.com/influxdata/telegraf/pull/241): MQTT Output. Thanks @shirou!
- Memory plugin: cached and buffered measurements re-added
- Logging: additional logging for each collection interval, track the number
-of metrics collected and from how many plugins.
-- [#240](https://github.com/influxdb/telegraf/pull/240): procstat plugin, thanks @ranjib!
-- [#244](https://github.com/influxdb/telegraf/pull/244): netstat plugin, thanks @shirou!
-- [#262](https://github.com/influxdb/telegraf/pull/262): zookeeper plugin, thanks @jrxFive!
-- [#237](https://github.com/influxdb/telegraf/pull/237): statsd service plugin, thanks @sparrc
-- [#273](https://github.com/influxdb/telegraf/pull/273): puppet agent plugin, thats @jrxFive!
-- [#280](https://github.com/influxdb/telegraf/issues/280): Use InfluxDB client v2.
-- [#281](https://github.com/influxdb/telegraf/issues/281): Eliminate need to deep copy Batch Points.
-- [#286](https://github.com/influxdb/telegraf/issues/286): bcache plugin, thanks @cornerot!
-- [#287](https://github.com/influxdb/telegraf/issues/287): Batch AMQP output, thanks @ekini!
-- [#301](https://github.com/influxdb/telegraf/issues/301): Collect on even intervals
-- [#298](https://github.com/influxdb/telegraf/pull/298): Support retrying output writes
-- [#300](https://github.com/influxdb/telegraf/issues/300): aerospike plugin. Thanks @oldmantaiter!
-- [#322](https://github.com/influxdb/telegraf/issues/322): Librato output. Thanks @jipperinbham!
+of metrics collected and from how many inputs.
+- [#240](https://github.com/influxdata/telegraf/pull/240): procstat plugin, thanks @ranjib!
+- [#244](https://github.com/influxdata/telegraf/pull/244): netstat plugin, thanks @shirou!
+- [#262](https://github.com/influxdata/telegraf/pull/262): zookeeper plugin, thanks @jrxFive!
+- [#237](https://github.com/influxdata/telegraf/pull/237): statsd service plugin, thanks @sparrc
+- [#273](https://github.com/influxdata/telegraf/pull/273): puppet agent plugin, thanks @jrxFive!
+- [#280](https://github.com/influxdata/telegraf/issues/280): Use InfluxDB client v2.
+- [#281](https://github.com/influxdata/telegraf/issues/281): Eliminate need to deep copy Batch Points.
+- [#286](https://github.com/influxdata/telegraf/issues/286): bcache plugin, thanks @cornerot!
+- [#287](https://github.com/influxdata/telegraf/issues/287): Batch AMQP output, thanks @ekini!
+- [#301](https://github.com/influxdata/telegraf/issues/301): Collect on even intervals
+- [#298](https://github.com/influxdata/telegraf/pull/298): Support retrying output writes
+- [#300](https://github.com/influxdata/telegraf/issues/300): aerospike plugin. Thanks @oldmantaiter!
+- [#322](https://github.com/influxdata/telegraf/issues/322): Librato output. Thanks @jipperinbham!
### Bugfixes -- [#228](https://github.com/influxdb/telegraf/pull/228): New version of package will replace old one. Thanks @ekini! -- [#232](https://github.com/influxdb/telegraf/pull/232): Fix bashism run during deb package installation. Thanks @yankcrime! -- [#261](https://github.com/influxdb/telegraf/issues/260): RabbitMQ panics if wrong credentials given. Thanks @ekini! -- [#245](https://github.com/influxdb/telegraf/issues/245): Document Exec plugin example. Thanks @ekini! -- [#264](https://github.com/influxdb/telegraf/issues/264): logrotate config file fixes. Thanks @linsomniac! -- [#290](https://github.com/influxdb/telegraf/issues/290): Fix some plugins sending their values as strings. -- [#289](https://github.com/influxdb/telegraf/issues/289): Fix accumulator panic on nil tags. -- [#302](https://github.com/influxdb/telegraf/issues/302): Fix `[tags]` getting applied, thanks @gotyaoi! +- [#228](https://github.com/influxdata/telegraf/pull/228): New version of package will replace old one. Thanks @ekini! +- [#232](https://github.com/influxdata/telegraf/pull/232): Fix bashism run during deb package installation. Thanks @yankcrime! +- [#261](https://github.com/influxdata/telegraf/issues/260): RabbitMQ panics if wrong credentials given. Thanks @ekini! +- [#245](https://github.com/influxdata/telegraf/issues/245): Document Exec plugin example. Thanks @ekini! +- [#264](https://github.com/influxdata/telegraf/issues/264): logrotate config file fixes. Thanks @linsomniac! +- [#290](https://github.com/influxdata/telegraf/issues/290): Fix some plugins sending their values as strings. +- [#289](https://github.com/influxdata/telegraf/issues/289): Fix accumulator panic on nil tags. +- [#302](https://github.com/influxdata/telegraf/issues/302): Fix `[tags]` getting applied, thanks @gotyaoi! ## v0.1.9 [2015-09-22] @@ -163,7 +238,7 @@ will still be backwards compatible if only `url` is specified. - The -test flag will now output two metric collections - Support for filtering telegraf outputs on the CLI -- Telegraf will now allow filtering of output sinks on the command-line using the `-outputfilter` -flag, much like how the `-filter` flag works for plugins. +flag, much like how the `-filter` flag works for inputs. - Support for filtering on config-file creation -- Telegraf now supports filtering to -sample-config command. You can now run `telegraf -sample-config -filter cpu -outputfilter influxdb` to get a config @@ -179,27 +254,27 @@ have been renamed for consistency. Some measurements have also been removed from re-added in a "verbose" mode if there is demand for it. ### Features -- [#143](https://github.com/influxdb/telegraf/issues/143): InfluxDB clustering support -- [#181](https://github.com/influxdb/telegraf/issues/181): Makefile GOBIN support. Thanks @Vye! -- [#203](https://github.com/influxdb/telegraf/pull/200): AMQP output. Thanks @ekini! -- [#182](https://github.com/influxdb/telegraf/pull/182): OpenTSDB output. Thanks @rplessl! -- [#187](https://github.com/influxdb/telegraf/pull/187): Retry output sink connections on startup. -- [#220](https://github.com/influxdb/telegraf/pull/220): Add port tag to apache plugin. Thanks @neezgee! -- [#217](https://github.com/influxdb/telegraf/pull/217): Add filtering for output sinks +- [#143](https://github.com/influxdata/telegraf/issues/143): InfluxDB clustering support +- [#181](https://github.com/influxdata/telegraf/issues/181): Makefile GOBIN support. Thanks @Vye! +- [#203](https://github.com/influxdata/telegraf/pull/200): AMQP output. Thanks @ekini! 
+- [#182](https://github.com/influxdata/telegraf/pull/182): OpenTSDB output. Thanks @rplessl! +- [#187](https://github.com/influxdata/telegraf/pull/187): Retry output sink connections on startup. +- [#220](https://github.com/influxdata/telegraf/pull/220): Add port tag to apache plugin. Thanks @neezgee! +- [#217](https://github.com/influxdata/telegraf/pull/217): Add filtering for output sinks and filtering when specifying a config file. ### Bugfixes -- [#170](https://github.com/influxdb/telegraf/issues/170): Systemd support -- [#175](https://github.com/influxdb/telegraf/issues/175): Set write precision before gathering metrics -- [#178](https://github.com/influxdb/telegraf/issues/178): redis plugin, multiple server thread hang bug +- [#170](https://github.com/influxdata/telegraf/issues/170): Systemd support +- [#175](https://github.com/influxdata/telegraf/issues/175): Set write precision before gathering metrics +- [#178](https://github.com/influxdata/telegraf/issues/178): redis plugin, multiple server thread hang bug - Fix net plugin on darwin -- [#84](https://github.com/influxdb/telegraf/issues/84): Fix docker plugin on CentOS. Thanks @neezgee! -- [#189](https://github.com/influxdb/telegraf/pull/189): Fix mem_used_perc. Thanks @mced! -- [#192](https://github.com/influxdb/telegraf/issues/192): Increase compatibility of postgresql plugin. Now supports versions 8.1+ -- [#203](https://github.com/influxdb/telegraf/issues/203): EL5 rpm support. Thanks @ekini! -- [#206](https://github.com/influxdb/telegraf/issues/206): CPU steal/guest values wrong on linux. -- [#212](https://github.com/influxdb/telegraf/issues/212): Add hashbang to postinstall script. Thanks @ekini! -- [#212](https://github.com/influxdb/telegraf/issues/212): Fix makefile warning. Thanks @ekini! +- [#84](https://github.com/influxdata/telegraf/issues/84): Fix docker plugin on CentOS. Thanks @neezgee! +- [#189](https://github.com/influxdata/telegraf/pull/189): Fix mem_used_perc. Thanks @mced! +- [#192](https://github.com/influxdata/telegraf/issues/192): Increase compatibility of postgresql plugin. Now supports versions 8.1+ +- [#203](https://github.com/influxdata/telegraf/issues/203): EL5 rpm support. Thanks @ekini! +- [#206](https://github.com/influxdata/telegraf/issues/206): CPU steal/guest values wrong on linux. +- [#212](https://github.com/influxdata/telegraf/issues/212): Add hashbang to postinstall script. Thanks @ekini! +- [#212](https://github.com/influxdata/telegraf/issues/212): Fix makefile warning. Thanks @ekini! ## v0.1.8 [2015-09-04] @@ -208,106 +283,106 @@ and filtering when specifying a config file. - Now using Go 1.5 to build telegraf ### Features -- [#150](https://github.com/influxdb/telegraf/pull/150): Add Host Uptime metric to system plugin -- [#158](https://github.com/influxdb/telegraf/pull/158): Apache Plugin. Thanks @KPACHbIuLLIAnO4 -- [#159](https://github.com/influxdb/telegraf/pull/159): Use second precision for InfluxDB writes -- [#165](https://github.com/influxdb/telegraf/pull/165): Add additional metrics to mysql plugin. Thanks @nickscript0 -- [#162](https://github.com/influxdb/telegraf/pull/162): Write UTC by default, provide option -- [#166](https://github.com/influxdb/telegraf/pull/166): Upload binaries to S3 -- [#169](https://github.com/influxdb/telegraf/pull/169): Ping plugin +- [#150](https://github.com/influxdata/telegraf/pull/150): Add Host Uptime metric to system plugin +- [#158](https://github.com/influxdata/telegraf/pull/158): Apache Plugin. 
Thanks @KPACHbIuLLIAnO4 +- [#159](https://github.com/influxdata/telegraf/pull/159): Use second precision for InfluxDB writes +- [#165](https://github.com/influxdata/telegraf/pull/165): Add additional metrics to mysql plugin. Thanks @nickscript0 +- [#162](https://github.com/influxdata/telegraf/pull/162): Write UTC by default, provide option +- [#166](https://github.com/influxdata/telegraf/pull/166): Upload binaries to S3 +- [#169](https://github.com/influxdata/telegraf/pull/169): Ping plugin ### Bugfixes ## v0.1.7 [2015-08-28] ### Features -- [#38](https://github.com/influxdb/telegraf/pull/38): Kafka output producer. -- [#133](https://github.com/influxdb/telegraf/pull/133): Add plugin.Gather error logging. Thanks @nickscript0! -- [#136](https://github.com/influxdb/telegraf/issues/136): Add a -usage flag for printing usage of a single plugin. -- [#137](https://github.com/influxdb/telegraf/issues/137): Memcached: fix when a value contains a space -- [#138](https://github.com/influxdb/telegraf/issues/138): MySQL server address tag. -- [#142](https://github.com/influxdb/telegraf/pull/142): Add Description and SampleConfig funcs to output interface +- [#38](https://github.com/influxdata/telegraf/pull/38): Kafka output producer. +- [#133](https://github.com/influxdata/telegraf/pull/133): Add plugin.Gather error logging. Thanks @nickscript0! +- [#136](https://github.com/influxdata/telegraf/issues/136): Add a -usage flag for printing usage of a single plugin. +- [#137](https://github.com/influxdata/telegraf/issues/137): Memcached: fix when a value contains a space +- [#138](https://github.com/influxdata/telegraf/issues/138): MySQL server address tag. +- [#142](https://github.com/influxdata/telegraf/pull/142): Add Description and SampleConfig funcs to output interface - Indent the toml config file for readability ### Bugfixes -- [#128](https://github.com/influxdb/telegraf/issues/128): system_load measurement missing. -- [#129](https://github.com/influxdb/telegraf/issues/129): Latest pkg url fix. -- [#131](https://github.com/influxdb/telegraf/issues/131): Fix memory reporting on linux & darwin. Thanks @subhachandrachandra! -- [#140](https://github.com/influxdb/telegraf/issues/140): Memory plugin prec->perc typo fix. Thanks @brunoqc! +- [#128](https://github.com/influxdata/telegraf/issues/128): system_load measurement missing. +- [#129](https://github.com/influxdata/telegraf/issues/129): Latest pkg url fix. +- [#131](https://github.com/influxdata/telegraf/issues/131): Fix memory reporting on linux & darwin. Thanks @subhachandrachandra! +- [#140](https://github.com/influxdata/telegraf/issues/140): Memory plugin prec->perc typo fix. Thanks @brunoqc! ## v0.1.6 [2015-08-20] ### Features -- [#112](https://github.com/influxdb/telegraf/pull/112): Datadog output. Thanks @jipperinbham! -- [#116](https://github.com/influxdb/telegraf/pull/116): Use godep to vendor all dependencies -- [#120](https://github.com/influxdb/telegraf/pull/120): Httpjson plugin. Thanks @jpalay & @alvaromorales! +- [#112](https://github.com/influxdata/telegraf/pull/112): Datadog output. Thanks @jipperinbham! +- [#116](https://github.com/influxdata/telegraf/pull/116): Use godep to vendor all dependencies +- [#120](https://github.com/influxdata/telegraf/pull/120): Httpjson plugin. Thanks @jpalay & @alvaromorales! ### Bugfixes -- [#113](https://github.com/influxdb/telegraf/issues/113): Update README with Telegraf/InfluxDB compatibility -- [#118](https://github.com/influxdb/telegraf/pull/118): Fix for disk usage stats in Windows. 
Thanks @srfraser! -- [#122](https://github.com/influxdb/telegraf/issues/122): Fix for DiskUsage segv fault. Thanks @srfraser! -- [#126](https://github.com/influxdb/telegraf/issues/126): Nginx plugin not catching net.SplitHostPort error +- [#113](https://github.com/influxdata/telegraf/issues/113): Update README with Telegraf/InfluxDB compatibility +- [#118](https://github.com/influxdata/telegraf/pull/118): Fix for disk usage stats in Windows. Thanks @srfraser! +- [#122](https://github.com/influxdata/telegraf/issues/122): Fix for DiskUsage segv fault. Thanks @srfraser! +- [#126](https://github.com/influxdata/telegraf/issues/126): Nginx plugin not catching net.SplitHostPort error ## v0.1.5 [2015-08-13] ### Features -- [#54](https://github.com/influxdb/telegraf/pull/54): MongoDB plugin. Thanks @jipperinbham! -- [#55](https://github.com/influxdb/telegraf/pull/55): Elasticsearch plugin. Thanks @brocaar! -- [#71](https://github.com/influxdb/telegraf/pull/71): HAProxy plugin. Thanks @kureikain! -- [#72](https://github.com/influxdb/telegraf/pull/72): Adding TokuDB metrics to MySQL. Thanks vadimtk! -- [#73](https://github.com/influxdb/telegraf/pull/73): RabbitMQ plugin. Thanks @ianunruh! -- [#77](https://github.com/influxdb/telegraf/issues/77): Automatically create database. -- [#79](https://github.com/influxdb/telegraf/pull/56): Nginx plugin. Thanks @codeb2cc! -- [#86](https://github.com/influxdb/telegraf/pull/86): Lustre2 plugin. Thanks srfraser! -- [#91](https://github.com/influxdb/telegraf/pull/91): Unit testing -- [#92](https://github.com/influxdb/telegraf/pull/92): Exec plugin. Thanks @alvaromorales! -- [#98](https://github.com/influxdb/telegraf/pull/98): LeoFS plugin. Thanks @mocchira! -- [#103](https://github.com/influxdb/telegraf/pull/103): Filter by metric tags. Thanks @srfraser! -- [#106](https://github.com/influxdb/telegraf/pull/106): Options to filter plugins on startup. Thanks @zepouet! -- [#107](https://github.com/influxdb/telegraf/pull/107): Multiple outputs beyong influxdb. Thanks @jipperinbham! -- [#108](https://github.com/influxdb/telegraf/issues/108): Support setting per-CPU and total-CPU gathering. -- [#111](https://github.com/influxdb/telegraf/pull/111): Report CPU Usage in cpu plugin. Thanks @jpalay! +- [#54](https://github.com/influxdata/telegraf/pull/54): MongoDB plugin. Thanks @jipperinbham! +- [#55](https://github.com/influxdata/telegraf/pull/55): Elasticsearch plugin. Thanks @brocaar! +- [#71](https://github.com/influxdata/telegraf/pull/71): HAProxy plugin. Thanks @kureikain! +- [#72](https://github.com/influxdata/telegraf/pull/72): Adding TokuDB metrics to MySQL. Thanks vadimtk! +- [#73](https://github.com/influxdata/telegraf/pull/73): RabbitMQ plugin. Thanks @ianunruh! +- [#77](https://github.com/influxdata/telegraf/issues/77): Automatically create database. +- [#79](https://github.com/influxdata/telegraf/pull/56): Nginx plugin. Thanks @codeb2cc! +- [#86](https://github.com/influxdata/telegraf/pull/86): Lustre2 plugin. Thanks srfraser! +- [#91](https://github.com/influxdata/telegraf/pull/91): Unit testing +- [#92](https://github.com/influxdata/telegraf/pull/92): Exec plugin. Thanks @alvaromorales! +- [#98](https://github.com/influxdata/telegraf/pull/98): LeoFS plugin. Thanks @mocchira! +- [#103](https://github.com/influxdata/telegraf/pull/103): Filter by metric tags. Thanks @srfraser! +- [#106](https://github.com/influxdata/telegraf/pull/106): Options to filter plugins on startup. Thanks @zepouet! 
+- [#107](https://github.com/influxdata/telegraf/pull/107): Multiple outputs beyond influxdb. Thanks @jipperinbham!
+- [#108](https://github.com/influxdata/telegraf/issues/108): Support setting per-CPU and total-CPU gathering.
+- [#111](https://github.com/influxdata/telegraf/pull/111): Report CPU Usage in cpu plugin. Thanks @jpalay!

### Bugfixes
-- [#85](https://github.com/influxdb/telegraf/pull/85): Fix GetLocalHost testutil function for mac users
-- [#89](https://github.com/influxdb/telegraf/pull/89): go fmt fixes
-- [#94](https://github.com/influxdb/telegraf/pull/94): Fix for issue #93, explicitly call sarama.v1 -> sarama
-- [#101](https://github.com/influxdb/telegraf/issues/101): switch back from master branch if building locally
-- [#99](https://github.com/influxdb/telegraf/issues/99): update integer output to new InfluxDB line protocol format
+- [#85](https://github.com/influxdata/telegraf/pull/85): Fix GetLocalHost testutil function for mac users
+- [#89](https://github.com/influxdata/telegraf/pull/89): go fmt fixes
+- [#94](https://github.com/influxdata/telegraf/pull/94): Fix for issue #93, explicitly call sarama.v1 -> sarama
+- [#101](https://github.com/influxdata/telegraf/issues/101): switch back from master branch if building locally
+- [#99](https://github.com/influxdata/telegraf/issues/99): update integer output to new InfluxDB line protocol format

## v0.1.4 [2015-07-09]

### Features
-- [#56](https://github.com/influxdb/telegraf/pull/56): Update README for Kafka plugin. Thanks @EmilS!
+- [#56](https://github.com/influxdata/telegraf/pull/56): Update README for Kafka plugin. Thanks @EmilS!

### Bugfixes
-- [#50](https://github.com/influxdb/telegraf/pull/50): Fix init.sh script to use telegraf directory. Thanks @jseriff!
-- [#52](https://github.com/influxdb/telegraf/pull/52): Update CHANGELOG to reference updated directory. Thanks @benfb!
+- [#50](https://github.com/influxdata/telegraf/pull/50): Fix init.sh script to use telegraf directory. Thanks @jseriff!
+- [#52](https://github.com/influxdata/telegraf/pull/52): Update CHANGELOG to reference updated directory. Thanks @benfb!

## v0.1.3 [2015-07-05]

### Features
-- [#35](https://github.com/influxdb/telegraf/pull/35): Add Kafka plugin. Thanks @EmilS!
-- [#47](https://github.com/influxdb/telegraf/pull/47): Add RethinkDB plugin. Thanks @jipperinbham!
+- [#35](https://github.com/influxdata/telegraf/pull/35): Add Kafka plugin. Thanks @EmilS!
+- [#47](https://github.com/influxdata/telegraf/pull/47): Add RethinkDB plugin. Thanks @jipperinbham!

### Bugfixes
-- [#45](https://github.com/influxdb/telegraf/pull/45): Skip disk tags that don't have a value. Thanks @jhofeditz!
-- [#43](https://github.com/influxdb/telegraf/pull/43): Fix bug in MySQL plugin. Thanks @marcosnils!
+- [#45](https://github.com/influxdata/telegraf/pull/45): Skip disk tags that don't have a value. Thanks @jhofeditz!
+- [#43](https://github.com/influxdata/telegraf/pull/43): Fix bug in MySQL plugin. Thanks @marcosnils!

## v0.1.2 [2015-07-01]

### Features
-- [#12](https://github.com/influxdb/telegraf/pull/12): Add Linux/ARM to the list of built binaries. Thanks @voxxit!
-- [#14](https://github.com/influxdb/telegraf/pull/14): Clarify the S3 buckets that Telegraf is pushed to.
-- [#16](https://github.com/influxdb/telegraf/pull/16): Convert Redis to use URI, support Redis AUTH. Thanks @jipperinbham!
-- [#21](https://github.com/influxdb/telegraf/pull/21): Add memcached plugin. Thanks @Yukki!
+- [#12](https://github.com/influxdata/telegraf/pull/12): Add Linux/ARM to the list of built binaries. Thanks @voxxit! +- [#14](https://github.com/influxdata/telegraf/pull/14): Clarify the S3 buckets that Telegraf is pushed to. +- [#16](https://github.com/influxdata/telegraf/pull/16): Convert Redis to use URI, support Redis AUTH. Thanks @jipperinbham! +- [#21](https://github.com/influxdata/telegraf/pull/21): Add memcached plugin. Thanks @Yukki! ### Bugfixes -- [#13](https://github.com/influxdb/telegraf/pull/13): Fix the packaging script. -- [#19](https://github.com/influxdb/telegraf/pull/19): Add host name to metric tags. Thanks @sherifzain! -- [#20](https://github.com/influxdb/telegraf/pull/20): Fix race condition with accumulator mutex. Thanks @nkatsaros! -- [#23](https://github.com/influxdb/telegraf/pull/23): Change name of folder for packages. Thanks @colinrymer! -- [#32](https://github.com/influxdb/telegraf/pull/32): Fix spelling of memoory -> memory. Thanks @tylernisonoff! +- [#13](https://github.com/influxdata/telegraf/pull/13): Fix the packaging script. +- [#19](https://github.com/influxdata/telegraf/pull/19): Add host name to metric tags. Thanks @sherifzain! +- [#20](https://github.com/influxdata/telegraf/pull/20): Fix race condition with accumulator mutex. Thanks @nkatsaros! +- [#23](https://github.com/influxdata/telegraf/pull/23): Change name of folder for packages. Thanks @colinrymer! +- [#32](https://github.com/influxdata/telegraf/pull/32): Fix spelling of memoory -> memory. Thanks @tylernisonoff! ## v0.1.1 [2015-06-19] diff --git a/CONFIGURATION.md b/CONFIGURATION.md new file mode 100644 index 000000000..ee79da98a --- /dev/null +++ b/CONFIGURATION.md @@ -0,0 +1,199 @@ +# Telegraf Configuration + +## Generating a Configuration File + +A default Telegraf config file can be generated using the `-sample-config` flag, +like this: `telegraf -sample-config` + +To generate a file with specific inputs and outputs, you can use the +`-input-filter` and `-output-filter` flags, like this: +`telegraf -sample-config -input-filter cpu:mem:net:swap -output-filter influxdb:kafka` + +## Telegraf Agent Configuration + +Telegraf has a few options you can configure under the `agent` section of the +config. + +* **hostname**: The hostname is passed as a tag. By default this will be +the value returned by `hostname` on the machine running Telegraf. +You can override that value here. +* **interval**: How often to gather metrics. Uses a simple number + +unit parser, e.g. "10s" for 10 seconds or "5m" for 5 minutes. +* **debug**: Set to true to gather and send metrics to STDOUT as well as +InfluxDB. + +## Input Configuration + +There are some configuration options that are configurable per input: + +* **name_override**: Override the base name of the measurement. +(Default is the name of the input). +* **name_prefix**: Specifies a prefix to attach to the measurement name. +* **name_suffix**: Specifies a suffix to attach to the measurement name. +* **tags**: A map of tags to apply to a specific input's measurements. +* **interval**: How often to gather this metric. Normal plugins use a single +global interval, but if one particular input should be run less or more often, +you can configure that here. + +### Input Filters + +There are also filters that can be configured per input: + +* **pass**: An array of strings that is used to filter metrics generated by the +current input. Each string in the array is tested as a glob match against field names +and if it matches, the field is emitted. 
+* **drop**: The inverse of pass; if a field name matches, it is not emitted.
+* **tagpass**: tag names and arrays of strings that are used to filter
+measurements generated by the current input. Each string in the array is tested
+as a glob match against the tag value, and if it matches, the measurement is
+emitted.
+* **tagdrop**: The inverse of tagpass. If a tag matches, the measurement is not
+emitted. This is tested on measurements that have passed the tagpass test.
+
+### Input Configuration Examples
+
+This is a full working config that will output CPU data to an InfluxDB instance
+at 192.168.59.103:8086, tagging measurements with dc="denver-1". It will output
+measurements at a 10s interval and will collect per-cpu data, dropping any
+fields which begin with `time_`.
+
+```toml
+[tags]
+  dc = "denver-1"
+
+[agent]
+  interval = "10s"
+
+# OUTPUTS
+[[outputs.influxdb]]
+  url = "http://192.168.59.103:8086" # required.
+  database = "telegraf" # required.
+  precision = "s"
+
+# INPUTS
+[[inputs.cpu]]
+  percpu = true
+  totalcpu = false
+  # filter all fields beginning with 'time_'
+  drop = ["time_*"]
+```
+
+### Input Config: tagpass and tagdrop
+
+```toml
+[[inputs.cpu]]
+  percpu = true
+  totalcpu = false
+  drop = ["cpu_time"]
+  # Don't collect CPU data for cpu6 & cpu7
+  [inputs.cpu.tagdrop]
+    cpu = [ "cpu6", "cpu7" ]
+
+[[inputs.disk]]
+  [inputs.disk.tagpass]
+    # tagpass conditions are OR, not AND.
+    # If the (filesystem is ext4 or xfs) OR (the path is /opt or /home)
+    # then the metric passes
+    fstype = [ "ext4", "xfs" ]
+    # Globs can also be used on the tag values
+    path = [ "/opt", "/home*" ]
+```
+
+### Input Config: pass and drop
+
+```toml
+# Drop all metrics for guest & steal CPU usage
+[[inputs.cpu]]
+  percpu = false
+  totalcpu = true
+  drop = ["usage_guest", "usage_steal"]
+
+# Only store inode related metrics for disks
+[[inputs.disk]]
+  pass = ["inodes*"]
+```
+
+### Input Config: prefix, suffix, and override
+
+This input will emit measurements with the name `cpu_total`.
+
+```toml
+[[inputs.cpu]]
+  name_suffix = "_total"
+  percpu = false
+  totalcpu = true
+```
+
+This will emit measurements with the name `foobar`.
+
+```toml
+[[inputs.cpu]]
+  name_override = "foobar"
+  percpu = false
+  totalcpu = true
+```
+
+### Input Config: tags
+
+This input will emit measurements with two additional tags: `tag1=foo` and
+`tag2=bar`.
+
+```toml
+[[inputs.cpu]]
+  percpu = false
+  totalcpu = true
+  [inputs.cpu.tags]
+    tag1 = "foo"
+    tag2 = "bar"
+```
+
+### Multiple inputs of the same type
+
+Additional inputs (or outputs) of the same type can be specified;
+just define more instances in the config file. It is highly recommended that
+you utilize `name_override`, `name_prefix`, or `name_suffix` config options
+to avoid measurement collisions:
+
+```toml
+[[inputs.cpu]]
+  percpu = false
+  totalcpu = true
+
+[[inputs.cpu]]
+  percpu = true
+  totalcpu = false
+  name_override = "percpu_usage"
+  drop = ["cpu_time*"]
+```
+
+## Output Configuration
+
+Telegraf also supports specifying multiple output sinks to send data to.
+Configuring each output sink is different, but examples can be
+found by running `telegraf -sample-config`.
+
+Outputs also support the same configurable options as inputs
+(pass, drop, tagpass, tagdrop).
+
+```toml
+[[outputs.influxdb]]
+  urls = [ "http://localhost:8086" ]
+  database = "telegraf"
+  precision = "s"
+  # Drop all measurements that start with "aerospike"
+  drop = ["aerospike*"]
+
+[[outputs.influxdb]]
+  urls = [ "http://localhost:8086" ]
+  database = "telegraf-aerospike-data"
+  precision = "s"
+  # Only accept aerospike data:
+  pass = ["aerospike*"]
+
+[[outputs.influxdb]]
+  urls = [ "http://localhost:8086" ]
+  database = "telegraf-cpu0-data"
+  precision = "s"
+  # Only store measurements where the tag "cpu" matches the value "cpu0"
+  [outputs.influxdb.tagpass]
+    cpu = ["cpu0"]
+```
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 7723f0605..f7e2ec86f 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -1,35 +1,48 @@
+## Steps for Contributing:
+
+1. [Sign the CLA](https://github.com/influxdata/telegraf/blob/master/CONTRIBUTING.md#sign-the-cla)
+1. Write your input or output plugin (see below for details)
+1. Add your plugin to `plugins/inputs/all/all.go` or `plugins/outputs/all/all.go`
+1. If your plugin requires a new Go package,
+[add it](https://github.com/influxdata/telegraf/blob/master/CONTRIBUTING.md#adding-a-dependency)
+
## Sign the CLA

Before we can merge a pull request, you will need to sign the CLA,
which can be found [on our website](http://influxdb.com/community/cla.html)

-## Plugins
+## Adding a dependency

-This section is for developers who want to create new collection plugins.
+Assuming you can already build the project:
+
+1. `go get github.com/sparrc/gdm`
+1. `gdm save`
+
+## Input Plugins
+
+This section is for developers who want to create new collection inputs.
Telegraf is entirely plugin driven. This interface allows for operators to
pick and choose what is gathered, and makes it easy for developers
to create new ways of generating metrics.

Plugin authorship is kept as simple as possible to encourage people to develop
-and submit new plugins.
+and submit new inputs.

-### Plugin Guidelines
+### Input Plugin Guidelines

-* A plugin must conform to the `plugins.Plugin` interface.
-* Each generated metric automatically has the name of the plugin that generated
-it prepended. This is to keep plugins honest.
-* Plugins should call `plugins.Add` in their `init` function to register themselves.
+* A plugin must conform to the `inputs.Input` interface.
+* Input Plugins should call `inputs.Add` in their `init` function to register themselves.
See below for a quick example.
-* To be available within Telegraf itself, plugins must add themselves to the
-`github.com/influxdb/telegraf/plugins/all/all.go` file.
+* Input Plugins must be added to the
+`github.com/influxdata/telegraf/plugins/inputs/all/all.go` file.
* The `SampleConfig` function should return valid toml that describes how the
plugin can be configured. This is included in `telegraf -sample-config`.
* The `Description` function should say in one line what this plugin does.

-### Plugin interface
+### Input interface

```go
-type Plugin interface {
+type Input interface {
	SampleConfig() string
	Description() string
	Gather(Accumulator) error
@@ -52,52 +65,32 @@ type Accumulator interface {

The way that a plugin emits metrics is by interacting with the Accumulator.

The `Add` function takes 3 arguments:
-* **measurement**: A string description of the metric. For instance `bytes_read` or `faults`.
+* **measurement**: A string description of the metric. For instance `bytes_read`
* **value**: A value for the metric. This accepts 5 different types of value: * **int**: The most common type. All int types are accepted but favor using `int64` Useful for counters, etc. * **float**: Favor `float64`, useful for gauges, percentages, etc. - * **bool**: `true` or `false`, useful to indicate the presence of a state. `light_on`, etc. - * **string**: Typically used to indicate a message, or some kind of freeform information. - * **time.Time**: Useful for indicating when a state last occurred, for instance `light_on_since`. + * **bool**: `true` or `false`, useful to indicate the presence of a state. `light_on`, + etc. + * **string**: Typically used to indicate a message, or some kind of freeform + information. + * **time.Time**: Useful for indicating when a state last occurred, for instance ` + light_on_since`. * **tags**: This is a map of strings to strings to describe the where or who about the metric. For instance, the `net` plugin adds a tag named `"interface"` set to the name of the network interface, like `"eth0"`. -The `AddFieldsWithTime` allows multiple values for a point to be passed. The values -used are the same type profile as **value** above. The **timestamp** argument -allows a point to be registered as having occurred at an arbitrary time. - Let's say you've written a plugin that emits metrics about processes on the current host. -```go - -type Process struct { - CPUTime float64 - MemoryBytes int64 - PID int -} - -func Gather(acc plugins.Accumulator) error { - for _, process := range system.Processes() { - tags := map[string]string { - "pid": fmt.Sprintf("%d", process.Pid), - } - - acc.Add("cpu", process.CPUTime, tags, time.Now()) - acc.Add("memory", process.MemoryBytes, tags, time.Now()) - } -} -``` - -### Plugin Example +### Input Plugin Example ```go package simple // simple.go -import "github.com/influxdb/telegraf/plugins" +import "github.com/influxdata/telegraf/plugins/inputs" type Simple struct { Ok bool @@ -111,7 +104,7 @@ func (s *Simple) SampleConfig() string { return "ok = true # indicate if everything is fine" } -func (s *Simple) Gather(acc plugins.Accumulator) error { +func (s *Simple) Gather(acc inputs.Accumulator) error { if s.Ok { acc.Add("state", "pretty good", nil) } else { @@ -122,19 +115,19 @@ func (s *Simple) Gather(acc plugins.Accumulator) error { } func init() { - plugins.Add("simple", func() plugins.Plugin { return &Simple{} }) + inputs.Add("simple", func() inputs.Input { return &Simple{} }) } ``` -## Service Plugins +## Service Input Plugins This section is for developers who want to create new "service" collection -plugins. A service plugin differs from a regular plugin in that it operates +inputs. A service plugin differs from a regular plugin in that it operates a background service while Telegraf is running. One example would be the `statsd` plugin, which operates a statsd server. -Service Plugins are substantially more complicated than a regular plugin, as they -will require threads and locks to verify data integrity. Service Plugins should +Service Input Plugins are substantially more complicated than a regular plugin, as they +will require threads and locks to verify data integrity. Service Input Plugins should be avoided unless there is no way to create their behavior with a regular plugin. Their interface is quite similar to a regular plugin, with the addition of `Start()` @@ -143,7 +136,7 @@ and `Stop()` methods. 
### Service Plugin Guidelines

* Same as the `Plugin` guidelines, except that they must conform to the
-`plugins.ServicePlugin` interface.
+`inputs.ServiceInput` interface.

### Service Plugin interface

@@ -157,19 +150,19 @@ type ServicePlugin interface {
}
```

-## Outputs
+## Output Plugins

This section is for developers who want to create a new output sink. Outputs
are created in a similar manner as collection plugins, and their interface has
similar constructs.

-### Output Guidelines
+### Output Plugin Guidelines

* An output must conform to the `outputs.Output` interface.
* Outputs should call `outputs.Add` in their `init` function to register themselves.
See below for a quick example.
* To be available within Telegraf itself, plugins must add themselves to the
-`github.com/influxdb/telegraf/outputs/all/all.go` file.
+`github.com/influxdata/telegraf/plugins/outputs/all/all.go` file.
* The `SampleConfig` function should return valid toml that describes how the
output can be configured. This is included in `telegraf -sample-config`.
* The `Description` function should say in one line what this output does.

@@ -193,7 +186,7 @@ package simpleoutput

// simpleoutput.go

-import "github.com/influxdb/telegraf/outputs"
+import "github.com/influxdata/telegraf/plugins/outputs"

type Simple struct {
	Ok bool
@@ -230,7 +223,7 @@ func init() {

```

-## Service Outputs
+## Service Output Plugins

This section is for developers who want to create a new "service" output. A
service output differs from a regular output in that it operates a background service
@@ -243,7 +236,7 @@ and `Stop()` methods.

### Service Output Guidelines

* Same as the `Output` guidelines, except that they must conform to the
-`plugins.ServiceOutput` interface.
+`outputs.ServiceOutput` interface.

### Service Output interface

@@ -274,7 +267,7 @@ which would take some time to replicate.

To overcome this situation we've decided to use docker containers to provide a
fast and reproducible environment to test those services which require it.
For other situations
-(i.e: https://github.com/influxdb/telegraf/blob/master/plugins/redis/redis_test.go )
+(e.g. https://github.com/influxdata/telegraf/blob/master/plugins/redis/redis_test.go)
a simple mock will suffice.
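+A mock-based test might look like the following sketch (hypothetical names; it
+assumes, purely for illustration, that `inputs.Accumulator` exposes only the
+three-argument `Add` described above, and it reuses the `Simple` plugin from
+the earlier example):
+
+```go
+package simple
+
+import "testing"
+
+// mockAccumulator is a hypothetical in-memory stand-in for the Accumulator,
+// recording the last value emitted for each measurement. It assumes the
+// three-argument Add(measurement, value, tags) shape described earlier.
+type mockAccumulator struct {
+	points map[string]interface{}
+}
+
+func (m *mockAccumulator) Add(measurement string, value interface{}, tags map[string]string) {
+	m.points[measurement] = value
+}
+
+func TestSimpleGatherOk(t *testing.T) {
+	acc := &mockAccumulator{points: make(map[string]interface{})}
+	s := &Simple{Ok: true}
+
+	if err := s.Gather(acc); err != nil {
+		t.Fatal(err)
+	}
+	if got := acc.points["state"]; got != "pretty good" {
+		t.Errorf(`state = %v, want "pretty good"`, got)
+	}
+}
+```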
To execute Telegraf tests follow these simple steps: diff --git a/Godeps b/Godeps index d17d8dd25..9f46fd79b 100644 --- a/Godeps +++ b/Godeps @@ -1,52 +1,54 @@ git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git dbd8d5c40a582eb9adacde36b47932b3a3ad0034 -github.com/Shopify/sarama 159e9990b0796511607dd0d7aaa3eb37d1829d16 -github.com/Sirupsen/logrus 446d1c146faa8ed3f4218f056fcd165f6bcfda81 +github.com/Shopify/sarama d37c73f2b2bce85f7fa16b6a550d26c5372892ef +github.com/Sirupsen/logrus f7f79f729e0fbe2fcc061db48a9ba0263f588252 github.com/amir/raidman 6a8e089bbe32e6b907feae5ba688841974b3c339 -github.com/armon/go-metrics 06b60999766278efd6d2b5d8418a58c3d5b99e87 -github.com/aws/aws-sdk-go 999b1591218c36d5050d1ba7266eba956e65965f +github.com/armon/go-metrics 345426c77237ece5dab0e1605c3e4b35c3f54757 +github.com/aws/aws-sdk-go 3ad0b07b44c22c21c734d1094981540b7a11e942 github.com/beorn7/perks b965b613227fddccbfffe13eae360ed3fa822f8d -github.com/boltdb/bolt b34b35ea8d06bb9ae69d9a349119252e4c1d8ee0 +github.com/boltdb/bolt 6465994716bf6400605746e79224cf1e7ed68725 github.com/cenkalti/backoff 4dc77674aceaabba2c7e3da25d4c823edfb73f99 -github.com/dancannon/gorethink a124c9663325ed9f7fb669d17c69961b59151e6e +github.com/dancannon/gorethink ff457cac6a529d9749d841a733d76e8305cba3c8 github.com/davecgh/go-spew 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d -github.com/eapache/go-resiliency f341fb4dca45128e4aa86389fa6a675d55fe25e1 +github.com/eapache/go-resiliency b86b1ec0dd4209a588dc1285cdd471e73525c0b3 github.com/eapache/queue ded5959c0d4e360646dc9e9908cff48666781367 -github.com/fsouza/go-dockerclient 7177a9e3543b0891a5d91dbf7051e0f71455c8ef -github.com/go-ini/ini 9314fb0ef64171d6a3d0a4fa570dfa33441cba05 -github.com/go-sql-driver/mysql d512f204a577a4ab037a1816604c48c9c13210be -github.com/gogo/protobuf e492fd34b12d0230755c45aa5fb1e1eea6a84aa9 -github.com/golang/protobuf 68415e7123da32b07eab49c96d2c4d6158360e9b +github.com/fsouza/go-dockerclient 6fb38e6bb3d544d7eb5b55fd396cd4e6850802d8 +github.com/go-ini/ini afbd495e5aaea13597b5e14fe514ddeaa4d76fc3 +github.com/go-sql-driver/mysql 72ea5d0b32a04c67710bf63e97095d82aea5f352 +github.com/gogo/protobuf c57e439bad574c2e0877ff18d514badcfced004d +github.com/golang/protobuf 2402d76f3d41f928c7902a765dfc872356dd3aad github.com/golang/snappy 723cc1e459b8eea2dea4583200fd60757d40097a github.com/gonuts/go-shellquote e842a11b24c6abfb3dd27af69a17f482e4b483c2 -github.com/hailocab/go-hostpool 0637eae892be221164aff5fcbccc57171aea6406 +github.com/hailocab/go-hostpool 50839ee41f32bfca8d03a183031aa634b2dc1c64 github.com/hashicorp/go-msgpack fa3f63826f7c23912c15263591e65d54d080b458 -github.com/hashicorp/raft d136cd15dfb7876fd7c89cad1995bc4f19ceb294 +github.com/hashicorp/raft b95f335efee1992886864389183ebda0c0a5d0f6 github.com/hashicorp/raft-boltdb d1e82c1ec3f15ee991f7cc7ffd5b67ff6f5bbaee -github.com/influxdb/influxdb 69a7664f2d4b75aec300b7cbfc7e57c971721f04 +github.com/influxdata/influxdb 0e0f85a0c1fd1788ae4f9145531b02c539cfa5b5 +github.com/influxdb/influxdb 0e0f85a0c1fd1788ae4f9145531b02c539cfa5b5 github.com/jmespath/go-jmespath c01cf91b011868172fdcd9f41838e80c9d716264 -github.com/klauspost/crc32 0aff1ea9c20474c3901672b5b6ead0ac611156de -github.com/lib/pq 11fc39a580a008f1f39bb3d11d984fb34ed778d9 +github.com/klauspost/crc32 999f3125931f6557b991b2f8472172bdfa578d38 +github.com/lib/pq 8ad2b298cadd691a77015666a5372eae5dbfac8f github.com/matttproud/golang_protobuf_extensions d0c3fe89de86839aecf2e0579c40ba3bb336a453 github.com/mreiferson/go-snappystream 028eae7ab5c4c9e2d1cb4c4ca1e53259bbe7e504 
github.com/naoina/go-stringutil 6b638e95a32d0c1131db0e7fe83775cbea4a0d0b github.com/naoina/toml 751171607256bb66e64c9f0220c00662420c38e9 github.com/nsqio/go-nsq 2118015c120962edc5d03325c680daf3163a8b5f -github.com/pborman/uuid cccd189d45f7ac3368a0d127efb7f4d08ae0b655 -github.com/pmezard/go-difflib e8554b8641db39598be7f6342874b958f12ae1d4 +github.com/pborman/uuid dee7705ef7b324f27ceb85a121c61f2c2e8ce988 +github.com/pmezard/go-difflib 792786c7400a136282c1664665ae0a8db921c6c2 github.com/prometheus/client_golang 67994f177195311c3ea3d4407ed0175e34a4256f github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6 -github.com/prometheus/common 56b90312e937d43b930f06a59bf0d6a4ae1944bc +github.com/prometheus/common 0a3005bb37bc411040083a55372e77c405f6464c github.com/prometheus/procfs 406e5b7bfd8201a36e2bb5f7bdae0b03380c2ce8 github.com/samuel/go-zookeeper 218e9c81c0dd8b3b18172b2bbfad92cc7d6db55f -github.com/shirou/gopsutil fc932d9090f13a84fb4b3cb8baa124610cab184c +github.com/shirou/gopsutil 8850f58d7035653e1ab90711481954c8ca1b9813 github.com/streadway/amqp b4f3ceab0337f013208d31348b578d83c0064744 github.com/stretchr/objx 1a9d0bb9f541897e62256577b352fdbc1fb4fd94 -github.com/stretchr/testify e3a8ff8ce36581f87a15341206f205b1da467059 +github.com/stretchr/testify f390dcf405f7b83c997eac1b06768bb9f44dec18 github.com/wvanbergen/kafka 1a8639a45164fcc245d5c7b4bd3ccfbd1a0ffbf3 github.com/wvanbergen/kazoo-go 0f768712ae6f76454f987c3356177e138df258f8 -golang.org/x/crypto 7b85b097bf7527677d54d3220065e966a0e3b613 -golang.org/x/net 1796f9b8b7178e3c7587dff118d3bb9d37f9b0b3 -gopkg.in/dancannon/gorethink.v1 a124c9663325ed9f7fb669d17c69961b59151e6e +golang.org/x/crypto 3760e016850398b85094c4c99e955b8c3dea5711 +golang.org/x/net 72aa00c6241a8013dc9b040abb45f57edbe73945 +golang.org/x/text cf4986612c83df6c55578ba198316d1684a9a287 +gopkg.in/dancannon/gorethink.v1 e2cef022d0495329dfb0635991de76efcab5cf50 gopkg.in/fatih/pool.v2 cba550ebf9bce999a02e963296d4bc7a486cb715 -gopkg.in/mgo.v2 e30de8ac9ae3b30df7065f766c71f88bba7d4e49 +gopkg.in/mgo.v2 03c9f3ee4c14c8e51ee521a6a7d0425658dd6f64 gopkg.in/yaml.v2 f7716cbe52baa25d2e9b0d0da546fcf909fc16b4 diff --git a/Makefile b/Makefile index b9de93ffb..3dedfb703 100644 --- a/Makefile +++ b/Makefile @@ -21,21 +21,8 @@ dev: prepare "-X main.Version=$(VERSION)" \ ./cmd/telegraf/telegraf.go -# Build linux 64-bit, 32-bit and arm architectures -build-linux-bins: prepare - GOARCH=amd64 GOOS=linux go build -o telegraf_linux_amd64 \ - -ldflags "-X main.Version=$(VERSION)" \ - ./cmd/telegraf/telegraf.go - GOARCH=386 GOOS=linux go build -o telegraf_linux_386 \ - -ldflags "-X main.Version=$(VERSION)" \ - ./cmd/telegraf/telegraf.go - GOARCH=arm GOOS=linux go build -o telegraf_linux_arm \ - -ldflags "-X main.Version=$(VERSION)" \ - ./cmd/telegraf/telegraf.go - # Get dependencies and use gdm to checkout changesets prepare: - go get ./... go get github.com/sparrc/gdm gdm restore diff --git a/README.md b/README.md index 4130d55bb..7207db8a9 100644 --- a/README.md +++ b/README.md @@ -1,30 +1,43 @@ -# Telegraf - A native agent for InfluxDB [![Circle CI](https://circleci.com/gh/influxdata/telegraf.svg?style=svg)](https://circleci.com/gh/influxdata/telegraf) +# Telegraf [![Circle CI](https://circleci.com/gh/influxdata/telegraf.svg?style=svg)](https://circleci.com/gh/influxdata/telegraf) Telegraf is an agent written in Go for collecting metrics from the system it's -running on, or from other services, and writing them into InfluxDB. 
+running on, or from other services, and writing them into InfluxDB or other +[outputs](https://github.com/influxdata/telegraf#supported-output-plugins). Design goals are to have a minimal memory footprint with a plugin system so that developers in the community can easily add support for collecting metrics from well known services (like Hadoop, Postgres, or Redis) and third party APIs (like Mailchimp, AWS CloudWatch, or Google Analytics). -We'll eagerly accept pull requests for new plugins and will manage the set of -plugins that Telegraf supports. See the -[contributing guide](CONTRIBUTING.md) for instructions on -writing new plugins. +New input and output plugins are designed to be easy to contribute; +we'll eagerly accept pull +requests and will manage the set of plugins that Telegraf supports. +See the [contributing guide](CONTRIBUTING.md) for instructions on writing +new plugins. ## Installation: +NOTE: Telegraf 0.10.x is **not** backwards-compatible with previous versions +of telegraf, in both the database layout and the configuration file. 0.2.x +will continue to be supported; see below for download links. + +For more details on the differences between Telegraf 0.2.x and 0.10.x, see +the [release blog post](https://influxdata.com/blog/announcing-telegraf-0-10-0/). + ### Linux deb and rpm packages: Latest: +* http://get.influxdb.org/telegraf/telegraf_0.10.0-1_amd64.deb +* http://get.influxdb.org/telegraf/telegraf-0.10.0-1.x86_64.rpm + +0.2.x: * http://get.influxdb.org/telegraf/telegraf_0.2.4_amd64.deb * http://get.influxdb.org/telegraf/telegraf-0.2.4-1.x86_64.rpm ##### Package instructions: -* Telegraf binary is installed in `/opt/telegraf/telegraf` -* Telegraf daemon configuration file is in `/etc/opt/telegraf/telegraf.conf` +* Telegraf binary is installed in `/usr/bin/telegraf` +* Telegraf daemon configuration file is in `/etc/telegraf/telegraf.conf` * On sysv systems, the telegraf daemon can be controlled via `service telegraf [action]` * On systemd systems (such as Ubuntu 15+), the telegraf daemon can be @@ -33,6 +46,11 @@ controlled via `systemctl [action] telegraf` ### Linux binaries: Latest: +* http://get.influxdb.org/telegraf/telegraf-0.10.0_linux_amd64.tar.gz +* http://get.influxdb.org/telegraf/telegraf-0.10.0_linux_386.tar.gz +* http://get.influxdb.org/telegraf/telegraf-0.10.0_linux_arm.tar.gz + +0.2.x: * http://get.influxdb.org/telegraf/telegraf_linux_amd64_0.2.4.tar.gz * http://get.influxdb.org/telegraf/telegraf_linux_386_0.2.4.tar.gz * http://get.influxdb.org/telegraf/telegraf_linux_arm_0.2.4.tar.gz @@ -51,182 +69,83 @@ brew update brew install telegraf ``` -### Version 0.3.0 Beta - -Version 0.3.0 will introduce many new breaking changes to Telegraf. For starters, -plugin measurements will be aggregated into fields. This means that there will no -longer be a `cpu_usage_idle` measurement, there will be a `cpu` measurement with -a `usage_idle` field. - -There will also be config file changes, meaning that your 0.2.x Telegraf config -files will no longer work properly. It is recommended that you use the -`-sample-config` flag to generate a new config file to see what the changes are. -You can also read the -[0.3.0 configuration guide](https://github.com/influxdb/telegraf/blob/0.3.0/CONFIGURATION.md) -to see some of the new features and options available.
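To make the removed 0.3.0 notes above concrete: stats that used to be separate measurements (such as `cpu_usage_idle`) become fields on a single `cpu` measurement. A minimal sketch using the `github.com/influxdata/influxdb/client/v2` package that this diff migrates to; the tag and field values are illustrative only:

```go
package main

import (
	"fmt"
	"time"

	"github.com/influxdata/influxdb/client/v2"
)

func main() {
	// Old layout: one point per stat, e.g. a "cpu_usage_idle" measurement.
	// New layout: a single "cpu" measurement with each stat as a field.
	tags := map[string]string{"cpu": "cpu0"} // illustrative tag
	fields := map[string]interface{}{
		"usage_idle": 90.0, // illustrative values
		"usage_user": 7.5,
	}
	pt, err := client.NewPoint("cpu", tags, fields, time.Now())
	if err != nil {
		panic(err)
	}
	fmt.Println(pt.String())
	// => cpu,cpu=cpu0 usage_idle=90,usage_user=7.5 <timestamp>
}
```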
- -You can read more about the justifications for the aggregated measurements -[here](https://github.com/influxdb/telegraf/issues/152), and a more detailed -breakdown of the work [here](https://github.com/influxdb/telegraf/pull/437). -Once we're closer to a full release, there will be a detailed blog post -explaining all the changes. - -* http://get.influxdb.org/telegraf/telegraf_0.3.0-beta2_amd64.deb -* http://get.influxdb.org/telegraf/telegraf-0.3.0_beta2-1.x86_64.rpm -* http://get.influxdb.org/telegraf/telegraf_linux_amd64_0.3.0-beta2.tar.gz -* http://get.influxdb.org/telegraf/telegraf_linux_386_0.3.0-beta2.tar.gz -* http://get.influxdb.org/telegraf/telegraf_linux_arm_0.3.0-beta2.tar.gz - ### From Source: Telegraf manages dependencies via [gdm](https://github.com/sparrc/gdm), which gets installed via the Makefile -if you don't have it already. You also must build with golang version 1.4+. +if you don't have it already. You also must build with golang version 1.5+. 1. [Install Go](https://golang.org/doc/install) 2. [Setup your GOPATH](https://golang.org/doc/code.html#GOPATH) -3. Run `go get github.com/influxdb/telegraf` -4. Run `cd $GOPATH/src/github.com/influxdb/telegraf` +3. Run `go get github.com/influxdata/telegraf` +4. Run `cd $GOPATH/src/github.com/influxdata/telegraf` 5. Run `make` ### How to use it: -* Run `telegraf -sample-config > telegraf.conf` to create an initial configuration. -* Or run `telegraf -sample-config -filter cpu:mem -outputfilter influxdb > telegraf.conf`. -to create a config file with only CPU and memory plugins defined, and InfluxDB -output defined. -* Edit the configuration to match your needs. -* Run `telegraf -config telegraf.conf -test` to output one full measurement -sample to STDOUT. NOTE: you may want to run as the telegraf user if you are using -the linux packages `sudo -u telegraf telegraf -config telegraf.conf -test` -* Run `telegraf -config telegraf.conf` to gather and send metrics to configured outputs. -* Run `telegraf -config telegraf.conf -filter system:swap`. -to run telegraf with only the system & swap plugins defined in the config. +```console +$ telegraf -help +Telegraf, The plugin-driven server agent for collecting and reporting metrics. -## Telegraf Options +Usage: -Telegraf has a few options you can configure under the `agent` section of the -config. + telegraf -* **hostname**: The hostname is passed as a tag. By default this will be -the value returned by `hostname` on the machine running Telegraf. -You can override that value here. -* **interval**: How often to gather metrics. Uses a simple number + -unit parser, e.g. "10s" for 10 seconds or "5m" for 5 minutes. -* **debug**: Set to true to gather and send metrics to STDOUT as well as -InfluxDB. +The flags are: -## Plugin Options + -config configuration file to load + -test gather metrics once, print them to stdout, and exit + -sample-config print out full sample configuration to stdout + -config-directory directory containing additional *.conf files + -input-filter filter the input plugins to enable, separator is : + -output-filter filter the output plugins to enable, separator is : + -usage print usage for a plugin, ie, 'telegraf -usage mysql' + -debug print metrics as they're generated to stdout + -quiet run in quiet mode + -version print the version to stdout -There are 5 configuration options that are configurable per plugin: +Examples: -* **pass**: An array of strings that is used to filter metrics generated by the -current plugin. 
Each string in the array is tested as a glob match against metric names -and if it matches, the metric is emitted. -* **drop**: The inverse of pass, if a metric name matches, it is not emitted. -* **tagpass**: tag names and arrays of strings that are used to filter metrics by the current plugin. Each string in the array is tested as a glob match against -the tag name, and if it matches the metric is emitted. -* **tagdrop**: The inverse of tagpass. If a tag matches, the metric is not emitted. -This is tested on metrics that have passed the tagpass test. -* **interval**: How often to gather this metric. Normal plugins use a single -global interval, but if one particular plugin should be run less or more often, -you can configure that here. + # generate a telegraf config file: + telegraf -sample-config > telegraf.conf -### Plugin Configuration Examples + # generate config with only cpu input & influxdb output plugins defined + telegraf -sample-config -input-filter cpu -output-filter influxdb -This is a full working config that will output CPU data to an InfluxDB instance -at 192.168.59.103:8086, tagging measurements with dc="denver-1". It will output -measurements at a 10s interval and will collect per-cpu data, dropping any -measurements which begin with `cpu_time`. + # run a single telegraf collection, outputing metrics to stdout + telegraf -config telegraf.conf -test -```toml -[tags] - dc = "denver-1" + # run telegraf with all plugins defined in config file + telegraf -config telegraf.conf -[agent] - interval = "10s" - -# OUTPUTS -[outputs] -[[outputs.influxdb]] - url = "http://192.168.59.103:8086" # required. - database = "telegraf" # required. - precision = "s" - -# PLUGINS -[plugins] -[[plugins.cpu]] - percpu = true - totalcpu = false - drop = ["cpu_time*"] + # run telegraf, enabling the cpu & memory input, and influxdb output plugins + telegraf -config telegraf.conf -input-filter cpu:mem -output-filter influxdb ``` -Below is how to configure `tagpass` and `tagdrop` parameters +## Configuration -```toml -[plugins] -[[plugins.cpu]] - percpu = true - totalcpu = false - drop = ["cpu_time"] - # Don't collect CPU data for cpu6 & cpu7 - [plugins.cpu.tagdrop] - cpu = [ "cpu6", "cpu7" ] +See the [configuration guide](CONFIGURATION.md) for a rundown of the more advanced +configuration options. -[[plugins.disk]] - [plugins.disk.tagpass] - # tagpass conditions are OR, not AND. - # If the (filesystem is ext4 or xfs) OR (the path is /opt or /home) - # then the metric passes - fstype = [ "ext4", "xfs" ] - # Globs can also be used on the tag values - path = [ "/opt", "/home*" ] -``` +## Supported Input Plugins -Below is how to configure `pass` and `drop` parameters +Telegraf currently has support for collecting metrics from many sources. For +more information on each, please look at the directory of the same name in +`plugins/inputs`. -```toml -# Drop all metrics for guest CPU usage -[[plugins.cpu]] - drop = [ "cpu_usage_guest" ] - -# Only store inode related metrics for disks -[[plugins.disk]] - pass = [ "disk_inodes*" ] -``` - - -Additional plugins (or outputs) of the same type can be specified, -just define more instances in the config file: - -```toml -[[plugins.cpu]] - percpu = false - totalcpu = true - -[[plugins.cpu]] - percpu = true - totalcpu = false - drop = ["cpu_time*"] -``` - -## Supported Plugins - -**You can view usage instructions for each plugin by running** -`telegraf -usage `. 
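The pass/drop behavior documented in the removed text above survives in the new `Filter` type (see the `internal/config/config.go` hunk near the end of this diff, where it now matches field keys). A rough standalone sketch of the matching rule, with the standard library's `path.Match` standing in for telegraf's `internal.Glob`:

```go
package main

import (
	"fmt"
	"path"
)

// shouldPass sketches the pass/drop semantics described above: each pattern
// is tried as a glob match against the name. If a pass list is set, only
// matching names are emitted; otherwise any name matching a drop pattern is
// rejected. path.Match is an approximation of internal.Glob, not the real code.
func shouldPass(name string, pass, drop []string) bool {
	for _, pat := range pass {
		if ok, _ := path.Match(pat, name); ok {
			return true
		}
	}
	if len(pass) > 0 {
		return false
	}
	for _, pat := range drop {
		if ok, _ := path.Match(pat, name); ok {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(shouldPass("cpu_time_user", nil, []string{"cpu_time*"}))        // false: dropped
	fmt.Println(shouldPass("disk_inodes_free", []string{"disk_inodes*"}, nil)) // true: passed
}
```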
- -Telegraf currently has support for collecting metrics from: +Currently implemented sources: * aerospike * apache * bcache * disque +* docker * elasticsearch * exec (generic JSON-emitting executable plugin) * haproxy * httpjson (generic JSON-emitting http service plugin) * influxdb -* jolokia (remote JMX with JSON over HTTP) +* jolokia * leofs * lustre2 * mailchimp @@ -234,7 +153,9 @@ Telegraf currently has support for collecting metrics from: * mongodb * mysql * nginx +* nsq * phpfpm +* phusion passenger * ping * postgresql * procstat @@ -246,18 +167,17 @@ Telegraf currently has support for collecting metrics from: * twemproxy * zfs * zookeeper +* sensors * system * cpu * mem - * io * net * netstat * disk + * diskio * swap -## Supported Service Plugins - -Telegraf can collect metrics via the following services: +Telegraf can also collect metrics via the following service plugins: * statsd * kafka_consumer @@ -265,52 +185,21 @@ Telegraf can collect metrics via the following services: We'll be adding support for many more over the coming months. Read on if you want to add support for another service or third-party API. -## Output options - -Telegraf also supports specifying multiple output sinks to send data to, -configuring each output sink is different, but examples can be -found by running `telegraf -sample-config`. - -Outputs also support the same configurable options as plugins -(pass, drop, tagpass, tagdrop), added in 0.2.4 - -```toml -[[outputs.influxdb]] - urls = [ "http://localhost:8086" ] - database = "telegraf" - precision = "s" - # Drop all measurements that start with "aerospike" - drop = ["aerospike*"] - -[[outputs.influxdb]] - urls = [ "http://localhost:8086" ] - database = "telegraf-aerospike-data" - precision = "s" - # Only accept aerospike data: - pass = ["aerospike*"] - -[[outputs.influxdb]] - urls = [ "http://localhost:8086" ] - database = "telegraf-cpu0-data" - precision = "s" - # Only store measurements where the tag "cpu" matches the value "cpu0" - [outputs.influxdb.tagpass] - cpu = ["cpu0"] -``` - - -## Supported Outputs +## Supported Output Plugins * influxdb -* nsq -* kafka -* datadog -* opentsdb -* amqp (rabbitmq) -* mqtt -* librato -* prometheus * amon +* amqp +* aws kinesis +* aws cloudwatch +* datadog +* graphite +* kafka +* librato +* mqtt +* nsq +* opentsdb +* prometheus * riemann ## Contributing diff --git a/accumulator.go b/accumulator.go index 8dbf2e8aa..c628907d7 100644 --- a/accumulator.go +++ b/accumulator.go @@ -7,9 +7,9 @@ import ( "sync" "time" - "github.com/influxdb/telegraf/internal/config" + "github.com/influxdata/telegraf/internal/config" - "github.com/influxdb/influxdb/client/v2" + "github.com/influxdata/influxdb/client/v2" ) type Accumulator interface { @@ -29,12 +29,12 @@ type Accumulator interface { } func NewAccumulator( - pluginConfig *config.PluginConfig, + inputConfig *config.InputConfig, points chan *client.Point, ) Accumulator { acc := accumulator{} acc.points = points - acc.pluginConfig = pluginConfig + acc.inputConfig = inputConfig return &acc } @@ -47,7 +47,7 @@ type accumulator struct { debug bool - pluginConfig *config.PluginConfig + inputConfig *config.InputConfig prefix string } @@ -69,30 +69,76 @@ func (ac *accumulator) AddFields( tags map[string]string, t ...time.Time, ) { - // Validate uint64 and float64 fields + if len(fields) == 0 || len(measurement) == 0 { + return + } + + if !ac.inputConfig.Filter.ShouldTagsPass(tags) { + return + } + + // Override measurement name if set + if len(ac.inputConfig.NameOverride) != 0 { + 
measurement = ac.inputConfig.NameOverride + } + // Apply measurement prefix and suffix if set + if len(ac.inputConfig.MeasurementPrefix) != 0 { + measurement = ac.inputConfig.MeasurementPrefix + measurement + } + if len(ac.inputConfig.MeasurementSuffix) != 0 { + measurement = measurement + ac.inputConfig.MeasurementSuffix + } + + if tags == nil { + tags = make(map[string]string) + } + // Apply plugin-wide tags if set + for k, v := range ac.inputConfig.Tags { + if _, ok := tags[k]; !ok { + tags[k] = v + } + } + // Apply daemon-wide tags if set + for k, v := range ac.defaultTags { + if _, ok := tags[k]; !ok { + tags[k] = v + } + } + + result := make(map[string]interface{}) for k, v := range fields { + // Filter out any filtered fields + if ac.inputConfig != nil { + if !ac.inputConfig.Filter.ShouldPass(k) { + continue + } + } + result[k] = v + + // Validate uint64 and float64 fields switch val := v.(type) { case uint64: // InfluxDB does not support writing uint64 if val < uint64(9223372036854775808) { - fields[k] = int64(val) + result[k] = int64(val) } else { - fields[k] = int64(9223372036854775807) + result[k] = int64(9223372036854775807) } case float64: // NaNs are invalid values in influxdb, skip measurement if math.IsNaN(val) || math.IsInf(val, 0) { if ac.debug { - log.Printf("Measurement [%s] has a NaN or Inf field, skipping", - measurement) + log.Printf("Measurement [%s] field [%s] has a NaN or Inf "+ + "value, skipping", + measurement, k) } - return + continue } } } - - if tags == nil { - tags = make(map[string]string) + fields = nil + if len(result) == 0 { + return } var timestamp time.Time @@ -106,19 +152,7 @@ func (ac *accumulator) AddFields( measurement = ac.prefix + measurement } - if ac.pluginConfig != nil { - if !ac.pluginConfig.Filter.ShouldPass(measurement) || !ac.pluginConfig.Filter.ShouldTagsPass(tags) { - return - } - } - - for k, v := range ac.defaultTags { - if _, ok := tags[k]; !ok { - tags[k] = v - } - } - - pt, err := client.NewPoint(measurement, tags, fields, timestamp) + pt, err := client.NewPoint(measurement, tags, result, timestamp) if err != nil { log.Printf("Error adding point [%s]: %s\n", measurement, err.Error()) return diff --git a/agent.go b/agent.go index 68b1b5f16..5425fba33 100644 --- a/agent.go +++ b/agent.go @@ -1,19 +1,20 @@ package telegraf import ( - "crypto/rand" + cryptorand "crypto/rand" "fmt" "log" "math/big" + "math/rand" "os" "sync" "time" - "github.com/influxdb/telegraf/internal/config" - "github.com/influxdb/telegraf/outputs" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdata/telegraf/internal/config" + "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/outputs" - "github.com/influxdb/influxdb/client/v2" + "github.com/influxdata/influxdb/client/v2" ) // Agent runs telegraf and collects data based on the given config @@ -58,7 +59,7 @@ func (a *Agent) Connect() error { } err := o.Output.Connect() if err != nil { - log.Printf("Failed to connect to output %s, retrying in 15s\n", o.Name) + log.Printf("Failed to connect to output %s, retrying in 15s, error was '%s'\n", o.Name, err) time.Sleep(15 * time.Second) err = o.Output.Connect() if err != nil { @@ -85,33 +86,44 @@ func (a *Agent) Close() error { return err } -// gatherParallel runs the plugins that are using the same reporting interval +// gatherParallel runs the inputs that are using the same reporting interval // as the telegraf agent.
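The rewritten `AddFields` above encodes two InfluxDB constraints worth calling out: InfluxDB has no unsigned integer type, so `uint64` values are converted to `int64` and capped at the `int64` maximum, and NaN/Inf floats are invalid, so such fields are now skipped with `continue` instead of aborting the entire point. A standalone sketch of those rules:

```go
package main

import (
	"fmt"
	"math"
)

// sanitizeValue mirrors the field validation in accumulator.AddFields:
// uint64 values become int64 (clamped at the int64 maximum, since InfluxDB
// cannot store uint64), and NaN or Inf floats are dropped entirely.
func sanitizeValue(v interface{}) (interface{}, bool) {
	switch val := v.(type) {
	case uint64:
		if val < uint64(9223372036854775808) {
			return int64(val), true
		}
		return int64(9223372036854775807), true
	case float64:
		if math.IsNaN(val) || math.IsInf(val, 0) {
			return nil, false // skip this field
		}
	}
	return v, true
}

func main() {
	for _, v := range []interface{}{uint64(42), uint64(math.MaxUint64), math.NaN(), 3.14} {
		out, ok := sanitizeValue(v)
		fmt.Println(out, ok)
	}
}
```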
func (a *Agent) gatherParallel(pointChan chan *client.Point) error { var wg sync.WaitGroup start := time.Now() counter := 0 - for _, plugin := range a.Config.Plugins { - if plugin.Config.Interval != 0 { + jitter := a.Config.Agent.CollectionJitter.Duration.Nanoseconds() + for _, input := range a.Config.Inputs { + if input.Config.Interval != 0 { continue } wg.Add(1) counter++ - go func(plugin *config.RunningPlugin) { + go func(input *config.RunningInput) { defer wg.Done() - acc := NewAccumulator(plugin.Config, pointChan) + acc := NewAccumulator(input.Config, pointChan) acc.SetDebug(a.Config.Agent.Debug) - acc.SetPrefix(plugin.Name + "_") acc.SetDefaultTags(a.Config.Tags) - if err := plugin.Plugin.Gather(acc); err != nil { - log.Printf("Error in plugin [%s]: %s", plugin.Name, err) + if jitter != 0 { + nanoSleep := rand.Int63n(jitter) + d, err := time.ParseDuration(fmt.Sprintf("%dns", nanoSleep)) + if err != nil { + log.Printf("Jittering collection interval failed for plugin %s", + input.Name) + } else { + time.Sleep(d) + } } - }(plugin) + if err := input.Input.Gather(acc); err != nil { + log.Printf("Error in input [%s]: %s", input.Name, err) + } + + }(input) } if counter == 0 { @@ -121,36 +133,39 @@ func (a *Agent) gatherParallel(pointChan chan *client.Point) error { wg.Wait() elapsed := time.Since(start) - log.Printf("Gathered metrics, (%s interval), from %d plugins in %s\n", - a.Config.Agent.Interval, counter, elapsed) + if !a.Config.Agent.Quiet { + log.Printf("Gathered metrics, (%s interval), from %d inputs in %s\n", + a.Config.Agent.Interval.Duration, counter, elapsed) + } return nil } -// gatherSeparate runs the plugins that have been configured with their own +// gatherSeparate runs the inputs that have been configured with their own // reporting interval. 
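The collection jitter added to `gatherParallel` above sleeps each input for a random duration within the configured `CollectionJitter` before gathering, so that many inputs do not all query shared resources (sysfs, a database, an API) at the same instant. A simplified standalone sketch of that sleep; the real code builds the duration by running `time.ParseDuration` over a nanosecond string:

```go
package main

import (
	"log"
	"math/rand"
	"time"
)

// sleepWithJitter delays a collection by a random duration in [0, jitter),
// mirroring the jitter logic in gatherParallel.
func sleepWithJitter(jitter time.Duration) {
	if jitter <= 0 {
		return
	}
	d := time.Duration(rand.Int63n(jitter.Nanoseconds()))
	log.Printf("jittering collection by %s", d)
	time.Sleep(d)
}

func main() {
	sleepWithJitter(500 * time.Millisecond) // illustrative jitter value
}
```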
func (a *Agent) gatherSeparate( shutdown chan struct{}, - plugin *config.RunningPlugin, + input *config.RunningInput, pointChan chan *client.Point, ) error { - ticker := time.NewTicker(plugin.Config.Interval) + ticker := time.NewTicker(input.Config.Interval) for { var outerr error start := time.Now() - acc := NewAccumulator(plugin.Config, pointChan) + acc := NewAccumulator(input.Config, pointChan) acc.SetDebug(a.Config.Agent.Debug) - acc.SetPrefix(plugin.Name + "_") acc.SetDefaultTags(a.Config.Tags) - if err := plugin.Plugin.Gather(acc); err != nil { - log.Printf("Error in plugin [%s]: %s", plugin.Name, err) + if err := input.Input.Gather(acc); err != nil { + log.Printf("Error in input [%s]: %s", input.Name, err) } elapsed := time.Since(start) - log.Printf("Gathered metrics, (separate %s interval), from %s in %s\n", - plugin.Config.Interval, plugin.Name, elapsed) + if !a.Config.Agent.Quiet { + log.Printf("Gathered metrics, (separate %s interval), from %s in %s\n", + input.Config.Interval, input.Name, elapsed) + } if outerr != nil { return outerr @@ -165,7 +180,7 @@ func (a *Agent) gatherSeparate( } } -// Test verifies that we can 'Gather' from all plugins with their configured +// Test verifies that we can 'Gather' from all inputs with their configured // Config struct func (a *Agent) Test() error { shutdown := make(chan struct{}) @@ -184,27 +199,27 @@ func (a *Agent) Test() error { } }() - for _, plugin := range a.Config.Plugins { - acc := NewAccumulator(plugin.Config, pointChan) + for _, input := range a.Config.Inputs { + acc := NewAccumulator(input.Config, pointChan) acc.SetDebug(true) - acc.SetPrefix(plugin.Name + "_") + // acc.SetPrefix(input.Name + "_") - fmt.Printf("* Plugin: %s, Collection 1\n", plugin.Name) - if plugin.Config.Interval != 0 { - fmt.Printf("* Internal: %s\n", plugin.Config.Interval) + fmt.Printf("* Plugin: %s, Collection 1\n", input.Name) + if input.Config.Interval != 0 { + fmt.Printf("* Interval: %s\n", input.Config.Interval) } - if err := plugin.Plugin.Gather(acc); err != nil { + if err := input.Input.Gather(acc); err != nil { return err } - // Special instructions for some plugins. cpu, for example, needs to be + // Special instructions for some inputs. cpu, for example, needs to be // run twice in order to return cpu usage percentages.
- switch plugin.Name { - case "cpu", "mongodb": + switch input.Name { + case "cpu", "mongodb", "procstat": time.Sleep(500 * time.Millisecond) - fmt.Printf("* Plugin: %s, Collection 2\n", plugin.Name) - if err := plugin.Plugin.Gather(acc); err != nil { + fmt.Printf("* Plugin: %s, Collection 2\n", input.Name) + if err := input.Input.Gather(acc); err != nil { return err } } @@ -235,8 +250,10 @@ func (a *Agent) writeOutput( if err == nil { // Write successful elapsed := time.Since(start) - log.Printf("Flushed %d metrics to output %s in %s\n", - len(filtered), ro.Name, elapsed) + if !a.Config.Agent.Quiet { + log.Printf("Flushed %d metrics to output %s in %s\n", + len(filtered), ro.Name, elapsed) + } return } @@ -309,7 +326,7 @@ func jitterInterval(ininterval, injitter time.Duration) time.Duration { outinterval := ininterval if injitter.Nanoseconds() != 0 { maxjitter := big.NewInt(injitter.Nanoseconds()) - if j, err := rand.Int(rand.Reader, maxjitter); err == nil { + if j, err := cryptorand.Int(cryptorand.Reader, maxjitter); err == nil { jitter = j.Int64() } outinterval = time.Duration(jitter + ininterval.Nanoseconds()) @@ -327,15 +344,16 @@ func jitterInterval(ininterval, injitter time.Duration) time.Duration { func (a *Agent) Run(shutdown chan struct{}) error { var wg sync.WaitGroup - a.Config.Agent.FlushInterval.Duration = jitterInterval(a.Config.Agent.FlushInterval.Duration, + a.Config.Agent.FlushInterval.Duration = jitterInterval( + a.Config.Agent.FlushInterval.Duration, a.Config.Agent.FlushJitter.Duration) - log.Printf("Agent Config: Interval:%s, Debug:%#v, Hostname:%#v, "+ - "Flush Interval:%s\n", - a.Config.Agent.Interval, a.Config.Agent.Debug, - a.Config.Agent.Hostname, a.Config.Agent.FlushInterval) + log.Printf("Agent Config: Interval:%s, Debug:%#v, Quiet:%#v, Hostname:%#v, "+ + "Flush Interval:%s \n", + a.Config.Agent.Interval.Duration, a.Config.Agent.Debug, a.Config.Agent.Quiet, + a.Config.Agent.Hostname, a.Config.Agent.FlushInterval.Duration) - // channel shared between all plugin threads for accumulating points + // channel shared between all input threads for accumulating points pointChan := make(chan *client.Point, 1000) // Round collection to nearest interval by sleeping @@ -354,29 +372,29 @@ func (a *Agent) Run(shutdown chan struct{}) error { } }() - for _, plugin := range a.Config.Plugins { + for _, input := range a.Config.Inputs { // Start service of any ServicePlugins - switch p := plugin.Plugin.(type) { - case plugins.ServicePlugin: + switch p := input.Input.(type) { + case inputs.ServiceInput: if err := p.Start(); err != nil { - log.Printf("Service for plugin %s failed to start, exiting\n%s\n", - plugin.Name, err.Error()) + log.Printf("Service for input %s failed to start, exiting\n%s\n", + input.Name, err.Error()) return err } defer p.Stop() } - // Special handling for plugins that have their own collection interval + // Special handling for inputs that have their own collection interval // configured. 
Default intervals are handled below with gatherParallel - if plugin.Config.Interval != 0 { + if input.Config.Interval != 0 { wg.Add(1) - go func(plugin *config.RunningPlugin) { + go func(input *config.RunningInput) { defer wg.Done() - if err := a.gatherSeparate(shutdown, plugin, pointChan); err != nil { + if err := a.gatherSeparate(shutdown, input, pointChan); err != nil { log.Printf(err.Error()) } - }(plugin) + }(input) } } diff --git a/agent_test.go b/agent_test.go index 7dd65ef26..3420e665a 100644 --- a/agent_test.go +++ b/agent_test.go @@ -5,80 +5,99 @@ import ( "testing" "time" - "github.com/influxdb/telegraf/internal/config" + "github.com/influxdata/telegraf/internal/config" // needing to load the plugins - _ "github.com/influxdb/telegraf/plugins/all" + _ "github.com/influxdata/telegraf/plugins/inputs/all" // needing to load the outputs - _ "github.com/influxdb/telegraf/outputs/all" + _ "github.com/influxdata/telegraf/plugins/outputs/all" ) func TestAgent_LoadPlugin(t *testing.T) { c := config.NewConfig() - c.PluginFilters = []string{"mysql"} - c.LoadConfig("./internal/config/testdata/telegraf-agent.toml") + c.InputFilters = []string{"mysql"} + err := c.LoadConfig("./internal/config/testdata/telegraf-agent.toml") + assert.NoError(t, err) a, _ := NewAgent(c) - assert.Equal(t, 1, len(a.Config.Plugins)) + assert.Equal(t, 1, len(a.Config.Inputs)) c = config.NewConfig() - c.PluginFilters = []string{"foo"} - c.LoadConfig("./internal/config/testdata/telegraf-agent.toml") + c.InputFilters = []string{"foo"} + err = c.LoadConfig("./internal/config/testdata/telegraf-agent.toml") + assert.NoError(t, err) a, _ = NewAgent(c) - assert.Equal(t, 0, len(a.Config.Plugins)) + assert.Equal(t, 0, len(a.Config.Inputs)) c = config.NewConfig() - c.PluginFilters = []string{"mysql", "foo"} - c.LoadConfig("./internal/config/testdata/telegraf-agent.toml") + c.InputFilters = []string{"mysql", "foo"} + err = c.LoadConfig("./internal/config/testdata/telegraf-agent.toml") + assert.NoError(t, err) a, _ = NewAgent(c) - assert.Equal(t, 1, len(a.Config.Plugins)) + assert.Equal(t, 1, len(a.Config.Inputs)) c = config.NewConfig() - c.PluginFilters = []string{"mysql", "redis"} - c.LoadConfig("./internal/config/testdata/telegraf-agent.toml") + c.InputFilters = []string{"mysql", "redis"} + err = c.LoadConfig("./internal/config/testdata/telegraf-agent.toml") + assert.NoError(t, err) a, _ = NewAgent(c) - assert.Equal(t, 2, len(a.Config.Plugins)) + assert.Equal(t, 2, len(a.Config.Inputs)) c = config.NewConfig() - c.PluginFilters = []string{"mysql", "foo", "redis", "bar"} - c.LoadConfig("./internal/config/testdata/telegraf-agent.toml") + c.InputFilters = []string{"mysql", "foo", "redis", "bar"} + err = c.LoadConfig("./internal/config/testdata/telegraf-agent.toml") + assert.NoError(t, err) a, _ = NewAgent(c) - assert.Equal(t, 2, len(a.Config.Plugins)) + assert.Equal(t, 2, len(a.Config.Inputs)) } func TestAgent_LoadOutput(t *testing.T) { c := config.NewConfig() c.OutputFilters = []string{"influxdb"} - c.LoadConfig("./internal/config/testdata/telegraf-agent.toml") + err := c.LoadConfig("./internal/config/testdata/telegraf-agent.toml") + assert.NoError(t, err) a, _ := NewAgent(c) assert.Equal(t, 2, len(a.Config.Outputs)) + c = config.NewConfig() + c.OutputFilters = []string{"kafka"} + err = c.LoadConfig("./internal/config/testdata/telegraf-agent.toml") + assert.NoError(t, err) + a, _ = NewAgent(c) + assert.Equal(t, 1, len(a.Config.Outputs)) + c = config.NewConfig() c.OutputFilters = []string{} - 
c.LoadConfig("./internal/config/testdata/telegraf-agent.toml") + err = c.LoadConfig("./internal/config/testdata/telegraf-agent.toml") + assert.NoError(t, err) a, _ = NewAgent(c) assert.Equal(t, 3, len(a.Config.Outputs)) c = config.NewConfig() c.OutputFilters = []string{"foo"} - c.LoadConfig("./internal/config/testdata/telegraf-agent.toml") + err = c.LoadConfig("./internal/config/testdata/telegraf-agent.toml") + assert.NoError(t, err) a, _ = NewAgent(c) assert.Equal(t, 0, len(a.Config.Outputs)) c = config.NewConfig() c.OutputFilters = []string{"influxdb", "foo"} - c.LoadConfig("./internal/config/testdata/telegraf-agent.toml") + err = c.LoadConfig("./internal/config/testdata/telegraf-agent.toml") + assert.NoError(t, err) a, _ = NewAgent(c) assert.Equal(t, 2, len(a.Config.Outputs)) c = config.NewConfig() c.OutputFilters = []string{"influxdb", "kafka"} - c.LoadConfig("./internal/config/testdata/telegraf-agent.toml") + err = c.LoadConfig("./internal/config/testdata/telegraf-agent.toml") + assert.NoError(t, err) + assert.Equal(t, 3, len(c.Outputs)) a, _ = NewAgent(c) assert.Equal(t, 3, len(a.Config.Outputs)) c = config.NewConfig() c.OutputFilters = []string{"influxdb", "foo", "kafka", "bar"} - c.LoadConfig("./internal/config/testdata/telegraf-agent.toml") + err = c.LoadConfig("./internal/config/testdata/telegraf-agent.toml") + assert.NoError(t, err) a, _ = NewAgent(c) assert.Equal(t, 3, len(a.Config.Outputs)) } diff --git a/build.py b/build.py new file mode 100755 index 000000000..a5892f26a --- /dev/null +++ b/build.py @@ -0,0 +1,674 @@ +#!/usr/bin/env python2.7 +# +# This is the Telegraf build script. +# +# Current caveats: +# - Does not checkout the correct commit/branch (for now, you will need to do so manually) +# - Has external dependencies for packaging (fpm) and uploading (boto) +# + +import sys +import os +import subprocess +import time +import datetime +import shutil +import tempfile +import hashlib +import re + +try: + import boto + from boto.s3.key import Key +except ImportError: + pass + +# PACKAGING VARIABLES +INSTALL_ROOT_DIR = "/usr/bin" +LOG_DIR = "/var/log/telegraf" +SCRIPT_DIR = "/usr/lib/telegraf/scripts" +CONFIG_DIR = "/etc/telegraf" +LOGROTATE_DIR = "/etc/logrotate.d" + +INIT_SCRIPT = "scripts/init.sh" +SYSTEMD_SCRIPT = "scripts/telegraf.service" +LOGROTATE_SCRIPT = "etc/logrotate.d/telegraf" +DEFAULT_CONFIG = "etc/telegraf.conf" +POSTINST_SCRIPT = "scripts/post-install.sh" +PREINST_SCRIPT = "scripts/pre-install.sh" + +# META-PACKAGE VARIABLES +PACKAGE_LICENSE = "MIT" +PACKAGE_URL = "https://github.com/influxdata/telegraf" +MAINTAINER = "support@influxdb.com" +VENDOR = "InfluxData" +DESCRIPTION = "Plugin-driven server agent for reporting metrics into InfluxDB." 
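build.py below, like the Makefile hunk earlier, stamps version metadata into the binary through Go's `-X` linker flag instead of hardcoding it in source. A minimal sketch of the receiving side, modeled on the `Version` variable in `cmd/telegraf/telegraf.go`; the "dev" fallback is an assumption for illustration, not repo behavior:

```go
package main

import "fmt"

// Version is empty in source; the build injects it, e.g.:
//   go build -ldflags "-X main.Version=0.10.0" ./cmd/telegraf
var Version string

func main() {
	if Version == "" {
		Version = "dev" // fallback when built without -ldflags (assumption)
	}
	fmt.Printf("Telegraf - Version %s\n", Version)
}
```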
+ +# SCRIPT START +prereqs = [ 'git', 'go' ] +optional_prereqs = [ 'gvm', 'fpm', 'rpmbuild' ] + +fpm_common_args = "-f -s dir --log error \ + --vendor {} \ + --url {} \ + --license {} \ + --maintainer {} \ + --config-files {} \ + --config-files {} \ + --after-install {} \ + --before-install {} \ + --description \"{}\"".format( + VENDOR, + PACKAGE_URL, + PACKAGE_LICENSE, + MAINTAINER, + CONFIG_DIR + '/telegraf.conf', + LOGROTATE_DIR + '/telegraf', + POSTINST_SCRIPT, + PREINST_SCRIPT, + DESCRIPTION) + +targets = { + 'telegraf' : './cmd/telegraf/telegraf.go', +} + +supported_builds = { + # TODO(rossmcdonald): Add support for multiple GOARM values + 'darwin': [ "amd64", "386" ], + # 'windows': [ "amd64", "386", "arm", "arm64" ], + 'linux': [ "amd64", "386", "arm" ] +} +supported_go = [ '1.5.1' ] +supported_packages = { + "darwin": [ "tar", "zip" ], + "linux": [ "deb", "rpm", "tar", "zip" ], + "windows": [ "tar", "zip" ], +} + +def run(command, allow_failure=False, shell=False): + out = None + try: + if shell: + out = subprocess.check_output(command, stderr=subprocess.STDOUT, shell=shell) + else: + out = subprocess.check_output(command.split(), stderr=subprocess.STDOUT) + out = out.decode("utf8") + except subprocess.CalledProcessError as e: + print("") + print("") + print("Executed command failed!") + print("-- Command run was: {}".format(command)) + print("-- Failure was: {}".format(e.output)) + if allow_failure: + print("Continuing...") + return None + else: + print("") + print("Stopping.") + sys.exit(1) + except OSError as e: + print("") + print("") + print("Invalid command!") + print("-- Command run was: {}".format(command)) + print("-- Failure was: {}".format(e)) + if allow_failure: + print("Continuing...") + return out + else: + print("") + print("Stopping.") + sys.exit(1) + else: + return out + +def create_temp_dir(): + return tempfile.mkdtemp(prefix="telegraf-build.") + +def get_current_version(): + command = "git describe --always --tags --abbrev=0" + out = run(command) + return out.strip() + +def get_current_commit(short=False): + command = None + if short: + command = "git log --pretty=format:'%h' -n 1" + else: + command = "git rev-parse HEAD" + out = run(command) + return out.strip('\'\n\r ') + +def get_current_branch(): + command = "git rev-parse --abbrev-ref HEAD" + out = run(command) + return out.strip() + +def get_system_arch(): + arch = os.uname()[4] + if arch == "x86_64": + arch = "amd64" + return arch + +def get_system_platform(): + if sys.platform.startswith("linux"): + return "linux" + else: + return sys.platform + +def get_go_version(): + out = run("go version") + matches = re.search('go version go(\S+)', out) + if matches is not None: + return matches.groups()[0].strip() + return None + +def check_path_for(b): + def is_exe(fpath): + return os.path.isfile(fpath) and os.access(fpath, os.X_OK) + + for path in os.environ["PATH"].split(os.pathsep): + path = path.strip('"') + full_path = os.path.join(path, b) + if os.path.isfile(full_path) and os.access(full_path, os.X_OK): + return full_path + +def check_environ(build_dir = None): + print("\nChecking environment:") + for v in [ "GOPATH", "GOBIN", "GOROOT" ]: + print("\t- {} -> {}".format(v, os.environ.get(v))) + + cwd = os.getcwd() + if build_dir == None and os.environ.get("GOPATH") and os.environ.get("GOPATH") not in cwd: + print("\n!! WARNING: Your current directory is not under your GOPATH. 
This may lead to build failures.") + +def check_prereqs(): + print("\nChecking for dependencies:") + for req in prereqs: + print("\t- {} ->".format(req),) + path = check_path_for(req) + if path: + print("{}".format(path)) + else: + print("?") + for req in optional_prereqs: + print("\t- {} (optional) ->".format(req)) + path = check_path_for(req) + if path: + print("{}".format(path)) + else: + print("?") + print("") + +def upload_packages(packages, nightly=False): + print("Uploading packages to S3...") + print("") + c = boto.connect_s3() + # TODO(rossmcdonald) - Set to different S3 bucket for release vs nightly + bucket = c.get_bucket('get.influxdb.org') + for p in packages: + name = os.path.join('telegraf', os.path.basename(p)) + if bucket.get_key(name) is None or nightly: + print("\t - Uploading {}...".format(name)) + k = Key(bucket) + k.key = name + if nightly: + n = k.set_contents_from_filename(p, replace=True) + else: + n = k.set_contents_from_filename(p, replace=False) + k.make_public() + print("[ DONE ]") + else: + print("\t - Not uploading {}, already exists.".format(p)) + print("") + +def run_tests(race, parallel, timeout, no_vet): + get_command = "go get -d -t ./..." + print("Retrieving Go dependencies...") + sys.stdout.flush() + run(get_command) + print("done.") + print("Running tests:") + print("\tRace: ", race) + if parallel is not None: + print("\tParallel:", parallel) + if timeout is not None: + print("\tTimeout:", timeout) + sys.stdout.flush() + p = subprocess.Popen(["go", "fmt", "./..."], stdout=subprocess.PIPE, stderr=subprocess.PIPE) + out, err = p.communicate() + if len(out) > 0 or len(err) > 0: + print("Code not formatted. Please use 'go fmt ./...' to fix formatting errors.") + print(out) + print(err) + return False + if not no_vet: + p = subprocess.Popen(["go", "tool", "vet", "-composites=false", "./"], stdout=subprocess.PIPE, stderr=subprocess.PIPE) + out, err = p.communicate() + if len(out) > 0 or len(err) > 0: + print("Go vet failed. Please run 'go vet ./...' and fix any errors.") + print(out) + print(err) + return False + else: + print("Skipping go vet ...") + sys.stdout.flush() + test_command = "go test -v" + if race: + test_command += " -race" + if parallel is not None: + test_command += " -parallel {}".format(parallel) + if timeout is not None: + test_command += " -timeout {}".format(timeout) + test_command += " ./..." + code = os.system(test_command) + if code != 0: + print("Tests Failed") + return False + else: + print("Tests Passed") + return True + +def build(version=None, + branch=None, + commit=None, + platform=None, + arch=None, + nightly=False, + rc=None, + race=False, + clean=False, + outdir=".", + goarm_version="6"): + print("-------------------------") + print("") + print("Build plan:") + print("\t- version: {}".format(version)) + if rc: + print("\t- release candidate: {}".format(rc)) + print("\t- commit: {}".format(commit)) + print("\t- branch: {}".format(branch)) + print("\t- platform: {}".format(platform)) + print("\t- arch: {}".format(arch)) + if arch == 'arm' and goarm_version: + print("\t- ARM version: {}".format(goarm_version)) + print("\t- nightly? {}".format(str(nightly).lower())) + print("\t- race enabled? 
{}".format(str(race).lower())) + print("") + + if not os.path.exists(outdir): + os.makedirs(outdir) + elif clean and outdir != '/': + print("Cleaning build directory...") + shutil.rmtree(outdir) + os.makedirs(outdir) + + if rc: + # If a release candidate, update the version information accordingly + version = "{}rc{}".format(version, rc) + + print("Starting build...") + for b, c in targets.items(): + print("\t- Building '{}'...".format(os.path.join(outdir, b)),) + build_command = "" + build_command += "GOOS={} GOARCH={} ".format(platform, arch) + if arch == "arm" and goarm_version: + if goarm_version not in ["5", "6", "7", "arm64"]: + print("!! Invalid ARM build version: {}".format(goarm_version)) + build_command += "GOARM={} ".format(goarm_version) + build_command += "go build -o {} ".format(os.path.join(outdir, b)) + if race: + build_command += "-race " + go_version = get_go_version() + if "1.4" in go_version: + build_command += "-ldflags=\"-X main.buildTime '{}' ".format(datetime.datetime.utcnow().isoformat()) + build_command += "-X main.Version {} ".format(version) + build_command += "-X main.Branch {} ".format(branch) + build_command += "-X main.Commit {}\" ".format(get_current_commit()) + else: + build_command += "-ldflags=\"-X main.buildTime='{}' ".format(datetime.datetime.utcnow().isoformat()) + build_command += "-X main.Version={} ".format(version) + build_command += "-X main.Branch={} ".format(branch) + build_command += "-X main.Commit={}\" ".format(get_current_commit()) + build_command += c + run(build_command, shell=True) + print("[ DONE ]") + print("") + +def create_dir(path): + try: + os.makedirs(path) + except OSError as e: + print(e) + +def rename_file(fr, to): + try: + os.rename(fr, to) + except OSError as e: + print(e) + # Return the original filename + return fr + else: + # Return the new filename + return to + +def copy_file(fr, to): + try: + shutil.copy(fr, to) + except OSError as e: + print(e) + +def create_package_fs(build_root): + print("\t- Creating a filesystem hierarchy from directory: {}".format(build_root)) + # Using [1:] for the path names due to them being absolute + # (will overwrite previous paths, per 'os.path.join' documentation) + dirs = [ INSTALL_ROOT_DIR[1:], LOG_DIR[1:], SCRIPT_DIR[1:], CONFIG_DIR[1:], LOGROTATE_DIR[1:] ] + for d in dirs: + create_dir(os.path.join(build_root, d)) + os.chmod(os.path.join(build_root, d), 0o755) + +def package_scripts(build_root): + print("\t- Copying scripts and sample configuration to build directory") + shutil.copyfile(INIT_SCRIPT, os.path.join(build_root, SCRIPT_DIR[1:], INIT_SCRIPT.split('/')[1])) + os.chmod(os.path.join(build_root, SCRIPT_DIR[1:], INIT_SCRIPT.split('/')[1]), 0o644) + shutil.copyfile(SYSTEMD_SCRIPT, os.path.join(build_root, SCRIPT_DIR[1:], SYSTEMD_SCRIPT.split('/')[1])) + os.chmod(os.path.join(build_root, SCRIPT_DIR[1:], SYSTEMD_SCRIPT.split('/')[1]), 0o644) + shutil.copyfile(LOGROTATE_SCRIPT, os.path.join(build_root, LOGROTATE_DIR[1:], "telegraf")) + os.chmod(os.path.join(build_root, LOGROTATE_DIR[1:], "telegraf"), 0o644) + shutil.copyfile(DEFAULT_CONFIG, os.path.join(build_root, CONFIG_DIR[1:], "telegraf.conf")) + os.chmod(os.path.join(build_root, CONFIG_DIR[1:], "telegraf.conf"), 0o644) + +def go_get(update=False): + get_command = None + if update: + get_command = "go get -u -f -d ./..." + else: + get_command = "go get -d ./..." 
+ print("Retrieving Go dependencies...") + run(get_command) + print("done.\n") + +def generate_md5_from_file(path): + m = hashlib.md5() + with open(path, 'rb') as f: + while True: + data = f.read(4096) + if not data: + break + m.update(data) + return m.hexdigest() + +def build_packages(build_output, version, nightly=False, rc=None, iteration=1): + outfiles = [] + tmp_build_dir = create_temp_dir() + try: + print("-------------------------") + print("") + print("Packaging...") + for p in build_output: + # Create top-level folder displaying which platform (linux, etc) + create_dir(os.path.join(tmp_build_dir, p)) + for a in build_output[p]: + current_location = build_output[p][a] + # Create second-level directory displaying the architecture (amd64, etc)p + build_root = os.path.join(tmp_build_dir, p, a) + # Create directory tree to mimic file system of package + create_dir(build_root) + create_package_fs(build_root) + # Copy in packaging and miscellaneous scripts + package_scripts(build_root) + # Copy newly-built binaries to packaging directory + for b in targets: + if p == 'windows': + b = b + '.exe' + fr = os.path.join(current_location, b) + to = os.path.join(build_root, INSTALL_ROOT_DIR[1:], b) + print("\t- [{}][{}] - Moving from '{}' to '{}'".format(p, a, fr, to)) + copy_file(fr, to) + # Package the directory structure + for package_type in supported_packages[p]: + print("\t- Packaging directory '{}' as '{}'...".format(build_root, package_type)) + name = "telegraf" + package_version = version + package_iteration = iteration + if package_type in ['zip', 'tar']: + if nightly: + name = '{}-nightly_{}_{}'.format(name, p, a) + else: + name = '{}-{}_{}_{}'.format(name, version, p, a) + if package_type == 'tar': + # Add `tar.gz` to path to reduce package size + current_location = os.path.join(current_location, name + '.tar.gz') + if rc is not None: + package_iteration = "0.rc{}".format(rc) + fpm_command = "fpm {} --name {} -a {} -t {} --version {} --iteration {} -C {} -p {} ".format( + fpm_common_args, + name, + a, + package_type, + package_version, + package_iteration, + build_root, + current_location) + if package_type == "rpm": + fpm_command += "--depends coreutils " + fpm_command += "--depends lsof" + out = run(fpm_command, shell=True) + matches = re.search(':path=>"(.*)"', out) + outfile = None + if matches is not None: + outfile = matches.groups()[0] + if outfile is None: + print("[ COULD NOT DETERMINE OUTPUT ]") + else: + # Strip nightly version (the unix epoch) from filename + if nightly and package_type in ['deb', 'rpm']: + outfile = rename_file(outfile, outfile.replace("{}-{}".format(version, iteration), "nightly")) + outfiles.append(os.path.join(os.getcwd(), outfile)) + print("[ DONE ]") + # Display MD5 hash for generated package + print("\t\tMD5 = {}".format(generate_md5_from_file(outfile))) + print("") + return outfiles + finally: + # Cleanup + shutil.rmtree(tmp_build_dir) + +def print_usage(): + print("Usage: ./build.py [options]") + print("") + print("Options:") + print("\t --outdir= \n\t\t- Send build output to a specified path. Defaults to ./build.") + print("\t --arch= \n\t\t- Build for specified architecture. Acceptable values: x86_64|amd64, 386, arm, or all") + print("\t --goarm= \n\t\t- Build for specified ARM version (when building for ARM). Default value is: 6") + print("\t --platform= \n\t\t- Build for specified platform. Acceptable values: linux, windows, darwin, or all") + print("\t --version= \n\t\t- Version information to apply to build metadata. 
If not specified, will be pulled from repo tag.") + print("\t --commit= \n\t\t- Use specific commit for build (currently a NOOP).") + print("\t --branch= \n\t\t- Build from a specific branch (currently a NOOP).") + print("\t --rc= \n\t\t- Whether or not the build is a release candidate (affects version information).") + print("\t --iteration= \n\t\t- The iteration to display on the package output (defaults to 0 for RC's, and 1 otherwise).") + print("\t --race \n\t\t- Whether the produced build should have race detection enabled.") + print("\t --package \n\t\t- Whether the produced builds should be packaged for the target platform(s).") + print("\t --nightly \n\t\t- Whether the produced build is a nightly (affects version information).") + print("\t --update \n\t\t- Whether dependencies should be updated prior to building.") + print("\t --test \n\t\t- Run Go tests. Will not produce a build.") + print("\t --parallel \n\t\t- Run Go tests in parallel up to the count specified.") + print("\t --timeout \n\t\t- Timeout for Go tests. Defaults to 480s.") + print("\t --clean \n\t\t- Clean the build output directory prior to creating build.") + print("") + +def print_package_summary(packages): + print(packages) + +def main(): + # Command-line arguments + outdir = "build" + commit = None + target_platform = None + target_arch = None + nightly = False + race = False + branch = None + version = get_current_version() + rc = None + package = False + update = False + clean = False + upload = False + test = False + parallel = None + timeout = None + iteration = 1 + no_vet = False + goarm_version = "6" + + for arg in sys.argv[1:]: + if '--outdir' in arg: + # Output directory. If none is specified, then builds will be placed in the same directory. + outdir = arg.split("=")[1] + elif '--commit' in arg: + # Commit to build from. If none is specified, then it will build from the most recent commit. + commit = arg.split("=")[1] + elif '--branch' in arg: + # Branch to build from. If none is specified, then it will build from the current branch. + branch = arg.split("=")[1] + elif '--arch' in arg: + # Target architecture. If none is specified, then it will build for the current arch. + target_arch = arg.split("=")[1] + elif '--platform' in arg: + # Target platform. If none is specified, then it will build for the current platform. + target_platform = arg.split("=")[1] + elif '--version' in arg: + # Version to assign to this build (0.9.5, etc) + version = arg.split("=")[1] + elif '--rc' in arg: + # Signifies that this is a release candidate build. + rc = arg.split("=")[1] + elif '--race' in arg: + # Signifies that race detection should be enabled. + race = True + elif '--package' in arg: + # Signifies that packages should be built. + package = True + elif '--nightly' in arg: + # Signifies that this is a nightly build. + nightly = True + elif '--update' in arg: + # Signifies that dependencies should be updated. + update = True + elif '--upload' in arg: + # Signifies that the resulting packages should be uploaded to S3 + upload = True + elif '--test' in arg: + # Run tests and exit + test = True + elif '--parallel' in arg: + # Set parallel for tests. + parallel = int(arg.split("=")[1]) + elif '--timeout' in arg: + # Set timeout for tests.
+ timeout = arg.split("=")[1] + elif '--clean' in arg: + # Signifies that the outdir should be deleted before building + clean = True + elif '--iteration' in arg: + iteration = arg.split("=")[1] + elif '--no-vet' in arg: + no_vet = True + elif '--goarm' in arg: + # Signifies GOARM flag to pass to build command when compiling for ARM + goarm_version = arg.split("=")[1] + elif '--help' in arg: + print_usage() + return 0 + else: + print("!! Unknown argument: {}".format(arg)) + print_usage() + return 1 + + if nightly: + if rc: + print("!! Cannot be both nightly and a release candidate! Stopping.") + return 1 + # In order to support nightly builds on the repository, we are adding the epoch timestamp + # to the version so that version numbers are always greater than the previous nightly. + version = "{}.n{}".format(version, int(time.time())) + + # Pre-build checks + check_environ() + check_prereqs() + + if not commit: + commit = get_current_commit(short=True) + if not branch: + branch = get_current_branch() + if not target_arch: + if 'arm' in get_system_arch(): + # Prevent uname from reporting ARM arch (eg 'armv7l') + target_arch = "arm" + else: + target_arch = get_system_arch() + if not target_platform: + target_platform = get_system_platform() + if rc or nightly: + # If a release candidate or nightly, set iteration to 0 (instead of 1) + iteration = 0 + + build_output = {} + # TODO(rossmcdonald): Prepare git repo for build (checking out correct branch/commit, etc.) + # prepare(branch=branch, commit=commit) + if test: + if not run_tests(race, parallel, timeout, no_vet): + return 1 + return 0 + + go_get(update=update) + + platforms = [] + single_build = True + if target_platform == 'all': + platforms = list(supported_builds.keys()) + single_build = False + else: + platforms = [target_platform] + + for platform in platforms: + build_output.update( { platform : {} } ) + archs = [] + if target_arch == "all": + single_build = False + archs = supported_builds.get(platform) + else: + archs = [target_arch] + for arch in archs: + od = outdir + if not single_build: + od = os.path.join(outdir, platform, arch) + build(version=version, + branch=branch, + commit=commit, + platform=platform, + arch=arch, + nightly=nightly, + rc=rc, + race=race, + clean=clean, + outdir=od, + goarm_version=goarm_version) + build_output.get(platform).update( { arch : od } ) + + # Build packages + if package: + if not check_path_for("fpm"): + print("!! Cannot package without command 'fpm'. 
Stopping.") + return 1 + packages = build_packages(build_output, version, nightly=nightly, rc=rc, iteration=iteration) + # TODO(rossmcdonald): Add nice output for print_package_summary() + # print_package_summary(packages) + # Optionally upload to S3 + if upload: + upload_packages(packages, nightly=nightly) + return 0 + +if __name__ == '__main__': + sys.exit(main()) diff --git a/circle.yml b/circle.yml index c1c2d35ab..d86d46dba 100644 --- a/circle.yml +++ b/circle.yml @@ -4,14 +4,12 @@ machine: post: - sudo service zookeeper stop - go version - - go version | grep 1.5.1 || sudo rm -rf /usr/local/go - - wget https://storage.googleapis.com/golang/go1.5.1.linux-amd64.tar.gz - - sudo tar -C /usr/local -xzf go1.5.1.linux-amd64.tar.gz + - go version | grep 1.5.2 || sudo rm -rf /usr/local/go + - wget https://storage.googleapis.com/golang/go1.5.2.linux-amd64.tar.gz + - sudo tar -C /usr/local -xzf go1.5.2.linux-amd64.tar.gz - go version dependencies: - cache_directories: - - "~/telegraf-build/src" override: - docker info diff --git a/cmd/telegraf/telegraf.go b/cmd/telegraf/telegraf.go index 8f9a628f9..72fb9fdcf 100644 --- a/cmd/telegraf/telegraf.go +++ b/cmd/telegraf/telegraf.go @@ -7,146 +7,235 @@ import ( "os" "os/signal" "strings" + "syscall" - "github.com/influxdb/telegraf" - "github.com/influxdb/telegraf/internal/config" - _ "github.com/influxdb/telegraf/outputs/all" - _ "github.com/influxdb/telegraf/plugins/all" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal/config" + _ "github.com/influxdata/telegraf/plugins/inputs/all" + _ "github.com/influxdata/telegraf/plugins/outputs/all" ) var fDebug = flag.Bool("debug", false, "show metrics as they're generated to stdout") +var fQuiet = flag.Bool("quiet", false, + "run in quiet mode") var fTest = flag.Bool("test", false, "gather metrics, print them out, and exit") var fConfig = flag.String("config", "", "configuration file to load") -var fConfigDirectory = flag.String("configdirectory", "", +var fConfigDirectory = flag.String("config-directory", "", "directory containing additional *.conf files") var fVersion = flag.Bool("version", false, "display the version") var fSampleConfig = flag.Bool("sample-config", false, "print out full sample configuration") var fPidfile = flag.String("pidfile", "", "file to write our pid to") -var fPLuginFilters = flag.String("filter", "", - "filter the plugins to enable, separator is :") -var fOutputFilters = flag.String("outputfilter", "", +var fInputFilters = flag.String("input-filter", "", + "filter the inputs to enable, separator is :") +var fOutputFilters = flag.String("output-filter", "", "filter the outputs to enable, separator is :") var fUsage = flag.String("usage", "", "print usage for a plugin, ie, 'telegraf -usage mysql'") +var fInputFiltersLegacy = flag.String("filter", "", + "filter the inputs to enable, separator is :") +var fOutputFiltersLegacy = flag.String("outputfilter", "", + "filter the outputs to enable, separator is :") +var fConfigDirectoryLegacy = flag.String("configdirectory", "", + "directory containing additional *.conf files") + // Telegraf version // -ldflags "-X main.Version=`git describe --always --tags`" var Version string +const usage = `Telegraf, The plugin-driven server agent for collecting and reporting metrics. 
+ +Usage: + + telegraf + +The flags are: + + -config configuration file to load + -test gather metrics once, print them to stdout, and exit + -sample-config print out full sample configuration to stdout + -config-directory directory containing additional *.conf files + -input-filter filter the input plugins to enable, separator is : + -output-filter filter the output plugins to enable, separator is : + -usage print usage for a plugin, ie, 'telegraf -usage mysql' + -debug print metrics as they're generated to stdout + -quiet run in quiet mode + -version print the version to stdout + +Examples: + + # generate a telegraf config file: + telegraf -sample-config > telegraf.conf + + # generate config with only cpu input & influxdb output plugins defined + telegraf -sample-config -input-filter cpu -output-filter influxdb + + # run a single telegraf collection, outputing metrics to stdout + telegraf -config telegraf.conf -test + + # run telegraf with all plugins defined in config file + telegraf -config telegraf.conf + + # run telegraf, enabling the cpu & memory input, and influxdb output plugins + telegraf -config telegraf.conf -input-filter cpu:mem -output-filter influxdb +` + func main() { - flag.Parse() + reload := make(chan bool, 1) + reload <- true + for <-reload { + reload <- false + flag.Usage = usageExit + flag.Parse() - var pluginFilters []string - if *fPLuginFilters != "" { - pluginsFilter := strings.TrimSpace(*fPLuginFilters) - pluginFilters = strings.Split(":"+pluginsFilter+":", ":") - } + if flag.NFlag() == 0 { + usageExit() + } - var outputFilters []string - if *fOutputFilters != "" { - outputFilter := strings.TrimSpace(*fOutputFilters) - outputFilters = strings.Split(":"+outputFilter+":", ":") - } + var inputFilters []string + if *fInputFiltersLegacy != "" { + inputFilter := strings.TrimSpace(*fInputFiltersLegacy) + inputFilters = strings.Split(":"+inputFilter+":", ":") + } + if *fInputFilters != "" { + inputFilter := strings.TrimSpace(*fInputFilters) + inputFilters = strings.Split(":"+inputFilter+":", ":") + } - if *fVersion { - v := fmt.Sprintf("Telegraf - Version %s", Version) - fmt.Println(v) - return - } + var outputFilters []string + if *fOutputFiltersLegacy != "" { + outputFilter := strings.TrimSpace(*fOutputFiltersLegacy) + outputFilters = strings.Split(":"+outputFilter+":", ":") + } + if *fOutputFilters != "" { + outputFilter := strings.TrimSpace(*fOutputFilters) + outputFilters = strings.Split(":"+outputFilter+":", ":") + } - if *fSampleConfig { - config.PrintSampleConfig(pluginFilters, outputFilters) - return - } + if *fVersion { + v := fmt.Sprintf("Telegraf - Version %s", Version) + fmt.Println(v) + return + } - if *fUsage != "" { - if err := config.PrintPluginConfig(*fUsage); err != nil { - if err2 := config.PrintOutputConfig(*fUsage); err2 != nil { - log.Fatalf("%s and %s", err, err2) + if *fSampleConfig { + config.PrintSampleConfig(inputFilters, outputFilters) + return + } + + if *fUsage != "" { + if err := config.PrintInputConfig(*fUsage); err != nil { + if err2 := config.PrintOutputConfig(*fUsage); err2 != nil { + log.Fatalf("%s and %s", err, err2) + } + } + return + } + + var ( + c *config.Config + err error + ) + + if *fConfig != "" { + c = config.NewConfig() + c.OutputFilters = outputFilters + c.InputFilters = inputFilters + err = c.LoadConfig(*fConfig) + if err != nil { + log.Fatal(err) + } + } else { + fmt.Println("Usage: Telegraf") + flag.PrintDefaults() + return + } + + if *fConfigDirectoryLegacy != "" { + err = c.LoadDirectory(*fConfigDirectoryLegacy) + if 
err != nil { + log.Fatal(err) } } - return - } - var ( - c *config.Config - err error - ) + if *fConfigDirectory != "" { + err = c.LoadDirectory(*fConfigDirectory) + if err != nil { + log.Fatal(err) + } + } + if len(c.Outputs) == 0 { + log.Fatalf("Error: no outputs found, did you provide a valid config file?") + } + if len(c.Inputs) == 0 { + log.Fatalf("Error: no inputs found, did you provide a valid config file?") + } - if *fConfig != "" { - c = config.NewConfig() - c.OutputFilters = outputFilters - c.PluginFilters = pluginFilters - err = c.LoadConfig(*fConfig) + ag, err := telegraf.NewAgent(c) if err != nil { log.Fatal(err) } - } else { - fmt.Println("Usage: Telegraf") - flag.PrintDefaults() - return - } - if *fConfigDirectory != "" { - err = c.LoadDirectory(*fConfigDirectory) + if *fDebug { + ag.Config.Agent.Debug = true + } + + if *fQuiet { + ag.Config.Agent.Quiet = true + } + + if *fTest { + err = ag.Test() + if err != nil { + log.Fatal(err) + } + return + } + + err = ag.Connect() if err != nil { log.Fatal(err) } - } - if len(c.Outputs) == 0 { - log.Fatalf("Error: no outputs found, did you provide a valid config file?") - } - if len(c.Plugins) == 0 { - log.Fatalf("Error: no plugins found, did you provide a valid config file?") - } - ag, err := telegraf.NewAgent(c) - if err != nil { - log.Fatal(err) - } + shutdown := make(chan struct{}) + signals := make(chan os.Signal) + signal.Notify(signals, os.Interrupt, syscall.SIGHUP) + go func() { + sig := <-signals + if sig == os.Interrupt { + close(shutdown) + } + if sig == syscall.SIGHUP { + log.Printf("Reloading Telegraf config\n") + <-reload + reload <- true + close(shutdown) + } + }() - if *fDebug { - ag.Config.Agent.Debug = true - } + log.Printf("Starting Telegraf (version %s)\n", Version) + log.Printf("Loaded outputs: %s", strings.Join(c.OutputNames(), " ")) + log.Printf("Loaded inputs: %s", strings.Join(c.InputNames(), " ")) + log.Printf("Tags enabled: %s", c.ListTags()) - if *fTest { - err = ag.Test() - if err != nil { - log.Fatal(err) - } - return - } + if *fPidfile != "" { + f, err := os.Create(*fPidfile) + if err != nil { + log.Fatalf("Unable to create pidfile: %s", err) + } - err = ag.Connect() - if err != nil { - log.Fatal(err) - } + fmt.Fprintf(f, "%d\n", os.Getpid()) - shutdown := make(chan struct{}) - signals := make(chan os.Signal) - signal.Notify(signals, os.Interrupt) - go func() { - <-signals - close(shutdown) - }() - - log.Printf("Starting Telegraf (version %s)\n", Version) - log.Printf("Loaded outputs: %s", strings.Join(c.OutputNames(), " ")) - log.Printf("Loaded plugins: %s", strings.Join(c.PluginNames(), " ")) - log.Printf("Tags enabled: %s", c.ListTags()) - - if *fPidfile != "" { - f, err := os.Create(*fPidfile) - if err != nil { - log.Fatalf("Unable to create pidfile: %s", err) + f.Close() } - fmt.Fprintf(f, "%d\n", os.Getpid()) - - f.Close() + ag.Run(shutdown) } - - ag.Run(shutdown) +} + +func usageExit() { + fmt.Println(usage) + os.Exit(0) } diff --git a/etc/telegraf.conf b/etc/telegraf.conf index 0781d3028..9871ae7bc 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -1,7 +1,7 @@ # Telegraf configuration # Telegraf is entirely plugin driven. All metrics are gathered from the -# declared plugins. +# declared inputs. # Even if a plugin has no configuration, it must be declared in here # to be active. 
Declaring a plugin means just specifying the name @@ -49,8 +49,6 @@ # OUTPUTS # ############################################################################### -[outputs] - # Configuration for influxdb server to send metrics to [[outputs.influxdb]] # The full HTTP or UDP endpoint URL for your InfluxDB instance. @@ -76,13 +74,11 @@ ############################################################################### -# PLUGINS # +# INPUTS # ############################################################################### -[plugins] - # Read metrics about cpu usage -[[plugins.cpu]] +[[inputs.cpu]] # Whether to report per-cpu stats or not percpu = true # Whether to report total system cpu stats or not @@ -91,14 +87,14 @@ drop = ["cpu_time"] # Read metrics about disk usage by mount point -[[plugins.disk]] +[[inputs.disk]] # By default, telegraf gather stats for all mountpoints. # Setting mountpoints will restrict the stats to the specified mountpoints. - # Mountpoints=["/"] + # mount_points=["/"] # Read metrics about disk IO by device -[[plugins.io]] - # By default, telegraf will gather stats for all devices including +[[inputs.diskio]] + # By default, telegraf will gather stats for all devices including # disk partitions. # Setting devices will restrict the stats to the specified devices. # Devices=["sda","sdb"] @@ -106,18 +102,18 @@ # SkipSerialNumber = true # Read metrics about memory usage -[[plugins.mem]] +[[inputs.mem]] # no configuration # Read metrics about swap memory usage -[[plugins.swap]] +[[inputs.swap]] # no configuration # Read metrics about system load & uptime -[[plugins.system]] +[[inputs.system]] # no configuration ############################################################################### -# SERVICE PLUGINS # +# SERVICE INPUTS # ############################################################################### diff --git a/internal/config/config.go b/internal/config/config.go index 348496f0a..3b5e4ff17 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -10,14 +10,14 @@ import ( "strings" "time" - "github.com/influxdb/telegraf/internal" - "github.com/influxdb/telegraf/outputs" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/outputs" "github.com/naoina/toml" "github.com/naoina/toml/ast" - "github.com/influxdb/influxdb/client/v2" + "github.com/influxdata/influxdb/client/v2" ) // Config specifies the URL/user/password for the database that telegraf @@ -25,11 +25,11 @@ import ( // specified type Config struct { Tags map[string]string - PluginFilters []string + InputFilters []string OutputFilters []string Agent *AgentConfig - Plugins []*RunningPlugin + Inputs []*RunningInput Outputs []*RunningOutput } @@ -45,9 +45,9 @@ func NewConfig() *Config { }, Tags: make(map[string]string), - Plugins: make([]*RunningPlugin, 0), + Inputs: make([]*RunningInput, 0), Outputs: make([]*RunningOutput, 0), - PluginFilters: make([]string, 0), + InputFilters: make([]string, 0), OutputFilters: make([]string, 0), } return c @@ -61,13 +61,22 @@ type AgentConfig struct { // ie, if Interval=10s then always collect on :00, :10, :20, etc. RoundInterval bool + // CollectionJitter is used to jitter the collection by a random amount. + // Each plugin will sleep for a random time within jitter before collecting. + // This can be used to avoid many plugins querying things like sysfs at the + // same time, which can have a measurable effect on the system. 
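+	// For example, interval = "10s" with collection_jitter = "2s" makes each
+	// input sleep a random duration of up to 2s before gathering, so
+	// collections land anywhere within 2s after each 10s tick.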
+	CollectionJitter internal.Duration
+
 	// Interval at which to flush data
 	FlushInterval internal.Duration

 	// FlushRetries is the number of times to retry each data flush
 	FlushRetries int

-	// FlushJitter tells
+	// FlushJitter jitters the flush interval by a random amount.
+	// This is primarily to avoid large write spikes for users running a large
+	// number of telegraf instances.
+	// ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
 	FlushJitter internal.Duration

 	// TODO(cam): Remove UTC and Precision parameters, they are no longer
@@ -76,8 +85,11 @@ type AgentConfig struct {
 	UTC       bool `toml:"utc"`
 	Precision string

-	// Option for running in debug mode
-	Debug bool
+	// Debug is the option for running in debug mode
+	Debug bool
+
+	// Quiet is the option for running in quiet mode
+	Quiet bool

 	Hostname string
 }
@@ -93,10 +105,10 @@ type RunningOutput struct {
 	Config *OutputConfig
 }

-type RunningPlugin struct {
+type RunningInput struct {
 	Name   string
-	Plugin plugins.Plugin
-	Config *PluginConfig
+	Input  inputs.Input
+	Config *InputConfig
 }

 // Filter containing drop/pass and tagdrop/tagpass rules
@@ -110,11 +122,15 @@ type Filter struct {
 	IsActive bool
 }

-// PluginConfig containing a name, interval, and filter
-type PluginConfig struct {
-	Name     string
-	Filter   Filter
-	Interval time.Duration
+// InputConfig containing a name, interval, and filter
+type InputConfig struct {
+	Name              string
+	NameOverride      string
+	MeasurementPrefix string
+	MeasurementSuffix string
+	Tags              map[string]string
+	Filter            Filter
+	Interval          time.Duration
 }

 // OutputConfig containing name and filter
@@ -142,12 +158,12 @@ func (ro *RunningOutput) FilterPoints(points []*client.Point) []*client.Point {

 // ShouldPass returns true if the metric should pass, false if should drop
 // based on the drop/pass filter parameters
-func (f Filter) ShouldPass(measurement string) bool {
+func (f Filter) ShouldPass(fieldkey string) bool {
 	if f.Pass != nil {
 		for _, pat := range f.Pass {
 			// TODO remove HasPrefix check, leaving it for now for legacy support.
 			// Cam, 2015-12-07
-			if strings.HasPrefix(measurement, pat) || internal.Glob(pat, measurement) {
+			if strings.HasPrefix(fieldkey, pat) || internal.Glob(pat, fieldkey) {
 				return true
 			}
 		}
@@ -158,7 +174,7 @@ func (f Filter) ShouldPass(measurement string) bool {
 		for _, pat := range f.Drop {
 			// TODO remove HasPrefix check, leaving it for now for legacy support.
 			// Cam, 2015-12-07
-			if strings.HasPrefix(measurement, pat) || internal.Glob(pat, measurement) {
+			if strings.HasPrefix(fieldkey, pat) || internal.Glob(pat, fieldkey) {
 				return false
 			}
 		}
@@ -200,16 +216,16 @@ func (f Filter) ShouldTagsPass(tags map[string]string) bool {
 	return true
 }

-// Plugins returns a list of strings of the configured plugins.
-func (c *Config) PluginNames() []string {
+// Inputs returns a list of strings of the configured inputs.
+func (c *Config) InputNames() []string {
 	var name []string
-	for _, plugin := range c.Plugins {
-		name = append(name, plugin.Name)
+	for _, input := range c.Inputs {
+		name = append(name, input.Name)
 	}
 	return name
 }

-// Outputs returns a list of strings of the configured plugins.
+// Outputs returns a list of strings of the configured outputs.
 func (c *Config) OutputNames() []string {
 	var name []string
 	for _, output := range c.Outputs {
@@ -235,7 +251,7 @@ func (c *Config) ListTags() string {

 var header = `# Telegraf configuration

 # Telegraf is entirely plugin driven. All metrics are gathered from the
-# declared plugins.
+# declared inputs.
# Even if a plugin has no configuration, it must be declared in here # to be active. Declaring a plugin means just specifying the name @@ -259,11 +275,16 @@ var header = `# Telegraf configuration # Configuration for telegraf agent [agent] - # Default data collection interval for all plugins + # Default data collection interval for all inputs interval = "10s" # Rounds collection interval to 'interval' # ie, if interval="10s" then always collect on :00, :10, :20, etc. round_interval = true + # Collection jitter is used to jitter the collection by a random amount. + # Each plugin will sleep for a random time within jitter before collecting. + # This can be used to avoid many plugins querying things like sysfs at the + # same time, which can have a measurable effect on the system. + collection_jitter = "0s" # Default data flushing interval for all outputs. You should not set this below # interval. Maximum flush_interval will be flush_interval + flush_jitter @@ -275,6 +296,8 @@ var header = `# Telegraf configuration # Run telegraf in debug mode debug = false + # Run telegraf in quiet mode + quiet = false # Override default hostname, if empty use os.Hostname() hostname = "" @@ -283,22 +306,20 @@ var header = `# Telegraf configuration # OUTPUTS # ############################################################################### -[outputs] ` var pluginHeader = ` ############################################################################### -# PLUGINS # +# INPUTS # ############################################################################### -[plugins] ` -var servicePluginHeader = ` +var serviceInputHeader = ` ############################################################################### -# SERVICE PLUGINS # +# SERVICE INPUTS # ############################################################################### ` @@ -322,35 +343,35 @@ func PrintSampleConfig(pluginFilters []string, outputFilters []string) { printConfig(oname, output, "outputs") } - // Filter plugins + // Filter inputs var pnames []string - for pname := range plugins.Plugins { + for pname := range inputs.Inputs { if len(pluginFilters) == 0 || sliceContains(pname, pluginFilters) { pnames = append(pnames, pname) } } sort.Strings(pnames) - // Print Plugins + // Print Inputs fmt.Printf(pluginHeader) - servPlugins := make(map[string]plugins.ServicePlugin) + servInputs := make(map[string]inputs.ServiceInput) for _, pname := range pnames { - creator := plugins.Plugins[pname] - plugin := creator() + creator := inputs.Inputs[pname] + input := creator() - switch p := plugin.(type) { - case plugins.ServicePlugin: - servPlugins[pname] = p + switch p := input.(type) { + case inputs.ServiceInput: + servInputs[pname] = p continue } - printConfig(pname, plugin, "plugins") + printConfig(pname, input, "inputs") } - // Print Service Plugins - fmt.Printf(servicePluginHeader) - for name, plugin := range servPlugins { - printConfig(name, plugin, "plugins") + // Print Service Inputs + fmt.Printf(serviceInputHeader) + for name, input := range servInputs { + printConfig(name, input, "inputs") } } @@ -378,12 +399,12 @@ func sliceContains(name string, list []string) bool { return false } -// PrintPluginConfig prints the config usage of a single plugin. -func PrintPluginConfig(name string) error { - if creator, ok := plugins.Plugins[name]; ok { - printConfig(name, creator(), "plugins") +// PrintInputConfig prints the config usage of a single input. 
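+// It backs the -usage flag, e.g. 'telegraf -usage mysql' prints the mysql
+// input's sample configuration.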
+func PrintInputConfig(name string) error {
+	if creator, ok := inputs.Inputs[name]; ok {
+		printConfig(name, creator(), "inputs")
 	} else {
-		return errors.New(fmt.Sprintf("Plugin %s not found", name))
+		return errors.New(fmt.Sprintf("Input %s not found", name))
 	}
 	return nil
 }
@@ -449,33 +470,15 @@ func (c *Config) LoadConfig(path string) error {
 				return err
 			}
 		case "outputs":
-			for outputName, outputVal := range subTable.Fields {
-				switch outputSubTable := outputVal.(type) {
-				case *ast.Table:
-					if err = c.addOutput(outputName, outputSubTable); err != nil {
-						return err
-					}
-				case []*ast.Table:
-					for _, t := range outputSubTable {
-						if err = c.addOutput(outputName, t); err != nil {
-							return err
-						}
-					}
-				default:
-					return fmt.Errorf("Unsupported config format: %s",
-						outputName)
-				}
-			}
-		case "plugins":
 			for pluginName, pluginVal := range subTable.Fields {
 				switch pluginSubTable := pluginVal.(type) {
 				case *ast.Table:
-					if err = c.addPlugin(pluginName, pluginSubTable); err != nil {
+					if err = c.addOutput(pluginName, pluginSubTable); err != nil {
 						return err
 					}
 				case []*ast.Table:
 					for _, t := range pluginSubTable {
-						if err = c.addPlugin(pluginName, t); err != nil {
+						if err = c.addOutput(pluginName, t); err != nil {
 							return err
 						}
 					}
@@ -484,10 +487,28 @@
 						pluginName)
 				}
 			}
-		// Assume it's a plugin for legacy config file support if no other
+		case "inputs", "plugins":
+			for pluginName, pluginVal := range subTable.Fields {
+				switch pluginSubTable := pluginVal.(type) {
+				case *ast.Table:
+					if err = c.addInput(pluginName, pluginSubTable); err != nil {
+						return err
+					}
+				case []*ast.Table:
+					for _, t := range pluginSubTable {
+						if err = c.addInput(pluginName, t); err != nil {
+							return err
+						}
+					}
+				default:
+					return fmt.Errorf("Unsupported config format: %s",
+						pluginName)
+				}
+			}
+		// Assume it's an input for legacy config file support if no other
 		// identifiers are present
 		default:
-			if err = c.addPlugin(name, subTable); err != nil {
+			if err = c.addInput(name, subTable); err != nil {
 				return err
 			}
 		}
@@ -523,36 +544,41 @@ func (c *Config) addOutput(name string, table *ast.Table) error {
 	return nil
 }

-func (c *Config) addPlugin(name string, table *ast.Table) error {
-	if len(c.PluginFilters) > 0 && !sliceContains(name, c.PluginFilters) {
+func (c *Config) addInput(name string, table *ast.Table) error {
+	if len(c.InputFilters) > 0 && !sliceContains(name, c.InputFilters) {
 		return nil
 	}
-	creator, ok := plugins.Plugins[name]
-	if !ok {
-		return fmt.Errorf("Undefined but requested plugin: %s", name)
+	// Legacy support renaming io input to diskio
+	if name == "io" {
+		name = "diskio"
 	}
-	plugin := creator()

-	pluginConfig, err := buildPlugin(name, table)
+	creator, ok := inputs.Inputs[name]
+	if !ok {
+		return fmt.Errorf("Undefined but requested input: %s", name)
+	}
+	input := creator()
+
+	pluginConfig, err := buildInput(name, table)
 	if err != nil {
 		return err
 	}

-	if err := toml.UnmarshalTable(table, plugin); err != nil {
+	if err := toml.UnmarshalTable(table, input); err != nil {
 		return err
 	}

-	rp := &RunningPlugin{
+	rp := &RunningInput{
 		Name:   name,
-		Plugin: plugin,
+		Input:  input,
 		Config: pluginConfig,
 	}
-	c.Plugins = append(c.Plugins, rp)
+	c.Inputs = append(c.Inputs, rp)
 	return nil
 }

 // buildFilter builds a Filter (tagpass/tagdrop/pass/drop) to
-// be inserted into the OutputConfig/PluginConfig to be used for prefix
+// be inserted into the OutputConfig/InputConfig to be used for prefix
 // filtering on tags and measurements
 func buildFilter(tbl *ast.Table) Filter {
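 	// pass/drop entries match field keys (see Filter.ShouldPass above), while
 	// tagpass/tagdrop sub-tables match tag values (Filter.ShouldTagsPass).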
 	f := Filter{}
@@ -628,10 +654,11 @@ func buildFilter(tbl *ast.Table) Filter {
 	return f
 }

-// buildPlugin parses plugin specific items from the ast.Table, builds the filter and returns a
-// PluginConfig to be inserted into RunningPlugin
-func buildPlugin(name string, tbl *ast.Table) (*PluginConfig, error) {
-	cp := &PluginConfig{Name: name}
+// buildInput parses input specific items from the ast.Table,
+// builds the filter and returns an
+// InputConfig to be inserted into RunningInput
+func buildInput(name string, tbl *ast.Table) (*InputConfig, error) {
+	cp := &InputConfig{Name: name}
 	if node, ok := tbl.Fields["interval"]; ok {
 		if kv, ok := node.(*ast.KeyValue); ok {
 			if str, ok := kv.Value.(*ast.String); ok {
@@ -644,14 +671,51 @@
 			}
 		}
 	}
+
+	if node, ok := tbl.Fields["name_prefix"]; ok {
+		if kv, ok := node.(*ast.KeyValue); ok {
+			if str, ok := kv.Value.(*ast.String); ok {
+				cp.MeasurementPrefix = str.Value
+			}
+		}
+	}
+
+	if node, ok := tbl.Fields["name_suffix"]; ok {
+		if kv, ok := node.(*ast.KeyValue); ok {
+			if str, ok := kv.Value.(*ast.String); ok {
+				cp.MeasurementSuffix = str.Value
+			}
+		}
+	}
+
+	if node, ok := tbl.Fields["name_override"]; ok {
+		if kv, ok := node.(*ast.KeyValue); ok {
+			if str, ok := kv.Value.(*ast.String); ok {
+				cp.NameOverride = str.Value
+			}
+		}
+	}
+
+	cp.Tags = make(map[string]string)
+	if node, ok := tbl.Fields["tags"]; ok {
+		if subtbl, ok := node.(*ast.Table); ok {
+			if err := toml.UnmarshalTable(subtbl, cp.Tags); err != nil {
+				log.Printf("Could not parse tags for input %s\n", name)
+			}
+		}
+	}
+
+	delete(tbl.Fields, "name_prefix")
+	delete(tbl.Fields, "name_suffix")
+	delete(tbl.Fields, "name_override")
 	delete(tbl.Fields, "interval")
+	delete(tbl.Fields, "tags")
 	cp.Filter = buildFilter(tbl)
 	return cp, nil
-
 }

 // buildOutput parses output specific items from the ast.Table, builds the filter and returns an
-// OutputConfig to be inserted into RunningPlugin
+// OutputConfig to be inserted into RunningOutput
 // Note: error exists in the return for future calls that might require error
 func buildOutput(name string, tbl *ast.Table) (*OutputConfig, error) {
 	oc := &OutputConfig{
 		Name:   name,
 		Filter: buildFilter(tbl),
 	}
 	return oc, nil
-
 }
diff --git a/internal/config/config_test.go b/internal/config/config_test.go
index f6b929976..40af30c1e 100644
--- a/internal/config/config_test.go
+++ b/internal/config/config_test.go
@@ -4,21 +4,21 @@ import (
 	"testing"
 	"time"

-	"github.com/influxdb/telegraf/plugins"
-	"github.com/influxdb/telegraf/plugins/exec"
-	"github.com/influxdb/telegraf/plugins/memcached"
-	"github.com/influxdb/telegraf/plugins/procstat"
+	"github.com/influxdata/telegraf/plugins/inputs"
+	"github.com/influxdata/telegraf/plugins/inputs/exec"
+	"github.com/influxdata/telegraf/plugins/inputs/memcached"
+	"github.com/influxdata/telegraf/plugins/inputs/procstat"

 	"github.com/stretchr/testify/assert"
 )

-func TestConfig_LoadSinglePlugin(t *testing.T) {
+func TestConfig_LoadSingleInput(t *testing.T) {
 	c := NewConfig()
 	c.LoadConfig("./testdata/single_plugin.toml")

-	memcached := plugins.Plugins["memcached"]().(*memcached.Memcached)
+	memcached := inputs.Inputs["memcached"]().(*memcached.Memcached)
 	memcached.Servers = []string{"localhost"}

-	mConfig := &PluginConfig{
+	mConfig := &InputConfig{
 		Name: "memcached",
 		Filter: Filter{
 			Drop: []string{"other", "stuff"},
@@ -39,10 +39,11 @@ func
TestConfig_LoadSinglePlugin(t *testing.T) { }, Interval: 5 * time.Second, } + mConfig.Tags = make(map[string]string) - assert.Equal(t, memcached, c.Plugins[0].Plugin, + assert.Equal(t, memcached, c.Inputs[0].Input, "Testdata did not produce a correct memcached struct.") - assert.Equal(t, mConfig, c.Plugins[0].Config, + assert.Equal(t, mConfig, c.Inputs[0].Config, "Testdata did not produce correct memcached metadata.") } @@ -57,10 +58,10 @@ func TestConfig_LoadDirectory(t *testing.T) { t.Error(err) } - memcached := plugins.Plugins["memcached"]().(*memcached.Memcached) + memcached := inputs.Inputs["memcached"]().(*memcached.Memcached) memcached.Servers = []string{"localhost"} - mConfig := &PluginConfig{ + mConfig := &InputConfig{ Name: "memcached", Filter: Filter{ Drop: []string{"other", "stuff"}, @@ -81,45 +82,40 @@ func TestConfig_LoadDirectory(t *testing.T) { }, Interval: 5 * time.Second, } - assert.Equal(t, memcached, c.Plugins[0].Plugin, + mConfig.Tags = make(map[string]string) + + assert.Equal(t, memcached, c.Inputs[0].Input, "Testdata did not produce a correct memcached struct.") - assert.Equal(t, mConfig, c.Plugins[0].Config, + assert.Equal(t, mConfig, c.Inputs[0].Config, "Testdata did not produce correct memcached metadata.") - ex := plugins.Plugins["exec"]().(*exec.Exec) - ex.Commands = []*exec.Command{ - &exec.Command{ - Command: "/usr/bin/myothercollector --foo=bar", - Name: "myothercollector", - }, + ex := inputs.Inputs["exec"]().(*exec.Exec) + ex.Command = "/usr/bin/myothercollector --foo=bar" + eConfig := &InputConfig{ + Name: "exec", + MeasurementSuffix: "_myothercollector", } - eConfig := &PluginConfig{Name: "exec"} - assert.Equal(t, ex, c.Plugins[1].Plugin, + eConfig.Tags = make(map[string]string) + assert.Equal(t, ex, c.Inputs[1].Input, "Merged Testdata did not produce a correct exec struct.") - assert.Equal(t, eConfig, c.Plugins[1].Config, + assert.Equal(t, eConfig, c.Inputs[1].Config, "Merged Testdata did not produce correct exec metadata.") memcached.Servers = []string{"192.168.1.1"} - assert.Equal(t, memcached, c.Plugins[2].Plugin, + assert.Equal(t, memcached, c.Inputs[2].Input, "Testdata did not produce a correct memcached struct.") - assert.Equal(t, mConfig, c.Plugins[2].Config, + assert.Equal(t, mConfig, c.Inputs[2].Config, "Testdata did not produce correct memcached metadata.") - pstat := plugins.Plugins["procstat"]().(*procstat.Procstat) - pstat.Specifications = []*procstat.Specification{ - &procstat.Specification{ - PidFile: "/var/run/grafana-server.pid", - }, - &procstat.Specification{ - PidFile: "/var/run/influxdb/influxd.pid", - }, - } + pstat := inputs.Inputs["procstat"]().(*procstat.Procstat) + pstat.PidFile = "/var/run/grafana-server.pid" - pConfig := &PluginConfig{Name: "procstat"} + pConfig := &InputConfig{Name: "procstat"} + pConfig.Tags = make(map[string]string) - assert.Equal(t, pstat, c.Plugins[3].Plugin, + assert.Equal(t, pstat, c.Inputs[3].Input, "Merged Testdata did not produce a correct procstat struct.") - assert.Equal(t, pConfig, c.Plugins[3].Config, + assert.Equal(t, pConfig, c.Inputs[3].Config, "Merged Testdata did not produce correct procstat metadata.") } diff --git a/internal/config/testdata/single_plugin.toml b/internal/config/testdata/single_plugin.toml index e591984f1..6670f6b2f 100644 --- a/internal/config/testdata/single_plugin.toml +++ b/internal/config/testdata/single_plugin.toml @@ -1,9 +1,9 @@ -[[plugins.memcached]] +[[inputs.memcached]] servers = ["localhost"] pass = ["some", "strings"] drop = ["other", "stuff"] interval = "5s" - 
[plugins.memcached.tagpass] + [inputs.memcached.tagpass] goodtag = ["mytag"] - [plugins.memcached.tagdrop] + [inputs.memcached.tagdrop] badtag = ["othertag"] diff --git a/internal/config/testdata/subconfig/exec.conf b/internal/config/testdata/subconfig/exec.conf index 552441031..d621e78e0 100644 --- a/internal/config/testdata/subconfig/exec.conf +++ b/internal/config/testdata/subconfig/exec.conf @@ -1,8 +1,4 @@ -[[plugins.exec]] - # specify commands via an array of tables - [[plugins.exec.commands]] +[[inputs.exec]] # the command to run command = "/usr/bin/myothercollector --foo=bar" - - # name of the command (used as a prefix for measurements) - name = "myothercollector" + name_suffix = "_myothercollector" diff --git a/internal/config/testdata/subconfig/memcached.conf b/internal/config/testdata/subconfig/memcached.conf index 8d67886c1..4c43febc7 100644 --- a/internal/config/testdata/subconfig/memcached.conf +++ b/internal/config/testdata/subconfig/memcached.conf @@ -1,9 +1,9 @@ -[[plugins.memcached]] +[[inputs.memcached]] servers = ["192.168.1.1"] pass = ["some", "strings"] drop = ["other", "stuff"] interval = "5s" - [plugins.memcached.tagpass] + [inputs.memcached.tagpass] goodtag = ["mytag"] - [plugins.memcached.tagdrop] + [inputs.memcached.tagdrop] badtag = ["othertag"] diff --git a/internal/config/testdata/subconfig/procstat.conf b/internal/config/testdata/subconfig/procstat.conf index 33f288d84..82708667f 100644 --- a/internal/config/testdata/subconfig/procstat.conf +++ b/internal/config/testdata/subconfig/procstat.conf @@ -1,5 +1,2 @@ -[[plugins.procstat]] - [[plugins.procstat.specifications]] +[[inputs.procstat]] pid_file = "/var/run/grafana-server.pid" - [[plugins.procstat.specifications]] - pid_file = "/var/run/influxdb/influxd.pid" diff --git a/internal/config/testdata/telegraf-agent.toml b/internal/config/testdata/telegraf-agent.toml index e63e47b56..5ede47016 100644 --- a/internal/config/testdata/telegraf-agent.toml +++ b/internal/config/testdata/telegraf-agent.toml @@ -1,7 +1,7 @@ # Telegraf configuration # Telegraf is entirely plugin driven. All metrics are gathered from the -# declared plugins. +# declared inputs. # Even if a plugin has no configuration, it must be declared in here # to be active. Declaring a plugin means just specifying the name @@ -21,20 +21,13 @@ # Tags can also be specified via a normal map, but only one form at a time: [tags] - # dc = "us-east-1" + dc = "us-east-1" # Configuration for telegraf agent [agent] # Default data collection interval for all plugins interval = "10s" - # If utc = false, uses local time (utc is highly recommended) - utc = true - - # Precision of writes, valid values are n, u, ms, s, m, and h - # note: using second precision greatly helps InfluxDB compression - precision = "s" - # run telegraf in debug mode debug = false @@ -46,8 +39,6 @@ # OUTPUTS # ############################################################################### -[outputs] - # Configuration for influxdb server to send metrics to [[outputs.influxdb]] # The full HTTP endpoint URL for your InfluxDB instance @@ -58,17 +49,6 @@ # The target database for metrics. This database must already exist database = "telegraf" # required. - # Connection timeout (for the connection with InfluxDB), formatted as a string. - # Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". 
- # If not provided, will default to 0 (no timeout) - # timeout = "5s" - - # username = "telegraf" - # password = "metricsmetricsmetricsmetrics" - - # Set the user agent for the POSTs (can be useful for log differentiation) - # user_agent = "telegraf" - [[outputs.influxdb]] urls = ["udp://localhost:8089"] database = "udp-telegraf" @@ -88,15 +68,13 @@ # PLUGINS # ############################################################################### -[plugins] - # Read Apache status information (mod_status) -[[plugins.apache]] -# An array of Apache status URI to gather stats. -urls = ["http://localhost/server-status?auto"] +[[inputs.apache]] + # An array of Apache status URI to gather stats. + urls = ["http://localhost/server-status?auto"] # Read metrics about cpu usage -[[plugins.cpu]] +[[inputs.cpu]] # Whether to report per-cpu stats or not percpu = true # Whether to report total system cpu stats or not @@ -105,11 +83,11 @@ urls = ["http://localhost/server-status?auto"] drop = ["cpu_time"] # Read metrics about disk usage by mount point -[[plugins.disk]] +[[inputs.diskio]] # no configuration # Read metrics from one or many disque servers -[[plugins.disque]] +[[inputs.disque]] # An array of URI to gather stats about. Specify an ip or hostname # with optional port and password. ie disque://localhost, disque://10.10.3.33:18832, # 10.0.0.1:10000, etc. @@ -118,7 +96,7 @@ urls = ["http://localhost/server-status?auto"] servers = ["localhost"] # Read stats from one or more Elasticsearch servers or clusters -[[plugins.elasticsearch]] +[[inputs.elasticsearch]] # specify a list of one or more Elasticsearch servers servers = ["http://localhost:9200"] @@ -127,17 +105,13 @@ urls = ["http://localhost/server-status?auto"] local = true # Read flattened metrics from one or more commands that output JSON to stdout -[[plugins.exec]] - # specify commands via an array of tables - [[exec.commands]] +[[inputs.exec]] # the command to run command = "/usr/bin/mycollector --foo=bar" - - # name of the command (used as a prefix for measurements) - name = "mycollector" + name_suffix = "_mycollector" # Read metrics of haproxy, via socket or csv stats page -[[plugins.haproxy]] +[[inputs.haproxy]] # An array of address to gather stats about. Specify an ip on hostname # with optional port. ie localhost, 10.10.3.33:1936, etc. 
# @@ -147,33 +121,30 @@ urls = ["http://localhost/server-status?auto"] # servers = ["socket:/run/haproxy/admin.sock"] # Read flattened metrics from one or more JSON HTTP endpoints -[[plugins.httpjson]] - # Specify services via an array of tables - [[httpjson.services]] +[[inputs.httpjson]] + # a name for the service being polled + name = "webserver_stats" - # a name for the service being polled - name = "webserver_stats" + # URL of each server in the service's cluster + servers = [ + "http://localhost:9999/stats/", + "http://localhost:9998/stats/", + ] - # URL of each server in the service's cluster - servers = [ - "http://localhost:9999/stats/", - "http://localhost:9998/stats/", - ] + # HTTP method to use (case-sensitive) + method = "GET" - # HTTP method to use (case-sensitive) - method = "GET" - - # HTTP parameters (all values must be strings) - [httpjson.services.parameters] - event_type = "cpu_spike" - threshold = "0.75" + # HTTP parameters (all values must be strings) + [httpjson.parameters] + event_type = "cpu_spike" + threshold = "0.75" # Read metrics about disk IO by device -[[plugins.io]] +[[inputs.diskio]] # no configuration # read metrics from a Kafka topic -[[plugins.kafka_consumer]] +[[inputs.kafka_consumer]] # topic(s) to consume topics = ["telegraf"] # an array of Zookeeper connection strings @@ -186,7 +157,7 @@ urls = ["http://localhost/server-status?auto"] offset = "oldest" # Read metrics from a LeoFS Server via SNMP -[[plugins.leofs]] +[[inputs.leofs]] # An array of URI to gather stats about LeoFS. # Specify an ip or hostname with port. ie 127.0.0.1:4020 # @@ -194,7 +165,7 @@ urls = ["http://localhost/server-status?auto"] servers = ["127.0.0.1:4021"] # Read metrics from local Lustre service on OST, MDS -[[plugins.lustre2]] +[[inputs.lustre2]] # An array of /proc globs to search for Lustre stats # If not specified, the default will work on Lustre 2.5.x # @@ -202,11 +173,11 @@ urls = ["http://localhost/server-status?auto"] # mds_procfiles = ["/proc/fs/lustre/mdt/*/md_stats"] # Read metrics about memory usage -[[plugins.mem]] +[[inputs.mem]] # no configuration # Read metrics from one or many memcached servers -[[plugins.memcached]] +[[inputs.memcached]] # An array of address to gather stats about. Specify an ip on hostname # with optional port. ie localhost, 10.0.0.1:11211, etc. # @@ -214,7 +185,7 @@ urls = ["http://localhost/server-status?auto"] servers = ["localhost"] # Read metrics from one or many MongoDB servers -[[plugins.mongodb]] +[[inputs.mongodb]] # An array of URI to gather stats about. Specify an ip or hostname # with optional port add password. ie mongodb://user:auth_key@10.10.3.30:27017, # mongodb://10.10.3.33:18832, 10.0.0.1:10000, etc. @@ -223,7 +194,7 @@ urls = ["http://localhost/server-status?auto"] servers = ["127.0.0.1:27017"] # Read metrics from one or many mysql servers -[[plugins.mysql]] +[[inputs.mysql]] # specify servers via a url matching: # [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify]] # e.g. @@ -234,7 +205,7 @@ urls = ["http://localhost/server-status?auto"] servers = ["localhost"] # Read metrics about network interface usage -[[plugins.net]] +[[inputs.net]] # By default, telegraf gathers stats from any up interface (excluding loopback) # Setting interfaces will tell it to gather these explicit interfaces, # regardless of status. @@ -242,12 +213,12 @@ urls = ["http://localhost/server-status?auto"] # interfaces = ["eth0", ... 
] # Read Nginx's basic status information (ngx_http_stub_status_module) -[[plugins.nginx]] +[[inputs.nginx]] # An array of Nginx stub_status URI to gather stats. urls = ["http://localhost/status"] # Ping given url(s) and return statistics -[[plugins.ping]] +[[inputs.ping]] # urls to ping urls = ["www.google.com"] # required # number of pings to send (ping -c ) @@ -260,10 +231,7 @@ urls = ["http://localhost/server-status?auto"] interface = "" # Read metrics from one or many postgresql servers -[[plugins.postgresql]] - # specify servers via an array of tables - [[postgresql.servers]] - +[[inputs.postgresql]] # specify address via a url matching: # postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full] # or a simple string: @@ -290,14 +258,13 @@ urls = ["http://localhost/server-status?auto"] # address = "influx@remoteserver" # Read metrics from one or many prometheus clients -[[plugins.prometheus]] +[[inputs.prometheus]] # An array of urls to scrape metrics from. urls = ["http://localhost:9100/metrics"] # Read metrics from one or many RabbitMQ servers via the management API -[[plugins.rabbitmq]] +[[inputs.rabbitmq]] # Specify servers via an array of tables - [[rabbitmq.servers]] # name = "rmq-server-1" # optional tag # url = "http://localhost:15672" # username = "guest" @@ -308,7 +275,7 @@ urls = ["http://localhost/server-status?auto"] # nodes = ["rabbit@node1", "rabbit@node2"] # Read metrics from one or many redis servers -[[plugins.redis]] +[[inputs.redis]] # An array of URI to gather stats about. Specify an ip or hostname # with optional port add password. ie redis://localhost, redis://10.10.3.33:18832, # 10.0.0.1:10000, etc. @@ -317,7 +284,7 @@ urls = ["http://localhost/server-status?auto"] servers = ["localhost"] # Read metrics from one or many RethinkDB servers -[[plugins.rethinkdb]] +[[inputs.rethinkdb]] # An array of URI to gather stats about. Specify an ip or hostname # with optional port add password. ie rethinkdb://user:auth_key@10.10.3.30:28105, # rethinkdb://10.10.3.33:18832, 10.0.0.1:10000, etc. 
@@ -326,9 +293,9 @@ urls = ["http://localhost/server-status?auto"]
   servers = ["127.0.0.1:28015"]

 # Read metrics about swap memory usage
-[[plugins.swap]]
+[[inputs.swap]]
   # no configuration

 # Read metrics about system load & uptime
-[[plugins.system]]
+[[inputs.system]]
   # no configuration
diff --git a/internal/internal.go b/internal/internal.go
index 45164682b..8b0b33a41 100644
--- a/internal/internal.go
+++ b/internal/internal.go
@@ -3,7 +3,9 @@ package internal
 import (
 	"bufio"
 	"errors"
+	"fmt"
 	"os"
+	"strconv"
 	"strings"
 	"time"
 )
@@ -27,6 +29,47 @@ func (d *Duration) UnmarshalTOML(b []byte) error {

 var NotImplementedError = errors.New("not implemented yet")

+type JSONFlattener struct {
+	Fields map[string]interface{}
+}
+
+// FlattenJSON flattens nested maps/interfaces into a fields map
+func (f *JSONFlattener) FlattenJSON(
+	fieldname string,
+	v interface{},
+) error {
+	if f.Fields == nil {
+		f.Fields = make(map[string]interface{})
+	}
+	fieldname = strings.Trim(fieldname, "_")
+	switch t := v.(type) {
+	case map[string]interface{}:
+		for k, v := range t {
+			err := f.FlattenJSON(fieldname+"_"+k+"_", v)
+			if err != nil {
+				return err
+			}
+		}
+	case []interface{}:
+		// array elements are flattened using the element index as the key
+		for i, v := range t {
+			k := strconv.Itoa(i)
+			err := f.FlattenJSON(fieldname+"_"+k+"_", v)
+			if err != nil {
+				return err
+			}
+		}
+	case float64:
+		f.Fields[fieldname] = t
+	case bool, string, nil:
+		// ignored types
+		return nil
+	default:
+		return fmt.Errorf("JSON Flattener: got unexpected type %T with value %v (%s)",
+			t, t, fieldname)
+	}
+	return nil
+}
+
 // ReadLines reads contents from a file and splits them by new lines.
 // A convenience wrapper to ReadLinesOffsetN(filename, 0, -1).
 func ReadLines(filename string) ([]string, error) {
diff --git a/outputs/all/all.go b/outputs/all/all.go
deleted file mode 100644
index 08ebf2549..000000000
--- a/outputs/all/all.go
+++ /dev/null
@@ -1,16 +0,0 @@
-package all
-
-import (
-	_ "github.com/influxdb/telegraf/outputs/amon"
-	_ "github.com/influxdb/telegraf/outputs/amqp"
-	_ "github.com/influxdb/telegraf/outputs/datadog"
-	_ "github.com/influxdb/telegraf/outputs/influxdb"
-	_ "github.com/influxdb/telegraf/outputs/kafka"
-	_ "github.com/influxdb/telegraf/outputs/kinesis"
-	_ "github.com/influxdb/telegraf/outputs/librato"
-	_ "github.com/influxdb/telegraf/outputs/mqtt"
-	_ "github.com/influxdb/telegraf/outputs/nsq"
-	_ "github.com/influxdb/telegraf/outputs/opentsdb"
-	_ "github.com/influxdb/telegraf/outputs/prometheus_client"
-	_ "github.com/influxdb/telegraf/outputs/riemann"
-)
diff --git a/outputs/kafka/kafka.go b/outputs/kafka/kafka.go
deleted file mode 100644
index fae955210..000000000
--- a/outputs/kafka/kafka.go
+++ /dev/null
@@ -1,85 +0,0 @@
-package kafka
-
-import (
-	"errors"
-	"fmt"
-
-	"github.com/Shopify/sarama"
-	"github.com/influxdb/influxdb/client/v2"
-	"github.com/influxdb/telegraf/outputs"
-)
-
-type Kafka struct {
-	// Kafka brokers to send metrics to
-	Brokers []string
-	// Kafka topic
-	Topic string
-	// Routing Key Tag
-	RoutingTag string `toml:"routing_tag"`
-
-	producer sarama.SyncProducer
-}
-
-var sampleConfig = `
-	# URLs of kafka brokers
-	brokers = ["localhost:9092"]
-	# Kafka topic for producer messages
-	topic = "telegraf"
-	# Telegraf tag to use as a routing key
-	#  ie, if this tag exists, it's value will be used as the routing key
-	routing_tag = "host"
-`
-
-func (k *Kafka) Connect() error {
-	producer, err := sarama.NewSyncProducer(k.Brokers, nil)
-	if err != nil {
-		return err
-	}
-	k.producer = producer
-	return nil
-}
-
-func (k *Kafka)
Close() error { - return k.producer.Close() -} - -func (k *Kafka) SampleConfig() string { - return sampleConfig -} - -func (k *Kafka) Description() string { - return "Configuration for the Kafka server to send metrics to" -} - -func (k *Kafka) Write(points []*client.Point) error { - if len(points) == 0 { - return nil - } - - for _, p := range points { - // Combine tags from Point and BatchPoints and grab the resulting - // line-protocol output string to write to Kafka - value := p.String() - - m := &sarama.ProducerMessage{ - Topic: k.Topic, - Value: sarama.StringEncoder(value), - } - if h, ok := p.Tags()[k.RoutingTag]; ok { - m.Key = sarama.StringEncoder(h) - } - - _, _, err := k.producer.SendMessage(m) - if err != nil { - return errors.New(fmt.Sprintf("FAILED to send kafka message: %s\n", - err)) - } - } - return nil -} - -func init() { - outputs.Add("kafka", func() outputs.Output { - return &Kafka{} - }) -} diff --git a/plugins/all/all.go b/plugins/all/all.go deleted file mode 100644 index 8b4e754be..000000000 --- a/plugins/all/all.go +++ /dev/null @@ -1,37 +0,0 @@ -package all - -import ( - _ "github.com/influxdb/telegraf/plugins/aerospike" - _ "github.com/influxdb/telegraf/plugins/apache" - _ "github.com/influxdb/telegraf/plugins/bcache" - _ "github.com/influxdb/telegraf/plugins/disque" - _ "github.com/influxdb/telegraf/plugins/elasticsearch" - _ "github.com/influxdb/telegraf/plugins/exec" - _ "github.com/influxdb/telegraf/plugins/haproxy" - _ "github.com/influxdb/telegraf/plugins/httpjson" - _ "github.com/influxdb/telegraf/plugins/influxdb" - _ "github.com/influxdb/telegraf/plugins/jolokia" - _ "github.com/influxdb/telegraf/plugins/kafka_consumer" - _ "github.com/influxdb/telegraf/plugins/leofs" - _ "github.com/influxdb/telegraf/plugins/lustre2" - _ "github.com/influxdb/telegraf/plugins/mailchimp" - _ "github.com/influxdb/telegraf/plugins/memcached" - _ "github.com/influxdb/telegraf/plugins/mongodb" - _ "github.com/influxdb/telegraf/plugins/mysql" - _ "github.com/influxdb/telegraf/plugins/nginx" - _ "github.com/influxdb/telegraf/plugins/phpfpm" - _ "github.com/influxdb/telegraf/plugins/ping" - _ "github.com/influxdb/telegraf/plugins/postgresql" - _ "github.com/influxdb/telegraf/plugins/procstat" - _ "github.com/influxdb/telegraf/plugins/prometheus" - _ "github.com/influxdb/telegraf/plugins/puppetagent" - _ "github.com/influxdb/telegraf/plugins/rabbitmq" - _ "github.com/influxdb/telegraf/plugins/redis" - _ "github.com/influxdb/telegraf/plugins/rethinkdb" - _ "github.com/influxdb/telegraf/plugins/statsd" - _ "github.com/influxdb/telegraf/plugins/system" - _ "github.com/influxdb/telegraf/plugins/trig" - _ "github.com/influxdb/telegraf/plugins/twemproxy" - _ "github.com/influxdb/telegraf/plugins/zfs" - _ "github.com/influxdb/telegraf/plugins/zookeeper" -) diff --git a/plugins/elasticsearch/testdata_test.go b/plugins/elasticsearch/testdata_test.go deleted file mode 100644 index 7fc777553..000000000 --- a/plugins/elasticsearch/testdata_test.go +++ /dev/null @@ -1,759 +0,0 @@ -package elasticsearch - -const clusterResponse = ` -{ - "cluster_name": "elasticsearch_telegraf", - "status": "green", - "timed_out": false, - "number_of_nodes": 3, - "number_of_data_nodes": 3, - "active_primary_shards": 5, - "active_shards": 15, - "relocating_shards": 0, - "initializing_shards": 0, - "unassigned_shards": 0, - "indices": { - "v1": { - "status": "green", - "number_of_shards": 10, - "number_of_replicas": 1, - "active_primary_shards": 10, - "active_shards": 20, - "relocating_shards": 0, - 
"initializing_shards": 0, - "unassigned_shards": 0 - }, - "v2": { - "status": "red", - "number_of_shards": 10, - "number_of_replicas": 1, - "active_primary_shards": 0, - "active_shards": 0, - "relocating_shards": 0, - "initializing_shards": 0, - "unassigned_shards": 20 - } - } -} -` - -var clusterHealthExpected = map[string]interface{}{ - "status": "green", - "timed_out": false, - "number_of_nodes": 3, - "number_of_data_nodes": 3, - "active_primary_shards": 5, - "active_shards": 15, - "relocating_shards": 0, - "initializing_shards": 0, - "unassigned_shards": 0, -} - -var v1IndexExpected = map[string]interface{}{ - "status": "green", - "number_of_shards": 10, - "number_of_replicas": 1, - "active_primary_shards": 10, - "active_shards": 20, - "relocating_shards": 0, - "initializing_shards": 0, - "unassigned_shards": 0, -} - -var v2IndexExpected = map[string]interface{}{ - "status": "red", - "number_of_shards": 10, - "number_of_replicas": 1, - "active_primary_shards": 0, - "active_shards": 0, - "relocating_shards": 0, - "initializing_shards": 0, - "unassigned_shards": 20, -} - -const statsResponse = ` -{ - "cluster_name": "es-testcluster", - "nodes": { - "SDFsfSDFsdfFSDSDfSFDSDF": { - "timestamp": 1436365550135, - "name": "test.host.com", - "transport_address": "inet[/127.0.0.1:9300]", - "host": "test", - "ip": [ - "inet[/127.0.0.1:9300]", - "NONE" - ], - "attributes": { - "master": "true" - }, - "indices": { - "docs": { - "count": 29652, - "deleted": 5229 - }, - "store": { - "size_in_bytes": 37715234, - "throttle_time_in_millis": 215 - }, - "indexing": { - "index_total": 84790, - "index_time_in_millis": 29680, - "index_current": 0, - "delete_total": 13879, - "delete_time_in_millis": 1139, - "delete_current": 0, - "noop_update_total": 0, - "is_throttled": false, - "throttle_time_in_millis": 0 - }, - "get": { - "total": 1, - "time_in_millis": 2, - "exists_total": 0, - "exists_time_in_millis": 0, - "missing_total": 1, - "missing_time_in_millis": 2, - "current": 0 - }, - "search": { - "open_contexts": 0, - "query_total": 1452, - "query_time_in_millis": 5695, - "query_current": 0, - "fetch_total": 414, - "fetch_time_in_millis": 146, - "fetch_current": 0 - }, - "merges": { - "current": 0, - "current_docs": 0, - "current_size_in_bytes": 0, - "total": 133, - "total_time_in_millis": 21060, - "total_docs": 203672, - "total_size_in_bytes": 142900226 - }, - "refresh": { - "total": 1076, - "total_time_in_millis": 20078 - }, - "flush": { - "total": 115, - "total_time_in_millis": 2401 - }, - "warmer": { - "current": 0, - "total": 2319, - "total_time_in_millis": 448 - }, - "filter_cache": { - "memory_size_in_bytes": 7384, - "evictions": 0 - }, - "id_cache": { - "memory_size_in_bytes": 0 - }, - "fielddata": { - "memory_size_in_bytes": 12996, - "evictions": 0 - }, - "percolate": { - "total": 0, - "time_in_millis": 0, - "current": 0, - "memory_size_in_bytes": -1, - "memory_size": "-1b", - "queries": 0 - }, - "completion": { - "size_in_bytes": 0 - }, - "segments": { - "count": 134, - "memory_in_bytes": 1285212, - "index_writer_memory_in_bytes": 0, - "index_writer_max_memory_in_bytes": 172368955, - "version_map_memory_in_bytes": 611844, - "fixed_bit_set_memory_in_bytes": 0 - }, - "translog": { - "operations": 17702, - "size_in_bytes": 17 - }, - "suggest": { - "total": 0, - "time_in_millis": 0, - "current": 0 - }, - "query_cache": { - "memory_size_in_bytes": 0, - "evictions": 0, - "hit_count": 0, - "miss_count": 0 - }, - "recovery": { - "current_as_source": 0, - "current_as_target": 0, - "throttle_time_in_millis": 
0 - } - }, - "os": { - "timestamp": 1436460392944, - "load_average": [ - 0.01, - 0.04, - 0.05 - ], - "mem": { - "free_in_bytes": 477761536, - "used_in_bytes": 1621868544, - "free_percent": 74, - "used_percent": 25, - "actual_free_in_bytes": 1565470720, - "actual_used_in_bytes": 534159360 - }, - "swap": { - "used_in_bytes": 0, - "free_in_bytes": 487997440 - } - }, - "process": { - "timestamp": 1436460392945, - "open_file_descriptors": 160, - "cpu": { - "percent": 2, - "sys_in_millis": 1870, - "user_in_millis": 13610, - "total_in_millis": 15480 - }, - "mem": { - "total_virtual_in_bytes": 4747890688 - } - }, - "jvm": { - "timestamp": 1436460392945, - "uptime_in_millis": 202245, - "mem": { - "heap_used_in_bytes": 52709568, - "heap_used_percent": 5, - "heap_committed_in_bytes": 259522560, - "heap_max_in_bytes": 1038876672, - "non_heap_used_in_bytes": 39634576, - "non_heap_committed_in_bytes": 40841216, - "pools": { - "young": { - "used_in_bytes": 32685760, - "max_in_bytes": 279183360, - "peak_used_in_bytes": 71630848, - "peak_max_in_bytes": 279183360 - }, - "survivor": { - "used_in_bytes": 8912880, - "max_in_bytes": 34865152, - "peak_used_in_bytes": 8912888, - "peak_max_in_bytes": 34865152 - }, - "old": { - "used_in_bytes": 11110928, - "max_in_bytes": 724828160, - "peak_used_in_bytes": 14354608, - "peak_max_in_bytes": 724828160 - } - } - }, - "threads": { - "count": 44, - "peak_count": 45 - }, - "gc": { - "collectors": { - "young": { - "collection_count": 2, - "collection_time_in_millis": 98 - }, - "old": { - "collection_count": 1, - "collection_time_in_millis": 24 - } - } - }, - "buffer_pools": { - "direct": { - "count": 40, - "used_in_bytes": 6304239, - "total_capacity_in_bytes": 6304239 - }, - "mapped": { - "count": 0, - "used_in_bytes": 0, - "total_capacity_in_bytes": 0 - } - } - }, - "thread_pool": { - "percolate": { - "threads": 123, - "queue": 23, - "active": 13, - "rejected": 235, - "largest": 23, - "completed": 33 - }, - "fetch_shard_started": { - "threads": 3, - "queue": 1, - "active": 5, - "rejected": 6, - "largest": 4, - "completed": 54 - }, - "listener": { - "threads": 1, - "queue": 2, - "active": 4, - "rejected": 8, - "largest": 1, - "completed": 1 - }, - "index": { - "threads": 6, - "queue": 8, - "active": 4, - "rejected": 2, - "largest": 3, - "completed": 6 - }, - "refresh": { - "threads": 23, - "queue": 7, - "active": 3, - "rejected": 4, - "largest": 8, - "completed": 3 - }, - "suggest": { - "threads": 2, - "queue": 7, - "active": 2, - "rejected": 1, - "largest": 8, - "completed": 3 - }, - "generic": { - "threads": 1, - "queue": 4, - "active": 6, - "rejected": 3, - "largest": 2, - "completed": 27 - }, - "warmer": { - "threads": 2, - "queue": 7, - "active": 3, - "rejected": 2, - "largest": 3, - "completed": 1 - }, - "search": { - "threads": 5, - "queue": 7, - "active": 2, - "rejected": 7, - "largest": 2, - "completed": 4 - }, - "flush": { - "threads": 3, - "queue": 8, - "active": 0, - "rejected": 1, - "largest": 5, - "completed": 3 - }, - "optimize": { - "threads": 3, - "queue": 4, - "active": 1, - "rejected": 2, - "largest": 7, - "completed": 3 - }, - "fetch_shard_store": { - "threads": 1, - "queue": 7, - "active": 4, - "rejected": 2, - "largest": 4, - "completed": 1 - }, - "management": { - "threads": 2, - "queue": 3, - "active": 1, - "rejected": 6, - "largest": 2, - "completed": 22 - }, - "get": { - "threads": 1, - "queue": 8, - "active": 4, - "rejected": 3, - "largest": 2, - "completed": 1 - }, - "merge": { - "threads": 6, - "queue": 4, - "active": 5, - "rejected": 2, - 
"largest": 5, - "completed": 1 - }, - "bulk": { - "threads": 4, - "queue": 5, - "active": 7, - "rejected": 3, - "largest": 1, - "completed": 4 - }, - "snapshot": { - "threads": 8, - "queue": 5, - "active": 6, - "rejected": 2, - "largest": 1, - "completed": 0 - } - }, - "fs": { - "timestamp": 1436460392946, - "total": { - "total_in_bytes": 19507089408, - "free_in_bytes": 16909316096, - "available_in_bytes": 15894814720 - }, - "data": [ - { - "path": "/usr/share/elasticsearch/data/elasticsearch/nodes/0", - "mount": "/usr/share/elasticsearch/data", - "type": "ext4", - "total_in_bytes": 19507089408, - "free_in_bytes": 16909316096, - "available_in_bytes": 15894814720 - } - ] - }, - "transport": { - "server_open": 13, - "rx_count": 6, - "rx_size_in_bytes": 1380, - "tx_count": 6, - "tx_size_in_bytes": 1380 - }, - "http": { - "current_open": 3, - "total_opened": 3 - }, - "breakers": { - "fielddata": { - "limit_size_in_bytes": 623326003, - "limit_size": "594.4mb", - "estimated_size_in_bytes": 0, - "estimated_size": "0b", - "overhead": 1.03, - "tripped": 0 - }, - "request": { - "limit_size_in_bytes": 415550668, - "limit_size": "396.2mb", - "estimated_size_in_bytes": 0, - "estimated_size": "0b", - "overhead": 1.0, - "tripped": 0 - }, - "parent": { - "limit_size_in_bytes": 727213670, - "limit_size": "693.5mb", - "estimated_size_in_bytes": 0, - "estimated_size": "0b", - "overhead": 1.0, - "tripped": 0 - } - } - } - } -} -` - -var indicesExpected = map[string]float64{ - "indices_id_cache_memory_size_in_bytes": 0, - "indices_completion_size_in_bytes": 0, - "indices_suggest_total": 0, - "indices_suggest_time_in_millis": 0, - "indices_suggest_current": 0, - "indices_query_cache_memory_size_in_bytes": 0, - "indices_query_cache_evictions": 0, - "indices_query_cache_hit_count": 0, - "indices_query_cache_miss_count": 0, - "indices_store_size_in_bytes": 37715234, - "indices_store_throttle_time_in_millis": 215, - "indices_merges_current_docs": 0, - "indices_merges_current_size_in_bytes": 0, - "indices_merges_total": 133, - "indices_merges_total_time_in_millis": 21060, - "indices_merges_total_docs": 203672, - "indices_merges_total_size_in_bytes": 142900226, - "indices_merges_current": 0, - "indices_filter_cache_memory_size_in_bytes": 7384, - "indices_filter_cache_evictions": 0, - "indices_indexing_index_total": 84790, - "indices_indexing_index_time_in_millis": 29680, - "indices_indexing_index_current": 0, - "indices_indexing_noop_update_total": 0, - "indices_indexing_throttle_time_in_millis": 0, - "indices_indexing_delete_total": 13879, - "indices_indexing_delete_time_in_millis": 1139, - "indices_indexing_delete_current": 0, - "indices_get_exists_time_in_millis": 0, - "indices_get_missing_total": 1, - "indices_get_missing_time_in_millis": 2, - "indices_get_current": 0, - "indices_get_total": 1, - "indices_get_time_in_millis": 2, - "indices_get_exists_total": 0, - "indices_refresh_total": 1076, - "indices_refresh_total_time_in_millis": 20078, - "indices_percolate_current": 0, - "indices_percolate_memory_size_in_bytes": -1, - "indices_percolate_queries": 0, - "indices_percolate_total": 0, - "indices_percolate_time_in_millis": 0, - "indices_translog_operations": 17702, - "indices_translog_size_in_bytes": 17, - "indices_recovery_current_as_source": 0, - "indices_recovery_current_as_target": 0, - "indices_recovery_throttle_time_in_millis": 0, - "indices_docs_count": 29652, - "indices_docs_deleted": 5229, - "indices_flush_total_time_in_millis": 2401, - "indices_flush_total": 115, - 
"indices_fielddata_memory_size_in_bytes": 12996, - "indices_fielddata_evictions": 0, - "indices_search_fetch_current": 0, - "indices_search_open_contexts": 0, - "indices_search_query_total": 1452, - "indices_search_query_time_in_millis": 5695, - "indices_search_query_current": 0, - "indices_search_fetch_total": 414, - "indices_search_fetch_time_in_millis": 146, - "indices_warmer_current": 0, - "indices_warmer_total": 2319, - "indices_warmer_total_time_in_millis": 448, - "indices_segments_count": 134, - "indices_segments_memory_in_bytes": 1285212, - "indices_segments_index_writer_memory_in_bytes": 0, - "indices_segments_index_writer_max_memory_in_bytes": 172368955, - "indices_segments_version_map_memory_in_bytes": 611844, - "indices_segments_fixed_bit_set_memory_in_bytes": 0, -} - -var osExpected = map[string]float64{ - "os_swap_used_in_bytes": 0, - "os_swap_free_in_bytes": 487997440, - "os_timestamp": 1436460392944, - "os_mem_free_percent": 74, - "os_mem_used_percent": 25, - "os_mem_actual_free_in_bytes": 1565470720, - "os_mem_actual_used_in_bytes": 534159360, - "os_mem_free_in_bytes": 477761536, - "os_mem_used_in_bytes": 1621868544, -} - -var processExpected = map[string]float64{ - "process_mem_total_virtual_in_bytes": 4747890688, - "process_timestamp": 1436460392945, - "process_open_file_descriptors": 160, - "process_cpu_total_in_millis": 15480, - "process_cpu_percent": 2, - "process_cpu_sys_in_millis": 1870, - "process_cpu_user_in_millis": 13610, -} - -var jvmExpected = map[string]float64{ - "jvm_timestamp": 1436460392945, - "jvm_uptime_in_millis": 202245, - "jvm_mem_non_heap_used_in_bytes": 39634576, - "jvm_mem_non_heap_committed_in_bytes": 40841216, - "jvm_mem_pools_young_max_in_bytes": 279183360, - "jvm_mem_pools_young_peak_used_in_bytes": 71630848, - "jvm_mem_pools_young_peak_max_in_bytes": 279183360, - "jvm_mem_pools_young_used_in_bytes": 32685760, - "jvm_mem_pools_survivor_peak_used_in_bytes": 8912888, - "jvm_mem_pools_survivor_peak_max_in_bytes": 34865152, - "jvm_mem_pools_survivor_used_in_bytes": 8912880, - "jvm_mem_pools_survivor_max_in_bytes": 34865152, - "jvm_mem_pools_old_peak_max_in_bytes": 724828160, - "jvm_mem_pools_old_used_in_bytes": 11110928, - "jvm_mem_pools_old_max_in_bytes": 724828160, - "jvm_mem_pools_old_peak_used_in_bytes": 14354608, - "jvm_mem_heap_used_in_bytes": 52709568, - "jvm_mem_heap_used_percent": 5, - "jvm_mem_heap_committed_in_bytes": 259522560, - "jvm_mem_heap_max_in_bytes": 1038876672, - "jvm_threads_peak_count": 45, - "jvm_threads_count": 44, - "jvm_gc_collectors_young_collection_count": 2, - "jvm_gc_collectors_young_collection_time_in_millis": 98, - "jvm_gc_collectors_old_collection_count": 1, - "jvm_gc_collectors_old_collection_time_in_millis": 24, - "jvm_buffer_pools_direct_count": 40, - "jvm_buffer_pools_direct_used_in_bytes": 6304239, - "jvm_buffer_pools_direct_total_capacity_in_bytes": 6304239, - "jvm_buffer_pools_mapped_count": 0, - "jvm_buffer_pools_mapped_used_in_bytes": 0, - "jvm_buffer_pools_mapped_total_capacity_in_bytes": 0, -} - -var threadPoolExpected = map[string]float64{ - "thread_pool_merge_threads": 6, - "thread_pool_merge_queue": 4, - "thread_pool_merge_active": 5, - "thread_pool_merge_rejected": 2, - "thread_pool_merge_largest": 5, - "thread_pool_merge_completed": 1, - "thread_pool_bulk_threads": 4, - "thread_pool_bulk_queue": 5, - "thread_pool_bulk_active": 7, - "thread_pool_bulk_rejected": 3, - "thread_pool_bulk_largest": 1, - "thread_pool_bulk_completed": 4, - "thread_pool_warmer_threads": 2, - "thread_pool_warmer_queue": 7, - 
"thread_pool_warmer_active": 3, - "thread_pool_warmer_rejected": 2, - "thread_pool_warmer_largest": 3, - "thread_pool_warmer_completed": 1, - "thread_pool_get_largest": 2, - "thread_pool_get_completed": 1, - "thread_pool_get_threads": 1, - "thread_pool_get_queue": 8, - "thread_pool_get_active": 4, - "thread_pool_get_rejected": 3, - "thread_pool_index_threads": 6, - "thread_pool_index_queue": 8, - "thread_pool_index_active": 4, - "thread_pool_index_rejected": 2, - "thread_pool_index_largest": 3, - "thread_pool_index_completed": 6, - "thread_pool_suggest_threads": 2, - "thread_pool_suggest_queue": 7, - "thread_pool_suggest_active": 2, - "thread_pool_suggest_rejected": 1, - "thread_pool_suggest_largest": 8, - "thread_pool_suggest_completed": 3, - "thread_pool_fetch_shard_store_queue": 7, - "thread_pool_fetch_shard_store_active": 4, - "thread_pool_fetch_shard_store_rejected": 2, - "thread_pool_fetch_shard_store_largest": 4, - "thread_pool_fetch_shard_store_completed": 1, - "thread_pool_fetch_shard_store_threads": 1, - "thread_pool_management_threads": 2, - "thread_pool_management_queue": 3, - "thread_pool_management_active": 1, - "thread_pool_management_rejected": 6, - "thread_pool_management_largest": 2, - "thread_pool_management_completed": 22, - "thread_pool_percolate_queue": 23, - "thread_pool_percolate_active": 13, - "thread_pool_percolate_rejected": 235, - "thread_pool_percolate_largest": 23, - "thread_pool_percolate_completed": 33, - "thread_pool_percolate_threads": 123, - "thread_pool_listener_active": 4, - "thread_pool_listener_rejected": 8, - "thread_pool_listener_largest": 1, - "thread_pool_listener_completed": 1, - "thread_pool_listener_threads": 1, - "thread_pool_listener_queue": 2, - "thread_pool_search_rejected": 7, - "thread_pool_search_largest": 2, - "thread_pool_search_completed": 4, - "thread_pool_search_threads": 5, - "thread_pool_search_queue": 7, - "thread_pool_search_active": 2, - "thread_pool_fetch_shard_started_threads": 3, - "thread_pool_fetch_shard_started_queue": 1, - "thread_pool_fetch_shard_started_active": 5, - "thread_pool_fetch_shard_started_rejected": 6, - "thread_pool_fetch_shard_started_largest": 4, - "thread_pool_fetch_shard_started_completed": 54, - "thread_pool_refresh_rejected": 4, - "thread_pool_refresh_largest": 8, - "thread_pool_refresh_completed": 3, - "thread_pool_refresh_threads": 23, - "thread_pool_refresh_queue": 7, - "thread_pool_refresh_active": 3, - "thread_pool_optimize_threads": 3, - "thread_pool_optimize_queue": 4, - "thread_pool_optimize_active": 1, - "thread_pool_optimize_rejected": 2, - "thread_pool_optimize_largest": 7, - "thread_pool_optimize_completed": 3, - "thread_pool_snapshot_largest": 1, - "thread_pool_snapshot_completed": 0, - "thread_pool_snapshot_threads": 8, - "thread_pool_snapshot_queue": 5, - "thread_pool_snapshot_active": 6, - "thread_pool_snapshot_rejected": 2, - "thread_pool_generic_threads": 1, - "thread_pool_generic_queue": 4, - "thread_pool_generic_active": 6, - "thread_pool_generic_rejected": 3, - "thread_pool_generic_largest": 2, - "thread_pool_generic_completed": 27, - "thread_pool_flush_threads": 3, - "thread_pool_flush_queue": 8, - "thread_pool_flush_active": 0, - "thread_pool_flush_rejected": 1, - "thread_pool_flush_largest": 5, - "thread_pool_flush_completed": 3, -} - -var fsExpected = map[string]float64{ - "fs_timestamp": 1436460392946, - "fs_total_free_in_bytes": 16909316096, - "fs_total_available_in_bytes": 15894814720, - "fs_total_total_in_bytes": 19507089408, -} - -var transportExpected = 
map[string]float64{ - "transport_server_open": 13, - "transport_rx_count": 6, - "transport_rx_size_in_bytes": 1380, - "transport_tx_count": 6, - "transport_tx_size_in_bytes": 1380, -} - -var httpExpected = map[string]float64{ - "http_current_open": 3, - "http_total_opened": 3, -} - -var breakersExpected = map[string]float64{ - "breakers_fielddata_estimated_size_in_bytes": 0, - "breakers_fielddata_overhead": 1.03, - "breakers_fielddata_tripped": 0, - "breakers_fielddata_limit_size_in_bytes": 623326003, - "breakers_request_estimated_size_in_bytes": 0, - "breakers_request_overhead": 1.0, - "breakers_request_tripped": 0, - "breakers_request_limit_size_in_bytes": 415550668, - "breakers_parent_overhead": 1.0, - "breakers_parent_tripped": 0, - "breakers_parent_limit_size_in_bytes": 727213670, - "breakers_parent_estimated_size_in_bytes": 0, -} diff --git a/plugins/exec/README.md b/plugins/exec/README.md deleted file mode 100644 index 7aa52db33..000000000 --- a/plugins/exec/README.md +++ /dev/null @@ -1,42 +0,0 @@ -# Exec Plugin - -The exec plugin can execute arbitrary commands which output JSON. Then it flattens JSON and finds -all numeric values, treating them as floats. - -For example, if you have a json-returning command called mycollector, you could -setup the exec plugin with: - -``` -[[exec.commands]] -command = "/usr/bin/mycollector --output=json" -name = "mycollector" -interval = 10 -``` - -The name is used as a prefix for the measurements. - -The interval is used to determine how often a particular command should be run. Each -time the exec plugin runs, it will only run a particular command if it has been at least -`interval` seconds since the exec plugin last ran the command. - - -# Sample - -Let's say that we have a command named "mycollector", which gives the following output: -```json -{ - "a": 0.5, - "b": { - "c": "some text", - "d": 0.1, - "e": 5 - } -} -``` - -The collected metrics will be: -``` -exec_mycollector_a value=0.5 -exec_mycollector_b_d value=0.1 -exec_mycollector_b_e value=5 -``` diff --git a/plugins/exec/exec.go b/plugins/exec/exec.go deleted file mode 100644 index d4a42b6c4..000000000 --- a/plugins/exec/exec.go +++ /dev/null @@ -1,162 +0,0 @@ -package exec - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "github.com/gonuts/go-shellquote" - "github.com/influxdb/telegraf/plugins" - "math" - "os/exec" - "strings" - "sync" - "time" -) - -const sampleConfig = ` - # specify commands via an array of tables - [[plugins.exec.commands]] - # the command to run - command = "/usr/bin/mycollector --foo=bar" - - # name of the command (used as a prefix for measurements) - name = "mycollector" - - # Only run this command if it has been at least this many - # seconds since it last ran - interval = 10 -` - -type Exec struct { - Commands []*Command - runner Runner - clock Clock -} - -type Command struct { - Command string - Name string - Interval int - lastRunAt time.Time -} - -type Runner interface { - Run(*Command) ([]byte, error) -} - -type Clock interface { - Now() time.Time -} - -type CommandRunner struct{} - -type RealClock struct{} - -func (c CommandRunner) Run(command *Command) ([]byte, error) { - command.lastRunAt = time.Now() - split_cmd, err := shellquote.Split(command.Command) - if err != nil || len(split_cmd) == 0 { - return nil, fmt.Errorf("exec: unable to parse command, %s", err) - } - - cmd := exec.Command(split_cmd[0], split_cmd[1:]...) 
- var out bytes.Buffer - cmd.Stdout = &out - - if err := cmd.Run(); err != nil { - return nil, fmt.Errorf("exec: %s for command '%s'", err, command.Command) - } - - return out.Bytes(), nil -} - -func (c RealClock) Now() time.Time { - return time.Now() -} - -func NewExec() *Exec { - return &Exec{runner: CommandRunner{}, clock: RealClock{}} -} - -func (e *Exec) SampleConfig() string { - return sampleConfig -} - -func (e *Exec) Description() string { - return "Read flattened metrics from one or more commands that output JSON to stdout" -} - -func (e *Exec) Gather(acc plugins.Accumulator) error { - var wg sync.WaitGroup - - errorChannel := make(chan error, len(e.Commands)) - - for _, c := range e.Commands { - wg.Add(1) - go func(c *Command, acc plugins.Accumulator) { - defer wg.Done() - err := e.gatherCommand(c, acc) - if err != nil { - errorChannel <- err - } - }(c, acc) - } - - wg.Wait() - close(errorChannel) - - // Get all errors and return them as one giant error - errorStrings := []string{} - for err := range errorChannel { - errorStrings = append(errorStrings, err.Error()) - } - - if len(errorStrings) == 0 { - return nil - } - return errors.New(strings.Join(errorStrings, "\n")) -} - -func (e *Exec) gatherCommand(c *Command, acc plugins.Accumulator) error { - secondsSinceLastRun := 0.0 - - if c.lastRunAt.Unix() == 0 { // means time is uninitialized - secondsSinceLastRun = math.Inf(1) - } else { - secondsSinceLastRun = (e.clock.Now().Sub(c.lastRunAt)).Seconds() - } - - if secondsSinceLastRun >= float64(c.Interval) { - out, err := e.runner.Run(c) - if err != nil { - return err - } - - var jsonOut interface{} - err = json.Unmarshal(out, &jsonOut) - if err != nil { - return fmt.Errorf("exec: unable to parse output of '%s' as JSON, %s", c.Command, err) - } - - processResponse(acc, c.Name, map[string]string{}, jsonOut) - } - return nil -} - -func processResponse(acc plugins.Accumulator, prefix string, tags map[string]string, v interface{}) { - switch t := v.(type) { - case map[string]interface{}: - for k, v := range t { - processResponse(acc, prefix+"_"+k, tags, v) - } - case float64: - acc.Add(prefix, v, tags) - } -} - -func init() { - plugins.Add("exec", func() plugins.Plugin { - return NewExec() - }) -} diff --git a/plugins/exec/exec_test.go b/plugins/exec/exec_test.go deleted file mode 100644 index 3f0b6f4ce..000000000 --- a/plugins/exec/exec_test.go +++ /dev/null @@ -1,262 +0,0 @@ -package exec - -import ( - "fmt" - "github.com/influxdb/telegraf/testutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "math" - "testing" - "time" -) - -// Midnight 9/22/2015 -const baseTimeSeconds = 1442905200 - -const validJson = ` -{ - "status": "green", - "num_processes": 82, - "cpu": { - "status": "red", - "nil_status": null, - "used": 8234, - "free": 32 - }, - "percent": 0.81, - "users": [0, 1, 2, 3] -}` - -const malformedJson = ` -{ - "status": "green", -` - -type runnerMock struct { - out []byte - err error -} - -type clockMock struct { - now time.Time -} - -func newRunnerMock(out []byte, err error) Runner { - return &runnerMock{ - out: out, - err: err, - } -} - -func (r runnerMock) Run(command *Command) ([]byte, error) { - if r.err != nil { - return nil, r.err - } - return r.out, nil -} - -func newClockMock(now time.Time) Clock { - return &clockMock{now: now} -} - -func (c clockMock) Now() time.Time { - return c.now -} - -func TestExec(t *testing.T) { - runner := newRunnerMock([]byte(validJson), nil) - clock := newClockMock(time.Unix(baseTimeSeconds+20, 0)) - command 
:= Command{ - Command: "testcommand arg1", - Name: "mycollector", - Interval: 10, - lastRunAt: time.Unix(baseTimeSeconds, 0), - } - - e := &Exec{ - runner: runner, - clock: clock, - Commands: []*Command{&command}, - } - - var acc testutil.Accumulator - initialPoints := len(acc.Points) - err := e.Gather(&acc) - deltaPoints := len(acc.Points) - initialPoints - require.NoError(t, err) - - checkFloat := []struct { - name string - value float64 - }{ - {"mycollector_num_processes", 82}, - {"mycollector_cpu_used", 8234}, - {"mycollector_cpu_free", 32}, - {"mycollector_percent", 0.81}, - } - - for _, c := range checkFloat { - assert.True(t, acc.CheckValue(c.name, c.value)) - } - - assert.Equal(t, deltaPoints, 4, "non-numeric measurements should be ignored") -} - -func TestExecMalformed(t *testing.T) { - runner := newRunnerMock([]byte(malformedJson), nil) - clock := newClockMock(time.Unix(baseTimeSeconds+20, 0)) - command := Command{ - Command: "badcommand arg1", - Name: "mycollector", - Interval: 10, - lastRunAt: time.Unix(baseTimeSeconds, 0), - } - - e := &Exec{ - runner: runner, - clock: clock, - Commands: []*Command{&command}, - } - - var acc testutil.Accumulator - initialPoints := len(acc.Points) - err := e.Gather(&acc) - deltaPoints := len(acc.Points) - initialPoints - require.Error(t, err) - - assert.Equal(t, deltaPoints, 0, "No new points should have been added") -} - -func TestCommandError(t *testing.T) { - runner := newRunnerMock(nil, fmt.Errorf("exit status code 1")) - clock := newClockMock(time.Unix(baseTimeSeconds+20, 0)) - command := Command{ - Command: "badcommand", - Name: "mycollector", - Interval: 10, - lastRunAt: time.Unix(baseTimeSeconds, 0), - } - - e := &Exec{ - runner: runner, - clock: clock, - Commands: []*Command{&command}, - } - - var acc testutil.Accumulator - initialPoints := len(acc.Points) - err := e.Gather(&acc) - deltaPoints := len(acc.Points) - initialPoints - require.Error(t, err) - - assert.Equal(t, deltaPoints, 0, "No new points should have been added") -} - -func TestExecNotEnoughTime(t *testing.T) { - runner := newRunnerMock([]byte(validJson), nil) - clock := newClockMock(time.Unix(baseTimeSeconds+5, 0)) - command := Command{ - Command: "testcommand arg1", - Name: "mycollector", - Interval: 10, - lastRunAt: time.Unix(baseTimeSeconds, 0), - } - - e := &Exec{ - runner: runner, - clock: clock, - Commands: []*Command{&command}, - } - - var acc testutil.Accumulator - initialPoints := len(acc.Points) - err := e.Gather(&acc) - deltaPoints := len(acc.Points) - initialPoints - require.NoError(t, err) - - assert.Equal(t, deltaPoints, 0, "No new points should have been added") -} - -func TestExecUninitializedLastRunAt(t *testing.T) { - runner := newRunnerMock([]byte(validJson), nil) - clock := newClockMock(time.Unix(baseTimeSeconds, 0)) - command := Command{ - Command: "testcommand arg1", - Name: "mycollector", - Interval: math.MaxInt32, - // Uninitialized lastRunAt should default to time.Unix(0, 0), so this should - // run no matter what the interval is - } - - e := &Exec{ - runner: runner, - clock: clock, - Commands: []*Command{&command}, - } - - var acc testutil.Accumulator - initialPoints := len(acc.Points) - err := e.Gather(&acc) - deltaPoints := len(acc.Points) - initialPoints - require.NoError(t, err) - - checkFloat := []struct { - name string - value float64 - }{ - {"mycollector_num_processes", 82}, - {"mycollector_cpu_used", 8234}, - {"mycollector_cpu_free", 32}, - {"mycollector_percent", 0.81}, - } - - for _, c := range checkFloat { - assert.True(t, 
acc.CheckValue(c.name, c.value)) - } - - assert.Equal(t, deltaPoints, 4, "non-numeric measurements should be ignored") -} -func TestExecOneNotEnoughTimeAndOneEnoughTime(t *testing.T) { - runner := newRunnerMock([]byte(validJson), nil) - clock := newClockMock(time.Unix(baseTimeSeconds+5, 0)) - notEnoughTimeCommand := Command{ - Command: "testcommand arg1", - Name: "mycollector", - Interval: 10, - lastRunAt: time.Unix(baseTimeSeconds, 0), - } - enoughTimeCommand := Command{ - Command: "testcommand arg1", - Name: "mycollector", - Interval: 3, - lastRunAt: time.Unix(baseTimeSeconds, 0), - } - - e := &Exec{ - runner: runner, - clock: clock, - Commands: []*Command{¬EnoughTimeCommand, &enoughTimeCommand}, - } - - var acc testutil.Accumulator - initialPoints := len(acc.Points) - err := e.Gather(&acc) - deltaPoints := len(acc.Points) - initialPoints - require.NoError(t, err) - - checkFloat := []struct { - name string - value float64 - }{ - {"mycollector_num_processes", 82}, - {"mycollector_cpu_used", 8234}, - {"mycollector_cpu_free", 32}, - {"mycollector_percent", 0.81}, - } - - for _, c := range checkFloat { - assert.True(t, acc.CheckValue(c.name, c.value)) - } - - assert.Equal(t, deltaPoints, 4, "Only one command should have been run") -} diff --git a/plugins/aerospike/README.md b/plugins/inputs/aerospike/README.md similarity index 100% rename from plugins/aerospike/README.md rename to plugins/inputs/aerospike/README.md diff --git a/plugins/aerospike/aerospike.go b/plugins/inputs/aerospike/aerospike.go similarity index 92% rename from plugins/aerospike/aerospike.go rename to plugins/inputs/aerospike/aerospike.go index cf372aeb4..aa015a4c0 100644 --- a/plugins/aerospike/aerospike.go +++ b/plugins/inputs/aerospike/aerospike.go @@ -4,7 +4,7 @@ import ( "bytes" "encoding/binary" "fmt" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdata/telegraf/plugins/inputs" "net" "strconv" "strings" @@ -119,7 +119,7 @@ func (a *Aerospike) Description() string { return "Read stats from an aerospike server" } -func (a *Aerospike) Gather(acc plugins.Accumulator) error { +func (a *Aerospike) Gather(acc inputs.Accumulator) error { if len(a.Servers) == 0 { return a.gatherServer("127.0.0.1:3000", acc) } @@ -140,7 +140,7 @@ func (a *Aerospike) Gather(acc plugins.Accumulator) error { return outerr } -func (a *Aerospike) gatherServer(host string, acc plugins.Accumulator) error { +func (a *Aerospike) gatherServer(host string, acc inputs.Accumulator) error { aerospikeInfo, err := getMap(STATISTICS_COMMAND, host) if err != nil { return fmt.Errorf("Aerospike info failed: %s", err) @@ -247,26 +247,32 @@ func get(key []byte, host string) (map[string]string, error) { return data, err } -func readAerospikeStats(stats map[string]string, acc plugins.Accumulator, host, namespace string) { +func readAerospikeStats( + stats map[string]string, + acc inputs.Accumulator, + host string, + namespace string, +) { + fields := make(map[string]interface{}) + tags := map[string]string{ + "aerospike_host": host, + "namespace": "_service", + } + + if namespace != "" { + tags["namespace"] = namespace + } for key, value := range stats { - tags := map[string]string{ - "aerospike_host": host, - "namespace": "_service", - } - - if namespace != "" { - tags["namespace"] = namespace - } - // We are going to ignore all string based keys val, err := strconv.ParseInt(value, 10, 64) if err == nil { if strings.Contains(key, "-") { key = strings.Replace(key, "-", "_", -1) } - acc.Add(key, val, tags) + fields[key] = val } } + 
acc.AddFields("aerospike", fields, tags) } func unmarshalMapInfo(infoMap map[string]string, key string) (map[string]string, error) { @@ -330,7 +336,7 @@ func msgLenFromBytes(buf [6]byte) int64 { } func init() { - plugins.Add("aerospike", func() plugins.Plugin { + inputs.Add("aerospike", func() inputs.Input { return &Aerospike{} }) } diff --git a/plugins/aerospike/aerospike_test.go b/plugins/inputs/aerospike/aerospike_test.go similarity index 79% rename from plugins/aerospike/aerospike_test.go rename to plugins/inputs/aerospike/aerospike_test.go index 532ebaafb..74b70eb1d 100644 --- a/plugins/aerospike/aerospike_test.go +++ b/plugins/inputs/aerospike/aerospike_test.go @@ -1,11 +1,12 @@ package aerospike import ( - "github.com/influxdb/telegraf/testutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" "reflect" "testing" + + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestAerospikeStatistics(t *testing.T) { @@ -31,7 +32,7 @@ func TestAerospikeStatistics(t *testing.T) { } for _, metric := range asMetrics { - assert.True(t, acc.HasIntValue(metric), metric) + assert.True(t, acc.HasIntField("aerospike", metric), metric) } } @@ -49,13 +50,16 @@ func TestReadAerospikeStatsNoNamespace(t *testing.T) { "stat_read_reqs": "12345", } readAerospikeStats(stats, &acc, "host1", "") - for k := range stats { - if k == "stat-write-errs" { - k = "stat_write_errs" - } - assert.True(t, acc.HasMeasurement(k)) - assert.True(t, acc.CheckValue(k, int64(12345))) + + fields := map[string]interface{}{ + "stat_write_errs": int64(12345), + "stat_read_reqs": int64(12345), } + tags := map[string]string{ + "aerospike_host": "host1", + "namespace": "_service", + } + acc.AssertContainsTaggedFields(t, "aerospike", fields, tags) } func TestReadAerospikeStatsNamespace(t *testing.T) { @@ -66,13 +70,15 @@ func TestReadAerospikeStatsNamespace(t *testing.T) { } readAerospikeStats(stats, &acc, "host1", "test") + fields := map[string]interface{}{ + "stat_write_errs": int64(12345), + "stat_read_reqs": int64(12345), + } tags := map[string]string{ "aerospike_host": "host1", "namespace": "test", } - for k := range stats { - assert.True(t, acc.ValidateTaggedValue(k, int64(12345), tags) == nil) - } + acc.AssertContainsTaggedFields(t, "aerospike", fields, tags) } func TestAerospikeUnmarshalList(t *testing.T) { diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go new file mode 100644 index 000000000..cfd802438 --- /dev/null +++ b/plugins/inputs/all/all.go @@ -0,0 +1,41 @@ +package all + +import ( + _ "github.com/influxdata/telegraf/plugins/inputs/aerospike" + _ "github.com/influxdata/telegraf/plugins/inputs/apache" + _ "github.com/influxdata/telegraf/plugins/inputs/bcache" + _ "github.com/influxdata/telegraf/plugins/inputs/disque" + _ "github.com/influxdata/telegraf/plugins/inputs/docker" + _ "github.com/influxdata/telegraf/plugins/inputs/elasticsearch" + _ "github.com/influxdata/telegraf/plugins/inputs/exec" + _ "github.com/influxdata/telegraf/plugins/inputs/haproxy" + _ "github.com/influxdata/telegraf/plugins/inputs/httpjson" + _ "github.com/influxdata/telegraf/plugins/inputs/influxdb" + _ "github.com/influxdata/telegraf/plugins/inputs/jolokia" + _ "github.com/influxdata/telegraf/plugins/inputs/kafka_consumer" + _ "github.com/influxdata/telegraf/plugins/inputs/leofs" + _ "github.com/influxdata/telegraf/plugins/inputs/lustre2" + _ "github.com/influxdata/telegraf/plugins/inputs/mailchimp" + _ 
"github.com/influxdata/telegraf/plugins/inputs/memcached" + _ "github.com/influxdata/telegraf/plugins/inputs/mongodb" + _ "github.com/influxdata/telegraf/plugins/inputs/mysql" + _ "github.com/influxdata/telegraf/plugins/inputs/nginx" + _ "github.com/influxdata/telegraf/plugins/inputs/nsq" + _ "github.com/influxdata/telegraf/plugins/inputs/passenger" + _ "github.com/influxdata/telegraf/plugins/inputs/phpfpm" + _ "github.com/influxdata/telegraf/plugins/inputs/ping" + _ "github.com/influxdata/telegraf/plugins/inputs/postgresql" + _ "github.com/influxdata/telegraf/plugins/inputs/procstat" + _ "github.com/influxdata/telegraf/plugins/inputs/prometheus" + _ "github.com/influxdata/telegraf/plugins/inputs/puppetagent" + _ "github.com/influxdata/telegraf/plugins/inputs/rabbitmq" + _ "github.com/influxdata/telegraf/plugins/inputs/redis" + _ "github.com/influxdata/telegraf/plugins/inputs/rethinkdb" + _ "github.com/influxdata/telegraf/plugins/inputs/sensors" + _ "github.com/influxdata/telegraf/plugins/inputs/statsd" + _ "github.com/influxdata/telegraf/plugins/inputs/system" + _ "github.com/influxdata/telegraf/plugins/inputs/trig" + _ "github.com/influxdata/telegraf/plugins/inputs/twemproxy" + _ "github.com/influxdata/telegraf/plugins/inputs/zfs" + _ "github.com/influxdata/telegraf/plugins/inputs/zookeeper" +) diff --git a/plugins/apache/README.md b/plugins/inputs/apache/README.md similarity index 100% rename from plugins/apache/README.md rename to plugins/inputs/apache/README.md diff --git a/plugins/apache/apache.go b/plugins/inputs/apache/apache.go similarity index 71% rename from plugins/apache/apache.go rename to plugins/inputs/apache/apache.go index 3cebecb22..317a635d3 100644 --- a/plugins/apache/apache.go +++ b/plugins/inputs/apache/apache.go @@ -11,7 +11,7 @@ import ( "sync" "time" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdata/telegraf/plugins/inputs" ) type Apache struct { @@ -31,7 +31,7 @@ func (n *Apache) Description() string { return "Read Apache status information (mod_status)" } -func (n *Apache) Gather(acc plugins.Accumulator) error { +func (n *Apache) Gather(acc inputs.Accumulator) error { var wg sync.WaitGroup var outerr error @@ -59,7 +59,7 @@ var tr = &http.Transport{ var client = &http.Client{Transport: tr} -func (n *Apache) gatherUrl(addr *url.URL, acc plugins.Accumulator) error { +func (n *Apache) gatherUrl(addr *url.URL, acc inputs.Accumulator) error { resp, err := client.Get(addr.String()) if err != nil { return fmt.Errorf("error making HTTP request to %s: %s", addr.String(), err) @@ -72,32 +72,33 @@ func (n *Apache) gatherUrl(addr *url.URL, acc plugins.Accumulator) error { tags := getTags(addr) sc := bufio.NewScanner(resp.Body) + fields := make(map[string]interface{}) for sc.Scan() { line := sc.Text() if strings.Contains(line, ":") { - parts := strings.SplitN(line, ":", 2) key, part := strings.Replace(parts[0], " ", "", -1), strings.TrimSpace(parts[1]) switch key { - case "Scoreboard": - n.gatherScores(part, acc, tags) + for field, value := range n.gatherScores(part) { + fields[field] = value + } default: value, err := strconv.ParseFloat(part, 64) if err != nil { continue } - acc.Add(key, value, tags) + fields[key] = value } } } + acc.AddFields("apache", fields, tags) return nil } -func (n *Apache) gatherScores(data string, acc plugins.Accumulator, tags map[string]string) { - +func (n *Apache) gatherScores(data string) map[string]interface{} { var waiting, open int = 0, 0 var S, R, W, K, D, C, L, G, I int = 0, 0, 0, 0, 0, 0, 0, 0, 0 @@ -129,17 +130,20 @@ func 
(n *Apache) gatherScores(data string, acc plugins.Accumulator, tags map[str } } - acc.Add("scboard_waiting", float64(waiting), tags) - acc.Add("scboard_starting", float64(S), tags) - acc.Add("scboard_reading", float64(R), tags) - acc.Add("scboard_sending", float64(W), tags) - acc.Add("scboard_keepalive", float64(K), tags) - acc.Add("scboard_dnslookup", float64(D), tags) - acc.Add("scboard_closing", float64(C), tags) - acc.Add("scboard_logging", float64(L), tags) - acc.Add("scboard_finishing", float64(G), tags) - acc.Add("scboard_idle_cleanup", float64(I), tags) - acc.Add("scboard_open", float64(open), tags) + fields := map[string]interface{}{ + "scboard_waiting": float64(waiting), + "scboard_starting": float64(S), + "scboard_reading": float64(R), + "scboard_sending": float64(W), + "scboard_keepalive": float64(K), + "scboard_dnslookup": float64(D), + "scboard_closing": float64(C), + "scboard_logging": float64(L), + "scboard_finishing": float64(G), + "scboard_idle_cleanup": float64(I), + "scboard_open": float64(open), + } + return fields } // Get tag(s) for the apache plugin @@ -160,7 +164,7 @@ func getTags(addr *url.URL) map[string]string { } func init() { - plugins.Add("apache", func() plugins.Plugin { + inputs.Add("apache", func() inputs.Input { return &Apache{} }) } diff --git a/plugins/apache/apache_test.go b/plugins/inputs/apache/apache_test.go similarity index 80% rename from plugins/apache/apache_test.go rename to plugins/inputs/apache/apache_test.go index 9688302ac..8eed61ca6 100644 --- a/plugins/apache/apache_test.go +++ b/plugins/inputs/apache/apache_test.go @@ -6,9 +6,8 @@ import ( "net/http/httptest" "testing" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -44,37 +43,31 @@ func TestHTTPApache(t *testing.T) { err := a.Gather(&acc) require.NoError(t, err) - testInt := []struct { - measurement string - value float64 - }{ - {"TotalAccesses", 1.29811861e+08}, - {"TotalkBytes", 5.213701865e+09}, - {"CPULoad", 6.51929}, - {"Uptime", 941553}, - {"ReqPerSec", 137.87}, - {"BytesPerSec", 5.67024e+06}, - {"BytesPerReq", 41127.4}, - {"BusyWorkers", 270}, - {"IdleWorkers", 630}, - {"ConnsTotal", 1451}, - {"ConnsAsyncWriting", 32}, - {"ConnsAsyncKeepAlive", 945}, - {"ConnsAsyncClosing", 205}, - {"scboard_waiting", 630}, - {"scboard_starting", 0}, - {"scboard_reading", 157}, - {"scboard_sending", 113}, - {"scboard_keepalive", 0}, - {"scboard_dnslookup", 0}, - {"scboard_closing", 0}, - {"scboard_logging", 0}, - {"scboard_finishing", 0}, - {"scboard_idle_cleanup", 0}, - {"scboard_open", 2850}, - } - - for _, test := range testInt { - assert.True(t, acc.CheckValue(test.measurement, test.value)) + fields := map[string]interface{}{ + "TotalAccesses": float64(1.29811861e+08), + "TotalkBytes": float64(5.213701865e+09), + "CPULoad": float64(6.51929), + "Uptime": float64(941553), + "ReqPerSec": float64(137.87), + "BytesPerSec": float64(5.67024e+06), + "BytesPerReq": float64(41127.4), + "BusyWorkers": float64(270), + "IdleWorkers": float64(630), + "ConnsTotal": float64(1451), + "ConnsAsyncWriting": float64(32), + "ConnsAsyncKeepAlive": float64(945), + "ConnsAsyncClosing": float64(205), + "scboard_waiting": float64(630), + "scboard_starting": float64(0), + "scboard_reading": float64(157), + "scboard_sending": float64(113), + "scboard_keepalive": float64(0), + "scboard_dnslookup": float64(0), + "scboard_closing": float64(0), + "scboard_logging": float64(0), + "scboard_finishing": 
float64(0), + "scboard_idle_cleanup": float64(0), + "scboard_open": float64(2850), } + acc.AssertContainsFields(t, "apache", fields) } diff --git a/plugins/bcache/README.md b/plugins/inputs/bcache/README.md similarity index 97% rename from plugins/bcache/README.md rename to plugins/inputs/bcache/README.md index 27062b915..98a841bf5 100644 --- a/plugins/bcache/README.md +++ b/plugins/inputs/bcache/README.md @@ -26,27 +26,27 @@ Measurement names: dirty_data Amount of dirty data for this backing device in the cache. Continuously updated unlike the cache set's version, but may be slightly off. - + bypassed Amount of IO (both reads and writes) that has bypassed the cache - + cache_bypass_hits cache_bypass_misses Hits and misses for IO that is intended to skip the cache are still counted, but broken out here. - + cache_hits cache_misses cache_hit_ratio Hits and misses are counted per individual IO as bcache sees them; a partial hit is counted as a miss. - + cache_miss_collisions Counts instances where data was going to be inserted into the cache from a cache miss, but raced with a write and data was already present (usually 0 since the synchronization for cache misses was rewritten) - + cache_readaheads Count of times readahead occurred. ``` @@ -70,7 +70,7 @@ Using this configuration: When run with: ``` -./telegraf -config telegraf.conf -filter bcache -test +./telegraf -config telegraf.conf -input-filter bcache -test ``` It produces: diff --git a/plugins/bcache/bcache.go b/plugins/inputs/bcache/bcache.go similarity index 86% rename from plugins/bcache/bcache.go rename to plugins/inputs/bcache/bcache.go index 76e638ea4..b6d6eb130 100644 --- a/plugins/bcache/bcache.go +++ b/plugins/inputs/bcache/bcache.go @@ -8,7 +8,7 @@ import ( "strconv" "strings" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdata/telegraf/plugins/inputs" ) type Bcache struct { @@ -69,7 +69,7 @@ func prettyToBytes(v string) uint64 { return uint64(result) } -func (b *Bcache) gatherBcache(bdev string, acc plugins.Accumulator) error { +func (b *Bcache) gatherBcache(bdev string, acc inputs.Accumulator) error { tags := getTags(bdev) metrics, err := filepath.Glob(bdev + "/stats_total/*") if len(metrics) < 0 { @@ -81,7 +81,9 @@ func (b *Bcache) gatherBcache(bdev string, acc plugins.Accumulator) error { } rawValue := strings.TrimSpace(string(file)) value := prettyToBytes(rawValue) - acc.Add("dirty_data", value, tags) + + fields := make(map[string]interface{}) + fields["dirty_data"] = value for _, path := range metrics { key := filepath.Base(path) @@ -92,16 +94,17 @@ func (b *Bcache) gatherBcache(bdev string, acc plugins.Accumulator) error { } if key == "bypassed" { value := prettyToBytes(rawValue) - acc.Add(key, value, tags) + fields[key] = value } else { value, _ := strconv.ParseUint(rawValue, 10, 64) - acc.Add(key, value, tags) + fields[key] = value } } + acc.AddFields("bcache", fields, tags) return nil } -func (b *Bcache) Gather(acc plugins.Accumulator) error { +func (b *Bcache) Gather(acc inputs.Accumulator) error { bcacheDevsChecked := make(map[string]bool) var restrictDevs bool if len(b.BcacheDevs) != 0 { @@ -117,7 +120,7 @@ func (b *Bcache) Gather(acc plugins.Accumulator) error { } bdevs, _ := filepath.Glob(bcachePath + "/*/bdev*") if len(bdevs) < 1 { - return errors.New("Can't found any bcache device") + return errors.New("Can't find any bcache device") } for _, bdev := range bdevs { if restrictDevs { @@ -132,7 +135,7 @@ func (b *Bcache) Gather(acc plugins.Accumulator) error { } func init() { - 
plugins.Add("bcache", func() plugins.Plugin { + inputs.Add("bcache", func() inputs.Input { return &Bcache{} }) } diff --git a/plugins/bcache/bcache_test.go b/plugins/inputs/bcache/bcache_test.go similarity index 62% rename from plugins/bcache/bcache_test.go rename to plugins/inputs/bcache/bcache_test.go index b2b83bfec..bd191528f 100644 --- a/plugins/bcache/bcache_test.go +++ b/plugins/inputs/bcache/bcache_test.go @@ -5,8 +5,7 @@ import ( "os" "testing" - "github.com/influxdb/telegraf/testutil" - "github.com/stretchr/testify/assert" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) @@ -29,11 +28,6 @@ var ( testBcacheBackingDevPath = os.TempDir() + "/telegraf/sys/devices/virtual/block/md10" ) -type metrics struct { - name string - value uint64 -} - func TestBcacheGeneratesMetrics(t *testing.T) { err := os.MkdirAll(testBcacheUuidPath, 0755) require.NoError(t, err) @@ -53,70 +47,52 @@ func TestBcacheGeneratesMetrics(t *testing.T) { err = os.MkdirAll(testBcacheUuidPath+"/bdev0/stats_total", 0755) require.NoError(t, err) - err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/dirty_data", []byte(dirty_data), 0644) + err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/dirty_data", + []byte(dirty_data), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/bypassed", []byte(bypassed), 0644) + err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/bypassed", + []byte(bypassed), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_bypass_hits", []byte(cache_bypass_hits), 0644) + err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_bypass_hits", + []byte(cache_bypass_hits), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_bypass_misses", []byte(cache_bypass_misses), 0644) + err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_bypass_misses", + []byte(cache_bypass_misses), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_hit_ratio", []byte(cache_hit_ratio), 0644) + err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_hit_ratio", + []byte(cache_hit_ratio), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_hits", []byte(cache_hits), 0644) + err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_hits", + []byte(cache_hits), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_miss_collisions", []byte(cache_miss_collisions), 0644) + err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_miss_collisions", + []byte(cache_miss_collisions), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_misses", []byte(cache_misses), 0644) + err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_misses", + []byte(cache_misses), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_readaheads", []byte(cache_readaheads), 0644) + err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_readaheads", + []byte(cache_readaheads), 0644) require.NoError(t, err) - intMetrics := []*metrics{ - { - name: "dirty_data", - value: 1610612736, - }, - { - name: "bypassed", - value: 5167704440832, - }, - { - name: "cache_bypass_hits", - value: 146155333, - }, - { - name: "cache_bypass_misses", - value: 0, - }, - { - name: 
"cache_hit_ratio", - value: 90, - }, - { - name: "cache_hits", - value: 511469583, - }, - { - name: "cache_miss_collisions", - value: 157567, - }, - { - name: "cache_misses", - value: 50616331, - }, - { - name: "cache_readaheads", - value: 2, - }, + fields := map[string]interface{}{ + "dirty_data": uint64(1610612736), + "bypassed": uint64(5167704440832), + "cache_bypass_hits": uint64(146155333), + "cache_bypass_misses": uint64(0), + "cache_hit_ratio": uint64(90), + "cache_hits": uint64(511469583), + "cache_miss_collisions": uint64(157567), + "cache_misses": uint64(50616331), + "cache_readaheads": uint64(2), } tags := map[string]string{ @@ -126,27 +102,19 @@ func TestBcacheGeneratesMetrics(t *testing.T) { var acc testutil.Accumulator - //all devs + // all devs b := &Bcache{BcachePath: testBcachePath} err = b.Gather(&acc) require.NoError(t, err) + acc.AssertContainsTaggedFields(t, "bcache", fields, tags) - for _, metric := range intMetrics { - assert.True(t, acc.HasUIntValue(metric.name), metric.name) - assert.True(t, acc.CheckTaggedValue(metric.name, metric.value, tags)) - } - - //one exist dev + // one exist dev b = &Bcache{BcachePath: testBcachePath, BcacheDevs: []string{"bcache0"}} err = b.Gather(&acc) require.NoError(t, err) - - for _, metric := range intMetrics { - assert.True(t, acc.HasUIntValue(metric.name), metric.name) - assert.True(t, acc.CheckTaggedValue(metric.name, metric.value, tags)) - } + acc.AssertContainsTaggedFields(t, "bcache", fields, tags) err = os.RemoveAll(os.TempDir() + "/telegraf") require.NoError(t, err) diff --git a/plugins/disque/disque.go b/plugins/inputs/disque/disque.go similarity index 90% rename from plugins/disque/disque.go rename to plugins/inputs/disque/disque.go index 004aa3c0f..364e78fbc 100644 --- a/plugins/disque/disque.go +++ b/plugins/inputs/disque/disque.go @@ -10,7 +10,7 @@ import ( "strings" "sync" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdata/telegraf/plugins/inputs" ) type Disque struct { @@ -61,7 +61,7 @@ var ErrProtocolError = errors.New("disque protocol error") // Reads stats from all configured servers accumulates stats. // Returns one of the errors encountered while gather stats (if any). 
-func (g *Disque) Gather(acc plugins.Accumulator) error { +func (g *Disque) Gather(acc inputs.Accumulator) error { if len(g.Servers) == 0 { url := &url.URL{ Host: ":7711", @@ -98,7 +98,7 @@ func (g *Disque) Gather(acc plugins.Accumulator) error { const defaultPort = "7711" -func (g *Disque) gatherServer(addr *url.URL, acc plugins.Accumulator) error { +func (g *Disque) gatherServer(addr *url.URL, acc inputs.Accumulator) error { if g.c == nil { _, _, err := net.SplitHostPort(addr.Host) @@ -155,6 +155,8 @@ func (g *Disque) gatherServer(addr *url.URL, acc plugins.Accumulator) error { var read int + fields := make(map[string]interface{}) + tags := map[string]string{"host": addr.String()} for read < sz { line, err := r.ReadString('\n') if err != nil { @@ -176,12 +178,11 @@ func (g *Disque) gatherServer(addr *url.URL, acc plugins.Accumulator) error { continue } - tags := map[string]string{"host": addr.String()} val := strings.TrimSpace(parts[1]) ival, err := strconv.ParseUint(val, 10, 64) if err == nil { - acc.Add(metric, ival, tags) + fields[metric] = ival continue } @@ -190,14 +191,14 @@ func (g *Disque) gatherServer(addr *url.URL, acc plugins.Accumulator) error { return err } - acc.Add(metric, fval, tags) + fields[metric] = fval } - + acc.AddFields("disque", fields, tags) return nil } func init() { - plugins.Add("disque", func() plugins.Plugin { + inputs.Add("disque", func() inputs.Input { return &Disque{} }) } diff --git a/plugins/disque/disque_test.go b/plugins/inputs/disque/disque_test.go similarity index 62% rename from plugins/disque/disque_test.go rename to plugins/inputs/disque/disque_test.go index 0a4722d93..f060e9568 100644 --- a/plugins/disque/disque_test.go +++ b/plugins/inputs/disque/disque_test.go @@ -6,8 +6,7 @@ import ( "net" "testing" - "github.com/influxdb/telegraf/testutil" - "github.com/stretchr/testify/assert" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) @@ -55,42 +54,26 @@ func TestDisqueGeneratesMetrics(t *testing.T) { err = r.Gather(&acc) require.NoError(t, err) - checkInt := []struct { - name string - value uint64 - }{ - {"uptime", 1452705}, - {"clients", 31}, - {"blocked_clients", 13}, - {"used_memory", 1840104}, - {"used_memory_rss", 3227648}, - {"used_memory_peak", 89603656}, - {"total_connections_received", 5062777}, - {"total_commands_processed", 12308396}, - {"instantaneous_ops_per_sec", 18}, - {"latest_fork_usec", 1644}, - {"registered_jobs", 360}, - {"registered_queues", 12}, - } - - for _, c := range checkInt { - assert.True(t, acc.CheckValue(c.name, c.value)) - } - - checkFloat := []struct { - name string - value float64 - }{ - {"mem_fragmentation_ratio", 1.75}, - {"used_cpu_sys", 19585.73}, - {"used_cpu_user", 11255.96}, - {"used_cpu_sys_children", 1.75}, - {"used_cpu_user_children", 1.91}, - } - - for _, c := range checkFloat { - assert.True(t, acc.CheckValue(c.name, c.value)) + fields := map[string]interface{}{ + "uptime": uint64(1452705), + "clients": uint64(31), + "blocked_clients": uint64(13), + "used_memory": uint64(1840104), + "used_memory_rss": uint64(3227648), + "used_memory_peak": uint64(89603656), + "total_connections_received": uint64(5062777), + "total_commands_processed": uint64(12308396), + "instantaneous_ops_per_sec": uint64(18), + "latest_fork_usec": uint64(1644), + "registered_jobs": uint64(360), + "registered_queues": uint64(12), + "mem_fragmentation_ratio": float64(1.75), + "used_cpu_sys": float64(19585.73), + "used_cpu_user": float64(11255.96), + "used_cpu_sys_children": float64(1.75), + 
"used_cpu_user_children": float64(1.91), } + acc.AssertContainsFields(t, "disque", fields) } func TestDisqueCanPullStatsFromMultipleServers(t *testing.T) { @@ -137,42 +120,26 @@ func TestDisqueCanPullStatsFromMultipleServers(t *testing.T) { err = r.Gather(&acc) require.NoError(t, err) - checkInt := []struct { - name string - value uint64 - }{ - {"uptime", 1452705}, - {"clients", 31}, - {"blocked_clients", 13}, - {"used_memory", 1840104}, - {"used_memory_rss", 3227648}, - {"used_memory_peak", 89603656}, - {"total_connections_received", 5062777}, - {"total_commands_processed", 12308396}, - {"instantaneous_ops_per_sec", 18}, - {"latest_fork_usec", 1644}, - {"registered_jobs", 360}, - {"registered_queues", 12}, - } - - for _, c := range checkInt { - assert.True(t, acc.CheckValue(c.name, c.value)) - } - - checkFloat := []struct { - name string - value float64 - }{ - {"mem_fragmentation_ratio", 1.75}, - {"used_cpu_sys", 19585.73}, - {"used_cpu_user", 11255.96}, - {"used_cpu_sys_children", 1.75}, - {"used_cpu_user_children", 1.91}, - } - - for _, c := range checkFloat { - assert.True(t, acc.CheckValue(c.name, c.value)) + fields := map[string]interface{}{ + "uptime": uint64(1452705), + "clients": uint64(31), + "blocked_clients": uint64(13), + "used_memory": uint64(1840104), + "used_memory_rss": uint64(3227648), + "used_memory_peak": uint64(89603656), + "total_connections_received": uint64(5062777), + "total_commands_processed": uint64(12308396), + "instantaneous_ops_per_sec": uint64(18), + "latest_fork_usec": uint64(1644), + "registered_jobs": uint64(360), + "registered_queues": uint64(12), + "mem_fragmentation_ratio": float64(1.75), + "used_cpu_sys": float64(19585.73), + "used_cpu_user": float64(11255.96), + "used_cpu_sys_children": float64(1.75), + "used_cpu_user_children": float64(1.91), } + acc.AssertContainsFields(t, "disque", fields) } const testOutput = `# Server diff --git a/plugins/inputs/docker/README.md b/plugins/inputs/docker/README.md new file mode 100644 index 000000000..fa662ca80 --- /dev/null +++ b/plugins/inputs/docker/README.md @@ -0,0 +1,148 @@ +# Docker Input Plugin + +The docker plugin uses the docker remote API to gather metrics on running +docker containers. You can read Docker's documentation for their remote API +[here](https://docs.docker.com/engine/reference/api/docker_remote_api_v1.20/#get-container-stats-based-on-resource-usage) + +The docker plugin uses the excellent +[fsouza go-dockerclient](https://github.com/fsouza/go-dockerclient) library to +gather stats. Documentation for the library can be found +[here](https://godoc.org/github.com/fsouza/go-dockerclient) and documentation +for the stat structure can be found +[here](https://godoc.org/github.com/fsouza/go-dockerclient#Stats) + +### Configuration: + +``` +# Read metrics about docker containers +[[inputs.docker]] + # Docker Endpoint + # To use TCP, set endpoint = "tcp://[ip]:[port]" + # To use environment variables (ie, docker-machine), set endpoint = "ENV" + endpoint = "unix:///var/run/docker.sock" + # Only collect metrics for these containers, collect all if empty + container_names = [] +``` + +### Measurements & Fields: + +Every effort was made to preserve the names based on the JSON response from the +docker API. + +Note that the docker_cpu metric may appear multiple times per collection, based +on the availability of per-cpu stats on your system. 
+ +- docker_mem + - total_pgmafault + - cache + - mapped_file + - total_inactive_file + - pgpgout + - rss + - total_mapped_file + - writeback + - unevictable + - pgpgin + - total_unevictable + - pgmajfault + - total_rss + - total_rss_huge + - total_writeback + - total_inactive_anon + - rss_huge + - hierarchical_memory_limit + - total_pgfault + - total_active_file + - active_anon + - total_active_anon + - total_pgpgout + - total_cache + - inactive_anon + - active_file + - pgfault + - inactive_file + - total_pgpgin + - max_usage + - usage + - failcnt + - limit +- docker_cpu + - throttling_periods + - throttling_throttled_periods + - throttling_throttled_time + - usage_in_kernelmode + - usage_in_usermode + - usage_system + - usage_total +- docker_net + - rx_dropped + - rx_bytes + - rx_errors + - tx_packets + - tx_dropped + - rx_packets + - tx_errors + - tx_bytes +- docker_blkio + - io_service_bytes_recursive_async + - io_service_bytes_recursive_read + - io_service_bytes_recursive_sync + - io_service_bytes_recursive_total + - io_service_bytes_recursive_write + - io_serviced_recursive_async + - io_serviced_recursive_read + - io_serviced_recursive_sync + - io_serviced_recursive_total + - io_serviced_recursive_write + +### Tags: + +- All stats have the following tags: + - cont_id (container ID) + - cont_image (container image) + - cont_name (container name) +- docker_cpu specific: + - cpu +- docker_net specific: + - network +- docker_blkio specific: + - device + +### Example Output: + +``` +% ./telegraf -config ~/ws/telegraf.conf -input-filter docker -test +* Plugin: docker, Collection 1 +> docker_mem,cont_id=5705ba8ed8fb47527410653d60a8bb2f3af5e62372297c419022a3cc6d45d848,\ +cont_image=spotify/kafka,cont_name=kafka \ +active_anon=52568064i,active_file=6926336i,cache=12038144i,fail_count=0i,\ +hierarchical_memory_limit=9223372036854771712i,inactive_anon=52707328i,\ +inactive_file=5111808i,limit=1044578304i,mapped_file=10301440i,\ +max_usage=140656640i,pgfault=63762i,pgmajfault=2837i,pgpgin=73355i,\ +pgpgout=45736i,rss=105275392i,rss_huge=4194304i,total_active_anon=52568064i,\ +total_active_file=6926336i,total_cache=12038144i,total_inactive_anon=52707328i,\ +total_inactive_file=5111808i,total_mapped_file=10301440i,total_pgfault=63762i,\ +total_pgmafault=0i,total_pgpgin=73355i,total_pgpgout=45736i,\ +total_rss=105275392i,total_rss_huge=4194304i,total_unevictable=0i,\ +total_writeback=0i,unevictable=0i,usage=117440512i,writeback=0i 1453409536840126713 +> docker_cpu,cont_id=5705ba8ed8fb47527410653d60a8bb2f3af5e62372297c419022a3cc6d45d848,\ +cont_image=spotify/kafka,cont_name=kafka,cpu=cpu-total \ +throttling_periods=0i,throttling_throttled_periods=0i,\ +throttling_throttled_time=0i,usage_in_kernelmode=440000000i,\ +usage_in_usermode=2290000000i,usage_system=84795360000000i,\ +usage_total=6628208865i 1453409536840126713 +> docker_cpu,cont_id=5705ba8ed8fb47527410653d60a8bb2f3af5e62372297c419022a3cc6d45d848,\ +cont_image=spotify/kafka,cont_name=kafka,cpu=cpu0 \ +usage_total=6628208865i 1453409536840126713 +> docker_net,cont_id=5705ba8ed8fb47527410653d60a8bb2f3af5e62372297c419022a3cc6d45d848,\ +cont_image=spotify/kafka,cont_name=kafka,network=eth0 \ +rx_bytes=7468i,rx_dropped=0i,rx_errors=0i,rx_packets=94i,tx_bytes=946i,\ +tx_dropped=0i,tx_errors=0i,tx_packets=13i 1453409536840126713 +> docker_blkio,cont_id=5705ba8ed8fb47527410653d60a8bb2f3af5e62372297c419022a3cc6d45d848,\ +cont_image=spotify/kafka,cont_name=kafka,device=8:0 \ 
+io_service_bytes_recursive_async=80216064i,io_service_bytes_recursive_read=79925248i,\ +io_service_bytes_recursive_sync=77824i,io_service_bytes_recursive_total=80293888i,\ +io_service_bytes_recursive_write=368640i,io_serviced_recursive_async=6562i,\ +io_serviced_recursive_read=6492i,io_serviced_recursive_sync=37i,\ +io_serviced_recursive_total=6599i,io_serviced_recursive_write=107i 1453409536840126713 +``` diff --git a/plugins/inputs/docker/docker.go b/plugins/inputs/docker/docker.go new file mode 100644 index 000000000..70fcaa19a --- /dev/null +++ b/plugins/inputs/docker/docker.go @@ -0,0 +1,312 @@ +package system + +import ( + "fmt" + "strings" + "sync" + "time" + + "github.com/influxdata/telegraf/plugins/inputs" + + "github.com/fsouza/go-dockerclient" +) + +type Docker struct { + Endpoint string + ContainerNames []string + + client *docker.Client +} + +var sampleConfig = ` + # Docker Endpoint + # To use TCP, set endpoint = "tcp://[ip]:[port]" + # To use environment variables (ie, docker-machine), set endpoint = "ENV" + endpoint = "unix:///var/run/docker.sock" + # Only collect metrics for these containers, collect all if empty + container_names = [] +` + +func (d *Docker) Description() string { + return "Read metrics about docker containers" +} + +func (d *Docker) SampleConfig() string { return sampleConfig } + +func (d *Docker) Gather(acc inputs.Accumulator) error { + if d.client == nil { + var c *docker.Client + var err error + if d.Endpoint == "ENV" { + c, err = docker.NewClientFromEnv() + if err != nil { + return err + } + } else if d.Endpoint == "" { + c, err = docker.NewClient("unix:///var/run/docker.sock") + if err != nil { + return err + } + } else { + c, err = docker.NewClient(d.Endpoint) + if err != nil { + return err + } + } + d.client = c + } + + opts := docker.ListContainersOptions{} + containers, err := d.client.ListContainers(opts) + if err != nil { + return err + } + + var wg sync.WaitGroup + wg.Add(len(containers)) + for _, container := range containers { + go func(c docker.APIContainers) { + defer wg.Done() + err := d.gatherContainer(c, acc) + if err != nil { + fmt.Println(err.Error()) + } + }(container) + } + wg.Wait() + + return nil +} + +func (d *Docker) gatherContainer( + container docker.APIContainers, + acc inputs.Accumulator, +) error { + // Parse container name + cname := "unknown" + if len(container.Names) > 0 { + // Not sure what to do with other names, just take the first. 
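+ // The docker remote API reports names with a leading slash (for example "/kafka"), so trim it off to recover the container name.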
+ cname = strings.TrimPrefix(container.Names[0], "/") + } + + tags := map[string]string{ + "cont_id": container.ID, + "cont_name": cname, + "cont_image": container.Image, + } + if len(d.ContainerNames) > 0 { + if !sliceContains(cname, d.ContainerNames) { + return nil + } + } + + statChan := make(chan *docker.Stats) + done := make(chan bool) + statOpts := docker.StatsOptions{ + Stream: false, + ID: container.ID, + Stats: statChan, + Done: done, + Timeout: time.Duration(time.Second * 5), + } + + var err error + go func() { + err = d.client.Stats(statOpts) + }() + + stat := <-statChan + if err != nil { + return err + } + + // Add labels to tags + for k, v := range container.Labels { + tags[k] = v + } + + gatherContainerStats(stat, acc, tags) + + return nil +} + +func gatherContainerStats( + stat *docker.Stats, + acc inputs.Accumulator, + tags map[string]string, +) { + now := stat.Read + + memfields := map[string]interface{}{ + "max_usage": stat.MemoryStats.MaxUsage, + "usage": stat.MemoryStats.Usage, + "fail_count": stat.MemoryStats.Failcnt, + "limit": stat.MemoryStats.Limit, + "total_pgmafault": stat.MemoryStats.Stats.TotalPgmafault, + "cache": stat.MemoryStats.Stats.Cache, + "mapped_file": stat.MemoryStats.Stats.MappedFile, + "total_inactive_file": stat.MemoryStats.Stats.TotalInactiveFile, + "pgpgout": stat.MemoryStats.Stats.Pgpgout, + "rss": stat.MemoryStats.Stats.Rss, + "total_mapped_file": stat.MemoryStats.Stats.TotalMappedFile, + "writeback": stat.MemoryStats.Stats.Writeback, + "unevictable": stat.MemoryStats.Stats.Unevictable, + "pgpgin": stat.MemoryStats.Stats.Pgpgin, + "total_unevictable": stat.MemoryStats.Stats.TotalUnevictable, + "pgmajfault": stat.MemoryStats.Stats.Pgmajfault, + "total_rss": stat.MemoryStats.Stats.TotalRss, + "total_rss_huge": stat.MemoryStats.Stats.TotalRssHuge, + "total_writeback": stat.MemoryStats.Stats.TotalWriteback, + "total_inactive_anon": stat.MemoryStats.Stats.TotalInactiveAnon, + "rss_huge": stat.MemoryStats.Stats.RssHuge, + "hierarchical_memory_limit": stat.MemoryStats.Stats.HierarchicalMemoryLimit, + "total_pgfault": stat.MemoryStats.Stats.TotalPgfault, + "total_active_file": stat.MemoryStats.Stats.TotalActiveFile, + "active_anon": stat.MemoryStats.Stats.ActiveAnon, + "total_active_anon": stat.MemoryStats.Stats.TotalActiveAnon, + "total_pgpgout": stat.MemoryStats.Stats.TotalPgpgout, + "total_cache": stat.MemoryStats.Stats.TotalCache, + "inactive_anon": stat.MemoryStats.Stats.InactiveAnon, + "active_file": stat.MemoryStats.Stats.ActiveFile, + "pgfault": stat.MemoryStats.Stats.Pgfault, + "inactive_file": stat.MemoryStats.Stats.InactiveFile, + "total_pgpgin": stat.MemoryStats.Stats.TotalPgpgin, + } + acc.AddFields("docker_mem", memfields, tags, now) + + cpufields := map[string]interface{}{ + "usage_total": stat.CPUStats.CPUUsage.TotalUsage, + "usage_in_usermode": stat.CPUStats.CPUUsage.UsageInUsermode, + "usage_in_kernelmode": stat.CPUStats.CPUUsage.UsageInKernelmode, + "usage_system": stat.CPUStats.SystemCPUUsage, + "throttling_periods": stat.CPUStats.ThrottlingData.Periods, + "throttling_throttled_periods": stat.CPUStats.ThrottlingData.ThrottledPeriods, + "throttling_throttled_time": stat.CPUStats.ThrottlingData.ThrottledTime, + } + cputags := copyTags(tags) + cputags["cpu"] = "cpu-total" + acc.AddFields("docker_cpu", cpufields, cputags, now) + + for i, percpu := range stat.CPUStats.CPUUsage.PercpuUsage { + percputags := copyTags(tags) + percputags["cpu"] = fmt.Sprintf("cpu%d", i) + acc.AddFields("docker_cpu", map[string]interface{}{"usage_total": 
percpu}, percputags, now) + } + + for network, netstats := range stat.Networks { + netfields := map[string]interface{}{ + "rx_dropped": netstats.RxDropped, + "rx_bytes": netstats.RxBytes, + "rx_errors": netstats.RxErrors, + "tx_packets": netstats.TxPackets, + "tx_dropped": netstats.TxDropped, + "rx_packets": netstats.RxPackets, + "tx_errors": netstats.TxErrors, + "tx_bytes": netstats.TxBytes, + } + // Create a new network tag dictionary for the "network" tag + nettags := copyTags(tags) + nettags["network"] = network + acc.AddFields("docker_net", netfields, nettags, now) + } + + gatherBlockIOMetrics(stat, acc, tags, now) +} + +func gatherBlockIOMetrics( + stat *docker.Stats, + acc inputs.Accumulator, + tags map[string]string, + now time.Time, +) { + blkioStats := stat.BlkioStats + // Make a map of devices to their block io stats + deviceStatMap := make(map[string]map[string]interface{}) + + for _, metric := range blkioStats.IOServiceBytesRecursive { + device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor) + _, ok := deviceStatMap[device] + if !ok { + deviceStatMap[device] = make(map[string]interface{}) + } + + field := fmt.Sprintf("io_service_bytes_recursive_%s", strings.ToLower(metric.Op)) + deviceStatMap[device][field] = metric.Value + } + + for _, metric := range blkioStats.IOServicedRecursive { + device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor) + _, ok := deviceStatMap[device] + if !ok { + deviceStatMap[device] = make(map[string]interface{}) + } + + field := fmt.Sprintf("io_serviced_recursive_%s", strings.ToLower(metric.Op)) + deviceStatMap[device][field] = metric.Value + } + + for _, metric := range blkioStats.IOQueueRecursive { + device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor) + field := fmt.Sprintf("io_queue_recursive_%s", strings.ToLower(metric.Op)) + deviceStatMap[device][field] = metric.Value + } + + for _, metric := range blkioStats.IOServiceTimeRecursive { + device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor) + field := fmt.Sprintf("io_service_time_recursive_%s", strings.ToLower(metric.Op)) + deviceStatMap[device][field] = metric.Value + } + + for _, metric := range blkioStats.IOWaitTimeRecursive { + device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor) + field := fmt.Sprintf("io_wait_time_%s", strings.ToLower(metric.Op)) + deviceStatMap[device][field] = metric.Value + } + + for _, metric := range blkioStats.IOMergedRecursive { + device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor) + field := fmt.Sprintf("io_merged_recursive_%s", strings.ToLower(metric.Op)) + deviceStatMap[device][field] = metric.Value + } + + for _, metric := range blkioStats.IOTimeRecursive { + device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor) + field := fmt.Sprintf("io_time_recursive_%s", strings.ToLower(metric.Op)) + deviceStatMap[device][field] = metric.Value + } + + for _, metric := range blkioStats.SectorsRecursive { + device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor) + field := fmt.Sprintf("sectors_recursive_%s", strings.ToLower(metric.Op)) + deviceStatMap[device][field] = metric.Value + } + + for device, fields := range deviceStatMap { + iotags := copyTags(tags) + iotags["device"] = device + acc.AddFields("docker_blkio", fields, iotags, now) + } +} + +func copyTags(in map[string]string) map[string]string { + out := make(map[string]string) + for k, v := range in { + out[k] = v + } + return out +} + +func sliceContains(in string, sl []string) bool { + for _, str := range sl { + if str == in { + return true + } + } + return false +} + +func 
init() { + inputs.Add("docker", func() inputs.Input { + return &Docker{} + }) +} diff --git a/plugins/inputs/docker/docker_test.go b/plugins/inputs/docker/docker_test.go new file mode 100644 index 000000000..9b85d1029 --- /dev/null +++ b/plugins/inputs/docker/docker_test.go @@ -0,0 +1,190 @@ +package system + +import ( + "testing" + "time" + + "github.com/influxdata/telegraf/testutil" + + "github.com/fsouza/go-dockerclient" +) + +func TestDockerGatherContainerStats(t *testing.T) { + var acc testutil.Accumulator + stats := testStats() + + tags := map[string]string{ + "cont_id": "foobarbaz", + "cont_name": "redis", + "cont_image": "redis/image", + } + gatherContainerStats(stats, &acc, tags) + + // test docker_net measurement + netfields := map[string]interface{}{ + "rx_dropped": uint64(1), + "rx_bytes": uint64(2), + "rx_errors": uint64(3), + "tx_packets": uint64(4), + "tx_dropped": uint64(1), + "rx_packets": uint64(2), + "tx_errors": uint64(3), + "tx_bytes": uint64(4), + } + nettags := copyTags(tags) + nettags["network"] = "eth0" + acc.AssertContainsTaggedFields(t, "docker_net", netfields, nettags) + + // test docker_blkio measurement + blkiotags := copyTags(tags) + blkiotags["device"] = "6:0" + blkiofields := map[string]interface{}{ + "io_service_bytes_recursive_read": uint64(100), + "io_serviced_recursive_write": uint64(101), + } + acc.AssertContainsTaggedFields(t, "docker_blkio", blkiofields, blkiotags) + + // test docker_mem measurement + memfields := map[string]interface{}{ + "max_usage": uint64(1001), + "usage": uint64(1111), + "fail_count": uint64(1), + "limit": uint64(20), + "total_pgmafault": uint64(0), + "cache": uint64(0), + "mapped_file": uint64(0), + "total_inactive_file": uint64(0), + "pgpgout": uint64(0), + "rss": uint64(0), + "total_mapped_file": uint64(0), + "writeback": uint64(0), + "unevictable": uint64(0), + "pgpgin": uint64(0), + "total_unevictable": uint64(0), + "pgmajfault": uint64(0), + "total_rss": uint64(44), + "total_rss_huge": uint64(444), + "total_writeback": uint64(55), + "total_inactive_anon": uint64(0), + "rss_huge": uint64(0), + "hierarchical_memory_limit": uint64(0), + "total_pgfault": uint64(0), + "total_active_file": uint64(0), + "active_anon": uint64(0), + "total_active_anon": uint64(0), + "total_pgpgout": uint64(0), + "total_cache": uint64(0), + "inactive_anon": uint64(0), + "active_file": uint64(1), + "pgfault": uint64(2), + "inactive_file": uint64(3), + "total_pgpgin": uint64(4), + } + acc.AssertContainsTaggedFields(t, "docker_mem", memfields, tags) + + // test docker_cpu measurement + cputags := copyTags(tags) + cputags["cpu"] = "cpu-total" + cpufields := map[string]interface{}{ + "usage_total": uint64(500), + "usage_in_usermode": uint64(100), + "usage_in_kernelmode": uint64(200), + "usage_system": uint64(100), + "throttling_periods": uint64(1), + "throttling_throttled_periods": uint64(0), + "throttling_throttled_time": uint64(0), + } + acc.AssertContainsTaggedFields(t, "docker_cpu", cpufields, cputags) + + cputags["cpu"] = "cpu0" + cpu0fields := map[string]interface{}{ + "usage_total": uint64(1), + } + acc.AssertContainsTaggedFields(t, "docker_cpu", cpu0fields, cputags) + + cputags["cpu"] = "cpu1" + cpu1fields := map[string]interface{}{ + "usage_total": uint64(1002), + } + acc.AssertContainsTaggedFields(t, "docker_cpu", cpu1fields, cputags) +} + +func testStats() *docker.Stats { + stats := &docker.Stats{ + Read: time.Now(), + Networks: make(map[string]docker.NetworkStats), + } + + stats.CPUStats.CPUUsage.PercpuUsage = []uint64{1, 1002} + 
stats.CPUStats.CPUUsage.UsageInUsermode = 100 + stats.CPUStats.CPUUsage.TotalUsage = 500 + stats.CPUStats.CPUUsage.UsageInKernelmode = 200 + stats.CPUStats.SystemCPUUsage = 100 + stats.CPUStats.ThrottlingData.Periods = 1 + + stats.MemoryStats.Stats.TotalPgmafault = 0 + stats.MemoryStats.Stats.Cache = 0 + stats.MemoryStats.Stats.MappedFile = 0 + stats.MemoryStats.Stats.TotalInactiveFile = 0 + stats.MemoryStats.Stats.Pgpgout = 0 + stats.MemoryStats.Stats.Rss = 0 + stats.MemoryStats.Stats.TotalMappedFile = 0 + stats.MemoryStats.Stats.Writeback = 0 + stats.MemoryStats.Stats.Unevictable = 0 + stats.MemoryStats.Stats.Pgpgin = 0 + stats.MemoryStats.Stats.TotalUnevictable = 0 + stats.MemoryStats.Stats.Pgmajfault = 0 + stats.MemoryStats.Stats.TotalRss = 44 + stats.MemoryStats.Stats.TotalRssHuge = 444 + stats.MemoryStats.Stats.TotalWriteback = 55 + stats.MemoryStats.Stats.TotalInactiveAnon = 0 + stats.MemoryStats.Stats.RssHuge = 0 + stats.MemoryStats.Stats.HierarchicalMemoryLimit = 0 + stats.MemoryStats.Stats.TotalPgfault = 0 + stats.MemoryStats.Stats.TotalActiveFile = 0 + stats.MemoryStats.Stats.ActiveAnon = 0 + stats.MemoryStats.Stats.TotalActiveAnon = 0 + stats.MemoryStats.Stats.TotalPgpgout = 0 + stats.MemoryStats.Stats.TotalCache = 0 + stats.MemoryStats.Stats.InactiveAnon = 0 + stats.MemoryStats.Stats.ActiveFile = 1 + stats.MemoryStats.Stats.Pgfault = 2 + stats.MemoryStats.Stats.InactiveFile = 3 + stats.MemoryStats.Stats.TotalPgpgin = 4 + + stats.MemoryStats.MaxUsage = 1001 + stats.MemoryStats.Usage = 1111 + stats.MemoryStats.Failcnt = 1 + stats.MemoryStats.Limit = 20 + + stats.Networks["eth0"] = docker.NetworkStats{ + RxDropped: 1, + RxBytes: 2, + RxErrors: 3, + TxPackets: 4, + TxDropped: 1, + RxPackets: 2, + TxErrors: 3, + TxBytes: 4, + } + + sbr := docker.BlkioStatsEntry{ + Major: 6, + Minor: 0, + Op: "read", + Value: 100, + } + sr := docker.BlkioStatsEntry{ + Major: 6, + Minor: 0, + Op: "write", + Value: 101, + } + + stats.BlkioStats.IOServiceBytesRecursive = append( + stats.BlkioStats.IOServiceBytesRecursive, sbr) + stats.BlkioStats.IOServicedRecursive = append( + stats.BlkioStats.IOServicedRecursive, sr) + + return stats +} diff --git a/plugins/elasticsearch/README.md b/plugins/inputs/elasticsearch/README.md similarity index 98% rename from plugins/elasticsearch/README.md rename to plugins/inputs/elasticsearch/README.md index dbc9a3587..03acad034 100644 --- a/plugins/elasticsearch/README.md +++ b/plugins/inputs/elasticsearch/README.md @@ -31,8 +31,9 @@ contains `status`, `timed_out`, `number_of_nodes`, `number_of_data_nodes`, `initializing_shards`, `unassigned_shards` fields - elasticsearch_cluster_health -contains `status`, `number_of_shards`, `number_of_replicas`, `active_primary_shards`, -`active_shards`, `relocating_shards`, `initializing_shards`, `unassigned_shards` fields +contains `status`, `number_of_shards`, `number_of_replicas`, +`active_primary_shards`, `active_shards`, `relocating_shards`, +`initializing_shards`, `unassigned_shards` fields - elasticsearch_indices #### node measurements: @@ -316,4 +317,4 @@ Transport statistics about sent and received bytes in cluster communication meas - elasticsearch_transport_rx_count value=6 - elasticsearch_transport_rx_size_in_bytes value=1380 - elasticsearch_transport_tx_count value=6 -- elasticsearch_transport_tx_size_in_bytes value=1380 \ No newline at end of file +- elasticsearch_transport_tx_size_in_bytes value=1380 diff --git a/plugins/elasticsearch/elasticsearch.go b/plugins/inputs/elasticsearch/elasticsearch.go similarity index 80% 
rename from plugins/elasticsearch/elasticsearch.go rename to plugins/inputs/elasticsearch/elasticsearch.go index bfe6f20bb..304e0e3d7 100644 --- a/plugins/elasticsearch/elasticsearch.go +++ b/plugins/inputs/elasticsearch/elasticsearch.go @@ -2,11 +2,15 @@ package elasticsearch import ( "encoding/json" + "errors" "fmt" "net/http" + "strings" + "sync" "time" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/inputs" ) const statsPath = "/_nodes/stats" @@ -91,25 +95,45 @@ func (e *Elasticsearch) Description() string { // Gather reads the stats from Elasticsearch and writes it to the // Accumulator. -func (e *Elasticsearch) Gather(acc plugins.Accumulator) error { +func (e *Elasticsearch) Gather(acc inputs.Accumulator) error { + errChan := make(chan error, len(e.Servers)) + var wg sync.WaitGroup + wg.Add(len(e.Servers)) + for _, serv := range e.Servers { - var url string - if e.Local { - url = serv + statsPathLocal - } else { - url = serv + statsPath - } - if err := e.gatherNodeStats(url, acc); err != nil { - return err - } - if e.ClusterHealth { - e.gatherClusterStats(fmt.Sprintf("%s/_cluster/health?level=indices", serv), acc) - } + go func(s string, acc inputs.Accumulator) { + defer wg.Done() + var url string + if e.Local { + url = s + statsPathLocal + } else { + url = s + statsPath + } + if err := e.gatherNodeStats(url, acc); err != nil { + errChan <- err + return + } + if e.ClusterHealth { + e.gatherClusterStats(fmt.Sprintf("%s/_cluster/health?level=indices", s), acc) + } + }(serv, acc) } - return nil + + wg.Wait() + close(errChan) + // Get all errors and return them as one giant error + errStrings := []string{} + for err := range errChan { + errStrings = append(errStrings, err.Error()) + } + + if len(errStrings) == 0 { + return nil + } + return errors.New(strings.Join(errStrings, "\n")) } -func (e *Elasticsearch) gatherNodeStats(url string, acc plugins.Accumulator) error { +func (e *Elasticsearch) gatherNodeStats(url string, acc inputs.Accumulator) error { nodeStats := &struct { ClusterName string `json:"cluster_name"` Nodes map[string]*node `json:"nodes"` @@ -141,16 +165,20 @@ func (e *Elasticsearch) gatherNodeStats(url string, acc plugins.Accumulator) err "breakers": n.Breakers, } + now := time.Now() for p, s := range stats { - if err := e.parseInterface(acc, p, tags, s); err != nil { + f := internal.JSONFlattener{} + err := f.FlattenJSON("", s) + if err != nil { return err } + acc.AddFields("elasticsearch_"+p, f.Fields, tags, now) } } return nil } -func (e *Elasticsearch) gatherClusterStats(url string, acc plugins.Accumulator) error { +func (e *Elasticsearch) gatherClusterStats(url string, acc inputs.Accumulator) error { clusterStats := &clusterHealth{} if err := e.gatherData(url, clusterStats); err != nil { return err @@ -168,7 +196,7 @@ func (e *Elasticsearch) gatherClusterStats(url string, acc plugins.Accumulator) "unassigned_shards": clusterStats.UnassignedShards, } acc.AddFields( - "cluster_health", + "elasticsearch_cluster_health", clusterFields, map[string]string{"name": clusterStats.ClusterName}, measurementTime, @@ -186,7 +214,7 @@ func (e *Elasticsearch) gatherClusterStats(url string, acc plugins.Accumulator) "unassigned_shards": health.UnassignedShards, } acc.AddFields( - "indices", + "elasticsearch_indices", indexFields, map[string]string{"index": name}, measurementTime, @@ -205,7 +233,8 @@ func (e *Elasticsearch) gatherData(url string, v interface{}) error { // NOTE: we are not going to read/discard 
r.Body under the assumption we'd prefer // to let the underlying transport close the connection and re-establish a new one for // future calls. - return fmt.Errorf("elasticsearch: API responded with status-code %d, expected %d", r.StatusCode, http.StatusOK) + return fmt.Errorf("elasticsearch: API responded with status-code %d, expected %d", + r.StatusCode, http.StatusOK) } if err = json.NewDecoder(r.Body).Decode(v); err != nil { return err @@ -213,27 +242,8 @@ func (e *Elasticsearch) gatherData(url string, v interface{}) error { return nil } -func (e *Elasticsearch) parseInterface(acc plugins.Accumulator, prefix string, tags map[string]string, v interface{}) error { - switch t := v.(type) { - case map[string]interface{}: - for k, v := range t { - if err := e.parseInterface(acc, prefix+"_"+k, tags, v); err != nil { - return err - } - } - case float64: - acc.Add(prefix, t, tags) - case bool, string, []interface{}: - // ignored types - return nil - default: - return fmt.Errorf("elasticsearch: got unexpected type %T with value %v (%s)", t, t, prefix) - } - return nil -} - func init() { - plugins.Add("elasticsearch", func() plugins.Plugin { + inputs.Add("elasticsearch", func() inputs.Input { return NewElasticsearch() }) } diff --git a/plugins/elasticsearch/elasticsearch_test.go b/plugins/inputs/elasticsearch/elasticsearch_test.go similarity index 55% rename from plugins/elasticsearch/elasticsearch_test.go rename to plugins/inputs/elasticsearch/elasticsearch_test.go index c697593e9..f94d3f9ac 100644 --- a/plugins/elasticsearch/elasticsearch_test.go +++ b/plugins/inputs/elasticsearch/elasticsearch_test.go @@ -6,8 +6,8 @@ import ( "strings" "testing" - "github.com/influxdb/telegraf/testutil" - "github.com/stretchr/testify/assert" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) @@ -52,23 +52,15 @@ func TestElasticsearch(t *testing.T) { "node_host": "test", } - testTables := []map[string]float64{ - indicesExpected, - osExpected, - processExpected, - jvmExpected, - threadPoolExpected, - fsExpected, - transportExpected, - httpExpected, - breakersExpected, - } - - for _, testTable := range testTables { - for k, v := range testTable { - assert.NoError(t, acc.ValidateTaggedValue(k, v, tags)) - } - } + acc.AssertContainsTaggedFields(t, "elasticsearch_indices", indicesExpected, tags) + acc.AssertContainsTaggedFields(t, "elasticsearch_os", osExpected, tags) + acc.AssertContainsTaggedFields(t, "elasticsearch_process", processExpected, tags) + acc.AssertContainsTaggedFields(t, "elasticsearch_jvm", jvmExpected, tags) + acc.AssertContainsTaggedFields(t, "elasticsearch_thread_pool", threadPoolExpected, tags) + acc.AssertContainsTaggedFields(t, "elasticsearch_fs", fsExpected, tags) + acc.AssertContainsTaggedFields(t, "elasticsearch_transport", transportExpected, tags) + acc.AssertContainsTaggedFields(t, "elasticsearch_http", httpExpected, tags) + acc.AssertContainsTaggedFields(t, "elasticsearch_breakers", breakersExpected, tags) } func TestGatherClusterStats(t *testing.T) { @@ -80,29 +72,15 @@ func TestGatherClusterStats(t *testing.T) { var acc testutil.Accumulator require.NoError(t, es.Gather(&acc)) - var clusterHealthTests = []struct { - measurement string - fields map[string]interface{} - tags map[string]string - }{ - { - "cluster_health", - clusterHealthExpected, - map[string]string{"name": "elasticsearch_telegraf"}, - }, - { - "indices", - v1IndexExpected, - map[string]string{"index": "v1"}, - }, - { - "indices", - v2IndexExpected, - map[string]string{"index": "v2"}, - 
}, - } + acc.AssertContainsTaggedFields(t, "elasticsearch_cluster_health", + clusterHealthExpected, + map[string]string{"name": "elasticsearch_telegraf"}) - for _, exp := range clusterHealthTests { - assert.NoError(t, acc.ValidateTaggedFields(exp.measurement, exp.fields, exp.tags)) - } + acc.AssertContainsTaggedFields(t, "elasticsearch_indices", + v1IndexExpected, + map[string]string{"index": "v1"}) + + acc.AssertContainsTaggedFields(t, "elasticsearch_indices", + v2IndexExpected, + map[string]string{"index": "v2"}) } diff --git a/plugins/inputs/elasticsearch/testdata_test.go b/plugins/inputs/elasticsearch/testdata_test.go new file mode 100644 index 000000000..bca1f9e45 --- /dev/null +++ b/plugins/inputs/elasticsearch/testdata_test.go @@ -0,0 +1,765 @@ +package elasticsearch + +const clusterResponse = ` +{ + "cluster_name": "elasticsearch_telegraf", + "status": "green", + "timed_out": false, + "number_of_nodes": 3, + "number_of_data_nodes": 3, + "active_primary_shards": 5, + "active_shards": 15, + "relocating_shards": 0, + "initializing_shards": 0, + "unassigned_shards": 0, + "indices": { + "v1": { + "status": "green", + "number_of_shards": 10, + "number_of_replicas": 1, + "active_primary_shards": 10, + "active_shards": 20, + "relocating_shards": 0, + "initializing_shards": 0, + "unassigned_shards": 0 + }, + "v2": { + "status": "red", + "number_of_shards": 10, + "number_of_replicas": 1, + "active_primary_shards": 0, + "active_shards": 0, + "relocating_shards": 0, + "initializing_shards": 0, + "unassigned_shards": 20 + } + } +} +` + +var clusterHealthExpected = map[string]interface{}{ + "status": "green", + "timed_out": false, + "number_of_nodes": 3, + "number_of_data_nodes": 3, + "active_primary_shards": 5, + "active_shards": 15, + "relocating_shards": 0, + "initializing_shards": 0, + "unassigned_shards": 0, +} + +var v1IndexExpected = map[string]interface{}{ + "status": "green", + "number_of_shards": 10, + "number_of_replicas": 1, + "active_primary_shards": 10, + "active_shards": 20, + "relocating_shards": 0, + "initializing_shards": 0, + "unassigned_shards": 0, +} + +var v2IndexExpected = map[string]interface{}{ + "status": "red", + "number_of_shards": 10, + "number_of_replicas": 1, + "active_primary_shards": 0, + "active_shards": 0, + "relocating_shards": 0, + "initializing_shards": 0, + "unassigned_shards": 20, +} + +const statsResponse = ` +{ + "cluster_name": "es-testcluster", + "nodes": { + "SDFsfSDFsdfFSDSDfSFDSDF": { + "timestamp": 1436365550135, + "name": "test.host.com", + "transport_address": "inet[/127.0.0.1:9300]", + "host": "test", + "ip": [ + "inet[/127.0.0.1:9300]", + "NONE" + ], + "attributes": { + "master": "true" + }, + "indices": { + "docs": { + "count": 29652, + "deleted": 5229 + }, + "store": { + "size_in_bytes": 37715234, + "throttle_time_in_millis": 215 + }, + "indexing": { + "index_total": 84790, + "index_time_in_millis": 29680, + "index_current": 0, + "delete_total": 13879, + "delete_time_in_millis": 1139, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 1, + "time_in_millis": 2, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 1, + "missing_time_in_millis": 2, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 1452, + "query_time_in_millis": 5695, + "query_current": 0, + "fetch_total": 414, + "fetch_time_in_millis": 146, + "fetch_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 
133, + "total_time_in_millis": 21060, + "total_docs": 203672, + "total_size_in_bytes": 142900226 + }, + "refresh": { + "total": 1076, + "total_time_in_millis": 20078 + }, + "flush": { + "total": 115, + "total_time_in_millis": 2401 + }, + "warmer": { + "current": 0, + "total": 2319, + "total_time_in_millis": 448 + }, + "filter_cache": { + "memory_size_in_bytes": 7384, + "evictions": 0 + }, + "id_cache": { + "memory_size_in_bytes": 0 + }, + "fielddata": { + "memory_size_in_bytes": 12996, + "evictions": 0 + }, + "percolate": { + "total": 0, + "time_in_millis": 0, + "current": 0, + "memory_size_in_bytes": -1, + "memory_size": "-1b", + "queries": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 134, + "memory_in_bytes": 1285212, + "index_writer_memory_in_bytes": 0, + "index_writer_max_memory_in_bytes": 172368955, + "version_map_memory_in_bytes": 611844, + "fixed_bit_set_memory_in_bytes": 0 + }, + "translog": { + "operations": 17702, + "size_in_bytes": 17 + }, + "suggest": { + "total": 0, + "time_in_millis": 0, + "current": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + } + }, + "os": { + "timestamp": 1436460392944, + "load_average": [ + 0.01, + 0.04, + 0.05 + ], + "mem": { + "free_in_bytes": 477761536, + "used_in_bytes": 1621868544, + "free_percent": 74, + "used_percent": 25, + "actual_free_in_bytes": 1565470720, + "actual_used_in_bytes": 534159360 + }, + "swap": { + "used_in_bytes": 0, + "free_in_bytes": 487997440 + } + }, + "process": { + "timestamp": 1436460392945, + "open_file_descriptors": 160, + "cpu": { + "percent": 2, + "sys_in_millis": 1870, + "user_in_millis": 13610, + "total_in_millis": 15480 + }, + "mem": { + "total_virtual_in_bytes": 4747890688 + } + }, + "jvm": { + "timestamp": 1436460392945, + "uptime_in_millis": 202245, + "mem": { + "heap_used_in_bytes": 52709568, + "heap_used_percent": 5, + "heap_committed_in_bytes": 259522560, + "heap_max_in_bytes": 1038876672, + "non_heap_used_in_bytes": 39634576, + "non_heap_committed_in_bytes": 40841216, + "pools": { + "young": { + "used_in_bytes": 32685760, + "max_in_bytes": 279183360, + "peak_used_in_bytes": 71630848, + "peak_max_in_bytes": 279183360 + }, + "survivor": { + "used_in_bytes": 8912880, + "max_in_bytes": 34865152, + "peak_used_in_bytes": 8912888, + "peak_max_in_bytes": 34865152 + }, + "old": { + "used_in_bytes": 11110928, + "max_in_bytes": 724828160, + "peak_used_in_bytes": 14354608, + "peak_max_in_bytes": 724828160 + } + } + }, + "threads": { + "count": 44, + "peak_count": 45 + }, + "gc": { + "collectors": { + "young": { + "collection_count": 2, + "collection_time_in_millis": 98 + }, + "old": { + "collection_count": 1, + "collection_time_in_millis": 24 + } + } + }, + "buffer_pools": { + "direct": { + "count": 40, + "used_in_bytes": 6304239, + "total_capacity_in_bytes": 6304239 + }, + "mapped": { + "count": 0, + "used_in_bytes": 0, + "total_capacity_in_bytes": 0 + } + } + }, + "thread_pool": { + "percolate": { + "threads": 123, + "queue": 23, + "active": 13, + "rejected": 235, + "largest": 23, + "completed": 33 + }, + "fetch_shard_started": { + "threads": 3, + "queue": 1, + "active": 5, + "rejected": 6, + "largest": 4, + "completed": 54 + }, + "listener": { + "threads": 1, + "queue": 2, + "active": 4, + "rejected": 8, + "largest": 1, + "completed": 1 + }, + "index": { + "threads": 6, + "queue": 8, + "active": 4, + "rejected": 2, + 
"largest": 3, + "completed": 6 + }, + "refresh": { + "threads": 23, + "queue": 7, + "active": 3, + "rejected": 4, + "largest": 8, + "completed": 3 + }, + "suggest": { + "threads": 2, + "queue": 7, + "active": 2, + "rejected": 1, + "largest": 8, + "completed": 3 + }, + "generic": { + "threads": 1, + "queue": 4, + "active": 6, + "rejected": 3, + "largest": 2, + "completed": 27 + }, + "warmer": { + "threads": 2, + "queue": 7, + "active": 3, + "rejected": 2, + "largest": 3, + "completed": 1 + }, + "search": { + "threads": 5, + "queue": 7, + "active": 2, + "rejected": 7, + "largest": 2, + "completed": 4 + }, + "flush": { + "threads": 3, + "queue": 8, + "active": 0, + "rejected": 1, + "largest": 5, + "completed": 3 + }, + "optimize": { + "threads": 3, + "queue": 4, + "active": 1, + "rejected": 2, + "largest": 7, + "completed": 3 + }, + "fetch_shard_store": { + "threads": 1, + "queue": 7, + "active": 4, + "rejected": 2, + "largest": 4, + "completed": 1 + }, + "management": { + "threads": 2, + "queue": 3, + "active": 1, + "rejected": 6, + "largest": 2, + "completed": 22 + }, + "get": { + "threads": 1, + "queue": 8, + "active": 4, + "rejected": 3, + "largest": 2, + "completed": 1 + }, + "merge": { + "threads": 6, + "queue": 4, + "active": 5, + "rejected": 2, + "largest": 5, + "completed": 1 + }, + "bulk": { + "threads": 4, + "queue": 5, + "active": 7, + "rejected": 3, + "largest": 1, + "completed": 4 + }, + "snapshot": { + "threads": 8, + "queue": 5, + "active": 6, + "rejected": 2, + "largest": 1, + "completed": 0 + } + }, + "fs": { + "timestamp": 1436460392946, + "total": { + "total_in_bytes": 19507089408, + "free_in_bytes": 16909316096, + "available_in_bytes": 15894814720 + }, + "data": [ + { + "path": "/usr/share/elasticsearch/data/elasticsearch/nodes/0", + "mount": "/usr/share/elasticsearch/data", + "type": "ext4", + "total_in_bytes": 19507089408, + "free_in_bytes": 16909316096, + "available_in_bytes": 15894814720 + } + ] + }, + "transport": { + "server_open": 13, + "rx_count": 6, + "rx_size_in_bytes": 1380, + "tx_count": 6, + "tx_size_in_bytes": 1380 + }, + "http": { + "current_open": 3, + "total_opened": 3 + }, + "breakers": { + "fielddata": { + "limit_size_in_bytes": 623326003, + "limit_size": "594.4mb", + "estimated_size_in_bytes": 0, + "estimated_size": "0b", + "overhead": 1.03, + "tripped": 0 + }, + "request": { + "limit_size_in_bytes": 415550668, + "limit_size": "396.2mb", + "estimated_size_in_bytes": 0, + "estimated_size": "0b", + "overhead": 1.0, + "tripped": 0 + }, + "parent": { + "limit_size_in_bytes": 727213670, + "limit_size": "693.5mb", + "estimated_size_in_bytes": 0, + "estimated_size": "0b", + "overhead": 1.0, + "tripped": 0 + } + } + } + } +} +` + +var indicesExpected = map[string]interface{}{ + "id_cache_memory_size_in_bytes": float64(0), + "completion_size_in_bytes": float64(0), + "suggest_total": float64(0), + "suggest_time_in_millis": float64(0), + "suggest_current": float64(0), + "query_cache_memory_size_in_bytes": float64(0), + "query_cache_evictions": float64(0), + "query_cache_hit_count": float64(0), + "query_cache_miss_count": float64(0), + "store_size_in_bytes": float64(37715234), + "store_throttle_time_in_millis": float64(215), + "merges_current_docs": float64(0), + "merges_current_size_in_bytes": float64(0), + "merges_total": float64(133), + "merges_total_time_in_millis": float64(21060), + "merges_total_docs": float64(203672), + "merges_total_size_in_bytes": float64(142900226), + "merges_current": float64(0), + "filter_cache_memory_size_in_bytes": float64(7384), + 
"filter_cache_evictions": float64(0), + "indexing_index_total": float64(84790), + "indexing_index_time_in_millis": float64(29680), + "indexing_index_current": float64(0), + "indexing_noop_update_total": float64(0), + "indexing_throttle_time_in_millis": float64(0), + "indexing_delete_total": float64(13879), + "indexing_delete_time_in_millis": float64(1139), + "indexing_delete_current": float64(0), + "get_exists_time_in_millis": float64(0), + "get_missing_total": float64(1), + "get_missing_time_in_millis": float64(2), + "get_current": float64(0), + "get_total": float64(1), + "get_time_in_millis": float64(2), + "get_exists_total": float64(0), + "refresh_total": float64(1076), + "refresh_total_time_in_millis": float64(20078), + "percolate_current": float64(0), + "percolate_memory_size_in_bytes": float64(-1), + "percolate_queries": float64(0), + "percolate_total": float64(0), + "percolate_time_in_millis": float64(0), + "translog_operations": float64(17702), + "translog_size_in_bytes": float64(17), + "recovery_current_as_source": float64(0), + "recovery_current_as_target": float64(0), + "recovery_throttle_time_in_millis": float64(0), + "docs_count": float64(29652), + "docs_deleted": float64(5229), + "flush_total_time_in_millis": float64(2401), + "flush_total": float64(115), + "fielddata_memory_size_in_bytes": float64(12996), + "fielddata_evictions": float64(0), + "search_fetch_current": float64(0), + "search_open_contexts": float64(0), + "search_query_total": float64(1452), + "search_query_time_in_millis": float64(5695), + "search_query_current": float64(0), + "search_fetch_total": float64(414), + "search_fetch_time_in_millis": float64(146), + "warmer_current": float64(0), + "warmer_total": float64(2319), + "warmer_total_time_in_millis": float64(448), + "segments_count": float64(134), + "segments_memory_in_bytes": float64(1285212), + "segments_index_writer_memory_in_bytes": float64(0), + "segments_index_writer_max_memory_in_bytes": float64(172368955), + "segments_version_map_memory_in_bytes": float64(611844), + "segments_fixed_bit_set_memory_in_bytes": float64(0), +} + +var osExpected = map[string]interface{}{ + "load_average_0": float64(0.01), + "load_average_1": float64(0.04), + "load_average_2": float64(0.05), + "swap_used_in_bytes": float64(0), + "swap_free_in_bytes": float64(487997440), + "timestamp": float64(1436460392944), + "mem_free_percent": float64(74), + "mem_used_percent": float64(25), + "mem_actual_free_in_bytes": float64(1565470720), + "mem_actual_used_in_bytes": float64(534159360), + "mem_free_in_bytes": float64(477761536), + "mem_used_in_bytes": float64(1621868544), +} + +var processExpected = map[string]interface{}{ + "mem_total_virtual_in_bytes": float64(4747890688), + "timestamp": float64(1436460392945), + "open_file_descriptors": float64(160), + "cpu_total_in_millis": float64(15480), + "cpu_percent": float64(2), + "cpu_sys_in_millis": float64(1870), + "cpu_user_in_millis": float64(13610), +} + +var jvmExpected = map[string]interface{}{ + "timestamp": float64(1436460392945), + "uptime_in_millis": float64(202245), + "mem_non_heap_used_in_bytes": float64(39634576), + "mem_non_heap_committed_in_bytes": float64(40841216), + "mem_pools_young_max_in_bytes": float64(279183360), + "mem_pools_young_peak_used_in_bytes": float64(71630848), + "mem_pools_young_peak_max_in_bytes": float64(279183360), + "mem_pools_young_used_in_bytes": float64(32685760), + "mem_pools_survivor_peak_used_in_bytes": float64(8912888), + "mem_pools_survivor_peak_max_in_bytes": float64(34865152), + 
"mem_pools_survivor_used_in_bytes": float64(8912880), + "mem_pools_survivor_max_in_bytes": float64(34865152), + "mem_pools_old_peak_max_in_bytes": float64(724828160), + "mem_pools_old_used_in_bytes": float64(11110928), + "mem_pools_old_max_in_bytes": float64(724828160), + "mem_pools_old_peak_used_in_bytes": float64(14354608), + "mem_heap_used_in_bytes": float64(52709568), + "mem_heap_used_percent": float64(5), + "mem_heap_committed_in_bytes": float64(259522560), + "mem_heap_max_in_bytes": float64(1038876672), + "threads_peak_count": float64(45), + "threads_count": float64(44), + "gc_collectors_young_collection_count": float64(2), + "gc_collectors_young_collection_time_in_millis": float64(98), + "gc_collectors_old_collection_count": float64(1), + "gc_collectors_old_collection_time_in_millis": float64(24), + "buffer_pools_direct_count": float64(40), + "buffer_pools_direct_used_in_bytes": float64(6304239), + "buffer_pools_direct_total_capacity_in_bytes": float64(6304239), + "buffer_pools_mapped_count": float64(0), + "buffer_pools_mapped_used_in_bytes": float64(0), + "buffer_pools_mapped_total_capacity_in_bytes": float64(0), +} + +var threadPoolExpected = map[string]interface{}{ + "merge_threads": float64(6), + "merge_queue": float64(4), + "merge_active": float64(5), + "merge_rejected": float64(2), + "merge_largest": float64(5), + "merge_completed": float64(1), + "bulk_threads": float64(4), + "bulk_queue": float64(5), + "bulk_active": float64(7), + "bulk_rejected": float64(3), + "bulk_largest": float64(1), + "bulk_completed": float64(4), + "warmer_threads": float64(2), + "warmer_queue": float64(7), + "warmer_active": float64(3), + "warmer_rejected": float64(2), + "warmer_largest": float64(3), + "warmer_completed": float64(1), + "get_largest": float64(2), + "get_completed": float64(1), + "get_threads": float64(1), + "get_queue": float64(8), + "get_active": float64(4), + "get_rejected": float64(3), + "index_threads": float64(6), + "index_queue": float64(8), + "index_active": float64(4), + "index_rejected": float64(2), + "index_largest": float64(3), + "index_completed": float64(6), + "suggest_threads": float64(2), + "suggest_queue": float64(7), + "suggest_active": float64(2), + "suggest_rejected": float64(1), + "suggest_largest": float64(8), + "suggest_completed": float64(3), + "fetch_shard_store_queue": float64(7), + "fetch_shard_store_active": float64(4), + "fetch_shard_store_rejected": float64(2), + "fetch_shard_store_largest": float64(4), + "fetch_shard_store_completed": float64(1), + "fetch_shard_store_threads": float64(1), + "management_threads": float64(2), + "management_queue": float64(3), + "management_active": float64(1), + "management_rejected": float64(6), + "management_largest": float64(2), + "management_completed": float64(22), + "percolate_queue": float64(23), + "percolate_active": float64(13), + "percolate_rejected": float64(235), + "percolate_largest": float64(23), + "percolate_completed": float64(33), + "percolate_threads": float64(123), + "listener_active": float64(4), + "listener_rejected": float64(8), + "listener_largest": float64(1), + "listener_completed": float64(1), + "listener_threads": float64(1), + "listener_queue": float64(2), + "search_rejected": float64(7), + "search_largest": float64(2), + "search_completed": float64(4), + "search_threads": float64(5), + "search_queue": float64(7), + "search_active": float64(2), + "fetch_shard_started_threads": float64(3), + "fetch_shard_started_queue": float64(1), + "fetch_shard_started_active": float64(5), + 
"fetch_shard_started_rejected": float64(6), + "fetch_shard_started_largest": float64(4), + "fetch_shard_started_completed": float64(54), + "refresh_rejected": float64(4), + "refresh_largest": float64(8), + "refresh_completed": float64(3), + "refresh_threads": float64(23), + "refresh_queue": float64(7), + "refresh_active": float64(3), + "optimize_threads": float64(3), + "optimize_queue": float64(4), + "optimize_active": float64(1), + "optimize_rejected": float64(2), + "optimize_largest": float64(7), + "optimize_completed": float64(3), + "snapshot_largest": float64(1), + "snapshot_completed": float64(0), + "snapshot_threads": float64(8), + "snapshot_queue": float64(5), + "snapshot_active": float64(6), + "snapshot_rejected": float64(2), + "generic_threads": float64(1), + "generic_queue": float64(4), + "generic_active": float64(6), + "generic_rejected": float64(3), + "generic_largest": float64(2), + "generic_completed": float64(27), + "flush_threads": float64(3), + "flush_queue": float64(8), + "flush_active": float64(0), + "flush_rejected": float64(1), + "flush_largest": float64(5), + "flush_completed": float64(3), +} + +var fsExpected = map[string]interface{}{ + "data_0_total_in_bytes": float64(19507089408), + "data_0_free_in_bytes": float64(16909316096), + "data_0_available_in_bytes": float64(15894814720), + "timestamp": float64(1436460392946), + "total_free_in_bytes": float64(16909316096), + "total_available_in_bytes": float64(15894814720), + "total_total_in_bytes": float64(19507089408), +} + +var transportExpected = map[string]interface{}{ + "server_open": float64(13), + "rx_count": float64(6), + "rx_size_in_bytes": float64(1380), + "tx_count": float64(6), + "tx_size_in_bytes": float64(1380), +} + +var httpExpected = map[string]interface{}{ + "current_open": float64(3), + "total_opened": float64(3), +} + +var breakersExpected = map[string]interface{}{ + "fielddata_estimated_size_in_bytes": float64(0), + "fielddata_overhead": float64(1.03), + "fielddata_tripped": float64(0), + "fielddata_limit_size_in_bytes": float64(623326003), + "request_estimated_size_in_bytes": float64(0), + "request_overhead": float64(1.0), + "request_tripped": float64(0), + "request_limit_size_in_bytes": float64(415550668), + "parent_overhead": float64(1.0), + "parent_tripped": float64(0), + "parent_limit_size_in_bytes": float64(727213670), + "parent_estimated_size_in_bytes": float64(0), +} diff --git a/plugins/inputs/exec/README.md b/plugins/inputs/exec/README.md new file mode 100644 index 000000000..bd78f0b3c --- /dev/null +++ b/plugins/inputs/exec/README.md @@ -0,0 +1,45 @@ +# Exec Plugin + +The exec plugin can execute arbitrary commands which output JSON. Then it flattens JSON and finds +all numeric values, treating them as floats. + +For example, if you have a json-returning command called mycollector, you could +setup the exec plugin with: + +``` +[[inputs.exec]] + command = "/usr/bin/mycollector --output=json" + name_suffix = "_mycollector" + interval = "10s" +``` + +The name suffix is appended to exec as "exec_name_suffix" to identify the input stream. + +The interval is used to determine how often a particular command should be run. Each +time the exec plugin runs, it will only run a particular command if it has been at least +`interval` seconds since the exec plugin last ran the command. 
+ + +# Sample + +Let's say that we have a command with the name_suffix "_mycollector", which gives the following output: +```json +{ + "a": 0.5, + "b": { + "c": 0.1, + "d": 5 + } +} +``` + +The collected metrics will be stored as field values under the same measurement "exec_mycollector": +``` + exec_mycollector a=0.5,b_c=0.1,b_d=5 1452815002357578567 +``` + +Other options for modifying the measurement names are: +``` +name_override = "newname" +name_prefix = "prefix_" +``` diff --git a/plugins/inputs/exec/exec.go b/plugins/inputs/exec/exec.go new file mode 100644 index 000000000..603ba1464 --- /dev/null +++ b/plugins/inputs/exec/exec.go @@ -0,0 +1,91 @@ +package exec + +import ( + "bytes" + "encoding/json" + "fmt" + "os/exec" + + "github.com/gonuts/go-shellquote" + + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/inputs" +) + +const sampleConfig = ` + # the command to run + command = "/usr/bin/mycollector --foo=bar" + + # measurement name suffix (for separating different commands) + name_suffix = "_mycollector" +` + +type Exec struct { + Command string + + runner Runner +} + +type Runner interface { + Run(*Exec) ([]byte, error) +} + +type CommandRunner struct{} + +func (c CommandRunner) Run(e *Exec) ([]byte, error) { + split_cmd, err := shellquote.Split(e.Command) + if err != nil || len(split_cmd) == 0 { + return nil, fmt.Errorf("exec: unable to parse command, %s", err) + } + + cmd := exec.Command(split_cmd[0], split_cmd[1:]...) + var out bytes.Buffer + cmd.Stdout = &out + + if err := cmd.Run(); err != nil { + return nil, fmt.Errorf("exec: %s for command '%s'", err, e.Command) + } + + return out.Bytes(), nil +} + +func NewExec() *Exec { + return &Exec{runner: CommandRunner{}} +} + +func (e *Exec) SampleConfig() string { + return sampleConfig +} + +func (e *Exec) Description() string { + return "Read flattened metrics from one or more commands that output JSON to stdout" +} + +func (e *Exec) Gather(acc inputs.Accumulator) error { + out, err := e.runner.Run(e) + if err != nil { + return err + } + + var jsonOut interface{} + err = json.Unmarshal(out, &jsonOut) + if err != nil { + return fmt.Errorf("exec: unable to parse output of '%s' as JSON, %s", + e.Command, err) + } + + f := internal.JSONFlattener{} + err = f.FlattenJSON("", jsonOut) + if err != nil { + return err + } + + acc.AddFields("exec", f.Fields, nil) + return nil +} + +func init() { + inputs.Add("exec", func() inputs.Input { + return NewExec() + }) +} diff --git a/plugins/inputs/exec/exec_test.go b/plugins/inputs/exec/exec_test.go new file mode 100644 index 000000000..8bf47c1d0 --- /dev/null +++ b/plugins/inputs/exec/exec_test.go @@ -0,0 +1,99 @@ +package exec + +import ( + "fmt" + "testing" + + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// Midnight 9/22/2015 +const baseTimeSeconds = 1442905200 + +const validJson = ` +{ + "status": "green", + "num_processes": 82, + "cpu": { + "status": "red", + "nil_status": null, + "used": 8234, + "free": 32 + }, + "percent": 0.81, + "users": [0, 1, 2, 3] +}` + +const malformedJson = ` +{ + "status": "green", +` + +type runnerMock struct { + out []byte + err error +} + +func newRunnerMock(out []byte, err error) Runner { + return &runnerMock{ + out: out, + err: err, + } +} + +func (r runnerMock) Run(e *Exec) ([]byte, error) { + if r.err != nil { + return nil, r.err + } + return r.out, nil +} + +func TestExec(t *testing.T) { + e := &Exec{ + runner: 
newRunnerMock([]byte(validJson), nil), + Command: "testcommand arg1", + } + + var acc testutil.Accumulator + err := e.Gather(&acc) + require.NoError(t, err) + assert.Equal(t, acc.NFields(), 8, "non-numeric measurements should be ignored") + + fields := map[string]interface{}{ + "num_processes": float64(82), + "cpu_used": float64(8234), + "cpu_free": float64(32), + "percent": float64(0.81), + "users_0": float64(0), + "users_1": float64(1), + "users_2": float64(2), + "users_3": float64(3), + } + acc.AssertContainsFields(t, "exec", fields) +} + +func TestExecMalformed(t *testing.T) { + e := &Exec{ + runner: newRunnerMock([]byte(malformedJson), nil), + Command: "badcommand arg1", + } + + var acc testutil.Accumulator + err := e.Gather(&acc) + require.Error(t, err) + assert.Equal(t, acc.NFields(), 0, "No new points should have been added") +} + +func TestCommandError(t *testing.T) { + e := &Exec{ + runner: newRunnerMock(nil, fmt.Errorf("exit status code 1")), + Command: "badcommand", + } + + var acc testutil.Accumulator + err := e.Gather(&acc) + require.Error(t, err) + assert.Equal(t, acc.NFields(), 0, "No new points should have been added") +} diff --git a/plugins/haproxy/haproxy.go b/plugins/inputs/haproxy/haproxy.go similarity index 85% rename from plugins/haproxy/haproxy.go rename to plugins/inputs/haproxy/haproxy.go index e5ce6e404..c2e334424 100644 --- a/plugins/haproxy/haproxy.go +++ b/plugins/inputs/haproxy/haproxy.go @@ -3,12 +3,13 @@ package haproxy import ( "encoding/csv" "fmt" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdata/telegraf/plugins/inputs" "io" "net/http" "net/url" "strconv" "sync" + "time" ) //CSV format: https://cbonte.github.io/haproxy-dconv/configuration-1.5.html#9.1 @@ -90,7 +91,7 @@ var sampleConfig = ` # If no servers are specified, then default to 127.0.0.1:1936 servers = ["http://myhaproxy.com:1936", "http://anotherhaproxy.com:1936"] # Or you can also use a local socket (not working yet) - # servers = ["socket:/run/haproxy/admin.sock"] + # servers = ["socket://run/haproxy/admin.sock"] ` func (r *haproxy) SampleConfig() string { @@ -103,7 +104,7 @@ func (r *haproxy) Description() string { // Reads stats from all configured servers and accumulates stats. // Returns one of the errors encountered while gathering stats (if any).
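// When no servers are configured, Gather falls back to the default local endpoint http://127.0.0.1:1936.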
-func (g *haproxy) Gather(acc plugins.Accumulator) error { +func (g *haproxy) Gather(acc inputs.Accumulator) error { if len(g.Servers) == 0 { return g.gatherServer("http://127.0.0.1:1936", acc) } @@ -125,7 +126,7 @@ func (g *haproxy) Gather(acc plugins.Accumulator) error { return outerr } -func (g *haproxy) gatherServer(addr string, acc plugins.Accumulator) error { +func (g *haproxy) gatherServer(addr string, acc inputs.Accumulator) error { if g.client == nil { client := &http.Client{} @@ -152,214 +153,212 @@ func (g *haproxy) gatherServer(addr string, acc plugins.Accumulator) error { return fmt.Errorf("Unable to get valid stat result from '%s': %s", addr, err) } - importCsvResult(res.Body, acc, u.Host) - - return nil + return importCsvResult(res.Body, acc, u.Host) } -func importCsvResult(r io.Reader, acc plugins.Accumulator, host string) ([][]string, error) { +func importCsvResult(r io.Reader, acc inputs.Accumulator, host string) error { csv := csv.NewReader(r) result, err := csv.ReadAll() + now := time.Now() for _, row := range result { - + fields := make(map[string]interface{}) + tags := map[string]string{ + "server": host, + "proxy": row[HF_PXNAME], + "sv": row[HF_SVNAME], + } for field, v := range row { - tags := map[string]string{ - "server": host, - "proxy": row[HF_PXNAME], - "sv": row[HF_SVNAME], - } switch field { case HF_QCUR: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { - acc.Add("qcur", ival, tags) + fields["qcur"] = ival } case HF_QMAX: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { - acc.Add("qmax", ival, tags) + fields["qmax"] = ival } case HF_SCUR: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { - acc.Add("scur", ival, tags) + fields["scur"] = ival } case HF_SMAX: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { - acc.Add("smax", ival, tags) + fields["smax"] = ival } case HF_STOT: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { - acc.Add("stot", ival, tags) + fields["stot"] = ival } case HF_BIN: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { - acc.Add("bin", ival, tags) + fields["bin"] = ival } case HF_BOUT: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { - acc.Add("bout", ival, tags) + fields["bout"] = ival } case HF_DREQ: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { - acc.Add("dreq", ival, tags) + fields["dreq"] = ival } case HF_DRESP: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { - acc.Add("dresp", ival, tags) + fields["dresp"] = ival } case HF_EREQ: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { - acc.Add("ereq", ival, tags) + fields["ereq"] = ival } case HF_ECON: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { - acc.Add("econ", ival, tags) + fields["econ"] = ival } case HF_ERESP: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { - acc.Add("eresp", ival, tags) + fields["eresp"] = ival } case HF_WRETR: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { - acc.Add("wretr", ival, tags) + fields["wretr"] = ival } case HF_WREDIS: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { - acc.Add("wredis", ival, tags) + fields["wredis"] = ival } case HF_ACT: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { - acc.Add("active_servers", ival, tags) + fields["active_servers"] = ival } case HF_BCK: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { - acc.Add("backup_servers", ival, tags) + fields["backup_servers"] = ival } case HF_DOWNTIME: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { - acc.Add("downtime", ival, tags) + 
fields["downtime"] = ival } case HF_THROTTLE: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { - acc.Add("throttle", ival, tags) + fields["throttle"] = ival } case HF_LBTOT: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { - acc.Add("lbtot", ival, tags) + fields["lbtot"] = ival } case HF_RATE: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { - acc.Add("rate", ival, tags) + fields["rate"] = ival } case HF_RATE_MAX: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { - acc.Add("rate_max", ival, tags) + fields["rate_max"] = ival } case HF_CHECK_DURATION: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { - acc.Add("check_duration", ival, tags) + fields["check_duration"] = ival } case HF_HRSP_1xx: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { - acc.Add("http_response.1xx", ival, tags) + fields["http_response.1xx"] = ival } case HF_HRSP_2xx: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { - acc.Add("http_response.2xx", ival, tags) + fields["http_response.2xx"] = ival } case HF_HRSP_3xx: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { - acc.Add("http_response.3xx", ival, tags) + fields["http_response.3xx"] = ival } case HF_HRSP_4xx: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { - acc.Add("http_response.4xx", ival, tags) + fields["http_response.4xx"] = ival } case HF_HRSP_5xx: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { - acc.Add("http_response.5xx", ival, tags) + fields["http_response.5xx"] = ival } case HF_REQ_RATE: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { - acc.Add("req_rate", ival, tags) + fields["req_rate"] = ival } case HF_REQ_RATE_MAX: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { - acc.Add("req_rate_max", ival, tags) + fields["req_rate_max"] = ival } case HF_REQ_TOT: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { - acc.Add("req_tot", ival, tags) + fields["req_tot"] = ival } case HF_CLI_ABRT: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { - acc.Add("cli_abort", ival, tags) + fields["cli_abort"] = ival } case HF_SRV_ABRT: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { - acc.Add("srv_abort", ival, tags) + fields["srv_abort"] = ival } case HF_QTIME: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { - acc.Add("qtime", ival, tags) + fields["qtime"] = ival } case HF_CTIME: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { - acc.Add("ctime", ival, tags) + fields["ctime"] = ival } case HF_RTIME: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { - acc.Add("rtime", ival, tags) + fields["rtime"] = ival } case HF_TTIME: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { - acc.Add("ttime", ival, tags) + fields["ttime"] = ival } - } - } + acc.AddFields("haproxy", fields, tags, now) } - return result, err + return err } func init() { - plugins.Add("haproxy", func() plugins.Plugin { + inputs.Add("haproxy", func() inputs.Input { return &haproxy{} }) } diff --git a/plugins/haproxy/haproxy_test.go b/plugins/inputs/haproxy/haproxy_test.go similarity index 68% rename from plugins/haproxy/haproxy_test.go rename to plugins/inputs/haproxy/haproxy_test.go index 6f07d34d1..7b86f2b50 100644 --- a/plugins/haproxy/haproxy_test.go +++ b/plugins/inputs/haproxy/haproxy_test.go @@ -5,7 +5,7 @@ import ( "strings" "testing" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "net/http" @@ -47,52 +47,39 @@ func 
TestHaproxyGeneratesMetricsWithAuthentication(t *testing.T) { "sv": "host0", } - assert.NoError(t, acc.ValidateTaggedValue("stot", uint64(171014), tags)) - - checkInt := []struct { - name string - value uint64 - }{ - - {"qmax", 81}, - {"scur", 288}, - {"smax", 713}, - {"bin", 5557055817}, - {"bout", 24096715169}, - {"dreq", 1102}, - {"dresp", 80}, - {"ereq", 95740}, - {"econ", 0}, - {"eresp", 0}, - {"wretr", 17}, - {"wredis", 19}, - {"active_servers", 1}, - {"backup_servers", 0}, - {"downtime", 0}, - {"throttle", 13}, - {"lbtot", 114}, - {"rate", 18}, - {"rate_max", 102}, - {"check_duration", 1}, - {"http_response.1xx", 0}, - {"http_response.2xx", 1314093}, - {"http_response.3xx", 537036}, - {"http_response.4xx", 123452}, - {"http_response.5xx", 11966}, - {"req_rate", 35}, - {"req_rate_max", 140}, - {"req_tot", 1987928}, - {"cli_abort", 0}, - {"srv_abort", 0}, - {"qtime", 0}, - {"ctime", 2}, - {"rtime", 23}, - {"ttime", 545}, - } - - for _, c := range checkInt { - assert.Equal(t, true, acc.CheckValue(c.name, c.value)) + fields := map[string]interface{}{ + "active_servers": uint64(1), + "backup_servers": uint64(0), + "bin": uint64(510913516), + "bout": uint64(2193856571), + "check_duration": uint64(10), + "cli_abort": uint64(73), + "ctime": uint64(2), + "downtime": uint64(0), + "dresp": uint64(0), + "econ": uint64(0), + "eresp": uint64(1), + "http_response.1xx": uint64(0), + "http_response.2xx": uint64(119534), + "http_response.3xx": uint64(48051), + "http_response.4xx": uint64(2345), + "http_response.5xx": uint64(1056), + "lbtot": uint64(171013), + "qcur": uint64(0), + "qmax": uint64(0), + "qtime": uint64(0), + "rate": uint64(3), + "rate_max": uint64(12), + "rtime": uint64(312), + "scur": uint64(1), + "smax": uint64(32), + "srv_abort": uint64(1), + "stot": uint64(171014), + "ttime": uint64(2341), + "wredis": uint64(0), + "wretr": uint64(1), } + acc.AssertContainsTaggedFields(t, "haproxy", fields, tags) //Here, we should get error because we don't pass authentication data r = &haproxy{ @@ -124,10 +111,39 @@ func TestHaproxyGeneratesMetricsWithoutAuthentication(t *testing.T) { "sv": "host0", } - assert.NoError(t, acc.ValidateTaggedValue("stot", uint64(171014), tags)) - assert.NoError(t, acc.ValidateTaggedValue("scur", uint64(1), tags)) - assert.NoError(t, acc.ValidateTaggedValue("rate", uint64(3), tags)) - assert.Equal(t, true, acc.CheckValue("bin", uint64(5557055817))) + fields := map[string]interface{}{ + "active_servers": uint64(1), + "backup_servers": uint64(0), + "bin": uint64(510913516), + "bout": uint64(2193856571), + "check_duration": uint64(10), + "cli_abort": uint64(73), + "ctime": uint64(2), + "downtime": uint64(0), + "dresp": uint64(0), + "econ": uint64(0), + "eresp": uint64(1), + "http_response.1xx": uint64(0), + "http_response.2xx": uint64(119534), + "http_response.3xx": uint64(48051), + "http_response.4xx": uint64(2345), + "http_response.5xx": uint64(1056), + "lbtot": uint64(171013), + "qcur": uint64(0), + "qmax": uint64(0), + "qtime": uint64(0), + "rate": uint64(3), + "rate_max": uint64(12), + "rtime": uint64(312), + "scur": uint64(1), + "smax": uint64(32), + "srv_abort": uint64(1), + "stot": uint64(171014), + "ttime": uint64(2341), + "wredis": uint64(0), + "wretr": uint64(1), + } + acc.AssertContainsTaggedFields(t, "haproxy", fields, tags) } //When not passing server config, we default to localhost diff --git a/plugins/httpjson/README.md b/plugins/inputs/httpjson/README.md similarity index 100% rename from plugins/httpjson/README.md rename to 
plugins/inputs/httpjson/README.md diff --git a/plugins/httpjson/httpjson.go b/plugins/inputs/httpjson/httpjson.go similarity index 54% rename from plugins/httpjson/httpjson.go rename to plugins/inputs/httpjson/httpjson.go index f1d2ef927..b90a02e5b 100644 --- a/plugins/httpjson/httpjson.go +++ b/plugins/inputs/httpjson/httpjson.go @@ -9,21 +9,19 @@ import ( "net/url" "strings" "sync" + "time" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/inputs" ) type HttpJson struct { - Services []Service - client HTTPClient -} - -type Service struct { Name string Servers []string Method string TagKeys []string Parameters map[string]string + client HTTPClient } type HTTPClient interface { @@ -47,31 +45,28 @@ func (c RealHTTPClient) MakeRequest(req *http.Request) (*http.Response, error) { } var sampleConfig = ` - # Specify services via an array of tables - [[plugins.httpjson.services]] + # a name for the service being polled + name = "webserver_stats" - # a name for the service being polled - name = "webserver_stats" + # URL of each server in the service's cluster + servers = [ + "http://localhost:9999/stats/", + "http://localhost:9998/stats/", + ] - # URL of each server in the service's cluster - servers = [ - "http://localhost:9999/stats/", - "http://localhost:9998/stats/", - ] + # HTTP method to use (case-sensitive) + method = "GET" - # HTTP method to use (case-sensitive) - method = "GET" + # List of tag names to extract from top-level of JSON server response + # tag_keys = [ + # "my_tag_1", + # "my_tag_2" + # ] - # List of tag names to extract from top-level of JSON server response - # tag_keys = [ - # "my_tag_1", - # "my_tag_2" - # ] - - # HTTP parameters (all values must be strings) - [plugins.httpjson.services.parameters] - event_type = "cpu_spike" - threshold = "0.75" + # HTTP parameters (all values must be strings) + [inputs.httpjson.parameters] + event_type = "cpu_spike" + threshold = "0.75" ` func (h *HttpJson) SampleConfig() string { @@ -83,25 +78,19 @@ func (h *HttpJson) Description() string { } // Gathers data for all servers. 
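// Servers are polled concurrently; any per-server errors are sent on a buffered channel and reported after all goroutines finish.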
-func (h *HttpJson) Gather(acc plugins.Accumulator) error { +func (h *HttpJson) Gather(acc inputs.Accumulator) error { var wg sync.WaitGroup - totalServers := 0 - for _, service := range h.Services { - totalServers += len(service.Servers) - } - errorChannel := make(chan error, totalServers) + errorChannel := make(chan error, len(h.Servers)) - for _, service := range h.Services { - for _, server := range service.Servers { - wg.Add(1) - go func(service Service, server string) { - defer wg.Done() - if err := h.gatherServer(acc, service, server); err != nil { - errorChannel <- err - } - }(service, server) - } + for _, server := range h.Servers { + wg.Add(1) + go func(server string) { + defer wg.Done() + if err := h.gatherServer(acc, server); err != nil { + errorChannel <- err + } + }(server) } wg.Wait() @@ -128,11 +117,11 @@ func (h *HttpJson) Gather(acc plugins.Accumulator) error { // Returns: // error: Any error that may have occurred func (h *HttpJson) gatherServer( - acc plugins.Accumulator, - service Service, + acc inputs.Accumulator, serverURL string, ) error { - resp, err := h.sendRequest(service, serverURL) + resp, responseTime, err := h.sendRequest(serverURL) + if err != nil { return err } @@ -146,7 +135,7 @@ func (h *HttpJson) gatherServer( "server": serverURL, } - for _, tag := range service.TagKeys { + for _, tag := range h.TagKeys { switch v := jsonOut[tag].(type) { case string: tags[tag] = v @@ -154,7 +143,22 @@ func (h *HttpJson) gatherServer( delete(jsonOut, tag) } - processResponse(acc, service.Name, tags, jsonOut) + if responseTime >= 0 { + jsonOut["response_time"] = responseTime + } + f := internal.JSONFlattener{} + err = f.FlattenJSON("", jsonOut) + if err != nil { + return err + } + + var msrmnt_name string + if h.Name == "" { + msrmnt_name = "httpjson" + } else { + msrmnt_name = "httpjson_" + h.Name + } + acc.AddFields(msrmnt_name, f.Fields, tags) return nil } @@ -165,34 +169,37 @@ func (h *HttpJson) gatherServer( // Returns: // string: body of the response // error : Any error that may have occurred -func (h *HttpJson) sendRequest(service Service, serverURL string) (string, error) { +func (h *HttpJson) sendRequest(serverURL string) (string, float64, error) { // Prepare URL requestURL, err := url.Parse(serverURL) if err != nil { - return "", fmt.Errorf("Invalid server URL \"%s\"", serverURL) + return "", -1, fmt.Errorf("Invalid server URL \"%s\"", serverURL) } params := url.Values{} - for k, v := range service.Parameters { + for k, v := range h.Parameters { params.Add(k, v) } requestURL.RawQuery = params.Encode() // Create + send request - req, err := http.NewRequest(service.Method, requestURL.String(), nil) + req, err := http.NewRequest(h.Method, requestURL.String(), nil) if err != nil { - return "", err + return "", -1, err } + start := time.Now() resp, err := h.client.MakeRequest(req) if err != nil { - return "", err + return "", -1, err } defer resp.Body.Close() + responseTime := time.Since(start).Seconds() + body, err := ioutil.ReadAll(resp.Body) if err != nil { - return string(body), err + return string(body), responseTime, err } // Process response @@ -203,31 +210,14 @@ func (h *HttpJson) sendRequest(service Service, serverURL string) (string, error http.StatusText(resp.StatusCode), http.StatusOK, http.StatusText(http.StatusOK)) - return string(body), err + return string(body), responseTime, err } - return string(body), err -} - -// Flattens the map generated from the JSON object and stores its float values using a -// plugins.Accumulator. 
It ignores any non-float values. -// Parameters: -// acc: the Accumulator to use -// prefix: What the name of the measurement name should be prefixed by. -// tags: telegraf tags to -func processResponse(acc plugins.Accumulator, prefix string, tags map[string]string, v interface{}) { - switch t := v.(type) { - case map[string]interface{}: - for k, v := range t { - processResponse(acc, prefix+"_"+k, tags, v) - } - case float64: - acc.Add(prefix, v, tags) - } + return string(body), responseTime, err } func init() { - plugins.Add("httpjson", func() plugins.Plugin { + inputs.Add("httpjson", func() inputs.Input { return &HttpJson{client: RealHTTPClient{client: &http.Client{}}} }) } diff --git a/plugins/httpjson/httpjson_test.go b/plugins/inputs/httpjson/httpjson_test.go similarity index 52% rename from plugins/httpjson/httpjson_test.go rename to plugins/inputs/httpjson/httpjson_test.go index 8f9bfe3ac..0ea5e9e42 100644 --- a/plugins/httpjson/httpjson_test.go +++ b/plugins/inputs/httpjson/httpjson_test.go @@ -1,13 +1,12 @@ package httpjson import ( - "fmt" "io/ioutil" "net/http" "strings" "testing" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -15,17 +14,17 @@ import ( const validJSON = ` { "parent": { - "child": 3, + "child": 3.0, "ignored_child": "hi" }, "ignored_null": null, "integer": 4, - "ignored_list": [3, 4], + "list": [3, 4], "ignored_parent": { - "another_ignored_list": [4], "another_ignored_null": null, "ignored_string": "hello, world!" - } + }, + "another_list": [4] }` const validJSONTags = ` @@ -35,6 +34,14 @@ const validJSONTags = ` "build": "123" }` +var expectedFields = map[string]interface{}{ + "parent_child": float64(3), + "list_0": float64(3), + "list_1": float64(4), + "another_list_0": float64(4), + "integer": float64(4), +} + const invalidJSON = "I don't think this is JSON" const empty = "" @@ -76,37 +83,36 @@ func (c mockHTTPClient) MakeRequest(req *http.Request) (*http.Response, error) { // // Returns: // *HttpJson: Pointer to an HttpJson object that uses the generated mock HTTP client -func genMockHttpJson(response string, statusCode int) *HttpJson { - return &HttpJson{ - client: mockHTTPClient{responseBody: response, statusCode: statusCode}, - Services: []Service{ - Service{ - Servers: []string{ - "http://server1.example.com/metrics/", - "http://server2.example.com/metrics/", - }, - Name: "my_webapp", - Method: "GET", - Parameters: map[string]string{ - "httpParam1": "12", - "httpParam2": "the second parameter", - }, +func genMockHttpJson(response string, statusCode int) []*HttpJson { + return []*HttpJson{ + &HttpJson{ + client: mockHTTPClient{responseBody: response, statusCode: statusCode}, + Servers: []string{ + "http://server1.example.com/metrics/", + "http://server2.example.com/metrics/", }, - Service{ - Servers: []string{ - "http://server3.example.com/metrics/", - "http://server4.example.com/metrics/", - }, - Name: "other_webapp", - Method: "POST", - Parameters: map[string]string{ - "httpParam1": "12", - "httpParam2": "the second parameter", - }, - TagKeys: []string{ - "role", - "build", - }, + Name: "my_webapp", + Method: "GET", + Parameters: map[string]string{ + "httpParam1": "12", + "httpParam2": "the second parameter", + }, + }, + &HttpJson{ + client: mockHTTPClient{responseBody: response, statusCode: statusCode}, + Servers: []string{ + "http://server3.example.com/metrics/", + "http://server4.example.com/metrics/", + }, + Name: "other_webapp", + 
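+ // This second mock service uses POST and sets TagKeys so the role/build tags are extracted from the response.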
Method: "POST", + Parameters: map[string]string{ + "httpParam1": "12", + "httpParam2": "the second parameter", + }, + TagKeys: []string{ + "role", + "build", }, }, } @@ -116,28 +122,21 @@ func genMockHttpJson(response string, statusCode int) *HttpJson { func TestHttpJson200(t *testing.T) { httpjson := genMockHttpJson(validJSON, 200) - var acc testutil.Accumulator - err := httpjson.Gather(&acc) - require.NoError(t, err) + for _, service := range httpjson { + var acc testutil.Accumulator + err := service.Gather(&acc) + require.NoError(t, err) + assert.Equal(t, 12, acc.NFields()) + // Set responsetime + for _, p := range acc.Points { + p.Fields["response_time"] = 1.0 + } - assert.Equal(t, 8, len(acc.Points)) - - for _, service := range httpjson.Services { for _, srv := range service.Servers { - require.NoError(t, - acc.ValidateTaggedValue( - fmt.Sprintf("%s_parent_child", service.Name), - 3.0, - map[string]string{"server": srv}, - ), - ) - require.NoError(t, - acc.ValidateTaggedValue( - fmt.Sprintf("%s_integer", service.Name), - 4.0, - map[string]string{"server": srv}, - ), - ) + tags := map[string]string{"server": srv} + mname := "httpjson_" + service.Name + expectedFields["response_time"] = 1.0 + acc.AssertContainsTaggedFields(t, mname, expectedFields, tags) } } } @@ -147,28 +146,22 @@ func TestHttpJson500(t *testing.T) { httpjson := genMockHttpJson(validJSON, 500) var acc testutil.Accumulator - err := httpjson.Gather(&acc) + err := httpjson[0].Gather(&acc) assert.NotNil(t, err) - // 4 error lines for (2 urls) * (2 services) - assert.Equal(t, len(strings.Split(err.Error(), "\n")), 4) - assert.Equal(t, 0, len(acc.Points)) + assert.Equal(t, 0, acc.NFields()) } // Test response to HTTP 405 func TestHttpJsonBadMethod(t *testing.T) { httpjson := genMockHttpJson(validJSON, 200) - httpjson.Services[0].Method = "NOT_A_REAL_METHOD" + httpjson[0].Method = "NOT_A_REAL_METHOD" var acc testutil.Accumulator - err := httpjson.Gather(&acc) + err := httpjson[0].Gather(&acc) assert.NotNil(t, err) - // 2 error lines for (2 urls) * (1 falied service) - assert.Equal(t, len(strings.Split(err.Error(), "\n")), 2) - - // (2 measurements) * (2 servers) * (1 successful service) - assert.Equal(t, 4, len(acc.Points)) + assert.Equal(t, 0, acc.NFields()) } // Test response to malformed JSON @@ -176,12 +169,10 @@ func TestHttpJsonBadJson(t *testing.T) { httpjson := genMockHttpJson(invalidJSON, 200) var acc testutil.Accumulator - err := httpjson.Gather(&acc) + err := httpjson[0].Gather(&acc) assert.NotNil(t, err) - // 4 error lines for (2 urls) * (2 services) - assert.Equal(t, len(strings.Split(err.Error(), "\n")), 4) - assert.Equal(t, 0, len(acc.Points)) + assert.Equal(t, 0, acc.NFields()) } // Test response to empty string as response objectgT @@ -189,34 +180,31 @@ func TestHttpJsonEmptyResponse(t *testing.T) { httpjson := genMockHttpJson(empty, 200) var acc testutil.Accumulator - err := httpjson.Gather(&acc) + err := httpjson[0].Gather(&acc) assert.NotNil(t, err) - // 4 error lines for (2 urls) * (2 services) - assert.Equal(t, len(strings.Split(err.Error(), "\n")), 4) - assert.Equal(t, 0, len(acc.Points)) + assert.Equal(t, 0, acc.NFields()) } // Test that the proper values are ignored or collected func TestHttpJson200Tags(t *testing.T) { httpjson := genMockHttpJson(validJSONTags, 200) - var acc testutil.Accumulator - err := httpjson.Gather(&acc) - require.NoError(t, err) - - assert.Equal(t, 4, len(acc.Points)) - - for _, service := range httpjson.Services { + for _, service := range httpjson { if service.Name == 
"other_webapp" { + var acc testutil.Accumulator + err := service.Gather(&acc) + // Set responsetime + for _, p := range acc.Points { + p.Fields["response_time"] = 1.0 + } + require.NoError(t, err) + assert.Equal(t, 4, acc.NFields()) for _, srv := range service.Servers { - require.NoError(t, - acc.ValidateTaggedValue( - fmt.Sprintf("%s_value", service.Name), - 15.0, - map[string]string{"server": srv, "role": "master", "build": "123"}, - ), - ) + tags := map[string]string{"server": srv, "role": "master", "build": "123"} + fields := map[string]interface{}{"value": float64(15), "response_time": float64(1)} + mname := "httpjson_" + service.Name + acc.AssertContainsTaggedFields(t, mname, fields, tags) } } } diff --git a/plugins/influxdb/README.md b/plugins/inputs/influxdb/README.md similarity index 98% rename from plugins/influxdb/README.md rename to plugins/inputs/influxdb/README.md index 8d4727973..84dc3caf8 100644 --- a/plugins/influxdb/README.md +++ b/plugins/inputs/influxdb/README.md @@ -5,7 +5,7 @@ The influxdb plugin collects InfluxDB-formatted data from JSON endpoints. With a configuration of: ```toml -[[plugins.influxdb]] +[[inputs.influxdb]] urls = [ "http://127.0.0.1:8086/debug/vars", "http://192.168.2.1:8086/debug/vars" diff --git a/plugins/influxdb/influxdb.go b/plugins/inputs/influxdb/influxdb.go similarity index 94% rename from plugins/influxdb/influxdb.go rename to plugins/inputs/influxdb/influxdb.go index a4044364a..e65c8afd2 100644 --- a/plugins/influxdb/influxdb.go +++ b/plugins/inputs/influxdb/influxdb.go @@ -8,7 +8,7 @@ import ( "strings" "sync" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdata/telegraf/plugins/inputs" ) type InfluxDB struct { @@ -32,7 +32,7 @@ func (*InfluxDB) SampleConfig() string { ` } -func (i *InfluxDB) Gather(acc plugins.Accumulator) error { +func (i *InfluxDB) Gather(acc inputs.Accumulator) error { errorChannel := make(chan error, len(i.URLs)) var wg sync.WaitGroup @@ -77,7 +77,7 @@ type point struct { // Returns: // error: Any error that may have occurred func (i *InfluxDB) gatherURL( - acc plugins.Accumulator, + acc inputs.Accumulator, url string, ) error { resp, err := http.Get(url) @@ -140,7 +140,7 @@ func (i *InfluxDB) gatherURL( } func init() { - plugins.Add("influxdb", func() plugins.Plugin { + inputs.Add("influxdb", func() inputs.Input { return &InfluxDB{} }) } diff --git a/plugins/influxdb/influxdb_test.go b/plugins/inputs/influxdb/influxdb_test.go similarity index 65% rename from plugins/influxdb/influxdb_test.go rename to plugins/inputs/influxdb/influxdb_test.go index a6c9af56a..e7b43e7bc 100644 --- a/plugins/influxdb/influxdb_test.go +++ b/plugins/inputs/influxdb/influxdb_test.go @@ -5,8 +5,8 @@ import ( "net/http/httptest" "testing" - "github.com/influxdb/telegraf/plugins/influxdb" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/plugins/inputs/influxdb" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) @@ -72,29 +72,26 @@ func TestBasic(t *testing.T) { require.NoError(t, plugin.Gather(&acc)) require.Len(t, acc.Points, 2) - require.NoError(t, acc.ValidateTaggedFieldsValue( - "foo", - map[string]interface{}{ - // JSON will truncate floats to integer representations. - // Since there's no distinction in JSON, we can't assume it's an int. 
- "i": -1.0, - "f": 0.5, - "b": true, - "s": "string", - }, - map[string]string{ - "id": "ex1", - "url": fakeServer.URL + "/endpoint", - }, - )) - require.NoError(t, acc.ValidateTaggedFieldsValue( - "bar", - map[string]interface{}{ - "x": "x", - }, - map[string]string{ - "id": "ex2", - "url": fakeServer.URL + "/endpoint", - }, - )) + fields := map[string]interface{}{ + // JSON will truncate floats to integer representations. + // Since there's no distinction in JSON, we can't assume it's an int. + "i": -1.0, + "f": 0.5, + "b": true, + "s": "string", + } + tags := map[string]string{ + "id": "ex1", + "url": fakeServer.URL + "/endpoint", + } + acc.AssertContainsTaggedFields(t, "foo", fields, tags) + + fields = map[string]interface{}{ + "x": "x", + } + tags = map[string]string{ + "id": "ex2", + "url": fakeServer.URL + "/endpoint", + } + acc.AssertContainsTaggedFields(t, "bar", fields, tags) } diff --git a/plugins/jolokia/README.md b/plugins/inputs/jolokia/README.md similarity index 100% rename from plugins/jolokia/README.md rename to plugins/inputs/jolokia/README.md diff --git a/plugins/jolokia/jolokia.go b/plugins/inputs/jolokia/jolokia.go similarity index 62% rename from plugins/jolokia/jolokia.go rename to plugins/inputs/jolokia/jolokia.go index ee579433a..7579ecb4a 100644 --- a/plugins/jolokia/jolokia.go +++ b/plugins/inputs/jolokia/jolokia.go @@ -7,9 +7,8 @@ import ( "io/ioutil" "net/http" "net/url" - "strings" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdata/telegraf/plugins/inputs" ) type Server struct { @@ -23,8 +22,6 @@ type Server struct { type Metric struct { Name string Jmx string - Pass []string - Drop []string } type JolokiaClient interface { @@ -44,7 +41,6 @@ type Jolokia struct { Context string Servers []Server Metrics []Metric - Tags map[string]string } func (j *Jolokia) SampleConfig() string { @@ -52,12 +48,8 @@ func (j *Jolokia) SampleConfig() string { # This is the context root used to compose the jolokia url context = "/jolokia/read" - # Tags added to each measurements - [jolokia.tags] - group = "as" - # List of servers exposing jolokia read service - [[plugins.jolokia.servers]] + [[inputs.jolokia.servers]] name = "stable" host = "192.168.103.2" port = "8180" @@ -67,26 +59,9 @@ func (j *Jolokia) SampleConfig() string { # List of metrics collected on above servers # Each metric consists in a name, a jmx path and either a pass or drop slice attributes # This collect all heap memory usage metrics - [[plugins.jolokia.metrics]] + [[inputs.jolokia.metrics]] name = "heap_memory_usage" jmx = "/java.lang:type=Memory/HeapMemoryUsage" - - - # This drops the 'committed' value from Eden space measurement - [[plugins.jolokia.metrics]] - name = "memory_eden" - jmx = "/java.lang:type=MemoryPool,name=PS Eden Space/Usage" - drop = [ "committed" ] - - - # This passes only DaemonThreadCount and ThreadCount - [[plugins.jolokia.metrics]] - name = "heap_threads" - jmx = "/java.lang:type=Threading" - pass = [ - "DaemonThreadCount", - "ThreadCount" - ] ` } @@ -102,10 +77,6 @@ func (j *Jolokia) getAttr(requestUrl *url.URL) (map[string]interface{}, error) { } resp, err := j.jClient.MakeRequest(req) - if err != nil { - return nil, err - } - if err != nil { return nil, err } @@ -137,65 +108,22 @@ func (j *Jolokia) getAttr(requestUrl *url.URL) (map[string]interface{}, error) { return jsonOut, nil } -func (m *Metric) shouldPass(field string) bool { - - if m.Pass != nil { - - for _, pass := range m.Pass { - if strings.HasPrefix(field, pass) { - return true - } - } - - return false - } - 
- if m.Drop != nil { - - for _, drop := range m.Drop { - if strings.HasPrefix(field, drop) { - return false - } - } - - return true - } - - return true -} - -func (m *Metric) filterFields(fields map[string]interface{}) map[string]interface{} { - - for field, _ := range fields { - if !m.shouldPass(field) { - delete(fields, field) - } - } - - return fields -} - -func (j *Jolokia) Gather(acc plugins.Accumulator) error { - +func (j *Jolokia) Gather(acc inputs.Accumulator) error { context := j.Context //"/jolokia/read" servers := j.Servers metrics := j.Metrics - tags := j.Tags - - if tags == nil { - tags = map[string]string{} - } + tags := make(map[string]string) for _, server := range servers { + tags["server"] = server.Name + tags["port"] = server.Port + tags["host"] = server.Host + fields := make(map[string]interface{}) for _, metric := range metrics { measurement := metric.Name jmxPath := metric.Jmx - tags["server"] = server.Name - tags["port"] = server.Port - tags["host"] = server.Host - // Prepare URL requestUrl, err := url.Parse("http://" + server.Host + ":" + server.Port + context + jmxPath) @@ -209,23 +137,27 @@ func (j *Jolokia) Gather(acc plugins.Accumulator) error { out, _ := j.getAttr(requestUrl) if values, ok := out["value"]; ok { - switch values.(type) { + switch t := values.(type) { case map[string]interface{}: - acc.AddFields(measurement, metric.filterFields(values.(map[string]interface{})), tags) + for k, v := range t { + fields[measurement+"_"+k] = v + } case interface{}: - acc.Add(measurement, values.(interface{}), tags) + fields[measurement] = t } } else { - fmt.Printf("Missing key 'value' in '%s' output response\n", requestUrl.String()) + fmt.Printf("Missing key 'value' in '%s' output response\n", + requestUrl.String()) } } + acc.AddFields("jolokia", fields, tags) } return nil } func init() { - plugins.Add("jolokia", func() plugins.Plugin { + inputs.Add("jolokia", func() inputs.Input { return &Jolokia{jClient: &JolokiaClientImpl{client: &http.Client{}}} }) } diff --git a/plugins/jolokia/jolokia_test.go b/plugins/inputs/jolokia/jolokia_test.go similarity index 58% rename from plugins/jolokia/jolokia_test.go rename to plugins/inputs/jolokia/jolokia_test.go index 95df76e7b..63b47ebff 100644 --- a/plugins/jolokia/jolokia_test.go +++ b/plugins/inputs/jolokia/jolokia_test.go @@ -7,7 +7,7 @@ import ( "strings" "testing" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" _ "github.com/stretchr/testify/require" ) @@ -48,7 +48,7 @@ const empty = "" var Servers = []Server{Server{Name: "as1", Host: "127.0.0.1", Port: "8080"}} var HeapMetric = Metric{Name: "heap_memory_usage", Jmx: "/java.lang:type=Memory/HeapMemoryUsage"} -var UsedHeapMetric = Metric{Name: "heap_memory_usage", Jmx: "/java.lang:type=Memory/HeapMemoryUsage", Pass: []string{"used"}} +var UsedHeapMetric = Metric{Name: "heap_memory_usage", Jmx: "/java.lang:type=Memory/HeapMemoryUsage"} type jolokiaClientStub struct { responseBody string @@ -79,7 +79,6 @@ func genJolokiaClientStub(response string, statusCode int, servers []Server, met // Test that the proper values are ignored or collected func TestHttpJsonMultiValue(t *testing.T) { - jolokia := genJolokiaClientStub(validMultiValueJSON, 200, Servers, []Metric{HeapMetric}) var acc testutil.Accumulator @@ -88,58 +87,28 @@ func TestHttpJsonMultiValue(t *testing.T) { assert.Nil(t, err) assert.Equal(t, 1, len(acc.Points)) - assert.True(t, acc.CheckFieldsValue("heap_memory_usage", 
map[string]interface{}{"init": 67108864.0, - "committed": 456130560.0, - "max": 477626368.0, - "used": 203288528.0})) -} - -// Test that the proper values are ignored or collected -func TestHttpJsonMultiValueWithPass(t *testing.T) { - - jolokia := genJolokiaClientStub(validMultiValueJSON, 200, Servers, []Metric{UsedHeapMetric}) - - var acc testutil.Accumulator - err := jolokia.Gather(&acc) - - assert.Nil(t, err) - assert.Equal(t, 1, len(acc.Points)) - - assert.True(t, acc.CheckFieldsValue("heap_memory_usage", map[string]interface{}{"used": 203288528.0})) -} - -// Test that the proper values are ignored or collected -func TestHttpJsonMultiValueTags(t *testing.T) { - - jolokia := genJolokiaClientStub(validMultiValueJSON, 200, Servers, []Metric{UsedHeapMetric}) - - var acc testutil.Accumulator - err := jolokia.Gather(&acc) - - assert.Nil(t, err) - assert.Equal(t, 1, len(acc.Points)) - assert.NoError(t, acc.ValidateTaggedFieldsValue("heap_memory_usage", map[string]interface{}{"used": 203288528.0}, map[string]string{"host": "127.0.0.1", "port": "8080", "server": "as1"})) -} - -// Test that the proper values are ignored or collected -func TestHttpJsonSingleValueTags(t *testing.T) { - - jolokia := genJolokiaClientStub(validSingleValueJSON, 200, Servers, []Metric{UsedHeapMetric}) - - var acc testutil.Accumulator - err := jolokia.Gather(&acc) - - assert.Nil(t, err) - assert.Equal(t, 1, len(acc.Points)) - assert.NoError(t, acc.ValidateTaggedFieldsValue("heap_memory_usage", map[string]interface{}{"value": 209274376.0}, map[string]string{"host": "127.0.0.1", "port": "8080", "server": "as1"})) + fields := map[string]interface{}{ + "heap_memory_usage_init": 67108864.0, + "heap_memory_usage_committed": 456130560.0, + "heap_memory_usage_max": 477626368.0, + "heap_memory_usage_used": 203288528.0, + } + tags := map[string]string{ + "host": "127.0.0.1", + "port": "8080", + "server": "as1", + } + acc.AssertContainsTaggedFields(t, "jolokia", fields, tags) } // Test that the proper values are ignored or collected func TestHttpJsonOn404(t *testing.T) { - jolokia := genJolokiaClientStub(validMultiValueJSON, 404, Servers, []Metric{UsedHeapMetric}) + jolokia := genJolokiaClientStub(validMultiValueJSON, 404, Servers, + []Metric{UsedHeapMetric}) var acc testutil.Accumulator + acc.SetDebug(true) err := jolokia.Gather(&acc) assert.Nil(t, err) diff --git a/plugins/kafka_consumer/README.md b/plugins/inputs/kafka_consumer/README.md similarity index 100% rename from plugins/kafka_consumer/README.md rename to plugins/inputs/kafka_consumer/README.md diff --git a/plugins/kafka_consumer/kafka_consumer.go b/plugins/inputs/kafka_consumer/kafka_consumer.go similarity index 95% rename from plugins/kafka_consumer/kafka_consumer.go rename to plugins/inputs/kafka_consumer/kafka_consumer.go index f47e7e92c..a0f1d3d11 100644 --- a/plugins/kafka_consumer/kafka_consumer.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer.go @@ -5,8 +5,8 @@ import ( "strings" "sync" - "github.com/influxdb/influxdb/models" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdata/influxdb/models" + "github.com/influxdata/telegraf/plugins/inputs" "github.com/Shopify/sarama" "github.com/wvanbergen/kafka/consumergroup" @@ -148,7 +148,7 @@ func (k *Kafka) Stop() { } } -func (k *Kafka) Gather(acc plugins.Accumulator) error { +func (k *Kafka) Gather(acc inputs.Accumulator) error { k.Lock() defer k.Unlock() npoints := len(k.pointChan) @@ -160,7 +160,7 @@ func (k *Kafka) Gather(acc plugins.Accumulator) error { } func init() { - 
plugins.Add("kafka_consumer", func() plugins.Plugin { + inputs.Add("kafka_consumer", func() inputs.Input { return &Kafka{} }) } diff --git a/plugins/kafka_consumer/kafka_consumer_integration_test.go b/plugins/inputs/kafka_consumer/kafka_consumer_integration_test.go similarity index 98% rename from plugins/kafka_consumer/kafka_consumer_integration_test.go rename to plugins/inputs/kafka_consumer/kafka_consumer_integration_test.go index 9f554d9ab..0611467ff 100644 --- a/plugins/kafka_consumer/kafka_consumer_integration_test.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer_integration_test.go @@ -6,7 +6,7 @@ import ( "time" "github.com/Shopify/sarama" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) diff --git a/plugins/kafka_consumer/kafka_consumer_test.go b/plugins/inputs/kafka_consumer/kafka_consumer_test.go similarity index 92% rename from plugins/kafka_consumer/kafka_consumer_test.go rename to plugins/inputs/kafka_consumer/kafka_consumer_test.go index eb0473361..560e130c0 100644 --- a/plugins/kafka_consumer/kafka_consumer_test.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer_test.go @@ -4,8 +4,8 @@ import ( "testing" "time" - "github.com/influxdb/influxdb/models" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/influxdb/models" + "github.com/influxdata/telegraf/testutil" "github.com/Shopify/sarama" "github.com/stretchr/testify/assert" @@ -85,7 +85,8 @@ func TestRunParserAndGather(t *testing.T) { k.Gather(&acc) assert.Equal(t, len(acc.Points), 1) - assert.True(t, acc.CheckValue("cpu_load_short", 23422.0)) + acc.AssertContainsFields(t, "cpu_load_short", + map[string]interface{}{"value": float64(23422)}) } func saramaMsg(val string) *sarama.ConsumerMessage { diff --git a/plugins/leofs/leofs.go b/plugins/inputs/leofs/leofs.go similarity index 94% rename from plugins/leofs/leofs.go rename to plugins/inputs/leofs/leofs.go index 32e5ee99a..f4dd314b7 100644 --- a/plugins/leofs/leofs.go +++ b/plugins/inputs/leofs/leofs.go @@ -3,7 +3,7 @@ package leofs import ( "bufio" "fmt" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdata/telegraf/plugins/inputs" "net/url" "os/exec" "strconv" @@ -146,7 +146,7 @@ func (l *LeoFS) Description() string { return "Read metrics from a LeoFS Server via SNMP" } -func (l *LeoFS) Gather(acc plugins.Accumulator) error { +func (l *LeoFS) Gather(acc inputs.Accumulator) error { if len(l.Servers) == 0 { l.gatherServer(defaultEndpoint, ServerTypeManagerMaster, acc) return nil @@ -176,7 +176,7 @@ func (l *LeoFS) Gather(acc plugins.Accumulator) error { return outerr } -func (l *LeoFS) gatherServer(endpoint string, serverType ServerType, acc plugins.Accumulator) error { +func (l *LeoFS) gatherServer(endpoint string, serverType ServerType, acc inputs.Accumulator) error { cmd := exec.Command("snmpwalk", "-v2c", "-cpublic", endpoint, oid) stdout, err := cmd.StdoutPipe() if err != nil { @@ -197,6 +197,8 @@ func (l *LeoFS) gatherServer(endpoint string, serverType ServerType, acc plugins "node": nodeNameTrimmed, } i := 0 + + fields := make(map[string]interface{}) for scanner.Scan() { key := KeyMapping[serverType][i] val, err := retrieveTokenAfterColon(scanner.Text()) @@ -207,9 +209,10 @@ func (l *LeoFS) gatherServer(endpoint string, serverType ServerType, acc plugins if err != nil { return fmt.Errorf("Unable to parse the value:%s, err:%s", val, err) } - acc.Add(key, fVal, tags) + fields[key] = fVal i++ } + acc.AddFields("leofs", 
fields, tags) return nil } @@ -222,7 +225,7 @@ func retrieveTokenAfterColon(line string) (string, error) { } func init() { - plugins.Add("leofs", func() plugins.Plugin { + inputs.Add("leofs", func() inputs.Input { return &LeoFS{} }) } diff --git a/plugins/leofs/leofs_test.go b/plugins/inputs/leofs/leofs_test.go similarity index 97% rename from plugins/leofs/leofs_test.go rename to plugins/inputs/leofs/leofs_test.go index 62a9f3fa3..292cd15d0 100644 --- a/plugins/leofs/leofs_test.go +++ b/plugins/inputs/leofs/leofs_test.go @@ -1,7 +1,7 @@ package leofs import ( - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "io/ioutil" @@ -129,7 +129,6 @@ func buildFakeSNMPCmd(src string) { } func testMain(t *testing.T, code string, endpoint string, serverType ServerType) { - // Build the fake snmpwalk for test src := makeFakeSNMPSrc(code) defer os.Remove(src) @@ -145,6 +144,7 @@ func testMain(t *testing.T, code string, endpoint string, serverType ServerType) } var acc testutil.Accumulator + acc.SetDebug(true) err := l.Gather(&acc) require.NoError(t, err) @@ -152,7 +152,7 @@ func testMain(t *testing.T, code string, endpoint string, serverType ServerType) floatMetrics := KeyMapping[serverType] for _, metric := range floatMetrics { - assert.True(t, acc.HasFloatValue(metric), metric) + assert.True(t, acc.HasFloatField("leofs", metric), metric) } } diff --git a/plugins/lustre2/lustre2.go b/plugins/inputs/lustre2/lustre2.go similarity index 81% rename from plugins/lustre2/lustre2.go rename to plugins/inputs/lustre2/lustre2.go index 57217ec06..d6266de73 100644 --- a/plugins/lustre2/lustre2.go +++ b/plugins/inputs/lustre2/lustre2.go @@ -13,8 +13,8 @@ import ( "strconv" "strings" - "github.com/influxdb/telegraf/internal" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/inputs" ) // Lustre proc files can change between versions, so we want to future-proof @@ -22,6 +22,9 @@ import ( type Lustre2 struct { Ost_procfiles []string Mds_procfiles []string + + // allFields maps and OST name to the metric fields associated with that OST + allFields map[string]map[string]interface{} } var sampleConfig = ` @@ -126,7 +129,7 @@ var wanted_mds_fields = []*mapping{ }, } -func (l *Lustre2) GetLustreProcStats(fileglob string, wanted_fields []*mapping, acc plugins.Accumulator) error { +func (l *Lustre2) GetLustreProcStats(fileglob string, wanted_fields []*mapping, acc inputs.Accumulator) error { files, err := filepath.Glob(fileglob) if err != nil { return err @@ -140,8 +143,11 @@ func (l *Lustre2) GetLustreProcStats(fileglob string, wanted_fields []*mapping, */ path := strings.Split(file, "/") name := path[len(path)-2] - tags := map[string]string{ - "name": name, + var fields map[string]interface{} + fields, ok := l.allFields[name] + if !ok { + fields = make(map[string]interface{}) + l.allFields[name] = fields } lines, err := internal.ReadLines(file) @@ -150,18 +156,17 @@ func (l *Lustre2) GetLustreProcStats(fileglob string, wanted_fields []*mapping, } for _, line := range lines { - fields := strings.Fields(line) - + parts := strings.Fields(line) for _, wanted := range wanted_fields { var data uint64 - if fields[0] == wanted.inProc { + if parts[0] == wanted.inProc { wanted_field := wanted.field // if not set, assume field[1]. 
Shouldn't be field[0], as // that's a string if wanted_field == 0 { wanted_field = 1 } - data, err = strconv.ParseUint((fields[wanted_field]), 10, 64) + data, err = strconv.ParseUint((parts[wanted_field]), 10, 64) if err != nil { return err } @@ -169,8 +174,7 @@ func (l *Lustre2) GetLustreProcStats(fileglob string, wanted_fields []*mapping, if wanted.reportAs != "" { report_name = wanted.reportAs } - acc.Add(report_name, data, tags) - + fields[report_name] = data } } } @@ -189,16 +193,19 @@ func (l *Lustre2) Description() string { } // Gather reads stats from all lustre targets -func (l *Lustre2) Gather(acc plugins.Accumulator) error { +func (l *Lustre2) Gather(acc inputs.Accumulator) error { + l.allFields = make(map[string]map[string]interface{}) if len(l.Ost_procfiles) == 0 { // read/write bytes are in obdfilter//stats - err := l.GetLustreProcStats("/proc/fs/lustre/obdfilter/*/stats", wanted_ost_fields, acc) + err := l.GetLustreProcStats("/proc/fs/lustre/obdfilter/*/stats", + wanted_ost_fields, acc) if err != nil { return err } // cache counters are in osd-ldiskfs//stats - err = l.GetLustreProcStats("/proc/fs/lustre/osd-ldiskfs/*/stats", wanted_ost_fields, acc) + err = l.GetLustreProcStats("/proc/fs/lustre/osd-ldiskfs/*/stats", + wanted_ost_fields, acc) if err != nil { return err } @@ -206,7 +213,8 @@ func (l *Lustre2) Gather(acc plugins.Accumulator) error { if len(l.Mds_procfiles) == 0 { // Metadata server stats - err := l.GetLustreProcStats("/proc/fs/lustre/mdt/*/md_stats", wanted_mds_fields, acc) + err := l.GetLustreProcStats("/proc/fs/lustre/mdt/*/md_stats", + wanted_mds_fields, acc) if err != nil { return err } @@ -225,11 +233,18 @@ func (l *Lustre2) Gather(acc plugins.Accumulator) error { } } + for name, fields := range l.allFields { + tags := map[string]string{ + "name": name, + } + acc.AddFields("lustre2", fields, tags) + } + return nil } func init() { - plugins.Add("lustre2", func() plugins.Plugin { + inputs.Add("lustre2", func() inputs.Input { return &Lustre2{} }) } diff --git a/plugins/lustre2/lustre2_test.go b/plugins/inputs/lustre2/lustre2_test.go similarity index 77% rename from plugins/lustre2/lustre2_test.go rename to plugins/inputs/lustre2/lustre2_test.go index 850a4ff32..9e560df2c 100644 --- a/plugins/lustre2/lustre2_test.go +++ b/plugins/inputs/lustre2/lustre2_test.go @@ -5,8 +5,7 @@ import ( "os" "testing" - "github.com/influxdb/telegraf/testutil" - "github.com/stretchr/testify/assert" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) @@ -58,11 +57,6 @@ samedir_rename 259625 samples [reqs] crossdir_rename 369571 samples [reqs] ` -type metrics struct { - name string - value uint64 -} - func TestLustre2GeneratesMetrics(t *testing.T) { tempdir := os.TempDir() + "/telegraf/proc/fs/lustre/" @@ -103,41 +97,33 @@ func TestLustre2GeneratesMetrics(t *testing.T) { "name": ost_name, } - intMetrics := []*metrics{ - { - name: "write_bytes", - value: 15201500833981, - }, - { - name: "read_bytes", - value: 78026117632000, - }, - { - name: "write_calls", - value: 71893382, - }, - { - name: "read_calls", - value: 203238095, - }, - { - name: "cache_hit", - value: 7393729777, - }, - { - name: "cache_access", - value: 19047063027, - }, - { - name: "cache_miss", - value: 11653333250, - }, + fields := map[string]interface{}{ + "cache_access": uint64(19047063027), + "cache_hit": uint64(7393729777), + "cache_miss": uint64(11653333250), + "close": uint64(873243496), + "crossdir_rename": uint64(369571), + "getattr": uint64(1503663097), + "getxattr": 
uint64(6145349681), + "link": uint64(445), + "mkdir": uint64(705499), + "mknod": uint64(349042), + "open": uint64(1024577037), + "read_bytes": uint64(78026117632000), + "read_calls": uint64(203238095), + "rename": uint64(629196), + "rmdir": uint64(227434), + "samedir_rename": uint64(259625), + "setattr": uint64(1898364), + "setxattr": uint64(83969), + "statfs": uint64(2916320), + "sync": uint64(434081), + "unlink": uint64(3549417), + "write_bytes": uint64(15201500833981), + "write_calls": uint64(71893382), } - for _, metric := range intMetrics { - assert.True(t, acc.HasUIntValue(metric.name), metric.name) - assert.True(t, acc.CheckTaggedValue(metric.name, metric.value, tags)) - } + acc.AssertContainsTaggedFields(t, "lustre2", fields, tags) err = os.RemoveAll(os.TempDir() + "/telegraf") require.NoError(t, err) diff --git a/plugins/mailchimp/chimp_api.go b/plugins/inputs/mailchimp/chimp_api.go similarity index 100% rename from plugins/mailchimp/chimp_api.go rename to plugins/inputs/mailchimp/chimp_api.go diff --git a/plugins/inputs/mailchimp/mailchimp.go b/plugins/inputs/mailchimp/mailchimp.go new file mode 100644 index 000000000..284ac61e1 --- /dev/null +++ b/plugins/inputs/mailchimp/mailchimp.go @@ -0,0 +1,116 @@ +package mailchimp + +import ( + "fmt" + "time" + + "github.com/influxdata/telegraf/plugins/inputs" +) + +type MailChimp struct { + api *ChimpAPI + + ApiKey string + DaysOld int + CampaignId string +} + +var sampleConfig = ` + # MailChimp API key + # get from https://admin.mailchimp.com/account/api/ + api_key = "" # required + # Reports for campaigns sent more than days_old ago will not be collected. + # 0 means collect all. + days_old = 0 + # Campaign ID to get, if empty gets all campaigns, this option overrides days_old + # campaign_id = "" +` + +func (m *MailChimp) SampleConfig() string { + return sampleConfig +} + +func (m *MailChimp) Description() string { + return "Gathers metrics from the /3.0/reports MailChimp API" +} + +func (m *MailChimp) Gather(acc inputs.Accumulator) error { + if m.api == nil { + m.api = NewChimpAPI(m.ApiKey) + } + m.api.Debug = false + + if m.CampaignId == "" { + since := "" + if m.DaysOld > 0 { + now := time.Now() + d, _ := time.ParseDuration(fmt.Sprintf("%dh", 24*m.DaysOld)) + since = now.Add(-d).Format(time.RFC3339) + } + + reports, err := m.api.GetReports(ReportsParams{ + SinceSendTime: since, + }) + if err != nil { + return err + } + now := time.Now() + + for _, report := range reports.Reports { + gatherReport(acc, report, now) + } + } else { + report, err := m.api.GetReport(m.CampaignId) + if err != nil { + return err + } + now := time.Now() + gatherReport(acc, report, now) + } + + return nil +} + +func gatherReport(acc inputs.Accumulator, report Report, now time.Time) { + tags := make(map[string]string) + tags["id"] = report.ID + tags["campaign_title"] = report.CampaignTitle + fields := map[string]interface{}{ + "emails_sent": report.EmailsSent, + "abuse_reports": report.AbuseReports, + "unsubscribed": report.Unsubscribed, + "hard_bounces": report.Bounces.HardBounces, + "soft_bounces": report.Bounces.SoftBounces, + "syntax_errors": report.Bounces.SyntaxErrors, + "forwards_count": report.Forwards.ForwardsCount, + "forwards_opens": report.Forwards.ForwardsOpens, + "opens_total": report.Opens.OpensTotal, + "unique_opens": report.Opens.UniqueOpens, + "open_rate": report.Opens.OpenRate, + "clicks_total": report.Clicks.ClicksTotal, + "unique_clicks": report.Clicks.UniqueClicks, + "unique_subscriber_clicks": report.Clicks.UniqueSubscriberClicks, + 
"click_rate": report.Clicks.ClickRate, + "facebook_recipient_likes": report.FacebookLikes.RecipientLikes, + "facebook_unique_likes": report.FacebookLikes.UniqueLikes, + "facebook_likes": report.FacebookLikes.FacebookLikes, + "industry_type": report.IndustryStats.Type, + "industry_open_rate": report.IndustryStats.OpenRate, + "industry_click_rate": report.IndustryStats.ClickRate, + "industry_bounce_rate": report.IndustryStats.BounceRate, + "industry_unopen_rate": report.IndustryStats.UnopenRate, + "industry_unsub_rate": report.IndustryStats.UnsubRate, + "industry_abuse_rate": report.IndustryStats.AbuseRate, + "list_stats_sub_rate": report.ListStats.SubRate, + "list_stats_unsub_rate": report.ListStats.UnsubRate, + "list_stats_open_rate": report.ListStats.OpenRate, + "list_stats_click_rate": report.ListStats.ClickRate, + } + acc.AddFields("mailchimp", fields, tags, now) +} + +func init() { + inputs.Add("mailchimp", func() inputs.Input { + return &MailChimp{} + }) +} diff --git a/plugins/mailchimp/mailchimp_test.go b/plugins/inputs/mailchimp/mailchimp_test.go similarity index 84% rename from plugins/mailchimp/mailchimp_test.go rename to plugins/inputs/mailchimp/mailchimp_test.go index bd800f656..0c4dab56d 100644 --- a/plugins/mailchimp/mailchimp_test.go +++ b/plugins/inputs/mailchimp/mailchimp_test.go @@ -7,9 +7,8 @@ import ( "net/url" "testing" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -42,67 +41,38 @@ func TestMailChimpGatherReports(t *testing.T) { tags["id"] = "42694e9e57" tags["campaign_title"] = "Freddie's Jokes Vol. 1" - testInts := []struct { - measurement string - value int - }{ - {"emails_sent", 200}, - {"abuse_reports", 0}, - {"unsubscribed", 2}, - {"hard_bounces", 0}, - {"soft_bounces", 2}, - {"syntax_errors", 0}, - {"forwards_count", 0}, - {"forwards_opens", 0}, - {"opens_total", 186}, - {"unique_opens", 100}, - {"clicks_total", 42}, - {"unique_clicks", 400}, - {"unique_subscriber_clicks", 42}, - {"facebook_recipient_likes", 5}, - {"facebook_unique_likes", 8}, - {"facebook_likes", 42}, - } - for _, test := range testInts { - assert.True(t, acc.CheckTaggedValue(test.measurement, test.value, tags), - fmt.Sprintf("Measurement: %v, value: %v, tags: %v not found", - test.measurement, test.value, tags)) - } - - testFloats := []struct { - measurement string - value float64 - }{ - {"open_rate", 42}, - {"click_rate", 42}, - {"industry_open_rate", 0.17076777144396}, - {"industry_click_rate", 0.027431311866951}, - {"industry_bounce_rate", 0.0063767751251474}, - {"industry_unopen_rate", 0.82285545343089}, - {"industry_unsub_rate", 0.001436957032815}, - {"industry_abuse_rate", 0.00021111996110887}, - {"list_stats_sub_rate", 10}, - {"list_stats_unsub_rate", 20}, - {"list_stats_open_rate", 42}, - {"list_stats_click_rate", 42}, - } - for _, test := range testFloats { - assert.True(t, acc.CheckTaggedValue(test.measurement, test.value, tags), - fmt.Sprintf("Measurement: %v, value: %v, tags: %v not found", - test.measurement, test.value, tags)) - } - - testStrings := []struct { - measurement string - value string - }{ - {"industry_type", "Social Networks and Online Communities"}, - } - for _, test := range testStrings { - assert.True(t, acc.CheckTaggedValue(test.measurement, test.value, tags), - fmt.Sprintf("Measurement: %v, value: %v, tags: %v not found", - test.measurement, test.value, tags)) + fields := map[string]interface{}{ + "emails_sent": int(200), + "abuse_reports": 
int(0), + "unsubscribed": int(2), + "hard_bounces": int(0), + "soft_bounces": int(2), + "syntax_errors": int(0), + "forwards_count": int(0), + "forwards_opens": int(0), + "opens_total": int(186), + "unique_opens": int(100), + "clicks_total": int(42), + "unique_clicks": int(400), + "unique_subscriber_clicks": int(42), + "facebook_recipient_likes": int(5), + "facebook_unique_likes": int(8), + "facebook_likes": int(42), + "open_rate": float64(42), + "click_rate": float64(42), + "industry_open_rate": float64(0.17076777144396), + "industry_click_rate": float64(0.027431311866951), + "industry_bounce_rate": float64(0.0063767751251474), + "industry_unopen_rate": float64(0.82285545343089), + "industry_unsub_rate": float64(0.001436957032815), + "industry_abuse_rate": float64(0.00021111996110887), + "list_stats_sub_rate": float64(10), + "list_stats_unsub_rate": float64(20), + "list_stats_open_rate": float64(42), + "list_stats_click_rate": float64(42), + "industry_type": "Social Networks and Online Communities", } + acc.AssertContainsTaggedFields(t, "mailchimp", fields, tags) } func TestMailChimpGatherReport(t *testing.T) { @@ -135,67 +105,39 @@ func TestMailChimpGatherReport(t *testing.T) { tags["id"] = "42694e9e57" tags["campaign_title"] = "Freddie's Jokes Vol. 1" - testInts := []struct { - measurement string - value int - }{ - {"emails_sent", 200}, - {"abuse_reports", 0}, - {"unsubscribed", 2}, - {"hard_bounces", 0}, - {"soft_bounces", 2}, - {"syntax_errors", 0}, - {"forwards_count", 0}, - {"forwards_opens", 0}, - {"opens_total", 186}, - {"unique_opens", 100}, - {"clicks_total", 42}, - {"unique_clicks", 400}, - {"unique_subscriber_clicks", 42}, - {"facebook_recipient_likes", 5}, - {"facebook_unique_likes", 8}, - {"facebook_likes", 42}, - } - for _, test := range testInts { - assert.True(t, acc.CheckTaggedValue(test.measurement, test.value, tags), - fmt.Sprintf("Measurement: %v, value: %v, tags: %v not found", - test.measurement, test.value, tags)) + fields := map[string]interface{}{ + "emails_sent": int(200), + "abuse_reports": int(0), + "unsubscribed": int(2), + "hard_bounces": int(0), + "soft_bounces": int(2), + "syntax_errors": int(0), + "forwards_count": int(0), + "forwards_opens": int(0), + "opens_total": int(186), + "unique_opens": int(100), + "clicks_total": int(42), + "unique_clicks": int(400), + "unique_subscriber_clicks": int(42), + "facebook_recipient_likes": int(5), + "facebook_unique_likes": int(8), + "facebook_likes": int(42), + "open_rate": float64(42), + "click_rate": float64(42), + "industry_open_rate": float64(0.17076777144396), + "industry_click_rate": float64(0.027431311866951), + "industry_bounce_rate": float64(0.0063767751251474), + "industry_unopen_rate": float64(0.82285545343089), + "industry_unsub_rate": float64(0.001436957032815), + "industry_abuse_rate": float64(0.00021111996110887), + "list_stats_sub_rate": float64(10), + "list_stats_unsub_rate": float64(20), + "list_stats_open_rate": float64(42), + "list_stats_click_rate": float64(42), + "industry_type": "Social Networks and Online Communities", } + acc.AssertContainsTaggedFields(t, "mailchimp", fields, tags) - testFloats := []struct { - measurement string - value float64 - }{ - {"open_rate", 42}, - {"click_rate", 42}, - {"industry_open_rate", 0.17076777144396}, - {"industry_click_rate", 0.027431311866951}, - {"industry_bounce_rate", 0.0063767751251474}, - {"industry_unopen_rate", 0.82285545343089}, - {"industry_unsub_rate", 0.001436957032815}, - {"industry_abuse_rate", 0.00021111996110887}, - {"list_stats_sub_rate", 
10}, - {"list_stats_unsub_rate", 20}, - {"list_stats_open_rate", 42}, - {"list_stats_click_rate", 42}, - } - for _, test := range testFloats { - assert.True(t, acc.CheckTaggedValue(test.measurement, test.value, tags), - fmt.Sprintf("Measurement: %v, value: %v, tags: %v not found", - test.measurement, test.value, tags)) - } - - testStrings := []struct { - measurement string - value string - }{ - {"industry_type", "Social Networks and Online Communities"}, - } - for _, test := range testStrings { - assert.True(t, acc.CheckTaggedValue(test.measurement, test.value, tags), - fmt.Sprintf("Measurement: %v, value: %v, tags: %v not found", - test.measurement, test.value, tags)) - } } func TestMailChimpGatherError(t *testing.T) { diff --git a/plugins/memcached/memcached.go b/plugins/inputs/memcached/memcached.go similarity index 91% rename from plugins/memcached/memcached.go rename to plugins/inputs/memcached/memcached.go index 9919b0c24..078f05aa3 100644 --- a/plugins/memcached/memcached.go +++ b/plugins/inputs/memcached/memcached.go @@ -8,7 +8,7 @@ import ( "strconv" "time" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdata/telegraf/plugins/inputs" ) // Memcached is a memcached plugin @@ -69,7 +69,7 @@ func (m *Memcached) Description() string { } // Gather reads stats from all configured servers accumulates stats -func (m *Memcached) Gather(acc plugins.Accumulator) error { +func (m *Memcached) Gather(acc inputs.Accumulator) error { if len(m.Servers) == 0 && len(m.UnixSockets) == 0 { return m.gatherServer(":11211", false, acc) } @@ -92,7 +92,7 @@ func (m *Memcached) Gather(acc plugins.Accumulator) error { func (m *Memcached) gatherServer( address string, unix bool, - acc plugins.Accumulator, + acc inputs.Accumulator, ) error { var conn net.Conn if unix { @@ -137,16 +137,18 @@ func (m *Memcached) gatherServer( tags := map[string]string{"server": address} // Process values + fields := make(map[string]interface{}) for _, key := range sendMetrics { if value, ok := values[key]; ok { // Mostly it is the number - if iValue, errParse := strconv.ParseInt(value, 10, 64); errParse != nil { - acc.Add(key, value, tags) + if iValue, errParse := strconv.ParseInt(value, 10, 64); errParse == nil { + fields[key] = iValue } else { - acc.Add(key, iValue, tags) + fields[key] = value } } } + acc.AddFields("memcached", fields, tags) return nil } @@ -176,7 +178,7 @@ func parseResponse(r *bufio.Reader) (map[string]string, error) { } func init() { - plugins.Add("memcached", func() plugins.Plugin { + inputs.Add("memcached", func() inputs.Input { return &Memcached{} }) } diff --git a/plugins/memcached/memcached_test.go b/plugins/inputs/memcached/memcached_test.go similarity index 97% rename from plugins/memcached/memcached_test.go rename to plugins/inputs/memcached/memcached_test.go index 05ff669b3..210adffdb 100644 --- a/plugins/memcached/memcached_test.go +++ b/plugins/inputs/memcached/memcached_test.go @@ -5,7 +5,7 @@ import ( "strings" "testing" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -32,7 +32,7 @@ func TestMemcachedGeneratesMetrics(t *testing.T) { "bytes_read", "bytes_written", "threads", "conn_yields"} for _, metric := range intMetrics { - assert.True(t, acc.HasIntValue(metric), metric) + assert.True(t, acc.HasIntField("memcached", metric), metric) } } diff --git a/plugins/mock_Plugin.go b/plugins/inputs/mock_Plugin.go similarity index 92% rename from plugins/mock_Plugin.go rename 
to plugins/inputs/mock_Plugin.go index 492384b25..87dd14884 100644 --- a/plugins/mock_Plugin.go +++ b/plugins/inputs/mock_Plugin.go @@ -1,4 +1,4 @@ -package plugins +package inputs import "github.com/stretchr/testify/mock" diff --git a/plugins/mongodb/mongodb.go b/plugins/inputs/mongodb/mongodb.go similarity index 90% rename from plugins/mongodb/mongodb.go rename to plugins/inputs/mongodb/mongodb.go index 87882a341..ce73c3a14 100644 --- a/plugins/mongodb/mongodb.go +++ b/plugins/inputs/mongodb/mongodb.go @@ -9,7 +9,7 @@ import ( "sync" "time" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdata/telegraf/plugins/inputs" "gopkg.in/mgo.v2" ) @@ -45,7 +45,7 @@ var localhost = &url.URL{Host: "127.0.0.1:27017"} // Reads stats from all configured servers accumulates stats. // Returns one of the errors encountered while gather stats (if any). -func (m *MongoDB) Gather(acc plugins.Accumulator) error { +func (m *MongoDB) Gather(acc inputs.Accumulator) error { if len(m.Servers) == 0 { m.gatherServer(m.getMongoServer(localhost), acc) return nil @@ -88,7 +88,7 @@ func (m *MongoDB) getMongoServer(url *url.URL) *Server { return m.mongos[url.Host] } -func (m *MongoDB) gatherServer(server *Server, acc plugins.Accumulator) error { +func (m *MongoDB) gatherServer(server *Server, acc inputs.Accumulator) error { if server.Session == nil { var dialAddrs []string if server.Url.User != nil { @@ -98,7 +98,8 @@ func (m *MongoDB) gatherServer(server *Server, acc plugins.Accumulator) error { } dialInfo, err := mgo.ParseURL(dialAddrs[0]) if err != nil { - return fmt.Errorf("Unable to parse URL (%s), %s\n", dialAddrs[0], err.Error()) + return fmt.Errorf("Unable to parse URL (%s), %s\n", + dialAddrs[0], err.Error()) } dialInfo.Direct = true dialInfo.Timeout = time.Duration(10) * time.Second @@ -137,7 +138,7 @@ func (m *MongoDB) gatherServer(server *Server, acc plugins.Accumulator) error { } func init() { - plugins.Add("mongodb", func() plugins.Plugin { + inputs.Add("mongodb", func() inputs.Input { return &MongoDB{ mongos: make(map[string]*Server), } diff --git a/plugins/mongodb/mongodb_data.go b/plugins/inputs/mongodb/mongodb_data.go similarity index 77% rename from plugins/mongodb/mongodb_data.go rename to plugins/inputs/mongodb/mongodb_data.go index fda1843bb..c0c68c330 100644 --- a/plugins/mongodb/mongodb_data.go +++ b/plugins/inputs/mongodb/mongodb_data.go @@ -5,11 +5,12 @@ import ( "reflect" "strconv" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdata/telegraf/plugins/inputs" ) type MongodbData struct { StatLine *StatLine + Fields map[string]interface{} Tags map[string]string } @@ -20,6 +21,7 @@ func NewMongodbData(statLine *StatLine, tags map[string]string) *MongodbData { return &MongodbData{ StatLine: statLine, Tags: tags, + Fields: make(map[string]interface{}), } } @@ -63,38 +65,44 @@ var WiredTigerStats = map[string]string{ "percent_cache_used": "CacheUsedPercent", } -func (d *MongodbData) AddDefaultStats(acc plugins.Accumulator) { +func (d *MongodbData) AddDefaultStats() { statLine := reflect.ValueOf(d.StatLine).Elem() - d.addStat(acc, statLine, DefaultStats) + d.addStat(statLine, DefaultStats) if d.StatLine.NodeType != "" { - d.addStat(acc, statLine, DefaultReplStats) + d.addStat(statLine, DefaultReplStats) } if d.StatLine.StorageEngine == "mmapv1" { - d.addStat(acc, statLine, MmapStats) + d.addStat(statLine, MmapStats) } else if d.StatLine.StorageEngine == "wiredTiger" { for key, value := range WiredTigerStats { val := statLine.FieldByName(value).Interface() percentVal := 
fmt.Sprintf("%.1f", val.(float64)*100) floatVal, _ := strconv.ParseFloat(percentVal, 64) - d.add(acc, key, floatVal) + d.add(key, floatVal) } } } -func (d *MongodbData) addStat(acc plugins.Accumulator, statLine reflect.Value, stats map[string]string) { +func (d *MongodbData) addStat( + statLine reflect.Value, + stats map[string]string, +) { for key, value := range stats { val := statLine.FieldByName(value).Interface() - d.add(acc, key, val) + d.add(key, val) } } -func (d *MongodbData) add(acc plugins.Accumulator, key string, val interface{}) { +func (d *MongodbData) add(key string, val interface{}) { + d.Fields[key] = val +} + +func (d *MongodbData) flush(acc inputs.Accumulator) { acc.AddFields( - key, - map[string]interface{}{ - "value": val, - }, + "mongodb", + d.Fields, d.Tags, d.StatLine.Time, ) + d.Fields = make(map[string]interface{}) } diff --git a/plugins/mongodb/mongodb_data_test.go b/plugins/inputs/mongodb/mongodb_data_test.go similarity index 54% rename from plugins/mongodb/mongodb_data_test.go rename to plugins/inputs/mongodb/mongodb_data_test.go index 9ee3f9f48..3166ab018 100644 --- a/plugins/mongodb/mongodb_data_test.go +++ b/plugins/inputs/mongodb/mongodb_data_test.go @@ -4,9 +4,8 @@ import ( "testing" "time" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) var tags = make(map[string]string) @@ -37,10 +36,11 @@ func TestAddNonReplStats(t *testing.T) { ) var acc testutil.Accumulator - d.AddDefaultStats(&acc) + d.AddDefaultStats() + d.flush(&acc) for key, _ := range DefaultStats { - assert.True(t, acc.HasIntValue(key)) + assert.True(t, acc.HasIntField("mongodb", key)) } } @@ -57,10 +57,11 @@ func TestAddReplStats(t *testing.T) { var acc testutil.Accumulator - d.AddDefaultStats(&acc) + d.AddDefaultStats() + d.flush(&acc) for key, _ := range MmapStats { - assert.True(t, acc.HasIntValue(key)) + assert.True(t, acc.HasIntField("mongodb", key)) } } @@ -76,10 +77,11 @@ func TestAddWiredTigerStats(t *testing.T) { var acc testutil.Accumulator - d.AddDefaultStats(&acc) + d.AddDefaultStats() + d.flush(&acc) for key, _ := range WiredTigerStats { - assert.True(t, acc.HasFloatValue(key)) + assert.True(t, acc.HasFloatField("mongodb", key)) } } @@ -95,17 +97,37 @@ func TestStateTag(t *testing.T) { tags, ) - stats := []string{"inserts_per_sec", "queries_per_sec"} - stateTags := make(map[string]string) stateTags["state"] = "PRI" var acc testutil.Accumulator - d.AddDefaultStats(&acc) - - for _, key := range stats { - err := acc.ValidateTaggedValue(key, int64(0), stateTags) - require.NoError(t, err) + d.AddDefaultStats() + d.flush(&acc) + fields := map[string]interface{}{ + "active_reads": int64(0), + "active_writes": int64(0), + "commands_per_sec": int64(0), + "deletes_per_sec": int64(0), + "flushes_per_sec": int64(0), + "getmores_per_sec": int64(0), + "inserts_per_sec": int64(0), + "member_status": "PRI", + "net_in_bytes": int64(0), + "net_out_bytes": int64(0), + "open_connections": int64(0), + "queries_per_sec": int64(0), + "queued_reads": int64(0), + "queued_writes": int64(0), + "repl_commands_per_sec": int64(0), + "repl_deletes_per_sec": int64(0), + "repl_getmores_per_sec": int64(0), + "repl_inserts_per_sec": int64(0), + "repl_queries_per_sec": int64(0), + "repl_updates_per_sec": int64(0), + "resident_megabytes": int64(0), + "updates_per_sec": int64(0), + "vsize_megabytes": int64(0), } + acc.AssertContainsTaggedFields(t, "mongodb", fields, stateTags) } diff --git 
a/plugins/mongodb/mongodb_server.go b/plugins/inputs/mongodb/mongodb_server.go similarity index 86% rename from plugins/mongodb/mongodb_server.go rename to plugins/inputs/mongodb/mongodb_server.go index d9b0edaad..87552f906 100644 --- a/plugins/mongodb/mongodb_server.go +++ b/plugins/inputs/mongodb/mongodb_server.go @@ -4,7 +4,7 @@ import ( "net/url" "time" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdata/telegraf/plugins/inputs" "gopkg.in/mgo.v2" "gopkg.in/mgo.v2/bson" ) @@ -21,7 +21,7 @@ func (s *Server) getDefaultTags() map[string]string { return tags } -func (s *Server) gatherData(acc plugins.Accumulator) error { +func (s *Server) gatherData(acc inputs.Accumulator) error { s.Session.SetMode(mgo.Eventual, true) s.Session.SetSocketTimeout(0) result := &ServerStatus{} @@ -44,7 +44,8 @@ func (s *Server) gatherData(acc plugins.Accumulator) error { NewStatLine(*s.lastResult, *result, s.Url.Host, true, durationInSeconds), s.getDefaultTags(), ) - data.AddDefaultStats(acc) + data.AddDefaultStats() + data.flush(acc) } return nil } diff --git a/plugins/mongodb/mongodb_server_test.go b/plugins/inputs/mongodb/mongodb_server_test.go similarity index 95% rename from plugins/mongodb/mongodb_server_test.go rename to plugins/inputs/mongodb/mongodb_server_test.go index ec536bbef..52869724c 100644 --- a/plugins/mongodb/mongodb_server_test.go +++ b/plugins/inputs/mongodb/mongodb_server_test.go @@ -6,7 +6,7 @@ import ( "testing" "time" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) diff --git a/plugins/mongodb/mongodb_test.go b/plugins/inputs/mongodb/mongodb_test.go similarity index 100% rename from plugins/mongodb/mongodb_test.go rename to plugins/inputs/mongodb/mongodb_test.go diff --git a/plugins/mongodb/mongostat.go b/plugins/inputs/mongodb/mongostat.go similarity index 100% rename from plugins/mongodb/mongostat.go rename to plugins/inputs/mongodb/mongostat.go diff --git a/plugins/mysql/mysql.go b/plugins/inputs/mysql/mysql.go similarity index 84% rename from plugins/mysql/mysql.go rename to plugins/inputs/mysql/mysql.go index 5193f078f..7434a282a 100644 --- a/plugins/mysql/mysql.go +++ b/plugins/inputs/mysql/mysql.go @@ -6,7 +6,7 @@ import ( "strings" _ "github.com/go-sql-driver/mysql" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdata/telegraf/plugins/inputs" ) type Mysql struct { @@ -35,7 +35,7 @@ func (m *Mysql) Description() string { var localhost = "" -func (m *Mysql) Gather(acc plugins.Accumulator) error { +func (m *Mysql) Gather(acc inputs.Accumulator) error { if len(m.Servers) == 0 { // if we can't get stats in this case, thats fine, don't report // an error. 
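> Editorial note: the mongodb `flush` above is the producer-side half of the same migration seen in the test files: rather than one `acc.Add` call per stat, a plugin now collects everything into a fields map and emits a single point per measurement. A condensed sketch of the pattern (the measurement name and stat types here are hypothetical):

```go
package example

import "github.com/influxdata/telegraf/plugins/inputs"

// flushStats shows the accumulation pattern this patch applies throughout:
// gather values into one map, then emit a single point per measurement.
func flushStats(acc inputs.Accumulator, stats map[string]int64,
	tags map[string]string) {
	fields := make(map[string]interface{}, len(stats))
	for key, val := range stats {
		fields[key] = val
	}
	// One AddFields call replaces the former per-key acc.Add(key, val, tags).
	acc.AddFields("example_measurement", fields, tags)
}
```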
@@ -113,7 +113,7 @@ var mappings = []*mapping{ }, } -func (m *Mysql) gatherServer(serv string, acc plugins.Accumulator) error { +func (m *Mysql) gatherServer(serv string, acc inputs.Accumulator) error { // If user forgot the '/', add it if strings.HasSuffix(serv, ")") { serv = serv + "/" @@ -138,6 +138,8 @@ func (m *Mysql) gatherServer(serv string, acc plugins.Accumulator) error { if err != nil { servtag = "localhost" } + tags := map[string]string{"server": servtag} + fields := make(map[string]interface{}) for rows.Next() { var name string var val interface{} @@ -149,12 +151,10 @@ func (m *Mysql) gatherServer(serv string, acc plugins.Accumulator) error { var found bool - tags := map[string]string{"server": servtag} - for _, mapped := range mappings { if strings.HasPrefix(name, mapped.onServer) { i, _ := strconv.Atoi(string(val.([]byte))) - acc.Add(mapped.inExport+name[len(mapped.onServer):], i, tags) + fields[mapped.inExport+name[len(mapped.onServer):]] = i found = true } } @@ -170,16 +170,17 @@ func (m *Mysql) gatherServer(serv string, acc plugins.Accumulator) error { return err } - acc.Add("queries", i, tags) + fields["queries"] = i case "Slow_queries": i, err := strconv.ParseInt(string(val.([]byte)), 10, 64) if err != nil { return err } - acc.Add("slow_queries", i, tags) + fields["slow_queries"] = i } } + acc.AddFields("mysql", fields, tags) conn_rows, err := db.Query("SELECT user, sum(1) FROM INFORMATION_SCHEMA.PROCESSLIST GROUP BY user") @@ -193,18 +194,20 @@ func (m *Mysql) gatherServer(serv string, acc plugins.Accumulator) error { } tags := map[string]string{"server": servtag, "user": user} + fields := make(map[string]interface{}) if err != nil { return err } - acc.Add("connections", connections, tags) + fields["connections"] = connections + acc.AddFields("mysql_users", fields, tags) } return nil } func init() { - plugins.Add("mysql", func() plugins.Plugin { + inputs.Add("mysql", func() inputs.Input { return &Mysql{} }) } diff --git a/plugins/mysql/mysql_test.go b/plugins/inputs/mysql/mysql_test.go similarity index 55% rename from plugins/mysql/mysql_test.go rename to plugins/inputs/mysql/mysql_test.go index d424f284b..855e8ba52 100644 --- a/plugins/mysql/mysql_test.go +++ b/plugins/inputs/mysql/mysql_test.go @@ -2,72 +2,13 @@ package mysql import ( "fmt" - "strings" "testing" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -func TestMysqlGeneratesMetrics(t *testing.T) { - if testing.Short() { - t.Skip("Skipping integration test in short mode") - } - - m := &Mysql{ - Servers: []string{fmt.Sprintf("root@tcp(%s:3306)/", testutil.GetLocalHost())}, - } - - var acc testutil.Accumulator - - err := m.Gather(&acc) - require.NoError(t, err) - - prefixes := []struct { - prefix string - count int - }{ - {"commands", 139}, - {"handler", 16}, - {"bytes", 2}, - {"innodb", 46}, - {"threads", 4}, - {"aborted", 2}, - {"created", 3}, - {"key", 7}, - {"open", 7}, - {"opened", 3}, - {"qcache", 8}, - {"table", 1}, - } - - intMetrics := []string{ - "queries", - "slow_queries", - "connections", - } - - for _, prefix := range prefixes { - var count int - - for _, p := range acc.Points { - if strings.HasPrefix(p.Measurement, prefix.prefix) { - count++ - } - } - - if prefix.count > count { - t.Errorf("Expected less than %d measurements with prefix %s, got %d", - count, prefix.prefix, prefix.count) - } - } - - for _, metric := range intMetrics { - assert.True(t, acc.HasIntValue(metric)) - } -} - 
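> Editorial note: the mysql rewrite above also shows how one plugin can split its output across measurements: server-wide status variables batch into a single `mysql` point, while per-user connection counts become one `mysql_users` point each, keyed by a `user` tag. A schematic restatement, with the pre-parsed maps as hypothetical stand-ins for the status rows and the `INFORMATION_SCHEMA.PROCESSLIST` aggregation:

```go
package example

import "github.com/influxdata/telegraf/plugins/inputs"

// emitMysql mirrors the shape of the rewritten gatherServer: one point for
// server-wide status, plus one point per user for connection counts.
func emitMysql(acc inputs.Accumulator, servtag string,
	status map[string]interface{}, userConns map[string]int64) {
	acc.AddFields("mysql", status, map[string]string{"server": servtag})

	for user, connections := range userConns {
		acc.AddFields("mysql_users",
			map[string]interface{}{"connections": connections},
			map[string]string{"server": servtag, "user": user})
	}
}
```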
func TestMysqlDefaultsToLocal(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") @@ -82,7 +23,7 @@ func TestMysqlDefaultsToLocal(t *testing.T) { err := m.Gather(&acc) require.NoError(t, err) - assert.True(t, len(acc.Points) > 0) + assert.True(t, acc.HasMeasurement("mysql")) } func TestMysqlParseDSN(t *testing.T) { diff --git a/plugins/mysql/parse_dsn.go b/plugins/inputs/mysql/parse_dsn.go similarity index 100% rename from plugins/mysql/parse_dsn.go rename to plugins/inputs/mysql/parse_dsn.go diff --git a/plugins/nginx/nginx.go b/plugins/inputs/nginx/nginx.go similarity index 85% rename from plugins/nginx/nginx.go rename to plugins/inputs/nginx/nginx.go index 2c30ea671..6ea665b7e 100644 --- a/plugins/nginx/nginx.go +++ b/plugins/inputs/nginx/nginx.go @@ -11,7 +11,7 @@ import ( "sync" "time" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdata/telegraf/plugins/inputs" ) type Nginx struct { @@ -31,7 +31,7 @@ func (n *Nginx) Description() string { return "Read Nginx's basic status information (ngx_http_stub_status_module)" } -func (n *Nginx) Gather(acc plugins.Accumulator) error { +func (n *Nginx) Gather(acc inputs.Accumulator) error { var wg sync.WaitGroup var outerr error @@ -59,7 +59,7 @@ var tr = &http.Transport{ var client = &http.Client{Transport: tr} -func (n *Nginx) gatherUrl(addr *url.URL, acc plugins.Accumulator) error { +func (n *Nginx) gatherUrl(addr *url.URL, acc inputs.Accumulator) error { resp, err := client.Get(addr.String()) if err != nil { return fmt.Errorf("error making HTTP request to %s: %s", addr.String(), err) @@ -127,14 +127,16 @@ func (n *Nginx) gatherUrl(addr *url.URL, acc plugins.Accumulator) error { } tags := getTags(addr) - - acc.Add("active", active, tags) - acc.Add("accepts", accepts, tags) - acc.Add("handled", handled, tags) - acc.Add("requests", requests, tags) - acc.Add("reading", reading, tags) - acc.Add("writing", writing, tags) - acc.Add("waiting", waiting, tags) + fields := map[string]interface{}{ + "active": active, + "accepts": accepts, + "handled": handled, + "requests": requests, + "reading": reading, + "writing": writing, + "waiting": waiting, + } + acc.AddFields("nginx", fields, tags) return nil } @@ -157,7 +159,7 @@ func getTags(addr *url.URL) map[string]string { } func init() { - plugins.Add("nginx", func() plugins.Plugin { + inputs.Add("nginx", func() inputs.Input { return &Nginx{} }) } diff --git a/plugins/nginx/nginx_test.go b/plugins/inputs/nginx/nginx_test.go similarity index 81% rename from plugins/nginx/nginx_test.go rename to plugins/inputs/nginx/nginx_test.go index 74dd37d31..895e3e583 100644 --- a/plugins/nginx/nginx_test.go +++ b/plugins/inputs/nginx/nginx_test.go @@ -8,7 +8,7 @@ import ( "net/url" "testing" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -54,17 +54,14 @@ func TestNginxGeneratesMetrics(t *testing.T) { err := n.Gather(&acc) require.NoError(t, err) - metrics := []struct { - name string - value uint64 - }{ - {"active", 585}, - {"accepts", 85340}, - {"handled", 85340}, - {"requests", 35085}, - {"reading", 4}, - {"writing", 135}, - {"waiting", 446}, + fields := map[string]interface{}{ + "active": uint64(585), + "accepts": uint64(85340), + "handled": uint64(85340), + "requests": uint64(35085), + "reading": uint64(4), + "writing": uint64(135), + "waiting": uint64(446), } addr, err := url.Parse(ts.URL) if err != nil { @@ -84,8 +81,5 @@ func 
TestNginxGeneratesMetrics(t *testing.T) { } tags := map[string]string{"server": host, "port": port} - - for _, m := range metrics { - assert.NoError(t, acc.ValidateTaggedValue(m.name, m.value, tags)) - } + acc.AssertContainsTaggedFields(t, "nginx", fields, tags) } diff --git a/plugins/inputs/nsq/nsq.go b/plugins/inputs/nsq/nsq.go new file mode 100644 index 000000000..9b680a0db --- /dev/null +++ b/plugins/inputs/nsq/nsq.go @@ -0,0 +1,271 @@ +// The MIT License (MIT) +// +// Copyright (c) 2015 Jeff Nickoloff (jeff@allingeek.com) +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package nsq + +import ( + "encoding/json" + "fmt" + "net/http" + "net/url" + "strconv" + "sync" + "time" + + "github.com/influxdata/telegraf/plugins/inputs" +) + +// Might add Lookupd endpoints for cluster discovery +type NSQ struct { + Endpoints []string +} + +var sampleConfig = ` + # An array of NSQD HTTP API endpoints + endpoints = ["http://localhost:4151"] +` + +const ( + requestPattern = `%s/stats?format=json` +) + +func init() { + inputs.Add("nsq", func() inputs.Input { + return &NSQ{} + }) +} + +func (n *NSQ) SampleConfig() string { + return sampleConfig +} + +func (n *NSQ) Description() string { + return "Read NSQ topic and channel statistics." 
+} + +func (n *NSQ) Gather(acc inputs.Accumulator) error { + var wg sync.WaitGroup + var outerr error + + for _, e := range n.Endpoints { + wg.Add(1) + go func(e string) { + defer wg.Done() + outerr = n.gatherEndpoint(e, acc) + }(e) + } + + wg.Wait() + + return outerr +} + +var tr = &http.Transport{ + ResponseHeaderTimeout: time.Duration(3 * time.Second), +} + +var client = &http.Client{Transport: tr} + +func (n *NSQ) gatherEndpoint(e string, acc inputs.Accumulator) error { + u, err := buildURL(e) + if err != nil { + return err + } + r, err := client.Get(u.String()) + if err != nil { + return fmt.Errorf("Error while polling %s: %s", u.String(), err) + } + defer r.Body.Close() + + if r.StatusCode != http.StatusOK { + return fmt.Errorf("%s returned HTTP status %s", u.String(), r.Status) + } + + s := &NSQStats{} + err = json.NewDecoder(r.Body).Decode(s) + if err != nil { + return fmt.Errorf(`Error parsing response: %s`, err) + } + + tags := map[string]string{ + `server_host`: u.Host, + `server_version`: s.Data.Version, + } + + fields := make(map[string]interface{}) + if s.Data.Health == `OK` { + fields["server_count"] = int64(1) + } else { + fields["server_count"] = int64(0) + } + fields["topic_count"] = int64(len(s.Data.Topics)) + + acc.AddFields("nsq_server", fields, tags) + for _, t := range s.Data.Topics { + topicStats(t, acc, u.Host, s.Data.Version) + } + + return nil +} + +func buildURL(e string) (*url.URL, error) { + u := fmt.Sprintf(requestPattern, e) + addr, err := url.Parse(u) + if err != nil { + return nil, fmt.Errorf("Unable to parse address '%s': %s", u, err) + } + return addr, nil +} + +func topicStats(t TopicStats, acc inputs.Accumulator, host, version string) { + // per topic overall (tag: name, paused, channel count) + tags := map[string]string{ + "server_host": host, + "server_version": version, + "topic": t.Name, + } + + fields := map[string]interface{}{ + "depth": t.Depth, + "backend_depth": t.BackendDepth, + "message_count": t.MessageCount, + "channel_count": int64(len(t.Channels)), + } + acc.AddFields("nsq_topic", fields, tags) + + for _, c := range t.Channels { + channelStats(c, acc, host, version, t.Name) + } +} + +func channelStats(c ChannelStats, acc inputs.Accumulator, host, version, topic string) { + tags := map[string]string{ + "server_host": host, + "server_version": version, + "topic": topic, + "channel": c.Name, + } + + fields := map[string]interface{}{ + "depth": c.Depth, + "backend_depth": c.BackendDepth, + "inflight_count": c.InFlightCount, + "deferred_count": c.DeferredCount, + "message_count": c.MessageCount, + "requeue_count": c.RequeueCount, + "timeout_count": c.TimeoutCount, + "client_count": int64(len(c.Clients)), + } + + acc.AddFields("nsq_channel", fields, tags) + for _, cl := range c.Clients { + clientStats(cl, acc, host, version, topic, c.Name) + } +} + +func clientStats(c ClientStats, acc inputs.Accumulator, host, version, topic, channel string) { + tags := map[string]string{ + "server_host": host, + "server_version": version, + "topic": topic, + "channel": channel, + "client_name": c.Name, + "client_id": c.ID, + "client_hostname": c.Hostname, + "client_version": c.Version, + "client_address": c.RemoteAddress, + "client_user_agent": c.UserAgent, + "client_tls": strconv.FormatBool(c.TLS), + "client_snappy": strconv.FormatBool(c.Snappy), + "client_deflate": strconv.FormatBool(c.Deflate), + } + + fields := map[string]interface{}{ + "ready_count": c.ReadyCount, + "inflight_count": c.InFlightCount, + "message_count": c.MessageCount, + "finish_count": 
c.FinishCount, + "requeue_count": c.RequeueCount, + } + acc.AddFields("nsq_client", fields, tags) +} + +type NSQStats struct { + Code int64 `json:"status_code"` + Txt string `json:"status_txt"` + Data NSQStatsData `json:"data"` +} + +type NSQStatsData struct { + Version string `json:"version"` + Health string `json:"health"` + StartTime int64 `json:"start_time"` + Topics []TopicStats `json:"topics"` +} + +// e2e_processing_latency is not modeled +type TopicStats struct { + Name string `json:"topic_name"` + Depth int64 `json:"depth"` + BackendDepth int64 `json:"backend_depth"` + MessageCount int64 `json:"message_count"` + Paused bool `json:"paused"` + Channels []ChannelStats `json:"channels"` +} + +// e2e_processing_latency is not modeled +type ChannelStats struct { + Name string `json:"channel_name"` + Depth int64 `json:"depth"` + BackendDepth int64 `json:"backend_depth"` + InFlightCount int64 `json:"in_flight_count"` + DeferredCount int64 `json:"deferred_count"` + MessageCount int64 `json:"message_count"` + RequeueCount int64 `json:"requeue_count"` + TimeoutCount int64 `json:"timeout_count"` + Paused bool `json:"paused"` + Clients []ClientStats `json:"clients"` +} + +type ClientStats struct { + Name string `json:"name"` + ID string `json:"client_id"` + Hostname string `json:"hostname"` + Version string `json:"version"` + RemoteAddress string `json:"remote_address"` + State int64 `json:"state"` + ReadyCount int64 `json:"ready_count"` + InFlightCount int64 `json:"in_flight_count"` + MessageCount int64 `json:"message_count"` + FinishCount int64 `json:"finish_count"` + RequeueCount int64 `json:"requeue_count"` + ConnectTime int64 `json:"connect_ts"` + SampleRate int64 `json:"sample_rate"` + Deflate bool `json:"deflate"` + Snappy bool `json:"snappy"` + UserAgent string `json:"user_agent"` + TLS bool `json:"tls"` + TLSCipherSuite string `json:"tls_cipher_suite"` + TLSVersion string `json:"tls_version"` + TLSNegotiatedProtocol string `json:"tls_negotiated_protocol"` + TLSNegotiatedProtocolIsMutual bool `json:"tls_negotiated_protocol_is_mutual"` +} diff --git a/plugins/inputs/nsq/nsq_test.go b/plugins/inputs/nsq/nsq_test.go new file mode 100644 index 000000000..23fd19a42 --- /dev/null +++ b/plugins/inputs/nsq/nsq_test.go @@ -0,0 +1,273 @@ +package nsq + +import ( + "fmt" + "net/http" + "net/http/httptest" + "net/url" + "testing" + + "github.com/influxdata/telegraf/testutil" + + "github.com/stretchr/testify/require" +) + +func TestNSQStats(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + fmt.Fprintln(w, response) + })) + defer ts.Close() + + n := &NSQ{ + Endpoints: []string{ts.URL}, + } + + var acc testutil.Accumulator + err := n.Gather(&acc) + require.NoError(t, err) + + u, err := url.Parse(ts.URL) + require.NoError(t, err) + host := u.Host + + // actually validate the tests + tests := []struct { + m string + f map[string]interface{} + g map[string]string + }{ + { + "nsq_server", + map[string]interface{}{ + "server_count": int64(1), + "topic_count": int64(2), + }, + map[string]string{ + "server_host": host, + "server_version": "0.3.6", + }, + }, + { + "nsq_topic", + map[string]interface{}{ + "depth": int64(12), + "backend_depth": int64(13), + "message_count": int64(14), + "channel_count": int64(1), + }, + map[string]string{ + "server_host": host, + "server_version": "0.3.6", + "topic": "t1"}, + }, + { + "nsq_channel", + map[string]interface{}{ + "depth": int64(0), + "backend_depth": int64(1), + 
"inflight_count": int64(2), + "deferred_count": int64(3), + "message_count": int64(4), + "requeue_count": int64(5), + "timeout_count": int64(6), + "client_count": int64(1), + }, + map[string]string{ + "server_host": host, + "server_version": "0.3.6", + "topic": "t1", + "channel": "c1", + }, + }, + { + "nsq_client", + map[string]interface{}{ + "ready_count": int64(200), + "inflight_count": int64(7), + "message_count": int64(8), + "finish_count": int64(9), + "requeue_count": int64(10), + }, + map[string]string{"server_host": host, "server_version": "0.3.6", + "topic": "t1", "channel": "c1", "client_name": "373a715cd990", + "client_id": "373a715cd990", "client_hostname": "373a715cd990", + "client_version": "V2", "client_address": "172.17.0.11:35560", + "client_tls": "false", "client_snappy": "false", + "client_deflate": "false", + "client_user_agent": "nsq_to_nsq/0.3.6 go-nsq/1.0.5"}, + }, + { + "nsq_topic", + map[string]interface{}{ + "depth": int64(28), + "backend_depth": int64(29), + "message_count": int64(30), + "channel_count": int64(1), + }, + map[string]string{ + "server_host": host, + "server_version": "0.3.6", + "topic": "t2"}, + }, + { + "nsq_channel", + map[string]interface{}{ + "depth": int64(15), + "backend_depth": int64(16), + "inflight_count": int64(17), + "deferred_count": int64(18), + "message_count": int64(19), + "requeue_count": int64(20), + "timeout_count": int64(21), + "client_count": int64(1), + }, + map[string]string{ + "server_host": host, + "server_version": "0.3.6", + "topic": "t2", + "channel": "c2", + }, + }, + { + "nsq_client", + map[string]interface{}{ + "ready_count": int64(22), + "inflight_count": int64(23), + "message_count": int64(24), + "finish_count": int64(25), + "requeue_count": int64(26), + }, + map[string]string{"server_host": host, "server_version": "0.3.6", + "topic": "t2", "channel": "c2", "client_name": "377569bd462b", + "client_id": "377569bd462b", "client_hostname": "377569bd462b", + "client_version": "V2", "client_address": "172.17.0.8:48145", + "client_user_agent": "go-nsq/1.0.5", "client_tls": "true", + "client_snappy": "true", "client_deflate": "true"}, + }, + } + + for _, test := range tests { + acc.AssertContainsTaggedFields(t, test.m, test.f, test.g) + } +} + +var response = ` +{ + "status_code": 200, + "status_txt": "OK", + "data": { + "version": "0.3.6", + "health": "OK", + "start_time": 1452021674, + "topics": [ + { + "topic_name": "t1", + "channels": [ + { + "channel_name": "c1", + "depth": 0, + "backend_depth": 1, + "in_flight_count": 2, + "deferred_count": 3, + "message_count": 4, + "requeue_count": 5, + "timeout_count": 6, + "clients": [ + { + "name": "373a715cd990", + "client_id": "373a715cd990", + "hostname": "373a715cd990", + "version": "V2", + "remote_address": "172.17.0.11:35560", + "state": 3, + "ready_count": 200, + "in_flight_count": 7, + "message_count": 8, + "finish_count": 9, + "requeue_count": 10, + "connect_ts": 1452021675, + "sample_rate": 11, + "deflate": false, + "snappy": false, + "user_agent": "nsq_to_nsq\/0.3.6 go-nsq\/1.0.5", + "tls": false, + "tls_cipher_suite": "", + "tls_version": "", + "tls_negotiated_protocol": "", + "tls_negotiated_protocol_is_mutual": false + } + ], + "paused": false, + "e2e_processing_latency": { + "count": 0, + "percentiles": null + } + } + ], + "depth": 12, + "backend_depth": 13, + "message_count": 14, + "paused": false, + "e2e_processing_latency": { + "count": 0, + "percentiles": null + } + }, + { + "topic_name": "t2", + "channels": [ + { + "channel_name": "c2", + "depth": 15, + 
"backend_depth": 16, + "in_flight_count": 17, + "deferred_count": 18, + "message_count": 19, + "requeue_count": 20, + "timeout_count": 21, + "clients": [ + { + "name": "377569bd462b", + "client_id": "377569bd462b", + "hostname": "377569bd462b", + "version": "V2", + "remote_address": "172.17.0.8:48145", + "state": 3, + "ready_count": 22, + "in_flight_count": 23, + "message_count": 24, + "finish_count": 25, + "requeue_count": 26, + "connect_ts": 1452021678, + "sample_rate": 27, + "deflate": true, + "snappy": true, + "user_agent": "go-nsq\/1.0.5", + "tls": true, + "tls_cipher_suite": "", + "tls_version": "", + "tls_negotiated_protocol": "", + "tls_negotiated_protocol_is_mutual": false + } + ], + "paused": false, + "e2e_processing_latency": { + "count": 0, + "percentiles": null + } + } + ], + "depth": 28, + "backend_depth": 29, + "message_count": 30, + "paused": false, + "e2e_processing_latency": { + "count": 0, + "percentiles": null + } + } + ] + } +} +` diff --git a/plugins/inputs/passenger/README.md b/plugins/inputs/passenger/README.md new file mode 100644 index 000000000..64e39729b --- /dev/null +++ b/plugins/inputs/passenger/README.md @@ -0,0 +1,138 @@ +# Telegraf plugin: passenger + +Get phusion passenger stat using their command line utility +`passenger-status` + +# Measurements + +Meta: + +- tags: + + * name + * passenger_version + * pid + * code_revision + +Measurement names: + +- passenger: + + * Tags: `passenger_version` + * Fields: + + - process_count + - max + - capacity_used + - get_wait_list_size + +- passenger_supergroup: + + * Tags: `name` + * Fields: + + - get_wait_list_size + - capacity_used + +- passenger_group: + + * Tags: + + - name + - app_root + - app_type + + * Fields: + + - get_wait_list_size + - capacity_used + - processes_being_spawned + +- passenger_process: + + * Tags: + + - group_name + - app_root + - supergroup_name + - pid + - code_revision + - life_status + - process_group_id + + * Field: + + - concurrency + - sessions + - busyness + - processed + - spawner_creation_time + - spawn_start_time + - spawn_end_time + - last_used + - uptime + - cpu + - rss + - pss + - private_dirty + - swap + - real_memory + - vmsize + +# Example output + +Using this configuration: + +``` +[[inputs.passenger]] + # Path of passenger-status. 
+ #
+ # The plugin gathers metrics by parsing the XML output of passenger-status.
+ # More information about the tool:
+ # https://www.phusionpassenger.com/library/admin/apache/overall_status_report.html
+ #
+ # If no path is specified, the plugin simply executes `passenger-status`,
+ # so it must be available in your PATH.
+ command = "passenger-status -v --show=xml"
+```
+
+When run with:
+
+```
+./telegraf -config telegraf.conf -test -input-filter passenger
+```
+
+It produces:
+
+```
+> passenger,passenger_version=5.0.17 capacity_used=23i,get_wait_list_size=0i,max=23i,process_count=23i 1452984112799414257
+> passenger_supergroup,name=/var/app/current/public capacity_used=23i,get_wait_list_size=0i 1452984112799496977
+> passenger_group,app_root=/var/app/current,app_type=rack,name=/var/app/current/public capacity_used=23i,get_wait_list_size=0i,processes_being_spawned=0i 1452984112799527021
+> passenger_process,app_root=/var/app/current,code_revision=899ac7f,group_name=/var/app/current/public,life_status=ALIVE,pid=11553,process_group_id=13608,supergroup_name=/var/app/current/public busyness=0i,concurrency=1i,cpu=58i,last_used=1452747071764940i,private_dirty=314900i,processed=951i,pss=319391i,real_memory=314900i,rss=418548i,sessions=0i,spawn_end_time=1452746845013365i,spawn_start_time=1452746844946982i,spawner_creation_time=1452746835922747i,swap=0i,uptime=226i,vmsize=1563580i 1452984112799571490
+> passenger_process,app_root=/var/app/current,code_revision=899ac7f,group_name=/var/app/current/public,life_status=ALIVE,pid=11563,process_group_id=13608,supergroup_name=/var/app/current/public busyness=2147483647i,concurrency=1i,cpu=47i,last_used=1452747071709179i,private_dirty=309240i,processed=756i,pss=314036i,real_memory=309240i,rss=418296i,sessions=1i,spawn_end_time=1452746845172460i,spawn_start_time=1452746845136882i,spawner_creation_time=1452746835922747i,swap=0i,uptime=226i,vmsize=1563608i 1452984112799638581
+```
+
+# Note
+
+You have to ensure that the `telegraf` user can run the `passenger-status`
+command. Depending on how you installed and configured passenger, this may
+be an issue. If you are using passenger standalone, or compiled it yourself,
+this is straightforward. However, if you installed it as a gem under `rvm`,
+it may be harder to get right.
+
+With `rvm`, for example, you can use this command:
+
+```
+~/.rvm/bin/rvm default do passenger-status -v --show=xml
+```
+
+You can also use `&&` and `;` to run a more complicated shell command, for
+example to load the rvm shell and source the path first:
+
+```
+command = "source .rvm/scripts/rvm && passenger-status -v --show=xml"
+```
+
+Either way, just ensure that the command runs under the `telegraf` user and
+produces XML output.
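The `uptime=226i` field in the example output above is derived from passenger's human-readable uptime string (`3m 46s` in this case): the plugin sums the `d`/`h`/`m`/`s` components into seconds. A standalone sketch of that conversion (the `uptimeSeconds` helper name is illustrative; the plugin itself implements this in the `getUptime` method added below):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// uptimeSeconds converts a passenger-status uptime string such as
// "1d 2h 3m 46s" into seconds, skipping malformed components.
func uptimeSeconds(uptime string) int64 {
	multipliers := map[byte]int64{'d': 86400, 'h': 3600, 'm': 60, 's': 1}
	var total int64
	for _, part := range strings.Fields(uptime) {
		mult, ok := multipliers[part[len(part)-1]]
		if !ok {
			continue
		}
		if value, err := strconv.ParseInt(part[:len(part)-1], 10, 64); err == nil {
			total += value * mult
		}
	}
	return total
}

func main() {
	fmt.Println(uptimeSeconds("3m 46s")) // 226, matching uptime=226i above
}
```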
diff --git a/plugins/inputs/passenger/passenger.go b/plugins/inputs/passenger/passenger.go
new file mode 100644
index 000000000..c5b049b7c
--- /dev/null
+++ b/plugins/inputs/passenger/passenger.go
@@ -0,0 +1,250 @@
+package passenger
+
+import (
+	"bytes"
+	"encoding/xml"
+	"fmt"
+	"os/exec"
+	"strconv"
+	"strings"
+
+	"github.com/influxdata/telegraf/plugins/inputs"
+	"golang.org/x/net/html/charset"
+)
+
+type passenger struct {
+	Command string
+}
+
+// parseCommand splits the configured command string into the executable
+// and its arguments.
+func (p *passenger) parseCommand() (string, []string) {
+	var arguments []string
+	if !strings.Contains(p.Command, " ") {
+		return p.Command, arguments
+	}
+
+	arguments = strings.Split(p.Command, " ")
+	return arguments[0], arguments[1:]
+}
+
+type info struct {
+	Passenger_version  string `xml:"passenger_version"`
+	Process_count      int    `xml:"process_count"`
+	Capacity_used      int    `xml:"capacity_used"`
+	Get_wait_list_size int    `xml:"get_wait_list_size"`
+	Max                int    `xml:"max"`
+	Supergroups        struct {
+		Supergroup []struct {
+			Name               string `xml:"name"`
+			Get_wait_list_size int    `xml:"get_wait_list_size"`
+			Capacity_used      int    `xml:"capacity_used"`
+			Group              []struct {
+				Name                    string `xml:"name"`
+				AppRoot                 string `xml:"app_root"`
+				AppType                 string `xml:"app_type"`
+				Enabled_process_count   int    `xml:"enabled_process_count"`
+				Disabling_process_count int    `xml:"disabling_process_count"`
+				Disabled_process_count  int    `xml:"disabled_process_count"`
+				Capacity_used           int    `xml:"capacity_used"`
+				Get_wait_list_size      int    `xml:"get_wait_list_size"`
+				Processes_being_spawned int    `xml:"processes_being_spawned"`
+				Processes               struct {
+					Process []*process `xml:"process"`
+				} `xml:"processes"`
+			} `xml:"group"`
+		} `xml:"supergroup"`
+	} `xml:"supergroups"`
+}
+
+type process struct {
+	Pid                   int    `xml:"pid"`
+	Concurrency           int    `xml:"concurrency"`
+	Sessions              int    `xml:"sessions"`
+	Busyness              int    `xml:"busyness"`
+	Processed             int    `xml:"processed"`
+	Spawner_creation_time int64  `xml:"spawner_creation_time"`
+	Spawn_start_time      int64  `xml:"spawn_start_time"`
+	Spawn_end_time        int64  `xml:"spawn_end_time"`
+	Last_used             int64  `xml:"last_used"`
+	Uptime                string `xml:"uptime"`
+	Code_revision         string `xml:"code_revision"`
+	Life_status           string `xml:"life_status"`
+	Enabled               string `xml:"enabled"`
+	Has_metrics           bool   `xml:"has_metrics"`
+	Cpu                   int64  `xml:"cpu"`
+	Rss                   int64  `xml:"rss"`
+	Pss                   int64  `xml:"pss"`
+	Private_dirty         int64  `xml:"private_dirty"`
+	Swap                  int64  `xml:"swap"`
+	Real_memory           int64  `xml:"real_memory"`
+	Vmsize                int64  `xml:"vmsize"`
+	Process_group_id      string `xml:"process_group_id"`
+}
+
+// getUptime converts passenger's human-readable uptime string
+// (e.g. "1d 3m 46s") into seconds.
+func (p *process) getUptime() int64 {
+	if p.Uptime == "" {
+		return 0
+	}
+
+	timeSlice := strings.Split(p.Uptime, " ")
+	var uptime int64
+	for _, v := range timeSlice {
+		switch {
+		case strings.HasSuffix(v, "d"):
+			iValue := strings.TrimSuffix(v, "d")
+			value, err := strconv.ParseInt(iValue, 10, 64)
+			if err == nil {
+				uptime += value * (24 * 60 * 60)
+			}
+		case strings.HasSuffix(v, "h"):
+			// note: trim the "h" suffix here; trimming any other letter would
+			// make ParseInt fail and silently drop the hours component
+			iValue := strings.TrimSuffix(v, "h")
+			value, err := strconv.ParseInt(iValue, 10, 64)
+			if err == nil {
+				uptime += value * (60 * 60)
+			}
+		case strings.HasSuffix(v, "m"):
+			iValue := strings.TrimSuffix(v, "m")
+			value, err := strconv.ParseInt(iValue, 10, 64)
+			if err == nil {
+				uptime += value * 60
+			}
+		case strings.HasSuffix(v, "s"):
+			iValue := strings.TrimSuffix(v, "s")
+			value, err := strconv.ParseInt(iValue, 10, 64)
+			if err == nil {
+				uptime += value
+			}
+		}
+	}
+
+	return uptime
+}
+
+var sampleConfig = `
+ # Path of
passenger-status. + # + # Plugin gather metric via parsing XML output of passenger-status + # More information about the tool: + # https://www.phusionpassenger.com/library/admin/apache/overall_status_report.html + # + # + # If no path is specified, then the plugin simply execute passenger-status + # hopefully it can be found in your PATH + command = "passenger-status -v --show=xml" +` + +func (r *passenger) SampleConfig() string { + return sampleConfig +} + +func (r *passenger) Description() string { + return "Read metrics of passenger using passenger-status" +} + +func (g *passenger) Gather(acc inputs.Accumulator) error { + if g.Command == "" { + g.Command = "passenger-status -v --show=xml" + } + + cmd, args := g.parseCommand() + out, err := exec.Command(cmd, args...).Output() + + if err != nil { + return err + } + + if err = importMetric(out, acc); err != nil { + return err + } + + return nil +} + +func importMetric(stat []byte, acc inputs.Accumulator) error { + var p info + + decoder := xml.NewDecoder(bytes.NewReader(stat)) + decoder.CharsetReader = charset.NewReaderLabel + if err := decoder.Decode(&p); err != nil { + return fmt.Errorf("Cannot parse input with error: %v\n", err) + } + + tags := map[string]string{ + "passenger_version": p.Passenger_version, + } + fields := map[string]interface{}{ + "process_count": p.Process_count, + "max": p.Max, + "capacity_used": p.Capacity_used, + "get_wait_list_size": p.Get_wait_list_size, + } + acc.AddFields("passenger", fields, tags) + + for _, sg := range p.Supergroups.Supergroup { + tags := map[string]string{ + "name": sg.Name, + } + fields := map[string]interface{}{ + "get_wait_list_size": sg.Get_wait_list_size, + "capacity_used": sg.Capacity_used, + } + acc.AddFields("passenger_supergroup", fields, tags) + + for _, group := range sg.Group { + tags := map[string]string{ + "name": group.Name, + "app_root": group.AppRoot, + "app_type": group.AppType, + } + fields := map[string]interface{}{ + "get_wait_list_size": group.Get_wait_list_size, + "capacity_used": group.Capacity_used, + "processes_being_spawned": group.Processes_being_spawned, + } + acc.AddFields("passenger_group", fields, tags) + + for _, process := range group.Processes.Process { + tags := map[string]string{ + "group_name": group.Name, + "app_root": group.AppRoot, + "supergroup_name": sg.Name, + "pid": fmt.Sprintf("%d", process.Pid), + "code_revision": process.Code_revision, + "life_status": process.Life_status, + "process_group_id": process.Process_group_id, + } + fields := map[string]interface{}{ + "concurrency": process.Concurrency, + "sessions": process.Sessions, + "busyness": process.Busyness, + "processed": process.Processed, + "spawner_creation_time": process.Spawner_creation_time, + "spawn_start_time": process.Spawn_start_time, + "spawn_end_time": process.Spawn_end_time, + "last_used": process.Last_used, + "uptime": process.getUptime(), + "cpu": process.Cpu, + "rss": process.Rss, + "pss": process.Pss, + "private_dirty": process.Private_dirty, + "swap": process.Swap, + "real_memory": process.Real_memory, + "vmsize": process.Vmsize, + } + acc.AddFields("passenger_process", fields, tags) + } + } + } + + return nil +} + +func init() { + inputs.Add("passenger", func() inputs.Input { + return &passenger{} + }) +} diff --git a/plugins/inputs/passenger/passenger_test.go b/plugins/inputs/passenger/passenger_test.go new file mode 100644 index 000000000..6124a968e --- /dev/null +++ b/plugins/inputs/passenger/passenger_test.go @@ -0,0 +1,301 @@ +package passenger + +import ( + "fmt" + 
"io/ioutil" + "os" + "testing" + + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func fakePassengerStatus(stat string) { + content := fmt.Sprintf("#!/bin/sh\ncat << EOF\n%s\nEOF", stat) + ioutil.WriteFile("/tmp/passenger-status", []byte(content), 0700) +} + +func teardown() { + os.Remove("/tmp/passenger-status") +} + +func Test_Invalid_Passenger_Status_Cli(t *testing.T) { + r := &passenger{ + Command: "an-invalid-command passenger-status", + } + + var acc testutil.Accumulator + + err := r.Gather(&acc) + require.Error(t, err) + assert.Equal(t, err.Error(), `exec: "an-invalid-command": executable file not found in $PATH`) +} + +func Test_Invalid_Xml(t *testing.T) { + fakePassengerStatus("invalid xml") + defer teardown() + + r := &passenger{ + Command: "/tmp/passenger-status", + } + + var acc testutil.Accumulator + + err := r.Gather(&acc) + require.Error(t, err) + assert.Equal(t, err.Error(), "Cannot parse input with error: EOF\n") +} + +// We test this by ensure that the error message match the path of default cli +func Test_Default_Config_Load_Default_Command(t *testing.T) { + fakePassengerStatus("invalid xml") + defer teardown() + + r := &passenger{} + + var acc testutil.Accumulator + + err := r.Gather(&acc) + require.Error(t, err) + assert.Equal(t, err.Error(), "exec: \"passenger-status\": executable file not found in $PATH") +} + +func TestPassengerGenerateMetric(t *testing.T) { + fakePassengerStatus(sampleStat) + defer teardown() + + //Now we tested again above server, with our authentication data + r := &passenger{ + Command: "/tmp/passenger-status", + } + + var acc testutil.Accumulator + + err := r.Gather(&acc) + require.NoError(t, err) + + tags := map[string]string{ + "passenger_version": "5.0.17", + } + fields := map[string]interface{}{ + "process_count": 23, + "max": 23, + "capacity_used": 23, + "get_wait_list_size": 3, + } + acc.AssertContainsTaggedFields(t, "passenger", fields, tags) + + tags = map[string]string{ + "name": "/var/app/current/public", + "app_root": "/var/app/current", + "app_type": "rack", + } + fields = map[string]interface{}{ + "processes_being_spawned": 2, + "capacity_used": 23, + "get_wait_list_size": 3, + } + acc.AssertContainsTaggedFields(t, "passenger_group", fields, tags) + + tags = map[string]string{ + "name": "/var/app/current/public", + } + + fields = map[string]interface{}{ + "capacity_used": 23, + "get_wait_list_size": 3, + } + acc.AssertContainsTaggedFields(t, "passenger_supergroup", fields, tags) + + tags = map[string]string{ + "app_root": "/var/app/current", + "group_name": "/var/app/current/public", + "supergroup_name": "/var/app/current/public", + "pid": "11553", + "code_revision": "899ac7f", + "life_status": "ALIVE", + "process_group_id": "13608", + } + fields = map[string]interface{}{ + "concurrency": 1, + "sessions": 0, + "busyness": 0, + "processed": 951, + "spawner_creation_time": int64(1452746835922747), + "spawn_start_time": int64(1452746844946982), + "spawn_end_time": int64(1452746845013365), + "last_used": int64(1452747071764940), + "uptime": int64(226), // in seconds of 3m 46s + "cpu": int64(58), + "rss": int64(418548), + "pss": int64(319391), + "private_dirty": int64(314900), + "swap": int64(0), + "real_memory": int64(314900), + "vmsize": int64(1563580), + } + acc.AssertContainsTaggedFields(t, "passenger_process", fields, tags) +} + +var sampleStat = ` + + + + 5.0.17 + 1 + 23 + 23 + 23 + 3 + + + + /var/app/current/public + READY + 3 + 23 + foo + + 
/var/app/current/public + /var/app/current/public + /var/app/current + rack + production + QQUrbCVYxbJYpfgyDOwJ + 23 + 0 + 0 + 23 + 3 + 0 + 2 + foo + foo + ALIVE + axcoto + 1001 + axcoto + 1001 + + /var/app/current + /var/app/current/public + rack + /var/app/.rvm/gems/ruby-2.2.0-p645/gems/passenger-5.0.17/helper-scripts/rack-loader.rb + config.ru + Passenger RubyApp + 3 + 90000 + production + / + smart + nobody + nogroup + /var/app/.rvm/gems/ruby-2.2.0-p645/wrappers/ruby + python + node + unix:/tmp/passenger.eKFdvdC/agents.s/ust_router + logging + foo + false + false + foo + 22 + 0 + 300 + 1 + + + + 11553 + 378579907 + 17173df-PoNT3J9HCf + 1 + 0 + 0 + 951 + 1452746835922747 + 1452746844946982 + 1452746845013365 + 1452747071764940 + 0s ago + 3m 46s + 899ac7f + ALIVE + ENABLED + true + 58 + 418548 + 319391 + 314900 + 0 + 314900 + 1563580 + 13608 + Passenger RubyApp: /var/app/current/public + + + main +
unix:/tmp/passenger.eKFdvdC/apps.s/ruby.UWF6zkRJ71aoMXPxpknpWVfC1POFqgWZzbEsdz5v0G46cSSMxJ3GHLFhJaUrK2I
+ session + 1 + 0 +
+ + http +
tcp://127.0.0.1:49888
+ http + 1 + 0 +
+
+
+ + 11563 + 1549681201 + 17173df-pX5iJOipd8 + 1 + 1 + 2147483647 + 756 + 1452746835922747 + 1452746845136882 + 1452746845172460 + 1452747071709179 + 0s ago + 3m 46s + 899ac7f + ALIVE + ENABLED + true + 47 + 418296 + 314036 + 309240 + 0 + 309240 + 1563608 + 13608 + Passenger RubyApp: /var/app/current/public + + + main +
unix:/tmp/passenger.eKFdvdC/apps.s/ruby.PVCh7TmvCi9knqhba2vG5qXrlHGEIwhGrxnUvRbIAD6SPz9m0G7YlJ8HEsREHY3
+ session + 1 + 1 +
+ + http +
tcp://127.0.0.1:52783
+ http + 1 + 0 +
+
+
+
+
+
+
+
`
diff --git a/plugins/inputs/phpfpm/README.md b/plugins/inputs/phpfpm/README.md
new file mode 100644
index 000000000..b853b7fd7
--- /dev/null
+++ b/plugins/inputs/phpfpm/README.md
@@ -0,0 +1,65 @@
+# Telegraf plugin: phpfpm
+
+Get php-fpm stats using either the HTTP status page or the fpm socket.
+
+# Measurements
+
+Meta:
+
+- tags: `pool=poolname`
+
+Measurement names:
+
+- phpfpm
+
+Measurement fields:
+
+- accepted_conn
+- listen_queue
+- max_listen_queue
+- listen_queue_len
+- idle_processes
+- active_processes
+- total_processes
+- max_active_processes
+- max_children_reached
+- slow_requests
+
+# Example output
+
+Using this configuration:
+
+```
+[phpfpm]
+  # An array of addresses to gather stats about. Specify an ip or hostname
+  # with optional port and path, e.g. localhost, 10.10.3.33/server-status, etc.
+  #
+  # The plugin can be configured in three modes:
+  # - unixsocket: the string is the path to the fpm socket, like
+  #   /var/run/php5-fpm.sock
+  # - http: the URL has to start with http:// or https://
+  # - fcgi: the URL has to start with fcgi:// or cgi://, and the socket port must be present
+  #
+  # If no servers are specified, then the default is http://127.0.0.1/status
+  urls = ["http://localhost/status", "10.0.0.12:/var/run/php5-fpm-www2.sock", "fcgi://10.0.0.12:9000/status"]
+```
+
+When run with:
+
+```
+./telegraf -config telegraf.conf -input-filter phpfpm -test
+```
+
+It produces:
+
+```
+* Plugin: phpfpm, Collection 1
+> phpfpm,pool=www accepted_conn=13i,active_processes=2i,idle_processes=1i,listen_queue=0i,listen_queue_len=0i,max_active_processes=2i,max_children_reached=0i,max_listen_queue=0i,slow_requests=0i,total_processes=3i 1453011293083331187
+> phpfpm,pool=www2 accepted_conn=12i,active_processes=1i,idle_processes=2i,listen_queue=0i,listen_queue_len=0i,max_active_processes=2i,max_children_reached=0i,max_listen_queue=0i,slow_requests=0i,total_processes=3i 1453011293083691422
+> phpfpm,pool=www3 accepted_conn=11i,active_processes=1i,idle_processes=2i,listen_queue=0i,listen_queue_len=0i,max_active_processes=2i,max_children_reached=0i,max_listen_queue=0i,slow_requests=0i,total_processes=3i 1453011293083691658
+```
+
+## Note
+
+When using `unixsocket`, you have to ensure that telegraf runs on the same
+host and that the socket path is accessible to the telegraf user.
diff --git a/plugins/inputs/phpfpm/phpfpm.go b/plugins/inputs/phpfpm/phpfpm.go
new file mode 100644
index 000000000..0166f7bea
--- /dev/null
+++ b/plugins/inputs/phpfpm/phpfpm.go
@@ -0,0 +1,245 @@
+package phpfpm
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"os"
+	"strconv"
+	"strings"
+	"sync"
+
+	"github.com/influxdata/telegraf/plugins/inputs"
+)
+
+const (
+	PF_POOL                 = "pool"
+	PF_PROCESS_MANAGER      = "process manager"
+	PF_ACCEPTED_CONN        = "accepted conn"
+	PF_LISTEN_QUEUE         = "listen queue"
+	PF_MAX_LISTEN_QUEUE     = "max listen queue"
+	PF_LISTEN_QUEUE_LEN     = "listen queue len"
+	PF_IDLE_PROCESSES       = "idle processes"
+	PF_ACTIVE_PROCESSES     = "active processes"
+	PF_TOTAL_PROCESSES      = "total processes"
+	PF_MAX_ACTIVE_PROCESSES = "max active processes"
+	PF_MAX_CHILDREN_REACHED = "max children reached"
+	PF_SLOW_REQUESTS        = "slow requests"
+)
+
+type metric map[string]int64
+type poolStat map[string]metric
+
+type phpfpm struct {
+	Urls []string
+
+	client *http.Client
+}
+
+var sampleConfig = `
+  # An array of addresses to gather stats about. Specify an ip or hostname
+  # with optional port and path
+  #
+  # The plugin can be configured in three modes (any one can be used):
+  # - http: the URL must start with http:// or https://, ie:
+  #     "http://localhost/status"
+  #     "http://192.168.130.1/status?full"
+  #
+  # - unixsocket: path to the fpm socket, ie:
+  #     "/var/run/php5-fpm.sock"
+  #   or using a custom fpm status path:
+  #     "/var/run/php5-fpm.sock:fpm-custom-status-path"
+  #
+  # - fcgi: the URL must start with fcgi:// or cgi://, and the port must be present, ie:
+  #     "fcgi://10.0.0.12:9000/status"
+  #     "cgi://10.0.10.12:9001/status"
+  #
+  # Example of gathering from both a local socket and a remote host:
+  # urls = ["http://192.168.1.20/status", "/tmp/fpm.sock"]
+  # If no servers are specified, then the default is http://127.0.0.1/status
+  urls = ["http://localhost/status"]
+`
+
+func (r *phpfpm) SampleConfig() string {
+	return sampleConfig
+}
+
+func (r *phpfpm) Description() string {
+	return "Read metrics of phpfpm, via HTTP status page or socket"
+}
+
+// Gather reads stats from all configured servers and accumulates them.
+// It returns one of the errors encountered while gathering stats (if any).
+func (g *phpfpm) Gather(acc inputs.Accumulator) error {
+	if len(g.Urls) == 0 {
+		return g.gatherServer("http://127.0.0.1/status", acc)
+	}
+
+	var wg sync.WaitGroup
+
+	var outerr error
+
+	for _, serv := range g.Urls {
+		wg.Add(1)
+		go func(serv string) {
+			defer wg.Done()
+			outerr = g.gatherServer(serv, acc)
+		}(serv)
+	}
+
+	wg.Wait()
+
+	return outerr
+}
+
+// Request the status page to get raw stat data and import it
+func (g *phpfpm) gatherServer(addr string, acc inputs.Accumulator) error {
+	if g.client == nil {
+		client := &http.Client{}
+		g.client = client
+	}
+
+	if strings.HasPrefix(addr, "http://") || strings.HasPrefix(addr, "https://") {
+		return g.gatherHttp(addr, acc)
+	}
+
+	var (
+		fcgi       *conn
+		socketPath string
+		statusPath string
+	)
+
+	if strings.HasPrefix(addr, "fcgi://") || strings.HasPrefix(addr, "cgi://") {
+		u, err := url.Parse(addr)
+		if err != nil {
+			return fmt.Errorf("Unable to parse server address '%s': %s", addr, err)
+		}
+		socketAddr := strings.Split(u.Host, ":")
+		fcgiIp := socketAddr[0]
+		fcgiPort, _ := strconv.Atoi(socketAddr[1])
+		fcgi, _ = NewClient(fcgiIp, fcgiPort)
+	} else {
+		socketAddr := strings.Split(addr, ":")
+		if len(socketAddr) >= 2 {
+			socketPath = socketAddr[0]
+			statusPath = socketAddr[1]
+		} else {
+			socketPath = socketAddr[0]
+			statusPath = "status"
+		}
+
+		if _, err := os.Stat(socketPath); os.IsNotExist(err) {
+			return fmt.Errorf("Socket doesn't exist '%s': %s", socketPath, err)
+		}
+		fcgi, _ = NewClient("unix", socketPath)
+	}
+	return g.gatherFcgi(fcgi, statusPath, acc)
+}
+
+// Gather stats using the fcgi protocol
+func (g *phpfpm) gatherFcgi(fcgi *conn, statusPath string, acc inputs.Accumulator) error {
+	fpmOutput, fpmErr, err := fcgi.Request(map[string]string{
+		"SCRIPT_NAME":     "/" + statusPath,
+		"SCRIPT_FILENAME": statusPath,
+		"REQUEST_METHOD":  "GET",
+		"CONTENT_LENGTH":  "0",
+		"SERVER_PROTOCOL": "HTTP/1.0",
+		"SERVER_SOFTWARE": "go / fcgiclient ",
+		"REMOTE_ADDR":     "127.0.0.1",
+	}, "/"+statusPath)
+
+	if len(fpmErr) == 0 && err == nil {
+		importMetric(bytes.NewReader(fpmOutput), acc)
+		return nil
+	} else {
+		return fmt.Errorf("Unable to parse phpfpm status.
Error: %v %v", string(fpmErr), err) + } +} + +// Gather stat using http protocol +func (g *phpfpm) gatherHttp(addr string, acc inputs.Accumulator) error { + u, err := url.Parse(addr) + if err != nil { + return fmt.Errorf("Unable parse server address '%s': %s", addr, err) + } + + req, err := http.NewRequest("GET", fmt.Sprintf("%s://%s%s", u.Scheme, + u.Host, u.Path), nil) + res, err := g.client.Do(req) + if err != nil { + return fmt.Errorf("Unable to connect to phpfpm status page '%s': %v", + addr, err) + } + + if res.StatusCode != 200 { + return fmt.Errorf("Unable to get valid stat result from '%s': %v", + addr, err) + } + + importMetric(res.Body, acc) + return nil +} + +// Import stat data into Telegraf system +func importMetric(r io.Reader, acc inputs.Accumulator) (poolStat, error) { + stats := make(poolStat) + var currentPool string + + scanner := bufio.NewScanner(r) + for scanner.Scan() { + statLine := scanner.Text() + keyvalue := strings.Split(statLine, ":") + + if len(keyvalue) < 2 { + continue + } + fieldName := strings.Trim(keyvalue[0], " ") + // We start to gather data for a new pool here + if fieldName == PF_POOL { + currentPool = strings.Trim(keyvalue[1], " ") + stats[currentPool] = make(metric) + continue + } + + // Start to parse metric for current pool + switch fieldName { + case PF_ACCEPTED_CONN, + PF_LISTEN_QUEUE, + PF_MAX_LISTEN_QUEUE, + PF_LISTEN_QUEUE_LEN, + PF_IDLE_PROCESSES, + PF_ACTIVE_PROCESSES, + PF_TOTAL_PROCESSES, + PF_MAX_ACTIVE_PROCESSES, + PF_MAX_CHILDREN_REACHED, + PF_SLOW_REQUESTS: + fieldValue, err := strconv.ParseInt(strings.Trim(keyvalue[1], " "), 10, 64) + if err == nil { + stats[currentPool][fieldName] = fieldValue + } + } + } + + // Finally, we push the pool metric + for pool := range stats { + tags := map[string]string{ + "pool": pool, + } + fields := make(map[string]interface{}) + for k, v := range stats[pool] { + fields[strings.Replace(k, " ", "_", -1)] = v + } + acc.AddFields("phpfpm", fields, tags) + } + + return stats, nil +} + +func init() { + inputs.Add("phpfpm", func() inputs.Input { + return &phpfpm{} + }) +} diff --git a/plugins/phpfpm/phpfpm_fcgi.go b/plugins/inputs/phpfpm/phpfpm_fcgi.go similarity index 52% rename from plugins/phpfpm/phpfpm_fcgi.go rename to plugins/inputs/phpfpm/phpfpm_fcgi.go index 65f4c789b..03aac7634 100644 --- a/plugins/phpfpm/phpfpm_fcgi.go +++ b/plugins/inputs/phpfpm/phpfpm_fcgi.go @@ -1,13 +1,14 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package fcgi implements the FastCGI protocol. +// Currently only the responder role is supported. +// The protocol is defined at http://www.fastcgi.com/drupal/node/6?q=node/22 package phpfpm -// FastCGI client to request via socket - -// Copyright 2012 Junqing Tan and The Go Authors -// Use of this source code is governed by a BSD-style -// Part of source code is from Go fcgi package - -// Fix bug: Can't recive more than 1 record untill FCGI_END_REQUEST 2012-09-15 -// By: wofeiwo +// This file defines the raw protocol and some utilities used by the child and +// the host. 
import ( "bufio" @@ -15,70 +16,84 @@ import ( "encoding/binary" "errors" "io" + "sync" + "net" "strconv" - "sync" + + "strings" ) -const FCGI_LISTENSOCK_FILENO uint8 = 0 -const FCGI_HEADER_LEN uint8 = 8 -const VERSION_1 uint8 = 1 -const FCGI_NULL_REQUEST_ID uint8 = 0 -const FCGI_KEEP_CONN uint8 = 1 +// recType is a record type, as defined by +// http://www.fastcgi.com/devkit/doc/fcgi-spec.html#S8 +type recType uint8 const ( - FCGI_BEGIN_REQUEST uint8 = iota + 1 - FCGI_ABORT_REQUEST - FCGI_END_REQUEST - FCGI_PARAMS - FCGI_STDIN - FCGI_STDOUT - FCGI_STDERR - FCGI_DATA - FCGI_GET_VALUES - FCGI_GET_VALUES_RESULT - FCGI_UNKNOWN_TYPE - FCGI_MAXTYPE = FCGI_UNKNOWN_TYPE + typeBeginRequest recType = 1 + typeAbortRequest recType = 2 + typeEndRequest recType = 3 + typeParams recType = 4 + typeStdin recType = 5 + typeStdout recType = 6 + typeStderr recType = 7 + typeData recType = 8 + typeGetValues recType = 9 + typeGetValuesResult recType = 10 + typeUnknownType recType = 11 ) -const ( - FCGI_RESPONDER uint8 = iota + 1 - FCGI_AUTHORIZER - FCGI_FILTER -) +// keep the connection between web-server and responder open after request +const flagKeepConn = 1 const ( - FCGI_REQUEST_COMPLETE uint8 = iota - FCGI_CANT_MPX_CONN - FCGI_OVERLOADED - FCGI_UNKNOWN_ROLE -) - -const ( - FCGI_MAX_CONNS string = "MAX_CONNS" - FCGI_MAX_REQS string = "MAX_REQS" - FCGI_MPXS_CONNS string = "MPXS_CONNS" -) - -const ( - maxWrite = 6553500 // maximum record body + maxWrite = 65535 // maximum record body maxPad = 255 ) +const ( + roleResponder = iota + 1 // only Responders are implemented. + roleAuthorizer + roleFilter +) + +const ( + statusRequestComplete = iota + statusCantMultiplex + statusOverloaded + statusUnknownRole +) + +const headerLen = 8 + type header struct { Version uint8 - Type uint8 + Type recType Id uint16 ContentLength uint16 PaddingLength uint8 Reserved uint8 } +type beginRequest struct { + role uint16 + flags uint8 + reserved [5]uint8 +} + +func (br *beginRequest) read(content []byte) error { + if len(content) != 8 { + return errors.New("fcgi: invalid begin request record") + } + br.role = binary.BigEndian.Uint16(content) + br.flags = content[2] + return nil +} + // for padding so we don't have to allocate all the time // not synchronized because we don't care what the contents are var pad [maxPad]byte -func (h *header) init(recType uint8, reqId uint16, contentLength int) { +func (h *header) init(recType recType, reqId uint16, contentLength int) { h.Version = 1 h.Type = recType h.Id = reqId @@ -86,6 +101,26 @@ func (h *header) init(recType uint8, reqId uint16, contentLength int) { h.PaddingLength = uint8(-contentLength & 7) } +// conn sends records over rwc +type conn struct { + mutex sync.Mutex + rwc io.ReadWriteCloser + + // to avoid allocations + buf bytes.Buffer + h header +} + +func newConn(rwc io.ReadWriteCloser) *conn { + return &conn{rwc: rwc} +} + +func (c *conn) Close() error { + c.mutex.Lock() + defer c.mutex.Unlock() + return c.rwc.Close() +} + type record struct { h header buf [maxWrite + maxPad]byte @@ -109,69 +144,39 @@ func (r *record) content() []byte { return r.buf[:r.h.ContentLength] } -type FCGIClient struct { - mutex sync.Mutex - rwc io.ReadWriteCloser - h header - buf bytes.Buffer - keepAlive bool -} - -func NewClient(h string, args ...interface{}) (fcgi *FCGIClient, err error) { - var conn net.Conn - if len(args) != 1 { - err = errors.New("fcgi: not enough params") - return - } - switch args[0].(type) { - case int: - addr := h + ":" + strconv.FormatInt(int64(args[0].(int)), 10) - conn, 
err = net.Dial("tcp", addr) - case string: - laddr := net.UnixAddr{Name: args[0].(string), Net: h} - conn, err = net.DialUnix(h, nil, &laddr) - default: - err = errors.New("fcgi: we only accept int (port) or string (socket) params.") - } - fcgi = &FCGIClient{ - rwc: conn, - keepAlive: false, - } - return -} - -func (client *FCGIClient) writeRecord(recType uint8, reqId uint16, content []byte) (err error) { - client.mutex.Lock() - defer client.mutex.Unlock() - client.buf.Reset() - client.h.init(recType, reqId, len(content)) - if err := binary.Write(&client.buf, binary.BigEndian, client.h); err != nil { +// writeRecord writes and sends a single record. +func (c *conn) writeRecord(recType recType, reqId uint16, b []byte) error { + c.mutex.Lock() + defer c.mutex.Unlock() + c.buf.Reset() + c.h.init(recType, reqId, len(b)) + if err := binary.Write(&c.buf, binary.BigEndian, c.h); err != nil { return err } - if _, err := client.buf.Write(content); err != nil { + if _, err := c.buf.Write(b); err != nil { return err } - if _, err := client.buf.Write(pad[:client.h.PaddingLength]); err != nil { + if _, err := c.buf.Write(pad[:c.h.PaddingLength]); err != nil { return err } - _, err = client.rwc.Write(client.buf.Bytes()) + _, err := c.rwc.Write(c.buf.Bytes()) return err } -func (client *FCGIClient) writeBeginRequest(reqId uint16, role uint16, flags uint8) error { +func (c *conn) writeBeginRequest(reqId uint16, role uint16, flags uint8) error { b := [8]byte{byte(role >> 8), byte(role), flags} - return client.writeRecord(FCGI_BEGIN_REQUEST, reqId, b[:]) + return c.writeRecord(typeBeginRequest, reqId, b[:]) } -func (client *FCGIClient) writeEndRequest(reqId uint16, appStatus int, protocolStatus uint8) error { +func (c *conn) writeEndRequest(reqId uint16, appStatus int, protocolStatus uint8) error { b := make([]byte, 8) binary.BigEndian.PutUint32(b, uint32(appStatus)) b[4] = protocolStatus - return client.writeRecord(FCGI_END_REQUEST, reqId, b) + return c.writeRecord(typeEndRequest, reqId, b) } -func (client *FCGIClient) writePairs(recType uint8, reqId uint16, pairs map[string]string) error { - w := newWriter(client, recType, reqId) +func (c *conn) writePairs(recType recType, reqId uint16, pairs map[string]string) error { + w := newWriter(c, recType, reqId) b := make([]byte, 8) for k, v := range pairs { n := encodeSize(b, uint32(len(k))) @@ -238,7 +243,7 @@ func (w *bufWriter) Close() error { return w.closer.Close() } -func newWriter(c *FCGIClient, recType uint8, reqId uint16) *bufWriter { +func newWriter(c *conn, recType recType, reqId uint16) *bufWriter { s := &streamWriter{c: c, recType: recType, reqId: reqId} w := bufio.NewWriterSize(s, maxWrite) return &bufWriter{s, w} @@ -247,8 +252,8 @@ func newWriter(c *FCGIClient, recType uint8, reqId uint16) *bufWriter { // streamWriter abstracts out the separation of a stream into discrete records. // It only writes maxWrite bytes at a time. 
type streamWriter struct { - c *FCGIClient - recType uint8 + c *conn + recType recType reqId uint16 } @@ -273,22 +278,44 @@ func (w *streamWriter) Close() error { return w.c.writeRecord(w.recType, w.reqId, nil) } -func (client *FCGIClient) Request(env map[string]string, reqStr string) (retout []byte, reterr []byte, err error) { +func NewClient(h string, args ...interface{}) (fcgi *conn, err error) { + var con net.Conn + if len(args) != 1 { + err = errors.New("fcgi: not enough params") + return + } + switch args[0].(type) { + case int: + addr := h + ":" + strconv.FormatInt(int64(args[0].(int)), 10) + con, err = net.Dial("tcp", addr) + case string: + laddr := net.UnixAddr{Name: args[0].(string), Net: h} + con, err = net.DialUnix(h, nil, &laddr) + default: + err = errors.New("fcgi: we only accept int (port) or string (socket) params.") + } + fcgi = &conn{ + rwc: con, + } + return +} - var reqId uint16 = 1 +func (client *conn) Request(env map[string]string, requestData string) (retout []byte, reterr []byte, err error) { defer client.rwc.Close() + var reqId uint16 = 1 - err = client.writeBeginRequest(reqId, uint16(FCGI_RESPONDER), 0) + err = client.writeBeginRequest(reqId, uint16(roleResponder), 0) if err != nil { return } - err = client.writePairs(FCGI_PARAMS, reqId, env) + + err = client.writePairs(typeParams, reqId, env) if err != nil { return } - if len(reqStr) > 0 { - err = client.writeRecord(FCGI_STDIN, reqId, []byte(reqStr)) - if err != nil { + + if len(requestData) > 0 { + if err = client.writeRecord(typeStdin, reqId, []byte(requestData)); err != nil { return } } @@ -297,23 +324,25 @@ func (client *FCGIClient) Request(env map[string]string, reqStr string) (retout var err1 error // recive untill EOF or FCGI_END_REQUEST +READ_LOOP: for { err1 = rec.read(client.rwc) - if err1 != nil { + if err1 != nil && strings.Contains(err1.Error(), "use of closed network connection") { if err1 != io.EOF { err = err1 } break } + switch { - case rec.h.Type == FCGI_STDOUT: + case rec.h.Type == typeStdout: retout = append(retout, rec.content()...) - case rec.h.Type == FCGI_STDERR: + case rec.h.Type == typeStderr: reterr = append(reterr, rec.content()...) 
- case rec.h.Type == FCGI_END_REQUEST: + case rec.h.Type == typeEndRequest: fallthrough default: - break + break READ_LOOP } } diff --git a/plugins/inputs/phpfpm/phpfpm_test.go b/plugins/inputs/phpfpm/phpfpm_test.go new file mode 100644 index 000000000..c965e5a13 --- /dev/null +++ b/plugins/inputs/phpfpm/phpfpm_test.go @@ -0,0 +1,241 @@ +package phpfpm + +import ( + "crypto/rand" + "encoding/binary" + "fmt" + "net" + "net/http" + "net/http/fcgi" + "net/http/httptest" + "testing" + + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +type statServer struct{} + +// We create a fake server to return test data +func (s statServer) ServeHTTP(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "text/plain") + w.Header().Set("Content-Length", fmt.Sprint(len(outputSample))) + fmt.Fprint(w, outputSample) +} + +func TestPhpFpmGeneratesMetrics_From_Http(t *testing.T) { + sv := statServer{} + ts := httptest.NewServer(sv) + defer ts.Close() + + r := &phpfpm{ + Urls: []string{ts.URL}, + } + + var acc testutil.Accumulator + + err := r.Gather(&acc) + require.NoError(t, err) + + tags := map[string]string{ + "pool": "www", + } + + fields := map[string]interface{}{ + "accepted_conn": int64(3), + "listen_queue": int64(1), + "max_listen_queue": int64(0), + "listen_queue_len": int64(0), + "idle_processes": int64(1), + "active_processes": int64(1), + "total_processes": int64(2), + "max_active_processes": int64(1), + "max_children_reached": int64(2), + "slow_requests": int64(1), + } + + acc.AssertContainsTaggedFields(t, "phpfpm", fields, tags) +} + +func TestPhpFpmGeneratesMetrics_From_Fcgi(t *testing.T) { + // Let OS find an available port + tcp, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatal("Cannot initalize test server") + } + defer tcp.Close() + + s := statServer{} + go fcgi.Serve(tcp, s) + + //Now we tested again above server + r := &phpfpm{ + Urls: []string{"fcgi://" + tcp.Addr().String() + "/status"}, + } + + var acc testutil.Accumulator + err = r.Gather(&acc) + require.NoError(t, err) + + tags := map[string]string{ + "pool": "www", + } + + fields := map[string]interface{}{ + "accepted_conn": int64(3), + "listen_queue": int64(1), + "max_listen_queue": int64(0), + "listen_queue_len": int64(0), + "idle_processes": int64(1), + "active_processes": int64(1), + "total_processes": int64(2), + "max_active_processes": int64(1), + "max_children_reached": int64(2), + "slow_requests": int64(1), + } + + acc.AssertContainsTaggedFields(t, "phpfpm", fields, tags) +} + +func TestPhpFpmGeneratesMetrics_From_Socket(t *testing.T) { + // Create a socket in /tmp because we always have write permission and if the + // removing of socket fail when system restart /tmp is clear so + // we don't have junk files around + var randomNumber int64 + binary.Read(rand.Reader, binary.LittleEndian, &randomNumber) + tcp, err := net.Listen("unix", fmt.Sprintf("/tmp/test-fpm%d.sock", randomNumber)) + if err != nil { + t.Fatal("Cannot initalize server on port ") + } + + defer tcp.Close() + s := statServer{} + go fcgi.Serve(tcp, s) + + r := &phpfpm{ + Urls: []string{tcp.Addr().String()}, + } + + var acc testutil.Accumulator + + err = r.Gather(&acc) + require.NoError(t, err) + + tags := map[string]string{ + "pool": "www", + } + + fields := map[string]interface{}{ + "accepted_conn": int64(3), + "listen_queue": int64(1), + "max_listen_queue": int64(0), + "listen_queue_len": int64(0), + "idle_processes": int64(1), + 
"active_processes": int64(1), + "total_processes": int64(2), + "max_active_processes": int64(1), + "max_children_reached": int64(2), + "slow_requests": int64(1), + } + + acc.AssertContainsTaggedFields(t, "phpfpm", fields, tags) +} + +func TestPhpFpmGeneratesMetrics_From_Socket_Custom_Status_Path(t *testing.T) { + // Create a socket in /tmp because we always have write permission. If the + // removing of socket fail we won't have junk files around. Cuz when system + // restart, it clears out /tmp + var randomNumber int64 + binary.Read(rand.Reader, binary.LittleEndian, &randomNumber) + tcp, err := net.Listen("unix", fmt.Sprintf("/tmp/test-fpm%d.sock", randomNumber)) + if err != nil { + t.Fatal("Cannot initalize server on port ") + } + + defer tcp.Close() + s := statServer{} + go fcgi.Serve(tcp, s) + + r := &phpfpm{ + Urls: []string{tcp.Addr().String() + ":custom-status-path"}, + } + + var acc testutil.Accumulator + + err = r.Gather(&acc) + require.NoError(t, err) + + tags := map[string]string{ + "pool": "www", + } + + fields := map[string]interface{}{ + "accepted_conn": int64(3), + "listen_queue": int64(1), + "max_listen_queue": int64(0), + "listen_queue_len": int64(0), + "idle_processes": int64(1), + "active_processes": int64(1), + "total_processes": int64(2), + "max_active_processes": int64(1), + "max_children_reached": int64(2), + "slow_requests": int64(1), + } + + acc.AssertContainsTaggedFields(t, "phpfpm", fields, tags) +} + +//When not passing server config, we default to localhost +//We just want to make sure we did request stat from localhost +func TestPhpFpmDefaultGetFromLocalhost(t *testing.T) { + r := &phpfpm{} + + var acc testutil.Accumulator + + err := r.Gather(&acc) + require.Error(t, err) + assert.Contains(t, err.Error(), "127.0.0.1/status") +} + +func TestPhpFpmGeneratesMetrics_Throw_Error_When_Fpm_Status_Is_Not_Responding(t *testing.T) { + r := &phpfpm{ + Urls: []string{"http://aninvalidone"}, + } + + var acc testutil.Accumulator + + err := r.Gather(&acc) + require.Error(t, err) + assert.Contains(t, err.Error(), `Unable to connect to phpfpm status page 'http://aninvalidone': Get http://aninvalidone: dial tcp: lookup aninvalidone`) +} + +func TestPhpFpmGeneratesMetrics_Throw_Error_When_Socket_Path_Is_Invalid(t *testing.T) { + r := &phpfpm{ + Urls: []string{"/tmp/invalid.sock"}, + } + + var acc testutil.Accumulator + + err := r.Gather(&acc) + require.Error(t, err) + assert.Equal(t, `Socket doesn't exist '/tmp/invalid.sock': stat /tmp/invalid.sock: no such file or directory`, err.Error()) + +} + +const outputSample = ` +pool: www +process manager: dynamic +start time: 11/Oct/2015:23:38:51 +0000 +start since: 1991 +accepted conn: 3 +listen queue: 1 +max listen queue: 0 +listen queue len: 0 +idle processes: 1 +active processes: 1 +total processes: 2 +max active processes: 1 +max children reached: 2 +slow requests: 1 +` diff --git a/plugins/ping/ping.go b/plugins/inputs/ping/ping.go similarity index 91% rename from plugins/ping/ping.go rename to plugins/inputs/ping/ping.go index e894fe297..aa1d5bf36 100644 --- a/plugins/ping/ping.go +++ b/plugins/inputs/ping/ping.go @@ -7,7 +7,7 @@ import ( "strings" "sync" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdata/telegraf/plugins/inputs" ) // HostPinger is a function that runs the "ping" function using a list of @@ -56,7 +56,7 @@ func (_ *Ping) SampleConfig() string { return sampleConfig } -func (p *Ping) Gather(acc plugins.Accumulator) error { +func (p *Ping) Gather(acc inputs.Accumulator) error { var wg sync.WaitGroup 
errorChannel := make(chan error, len(p.Urls)*2) @@ -64,7 +64,7 @@ func (p *Ping) Gather(acc plugins.Accumulator) error { // Spin off a go routine for each url to ping for _, url := range p.Urls { wg.Add(1) - go func(url string, acc plugins.Accumulator) { + go func(url string, acc inputs.Accumulator) { defer wg.Done() args := p.args(url) out, err := p.pingHost(args...) @@ -82,10 +82,15 @@ func (p *Ping) Gather(acc plugins.Accumulator) error { } // Calculate packet loss percentage loss := float64(trans-rec) / float64(trans) * 100.0 - acc.Add("packets_transmitted", trans, tags) - acc.Add("packets_received", rec, tags) - acc.Add("percent_packet_loss", loss, tags) - acc.Add("average_response_ms", avg, tags) + fields := map[string]interface{}{ + "packets_transmitted": trans, + "packets_received": rec, + "percent_packet_loss": loss, + } + if avg > 0 { + fields["average_response_ms"] = avg + } + acc.AddFields("ping", fields, tags) }(url, acc) } @@ -171,7 +176,7 @@ func processPingOutput(out string) (int, int, float64, error) { } func init() { - plugins.Add("ping", func() plugins.Plugin { + inputs.Add("ping", func() inputs.Input { return &Ping{pingHost: hostPinger} }) } diff --git a/plugins/ping/ping_test.go b/plugins/inputs/ping/ping_test.go similarity index 83% rename from plugins/ping/ping_test.go rename to plugins/inputs/ping/ping_test.go index 5fed0b6c8..be603a49c 100644 --- a/plugins/ping/ping_test.go +++ b/plugins/inputs/ping/ping_test.go @@ -6,7 +6,7 @@ import ( "sort" "testing" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" ) @@ -120,18 +120,16 @@ func TestPingGather(t *testing.T) { p.Gather(&acc) tags := map[string]string{"url": "www.google.com"} - assert.NoError(t, acc.ValidateTaggedValue("packets_transmitted", 5, tags)) - assert.NoError(t, acc.ValidateTaggedValue("packets_received", 5, tags)) - assert.NoError(t, acc.ValidateTaggedValue("percent_packet_loss", 0.0, tags)) - assert.NoError(t, acc.ValidateTaggedValue("average_response_ms", - 43.628, tags)) + fields := map[string]interface{}{ + "packets_transmitted": 5, + "packets_received": 5, + "percent_packet_loss": 0.0, + "average_response_ms": 43.628, + } + acc.AssertContainsTaggedFields(t, "ping", fields, tags) tags = map[string]string{"url": "www.reddit.com"} - assert.NoError(t, acc.ValidateTaggedValue("packets_transmitted", 5, tags)) - assert.NoError(t, acc.ValidateTaggedValue("packets_received", 5, tags)) - assert.NoError(t, acc.ValidateTaggedValue("percent_packet_loss", 0.0, tags)) - assert.NoError(t, acc.ValidateTaggedValue("average_response_ms", - 43.628, tags)) + acc.AssertContainsTaggedFields(t, "ping", fields, tags) } var lossyPingOutput = ` @@ -159,10 +157,13 @@ func TestLossyPingGather(t *testing.T) { p.Gather(&acc) tags := map[string]string{"url": "www.google.com"} - assert.NoError(t, acc.ValidateTaggedValue("packets_transmitted", 5, tags)) - assert.NoError(t, acc.ValidateTaggedValue("packets_received", 3, tags)) - assert.NoError(t, acc.ValidateTaggedValue("percent_packet_loss", 40.0, tags)) - assert.NoError(t, acc.ValidateTaggedValue("average_response_ms", 44.033, tags)) + fields := map[string]interface{}{ + "packets_transmitted": 5, + "packets_received": 3, + "percent_packet_loss": 40.0, + "average_response_ms": 44.033, + } + acc.AssertContainsTaggedFields(t, "ping", fields, tags) } var errorPingOutput = ` @@ -188,10 +189,12 @@ func TestBadPingGather(t *testing.T) { p.Gather(&acc) tags := map[string]string{"url": "www.amazon.com"} - 
assert.NoError(t, acc.ValidateTaggedValue("packets_transmitted", 2, tags)) - assert.NoError(t, acc.ValidateTaggedValue("packets_received", 0, tags)) - assert.NoError(t, acc.ValidateTaggedValue("percent_packet_loss", 100.0, tags)) - assert.NoError(t, acc.ValidateTaggedValue("average_response_ms", 0.0, tags)) + fields := map[string]interface{}{ + "packets_transmitted": 2, + "packets_received": 0, + "percent_packet_loss": 100.0, + } + acc.AssertContainsTaggedFields(t, "ping", fields, tags) } func mockFatalHostPinger(args ...string) (string, error) { diff --git a/plugins/postgresql/README.md b/plugins/inputs/postgresql/README.md similarity index 100% rename from plugins/postgresql/README.md rename to plugins/inputs/postgresql/README.md diff --git a/plugins/postgresql/postgresql.go b/plugins/inputs/postgresql/postgresql.go similarity index 61% rename from plugins/postgresql/postgresql.go rename to plugins/inputs/postgresql/postgresql.go index a31a9b4d2..3398f5ac0 100644 --- a/plugins/postgresql/postgresql.go +++ b/plugins/inputs/postgresql/postgresql.go @@ -6,51 +6,37 @@ import ( "fmt" "strings" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdata/telegraf/plugins/inputs" _ "github.com/lib/pq" ) -type Server struct { +type Postgresql struct { Address string Databases []string OrderedColumns []string } -type Postgresql struct { - Servers []*Server -} - var ignoredColumns = map[string]bool{"datid": true, "datname": true, "stats_reset": true} var sampleConfig = ` - # specify servers via an array of tables - [[plugins.postgresql.servers]] - # specify address via a url matching: # postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full] # or a simple string: # host=localhost user=pqotest password=... sslmode=... dbname=app_production # - # All connection parameters are optional. By default, the host is localhost - # and the user is the currently running user. For localhost, we default - # to sslmode=disable as well. + # All connection parameters are optional. # # Without the dbname parameter, the driver will default to a database # with the same name as the user. This dbname is just for instantiating a # connection with the server and doesn't restrict the databases we are trying # to grab metrics for. # - - address = "sslmode=disable" + address = "host=localhost user=postgres sslmode=disable" # A list of databases to pull metrics about. If not specified, metrics for all # databases are gathered. 
- - # databases = ["app_production", "blah_testing"] - - # [[plugins.postgresql.servers]] - # address = "influx@remoteserver" + # databases = ["app_production", "testing"] ` func (p *Postgresql) SampleConfig() string { @@ -65,42 +51,27 @@ func (p *Postgresql) IgnoredColumns() map[string]bool { return ignoredColumns } -var localhost = &Server{Address: "sslmode=disable"} +var localhost = "host=localhost sslmode=disable" -func (p *Postgresql) Gather(acc plugins.Accumulator) error { - if len(p.Servers) == 0 { - p.gatherServer(localhost, acc) - return nil - } - - for _, serv := range p.Servers { - err := p.gatherServer(serv, acc) - if err != nil { - return err - } - } - - return nil -} - -func (p *Postgresql) gatherServer(serv *Server, acc plugins.Accumulator) error { +func (p *Postgresql) Gather(acc inputs.Accumulator) error { var query string - if serv.Address == "" || serv.Address == "localhost" { - serv = localhost + if p.Address == "" || p.Address == "localhost" { + p.Address = localhost } - db, err := sql.Open("postgres", serv.Address) + db, err := sql.Open("postgres", p.Address) if err != nil { return err } defer db.Close() - if len(serv.Databases) == 0 { + if len(p.Databases) == 0 { query = `SELECT * FROM pg_stat_database` } else { - query = fmt.Sprintf(`SELECT * FROM pg_stat_database WHERE datname IN ('%s')`, strings.Join(serv.Databases, "','")) + query = fmt.Sprintf(`SELECT * FROM pg_stat_database WHERE datname IN ('%s')`, + strings.Join(p.Databases, "','")) } rows, err := db.Query(query) @@ -111,13 +82,13 @@ func (p *Postgresql) gatherServer(serv *Server, acc plugins.Accumulator) error { defer rows.Close() // grab the column information from the result - serv.OrderedColumns, err = rows.Columns() + p.OrderedColumns, err = rows.Columns() if err != nil { return err } for rows.Next() { - err = p.accRow(rows, acc, serv) + err = p.accRow(rows, acc) if err != nil { return err } @@ -130,20 +101,20 @@ type scanner interface { Scan(dest ...interface{}) error } -func (p *Postgresql) accRow(row scanner, acc plugins.Accumulator, serv *Server) error { +func (p *Postgresql) accRow(row scanner, acc inputs.Accumulator) error { var columnVars []interface{} var dbname bytes.Buffer // this is where we'll store the column name with its *interface{} columnMap := make(map[string]*interface{}) - for _, column := range serv.OrderedColumns { + for _, column := range p.OrderedColumns { columnMap[column] = new(interface{}) } // populate the array of interface{} with the pointers in the right order for i := 0; i < len(columnMap); i++ { - columnVars = append(columnVars, columnMap[serv.OrderedColumns[i]]) + columnVars = append(columnVars, columnMap[p.OrderedColumns[i]]) } // deconstruct array of variables and send to Scan @@ -159,20 +130,22 @@ func (p *Postgresql) accRow(row scanner, acc plugins.Accumulator, serv *Server) dbname.WriteString(string(dbnameChars[i])) } - tags := map[string]string{"server": serv.Address, "db": dbname.String()} + tags := map[string]string{"server": p.Address, "db": dbname.String()} + fields := make(map[string]interface{}) for col, val := range columnMap { _, ignore := ignoredColumns[col] if !ignore { - acc.Add(col, *val, tags) + fields[col] = *val } } + acc.AddFields("postgresql", fields, tags) return nil } func init() { - plugins.Add("postgresql", func() plugins.Plugin { + inputs.Add("postgresql", func() inputs.Input { return &Postgresql{} }) } diff --git a/plugins/postgresql/postgresql_test.go b/plugins/inputs/postgresql/postgresql_test.go similarity index 71% rename from 
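Collapsing `Servers []*Server` into a flat `Postgresql` struct works because the registry at the bottom of the file hands out a fresh instance per config block, so multiple servers become repeated plugin blocks instead of an inner array of tables. A rough sketch of that one-instance-per-block pattern, assuming a hypothetical loader loop (the names here are illustrative, not Telegraf's config machinery):

```
package main

import "fmt"

// input mirrors the minimal Gather contract; the registry maps plugin
// names to factories so each config block gets its own stateful instance.
type input interface{ Gather() error }

type postgresql struct{ Address string }

func (p *postgresql) Gather() error {
	fmt.Println("gathering from", p.Address)
	return nil
}

var registry = map[string]func() input{}

func main() {
	registry["postgresql"] = func() input { return &postgresql{} }

	// Two hypothetical [[inputs.postgresql]] blocks in the config become
	// two independent instances of the same input.
	addrs := []string{
		"host=localhost user=postgres sslmode=disable",
		"host=replica user=postgres sslmode=disable",
	}
	for _, a := range addrs {
		in := registry["postgresql"]().(*postgresql)
		in.Address = a
		in.Gather()
	}
}
```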
plugins/postgresql/postgresql_test.go rename to plugins/inputs/postgresql/postgresql_test.go index 4a53a2e8f..8baae39a6 100644 --- a/plugins/postgresql/postgresql_test.go +++ b/plugins/inputs/postgresql/postgresql_test.go @@ -4,7 +4,7 @@ import ( "fmt" "testing" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -15,13 +15,9 @@ func TestPostgresqlGeneratesMetrics(t *testing.T) { } p := &Postgresql{ - Servers: []*Server{ - { - Address: fmt.Sprintf("host=%s user=postgres sslmode=disable", - testutil.GetLocalHost()), - Databases: []string{"postgres"}, - }, - }, + Address: fmt.Sprintf("host=%s user=postgres sslmode=disable", + testutil.GetLocalHost()), + Databases: []string{"postgres"}, } var acc testutil.Accumulator @@ -30,7 +26,7 @@ func TestPostgresqlGeneratesMetrics(t *testing.T) { require.NoError(t, err) availableColumns := make(map[string]bool) - for _, col := range p.Servers[0].OrderedColumns { + for _, col := range p.OrderedColumns { availableColumns[col] = true } @@ -61,7 +57,7 @@ func TestPostgresqlGeneratesMetrics(t *testing.T) { for _, metric := range intMetrics { _, ok := availableColumns[metric] if ok { - assert.True(t, acc.HasIntValue(metric)) + assert.True(t, acc.HasIntField("postgresql", metric)) metricsCounted++ } } @@ -69,7 +65,7 @@ func TestPostgresqlGeneratesMetrics(t *testing.T) { for _, metric := range floatMetrics { _, ok := availableColumns[metric] if ok { - assert.True(t, acc.HasFloatValue(metric)) + assert.True(t, acc.HasFloatField("postgresql", metric)) metricsCounted++ } } @@ -84,13 +80,9 @@ func TestPostgresqlTagsMetricsWithDatabaseName(t *testing.T) { } p := &Postgresql{ - Servers: []*Server{ - { - Address: fmt.Sprintf("host=%s user=postgres sslmode=disable", - testutil.GetLocalHost()), - Databases: []string{"postgres"}, - }, - }, + Address: fmt.Sprintf("host=%s user=postgres sslmode=disable", + testutil.GetLocalHost()), + Databases: []string{"postgres"}, } var acc testutil.Accumulator @@ -98,7 +90,7 @@ func TestPostgresqlTagsMetricsWithDatabaseName(t *testing.T) { err := p.Gather(&acc) require.NoError(t, err) - point, ok := acc.Get("xact_commit") + point, ok := acc.Get("postgresql") require.True(t, ok) assert.Equal(t, "postgres", point.Tags["db"]) @@ -110,12 +102,8 @@ func TestPostgresqlDefaultsToAllDatabases(t *testing.T) { } p := &Postgresql{ - Servers: []*Server{ - { - Address: fmt.Sprintf("host=%s user=postgres sslmode=disable", - testutil.GetLocalHost()), - }, - }, + Address: fmt.Sprintf("host=%s user=postgres sslmode=disable", + testutil.GetLocalHost()), } var acc testutil.Accumulator @@ -126,7 +114,7 @@ func TestPostgresqlDefaultsToAllDatabases(t *testing.T) { var found bool for _, pnt := range acc.Points { - if pnt.Measurement == "xact_commit" { + if pnt.Measurement == "postgresql" { if pnt.Tags["db"] == "postgres" { found = true break @@ -143,12 +131,8 @@ func TestPostgresqlIgnoresUnwantedColumns(t *testing.T) { } p := &Postgresql{ - Servers: []*Server{ - { - Address: fmt.Sprintf("host=%s user=postgres sslmode=disable", - testutil.GetLocalHost()), - }, - }, + Address: fmt.Sprintf("host=%s user=postgres sslmode=disable", + testutil.GetLocalHost()), } var acc testutil.Accumulator diff --git a/plugins/procstat/README.md b/plugins/inputs/procstat/README.md similarity index 80% rename from plugins/procstat/README.md rename to plugins/inputs/procstat/README.md index d2322ab1f..0c37af509 100644 --- a/plugins/procstat/README.md +++ 
b/plugins/inputs/procstat/README.md
@@ -16,25 +16,19 @@ individual process specific measurements.
 Example:
 
 ```
- [procstat]
+[[inputs.procstat]]
+  exe = "influxd"
+  prefix = "influxd"
 
- [[procstat.specifications]]
- exe = "influxd"
- prefix = "influxd"
-
- [[procstat.specifications]]
- pid_file = "/var/run/lxc/dnsmasq.pid"
+[[inputs.procstat]]
+  pid_file = "/var/run/lxc/dnsmasq.pid"
 ```
 
 The above configuration would result in output like:
 
 ```
-[...]
-> [name="dnsmasq" pid="44979"] procstat_cpu_user value=0.14
-> [name="dnsmasq" pid="44979"] procstat_cpu_system value=0.07
-[...]
-> [name="influxd" pid="34337"] procstat_influxd_cpu_user value=25.43
-> [name="influxd" pid="34337"] procstat_influxd_cpu_system value=21.82
+> procstat,name="dnsmasq",pid="44979" cpu_time_user=0.14,cpu_time_system=0.07
+> procstat,name="influxd",pid="34337" influxd_cpu_time_user=25.43,influxd_cpu_time_system=21.82
 ```
 
 # Measurements
diff --git a/plugins/procstat/procstat.go b/plugins/inputs/procstat/procstat.go
similarity index 63%
rename from plugins/procstat/procstat.go
rename to plugins/inputs/procstat/procstat.go
index 1370a0003..aa56bd501 100644
--- a/plugins/procstat/procstat.go
+++ b/plugins/inputs/procstat/procstat.go
@@ -7,31 +7,28 @@ import (
 	"os/exec"
 	"strconv"
 	"strings"
-	"sync"
 
 	"github.com/shirou/gopsutil/process"
 
-	"github.com/influxdb/telegraf/plugins"
+	"github.com/influxdata/telegraf/plugins/inputs"
 )
 
-type Specification struct {
+type Procstat struct {
 	PidFile string `toml:"pid_file"`
 	Exe     string
-	Prefix  string
 	Pattern string
-}
+	Prefix  string
 
-type Procstat struct {
-	Specifications []*Specification
+	pidmap map[int32]*process.Process
 }
 
 func NewProcstat() *Procstat {
-	return &Procstat{}
+	return &Procstat{
+		pidmap: make(map[int32]*process.Process),
+	}
 }
 
 var sampleConfig = `
-  [[plugins.procstat.specifications]]
-  prefix = "" # optional string to prefix measurements
   # Must specify one of: pid_file, exe, or pattern
   # PID file to monitor process
   pid_file = "/var/run/nginx.pid"
@@ -39,6 +36,9 @@ var sampleConfig = `
   # exe = "nginx"
   # pattern as argument for pgrep (ie, pgrep -f <pattern>)
   # pattern = "nginx"
+
+  # Field name prefix
+  prefix = ""
 `
 
 func (_ *Procstat) SampleConfig() string {
@@ -49,46 +49,39 @@ func (_ *Procstat) Description() string {
 	return "Monitor process cpu and memory usage"
 }
 
-func (p *Procstat) Gather(acc plugins.Accumulator) error {
-	var wg sync.WaitGroup
-
-	for _, specification := range p.Specifications {
-		wg.Add(1)
-		go func(spec *Specification, acc plugins.Accumulator) {
-			defer wg.Done()
-			procs, err := spec.createProcesses()
-			if err != nil {
-				log.Printf("Error: procstat getting process, exe: [%s] pidfile: [%s] pattern: [%s] %s",
-					spec.Exe, spec.PidFile, spec.Pattern, err.Error())
-			} else {
-				for _, proc := range procs {
-					p := NewSpecProcessor(spec.Prefix, acc, proc)
-					p.pushMetrics()
-				}
-			}
-		}(specification, acc)
+func (p *Procstat) Gather(acc inputs.Accumulator) error {
+	err := p.createProcesses()
+	if err != nil {
+		log.Printf("Error: procstat getting process, exe: [%s] pidfile: [%s] pattern: [%s] %s",
+			p.Exe, p.PidFile, p.Pattern, err.Error())
+	} else {
+		for _, proc := range p.pidmap {
+			p := NewSpecProcessor(p.Prefix, acc, proc)
+			p.pushMetrics()
+		}
 	}
-	wg.Wait()
 
 	return nil
 }
 
-func (spec *Specification) createProcesses() ([]*process.Process, error) {
-	var out []*process.Process
+func (p *Procstat) createProcesses() error {
 	var errstring string
 	var outerr error
 
-	pids, err := spec.getAllPids()
+	pids, err := p.getAllPids()
 	if err != nil {
 		errstring += err.Error() + " "
 	}
 
	for _,
pid := range pids { - p, err := process.NewProcess(int32(pid)) - if err == nil { - out = append(out, p) - } else { - errstring += err.Error() + " " + _, ok := p.pidmap[pid] + if !ok { + proc, err := process.NewProcess(pid) + if err == nil { + p.pidmap[pid] = proc + } else { + errstring += err.Error() + " " + } } } @@ -96,19 +89,19 @@ func (spec *Specification) createProcesses() ([]*process.Process, error) { outerr = fmt.Errorf("%s", errstring) } - return out, outerr + return outerr } -func (spec *Specification) getAllPids() ([]int32, error) { +func (p *Procstat) getAllPids() ([]int32, error) { var pids []int32 var err error - if spec.PidFile != "" { - pids, err = pidsFromFile(spec.PidFile) - } else if spec.Exe != "" { - pids, err = pidsFromExe(spec.Exe) - } else if spec.Pattern != "" { - pids, err = pidsFromPattern(spec.Pattern) + if p.PidFile != "" { + pids, err = pidsFromFile(p.PidFile) + } else if p.Exe != "" { + pids, err = pidsFromExe(p.Exe) + } else if p.Pattern != "" { + pids, err = pidsFromPattern(p.Pattern) } else { err = fmt.Errorf("Either exe, pid_file or pattern has to be specified") } @@ -174,7 +167,7 @@ func pidsFromPattern(pattern string) ([]int32, error) { } func init() { - plugins.Add("procstat", func() plugins.Plugin { + inputs.Add("procstat", func() inputs.Input { return NewProcstat() }) } diff --git a/plugins/procstat/procstat_test.go b/plugins/inputs/procstat/procstat_test.go similarity index 59% rename from plugins/procstat/procstat_test.go rename to plugins/inputs/procstat/procstat_test.go index e0d17ac4d..bf5790f67 100644 --- a/plugins/procstat/procstat_test.go +++ b/plugins/inputs/procstat/procstat_test.go @@ -6,10 +6,11 @@ import ( "strconv" "testing" + "github.com/shirou/gopsutil/process" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" ) func TestGather(t *testing.T) { @@ -20,11 +21,12 @@ func TestGather(t *testing.T) { file.Write([]byte(strconv.Itoa(pid))) file.Close() defer os.Remove(file.Name()) - specifications := []*Specification{&Specification{PidFile: file.Name(), Prefix: "foo"}} p := Procstat{ - Specifications: specifications, + PidFile: file.Name(), + Prefix: "foo", + pidmap: make(map[int32]*process.Process), } p.Gather(&acc) - assert.True(t, acc.HasFloatValue("foo_cpu_user")) - assert.True(t, acc.HasUIntValue("foo_memory_vms")) + assert.True(t, acc.HasFloatField("procstat", "foo_cpu_time_user")) + assert.True(t, acc.HasUIntField("procstat", "foo_memory_vms")) } diff --git a/plugins/procstat/spec_processor.go b/plugins/inputs/procstat/spec_processor.go similarity index 61% rename from plugins/procstat/spec_processor.go rename to plugins/inputs/procstat/spec_processor.go index ede14549a..b66572f2e 100644 --- a/plugins/procstat/spec_processor.go +++ b/plugins/inputs/procstat/spec_processor.go @@ -2,17 +2,18 @@ package procstat import ( "fmt" - "log" + "time" "github.com/shirou/gopsutil/process" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdata/telegraf/plugins/inputs" ) type SpecProcessor struct { Prefix string tags map[string]string - acc plugins.Accumulator + fields map[string]interface{} + acc inputs.Accumulator proc *process.Process } @@ -23,43 +24,40 @@ func (p *SpecProcessor) add(metric string, value interface{}) { } else { mname = p.Prefix + "_" + metric } - p.acc.Add(mname, value, p.tags) + p.fields[mname] = value +} + +func (p *SpecProcessor) flush() { + p.acc.AddFields("procstat", p.fields, p.tags) + p.fields = 
make(map[string]interface{}) } func NewSpecProcessor( prefix string, - acc plugins.Accumulator, + acc inputs.Accumulator, p *process.Process, ) *SpecProcessor { tags := make(map[string]string) tags["pid"] = fmt.Sprintf("%v", p.Pid) if name, err := p.Name(); err == nil { - tags["name"] = name + tags["process_name"] = name } return &SpecProcessor{ Prefix: prefix, tags: tags, + fields: make(map[string]interface{}), acc: acc, proc: p, } } func (p *SpecProcessor) pushMetrics() { - if err := p.pushFDStats(); err != nil { - log.Printf("procstat, fd stats not available: %s", err.Error()) - } - if err := p.pushCtxStats(); err != nil { - log.Printf("procstat, ctx stats not available: %s", err.Error()) - } - if err := p.pushIOStats(); err != nil { - log.Printf("procstat, io stats not available: %s", err.Error()) - } - if err := p.pushCPUStats(); err != nil { - log.Printf("procstat, cpu stats not available: %s", err.Error()) - } - if err := p.pushMemoryStats(); err != nil { - log.Printf("procstat, mem stats not available: %s", err.Error()) - } + p.pushFDStats() + p.pushCtxStats() + p.pushIOStats() + p.pushCPUStats() + p.pushMemoryStats() + p.flush() } func (p *SpecProcessor) pushFDStats() error { @@ -94,21 +92,30 @@ func (p *SpecProcessor) pushIOStats() error { } func (p *SpecProcessor) pushCPUStats() error { - cpu, err := p.proc.CPUTimes() + cpu_time, err := p.proc.CPUTimes() if err != nil { return err } - p.add("cpu_user", cpu.User) - p.add("cpu_system", cpu.System) - p.add("cpu_idle", cpu.Idle) - p.add("cpu_nice", cpu.Nice) - p.add("cpu_iowait", cpu.Iowait) - p.add("cpu_irq", cpu.Irq) - p.add("cpu_soft_irq", cpu.Softirq) - p.add("cpu_soft_steal", cpu.Steal) - p.add("cpu_soft_stolen", cpu.Stolen) - p.add("cpu_soft_guest", cpu.Guest) - p.add("cpu_soft_guest_nice", cpu.GuestNice) + p.add("cpu_time_user", cpu_time.User) + p.add("cpu_time_system", cpu_time.System) + p.add("cpu_time_idle", cpu_time.Idle) + p.add("cpu_time_nice", cpu_time.Nice) + p.add("cpu_time_iowait", cpu_time.Iowait) + p.add("cpu_time_irq", cpu_time.Irq) + p.add("cpu_time_soft_irq", cpu_time.Softirq) + p.add("cpu_time_steal", cpu_time.Steal) + p.add("cpu_time_stolen", cpu_time.Stolen) + p.add("cpu_time_guest", cpu_time.Guest) + p.add("cpu_time_guest_nice", cpu_time.GuestNice) + + cpu_perc, err := p.proc.CPUPercent(time.Duration(0)) + if err != nil { + return err + } else if cpu_perc == 0 { + return nil + } + p.add("cpu_usage", cpu_perc) + return nil } diff --git a/plugins/prometheus/prometheus.go b/plugins/inputs/prometheus/prometheus.go similarity index 83% rename from plugins/prometheus/prometheus.go rename to plugins/inputs/prometheus/prometheus.go index cb824e3f2..e6374b8d6 100644 --- a/plugins/prometheus/prometheus.go +++ b/plugins/inputs/prometheus/prometheus.go @@ -3,7 +3,7 @@ package prometheus import ( "errors" "fmt" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdata/telegraf/plugins/inputs" "github.com/prometheus/common/expfmt" "github.com/prometheus/common/model" "io" @@ -32,7 +32,7 @@ var ErrProtocolError = errors.New("prometheus protocol error") // Reads stats from all configured servers accumulates stats. // Returns one of the errors encountered while gather stats (if any). 
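The `pidmap` cache introduced in procstat above is what makes the new `cpu_usage` field meaningful: gopsutil's `CPUPercent` computes usage from the delta since the previous call on the same `*process.Process` handle, so the first gather reports 0 (hence the `cpu_perc == 0` early return) and later gathers report real percentages. A sketch of the create-if-absent caching, assuming the pre-v2 gopsutil API this diff uses:

```
package main

import (
	"fmt"
	"os"
	"time"

	"github.com/shirou/gopsutil/process"
)

// pidmap persists across gathers so each PID keeps one Process handle;
// CPUPercent needs the previous sample stored on the handle to compute a delta.
var pidmap = map[int32]*process.Process{}

func lookup(pid int32) (*process.Process, error) {
	if proc, ok := pidmap[pid]; ok {
		return proc, nil
	}
	proc, err := process.NewProcess(pid)
	if err == nil {
		pidmap[pid] = proc
	}
	return proc, err
}

func main() {
	proc, err := lookup(int32(os.Getpid()))
	if err != nil {
		panic(err)
	}
	// The first call has no baseline and reports 0; later calls report
	// usage since the previous call on the same handle.
	for i := 0; i < 2; i++ {
		pct, _ := proc.CPUPercent(0)
		fmt.Printf("gather %d: cpu_usage=%.2f\n", i+1, pct)
		time.Sleep(500 * time.Millisecond)
	}
}
```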
-func (g *Prometheus) Gather(acc plugins.Accumulator) error { +func (g *Prometheus) Gather(acc inputs.Accumulator) error { var wg sync.WaitGroup var outerr error @@ -50,7 +50,7 @@ func (g *Prometheus) Gather(acc plugins.Accumulator) error { return outerr } -func (g *Prometheus) gatherURL(url string, acc plugins.Accumulator) error { +func (g *Prometheus) gatherURL(url string, acc inputs.Accumulator) error { resp, err := http.Get(url) if err != nil { return fmt.Errorf("error making HTTP request to %s: %s", url, err) @@ -77,17 +77,18 @@ func (g *Prometheus) gatherURL(url string, acc plugins.Accumulator) error { if err == io.EOF { break } else if err != nil { - return fmt.Errorf("error getting processing samples for %s: %s", url, err) + return fmt.Errorf("error getting processing samples for %s: %s", + url, err) } for _, sample := range samples { - tags := map[string]string{} + tags := make(map[string]string) for key, value := range sample.Metric { if key == model.MetricNameLabel { continue } tags[string(key)] = string(value) } - acc.Add(string(sample.Metric[model.MetricNameLabel]), + acc.Add("prometheus_"+string(sample.Metric[model.MetricNameLabel]), float64(sample.Value), tags) } } @@ -96,7 +97,7 @@ func (g *Prometheus) gatherURL(url string, acc plugins.Accumulator) error { } func init() { - plugins.Add("prometheus", func() plugins.Plugin { + inputs.Add("prometheus", func() inputs.Input { return &Prometheus{} }) } diff --git a/plugins/prometheus/prometheus_test.go b/plugins/inputs/prometheus/prometheus_test.go similarity index 84% rename from plugins/prometheus/prometheus_test.go rename to plugins/inputs/prometheus/prometheus_test.go index 4f79822c1..2009cbb11 100644 --- a/plugins/prometheus/prometheus_test.go +++ b/plugins/inputs/prometheus/prometheus_test.go @@ -6,7 +6,7 @@ import ( "net/http/httptest" "testing" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -45,11 +45,11 @@ func TestPrometheusGeneratesMetrics(t *testing.T) { value float64 tags map[string]string }{ - {"go_gc_duration_seconds_count", 7, map[string]string{}}, - {"go_goroutines", 15, map[string]string{}}, + {"prometheus_go_gc_duration_seconds_count", 7, map[string]string{}}, + {"prometheus_go_goroutines", 15, map[string]string{}}, } for _, e := range expected { - assert.NoError(t, acc.ValidateValue(e.name, e.value)) + assert.True(t, acc.HasFloatField(e.name, "value")) } } diff --git a/plugins/puppetagent/README.md b/plugins/inputs/puppetagent/README.md similarity index 100% rename from plugins/puppetagent/README.md rename to plugins/inputs/puppetagent/README.md diff --git a/plugins/puppetagent/last_run_summary.yaml b/plugins/inputs/puppetagent/last_run_summary.yaml similarity index 100% rename from plugins/puppetagent/last_run_summary.yaml rename to plugins/inputs/puppetagent/last_run_summary.yaml diff --git a/plugins/puppetagent/puppetagent.go b/plugins/inputs/puppetagent/puppetagent.go similarity index 86% rename from plugins/puppetagent/puppetagent.go rename to plugins/inputs/puppetagent/puppetagent.go index 67b01dce3..eee9186b3 100644 --- a/plugins/puppetagent/puppetagent.go +++ b/plugins/inputs/puppetagent/puppetagent.go @@ -8,7 +8,7 @@ import ( "reflect" "strings" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdata/telegraf/plugins/inputs" ) // PuppetAgent is a PuppetAgent plugin @@ -82,7 +82,7 @@ func (pa *PuppetAgent) Description() string { } // Gather reads stats from all configured 
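The prometheus change namespaces each scraped metric with a `prometheus_` prefix and turns every label except the metric name into a tag. Roughly, per parsed sample (a sketch using the `prometheus/common/model` types the plugin already imports):

```
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	// A sample shaped the way the expfmt decoder would produce it.
	sample := &model.Sample{
		Metric: model.Metric{
			model.MetricNameLabel: "go_goroutines",
			"instance":            "localhost:9100",
		},
		Value: 15,
	}

	// Every label except __name__ becomes a tag; the metric name itself,
	// prefixed with "prometheus_", becomes the measurement.
	tags := make(map[string]string)
	for key, value := range sample.Metric {
		if key == model.MetricNameLabel {
			continue
		}
		tags[string(key)] = string(value)
	}
	measurement := "prometheus_" + string(sample.Metric[model.MetricNameLabel])
	fmt.Println(measurement, tags, float64(sample.Value))
}
```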
servers accumulates stats -func (pa *PuppetAgent) Gather(acc plugins.Accumulator) error { +func (pa *PuppetAgent) Gather(acc inputs.Accumulator) error { if len(pa.Location) == 0 { pa.Location = "/var/lib/puppet/state/last_run_summary.yaml" @@ -104,15 +104,16 @@ func (pa *PuppetAgent) Gather(acc plugins.Accumulator) error { return fmt.Errorf("%s", err) } - structPrinter(&puppetState, acc) + tags := map[string]string{"location": pa.Location} + structPrinter(&puppetState, acc, tags) return nil } -func structPrinter(s *State, acc plugins.Accumulator) { - +func structPrinter(s *State, acc inputs.Accumulator, tags map[string]string) { e := reflect.ValueOf(s).Elem() + fields := make(map[string]interface{}) for tLevelFNum := 0; tLevelFNum < e.NumField(); tLevelFNum++ { name := e.Type().Field(tLevelFNum).Name nameNumField := e.FieldByName(name).NumField() @@ -123,14 +124,14 @@ func structPrinter(s *State, acc plugins.Accumulator) { lname := strings.ToLower(name) lsName := strings.ToLower(sName) - acc.Add(fmt.Sprintf("%s_%s", lname, lsName), sValue, nil) + fields[fmt.Sprintf("%s_%s", lname, lsName)] = sValue } } - + acc.AddFields("puppetagent", fields, tags) } func init() { - plugins.Add("puppetagent", func() plugins.Plugin { + inputs.Add("puppetagent", func() inputs.Input { return &PuppetAgent{} }) } diff --git a/plugins/inputs/puppetagent/puppetagent_test.go b/plugins/inputs/puppetagent/puppetagent_test.go new file mode 100644 index 000000000..d1470bc27 --- /dev/null +++ b/plugins/inputs/puppetagent/puppetagent_test.go @@ -0,0 +1,48 @@ +package puppetagent + +import ( + "github.com/influxdata/telegraf/testutil" + "testing" +) + +func TestGather(t *testing.T) { + var acc testutil.Accumulator + + pa := PuppetAgent{ + Location: "last_run_summary.yaml", + } + pa.Gather(&acc) + + tags := map[string]string{"location": "last_run_summary.yaml"} + fields := map[string]interface{}{ + "events_failure": int64(0), + "events_total": int64(0), + "events_success": int64(0), + "resources_failed": int64(0), + "resources_scheduled": int64(0), + "resources_changed": int64(0), + "resources_skipped": int64(0), + "resources_total": int64(109), + "resources_failedtorestart": int64(0), + "resources_restarted": int64(0), + "resources_outofsync": int64(0), + "changes_total": int64(0), + "time_lastrun": int64(1444936531), + "version_config": int64(1444936521), + "time_user": float64(0.004331), + "time_schedule": float64(0.001123), + "time_filebucket": float64(0.000353), + "time_file": float64(0.441472), + "time_exec": float64(0.508123), + "time_anchor": float64(0.000555), + "time_sshauthorizedkey": float64(0.000764), + "time_service": float64(1.807795), + "time_package": float64(1.325788), + "time_total": float64(8.85354707064819), + "time_configretrieval": float64(4.75567007064819), + "time_cron": float64(0.000584), + "version_puppet": "3.7.5", + } + + acc.AssertContainsTaggedFields(t, "puppetagent", fields, tags) +} diff --git a/plugins/rabbitmq/rabbitmq.go b/plugins/inputs/rabbitmq/rabbitmq.go similarity index 63% rename from plugins/rabbitmq/rabbitmq.go rename to plugins/inputs/rabbitmq/rabbitmq.go index 27580a13a..c062b3164 100644 --- a/plugins/rabbitmq/rabbitmq.go +++ b/plugins/inputs/rabbitmq/rabbitmq.go @@ -5,25 +5,22 @@ import ( "fmt" "net/http" "strconv" + "time" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdata/telegraf/plugins/inputs" ) const DefaultUsername = "guest" const DefaultPassword = "guest" const DefaultURL = "http://localhost:15672" -type Server struct { +type RabbitMQ struct { URL 
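`structPrinter` flattens puppet's two-level `State` struct into `section_field` keys by reflecting over the outer struct and each nested section, which is where field names like `events_failure` and `resources_total` in the new test come from. A self-contained sketch of that flattening, with a hypothetical two-level struct standing in for `State`:

```
package main

import (
	"fmt"
	"reflect"
	"strings"
)

// State is a hypothetical stand-in for the puppet last-run-summary struct.
type State struct {
	Events    struct{ Failure, Total int64 }
	Resources struct{ Failed, Changed int64 }
}

// flatten walks the outer fields, then each inner field, building
// "<outer>_<inner>" keys the way structPrinter does.
func flatten(s *State) map[string]interface{} {
	fields := make(map[string]interface{})
	e := reflect.ValueOf(s).Elem()
	for i := 0; i < e.NumField(); i++ {
		outer := e.Type().Field(i).Name
		inner := e.Field(i)
		for j := 0; j < inner.NumField(); j++ {
			name := inner.Type().Field(j).Name
			key := fmt.Sprintf("%s_%s", strings.ToLower(outer), strings.ToLower(name))
			fields[key] = inner.Field(j).Interface()
		}
	}
	return fields
}

func main() {
	st := &State{}
	st.Events.Total = 109
	fmt.Println(flatten(st)) // map[events_failure:0 events_total:109 ...]
}
```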
string Name string Username string Password string Nodes []string Queues []string -} - -type RabbitMQ struct { - Servers []*Server Client *http.Client } @@ -94,15 +91,13 @@ type Node struct { SocketsUsed int64 `json:"sockets_used"` } -type gatherFunc func(r *RabbitMQ, serv *Server, acc plugins.Accumulator, errChan chan error) +type gatherFunc func(r *RabbitMQ, acc inputs.Accumulator, errChan chan error) var gatherFunctions = []gatherFunc{gatherOverview, gatherNodes, gatherQueues} var sampleConfig = ` - # Specify servers via an array of tables - [[plugins.rabbitmq.servers]] + url = "http://localhost:15672" # required # name = "rmq-server-1" # optional tag - # url = "http://localhost:15672" # username = "guest" # password = "guest" @@ -119,27 +114,18 @@ func (r *RabbitMQ) Description() string { return "Read metrics from one or many RabbitMQ servers via the management API" } -var localhost = &Server{URL: DefaultURL} - -func (r *RabbitMQ) Gather(acc plugins.Accumulator) error { +func (r *RabbitMQ) Gather(acc inputs.Accumulator) error { if r.Client == nil { r.Client = &http.Client{} } - var errChan = make(chan error, len(r.Servers)) + var errChan = make(chan error, len(gatherFunctions)) - // use localhost is no servers are specified in config - if len(r.Servers) == 0 { - r.Servers = append(r.Servers, localhost) + for _, f := range gatherFunctions { + go f(r, acc, errChan) } - for _, serv := range r.Servers { - for _, f := range gatherFunctions { - go f(r, serv, acc, errChan) - } - } - - for i := 1; i <= len(r.Servers)*len(gatherFunctions); i++ { + for i := 1; i <= len(gatherFunctions); i++ { err := <-errChan if err != nil { return err @@ -149,20 +135,20 @@ func (r *RabbitMQ) Gather(acc plugins.Accumulator) error { return nil } -func (r *RabbitMQ) requestJSON(serv *Server, u string, target interface{}) error { - u = fmt.Sprintf("%s%s", serv.URL, u) +func (r *RabbitMQ) requestJSON(u string, target interface{}) error { + u = fmt.Sprintf("%s%s", r.URL, u) req, err := http.NewRequest("GET", u, nil) if err != nil { return err } - username := serv.Username + username := r.Username if username == "" { username = DefaultUsername } - password := serv.Password + password := r.Password if password == "" { password = DefaultPassword } @@ -181,10 +167,10 @@ func (r *RabbitMQ) requestJSON(serv *Server, u string, target interface{}) error return nil } -func gatherOverview(r *RabbitMQ, serv *Server, acc plugins.Accumulator, errChan chan error) { +func gatherOverview(r *RabbitMQ, acc inputs.Accumulator, errChan chan error) { overview := &OverviewResponse{} - err := r.requestJSON(serv, "/api/overview", &overview) + err := r.requestJSON("/api/overview", &overview) if err != nil { errChan <- err return @@ -195,76 +181,80 @@ func gatherOverview(r *RabbitMQ, serv *Server, acc plugins.Accumulator, errChan return } - tags := map[string]string{"url": serv.URL} - if serv.Name != "" { - tags["name"] = serv.Name + tags := map[string]string{"url": r.URL} + if r.Name != "" { + tags["name"] = r.Name } - - acc.Add("messages", overview.QueueTotals.Messages, tags) - acc.Add("messages_ready", overview.QueueTotals.MessagesReady, tags) - acc.Add("messages_unacked", overview.QueueTotals.MessagesUnacknowledged, tags) - - acc.Add("channels", overview.ObjectTotals.Channels, tags) - acc.Add("connections", overview.ObjectTotals.Connections, tags) - acc.Add("consumers", overview.ObjectTotals.Consumers, tags) - acc.Add("exchanges", overview.ObjectTotals.Exchanges, tags) - acc.Add("queues", overview.ObjectTotals.Queues, tags) - - 
acc.Add("messages_acked", overview.MessageStats.Ack, tags) - acc.Add("messages_delivered", overview.MessageStats.Deliver, tags) - acc.Add("messages_published", overview.MessageStats.Publish, tags) + fields := map[string]interface{}{ + "messages": overview.QueueTotals.Messages, + "messages_ready": overview.QueueTotals.MessagesReady, + "messages_unacked": overview.QueueTotals.MessagesUnacknowledged, + "channels": overview.ObjectTotals.Channels, + "connections": overview.ObjectTotals.Connections, + "consumers": overview.ObjectTotals.Consumers, + "exchanges": overview.ObjectTotals.Exchanges, + "queues": overview.ObjectTotals.Queues, + "messages_acked": overview.MessageStats.Ack, + "messages_delivered": overview.MessageStats.Deliver, + "messages_published": overview.MessageStats.Publish, + } + acc.AddFields("rabbitmq_overview", fields, tags) errChan <- nil } -func gatherNodes(r *RabbitMQ, serv *Server, acc plugins.Accumulator, errChan chan error) { +func gatherNodes(r *RabbitMQ, acc inputs.Accumulator, errChan chan error) { nodes := make([]Node, 0) // Gather information about nodes - err := r.requestJSON(serv, "/api/nodes", &nodes) + err := r.requestJSON("/api/nodes", &nodes) if err != nil { errChan <- err return } + now := time.Now() for _, node := range nodes { - if !shouldGatherNode(node, serv) { + if !r.shouldGatherNode(node) { continue } - tags := map[string]string{"url": serv.URL} + tags := map[string]string{"url": r.URL} tags["node"] = node.Name - acc.Add("disk_free", node.DiskFree, tags) - acc.Add("disk_free_limit", node.DiskFreeLimit, tags) - acc.Add("fd_total", node.FdTotal, tags) - acc.Add("fd_used", node.FdUsed, tags) - acc.Add("mem_limit", node.MemLimit, tags) - acc.Add("mem_used", node.MemUsed, tags) - acc.Add("proc_total", node.ProcTotal, tags) - acc.Add("proc_used", node.ProcUsed, tags) - acc.Add("run_queue", node.RunQueue, tags) - acc.Add("sockets_total", node.SocketsTotal, tags) - acc.Add("sockets_used", node.SocketsUsed, tags) + fields := map[string]interface{}{ + "disk_free": node.DiskFree, + "disk_free_limit": node.DiskFreeLimit, + "fd_total": node.FdTotal, + "fd_used": node.FdUsed, + "mem_limit": node.MemLimit, + "mem_used": node.MemUsed, + "proc_total": node.ProcTotal, + "proc_used": node.ProcUsed, + "run_queue": node.RunQueue, + "sockets_total": node.SocketsTotal, + "sockets_used": node.SocketsUsed, + } + acc.AddFields("rabbitmq_node", fields, tags, now) } errChan <- nil } -func gatherQueues(r *RabbitMQ, serv *Server, acc plugins.Accumulator, errChan chan error) { +func gatherQueues(r *RabbitMQ, acc inputs.Accumulator, errChan chan error) { // Gather information about queues queues := make([]Queue, 0) - err := r.requestJSON(serv, "/api/queues", &queues) + err := r.requestJSON("/api/queues", &queues) if err != nil { errChan <- err return } for _, queue := range queues { - if !shouldGatherQueue(queue, serv) { + if !r.shouldGatherQueue(queue) { continue } tags := map[string]string{ - "url": serv.URL, + "url": r.URL, "queue": queue.Name, "vhost": queue.Vhost, "node": queue.Node, @@ -273,7 +263,7 @@ func gatherQueues(r *RabbitMQ, serv *Server, acc plugins.Accumulator, errChan ch } acc.AddFields( - "queue", + "rabbitmq_queue", map[string]interface{}{ // common information "consumers": queue.Consumers, @@ -301,12 +291,12 @@ func gatherQueues(r *RabbitMQ, serv *Server, acc plugins.Accumulator, errChan ch errChan <- nil } -func shouldGatherNode(node Node, serv *Server) bool { - if len(serv.Nodes) == 0 { +func (r *RabbitMQ) shouldGatherNode(node Node) bool { + if len(r.Nodes) == 0 
{ return true } - for _, name := range serv.Nodes { + for _, name := range r.Nodes { if name == node.Name { return true } @@ -315,12 +305,12 @@ func shouldGatherNode(node Node, serv *Server) bool { return false } -func shouldGatherQueue(queue Queue, serv *Server) bool { - if len(serv.Queues) == 0 { +func (r *RabbitMQ) shouldGatherQueue(queue Queue) bool { + if len(r.Queues) == 0 { return true } - for _, name := range serv.Queues { + for _, name := range r.Queues { if name == queue.Name { return true } @@ -330,7 +320,7 @@ func shouldGatherQueue(queue Queue, serv *Server) bool { } func init() { - plugins.Add("rabbitmq", func() plugins.Plugin { + inputs.Add("rabbitmq", func() inputs.Input { return &RabbitMQ{} }) } diff --git a/plugins/rabbitmq/rabbitmq_test.go b/plugins/inputs/rabbitmq/rabbitmq_test.go similarity index 97% rename from plugins/rabbitmq/rabbitmq_test.go rename to plugins/inputs/rabbitmq/rabbitmq_test.go index 38bfb7a7d..4bdc980db 100644 --- a/plugins/rabbitmq/rabbitmq_test.go +++ b/plugins/inputs/rabbitmq/rabbitmq_test.go @@ -6,7 +6,7 @@ import ( "net/http/httptest" "testing" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -394,11 +394,7 @@ func TestRabbitMQGeneratesMetrics(t *testing.T) { defer ts.Close() r := &RabbitMQ{ - Servers: []*Server{ - { - URL: ts.URL, - }, - }, + URL: ts.URL, } var acc testutil.Accumulator @@ -423,7 +419,7 @@ func TestRabbitMQGeneratesMetrics(t *testing.T) { } for _, metric := range intMetrics { - assert.True(t, acc.HasIntValue(metric)) + assert.True(t, acc.HasIntField("rabbitmq_overview", metric)) } nodeIntMetrics := []string{ @@ -441,8 +437,8 @@ func TestRabbitMQGeneratesMetrics(t *testing.T) { } for _, metric := range nodeIntMetrics { - assert.True(t, acc.HasIntValue(metric)) + assert.True(t, acc.HasIntField("rabbitmq_node", metric)) } - assert.True(t, acc.HasMeasurement("queue")) + assert.True(t, acc.HasMeasurement("rabbitmq_queue")) } diff --git a/plugins/redis/redis.go b/plugins/inputs/redis/redis.go similarity index 91% rename from plugins/redis/redis.go rename to plugins/inputs/redis/redis.go index 151fb4f46..735aa2052 100644 --- a/plugins/redis/redis.go +++ b/plugins/inputs/redis/redis.go @@ -10,7 +10,7 @@ import ( "strings" "sync" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdata/telegraf/plugins/inputs" ) type Redis struct { @@ -76,7 +76,7 @@ var ErrProtocolError = errors.New("redis protocol error") // Reads stats from all configured servers accumulates stats. // Returns one of the errors encountered while gather stats (if any). 
-func (r *Redis) Gather(acc plugins.Accumulator) error { +func (r *Redis) Gather(acc inputs.Accumulator) error { if len(r.Servers) == 0 { url := &url.URL{ Host: ":6379", @@ -113,7 +113,7 @@ func (r *Redis) Gather(acc plugins.Accumulator) error { const defaultPort = "6379" -func (r *Redis) gatherServer(addr *url.URL, acc plugins.Accumulator) error { +func (r *Redis) gatherServer(addr *url.URL, acc inputs.Accumulator) error { _, _, err := net.SplitHostPort(addr.Host) if err != nil { addr.Host = addr.Host + ":" + defaultPort @@ -158,12 +158,13 @@ func (r *Redis) gatherServer(addr *url.URL, acc plugins.Accumulator) error { // gatherInfoOutput gathers func gatherInfoOutput( rdr *bufio.Reader, - acc plugins.Accumulator, + acc inputs.Accumulator, tags map[string]string, ) error { var keyspace_hits, keyspace_misses uint64 = 0, 0 scanner := bufio.NewScanner(rdr) + fields := make(map[string]interface{}) for scanner.Scan() { line := scanner.Text() if strings.Contains(line, "ERR") { @@ -199,7 +200,7 @@ func gatherInfoOutput( } if err == nil { - acc.Add(metric, ival, tags) + fields[metric] = ival continue } @@ -208,13 +209,14 @@ func gatherInfoOutput( return err } - acc.Add(metric, fval, tags) + fields[metric] = fval } var keyspace_hitrate float64 = 0.0 if keyspace_hits != 0 || keyspace_misses != 0 { keyspace_hitrate = float64(keyspace_hits) / float64(keyspace_hits+keyspace_misses) } - acc.Add("keyspace_hitrate", keyspace_hitrate, tags) + fields["keyspace_hitrate"] = keyspace_hitrate + acc.AddFields("redis", fields, tags) return nil } @@ -225,24 +227,26 @@ func gatherInfoOutput( func gatherKeyspaceLine( name string, line string, - acc plugins.Accumulator, + acc inputs.Accumulator, tags map[string]string, ) { if strings.Contains(line, "keys=") { + fields := make(map[string]interface{}) tags["database"] = name dbparts := strings.Split(line, ",") for _, dbp := range dbparts { kv := strings.Split(dbp, "=") ival, err := strconv.ParseUint(kv[1], 10, 64) if err == nil { - acc.Add(kv[0], ival, tags) + fields[kv[0]] = ival } } + acc.AddFields("redis_keyspace", fields, tags) } } func init() { - plugins.Add("redis", func() plugins.Plugin { + inputs.Add("redis", func() inputs.Input { return &Redis{} }) } diff --git a/plugins/redis/redis_test.go b/plugins/inputs/redis/redis_test.go similarity index 57% rename from plugins/redis/redis_test.go rename to plugins/inputs/redis/redis_test.go index ff52e8c57..612595cdb 100644 --- a/plugins/redis/redis_test.go +++ b/plugins/inputs/redis/redis_test.go @@ -6,8 +6,7 @@ import ( "strings" "testing" - "github.com/influxdb/telegraf/testutil" - "github.com/stretchr/testify/assert" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) @@ -36,61 +35,48 @@ func TestRedis_ParseMetrics(t *testing.T) { err := gatherInfoOutput(rdr, &acc, tags) require.NoError(t, err) - checkInt := []struct { - name string - value uint64 - }{ - {"uptime", 238}, - {"clients", 1}, - {"used_memory", 1003936}, - {"used_memory_rss", 811008}, - {"used_memory_peak", 1003936}, - {"used_memory_lua", 33792}, - {"rdb_changes_since_last_save", 0}, - {"total_connections_received", 2}, - {"total_commands_processed", 1}, - {"instantaneous_ops_per_sec", 0}, - {"sync_full", 0}, - {"sync_partial_ok", 0}, - {"sync_partial_err", 0}, - {"expired_keys", 0}, - {"evicted_keys", 0}, - {"keyspace_hits", 1}, - {"keyspace_misses", 1}, - {"pubsub_channels", 0}, - {"pubsub_patterns", 0}, - {"latest_fork_usec", 0}, - {"connected_slaves", 0}, - {"master_repl_offset", 0}, - {"repl_backlog_active", 0}, - 
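`gatherKeyspaceLine` above turns a redis INFO keyspace row such as `db0:keys=2,expires=0,avg_ttl=0` into the new `redis_keyspace` measurement tagged with the database name; the parse is just two splits and a `ParseUint`. A standalone sketch (the length guard is my addition, the diff assumes well-formed pairs):

```
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	name, line := "db0", "keys=2,expires=0,avg_ttl=0"

	fields := make(map[string]interface{})
	tags := map[string]string{"database": name}
	for _, dbp := range strings.Split(line, ",") {
		kv := strings.Split(dbp, "=")
		if len(kv) != 2 {
			continue // defensive; INFO output normally has well-formed pairs
		}
		if ival, err := strconv.ParseUint(kv[1], 10, 64); err == nil {
			fields[kv[0]] = ival
		}
	}
	// These would go out via acc.AddFields("redis_keyspace", fields, tags).
	fmt.Println("redis_keyspace", tags, fields)
}
```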
{"repl_backlog_size", 1048576}, - {"repl_backlog_histlen", 0}, - {"keys", 2}, - {"expires", 0}, - {"avg_ttl", 0}, + fields := map[string]interface{}{ + "uptime": uint64(238), + "clients": uint64(1), + "used_memory": uint64(1003936), + "used_memory_rss": uint64(811008), + "used_memory_peak": uint64(1003936), + "used_memory_lua": uint64(33792), + "rdb_changes_since_last_save": uint64(0), + "total_connections_received": uint64(2), + "total_commands_processed": uint64(1), + "instantaneous_ops_per_sec": uint64(0), + "sync_full": uint64(0), + "sync_partial_ok": uint64(0), + "sync_partial_err": uint64(0), + "expired_keys": uint64(0), + "evicted_keys": uint64(0), + "keyspace_hits": uint64(1), + "keyspace_misses": uint64(1), + "pubsub_channels": uint64(0), + "pubsub_patterns": uint64(0), + "latest_fork_usec": uint64(0), + "connected_slaves": uint64(0), + "master_repl_offset": uint64(0), + "repl_backlog_active": uint64(0), + "repl_backlog_size": uint64(1048576), + "repl_backlog_histlen": uint64(0), + "mem_fragmentation_ratio": float64(0.81), + "instantaneous_input_kbps": float64(876.16), + "instantaneous_output_kbps": float64(3010.23), + "used_cpu_sys": float64(0.14), + "used_cpu_user": float64(0.05), + "used_cpu_sys_children": float64(0.00), + "used_cpu_user_children": float64(0.00), + "keyspace_hitrate": float64(0.50), } - - for _, c := range checkInt { - assert.True(t, acc.CheckValue(c.name, c.value)) - } - - checkFloat := []struct { - name string - value float64 - }{ - {"mem_fragmentation_ratio", 0.81}, - {"instantaneous_input_kbps", 876.16}, - {"instantaneous_output_kbps", 3010.23}, - {"used_cpu_sys", 0.14}, - {"used_cpu_user", 0.05}, - {"used_cpu_sys_children", 0.00}, - {"used_cpu_user_children", 0.00}, - {"keyspace_hitrate", 0.50}, - } - - for _, c := range checkFloat { - assert.True(t, acc.CheckValue(c.name, c.value)) + keyspaceFields := map[string]interface{}{ + "avg_ttl": uint64(0), + "expires": uint64(0), + "keys": uint64(2), } + acc.AssertContainsTaggedFields(t, "redis", fields, tags) + acc.AssertContainsTaggedFields(t, "redis_keyspace", keyspaceFields, tags) } const testOutput = `# Server diff --git a/plugins/registry.go b/plugins/inputs/registry.go similarity index 64% rename from plugins/registry.go rename to plugins/inputs/registry.go index 3e544917d..2b99078f0 100644 --- a/plugins/registry.go +++ b/plugins/inputs/registry.go @@ -1,4 +1,4 @@ -package plugins +package inputs import "time" @@ -17,40 +17,40 @@ type Accumulator interface { t ...time.Time) } -type Plugin interface { - // SampleConfig returns the default configuration of the Plugin +type Input interface { + // SampleConfig returns the default configuration of the Input SampleConfig() string - // Description returns a one-sentence description on the Plugin + // Description returns a one-sentence description on the Input Description() string - // Gather takes in an accumulator and adds the metrics that the Plugin + // Gather takes in an accumulator and adds the metrics that the Input // gathers. 
This is called every "interval" Gather(Accumulator) error } -type ServicePlugin interface { - // SampleConfig returns the default configuration of the Plugin +type ServiceInput interface { + // SampleConfig returns the default configuration of the Input SampleConfig() string - // Description returns a one-sentence description on the Plugin + // Description returns a one-sentence description on the Input Description() string - // Gather takes in an accumulator and adds the metrics that the Plugin + // Gather takes in an accumulator and adds the metrics that the Input // gathers. This is called every "interval" Gather(Accumulator) error - // Start starts the ServicePlugin's service, whatever that may be + // Start starts the ServiceInput's service, whatever that may be Start() error // Stop stops the services and closes any necessary channels and connections Stop() } -type Creator func() Plugin +type Creator func() Input -var Plugins = map[string]Creator{} +var Inputs = map[string]Creator{} func Add(name string, creator Creator) { - Plugins[name] = creator + Inputs[name] = creator } diff --git a/plugins/rethinkdb/rethinkdb.go b/plugins/inputs/rethinkdb/rethinkdb.go similarity index 88% rename from plugins/rethinkdb/rethinkdb.go rename to plugins/inputs/rethinkdb/rethinkdb.go index 8af890661..1f28dab25 100644 --- a/plugins/rethinkdb/rethinkdb.go +++ b/plugins/inputs/rethinkdb/rethinkdb.go @@ -5,7 +5,7 @@ import ( "net/url" "sync" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdata/telegraf/plugins/inputs" "gopkg.in/dancannon/gorethink.v1" ) @@ -35,7 +35,7 @@ var localhost = &Server{Url: &url.URL{Host: "127.0.0.1:28015"}} // Reads stats from all configured servers accumulates stats. // Returns one of the errors encountered while gather stats (if any). 
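`ServiceInput` extends `Input` with `Start`/`Stop` lifecycle hooks for inputs that listen for data between gathers; statsd, further down, is the canonical implementer. A toy implementation of the Start/Gather/Stop portion of that contract (the `Accumulator` here is a pared-down stand-in, and the mutex mirrors the locking statsd does around its caches):

```
package main

import (
	"fmt"
	"sync"
	"time"
)

type Accumulator interface {
	AddFields(measurement string, fields map[string]interface{}, tags map[string]string)
}

type logAcc struct{}

func (logAcc) AddFields(m string, f map[string]interface{}, t map[string]string) {
	fmt.Println(m, f, t)
}

// ticker is a toy ServiceInput: Start launches the background listener,
// Gather flushes whatever has accumulated, Stop tears the listener down.
type ticker struct {
	mu    sync.Mutex
	done  chan struct{}
	count int
}

func (t *ticker) Start() error {
	t.done = make(chan struct{})
	go func() {
		for {
			select {
			case <-t.done:
				return
			case <-time.After(50 * time.Millisecond):
				t.mu.Lock()
				t.count++ // stand-in for receiving a packet
				t.mu.Unlock()
			}
		}
	}()
	return nil
}

func (t *ticker) Gather(acc Accumulator) error {
	t.mu.Lock()
	defer t.mu.Unlock()
	acc.AddFields("ticker", map[string]interface{}{"events": t.count}, nil)
	return nil
}

func (t *ticker) Stop() { close(t.done) }

func main() {
	tk := &ticker{}
	tk.Start()
	time.Sleep(200 * time.Millisecond)
	tk.Gather(logAcc{})
	tk.Stop()
}
```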
-func (r *RethinkDB) Gather(acc plugins.Accumulator) error { +func (r *RethinkDB) Gather(acc inputs.Accumulator) error { if len(r.Servers) == 0 { r.gatherServer(localhost, acc) return nil @@ -65,7 +65,7 @@ func (r *RethinkDB) Gather(acc plugins.Accumulator) error { return outerr } -func (r *RethinkDB) gatherServer(server *Server, acc plugins.Accumulator) error { +func (r *RethinkDB) gatherServer(server *Server, acc inputs.Accumulator) error { var err error connectOpts := gorethink.ConnectOpts{ Address: server.Url.Host, @@ -87,7 +87,7 @@ func (r *RethinkDB) gatherServer(server *Server, acc plugins.Accumulator) error } func init() { - plugins.Add("rethinkdb", func() plugins.Plugin { + inputs.Add("rethinkdb", func() inputs.Input { return &RethinkDB{} }) } diff --git a/plugins/rethinkdb/rethinkdb_data.go b/plugins/inputs/rethinkdb/rethinkdb_data.go similarity index 71% rename from plugins/rethinkdb/rethinkdb_data.go rename to plugins/inputs/rethinkdb/rethinkdb_data.go index 5fae28931..8093fa5ba 100644 --- a/plugins/rethinkdb/rethinkdb_data.go +++ b/plugins/inputs/rethinkdb/rethinkdb_data.go @@ -4,7 +4,7 @@ import ( "reflect" "time" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdata/telegraf/plugins/inputs" ) type serverStatus struct { @@ -86,25 +86,30 @@ var engineStats = map[string]string{ "total_writes": "TotalWrites", } -func (e *Engine) AddEngineStats(keys []string, acc plugins.Accumulator, tags map[string]string) { +func (e *Engine) AddEngineStats( + keys []string, + acc inputs.Accumulator, + tags map[string]string, +) { engine := reflect.ValueOf(e).Elem() + fields := make(map[string]interface{}) for _, key := range keys { - acc.Add( - key, - engine.FieldByName(engineStats[key]).Interface(), - tags, - ) + fields[key] = engine.FieldByName(engineStats[key]).Interface() } + acc.AddFields("rethinkdb_engine", fields, tags) } -func (s *Storage) AddStats(acc plugins.Accumulator, tags map[string]string) { - acc.Add("cache_bytes_in_use", s.Cache.BytesInUse, tags) - acc.Add("disk_read_bytes_per_sec", s.Disk.ReadBytesPerSec, tags) - acc.Add("disk_read_bytes_total", s.Disk.ReadBytesTotal, tags) - acc.Add("disk_written_bytes_per_sec", s.Disk.WriteBytesPerSec, tags) - acc.Add("disk_written_bytes_total", s.Disk.WriteBytesTotal, tags) - acc.Add("disk_usage_data_bytes", s.Disk.SpaceUsage.Data, tags) - acc.Add("disk_usage_garbage_bytes", s.Disk.SpaceUsage.Garbage, tags) - acc.Add("disk_usage_metadata_bytes", s.Disk.SpaceUsage.Metadata, tags) - acc.Add("disk_usage_preallocated_bytes", s.Disk.SpaceUsage.Prealloc, tags) +func (s *Storage) AddStats(acc inputs.Accumulator, tags map[string]string) { + fields := map[string]interface{}{ + "cache_bytes_in_use": s.Cache.BytesInUse, + "disk_read_bytes_per_sec": s.Disk.ReadBytesPerSec, + "disk_read_bytes_total": s.Disk.ReadBytesTotal, + "disk_written_bytes_per_sec": s.Disk.WriteBytesPerSec, + "disk_written_bytes_total": s.Disk.WriteBytesTotal, + "disk_usage_data_bytes": s.Disk.SpaceUsage.Data, + "disk_usage_garbage_bytes": s.Disk.SpaceUsage.Garbage, + "disk_usage_metadata_bytes": s.Disk.SpaceUsage.Metadata, + "disk_usage_preallocated_bytes": s.Disk.SpaceUsage.Prealloc, + } + acc.AddFields("rethinkdb", fields, tags) } diff --git a/plugins/rethinkdb/rethinkdb_data_test.go b/plugins/inputs/rethinkdb/rethinkdb_data_test.go similarity index 89% rename from plugins/rethinkdb/rethinkdb_data_test.go rename to plugins/inputs/rethinkdb/rethinkdb_data_test.go index 4c76b2340..6159016c0 100644 --- a/plugins/rethinkdb/rethinkdb_data_test.go +++ 
b/plugins/inputs/rethinkdb/rethinkdb_data_test.go @@ -3,7 +3,7 @@ package rethinkdb import ( "testing" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" ) @@ -36,7 +36,7 @@ func TestAddEngineStats(t *testing.T) { engine.AddEngineStats(keys, &acc, tags) for _, metric := range keys { - assert.True(t, acc.HasIntValue(metric)) + assert.True(t, acc.HasIntField("rethinkdb_engine", metric)) } } @@ -67,7 +67,7 @@ func TestAddEngineStatsPartial(t *testing.T) { engine.AddEngineStats(keys, &acc, tags) for _, metric := range missing_keys { - assert.False(t, acc.HasIntValue(metric)) + assert.False(t, acc.HasIntField("rethinkdb", metric)) } } @@ -107,6 +107,6 @@ func TestAddStorageStats(t *testing.T) { storage.AddStats(&acc, tags) for _, metric := range keys { - assert.True(t, acc.HasIntValue(metric)) + assert.True(t, acc.HasIntField("rethinkdb", metric)) } } diff --git a/plugins/rethinkdb/rethinkdb_server.go b/plugins/inputs/rethinkdb/rethinkdb_server.go similarity index 94% rename from plugins/rethinkdb/rethinkdb_server.go rename to plugins/inputs/rethinkdb/rethinkdb_server.go index 9285068bd..6ca7a3af1 100644 --- a/plugins/rethinkdb/rethinkdb_server.go +++ b/plugins/inputs/rethinkdb/rethinkdb_server.go @@ -9,7 +9,7 @@ import ( "strconv" "strings" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdata/telegraf/plugins/inputs" "gopkg.in/dancannon/gorethink.v1" ) @@ -20,7 +20,7 @@ type Server struct { serverStatus serverStatus } -func (s *Server) gatherData(acc plugins.Accumulator) error { +func (s *Server) gatherData(acc inputs.Accumulator) error { if err := s.getServerStatus(); err != nil { return fmt.Errorf("Failed to get server_status, %s\n", err) } @@ -110,7 +110,7 @@ var ClusterTracking = []string{ "written_docs_per_sec", } -func (s *Server) addClusterStats(acc plugins.Accumulator) error { +func (s *Server) addClusterStats(acc inputs.Accumulator) error { cursor, err := gorethink.DB("rethinkdb").Table("stats").Get([]string{"cluster"}).Run(s.session) if err != nil { return fmt.Errorf("cluster stats query error, %s\n", err.Error()) @@ -138,7 +138,7 @@ var MemberTracking = []string{ "total_writes", } -func (s *Server) addMemberStats(acc plugins.Accumulator) error { +func (s *Server) addMemberStats(acc inputs.Accumulator) error { cursor, err := gorethink.DB("rethinkdb").Table("stats").Get([]string{"server", s.serverStatus.Id}).Run(s.session) if err != nil { return fmt.Errorf("member stats query error, %s\n", err.Error()) @@ -162,7 +162,7 @@ var TableTracking = []string{ "total_writes", } -func (s *Server) addTableStats(acc plugins.Accumulator) error { +func (s *Server) addTableStats(acc inputs.Accumulator) error { tablesCursor, err := gorethink.DB("rethinkdb").Table("table_status").Run(s.session) defer tablesCursor.Close() var tables []tableStatus diff --git a/plugins/rethinkdb/rethinkdb_server_test.go b/plugins/inputs/rethinkdb/rethinkdb_server_test.go similarity index 97% rename from plugins/rethinkdb/rethinkdb_server_test.go rename to plugins/inputs/rethinkdb/rethinkdb_server_test.go index 21ab0dbbd..c4b644222 100644 --- a/plugins/rethinkdb/rethinkdb_server_test.go +++ b/plugins/inputs/rethinkdb/rethinkdb_server_test.go @@ -5,7 +5,7 @@ package rethinkdb import ( "testing" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) diff --git a/plugins/rethinkdb/rethinkdb_test.go 
b/plugins/inputs/rethinkdb/rethinkdb_test.go
similarity index 100%
rename from plugins/rethinkdb/rethinkdb_test.go
rename to plugins/inputs/rethinkdb/rethinkdb_test.go
diff --git a/plugins/inputs/sensors/sensors.go b/plugins/inputs/sensors/sensors.go
new file mode 100644
index 000000000..81001abd8
--- /dev/null
+++ b/plugins/inputs/sensors/sensors.go
@@ -0,0 +1,90 @@
+// +build linux,sensors
+
+package sensors
+
+import (
+	"strings"
+
+	"github.com/md14454/gosensors"
+
+	"github.com/influxdata/telegraf/plugins/inputs"
+)
+
+type Sensors struct {
+	Sensors []string
+}
+
+func (_ *Sensors) Description() string {
+	return "Monitor sensors using lm-sensors package"
+}
+
+var sensorsSampleConfig = `
+  # By default, telegraf gathers stats from all sensors detected by the
+  # lm-sensors module.
+  #
+  # Only collect stats from the selected sensors. Sensors are listed as
+  # <chip name>:<feature name>. This information can be found by running the
+  # sensors command, e.g. sensors -u
+  #
+  # A * as the feature name will return all features of the chip
+  #
+  # sensors = ["coretemp-isa-0000:Core 0", "coretemp-isa-0001:*"]
+`
+
+func (_ *Sensors) SampleConfig() string {
+	return sensorsSampleConfig
+}
+
+func (s *Sensors) Gather(acc inputs.Accumulator) error {
+	gosensors.Init()
+	defer gosensors.Cleanup()
+
+	for _, chip := range gosensors.GetDetectedChips() {
+		for _, feature := range chip.GetFeatures() {
+			chipName := chip.String()
+			featureLabel := feature.GetLabel()
+
+			if len(s.Sensors) != 0 {
+				var found bool
+
+				for _, sensor := range s.Sensors {
+					parts := strings.SplitN(sensor, ":", 2)
+
+					if parts[0] == chipName {
+						if parts[1] == "*" || parts[1] == featureLabel {
+							found = true
+							break
+						}
+					}
+				}
+
+				if !found {
+					continue
+				}
+			}
+
+			tags := map[string]string{
+				"chip":          chipName,
+				"adapter":       chip.AdapterName(),
+				"feature-name":  feature.Name,
+				"feature-label": featureLabel,
+			}
+
+			fieldName := chipName + ":" + featureLabel
+
+			fields := map[string]interface{}{
+				fieldName: feature.GetValue(),
+			}
+
+			acc.AddFields("sensors", fields, tags)
+		}
+	}
+
+	return nil
+}
+
+func init() {
+	inputs.Add("sensors", func() inputs.Input {
+		return &Sensors{}
+	})
+}
diff --git a/plugins/inputs/sensors/sensors_nocompile.go b/plugins/inputs/sensors/sensors_nocompile.go
new file mode 100644
index 000000000..5c38a437b
--- /dev/null
+++ b/plugins/inputs/sensors/sensors_nocompile.go
@@ -0,0 +1,3 @@
+// +build !linux !sensors
+
+package sensors
diff --git a/plugins/statsd/README.md b/plugins/inputs/statsd/README.md
similarity index 98%
rename from plugins/statsd/README.md
rename to plugins/inputs/statsd/README.md
index 76255f3b0..49b8ff842 100644
--- a/plugins/statsd/README.md
+++ b/plugins/inputs/statsd/README.md
@@ -157,4 +157,4 @@ mem.cached.localhost:256|g
 ```
 
 There are many more options available,
-[More details can be found here](https://github.com/influxdb/influxdb/tree/master/services/graphite#templates)
+[More details can be found here](https://github.com/influxdata/influxdb/tree/master/services/graphite#templates)
diff --git a/plugins/statsd/running_stats.go b/plugins/inputs/statsd/running_stats.go
similarity index 100%
rename from plugins/statsd/running_stats.go
rename to plugins/inputs/statsd/running_stats.go
diff --git a/plugins/statsd/running_stats_test.go b/plugins/inputs/statsd/running_stats_test.go
similarity index 100%
rename from plugins/statsd/running_stats_test.go
rename to plugins/inputs/statsd/running_stats_test.go
diff --git a/plugins/statsd/statsd.go b/plugins/inputs/statsd/statsd.go
similarity index 86%
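The selection logic in the new sensors plugin matches each detected `chip:feature` pair against the configured list, with `*` accepting every feature of a chip and an empty list accepting everything. Isolated into a helper (with the `SplitN` argument order as corrected above, plus a defensive length check of my own):

```
package main

import (
	"fmt"
	"strings"
)

// matches reports whether chip:feature is selected by a "chip:feature"
// or "chip:*" entry; an empty selection selects everything.
func matches(selected []string, chipName, featureLabel string) bool {
	if len(selected) == 0 {
		return true
	}
	for _, sensor := range selected {
		parts := strings.SplitN(sensor, ":", 2)
		if len(parts) != 2 || parts[0] != chipName {
			continue
		}
		if parts[1] == "*" || parts[1] == featureLabel {
			return true
		}
	}
	return false
}

func main() {
	sel := []string{"coretemp-isa-0000:Core 0", "coretemp-isa-0001:*"}
	fmt.Println(matches(sel, "coretemp-isa-0000", "Core 0")) // true
	fmt.Println(matches(sel, "coretemp-isa-0001", "Core 3")) // true (wildcard)
	fmt.Println(matches(sel, "coretemp-isa-0000", "Core 1")) // false
}
```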
rename from plugins/statsd/statsd.go rename to plugins/inputs/statsd/statsd.go index bd86a56d7..6b7a427b7 100644 --- a/plugins/statsd/statsd.go +++ b/plugins/inputs/statsd/statsd.go @@ -10,11 +10,13 @@ import ( "strings" "sync" - "github.com/influxdb/influxdb/services/graphite" + "github.com/influxdata/influxdb/services/graphite" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdata/telegraf/plugins/inputs" ) +const UDP_PACKET_SIZE int = 1500 + var dropwarn = "ERROR: Message queue full. Discarding line [%s] " + "You may want to increase allowed_pending_messages in the config\n" @@ -35,11 +37,16 @@ type Statsd struct { DeleteCounters bool DeleteSets bool DeleteTimings bool + ConvertNames bool + + // UDPPacketSize is the size of the read packets for the server listening + // for statsd UDP packets. This will default to 1500 bytes. + UDPPacketSize int `toml:"udp_packet_size"` sync.Mutex - // Channel for all incoming statsd messages - in chan string + // Channel for all incoming statsd packets + in chan []byte done chan struct{} // Cache gauges, counters & sets so they can be aggregated as they arrive @@ -57,12 +64,15 @@ func NewStatsd() *Statsd { // Make data structures s.done = make(chan struct{}) - s.in = make(chan string, s.AllowedPendingMessages) + s.in = make(chan []byte, s.AllowedPendingMessages) s.gauges = make(map[string]cachedgauge) s.counters = make(map[string]cachedcounter) s.sets = make(map[string]cachedset) s.timings = make(map[string]cachedtimings) + s.ConvertNames = true + s.UDPPacketSize = UDP_PACKET_SIZE + return &s } @@ -121,6 +131,9 @@ const sampleConfig = ` # Percentiles to calculate for timing & histogram stats percentiles = [90] + # convert measurement names, "." to "_" and "-" to "__" + convert_names = true + # templates = [ # "cpu.* measurement*" # ] @@ -133,13 +146,17 @@ const sampleConfig = ` # calculation of percentiles. Raising this limit increases the accuracy # of percentiles but also increases the memory usage and cpu time. percentile_limit = 1000 + + # UDP packet size for the server to listen for. This will depend on the size + # of the packets that the client is sending, which is usually 1500 bytes. 
+ udp_packet_size = 1500 ` func (_ *Statsd) SampleConfig() string { return sampleConfig } -func (s *Statsd) Gather(acc plugins.Accumulator) error { +func (s *Statsd) Gather(acc inputs.Accumulator) error { s.Lock() defer s.Unlock() @@ -185,7 +202,7 @@ func (s *Statsd) Gather(acc plugins.Accumulator) error { func (s *Statsd) Start() error { // Make data structures s.done = make(chan struct{}) - s.in = make(chan string, s.AllowedPendingMessages) + s.in = make(chan []byte, s.AllowedPendingMessages) s.gauges = make(map[string]cachedgauge) s.counters = make(map[string]cachedcounter) s.sets = make(map[string]cachedset) @@ -214,36 +231,37 @@ func (s *Statsd) udpListen() error { case <-s.done: return nil default: - buf := make([]byte, 1024) + buf := make([]byte, s.UDPPacketSize) n, _, err := listener.ReadFromUDP(buf) if err != nil { log.Printf("ERROR: %s\n", err.Error()) } - lines := strings.Split(string(buf[:n]), "\n") - for _, line := range lines { - line = strings.TrimSpace(line) - if line != "" { - select { - case s.in <- line: - default: - log.Printf(dropwarn, line) - } - } + select { + case s.in <- buf[:n]: + default: + log.Printf(dropwarn, string(buf[:n])) } } } } -// parser monitors the s.in channel, if there is a line ready, it parses the -// statsd string into a usable metric struct and aggregates the value +// parser monitors the s.in channel, if there is a packet ready, it parses the +// packet into statsd strings and then calls parseStatsdLine, which parses a +// single statsd metric into a struct. func (s *Statsd) parser() error { for { select { case <-s.done: return nil - case line := <-s.in: - s.parseStatsdLine(line) + case packet := <-s.in: + lines := strings.Split(string(packet), "\n") + for _, line := range lines { + line = strings.TrimSpace(line) + if line != "" { + s.parseStatsdLine(line) + } + } } } } @@ -319,10 +337,15 @@ func (s *Statsd) parseStatsdLine(line string) error { } m.floatvalue = v case "c", "s": + var v int64 v, err := strconv.ParseInt(pipesplit[0], 10, 64) if err != nil { - log.Printf("Error: parsing value to int64: %s\n", line) - return errors.New("Error Parsing statsd line") + v2, err2 := strconv.ParseFloat(pipesplit[0], 64) + if err2 != nil { + log.Printf("Error: parsing value to int64: %s\n", line) + return errors.New("Error Parsing statsd line") + } + v = int64(v2) } // If a sample rate is given with a counter, divide value by the rate if m.samplerate != 0 && m.mtype == "c" { @@ -389,8 +412,10 @@ func (s *Statsd) parseName(bucket string) (string, map[string]string) { if err == nil { name, tags, _, _ = p.ApplyTemplate(name) } - name = strings.Replace(name, ".", "_", -1) - name = strings.Replace(name, "-", "__", -1) + if s.ConvertNames { + name = strings.Replace(name, ".", "_", -1) + name = strings.Replace(name, "-", "__", -1) + } return name, tags } @@ -490,7 +515,10 @@ func (s *Statsd) Stop() { } func init() { - plugins.Add("statsd", func() plugins.Plugin { - return &Statsd{} + inputs.Add("statsd", func() inputs.Input { + return &Statsd{ + ConvertNames: true, + UDPPacketSize: UDP_PACKET_SIZE, + } }) } diff --git a/plugins/statsd/statsd_test.go b/plugins/inputs/statsd/statsd_test.go similarity index 94% rename from plugins/statsd/statsd_test.go rename to plugins/inputs/statsd/statsd_test.go index 6c85b0c4b..6fc1f6933 100644 --- a/plugins/statsd/statsd_test.go +++ b/plugins/inputs/statsd/statsd_test.go @@ -5,7 +5,7 @@ import ( "fmt" "testing" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" ) // Invalid lines should return 
an error @@ -303,6 +303,64 @@ func TestParse_Tags(t *testing.T) { } } +// Test that statsd buckets are parsed to measurement names properly +func TestParseName(t *testing.T) { + s := NewStatsd() + + tests := []struct { + in_name string + out_name string + }{ + { + "foobar", + "foobar", + }, + { + "foo.bar", + "foo_bar", + }, + { + "foo.bar-baz", + "foo_bar__baz", + }, + } + + for _, test := range tests { + name, _ := s.parseName(test.in_name) + if name != test.out_name { + t.Errorf("Expected: %s, got %s", test.out_name, name) + } + } + + // Test with ConvertNames = false + s.ConvertNames = false + + tests = []struct { + in_name string + out_name string + }{ + { + "foobar", + "foobar", + }, + { + "foo.bar", + "foo.bar", + }, + { + "foo.bar-baz", + "foo.bar-baz", + }, + } + + for _, test := range tests { + name, _ := s.parseName(test.in_name) + if name != test.out_name { + t.Errorf("Expected: %s, got %s", test.out_name, name) + } + } +} + // Test that measurements with the same name, but different tags, are treated // as different outputs func TestParse_MeasurementsWithSameName(t *testing.T) { @@ -647,7 +705,7 @@ func TestParse_Counters(t *testing.T) { func TestParse_Timings(t *testing.T) { s := NewStatsd() s.Percentiles = []int{90} - testacc := &testutil.Accumulator{} + acc := &testutil.Accumulator{} // Test that counters work valid_lines := []string{ @@ -665,7 +723,7 @@ func TestParse_Timings(t *testing.T) { } } - s.Gather(testacc) + s.Gather(acc) tests := []struct { name string @@ -698,10 +756,8 @@ func TestParse_Timings(t *testing.T) { } for _, test := range tests { - if !testacc.CheckValue(test.name, test.value) { - t.Errorf("Did not find measurement %s with value %v", - test.name, test.value) - } + acc.AssertContainsFields(t, test.name, + map[string]interface{}{"value": test.value}) } } diff --git a/plugins/system/CPU_README.md b/plugins/inputs/system/CPU_README.md similarity index 100% rename from plugins/system/CPU_README.md rename to plugins/inputs/system/CPU_README.md diff --git a/plugins/system/MEM_README.md b/plugins/inputs/system/MEM_README.md similarity index 100% rename from plugins/system/MEM_README.md rename to plugins/inputs/system/MEM_README.md diff --git a/plugins/system/NETSTAT_README.md b/plugins/inputs/system/NETSTAT_README.md similarity index 100% rename from plugins/system/NETSTAT_README.md rename to plugins/inputs/system/NETSTAT_README.md diff --git a/plugins/system/cpu.go b/plugins/inputs/system/cpu.go similarity index 50% rename from plugins/system/cpu.go rename to plugins/inputs/system/cpu.go index 837a1bc23..95c854b2c 100644 --- a/plugins/system/cpu.go +++ b/plugins/inputs/system/cpu.go @@ -2,8 +2,9 @@ package system import ( "fmt" + "time" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdata/telegraf/plugins/inputs" "github.com/shirou/gopsutil/cpu" ) @@ -31,18 +32,19 @@ var sampleConfig = ` # Whether to report total system cpu stats or not totalcpu = true # Comment this line if you want the raw CPU time metrics - drop = ["cpu_time*"] + drop = ["time_*"] ` func (_ *CPUStats) SampleConfig() string { return sampleConfig } -func (s *CPUStats) Gather(acc plugins.Accumulator) error { +func (s *CPUStats) Gather(acc inputs.Accumulator) error { times, err := s.ps.CPUTimes(s.PerCPU, s.TotalCPU) if err != nil { return fmt.Errorf("error getting CPU info: %s", err) } + now := time.Now() for i, cts := range times { tags := map[string]string{ @@ -51,21 +53,24 @@ func (s *CPUStats) Gather(acc plugins.Accumulator) error { total := totalCpuTime(cts) - // Add total 
cpu numbers - add(acc, "time_user", cts.User, tags) - add(acc, "time_system", cts.System, tags) - add(acc, "time_idle", cts.Idle, tags) - add(acc, "time_nice", cts.Nice, tags) - add(acc, "time_iowait", cts.Iowait, tags) - add(acc, "time_irq", cts.Irq, tags) - add(acc, "time_softirq", cts.Softirq, tags) - add(acc, "time_steal", cts.Steal, tags) - add(acc, "time_guest", cts.Guest, tags) - add(acc, "time_guest_nice", cts.GuestNice, tags) + // Add cpu time metrics + fields := map[string]interface{}{ + "time_user": cts.User, + "time_system": cts.System, + "time_idle": cts.Idle, + "time_nice": cts.Nice, + "time_iowait": cts.Iowait, + "time_irq": cts.Irq, + "time_softirq": cts.Softirq, + "time_steal": cts.Steal, + "time_guest": cts.Guest, + "time_guest_nice": cts.GuestNice, + } // Add in percentage if len(s.lastStats) == 0 { - // If it's the 1st gather, can't get CPU stats yet + acc.AddFields("cpu", fields, tags, now) + // If it's the 1st gather, can't get CPU Usage stats yet continue } lastCts := s.lastStats[i] @@ -81,17 +86,17 @@ func (s *CPUStats) Gather(acc plugins.Accumulator) error { continue } - add(acc, "usage_user", 100*(cts.User-lastCts.User)/totalDelta, tags) - add(acc, "usage_system", 100*(cts.System-lastCts.System)/totalDelta, tags) - add(acc, "usage_idle", 100*(cts.Idle-lastCts.Idle)/totalDelta, tags) - add(acc, "usage_nice", 100*(cts.Nice-lastCts.Nice)/totalDelta, tags) - add(acc, "usage_iowait", 100*(cts.Iowait-lastCts.Iowait)/totalDelta, tags) - add(acc, "usage_irq", 100*(cts.Irq-lastCts.Irq)/totalDelta, tags) - add(acc, "usage_softirq", 100*(cts.Softirq-lastCts.Softirq)/totalDelta, tags) - add(acc, "usage_steal", 100*(cts.Steal-lastCts.Steal)/totalDelta, tags) - add(acc, "usage_guest", 100*(cts.Guest-lastCts.Guest)/totalDelta, tags) - add(acc, "usage_guest_nice", 100*(cts.GuestNice-lastCts.GuestNice)/totalDelta, tags) - + fields["usage_user"] = 100 * (cts.User - lastCts.User) / totalDelta + fields["usage_system"] = 100 * (cts.System - lastCts.System) / totalDelta + fields["usage_idle"] = 100 * (cts.Idle - lastCts.Idle) / totalDelta + fields["usage_nice"] = 100 * (cts.Nice - lastCts.Nice) / totalDelta + fields["usage_iowait"] = 100 * (cts.Iowait - lastCts.Iowait) / totalDelta + fields["usage_irq"] = 100 * (cts.Irq - lastCts.Irq) / totalDelta + fields["usage_softirq"] = 100 * (cts.Softirq - lastCts.Softirq) / totalDelta + fields["usage_steal"] = 100 * (cts.Steal - lastCts.Steal) / totalDelta + fields["usage_guest"] = 100 * (cts.Guest - lastCts.Guest) / totalDelta + fields["usage_guest_nice"] = 100 * (cts.GuestNice - lastCts.GuestNice) / totalDelta + acc.AddFields("cpu", fields, tags, now) } s.lastStats = times @@ -106,7 +111,7 @@ func totalCpuTime(t cpu.CPUTimesStat) float64 { } func init() { - plugins.Add("cpu", func() plugins.Plugin { + inputs.Add("cpu", func() inputs.Input { return &CPUStats{ps: &systemPS{}} }) } diff --git a/plugins/inputs/system/cpu_test.go b/plugins/inputs/system/cpu_test.go new file mode 100644 index 000000000..77d90e2a5 --- /dev/null +++ b/plugins/inputs/system/cpu_test.go @@ -0,0 +1,148 @@ +package system + +import ( + "fmt" + "testing" + + "github.com/influxdata/telegraf/testutil" + "github.com/shirou/gopsutil/cpu" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestCPUStats(t *testing.T) { + var mps MockPS + defer mps.AssertExpectations(t) + var acc testutil.Accumulator + + cts := cpu.CPUTimesStat{ + CPU: "cpu0", + User: 3.1, + System: 8.2, + Idle: 80.1, + Nice: 1.3, + Iowait: 0.2, + Irq: 0.1, + Softirq: 0.11, + 
Steal: 0.0511,
+ Guest: 8.1,
+ GuestNice: 0.324,
+ }
+
+ cts2 := cpu.CPUTimesStat{
+ CPU: "cpu0",
+ User: 11.4, // increased by 8.3
+ System: 10.9, // increased by 2.7
+ Idle: 158.8699, // increased by 78.7699 (for total increase of 100)
+ Nice: 2.5, // increased by 1.2
+ Iowait: 0.7, // increased by 0.5
+ Irq: 1.2, // increased by 1.1
+ Softirq: 0.31, // increased by 0.2
+ Steal: 0.2812, // increased by 0.2301
+ Guest: 12.9, // increased by 4.8
+ GuestNice: 2.524, // increased by 2.2
+ }
+
+ mps.On("CPUTimes").Return([]cpu.CPUTimesStat{cts}, nil)
+
+ cs := NewCPUStats(&mps)
+
+ cputags := map[string]string{
+ "cpu": "cpu0",
+ }
+
+ err := cs.Gather(&acc)
+ require.NoError(t, err)
+
+ // Computed values are checked with delta > 0 because of floating point arithmetic
// imprecision
+ assertContainsTaggedFloat(t, &acc, "cpu", "time_user", 3.1, 0, cputags)
+ assertContainsTaggedFloat(t, &acc, "cpu", "time_system", 8.2, 0, cputags)
+ assertContainsTaggedFloat(t, &acc, "cpu", "time_idle", 80.1, 0, cputags)
+ assertContainsTaggedFloat(t, &acc, "cpu", "time_nice", 1.3, 0, cputags)
+ assertContainsTaggedFloat(t, &acc, "cpu", "time_iowait", 0.2, 0, cputags)
+ assertContainsTaggedFloat(t, &acc, "cpu", "time_irq", 0.1, 0, cputags)
+ assertContainsTaggedFloat(t, &acc, "cpu", "time_softirq", 0.11, 0, cputags)
+ assertContainsTaggedFloat(t, &acc, "cpu", "time_steal", 0.0511, 0, cputags)
+ assertContainsTaggedFloat(t, &acc, "cpu", "time_guest", 8.1, 0, cputags)
+ assertContainsTaggedFloat(t, &acc, "cpu", "time_guest_nice", 0.324, 0, cputags)
+
+ mps2 := MockPS{}
+ mps2.On("CPUTimes").Return([]cpu.CPUTimesStat{cts2}, nil)
+ cs.ps = &mps2
+
+ // Should have added cpu percentages too
+ err = cs.Gather(&acc)
+ require.NoError(t, err)
+
+ assertContainsTaggedFloat(t, &acc, "cpu", "time_user", 11.4, 0, cputags)
+ assertContainsTaggedFloat(t, &acc, "cpu", "time_system", 10.9, 0, cputags)
+ assertContainsTaggedFloat(t, &acc, "cpu", "time_idle", 158.8699, 0, cputags)
+ assertContainsTaggedFloat(t, &acc, "cpu", "time_nice", 2.5, 0, cputags)
+ assertContainsTaggedFloat(t, &acc, "cpu", "time_iowait", 0.7, 0, cputags)
+ assertContainsTaggedFloat(t, &acc, "cpu", "time_irq", 1.2, 0, cputags)
+ assertContainsTaggedFloat(t, &acc, "cpu", "time_softirq", 0.31, 0, cputags)
+ assertContainsTaggedFloat(t, &acc, "cpu", "time_steal", 0.2812, 0, cputags)
+ assertContainsTaggedFloat(t, &acc, "cpu", "time_guest", 12.9, 0, cputags)
+ assertContainsTaggedFloat(t, &acc, "cpu", "time_guest_nice", 2.524, 0, cputags)
+
+ assertContainsTaggedFloat(t, &acc, "cpu", "usage_user", 8.3, 0.0005, cputags)
+ assertContainsTaggedFloat(t, &acc, "cpu", "usage_system", 2.7, 0.0005, cputags)
+ assertContainsTaggedFloat(t, &acc, "cpu", "usage_idle", 78.7699, 0.0005, cputags)
+ assertContainsTaggedFloat(t, &acc, "cpu", "usage_nice", 1.2, 0.0005, cputags)
+ assertContainsTaggedFloat(t, &acc, "cpu", "usage_iowait", 0.5, 0.0005, cputags)
+ assertContainsTaggedFloat(t, &acc, "cpu", "usage_irq", 1.1, 0.0005, cputags)
+ assertContainsTaggedFloat(t, &acc, "cpu", "usage_softirq", 0.2, 0.0005, cputags)
+ assertContainsTaggedFloat(t, &acc, "cpu", "usage_steal", 0.2301, 0.0005, cputags)
+ assertContainsTaggedFloat(t, &acc, "cpu", "usage_guest", 4.8, 0.0005, cputags)
+ assertContainsTaggedFloat(t, &acc, "cpu", "usage_guest_nice", 2.2, 0.0005, cputags)
+}
+
+// Asserts that a given accumulator contains a measurement of type float64 with
+// specific tags within a certain distance of a given expected value. Asserts a failure
+// if the measurement is of the wrong type, or if no matching measurements are found
+//
+// Parameters:
+// t *testing.T : Testing object to use
+// acc testutil.Accumulator: Accumulator to examine
+// measurement string : Name of the measurement to examine
+// expectedValue float64 : Value to search for within the measurement
+// delta float64 : Maximum acceptable distance of an accumulated value
+// from the expectedValue parameter. Useful when
+// floating-point arithmetic imprecision makes looking
+// for an exact match impractical
+// tags map[string]string : Tag set the found measurement must have. Set to nil to
+// ignore the tag set.
+func assertContainsTaggedFloat(
+ t *testing.T,
+ acc *testutil.Accumulator,
+ measurement string,
+ field string,
+ expectedValue float64,
+ delta float64,
+ tags map[string]string,
+) {
+ var actualValue float64
+ for _, pt := range acc.Points {
+ if pt.Measurement == measurement {
+ for fieldname, value := range pt.Fields {
+ if fieldname == field {
+ if value, ok := value.(float64); ok {
+ actualValue = value
+ if (value >= expectedValue-delta) && (value <= expectedValue+delta) {
+ // Found the point, return without failing
+ return
+ }
+ } else {
+ assert.Fail(t, fmt.Sprintf("Measurement \"%s\" does not have type float64",
+ measurement))
+ }
+ }
+ }
+ }
+ }
+ msg := fmt.Sprintf(
+ "Could not find measurement \"%s\" with requested tags within %f of %f, Actual: %f",
+ measurement, delta, expectedValue, actualValue)
+ assert.Fail(t, msg)
+}
diff --git a/plugins/system/disk.go b/plugins/inputs/system/disk.go similarity index 64% rename from plugins/system/disk.go rename to plugins/inputs/system/disk.go index 2e202f8d2..c6b23492b 100644 --- a/plugins/system/disk.go +++ b/plugins/inputs/system/disk.go
@@ -3,13 +3,16 @@ package system
import (
 "fmt"
- "github.com/influxdb/telegraf/plugins"
+ "github.com/influxdata/telegraf/plugins/inputs"
)
type DiskStats struct {
 ps PS
+ // Legacy support
 Mountpoints []string
+
+ MountPoints []string
}
func (_ *DiskStats) Description() string {
@@ -19,43 +22,38 @@ func (_ *DiskStats) Description() string {
var diskSampleConfig = `
 # By default, telegraf gathers stats for all mountpoints.
 # Setting mountpoints will restrict the stats to the specified mountpoints.
- # Mountpoints=["/"] + # mount_points = ["/"] ` func (_ *DiskStats) SampleConfig() string { return diskSampleConfig } -func (s *DiskStats) Gather(acc plugins.Accumulator) error { - disks, err := s.ps.DiskUsage() +func (s *DiskStats) Gather(acc inputs.Accumulator) error { + // Legacy support: + if len(s.Mountpoints) != 0 { + s.MountPoints = s.Mountpoints + } + + disks, err := s.ps.DiskUsage(s.MountPoints) if err != nil { return fmt.Errorf("error getting disk usage info: %s", err) } - var restrictMpoints bool - mPoints := make(map[string]bool) - if len(s.Mountpoints) != 0 { - restrictMpoints = true - for _, mp := range s.Mountpoints { - mPoints[mp] = true - } - } - for _, du := range disks { - _, member := mPoints[du.Path] - if restrictMpoints && !member { - continue - } tags := map[string]string{ "path": du.Path, "fstype": du.Fstype, } - acc.Add("total", du.Total, tags) - acc.Add("free", du.Free, tags) - acc.Add("used", du.Total-du.Free, tags) - acc.Add("inodes_total", du.InodesTotal, tags) - acc.Add("inodes_free", du.InodesFree, tags) - acc.Add("inodes_used", du.InodesTotal-du.InodesFree, tags) + fields := map[string]interface{}{ + "total": du.Total, + "free": du.Free, + "used": du.Total - du.Free, + "inodes_total": du.InodesTotal, + "inodes_free": du.InodesFree, + "inodes_used": du.InodesTotal - du.InodesFree, + } + acc.AddFields("disk", fields, tags) } return nil @@ -85,7 +83,7 @@ func (_ *DiskIOStats) SampleConfig() string { return diskIoSampleConfig } -func (s *DiskIOStats) Gather(acc plugins.Accumulator) error { +func (s *DiskIOStats) Gather(acc inputs.Accumulator) error { diskio, err := s.ps.DiskIO() if err != nil { return fmt.Errorf("error getting disk io info: %s", err) @@ -115,24 +113,27 @@ func (s *DiskIOStats) Gather(acc plugins.Accumulator) error { } } - acc.Add("reads", io.ReadCount, tags) - acc.Add("writes", io.WriteCount, tags) - acc.Add("read_bytes", io.ReadBytes, tags) - acc.Add("write_bytes", io.WriteBytes, tags) - acc.Add("read_time", io.ReadTime, tags) - acc.Add("write_time", io.WriteTime, tags) - acc.Add("io_time", io.IoTime, tags) + fields := map[string]interface{}{ + "reads": io.ReadCount, + "writes": io.WriteCount, + "read_bytes": io.ReadBytes, + "write_bytes": io.WriteBytes, + "read_time": io.ReadTime, + "write_time": io.WriteTime, + "io_time": io.IoTime, + } + acc.AddFields("diskio", fields, tags) } return nil } func init() { - plugins.Add("disk", func() plugins.Plugin { + inputs.Add("disk", func() inputs.Input { return &DiskStats{ps: &systemPS{}} }) - plugins.Add("io", func() plugins.Plugin { + inputs.Add("diskio", func() inputs.Input { return &DiskIOStats{ps: &systemPS{}} }) } diff --git a/plugins/inputs/system/disk_test.go b/plugins/inputs/system/disk_test.go new file mode 100644 index 000000000..ec4182cb3 --- /dev/null +++ b/plugins/inputs/system/disk_test.go @@ -0,0 +1,177 @@ +package system + +import ( + "testing" + + "github.com/influxdata/telegraf/testutil" + "github.com/shirou/gopsutil/disk" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestDiskStats(t *testing.T) { + var mps MockPS + defer mps.AssertExpectations(t) + var acc testutil.Accumulator + var err error + + duAll := []*disk.DiskUsageStat{ + { + Path: "/", + Fstype: "ext4", + Total: 128, + Free: 23, + InodesTotal: 1234, + InodesFree: 234, + }, + { + Path: "/home", + Fstype: "ext4", + Total: 256, + Free: 46, + InodesTotal: 2468, + InodesFree: 468, + }, + } + duFiltered := []*disk.DiskUsageStat{ + { + Path: "/", + Fstype: "ext4", + Total: 128, + Free: 23, 
+ InodesTotal: 1234, + InodesFree: 234, + }, + } + + mps.On("DiskUsage", []string(nil)).Return(duAll, nil) + mps.On("DiskUsage", []string{"/", "/dev"}).Return(duFiltered, nil) + mps.On("DiskUsage", []string{"/", "/home"}).Return(duAll, nil) + + err = (&DiskStats{ps: &mps}).Gather(&acc) + require.NoError(t, err) + + numDiskPoints := acc.NFields() + expectedAllDiskPoints := 12 + assert.Equal(t, expectedAllDiskPoints, numDiskPoints) + + tags1 := map[string]string{ + "path": "/", + "fstype": "ext4", + } + tags2 := map[string]string{ + "path": "/home", + "fstype": "ext4", + } + + fields1 := map[string]interface{}{ + "total": uint64(128), + "used": uint64(105), + "free": uint64(23), + "inodes_total": uint64(1234), + "inodes_free": uint64(234), + "inodes_used": uint64(1000), + } + fields2 := map[string]interface{}{ + "total": uint64(256), + "used": uint64(210), + "free": uint64(46), + "inodes_total": uint64(2468), + "inodes_free": uint64(468), + "inodes_used": uint64(2000), + } + acc.AssertContainsTaggedFields(t, "disk", fields1, tags1) + acc.AssertContainsTaggedFields(t, "disk", fields2, tags2) + + // We expect 6 more DiskPoints to show up with an explicit match on "/" + // and /home not matching the /dev in MountPoints + err = (&DiskStats{ps: &mps, MountPoints: []string{"/", "/dev"}}).Gather(&acc) + assert.Equal(t, expectedAllDiskPoints+6, acc.NFields()) + + // We should see all the diskpoints as MountPoints includes both + // / and /home + err = (&DiskStats{ps: &mps, MountPoints: []string{"/", "/home"}}).Gather(&acc) + assert.Equal(t, 2*expectedAllDiskPoints+6, acc.NFields()) +} + +// func TestDiskIOStats(t *testing.T) { +// var mps MockPS +// defer mps.AssertExpectations(t) +// var acc testutil.Accumulator +// var err error + +// diskio1 := disk.DiskIOCountersStat{ +// ReadCount: 888, +// WriteCount: 5341, +// ReadBytes: 100000, +// WriteBytes: 200000, +// ReadTime: 7123, +// WriteTime: 9087, +// Name: "sda1", +// IoTime: 123552, +// SerialNumber: "ab-123-ad", +// } +// diskio2 := disk.DiskIOCountersStat{ +// ReadCount: 444, +// WriteCount: 2341, +// ReadBytes: 200000, +// WriteBytes: 400000, +// ReadTime: 3123, +// WriteTime: 6087, +// Name: "sdb1", +// IoTime: 246552, +// SerialNumber: "bb-123-ad", +// } + +// mps.On("DiskIO").Return( +// map[string]disk.DiskIOCountersStat{"sda1": diskio1, "sdb1": diskio2}, +// nil) + +// err = (&DiskIOStats{ps: &mps}).Gather(&acc) +// require.NoError(t, err) + +// numDiskIOPoints := acc.NFields() +// expectedAllDiskIOPoints := 14 +// assert.Equal(t, expectedAllDiskIOPoints, numDiskIOPoints) + +// dtags1 := map[string]string{ +// "name": "sda1", +// "serial": "ab-123-ad", +// } +// dtags2 := map[string]string{ +// "name": "sdb1", +// "serial": "bb-123-ad", +// } + +// assert.True(t, acc.CheckTaggedValue("reads", uint64(888), dtags1)) +// assert.True(t, acc.CheckTaggedValue("writes", uint64(5341), dtags1)) +// assert.True(t, acc.CheckTaggedValue("read_bytes", uint64(100000), dtags1)) +// assert.True(t, acc.CheckTaggedValue("write_bytes", uint64(200000), dtags1)) +// assert.True(t, acc.CheckTaggedValue("read_time", uint64(7123), dtags1)) +// assert.True(t, acc.CheckTaggedValue("write_time", uint64(9087), dtags1)) +// assert.True(t, acc.CheckTaggedValue("io_time", uint64(123552), dtags1)) +// assert.True(t, acc.CheckTaggedValue("reads", uint64(444), dtags2)) +// assert.True(t, acc.CheckTaggedValue("writes", uint64(2341), dtags2)) +// assert.True(t, acc.CheckTaggedValue("read_bytes", uint64(200000), dtags2)) +// assert.True(t, 
acc.CheckTaggedValue("write_bytes", uint64(400000), dtags2)) +// assert.True(t, acc.CheckTaggedValue("read_time", uint64(3123), dtags2)) +// assert.True(t, acc.CheckTaggedValue("write_time", uint64(6087), dtags2)) +// assert.True(t, acc.CheckTaggedValue("io_time", uint64(246552), dtags2)) + +// // We expect 7 more DiskIOPoints to show up with an explicit match on "sdb1" +// // and serial should be missing from the tags with SkipSerialNumber set +// err = (&DiskIOStats{ps: &mps, Devices: []string{"sdb1"}, SkipSerialNumber: true}).Gather(&acc) +// assert.Equal(t, expectedAllDiskIOPoints+7, acc.NFields()) + +// dtags3 := map[string]string{ +// "name": "sdb1", +// } + +// assert.True(t, acc.CheckTaggedValue("reads", uint64(444), dtags3)) +// assert.True(t, acc.CheckTaggedValue("writes", uint64(2341), dtags3)) +// assert.True(t, acc.CheckTaggedValue("read_bytes", uint64(200000), dtags3)) +// assert.True(t, acc.CheckTaggedValue("write_bytes", uint64(400000), dtags3)) +// assert.True(t, acc.CheckTaggedValue("read_time", uint64(3123), dtags3)) +// assert.True(t, acc.CheckTaggedValue("write_time", uint64(6087), dtags3)) +// assert.True(t, acc.CheckTaggedValue("io_time", uint64(246552), dtags3)) +// } diff --git a/plugins/inputs/system/memory.go b/plugins/inputs/system/memory.go new file mode 100644 index 000000000..32a2f2b09 --- /dev/null +++ b/plugins/inputs/system/memory.go @@ -0,0 +1,77 @@ +package system + +import ( + "fmt" + + "github.com/influxdata/telegraf/plugins/inputs" +) + +type MemStats struct { + ps PS +} + +func (_ *MemStats) Description() string { + return "Read metrics about memory usage" +} + +func (_ *MemStats) SampleConfig() string { return "" } + +func (s *MemStats) Gather(acc inputs.Accumulator) error { + vm, err := s.ps.VMStat() + if err != nil { + return fmt.Errorf("error getting virtual memory info: %s", err) + } + + fields := map[string]interface{}{ + "total": vm.Total, + "available": vm.Available, + "used": vm.Used, + "free": vm.Free, + "cached": vm.Cached, + "buffered": vm.Buffers, + "used_percent": 100 * float64(vm.Used) / float64(vm.Total), + "available_percent": 100 * float64(vm.Available) / float64(vm.Total), + } + acc.AddFields("mem", fields, nil) + + return nil +} + +type SwapStats struct { + ps PS +} + +func (_ *SwapStats) Description() string { + return "Read metrics about swap memory usage" +} + +func (_ *SwapStats) SampleConfig() string { return "" } + +func (s *SwapStats) Gather(acc inputs.Accumulator) error { + swap, err := s.ps.SwapStat() + if err != nil { + return fmt.Errorf("error getting swap memory info: %s", err) + } + + fields := map[string]interface{}{ + "total": swap.Total, + "used": swap.Used, + "free": swap.Free, + "used_percent": swap.UsedPercent, + "in": swap.Sin, + "out": swap.Sout, + } + acc.AddFields("swap", fields, nil) + + return nil +} + +func init() { + inputs.Add("mem", func() inputs.Input { + return &MemStats{ps: &systemPS{}} + }) + + inputs.Add("swap", func() inputs.Input { + return &SwapStats{ps: &systemPS{}} + }) +} diff --git a/plugins/inputs/system/memory_test.go b/plugins/inputs/system/memory_test.go new file mode 100644 index 000000000..0a85bc869 --- /dev/null +++ b/plugins/inputs/system/memory_test.go @@ -0,0 +1,72 @@ +package system + +import ( + "testing" + + "github.com/influxdata/telegraf/testutil" + "github.com/shirou/gopsutil/mem" + "github.com/stretchr/testify/require" +) + +func TestMemStats(t *testing.T) { + var mps MockPS + var err error + defer mps.AssertExpectations(t) + var acc testutil.Accumulator + + vms := 
&mem.VirtualMemoryStat{ + Total: 12400, + Available: 7600, + Used: 5000, + Free: 1235, + // Active: 8134, + // Inactive: 1124, + // Buffers: 771, + // Cached: 4312, + // Wired: 134, + // Shared: 2142, + } + + mps.On("VMStat").Return(vms, nil) + + sms := &mem.SwapMemoryStat{ + Total: 8123, + Used: 1232, + Free: 6412, + UsedPercent: 12.2, + Sin: 7, + Sout: 830, + } + + mps.On("SwapStat").Return(sms, nil) + + err = (&MemStats{&mps}).Gather(&acc) + require.NoError(t, err) + + memfields := map[string]interface{}{ + "total": uint64(12400), + "available": uint64(7600), + "used": uint64(5000), + "available_percent": float64(7600) / float64(12400) * 100, + "used_percent": float64(5000) / float64(12400) * 100, + "free": uint64(1235), + "cached": uint64(0), + "buffered": uint64(0), + } + acc.AssertContainsTaggedFields(t, "mem", memfields, make(map[string]string)) + + acc.Points = nil + + err = (&SwapStats{&mps}).Gather(&acc) + require.NoError(t, err) + + swapfields := map[string]interface{}{ + "total": uint64(8123), + "used": uint64(1232), + "used_percent": float64(12.2), + "free": uint64(6412), + "in": uint64(7), + "out": uint64(830), + } + acc.AssertContainsTaggedFields(t, "swap", swapfields, make(map[string]string)) +} diff --git a/plugins/system/mock_PS.go b/plugins/inputs/system/mock_PS.go similarity index 87% rename from plugins/system/mock_PS.go rename to plugins/inputs/system/mock_PS.go index 6e8bfe224..6e9a5f93e 100644 --- a/plugins/system/mock_PS.go +++ b/plugins/inputs/system/mock_PS.go @@ -33,8 +33,8 @@ func (m *MockPS) CPUTimes(perCPU, totalCPU bool) ([]cpu.CPUTimesStat, error) { return r0, r1 } -func (m *MockPS) DiskUsage() ([]*disk.DiskUsageStat, error) { - ret := m.Called() +func (m *MockPS) DiskUsage(mountPointFilter []string) ([]*disk.DiskUsageStat, error) { + ret := m.Called(mountPointFilter) r0 := ret.Get(0).([]*disk.DiskUsageStat) r1 := ret.Error(1) @@ -87,15 +87,6 @@ func (m *MockPS) SwapStat() (*mem.SwapMemoryStat, error) { return r0, r1 } -func (m *MockPS) DockerStat() ([]*DockerContainerStat, error) { - ret := m.Called() - - r0 := ret.Get(0).([]*DockerContainerStat) - r1 := ret.Error(1) - - return r0, r1 -} - func (m *MockPS) NetConnections() ([]net.NetConnectionStat, error) { ret := m.Called() diff --git a/plugins/system/net.go b/plugins/inputs/system/net.go similarity index 71% rename from plugins/system/net.go rename to plugins/inputs/system/net.go index 9dbcc4577..7f71f5200 100644 --- a/plugins/system/net.go +++ b/plugins/inputs/system/net.go @@ -5,7 +5,7 @@ import ( "net" "strings" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdata/telegraf/plugins/inputs" ) type NetIOStats struct { @@ -31,7 +31,7 @@ func (_ *NetIOStats) SampleConfig() string { return netSampleConfig } -func (s *NetIOStats) Gather(acc plugins.Accumulator) error { +func (s *NetIOStats) Gather(acc inputs.Accumulator) error { netio, err := s.ps.NetIO() if err != nil { return fmt.Errorf("error getting net io info: %s", err) @@ -70,32 +70,40 @@ func (s *NetIOStats) Gather(acc plugins.Accumulator) error { "interface": io.Name, } - acc.Add("bytes_sent", io.BytesSent, tags) - acc.Add("bytes_recv", io.BytesRecv, tags) - acc.Add("packets_sent", io.PacketsSent, tags) - acc.Add("packets_recv", io.PacketsRecv, tags) - acc.Add("err_in", io.Errin, tags) - acc.Add("err_out", io.Errout, tags) - acc.Add("drop_in", io.Dropin, tags) - acc.Add("drop_out", io.Dropout, tags) + fields := map[string]interface{}{ + "bytes_sent": io.BytesSent, + "bytes_recv": io.BytesRecv, + "packets_sent": io.PacketsSent, + 
"packets_recv": io.PacketsRecv, + "err_in": io.Errin, + "err_out": io.Errout, + "drop_in": io.Dropin, + "drop_out": io.Dropout, + } + acc.AddFields("net", fields, tags) } // Get system wide stats for different network protocols // (ignore these stats if the call fails) netprotos, _ := s.ps.NetProto() + fields := make(map[string]interface{}) for _, proto := range netprotos { for stat, value := range proto.Stats { name := fmt.Sprintf("%s_%s", strings.ToLower(proto.Protocol), strings.ToLower(stat)) - acc.Add(name, value, nil) + fields[name] = value } } + tags := map[string]string{ + "interface": "all", + } + acc.AddFields("net", fields, tags) return nil } func init() { - plugins.Add("net", func() plugins.Plugin { + inputs.Add("net", func() inputs.Input { return &NetIOStats{ps: &systemPS{}} }) } diff --git a/plugins/inputs/system/net_test.go b/plugins/inputs/system/net_test.go new file mode 100644 index 000000000..3297acf07 --- /dev/null +++ b/plugins/inputs/system/net_test.go @@ -0,0 +1,109 @@ +package system + +import ( + "syscall" + "testing" + + "github.com/influxdata/telegraf/testutil" + "github.com/shirou/gopsutil/net" + "github.com/stretchr/testify/require" +) + +func TestNetStats(t *testing.T) { + var mps MockPS + var err error + defer mps.AssertExpectations(t) + var acc testutil.Accumulator + + netio := net.NetIOCountersStat{ + Name: "eth0", + BytesSent: 1123, + BytesRecv: 8734422, + PacketsSent: 781, + PacketsRecv: 23456, + Errin: 832, + Errout: 8, + Dropin: 7, + Dropout: 1, + } + + mps.On("NetIO").Return([]net.NetIOCountersStat{netio}, nil) + + netprotos := []net.NetProtoCountersStat{ + net.NetProtoCountersStat{ + Protocol: "Udp", + Stats: map[string]int64{ + "InDatagrams": 4655, + "NoPorts": 892592, + }, + }, + } + mps.On("NetProto").Return(netprotos, nil) + + netstats := []net.NetConnectionStat{ + net.NetConnectionStat{ + Type: syscall.SOCK_DGRAM, + }, + net.NetConnectionStat{ + Status: "ESTABLISHED", + }, + net.NetConnectionStat{ + Status: "ESTABLISHED", + }, + net.NetConnectionStat{ + Status: "CLOSE", + }, + } + + mps.On("NetConnections").Return(netstats, nil) + + err = (&NetIOStats{ps: &mps, skipChecks: true}).Gather(&acc) + require.NoError(t, err) + + ntags := map[string]string{ + "interface": "eth0", + } + + fields1 := map[string]interface{}{ + "bytes_sent": uint64(1123), + "bytes_recv": uint64(8734422), + "packets_sent": uint64(781), + "packets_recv": uint64(23456), + "err_in": uint64(832), + "err_out": uint64(8), + "drop_in": uint64(7), + "drop_out": uint64(1), + } + acc.AssertContainsTaggedFields(t, "net", fields1, ntags) + + fields2 := map[string]interface{}{ + "udp_noports": int64(892592), + "udp_indatagrams": int64(4655), + } + ntags = map[string]string{ + "interface": "all", + } + acc.AssertContainsTaggedFields(t, "net", fields2, ntags) + + acc.Points = nil + + err = (&NetStats{&mps}).Gather(&acc) + require.NoError(t, err) + + fields3 := map[string]interface{}{ + "tcp_established": 2, + "tcp_syn_sent": 0, + "tcp_syn_recv": 0, + "tcp_fin_wait1": 0, + "tcp_fin_wait2": 0, + "tcp_time_wait": 0, + "tcp_close": 1, + "tcp_close_wait": 0, + "tcp_last_ack": 0, + "tcp_listen": 0, + "tcp_closing": 0, + "tcp_none": 0, + "udp_socket": 1, + } + acc.AssertContainsTaggedFields(t, "netstat", fields3, make(map[string]string)) +} diff --git a/plugins/system/netstat.go b/plugins/inputs/system/netstat.go similarity index 52% rename from plugins/system/netstat.go rename to plugins/inputs/system/netstat.go index 9fe512ddd..0fe704ee0 100644 --- a/plugins/system/netstat.go +++ 
b/plugins/inputs/system/netstat.go @@ -4,7 +4,7 @@ import ( "fmt" "syscall" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdata/telegraf/plugins/inputs" ) type NetStats struct { @@ -21,7 +21,7 @@ func (_ *NetStats) SampleConfig() string { return tcpstatSampleConfig } -func (s *NetStats) Gather(acc plugins.Accumulator) error { +func (s *NetStats) Gather(acc inputs.Accumulator) error { netconns, err := s.ps.NetConnections() if err != nil { return fmt.Errorf("error getting net connections info: %s", err) @@ -42,25 +42,29 @@ func (s *NetStats) Gather(acc plugins.Accumulator) error { } counts[netcon.Status] = c + 1 } - acc.Add("tcp_established", counts["ESTABLISHED"], tags) - acc.Add("tcp_syn_sent", counts["SYN_SENT"], tags) - acc.Add("tcp_syn_recv", counts["SYN_RECV"], tags) - acc.Add("tcp_fin_wait1", counts["FIN_WAIT1"], tags) - acc.Add("tcp_fin_wait2", counts["FIN_WAIT2"], tags) - acc.Add("tcp_time_wait", counts["TIME_WAIT"], tags) - acc.Add("tcp_close", counts["CLOSE"], tags) - acc.Add("tcp_close_wait", counts["CLOSE_WAIT"], tags) - acc.Add("tcp_last_ack", counts["LAST_ACK"], tags) - acc.Add("tcp_listen", counts["LISTEN"], tags) - acc.Add("tcp_closing", counts["CLOSING"], tags) - acc.Add("tcp_none", counts["NONE"], tags) - acc.Add("udp_socket", counts["UDP"], tags) + + fields := map[string]interface{}{ + "tcp_established": counts["ESTABLISHED"], + "tcp_syn_sent": counts["SYN_SENT"], + "tcp_syn_recv": counts["SYN_RECV"], + "tcp_fin_wait1": counts["FIN_WAIT1"], + "tcp_fin_wait2": counts["FIN_WAIT2"], + "tcp_time_wait": counts["TIME_WAIT"], + "tcp_close": counts["CLOSE"], + "tcp_close_wait": counts["CLOSE_WAIT"], + "tcp_last_ack": counts["LAST_ACK"], + "tcp_listen": counts["LISTEN"], + "tcp_closing": counts["CLOSING"], + "tcp_none": counts["NONE"], + "udp_socket": counts["UDP"], + } + acc.AddFields("netstat", fields, tags) return nil } func init() { - plugins.Add("netstat", func() plugins.Plugin { + inputs.Add("netstat", func() inputs.Input { return &NetStats{ps: &systemPS{}} }) } diff --git a/plugins/system/ps.go b/plugins/inputs/system/ps.go similarity index 58% rename from plugins/system/ps.go rename to plugins/inputs/system/ps.go index d0c35c62c..98c9b8b31 100644 --- a/plugins/system/ps.go +++ b/plugins/inputs/system/ps.go @@ -1,52 +1,36 @@ package system import ( - gonet "net" "os" - "strings" - "github.com/influxdb/telegraf/internal" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/inputs" - dc "github.com/fsouza/go-dockerclient" "github.com/shirou/gopsutil/cpu" "github.com/shirou/gopsutil/disk" - "github.com/shirou/gopsutil/docker" "github.com/shirou/gopsutil/mem" "github.com/shirou/gopsutil/net" ) -type DockerContainerStat struct { - Id string - Name string - Command string - Labels map[string]string - CPU *cpu.CPUTimesStat - Mem *docker.CgroupMemStat -} - type PS interface { CPUTimes(perCPU, totalCPU bool) ([]cpu.CPUTimesStat, error) - DiskUsage() ([]*disk.DiskUsageStat, error) + DiskUsage(mountPointFilter []string) ([]*disk.DiskUsageStat, error) NetIO() ([]net.NetIOCountersStat, error) NetProto() ([]net.NetProtoCountersStat, error) DiskIO() (map[string]disk.DiskIOCountersStat, error) VMStat() (*mem.VirtualMemoryStat, error) SwapStat() (*mem.SwapMemoryStat, error) - DockerStat() ([]*DockerContainerStat, error) NetConnections() ([]net.NetConnectionStat, error) } -func add(acc plugins.Accumulator, +func add(acc inputs.Accumulator, name string, val float64, tags map[string]string) { if val >= 0 { 
acc.Add(name, val, tags) } } -type systemPS struct { - dockerClient *dc.Client -} +type systemPS struct{} func (s *systemPS) CPUTimes(perCPU, totalCPU bool) ([]cpu.CPUTimesStat, error) { var cpuTimes []cpu.CPUTimesStat @@ -67,15 +51,31 @@ func (s *systemPS) CPUTimes(perCPU, totalCPU bool) ([]cpu.CPUTimesStat, error) { return cpuTimes, nil } -func (s *systemPS) DiskUsage() ([]*disk.DiskUsageStat, error) { +func (s *systemPS) DiskUsage( + mountPointFilter []string, +) ([]*disk.DiskUsageStat, error) { parts, err := disk.DiskPartitions(true) if err != nil { return nil, err } + // Make a "set" out of the filter slice + filterSet := make(map[string]bool) + for _, filter := range mountPointFilter { + filterSet[filter] = true + } + var usage []*disk.DiskUsageStat for _, p := range parts { + if len(mountPointFilter) > 0 { + // If the mount point is not a member of the filter set, + // don't gather info on it. + _, ok := filterSet[p.Mountpoint] + if !ok { + continue + } + } if _, err := os.Stat(p.Mountpoint); err == nil { du, err := disk.DiskUsage(p.Mountpoint) if err != nil { @@ -117,52 +117,3 @@ func (s *systemPS) VMStat() (*mem.VirtualMemoryStat, error) { func (s *systemPS) SwapStat() (*mem.SwapMemoryStat, error) { return mem.SwapMemory() } - -func (s *systemPS) DockerStat() ([]*DockerContainerStat, error) { - if s.dockerClient == nil { - c, err := dc.NewClient("unix:///var/run/docker.sock") - if err != nil { - return nil, err - } - - s.dockerClient = c - } - - opts := dc.ListContainersOptions{} - - containers, err := s.dockerClient.ListContainers(opts) - if err != nil { - if _, ok := err.(*gonet.OpError); ok { - return nil, nil - } - - return nil, err - } - - var stats []*DockerContainerStat - - for _, container := range containers { - ctu, err := docker.CgroupCPUDocker(container.ID) - if err != nil { - return nil, err - } - - mem, err := docker.CgroupMemDocker(container.ID) - if err != nil { - return nil, err - } - - name := strings.Join(container.Names, " ") - - stats = append(stats, &DockerContainerStat{ - Id: container.ID, - Name: name, - Command: container.Command, - Labels: container.Labels, - CPU: ctu, - Mem: mem, - }) - } - - return stats, nil -} diff --git a/plugins/system/system.go b/plugins/inputs/system/system.go similarity index 62% rename from plugins/system/system.go rename to plugins/inputs/system/system.go index 4481ac0a3..4a0a76d48 100644 --- a/plugins/system/system.go +++ b/plugins/inputs/system/system.go @@ -8,7 +8,7 @@ import ( "github.com/shirou/gopsutil/host" "github.com/shirou/gopsutil/load" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdata/telegraf/plugins/inputs" ) type SystemStats struct{} @@ -19,14 +19,7 @@ func (_ *SystemStats) Description() string { func (_ *SystemStats) SampleConfig() string { return "" } -func (_ *SystemStats) add(acc plugins.Accumulator, - name string, val float64, tags map[string]string) { - if val >= 0 { - acc.Add(name, val, tags) - } -} - -func (_ *SystemStats) Gather(acc plugins.Accumulator) error { +func (_ *SystemStats) Gather(acc inputs.Accumulator) error { loadavg, err := load.LoadAvg() if err != nil { return err @@ -37,11 +30,14 @@ func (_ *SystemStats) Gather(acc plugins.Accumulator) error { return err } - acc.Add("load1", loadavg.Load1, nil) - acc.Add("load5", loadavg.Load5, nil) - acc.Add("load15", loadavg.Load15, nil) - acc.Add("uptime", float64(hostinfo.Uptime), nil) - acc.Add("uptime_format", format_uptime(hostinfo.Uptime), nil) + fields := map[string]interface{}{ + "load1": loadavg.Load1, + "load5": loadavg.Load5, 
+ "load15": loadavg.Load15, + "uptime": hostinfo.Uptime, + "uptime_format": format_uptime(hostinfo.Uptime), + } + acc.AddFields("system", fields, nil) return nil } @@ -72,7 +68,7 @@ func format_uptime(uptime uint64) string { } func init() { - plugins.Add("system", func() plugins.Plugin { + inputs.Add("system", func() inputs.Input { return &SystemStats{} }) } diff --git a/plugins/trig/trig.go b/plugins/inputs/trig/trig.go similarity index 79% rename from plugins/trig/trig.go rename to plugins/inputs/trig/trig.go index e966cbd46..604f9734a 100644 --- a/plugins/trig/trig.go +++ b/plugins/inputs/trig/trig.go @@ -3,7 +3,7 @@ package trig import ( "math" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdata/telegraf/plugins/inputs" ) type Trig struct { @@ -24,7 +24,7 @@ func (s *Trig) Description() string { return "Inserts sine and cosine waves for demonstration purposes" } -func (s *Trig) Gather(acc plugins.Accumulator) error { +func (s *Trig) Gather(acc inputs.Accumulator) error { sinner := math.Sin((s.x*math.Pi)/5.0) * s.Amplitude cosinner := math.Cos((s.x*math.Pi)/5.0) * s.Amplitude @@ -41,6 +41,5 @@ func (s *Trig) Gather(acc plugins.Accumulator) error { } func init() { - - plugins.Add("Trig", func() plugins.Plugin { return &Trig{x: 0.0} }) + inputs.Add("Trig", func() inputs.Input { return &Trig{x: 0.0} }) } diff --git a/plugins/trig/trig_test.go b/plugins/inputs/trig/trig_test.go similarity index 69% rename from plugins/trig/trig_test.go rename to plugins/inputs/trig/trig_test.go index 24218fe11..1471edbea 100644 --- a/plugins/trig/trig_test.go +++ b/plugins/inputs/trig/trig_test.go @@ -4,9 +4,7 @@ import ( "math" "testing" - "github.com/influxdb/telegraf/testutil" - "github.com/stretchr/testify/assert" - // "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/testutil" ) func TestTrig(t *testing.T) { @@ -27,7 +25,6 @@ func TestTrig(t *testing.T) { fields["sine"] = sine fields["cosine"] = cosine - assert.True(t, acc.CheckFieldsValue("trig", fields)) - + acc.AssertContainsFields(t, "trig", fields) } } diff --git a/plugins/twemproxy/twemproxy.go b/plugins/inputs/twemproxy/twemproxy.go similarity index 58% rename from plugins/twemproxy/twemproxy.go rename to plugins/inputs/twemproxy/twemproxy.go index 0b1f6139e..6dcce8058 100644 --- a/plugins/twemproxy/twemproxy.go +++ b/plugins/inputs/twemproxy/twemproxy.go @@ -5,28 +5,21 @@ import ( "errors" "io/ioutil" "net" - "strings" - "sync" "time" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdata/telegraf/plugins/inputs" ) type Twemproxy struct { - Instances []TwemproxyInstance -} - -type TwemproxyInstance struct { Addr string Pools []string } var sampleConfig = ` - [[plugins.twemproxy.instances]] - # Twemproxy stats address and port (no scheme) - addr = "localhost:22222" - # Monitor pool name - pools = ["redis_pool", "mc_pool"] + # Twemproxy stats address and port (no scheme) + addr = "localhost:22222" + # Monitor pool name + pools = ["redis_pool", "mc_pool"] ` func (t *Twemproxy) SampleConfig() string { @@ -38,36 +31,8 @@ func (t *Twemproxy) Description() string { } // Gather data from all Twemproxy instances -func (t *Twemproxy) Gather(acc plugins.Accumulator) error { - var wg sync.WaitGroup - errorChan := make(chan error, len(t.Instances)) - for _, inst := range t.Instances { - wg.Add(1) - go func(inst TwemproxyInstance) { - defer wg.Done() - if err := inst.Gather(acc); err != nil { - errorChan <- err - } - }(inst) - } - wg.Wait() - - close(errorChan) - errs := []string{} - for err := range errorChan { 
- errs = append(errs, err.Error()) - } - if len(errs) == 0 { - return nil - } - return errors.New(strings.Join(errs, "\n")) -} - -// Gather data from one Twemproxy -func (ti *TwemproxyInstance) Gather( - acc plugins.Accumulator, -) error { - conn, err := net.DialTimeout("tcp", ti.Addr, 1*time.Second) +func (t *Twemproxy) Gather(acc inputs.Accumulator) error { + conn, err := net.DialTimeout("tcp", t.Addr, 1*time.Second) if err != nil { return err } @@ -82,15 +47,15 @@ func (ti *TwemproxyInstance) Gather( } tags := make(map[string]string) - tags["twemproxy"] = ti.Addr - ti.processStat(acc, tags, stats) + tags["twemproxy"] = t.Addr + t.processStat(acc, tags, stats) return nil } // Process Twemproxy server stats -func (ti *TwemproxyInstance) processStat( - acc plugins.Accumulator, +func (t *Twemproxy) processStat( + acc inputs.Accumulator, tags map[string]string, data map[string]interface{}, ) { @@ -100,40 +65,42 @@ func (ti *TwemproxyInstance) processStat( } } + fields := make(map[string]interface{}) metrics := []string{"total_connections", "curr_connections", "timestamp"} for _, m := range metrics { if value, ok := data[m]; ok { if val, ok := value.(float64); ok { - acc.Add(m, val, tags) + fields[m] = val } } } + acc.AddFields("twemproxy", fields, tags) - for _, pool := range ti.Pools { + for _, pool := range t.Pools { if poolStat, ok := data[pool]; ok { if data, ok := poolStat.(map[string]interface{}); ok { poolTags := copyTags(tags) poolTags["pool"] = pool - ti.processPool(acc, poolTags, pool+"_", data) + t.processPool(acc, poolTags, data) } } } } // Process pool data in Twemproxy stats -func (ti *TwemproxyInstance) processPool( - acc plugins.Accumulator, +func (t *Twemproxy) processPool( + acc inputs.Accumulator, tags map[string]string, - prefix string, data map[string]interface{}, ) { serverTags := make(map[string]map[string]string) + fields := make(map[string]interface{}) for key, value := range data { switch key { case "client_connections", "forward_error", "client_err", "server_ejects", "fragments", "client_eof": if val, ok := value.(float64); ok { - acc.Add(prefix+key, val, tags) + fields[key] = val } default: if data, ok := value.(map[string]interface{}); ok { @@ -141,27 +108,29 @@ func (ti *TwemproxyInstance) processPool( serverTags[key] = copyTags(tags) serverTags[key]["server"] = key } - ti.processServer(acc, serverTags[key], prefix, data) + t.processServer(acc, serverTags[key], data) } } } + acc.AddFields("twemproxy_pool", fields, tags) } // Process backend server(redis/memcached) stats -func (ti *TwemproxyInstance) processServer( - acc plugins.Accumulator, +func (t *Twemproxy) processServer( + acc inputs.Accumulator, tags map[string]string, - prefix string, data map[string]interface{}, ) { + fields := make(map[string]interface{}) for key, value := range data { switch key { default: if val, ok := value.(float64); ok { - acc.Add(prefix+key, val, tags) + fields[key] = val } } } + acc.AddFields("twemproxy_pool_server", fields, tags) } // Tags is not expected to be mutated after passing to Add. 
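The Twemproxy refactor above flattens the plugin's configuration: the nested `[[plugins.twemproxy.instances]]` table is gone, and each configured block now describes a single stats endpoint. A minimal TOML sketch of the resulting configuration, assuming the v0.10 `[[inputs.twemproxy]]` block form implied by the new sample config (the second address and its pool list are illustrative):

[[inputs.twemproxy]]
  # Twemproxy stats address and port (no scheme)
  addr = "localhost:22222"
  # Monitor pool name
  pools = ["redis_pool", "mc_pool"]

# Multiple Twemproxy servers are now expressed as repeated blocks rather
# than entries in an instances array (illustrative second endpoint):
[[inputs.twemproxy]]
  addr = "localhost:22223"
  pools = ["redis_pool"]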
@@ -174,7 +143,7 @@ func copyTags(tags map[string]string) map[string]string { } func init() { - plugins.Add("twemproxy", func() plugins.Plugin { + inputs.Add("twemproxy", func() inputs.Input { return &Twemproxy{} }) } diff --git a/plugins/inputs/twemproxy/twemproxy_test.go b/plugins/inputs/twemproxy/twemproxy_test.go new file mode 100644 index 000000000..dd79048e0 --- /dev/null +++ b/plugins/inputs/twemproxy/twemproxy_test.go @@ -0,0 +1,171 @@ +package twemproxy + +import ( + "encoding/json" + "net" + "testing" + + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +const sampleAddr = "127.0.0.1:22222" + +const sampleStats = `{ + "total_connections": 276448, + "uptime": 160657, + "version": "0.4.1", + "service": "nutcracker", + "curr_connections": 1322, + "source": "server1.website.com", + "demo": { + "client_connections": 1305, + "forward_error": 11684, + "client_err": 147942, + "server_ejects": 0, + "fragments": 0, + "client_eof": 126813, + "10.16.29.1:6379": { + "requests": 43604566, + "server_eof": 0, + "out_queue": 0, + "server_err": 0, + "out_queue_bytes": 0, + "in_queue": 0, + "server_timedout": 24, + "request_bytes": 2775840400, + "server_connections": 1, + "response_bytes": 7663182096, + "in_queue_bytes": 0, + "server_ejected_at": 0, + "responses": 43603900 + }, + "10.16.29.2:6379": { + "requests": 37870211, + "server_eof": 0, + "out_queue": 0, + "server_err": 0, + "out_queue_bytes": 0, + "in_queue": 0, + "server_timedout": 25, + "request_bytes": 2412114759, + "server_connections": 1, + "response_bytes": 5228980582, + "in_queue_bytes": 0, + "server_ejected_at": 0, + "responses": 37869551 + } + }, + "timestamp": 1447312436 +}` + +func mockTwemproxyServer() (net.Listener, error) { + listener, err := net.Listen("tcp", sampleAddr) + if err != nil { + return nil, err + } + go func(l net.Listener) { + for { + conn, _ := l.Accept() + conn.Write([]byte(sampleStats)) + conn.Close() + break + } + }(listener) + + return listener, nil +} + +func TestGather(t *testing.T) { + mockServer, err := mockTwemproxyServer() + if err != nil { + panic(err) + } + defer mockServer.Close() + + twemproxy := &Twemproxy{ + Addr: sampleAddr, + Pools: []string{"demo"}, + } + + var acc testutil.Accumulator + acc.SetDebug(true) + err = twemproxy.Gather(&acc) + require.NoError(t, err) + + var sourceData map[string]interface{} + if err := json.Unmarshal([]byte(sampleStats), &sourceData); err != nil { + panic(err) + } + + fields := map[string]interface{}{ + "total_connections": float64(276448), + "curr_connections": float64(1322), + "timestamp": float64(1.447312436e+09), + } + tags := map[string]string{ + "twemproxy": sampleAddr, + "source": sourceData["source"].(string), + } + acc.AssertContainsTaggedFields(t, "twemproxy", fields, tags) + + poolName := "demo" + poolFields := map[string]interface{}{ + "client_connections": float64(1305), + "client_eof": float64(126813), + "client_err": float64(147942), + "forward_error": float64(11684), + "fragments": float64(0), + "server_ejects": float64(0), + } + tags["pool"] = poolName + acc.AssertContainsTaggedFields(t, "twemproxy_pool", poolFields, tags) + + poolServerTags1 := map[string]string{ + "pool": "demo", + "server": "10.16.29.2:6379", + "source": "server1.website.com", + "twemproxy": "127.0.0.1:22222", + } + poolServerFields1 := map[string]interface{}{ + "in_queue": float64(0), + "in_queue_bytes": float64(0), + "out_queue": float64(0), + "out_queue_bytes": float64(0), + "request_bytes": float64(2.412114759e+09), + "requests": 
float64(3.7870211e+07), + "response_bytes": float64(5.228980582e+09), + "responses": float64(3.7869551e+07), + "server_connections": float64(1), + "server_ejected_at": float64(0), + "server_eof": float64(0), + "server_err": float64(0), + "server_timedout": float64(25), + } + acc.AssertContainsTaggedFields(t, "twemproxy_pool_server", + poolServerFields1, poolServerTags1) + + poolServerTags2 := map[string]string{ + "pool": "demo", + "server": "10.16.29.1:6379", + "source": "server1.website.com", + "twemproxy": "127.0.0.1:22222", + } + poolServerFields2 := map[string]interface{}{ + "in_queue": float64(0), + "in_queue_bytes": float64(0), + "out_queue": float64(0), + "out_queue_bytes": float64(0), + "request_bytes": float64(2.7758404e+09), + "requests": float64(4.3604566e+07), + "response_bytes": float64(7.663182096e+09), + "responses": float64(4.36039e+07), + "server_connections": float64(1), + "server_ejected_at": float64(0), + "server_eof": float64(0), + "server_err": float64(0), + "server_timedout": float64(24), + } + acc.AssertContainsTaggedFields(t, "twemproxy_pool_server", + poolServerFields2, poolServerTags2) +} diff --git a/plugins/zfs/README.md b/plugins/inputs/zfs/README.md similarity index 100% rename from plugins/zfs/README.md rename to plugins/inputs/zfs/README.md diff --git a/plugins/zfs/zfs.go b/plugins/inputs/zfs/zfs.go similarity index 85% rename from plugins/zfs/zfs.go rename to plugins/inputs/zfs/zfs.go index f655c4abf..13f2d9806 100644 --- a/plugins/zfs/zfs.go +++ b/plugins/inputs/zfs/zfs.go @@ -6,8 +6,8 @@ import ( "strconv" "strings" - "github.com/influxdb/telegraf/internal" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/inputs" ) type Zfs struct { @@ -68,7 +68,7 @@ func getTags(pools []poolInfo) map[string]string { return map[string]string{"pools": poolNames} } -func gatherPoolStats(pool poolInfo, acc plugins.Accumulator) error { +func gatherPoolStats(pool poolInfo, acc inputs.Accumulator) error { lines, err := internal.ReadLines(pool.ioFilename) if err != nil { return err @@ -88,20 +88,20 @@ func gatherPoolStats(pool poolInfo, acc plugins.Accumulator) error { } tag := map[string]string{"pool": pool.name} - + fields := make(map[string]interface{}) for i := 0; i < keyCount; i++ { value, err := strconv.ParseInt(values[i], 10, 64) if err != nil { return err } - - acc.Add(keys[i], value, tag) + fields[keys[i]] = value } + acc.AddFields("zfs_pool", fields, tag) return nil } -func (z *Zfs) Gather(acc plugins.Accumulator) error { +func (z *Zfs) Gather(acc inputs.Accumulator) error { kstatMetrics := z.KstatMetrics if len(kstatMetrics) == 0 { kstatMetrics = []string{"arcstats", "zfetchstats", "vdev_cache_stats"} @@ -124,6 +124,7 @@ func (z *Zfs) Gather(acc plugins.Accumulator) error { } } + fields := make(map[string]interface{}) for _, metric := range kstatMetrics { lines, err := internal.ReadLines(kstatPath + "/" + metric) if err != nil { @@ -140,14 +141,15 @@ func (z *Zfs) Gather(acc plugins.Accumulator) error { key := metric + "_" + rawData[0] rawValue := rawData[len(rawData)-1] value, _ := strconv.ParseInt(rawValue, 10, 64) - acc.Add(key, value, tags) + fields[key] = value } } + acc.AddFields("zfs", fields, tags) return nil } func init() { - plugins.Add("zfs", func() plugins.Plugin { + inputs.Add("zfs", func() inputs.Input { return &Zfs{} }) } diff --git a/plugins/inputs/zfs/zfs_test.go b/plugins/inputs/zfs/zfs_test.go new file mode 100644 index 000000000..e40d91c02 --- /dev/null +++ 
b/plugins/inputs/zfs/zfs_test.go @@ -0,0 +1,366 @@ +package zfs + +import ( + "io/ioutil" + "os" + "testing" + + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +const arcstatsContents = `5 1 0x01 86 4128 23617128247 12081618582809582 +name type data +hits 4 5968846374 +misses 4 1659178751 +demand_data_hits 4 4860247322 +demand_data_misses 4 501499535 +demand_metadata_hits 4 708608325 +demand_metadata_misses 4 156591375 +prefetch_data_hits 4 367047144 +prefetch_data_misses 4 974529898 +prefetch_metadata_hits 4 32943583 +prefetch_metadata_misses 4 26557943 +mru_hits 4 301176811 +mru_ghost_hits 4 47066067 +mfu_hits 4 5520612438 +mfu_ghost_hits 4 45784009 +deleted 4 1718937704 +recycle_miss 4 481222994 +mutex_miss 4 20575623 +evict_skip 4 14655903906543 +evict_l2_cached 4 145310202998272 +evict_l2_eligible 4 16345402777088 +evict_l2_ineligible 4 7437226893312 +hash_elements 4 36617980 +hash_elements_max 4 36618318 +hash_collisions 4 554145157 +hash_chains 4 4187651 +hash_chain_max 4 26 +p 4 13963222064 +c 4 16381258376 +c_min 4 4194304 +c_max 4 16884125696 +size 4 16319887096 +hdr_size 4 42567864 +data_size 4 60066304 +meta_size 4 1701534208 +other_size 4 1661543168 +anon_size 4 94720 +anon_evict_data 4 0 +anon_evict_metadata 4 0 +mru_size 4 973099008 +mru_evict_data 4 9175040 +mru_evict_metadata 4 32768 +mru_ghost_size 4 32768 +mru_ghost_evict_data 4 0 +mru_ghost_evict_metadata 4 32768 +mfu_size 4 788406784 +mfu_evict_data 4 50881024 +mfu_evict_metadata 4 81920 +mfu_ghost_size 4 0 +mfu_ghost_evict_data 4 0 +mfu_ghost_evict_metadata 4 0 +l2_hits 4 573868618 +l2_misses 4 1085309718 +l2_feeds 4 12182087 +l2_rw_clash 4 9610 +l2_read_bytes 4 32695938336768 +l2_write_bytes 4 2826774778880 +l2_writes_sent 4 4267687 +l2_writes_done 4 4267687 +l2_writes_error 4 0 +l2_writes_hdr_miss 4 164 +l2_evict_lock_retry 4 5 +l2_evict_reading 4 0 +l2_free_on_write 4 1606914 +l2_cdata_free_on_write 4 1775 +l2_abort_lowmem 4 83462 +l2_cksum_bad 4 393860640 +l2_io_error 4 53881460 +l2_size 4 2471466648576 +l2_asize 4 2461690072064 +l2_hdr_size 4 12854175552 +l2_compress_successes 4 12184849 +l2_compress_zeros 4 0 +l2_compress_failures 4 0 +memory_throttle_count 4 0 +duplicate_buffers 4 0 +duplicate_buffers_size 4 0 +duplicate_reads 4 0 +memory_direct_count 4 5159942 +memory_indirect_count 4 3034640 +arc_no_grow 4 0 +arc_tempreserve 4 0 +arc_loaned_bytes 4 0 +arc_prune 4 114554259559 +arc_meta_used 4 16259820792 +arc_meta_limit 4 12663094272 +arc_meta_max 4 18327165696 +` + +const zfetchstatsContents = `3 1 0x01 11 528 23607270446 12081656848148208 +name type data +hits 4 7812959060 +misses 4 4154484207 +colinear_hits 4 1366368 +colinear_misses 4 4153117839 +stride_hits 4 7309776732 +stride_misses 4 222766182 +reclaim_successes 4 107788388 +reclaim_failures 4 4045329451 +streams_resets 4 20989756 +streams_noresets 4 503182328 +bogus_streams 4 0 +` +const vdev_cache_statsContents = `7 1 0x01 3 144 23617323692 12081684236238879 +name type data +delegations 4 0 +hits 4 0 +misses 4 0 +` +const pool_ioContents = `11 3 0x00 1 80 2225326830828 32953476980628 +nread nwritten reads writes wtime wlentime wupdate rtime rlentime rupdate wcnt rcnt +1884160 6450688 22 978 272187126 2850519036 2263669418655 424226814 2850519036 2263669871823 0 0 +` + +var testKstatPath = os.TempDir() + "/telegraf/proc/spl/kstat/zfs" + +func TestZfsPoolMetrics(t *testing.T) { + err := os.MkdirAll(testKstatPath, 0755) + require.NoError(t, err) + + err = os.MkdirAll(testKstatPath+"/HOME", 0755) + 
require.NoError(t, err) + + err = ioutil.WriteFile(testKstatPath+"/HOME/io", []byte(pool_ioContents), 0644) + require.NoError(t, err) + + err = ioutil.WriteFile(testKstatPath+"/arcstats", []byte(arcstatsContents), 0644) + require.NoError(t, err) + + poolMetrics := getPoolMetrics() + + var acc testutil.Accumulator + + z := &Zfs{KstatPath: testKstatPath, KstatMetrics: []string{"arcstats"}} + err = z.Gather(&acc) + require.NoError(t, err) + + require.False(t, acc.HasMeasurement("zfs_pool")) + acc.Points = nil + + z = &Zfs{KstatPath: testKstatPath, KstatMetrics: []string{"arcstats"}, PoolMetrics: true} + err = z.Gather(&acc) + require.NoError(t, err) + + //one pool, all metrics + tags := map[string]string{ + "pool": "HOME", + } + + acc.AssertContainsTaggedFields(t, "zfs_pool", poolMetrics, tags) + + err = os.RemoveAll(os.TempDir() + "/telegraf") + require.NoError(t, err) +} + +func TestZfsGeneratesMetrics(t *testing.T) { + err := os.MkdirAll(testKstatPath, 0755) + require.NoError(t, err) + + err = os.MkdirAll(testKstatPath+"/HOME", 0755) + require.NoError(t, err) + + err = ioutil.WriteFile(testKstatPath+"/HOME/io", []byte(""), 0644) + require.NoError(t, err) + + err = ioutil.WriteFile(testKstatPath+"/arcstats", []byte(arcstatsContents), 0644) + require.NoError(t, err) + + err = ioutil.WriteFile(testKstatPath+"/zfetchstats", []byte(zfetchstatsContents), 0644) + require.NoError(t, err) + + err = ioutil.WriteFile(testKstatPath+"/vdev_cache_stats", []byte(vdev_cache_statsContents), 0644) + require.NoError(t, err) + + intMetrics := getKstatMetricsAll() + + var acc testutil.Accumulator + + //one pool, all metrics + tags := map[string]string{ + "pools": "HOME", + } + + z := &Zfs{KstatPath: testKstatPath} + err = z.Gather(&acc) + require.NoError(t, err) + + acc.AssertContainsTaggedFields(t, "zfs", intMetrics, tags) + acc.Points = nil + + //two pools, all metrics + err = os.MkdirAll(testKstatPath+"/STORAGE", 0755) + require.NoError(t, err) + + err = ioutil.WriteFile(testKstatPath+"/STORAGE/io", []byte(""), 0644) + require.NoError(t, err) + + tags = map[string]string{ + "pools": "HOME::STORAGE", + } + + z = &Zfs{KstatPath: testKstatPath} + acc = testutil.Accumulator{} + err = z.Gather(&acc) + require.NoError(t, err) + + acc.AssertContainsTaggedFields(t, "zfs", intMetrics, tags) + acc.Points = nil + + intMetrics = getKstatMetricsArcOnly() + + //two pools, one metric + z = &Zfs{KstatPath: testKstatPath, KstatMetrics: []string{"arcstats"}} + acc = testutil.Accumulator{} + err = z.Gather(&acc) + require.NoError(t, err) + + acc.AssertContainsTaggedFields(t, "zfs", intMetrics, tags) + + err = os.RemoveAll(os.TempDir() + "/telegraf") + require.NoError(t, err) +} + +func getKstatMetricsArcOnly() map[string]interface{} { + return map[string]interface{}{ + "arcstats_hits": int64(5968846374), + "arcstats_misses": int64(1659178751), + "arcstats_demand_data_hits": int64(4860247322), + "arcstats_demand_data_misses": int64(501499535), + "arcstats_demand_metadata_hits": int64(708608325), + "arcstats_demand_metadata_misses": int64(156591375), + "arcstats_prefetch_data_hits": int64(367047144), + "arcstats_prefetch_data_misses": int64(974529898), + "arcstats_prefetch_metadata_hits": int64(32943583), + "arcstats_prefetch_metadata_misses": int64(26557943), + "arcstats_mru_hits": int64(301176811), + "arcstats_mru_ghost_hits": int64(47066067), + "arcstats_mfu_hits": int64(5520612438), + "arcstats_mfu_ghost_hits": int64(45784009), + "arcstats_deleted": int64(1718937704), + "arcstats_recycle_miss": int64(481222994), + 
"arcstats_mutex_miss": int64(20575623), + "arcstats_evict_skip": int64(14655903906543), + "arcstats_evict_l2_cached": int64(145310202998272), + "arcstats_evict_l2_eligible": int64(16345402777088), + "arcstats_evict_l2_ineligible": int64(7437226893312), + "arcstats_hash_elements": int64(36617980), + "arcstats_hash_elements_max": int64(36618318), + "arcstats_hash_collisions": int64(554145157), + "arcstats_hash_chains": int64(4187651), + "arcstats_hash_chain_max": int64(26), + "arcstats_p": int64(13963222064), + "arcstats_c": int64(16381258376), + "arcstats_c_min": int64(4194304), + "arcstats_c_max": int64(16884125696), + "arcstats_size": int64(16319887096), + "arcstats_hdr_size": int64(42567864), + "arcstats_data_size": int64(60066304), + "arcstats_meta_size": int64(1701534208), + "arcstats_other_size": int64(1661543168), + "arcstats_anon_size": int64(94720), + "arcstats_anon_evict_data": int64(0), + "arcstats_anon_evict_metadata": int64(0), + "arcstats_mru_size": int64(973099008), + "arcstats_mru_evict_data": int64(9175040), + "arcstats_mru_evict_metadata": int64(32768), + "arcstats_mru_ghost_size": int64(32768), + "arcstats_mru_ghost_evict_data": int64(0), + "arcstats_mru_ghost_evict_metadata": int64(32768), + "arcstats_mfu_size": int64(788406784), + "arcstats_mfu_evict_data": int64(50881024), + "arcstats_mfu_evict_metadata": int64(81920), + "arcstats_mfu_ghost_size": int64(0), + "arcstats_mfu_ghost_evict_data": int64(0), + "arcstats_mfu_ghost_evict_metadata": int64(0), + "arcstats_l2_hits": int64(573868618), + "arcstats_l2_misses": int64(1085309718), + "arcstats_l2_feeds": int64(12182087), + "arcstats_l2_rw_clash": int64(9610), + "arcstats_l2_read_bytes": int64(32695938336768), + "arcstats_l2_write_bytes": int64(2826774778880), + "arcstats_l2_writes_sent": int64(4267687), + "arcstats_l2_writes_done": int64(4267687), + "arcstats_l2_writes_error": int64(0), + "arcstats_l2_writes_hdr_miss": int64(164), + "arcstats_l2_evict_lock_retry": int64(5), + "arcstats_l2_evict_reading": int64(0), + "arcstats_l2_free_on_write": int64(1606914), + "arcstats_l2_cdata_free_on_write": int64(1775), + "arcstats_l2_abort_lowmem": int64(83462), + "arcstats_l2_cksum_bad": int64(393860640), + "arcstats_l2_io_error": int64(53881460), + "arcstats_l2_size": int64(2471466648576), + "arcstats_l2_asize": int64(2461690072064), + "arcstats_l2_hdr_size": int64(12854175552), + "arcstats_l2_compress_successes": int64(12184849), + "arcstats_l2_compress_zeros": int64(0), + "arcstats_l2_compress_failures": int64(0), + "arcstats_memory_throttle_count": int64(0), + "arcstats_duplicate_buffers": int64(0), + "arcstats_duplicate_buffers_size": int64(0), + "arcstats_duplicate_reads": int64(0), + "arcstats_memory_direct_count": int64(5159942), + "arcstats_memory_indirect_count": int64(3034640), + "arcstats_arc_no_grow": int64(0), + "arcstats_arc_tempreserve": int64(0), + "arcstats_arc_loaned_bytes": int64(0), + "arcstats_arc_prune": int64(114554259559), + "arcstats_arc_meta_used": int64(16259820792), + "arcstats_arc_meta_limit": int64(12663094272), + "arcstats_arc_meta_max": int64(18327165696), + } +} + +func getKstatMetricsAll() map[string]interface{} { + otherMetrics := map[string]interface{}{ + "zfetchstats_hits": int64(7812959060), + "zfetchstats_misses": int64(4154484207), + "zfetchstats_colinear_hits": int64(1366368), + "zfetchstats_colinear_misses": int64(4153117839), + "zfetchstats_stride_hits": int64(7309776732), + "zfetchstats_stride_misses": int64(222766182), + "zfetchstats_reclaim_successes": int64(107788388), + 
"zfetchstats_reclaim_failures": int64(4045329451), + "zfetchstats_streams_resets": int64(20989756), + "zfetchstats_streams_noresets": int64(503182328), + "zfetchstats_bogus_streams": int64(0), + "vdev_cache_stats_delegations": int64(0), + "vdev_cache_stats_hits": int64(0), + "vdev_cache_stats_misses": int64(0), + } + arcMetrics := getKstatMetricsArcOnly() + for k, v := range otherMetrics { + arcMetrics[k] = v + } + return arcMetrics +} + +func getPoolMetrics() map[string]interface{} { + return map[string]interface{}{ + "nread": int64(1884160), + "nwritten": int64(6450688), + "reads": int64(22), + "writes": int64(978), + "wtime": int64(272187126), + "wlentime": int64(2850519036), + "wupdate": int64(2263669418655), + "rtime": int64(424226814), + "rlentime": int64(2850519036), + "rupdate": int64(2263669871823), + "wcnt": int64(0), + "rcnt": int64(0), + } +} diff --git a/plugins/zookeeper/README.md b/plugins/inputs/zookeeper/README.md similarity index 100% rename from plugins/zookeeper/README.md rename to plugins/inputs/zookeeper/README.md diff --git a/plugins/zookeeper/zookeeper.go b/plugins/inputs/zookeeper/zookeeper.go similarity index 75% rename from plugins/zookeeper/zookeeper.go rename to plugins/inputs/zookeeper/zookeeper.go index 395bd3fdd..c2940f5e3 100644 --- a/plugins/zookeeper/zookeeper.go +++ b/plugins/inputs/zookeeper/zookeeper.go @@ -10,7 +10,7 @@ import ( "strings" "time" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdata/telegraf/plugins/inputs" ) // Zookeeper is a zookeeper plugin @@ -40,7 +40,7 @@ func (z *Zookeeper) Description() string { } // Gather reads stats from all configured servers accumulates stats -func (z *Zookeeper) Gather(acc plugins.Accumulator) error { +func (z *Zookeeper) Gather(acc inputs.Accumulator) error { if len(z.Servers) == 0 { return nil } @@ -53,7 +53,7 @@ func (z *Zookeeper) Gather(acc plugins.Accumulator) error { return nil } -func (z *Zookeeper) gatherServer(address string, acc plugins.Accumulator) error { +func (z *Zookeeper) gatherServer(address string, acc inputs.Accumulator) error { _, _, err := net.SplitHostPort(address) if err != nil { address = address + ":2181" @@ -67,41 +67,43 @@ func (z *Zookeeper) gatherServer(address string, acc plugins.Accumulator) error defer c.Close() fmt.Fprintf(c, "%s\n", "mntr") - rdr := bufio.NewReader(c) - scanner := bufio.NewScanner(rdr) + service := strings.Split(address, ":") + if len(service) != 2 { + return fmt.Errorf("Invalid service address: %s", address) + } + tags := map[string]string{"server": service[0], "port": service[1]} + + fields := make(map[string]interface{}) for scanner.Scan() { line := scanner.Text() re := regexp.MustCompile(`^zk_(\w+)\s+([\w\.\-]+)`) parts := re.FindStringSubmatch(string(line)) - service := strings.Split(address, ":") - - if len(parts) != 3 || len(service) != 2 { + if len(parts) != 3 { return fmt.Errorf("unexpected line in mntr response: %q", line) } - tags := map[string]string{"server": service[0], "port": service[1]} - measurement := strings.TrimPrefix(parts[1], "zk_") sValue := string(parts[2]) iVal, err := strconv.ParseInt(sValue, 10, 64) if err == nil { - acc.Add(measurement, iVal, tags) + fields[measurement] = iVal } else { - acc.Add(measurement, sValue, tags) + fields[measurement] = sValue } } + acc.AddFields("zookeeper", fields, tags) return nil } func init() { - plugins.Add("zookeeper", func() plugins.Plugin { + inputs.Add("zookeeper", func() inputs.Input { return &Zookeeper{} }) } diff --git a/plugins/zookeeper/zookeeper_test.go 
b/plugins/inputs/zookeeper/zookeeper_test.go similarity index 87% rename from plugins/zookeeper/zookeeper_test.go rename to plugins/inputs/zookeeper/zookeeper_test.go index 075ca521d..bc02ffb9d 100644 --- a/plugins/zookeeper/zookeeper_test.go +++ b/plugins/inputs/zookeeper/zookeeper_test.go @@ -3,7 +3,7 @@ package zookeeper import ( "testing" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -38,6 +38,6 @@ func TestZookeeperGeneratesMetrics(t *testing.T) { } for _, metric := range intMetrics { - assert.True(t, acc.HasIntValue(metric), metric) + assert.True(t, acc.HasIntField("zookeeper", metric), metric) } } diff --git a/plugins/mailchimp/mailchimp.go b/plugins/mailchimp/mailchimp.go deleted file mode 100644 index c1c7027f0..000000000 --- a/plugins/mailchimp/mailchimp.go +++ /dev/null @@ -1,113 +0,0 @@ -package mailchimp - -import ( - "fmt" - "time" - - "github.com/influxdb/telegraf/plugins" -) - -type MailChimp struct { - api *ChimpAPI - - ApiKey string - DaysOld int - CampaignId string -} - -var sampleConfig = ` - # MailChimp API key - # get from https://admin.mailchimp.com/account/api/ - api_key = "" # required - # Reports for campaigns sent more than days_old ago will not be collected. - # 0 means collect all. - days_old = 0 - # Campaign ID to get, if empty gets all campaigns, this option overrides days_old - # campaign_id = "" -` - -func (m *MailChimp) SampleConfig() string { - return sampleConfig -} - -func (m *MailChimp) Description() string { - return "Gathers metrics from the /3.0/reports MailChimp API" -} - -func (m *MailChimp) Gather(acc plugins.Accumulator) error { - if m.api == nil { - m.api = NewChimpAPI(m.ApiKey) - } - m.api.Debug = false - - if m.CampaignId == "" { - since := "" - if m.DaysOld > 0 { - now := time.Now() - d, _ := time.ParseDuration(fmt.Sprintf("%dh", 24*m.DaysOld)) - since = now.Add(-d).Format(time.RFC3339) - } - - reports, err := m.api.GetReports(ReportsParams{ - SinceSendTime: since, - }) - if err != nil { - return err - } - now := time.Now() - - for _, report := range reports.Reports { - gatherReport(acc, report, now) - } - } else { - report, err := m.api.GetReport(m.CampaignId) - if err != nil { - return err - } - now := time.Now() - gatherReport(acc, report, now) - } - - return nil -} - -func gatherReport(acc plugins.Accumulator, report Report, now time.Time) { - tags := make(map[string]string) - tags["id"] = report.ID - tags["campaign_title"] = report.CampaignTitle - acc.Add("emails_sent", report.EmailsSent, tags, now) - acc.Add("abuse_reports", report.AbuseReports, tags, now) - acc.Add("unsubscribed", report.Unsubscribed, tags, now) - acc.Add("hard_bounces", report.Bounces.HardBounces, tags, now) - acc.Add("soft_bounces", report.Bounces.SoftBounces, tags, now) - acc.Add("syntax_errors", report.Bounces.SyntaxErrors, tags, now) - acc.Add("forwards_count", report.Forwards.ForwardsCount, tags, now) - acc.Add("forwards_opens", report.Forwards.ForwardsOpens, tags, now) - acc.Add("opens_total", report.Opens.OpensTotal, tags, now) - acc.Add("unique_opens", report.Opens.UniqueOpens, tags, now) - acc.Add("open_rate", report.Opens.OpenRate, tags, now) - acc.Add("clicks_total", report.Clicks.ClicksTotal, tags, now) - acc.Add("unique_clicks", report.Clicks.UniqueClicks, tags, now) - acc.Add("unique_subscriber_clicks", report.Clicks.UniqueSubscriberClicks, tags, now) - acc.Add("click_rate", report.Clicks.ClickRate, tags, now) - 
acc.Add("facebook_recipient_likes", report.FacebookLikes.RecipientLikes, tags, now) - acc.Add("facebook_unique_likes", report.FacebookLikes.UniqueLikes, tags, now) - acc.Add("facebook_likes", report.FacebookLikes.FacebookLikes, tags, now) - acc.Add("industry_type", report.IndustryStats.Type, tags, now) - acc.Add("industry_open_rate", report.IndustryStats.OpenRate, tags, now) - acc.Add("industry_click_rate", report.IndustryStats.ClickRate, tags, now) - acc.Add("industry_bounce_rate", report.IndustryStats.BounceRate, tags, now) - acc.Add("industry_unopen_rate", report.IndustryStats.UnopenRate, tags, now) - acc.Add("industry_unsub_rate", report.IndustryStats.UnsubRate, tags, now) - acc.Add("industry_abuse_rate", report.IndustryStats.AbuseRate, tags, now) - acc.Add("list_stats_sub_rate", report.ListStats.SubRate, tags, now) - acc.Add("list_stats_unsub_rate", report.ListStats.UnsubRate, tags, now) - acc.Add("list_stats_open_rate", report.ListStats.OpenRate, tags, now) - acc.Add("list_stats_click_rate", report.ListStats.ClickRate, tags, now) -} - -func init() { - plugins.Add("mailchimp", func() plugins.Plugin { - return &MailChimp{} - }) -} diff --git a/plugins/outputs/all/all.go b/plugins/outputs/all/all.go new file mode 100644 index 000000000..ac8357c90 --- /dev/null +++ b/plugins/outputs/all/all.go @@ -0,0 +1,18 @@ +package all + +import ( + _ "github.com/influxdata/telegraf/plugins/outputs/amon" + _ "github.com/influxdata/telegraf/plugins/outputs/amqp" + _ "github.com/influxdata/telegraf/plugins/outputs/cloudwatch" + _ "github.com/influxdata/telegraf/plugins/outputs/datadog" + _ "github.com/influxdata/telegraf/plugins/outputs/graphite" + _ "github.com/influxdata/telegraf/plugins/outputs/influxdb" + _ "github.com/influxdata/telegraf/plugins/outputs/kafka" + _ "github.com/influxdata/telegraf/plugins/outputs/kinesis" + _ "github.com/influxdata/telegraf/plugins/outputs/librato" + _ "github.com/influxdata/telegraf/plugins/outputs/mqtt" + _ "github.com/influxdata/telegraf/plugins/outputs/nsq" + _ "github.com/influxdata/telegraf/plugins/outputs/opentsdb" + _ "github.com/influxdata/telegraf/plugins/outputs/prometheus_client" + _ "github.com/influxdata/telegraf/plugins/outputs/riemann" +) diff --git a/outputs/amon/README.md b/plugins/outputs/amon/README.md similarity index 100% rename from outputs/amon/README.md rename to plugins/outputs/amon/README.md diff --git a/outputs/amon/amon.go b/plugins/outputs/amon/amon.go similarity index 73% rename from outputs/amon/amon.go rename to plugins/outputs/amon/amon.go index 2ab068b75..e9f2c9f30 100644 --- a/outputs/amon/amon.go +++ b/plugins/outputs/amon/amon.go @@ -8,9 +8,9 @@ import ( "net/http" "strings" - "github.com/influxdb/influxdb/client/v2" - "github.com/influxdb/telegraf/internal" - "github.com/influxdb/telegraf/outputs" + "github.com/influxdata/influxdb/client/v2" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/outputs" ) type Amon struct { @@ -58,21 +58,26 @@ func (a *Amon) Write(points []*client.Point) error { return nil } ts := TimeSeries{} - var tempSeries = make([]*Metric, len(points)) - var acceptablePoints = 0 + tempSeries := []*Metric{} + metricCounter := 0 + for _, pt := range points { - metric := &Metric{ - Metric: strings.Replace(pt.Name(), "_", ".", -1), - } - if p, err := buildPoint(pt); err == nil { - metric.Points[0] = p - tempSeries[acceptablePoints] = metric - acceptablePoints += 1 + mname := strings.Replace(pt.Name(), "_", ".", -1) + if amonPts, err := buildPoints(pt); err == nil { + for 
fieldName, amonPt := range amonPts { + metric := &Metric{ + Metric: mname + "_" + strings.Replace(fieldName, "_", ".", -1), + } + metric.Points[0] = amonPt + tempSeries = append(tempSeries, metric) + metricCounter++ + } } else { log.Printf("unable to build Metric for %s, skipping\n", pt.Name()) } } - ts.Series = make([]*Metric, acceptablePoints) + + ts.Series = make([]*Metric, metricCounter) copy(ts.Series, tempSeries[0:]) tsBytes, err := json.Marshal(ts) if err != nil { @@ -110,13 +115,17 @@ func (a *Amon) authenticatedUrl() string { return fmt.Sprintf("%s/api/system/%s", a.AmonInstance, a.ServerKey) } -func buildPoint(pt *client.Point) (Point, error) { - var p Point - if err := p.setValue(pt.Fields()["value"]); err != nil { - return p, fmt.Errorf("unable to extract value from Fields, %s", err.Error()) +func buildPoints(pt *client.Point) (map[string]Point, error) { + pts := make(map[string]Point) + for k, v := range pt.Fields() { + var p Point + if err := p.setValue(v); err != nil { + return pts, fmt.Errorf("unable to extract value from Fields, %s", err.Error()) + } + p[0] = float64(pt.Time().Unix()) + pts[k] = p } - p[0] = float64(pt.Time().Unix()) - return p, nil + return pts, nil } func (p *Point) setValue(v interface{}) error { diff --git a/outputs/amon/amon_test.go b/plugins/outputs/amon/amon_test.go similarity index 69% rename from outputs/amon/amon_test.go rename to plugins/outputs/amon/amon_test.go index a220a304d..b725bab9e 100644 --- a/outputs/amon/amon_test.go +++ b/plugins/outputs/amon/amon_test.go @@ -6,9 +6,9 @@ import ( "testing" "time" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" - "github.com/influxdb/influxdb/client/v2" + "github.com/influxdata/influxdb/client/v2" ) func TestBuildPoint(t *testing.T) { @@ -18,7 +18,7 @@ func TestBuildPoint(t *testing.T) { err error }{ { - testutil.TestPoint(float64(0.0)), + testutil.TestPoint(float64(0.0), "testpt"), Point{ float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()), 0.0, @@ -26,7 +26,7 @@ func TestBuildPoint(t *testing.T) { nil, }, { - testutil.TestPoint(float64(1.0)), + testutil.TestPoint(float64(1.0), "testpt"), Point{ float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()), 1.0, @@ -34,7 +34,7 @@ func TestBuildPoint(t *testing.T) { nil, }, { - testutil.TestPoint(int(10)), + testutil.TestPoint(int(10), "testpt"), Point{ float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()), 10.0, @@ -42,7 +42,7 @@ func TestBuildPoint(t *testing.T) { nil, }, { - testutil.TestPoint(int32(112345)), + testutil.TestPoint(int32(112345), "testpt"), Point{ float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()), 112345.0, @@ -50,7 +50,7 @@ func TestBuildPoint(t *testing.T) { nil, }, { - testutil.TestPoint(int64(112345)), + testutil.TestPoint(int64(112345), "testpt"), Point{ float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()), 112345.0, @@ -58,7 +58,7 @@ func TestBuildPoint(t *testing.T) { nil, }, { - testutil.TestPoint(float32(11234.5)), + testutil.TestPoint(float32(11234.5), "testpt"), Point{ float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()), 11234.5, @@ -66,7 +66,7 @@ func TestBuildPoint(t *testing.T) { nil, }, { - testutil.TestPoint("11234.5"), + testutil.TestPoint("11234.5", "testpt"), Point{ float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()), 11234.5, @@ -75,15 +75,16 @@ func TestBuildPoint(t *testing.T) { }, } for _, tt := range tagtests { - pt, err := 
buildPoint(tt.ptIn) + pt, err := buildPoints(tt.ptIn) if err != nil && tt.err == nil { t.Errorf("%s: unexpected error, %+v\n", tt.ptIn.Name(), err) } if tt.err != nil && err == nil { t.Errorf("%s: expected an error (%s) but none returned", tt.ptIn.Name(), tt.err.Error()) } - if !reflect.DeepEqual(pt, tt.outPt) && tt.err == nil { - t.Errorf("%s: \nexpected %+v\ngot %+v\n", tt.ptIn.Name(), tt.outPt, pt) + if !reflect.DeepEqual(pt["value"], tt.outPt) && tt.err == nil { + t.Errorf("%s: \nexpected %+v\ngot %+v\n", + tt.ptIn.Name(), tt.outPt, pt["value"]) } } } diff --git a/outputs/amqp/README.md b/plugins/outputs/amqp/README.md similarity index 100% rename from outputs/amqp/README.md rename to plugins/outputs/amqp/README.md diff --git a/outputs/amqp/amqp.go b/plugins/outputs/amqp/amqp.go similarity index 76% rename from outputs/amqp/amqp.go rename to plugins/outputs/amqp/amqp.go index a5c8c5a9f..bdbf47b86 100644 --- a/outputs/amqp/amqp.go +++ b/plugins/outputs/amqp/amqp.go @@ -2,13 +2,16 @@ package amqp import ( "bytes" + "crypto/tls" + "crypto/x509" "fmt" + "io/ioutil" "log" "sync" "time" - "github.com/influxdb/influxdb/client/v2" - "github.com/influxdb/telegraf/outputs" + "github.com/influxdata/influxdb/client/v2" + "github.com/influxdata/telegraf/plugins/outputs" "github.com/streadway/amqp" ) @@ -17,6 +20,12 @@ type AMQP struct { URL string // AMQP exchange Exchange string + // path to CA file + SslCa string + // path to host cert file + SslCert string + // path to cert key file + SslKey string // Routing Key Tag RoutingTag string `toml:"routing_tag"` // InfluxDB database @@ -46,6 +55,11 @@ var sampleConfig = ` # ie, if this tag exists, it's value will be used as the routing key routing_tag = "host" + # Use ssl + #ssl_ca = "/etc/telegraf/ca.pem" + #ssl_cert = "/etc/telegraf/cert.pem" + #ssl_key = "/etc/telegraf/key.pem" + # InfluxDB retention policy #retention_policy = "default" # InfluxDB database @@ -64,7 +78,32 @@ func (q *AMQP) Connect() error { "retention_policy": q.RetentionPolicy, } - connection, err := amqp.Dial(q.URL) + var connection *amqp.Connection + var err error + if q.SslCert != "" && q.SslKey != "" { + // make new tls config + cfg := new(tls.Config) + if q.SslCa != "" { + // create ca pool + cfg.RootCAs = x509.NewCertPool() + + // add self-signed cert + if ca, err := ioutil.ReadFile(q.SslCa); err == nil { + cfg.RootCAs.AppendCertsFromPEM(ca) + } else { + log.Println(err) + } + } + if cert, err := tls.LoadX509KeyPair(q.SslCert, q.SslKey); err == nil { + cfg.Certificates = append(cfg.Certificates, cert) + } else { + log.Println(err) + } + connection, err = amqp.DialTLS(q.URL, cfg) + + } else { + connection, err = amqp.Dial(q.URL) + } if err != nil { return err } diff --git a/outputs/amqp/amqp_test.go b/plugins/outputs/amqp/amqp_test.go similarity index 92% rename from outputs/amqp/amqp_test.go rename to plugins/outputs/amqp/amqp_test.go index 4c6a9a8d3..a65634cab 100644 --- a/outputs/amqp/amqp_test.go +++ b/plugins/outputs/amqp/amqp_test.go @@ -3,7 +3,7 @@ package amqp import ( "testing" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) diff --git a/plugins/outputs/cloudwatch/README.md b/plugins/outputs/cloudwatch/README.md new file mode 100644 index 000000000..853d038c3 --- /dev/null +++ b/plugins/outputs/cloudwatch/README.md @@ -0,0 +1,33 @@ +## Amazon CloudWatch Output for Telegraf + +This plugin will send points to Amazon CloudWatch. 
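+
+A minimal configuration sketch (assuming the standard `[outputs.cloudwatch]`
+section name; the two values below mirror the plugin's embedded sample config
+and should be adjusted for your account):
+
+    [outputs.cloudwatch]
+      # Amazon REGION
+      region = 'us-east-1'
+      # Namespace for the CloudWatch MetricDatums
+      namespace = 'InfluxData/Telegraf'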
+
+## Amazon Authentication
+
+This plugin uses a credential chain for authentication with the CloudWatch
+API endpoint. The plugin will attempt to authenticate in the following order:
+1. [IAM Role](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html)
+2. [Environment Variables](https://github.com/aws/aws-sdk-go/wiki/configuring-sdk)
+3. [Shared Credentials](https://github.com/aws/aws-sdk-go/wiki/configuring-sdk)
+
+## Config
+
+For this output plugin to function correctly, the following variables
+must be configured.
+
+* region
+* namespace
+
+### region
+
+The region is the Amazon region that you wish to connect to.
+Examples include but are not limited to:
+* us-west-1
+* us-west-2
+* us-east-1
+* ap-southeast-1
+* ap-southeast-2
+
+### namespace
+
+The namespace used for AWS CloudWatch metrics.
diff --git a/plugins/outputs/cloudwatch/cloudwatch.go b/plugins/outputs/cloudwatch/cloudwatch.go
new file mode 100644
index 000000000..1e20836da
--- /dev/null
+++ b/plugins/outputs/cloudwatch/cloudwatch.go
@@ -0,0 +1,236 @@
+package cloudwatch
+
+import (
+	"log"
+	"math"
+	"sort"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
+	"github.com/aws/aws-sdk-go/aws/ec2metadata"
+	"github.com/aws/aws-sdk-go/aws/session"
+	"github.com/aws/aws-sdk-go/service/cloudwatch"
+
+	"github.com/influxdata/influxdb/client/v2"
+	"github.com/influxdata/telegraf/plugins/outputs"
+)
+
+type CloudWatch struct {
+	Region    string // AWS Region
+	Namespace string // CloudWatch Metrics Namespace
+	svc       *cloudwatch.CloudWatch
+}
+
+var sampleConfig = `
+  # Amazon REGION
+  region = 'us-east-1'
+
+  # Namespace for the CloudWatch MetricDatums
+  namespace = 'InfluxData/Telegraf'
+`
+
+func (c *CloudWatch) SampleConfig() string {
+	return sampleConfig
+}
+
+func (c *CloudWatch) Description() string {
+	return "Configuration for AWS CloudWatch output."
+}
+
+func (c *CloudWatch) Connect() error {
+	Config := &aws.Config{
+		Region: aws.String(c.Region),
+		Credentials: credentials.NewChainCredentials(
+			[]credentials.Provider{
+				&ec2rolecreds.EC2RoleProvider{Client: ec2metadata.New(session.New())},
+				&credentials.EnvProvider{},
+				&credentials.SharedCredentialsProvider{},
+			}),
+	}
+
+	svc := cloudwatch.New(session.New(Config))
+
+	params := &cloudwatch.ListMetricsInput{
+		Namespace: aws.String(c.Namespace),
+	}
+
+	_, err := svc.ListMetrics(params) // Try a read-only call to test connection.
+
+	if err != nil {
+		log.Printf("cloudwatch: Error in ListMetrics API call : %+v \n", err.Error())
+	}
+
+	c.svc = svc
+
+	return err
+}
+
+func (c *CloudWatch) Close() error {
+	return nil
+}
+
+func (c *CloudWatch) Write(points []*client.Point) error {
+	for _, pt := range points {
+		err := c.WriteSinglePoint(pt)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// Write data for a single point. A point can have many fields and one field
+// is equal to one MetricDatum. There is a limit on how many MetricDatums a
+// request can have so we process one Point at a time.
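+// For example, a point named "cpu" with fields "usage_user" and "usage_system"
+// yields two MetricDatums named "cpu_usage_user" and "cpu_usage_system"
+// (see BuildMetricDatum below).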
+func (c *CloudWatch) WriteSinglePoint(point *client.Point) error {
+	datums := BuildMetricDatum(point)
+
+	const maxDatumsPerCall = 20 // PutMetricData only supports up to 20 data points per call
+
+	for _, partition := range PartitionDatums(maxDatumsPerCall, datums) {
+		err := c.WriteToCloudWatch(partition)
+
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (c *CloudWatch) WriteToCloudWatch(datums []*cloudwatch.MetricDatum) error {
+	params := &cloudwatch.PutMetricDataInput{
+		MetricData: datums,
+		Namespace:  aws.String(c.Namespace),
+	}
+
+	_, err := c.svc.PutMetricData(params)
+
+	if err != nil {
+		log.Printf("CloudWatch: Unable to write to CloudWatch : %+v \n", err.Error())
+	}
+
+	return err
+}
+
+// Partition the MetricDatums into smaller slices of a max size so that they are
+// under the limit for the AWS API calls.
+func PartitionDatums(size int, datums []*cloudwatch.MetricDatum) [][]*cloudwatch.MetricDatum {
+
+	numberOfPartitions := len(datums) / size
+	if len(datums)%size != 0 {
+		numberOfPartitions += 1
+	}
+
+	partitions := make([][]*cloudwatch.MetricDatum, numberOfPartitions)
+
+	for i := 0; i < numberOfPartitions; i++ {
+		start := size * i
+		end := size * (i + 1)
+		if end > len(datums) {
+			end = len(datums)
+		}
+
+		partitions[i] = datums[start:end]
+	}
+
+	return partitions
+}
+
+// Make a MetricDatum for each field in a Point. Only fields with values that can be
+// converted to float64 are supported. Unsupported fields are skipped.
+func BuildMetricDatum(point *client.Point) []*cloudwatch.MetricDatum {
+	datums := make([]*cloudwatch.MetricDatum, len(point.Fields()))
+	i := 0
+
+	var value float64
+
+	for k, v := range point.Fields() {
+		switch t := v.(type) {
+		case int:
+			value = float64(t)
+		case int32:
+			value = float64(t)
+		case int64:
+			value = float64(t)
+		case float64:
+			value = t
+		case bool:
+			if t {
+				value = 1
+			} else {
+				value = 0
+			}
+		case time.Time:
+			value = float64(t.Unix())
+		default:
+			// Skip unsupported type.
+			datums = datums[:len(datums)-1]
+			continue
+		}
+
+		datums[i] = &cloudwatch.MetricDatum{
+			MetricName: aws.String(strings.Join([]string{point.Name(), k}, "_")),
+			Value:      aws.Float64(value),
+			Dimensions: BuildDimensions(point.Tags()),
+			Timestamp:  aws.Time(point.Time()),
+		}
+
+		i += 1
+	}
+
+	return datums
+}
+
+// Make a list of Dimensions by using a Point's tags. CloudWatch supports up to
+// 10 dimensions per metric so we only keep up to the first 10 alphabetically.
+// This always includes the "host" tag if it exists.
+func BuildDimensions(ptTags map[string]string) []*cloudwatch.Dimension {
+
+	const MaxDimensions = 10
+	dimensions := make([]*cloudwatch.Dimension, int(math.Min(float64(len(ptTags)), MaxDimensions)))
+
+	i := 0
+
+	// This is pretty ugly but we always want to include the "host" tag if it exists.
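+	// The remaining tags are then added in alphabetical key order until the
+	// 10-dimension limit is reached.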
+	if host, ok := ptTags["host"]; ok {
+		dimensions[i] = &cloudwatch.Dimension{
+			Name:  aws.String("host"),
+			Value: aws.String(host),
+		}
+		i += 1
+	}
+
+	var keys []string
+	for k := range ptTags {
+		if k != "host" {
+			keys = append(keys, k)
+		}
+	}
+	sort.Strings(keys)
+
+	for _, k := range keys {
+		if i >= MaxDimensions {
+			break
+		}
+
+		dimensions[i] = &cloudwatch.Dimension{
+			Name:  aws.String(k),
+			Value: aws.String(ptTags[k]),
+		}
+
+		i += 1
+	}
+
+	return dimensions
+}
+
+func init() {
+	outputs.Add("cloudwatch", func() outputs.Output {
+		return &CloudWatch{}
+	})
+}
diff --git a/plugins/outputs/cloudwatch/cloudwatch_test.go b/plugins/outputs/cloudwatch/cloudwatch_test.go
new file mode 100644
index 000000000..2041e14fd
--- /dev/null
+++ b/plugins/outputs/cloudwatch/cloudwatch_test.go
@@ -0,0 +1,88 @@
+package cloudwatch
+
+import (
+	"sort"
+	"testing"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/service/cloudwatch"
+
+	"github.com/influxdata/influxdb/client/v2"
+	"github.com/influxdata/telegraf/testutil"
+
+	"github.com/stretchr/testify/assert"
+)
+
+// Test that each tag becomes one dimension
+func TestBuildDimensions(t *testing.T) {
+	const MaxDimensions = 10
+
+	assert := assert.New(t)
+
+	testPoint := testutil.TestPoint(1)
+	dimensions := BuildDimensions(testPoint.Tags())
+
+	tagKeys := make([]string, len(testPoint.Tags()))
+	i := 0
+	for k := range testPoint.Tags() {
+		tagKeys[i] = k
+		i += 1
+	}
+
+	sort.Strings(tagKeys)
+
+	if len(testPoint.Tags()) >= MaxDimensions {
+		assert.Equal(MaxDimensions, len(dimensions), "Number of dimensions should be capped at MaxDimensions")
+	} else {
+		assert.Equal(len(testPoint.Tags()), len(dimensions), "Number of dimensions should be equal to number of tags")
+	}
+
+	for i, key := range tagKeys {
+		if i >= 10 {
+			break
+		}
+		assert.Equal(key, *dimensions[i].Name, "Key should be equal")
+		assert.Equal(testPoint.Tags()[key], *dimensions[i].Value, "Value should be equal")
+	}
+}
+
+// Test that points with valid values have a MetricDatum created, whereas invalid values do not.
+// Skips the "time.Time" type, as something converts the value to a string.
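+// (A string point such as testutil.TestPoint("Foo") below cannot be converted
+// to float64, so it yields zero datums.)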
+func TestBuildMetricDatums(t *testing.T) { + assert := assert.New(t) + + validPoints := []*client.Point{ + testutil.TestPoint(1), + testutil.TestPoint(int32(1)), + testutil.TestPoint(int64(1)), + testutil.TestPoint(float64(1)), + testutil.TestPoint(true), + } + + for _, point := range validPoints { + datums := BuildMetricDatum(point) + assert.Equal(1, len(datums), "Valid type should create a Datum") + } + + nonValidPoint := testutil.TestPoint("Foo") + + assert.Equal(0, len(BuildMetricDatum(nonValidPoint)), "Invalid type should not create a Datum") +} + +func TestPartitionDatums(t *testing.T) { + + assert := assert.New(t) + + testDatum := cloudwatch.MetricDatum{ + MetricName: aws.String("Foo"), + Value: aws.Float64(1), + } + + oneDatum := []*cloudwatch.MetricDatum{&testDatum} + twoDatum := []*cloudwatch.MetricDatum{&testDatum, &testDatum} + threeDatum := []*cloudwatch.MetricDatum{&testDatum, &testDatum, &testDatum} + + assert.Equal([][]*cloudwatch.MetricDatum{oneDatum}, PartitionDatums(2, oneDatum)) + assert.Equal([][]*cloudwatch.MetricDatum{twoDatum}, PartitionDatums(2, twoDatum)) + assert.Equal([][]*cloudwatch.MetricDatum{twoDatum, oneDatum}, PartitionDatums(2, threeDatum)) +} diff --git a/outputs/datadog/README.md b/plugins/outputs/datadog/README.md similarity index 100% rename from outputs/datadog/README.md rename to plugins/outputs/datadog/README.md diff --git a/outputs/datadog/datadog.go b/plugins/outputs/datadog/datadog.go similarity index 76% rename from outputs/datadog/datadog.go rename to plugins/outputs/datadog/datadog.go index f37c81a9c..7d6539789 100644 --- a/outputs/datadog/datadog.go +++ b/plugins/outputs/datadog/datadog.go @@ -10,9 +10,9 @@ import ( "sort" "strings" - "github.com/influxdb/influxdb/client/v2" - "github.com/influxdb/telegraf/internal" - "github.com/influxdb/telegraf/outputs" + "github.com/influxdata/influxdb/client/v2" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/outputs" ) type Datadog struct { @@ -67,23 +67,26 @@ func (d *Datadog) Write(points []*client.Point) error { return nil } ts := TimeSeries{} - var tempSeries = make([]*Metric, len(points)) - var acceptablePoints = 0 + tempSeries := []*Metric{} + metricCounter := 0 + for _, pt := range points { - metric := &Metric{ - Metric: strings.Replace(pt.Name(), "_", ".", -1), - Tags: buildTags(pt.Tags()), - Host: pt.Tags()["host"], - } - if p, err := buildPoint(pt); err == nil { - metric.Points[0] = p - tempSeries[acceptablePoints] = metric - acceptablePoints += 1 + mname := strings.Replace(pt.Name(), "_", ".", -1) + if amonPts, err := buildPoints(pt); err == nil { + for fieldName, amonPt := range amonPts { + metric := &Metric{ + Metric: mname + strings.Replace(fieldName, "_", ".", -1), + } + metric.Points[0] = amonPt + tempSeries = append(tempSeries, metric) + metricCounter++ + } } else { log.Printf("unable to build Metric for %s, skipping\n", pt.Name()) } } - ts.Series = make([]*Metric, acceptablePoints) + + ts.Series = make([]*Metric, metricCounter) copy(ts.Series, tempSeries[0:]) tsBytes, err := json.Marshal(ts) if err != nil { @@ -123,13 +126,17 @@ func (d *Datadog) authenticatedUrl() string { return fmt.Sprintf("%s?%s", d.apiUrl, q.Encode()) } -func buildPoint(pt *client.Point) (Point, error) { - var p Point - if err := p.setValue(pt.Fields()["value"]); err != nil { - return p, fmt.Errorf("unable to extract value from Fields, %s", err.Error()) +func buildPoints(pt *client.Point) (map[string]Point, error) { + pts := make(map[string]Point) + for k, v := range 
pt.Fields() {
+		var p Point
+		if err := p.setValue(v); err != nil {
+			return pts, fmt.Errorf("unable to extract value from Fields, %s", err.Error())
+		}
+		p[0] = float64(pt.Time().Unix())
+		pts[k] = p
 	}
-	p[0] = float64(pt.Time().Unix())
-	return p, nil
+	return pts, nil
 }
 
 func buildTags(ptTags map[string]string) []string {
diff --git a/outputs/datadog/datadog_test.go b/plugins/outputs/datadog/datadog_test.go
similarity index 93%
rename from outputs/datadog/datadog_test.go
rename to plugins/outputs/datadog/datadog_test.go
index 49da0a7b3..968a8e9c8 100644
--- a/outputs/datadog/datadog_test.go
+++ b/plugins/outputs/datadog/datadog_test.go
@@ -9,9 +9,9 @@ import (
 	"testing"
 	"time"
 
-	"github.com/influxdb/telegraf/testutil"
+	"github.com/influxdata/telegraf/testutil"
 
-	"github.com/influxdb/influxdb/client/v2"
+	"github.com/influxdata/influxdb/client/v2"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
@@ -162,15 +162,16 @@ func TestBuildPoint(t *testing.T) {
 		},
 	}
 	for _, tt := range tagtests {
-		pt, err := buildPoint(tt.ptIn)
+		pt, err := buildPoints(tt.ptIn)
 		if err != nil && tt.err == nil {
 			t.Errorf("%s: unexpected error, %+v\n", tt.ptIn.Name(), err)
 		}
 		if tt.err != nil && err == nil {
 			t.Errorf("%s: expected an error (%s) but none returned", tt.ptIn.Name(), tt.err.Error())
 		}
-		if !reflect.DeepEqual(pt, tt.outPt) && tt.err == nil {
-			t.Errorf("%s: \nexpected %+v\ngot %+v\n", tt.ptIn.Name(), tt.outPt, pt)
+		if !reflect.DeepEqual(pt["value"], tt.outPt) && tt.err == nil {
+			t.Errorf("%s: \nexpected %+v\ngot %+v\n",
+				tt.ptIn.Name(), tt.outPt, pt["value"])
 		}
 	}
 }
diff --git a/plugins/outputs/graphite/README.md b/plugins/outputs/graphite/README.md
new file mode 100644
index 000000000..48313a886
--- /dev/null
+++ b/plugins/outputs/graphite/README.md
@@ -0,0 +1,13 @@
+# Graphite Output Plugin
+
+This plugin writes to [Graphite](http://graphite.readthedocs.org/en/latest/index.html) via raw TCP.
+
+Parameters:
+
+    Servers []string
+    Prefix string
+    Timeout int
+
+* `servers`: List of strings, ["mygraphiteserver:2003"].
+* `prefix`: String used to prefix all sent metrics.
+* `timeout`: Connection timeout in seconds.
diff --git a/plugins/outputs/graphite/graphite.go b/plugins/outputs/graphite/graphite.go
new file mode 100644
index 000000000..f9781041f
--- /dev/null
+++ b/plugins/outputs/graphite/graphite.go
@@ -0,0 +1,134 @@
+package graphite
+
+import (
+	"errors"
+	"fmt"
+	"github.com/influxdata/influxdb/client/v2"
+	"github.com/influxdata/telegraf/plugins/outputs"
+	"log"
+	"math/rand"
+	"net"
+	"strings"
+	"time"
+)
+
+type Graphite struct {
+	// URL is only for backwards compatibility
+	Servers []string
+	Prefix  string
+	Timeout int
+	conns   []net.Conn
+}
+
+var sampleConfig = `
+  # TCP endpoint for your graphite instance.
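+  # (each write goes to one randomly chosen server, falling back to the others on error)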
+ servers = ["localhost:2003"] + # Prefix metrics name + prefix = "" + # timeout in seconds for the write connection to graphite + timeout = 2 +` + +func (g *Graphite) Connect() error { + // Set default values + if g.Timeout <= 0 { + g.Timeout = 2 + } + if len(g.Servers) == 0 { + g.Servers = append(g.Servers, "localhost:2003") + } + // Get Connections + var conns []net.Conn + for _, server := range g.Servers { + conn, err := net.DialTimeout("tcp", server, time.Duration(g.Timeout)*time.Second) + if err == nil { + conns = append(conns, conn) + } + } + g.conns = conns + return nil +} + +func (g *Graphite) Close() error { + // Closing all connections + for _, conn := range g.conns { + conn.Close() + } + return nil +} + +func (g *Graphite) SampleConfig() string { + return sampleConfig +} + +func (g *Graphite) Description() string { + return "Configuration for Graphite server to send metrics to" +} + +// Choose a random server in the cluster to write to until a successful write +// occurs, logging each unsuccessful. If all servers fail, return error. +func (g *Graphite) Write(points []*client.Point) error { + // Prepare data + var bp []string + for _, point := range points { + // Get name + name := point.Name() + // Convert UnixNano to Unix timestamps + timestamp := point.UnixNano() / 1000000000 + + for field_name, value := range point.Fields() { + // Convert value + value_str := fmt.Sprintf("%#v", value) + // Write graphite point + var graphitePoint string + if name == field_name { + graphitePoint = fmt.Sprintf("%s.%s %s %d\n", + strings.Replace(point.Tags()["host"], ".", "_", -1), + strings.Replace(name, ".", "_", -1), + value_str, + timestamp) + } else { + graphitePoint = fmt.Sprintf("%s.%s.%s %s %d\n", + strings.Replace(point.Tags()["host"], ".", "_", -1), + strings.Replace(name, ".", "_", -1), + strings.Replace(field_name, ".", "_", -1), + value_str, + timestamp) + } + if g.Prefix != "" { + graphitePoint = fmt.Sprintf("%s.%s", g.Prefix, graphitePoint) + } + bp = append(bp, graphitePoint) + //fmt.Printf(graphitePoint) + } + } + graphitePoints := strings.Join(bp, "") + + // This will get set to nil if a successful write occurs + err := errors.New("Could not write to any Graphite server in cluster\n") + + // Send data to a random server + p := rand.Perm(len(g.conns)) + for _, n := range p { + if _, e := fmt.Fprintf(g.conns[n], graphitePoints); e != nil { + // Error + log.Println("ERROR: " + err.Error()) + // Let's try the next one + } else { + // Success + err = nil + break + } + } + // try to reconnect + if err != nil { + g.Connect() + } + return err +} + +func init() { + outputs.Add("graphite", func() outputs.Output { + return &Graphite{} + }) +} diff --git a/plugins/outputs/graphite/graphite_test.go b/plugins/outputs/graphite/graphite_test.go new file mode 100644 index 000000000..be4cc2472 --- /dev/null +++ b/plugins/outputs/graphite/graphite_test.go @@ -0,0 +1,104 @@ +package graphite + +import ( + "bufio" + "net" + "net/textproto" + "sync" + "testing" + "time" + + "github.com/influxdata/influxdb/client/v2" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestGraphiteError(t *testing.T) { + // Init plugin + g := Graphite{ + Servers: []string{"127.0.0.1:2003", "127.0.0.1:12003"}, + Prefix: "my.prefix", + } + // Init points + pt1, _ := client.NewPoint( + "mymeasurement", + map[string]string{"host": "192.168.0.1"}, + map[string]interface{}{"mymeasurement": float64(3.14)}, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + // Prepare 
point list + var points []*client.Point + points = append(points, pt1) + // Error + err1 := g.Connect() + require.NoError(t, err1) + err2 := g.Write(points) + require.Error(t, err2) + assert.Equal(t, "Could not write to any Graphite server in cluster\n", err2.Error()) +} + +func TestGraphiteOK(t *testing.T) { + var wg sync.WaitGroup + // Init plugin + g := Graphite{ + Prefix: "my.prefix", + } + // Init points + pt1, _ := client.NewPoint( + "mymeasurement", + map[string]string{"host": "192.168.0.1"}, + map[string]interface{}{"mymeasurement": float64(3.14)}, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + pt2, _ := client.NewPoint( + "mymeasurement", + map[string]string{"host": "192.168.0.1"}, + map[string]interface{}{"value": float64(3.14)}, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + pt3, _ := client.NewPoint( + "my_measurement", + map[string]string{"host": "192.168.0.1"}, + map[string]interface{}{"value": float64(3.14)}, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + // Prepare point list + var points []*client.Point + points = append(points, pt1) + points = append(points, pt2) + points = append(points, pt3) + // Start TCP server + wg.Add(1) + go TCPServer(t, &wg) + wg.Wait() + // Connect + wg.Add(1) + err1 := g.Connect() + wg.Wait() + require.NoError(t, err1) + // Send Data + err2 := g.Write(points) + require.NoError(t, err2) + wg.Add(1) + // Waiting TCPserver + wg.Wait() + g.Close() +} + +func TCPServer(t *testing.T, wg *sync.WaitGroup) { + tcpServer, _ := net.Listen("tcp", "127.0.0.1:2003") + wg.Done() + conn, _ := tcpServer.Accept() + wg.Done() + reader := bufio.NewReader(conn) + tp := textproto.NewReader(reader) + data1, _ := tp.ReadLine() + assert.Equal(t, "my.prefix.192_168_0_1.mymeasurement 3.14 1289430000", data1) + data2, _ := tp.ReadLine() + assert.Equal(t, "my.prefix.192_168_0_1.mymeasurement.value 3.14 1289430000", data2) + data3, _ := tp.ReadLine() + assert.Equal(t, "my.prefix.192_168_0_1.my_measurement.value 3.14 1289430000", data3) + conn.Close() + wg.Done() +} diff --git a/outputs/influxdb/README.md b/plugins/outputs/influxdb/README.md similarity index 100% rename from outputs/influxdb/README.md rename to plugins/outputs/influxdb/README.md diff --git a/outputs/influxdb/influxdb.go b/plugins/outputs/influxdb/influxdb.go similarity index 95% rename from outputs/influxdb/influxdb.go rename to plugins/outputs/influxdb/influxdb.go index a9fa2edc3..f45f020b6 100644 --- a/outputs/influxdb/influxdb.go +++ b/plugins/outputs/influxdb/influxdb.go @@ -7,10 +7,11 @@ import ( "math/rand" "net/url" "strings" + "time" - "github.com/influxdb/influxdb/client/v2" - "github.com/influxdb/telegraf/internal" - "github.com/influxdb/telegraf/outputs" + "github.com/influxdata/influxdb/client/v2" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/outputs" ) type InfluxDB struct { @@ -110,6 +111,7 @@ func (i *InfluxDB) Connect() error { } i.conns = conns + rand.Seed(time.Now().UnixNano()) return nil } diff --git a/outputs/influxdb/influxdb_test.go b/plugins/outputs/influxdb/influxdb_test.go similarity index 94% rename from outputs/influxdb/influxdb_test.go rename to plugins/outputs/influxdb/influxdb_test.go index cf1d7d9b3..5da0c056f 100644 --- a/outputs/influxdb/influxdb_test.go +++ b/plugins/outputs/influxdb/influxdb_test.go @@ -6,7 +6,7 @@ import ( "net/http/httptest" "testing" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) 
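For reference, the naming scheme asserted by the graphite tests above: each field
becomes one plaintext line of the form `prefix.host.measurement[.field] value timestamp`,
with dots in the host and measurement replaced by underscores, and the field name
omitted when it equals the measurement name. For the test points this yields, for example:

    my.prefix.192_168_0_1.mymeasurement 3.14 1289430000
    my.prefix.192_168_0_1.mymeasurement.value 3.14 1289430000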
diff --git a/plugins/outputs/kafka/kafka.go b/plugins/outputs/kafka/kafka.go
new file mode 100644
index 000000000..b16347c92
--- /dev/null
+++ b/plugins/outputs/kafka/kafka.go
@@ -0,0 +1,146 @@
+package kafka
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"errors"
+	"fmt"
+	"github.com/Shopify/sarama"
+	"github.com/influxdata/influxdb/client/v2"
+	"github.com/influxdata/telegraf/plugins/outputs"
+	"io/ioutil"
+)
+
+type Kafka struct {
+	// Kafka brokers to send metrics to
+	Brokers []string
+	// Kafka topic
+	Topic string
+	// Routing Key Tag
+	RoutingTag string `toml:"routing_tag"`
+	// TLS client certificate
+	Certificate string
+	// TLS client key
+	Key string
+	// TLS certificate authority
+	CA string
+	// Verify SSL certificate chain
+	VerifySsl bool
+
+	tlsConfig tls.Config
+	producer  sarama.SyncProducer
+}
+
+var sampleConfig = `
+  # URLs of kafka brokers
+  brokers = ["localhost:9092"]
+  # Kafka topic for producer messages
+  topic = "telegraf"
+  # Telegraf tag to use as a routing key
+  #  ie, if this tag exists, its value will be used as the routing key
+  routing_tag = "host"
+
+  # Optional TLS configuration:
+  # Client certificate
+  certificate = ""
+  # Client key
+  key = ""
+  # Certificate authority file
+  ca = ""
+  # Verify SSL certificate chain
+  verify_ssl = false
+`
+
+func createTlsConfiguration(k *Kafka) (t *tls.Config, err error) {
+	if k.Certificate != "" && k.Key != "" && k.CA != "" {
+		cert, err := tls.LoadX509KeyPair(k.Certificate, k.Key)
+		if err != nil {
+			return nil, errors.New(fmt.Sprintf("Could not load Kafka TLS client key/certificate: %s",
+				err))
+		}
+
+		caCert, err := ioutil.ReadFile(k.CA)
+		if err != nil {
+			return nil, errors.New(fmt.Sprintf("Could not load Kafka TLS CA: %s",
+				err))
+		}
+
+		caCertPool := x509.NewCertPool()
+		caCertPool.AppendCertsFromPEM(caCert)
+
+		t = &tls.Config{
+			Certificates:       []tls.Certificate{cert},
+			RootCAs:            caCertPool,
+			InsecureSkipVerify: k.VerifySsl,
+		}
+	}
+	// will be nil by default if nothing is provided
+	return t, nil
+}
+
+func (k *Kafka) Connect() error {
+	config := sarama.NewConfig()
+	config.Producer.RequiredAcks = sarama.WaitForAll // Wait for all in-sync replicas to ack the message
+	config.Producer.Retry.Max = 10                   // Retry up to 10 times to produce the message
+	tlsConfig, err := createTlsConfiguration(k)
+	if err != nil {
+		return err
+	}
+
+	if tlsConfig != nil {
+		config.Net.TLS.Config = tlsConfig
+		config.Net.TLS.Enable = true
+	}
+
+	producer, err := sarama.NewSyncProducer(k.Brokers, config)
+	if err != nil {
+		return err
+	}
+	k.producer = producer
+	return nil
+}
+
+func (k *Kafka) Close() error {
+	return k.producer.Close()
+}
+
+func (k *Kafka) SampleConfig() string {
+	return sampleConfig
+}
+
+func (k *Kafka) Description() string {
+	return "Configuration for the Kafka server to send metrics to"
+}
+
+func (k *Kafka) Write(points []*client.Point) error {
+	if len(points) == 0 {
+		return nil
+	}
+
+	for _, p := range points {
+		// Combine tags from Point and BatchPoints and grab the resulting
+		// line-protocol output string to write to Kafka
+		value := p.String()
+
+		m := &sarama.ProducerMessage{
+			Topic: k.Topic,
+			Value: sarama.StringEncoder(value),
+		}
+		if h, ok := p.Tags()[k.RoutingTag]; ok {
+			m.Key = sarama.StringEncoder(h)
+		}
+
+		_, _, err := k.producer.SendMessage(m)
+		if err != nil {
+			return errors.New(fmt.Sprintf("FAILED to send kafka message: %s\n",
+				err))
+		}
+	}
+	return nil
+}
+
+func init() {
+	outputs.Add("kafka", func() outputs.Output {
+		return &Kafka{}
+	})
+}
diff --git
a/outputs/kafka/kafka_test.go b/plugins/outputs/kafka/kafka_test.go similarity index 92% rename from outputs/kafka/kafka_test.go rename to plugins/outputs/kafka/kafka_test.go index 2c1734857..2af343778 100644 --- a/outputs/kafka/kafka_test.go +++ b/plugins/outputs/kafka/kafka_test.go @@ -3,7 +3,7 @@ package kafka import ( "testing" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) diff --git a/outputs/kinesis/README.md b/plugins/outputs/kinesis/README.md similarity index 100% rename from outputs/kinesis/README.md rename to plugins/outputs/kinesis/README.md diff --git a/outputs/kinesis/kinesis.go b/plugins/outputs/kinesis/kinesis.go similarity index 97% rename from outputs/kinesis/kinesis.go rename to plugins/outputs/kinesis/kinesis.go index 144131707..f04f1c7c6 100644 --- a/outputs/kinesis/kinesis.go +++ b/plugins/outputs/kinesis/kinesis.go @@ -15,8 +15,8 @@ import ( "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/kinesis" - "github.com/influxdb/influxdb/client/v2" - "github.com/influxdb/telegraf/outputs" + "github.com/influxdata/influxdb/client/v2" + "github.com/influxdata/telegraf/plugins/outputs" ) type KinesisOutput struct { diff --git a/outputs/kinesis/kinesis_test.go b/plugins/outputs/kinesis/kinesis_test.go similarity index 94% rename from outputs/kinesis/kinesis_test.go rename to plugins/outputs/kinesis/kinesis_test.go index 4c667c860..76eb6ebca 100644 --- a/outputs/kinesis/kinesis_test.go +++ b/plugins/outputs/kinesis/kinesis_test.go @@ -1,7 +1,7 @@ package kinesis import ( - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" "testing" ) diff --git a/outputs/librato/README.md b/plugins/outputs/librato/README.md similarity index 100% rename from outputs/librato/README.md rename to plugins/outputs/librato/README.md diff --git a/outputs/librato/librato.go b/plugins/outputs/librato/librato.go similarity index 74% rename from outputs/librato/librato.go rename to plugins/outputs/librato/librato.go index 9f8f6dd0d..6afcb4542 100644 --- a/outputs/librato/librato.go +++ b/plugins/outputs/librato/librato.go @@ -7,9 +7,9 @@ import ( "log" "net/http" - "github.com/influxdb/influxdb/client/v2" - "github.com/influxdb/telegraf/internal" - "github.com/influxdb/telegraf/outputs" + "github.com/influxdata/influxdb/client/v2" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/outputs" ) type Librato struct { @@ -74,17 +74,21 @@ func (l *Librato) Write(points []*client.Point) error { return nil } metrics := Metrics{} - var tempGauges = make([]*Gauge, len(points)) - var acceptablePoints = 0 + tempGauges := []*Gauge{} + metricCounter := 0 + for _, pt := range points { - if gauge, err := l.buildGauge(pt); err == nil { - tempGauges[acceptablePoints] = gauge - acceptablePoints += 1 + if gauges, err := l.buildGauges(pt); err == nil { + for _, gauge := range gauges { + tempGauges = append(tempGauges, gauge) + metricCounter++ + } } else { log.Printf("unable to build Gauge for %s, skipping\n", pt.Name()) } } - metrics.Gauges = make([]*Gauge, acceptablePoints) + + metrics.Gauges = make([]*Gauge, metricCounter) copy(metrics.Gauges, tempGauges[0:]) metricsBytes, err := json.Marshal(metrics) if err != nil { @@ -118,22 +122,28 @@ func (l *Librato) Description() string { return "Configuration for Librato API to send metrics to." 
} -func (l *Librato) buildGauge(pt *client.Point) (*Gauge, error) { - gauge := &Gauge{ - Name: pt.Name(), - MeasureTime: pt.Time().Unix(), - } - if err := gauge.setValue(pt.Fields()["value"]); err != nil { - return gauge, fmt.Errorf("unable to extract value from Fields, %s\n", err.Error()) - } - if l.SourceTag != "" { - if source, ok := pt.Tags()[l.SourceTag]; ok { - gauge.Source = source - } else { - return gauge, fmt.Errorf("undeterminable Source type from Field, %s\n", l.SourceTag) +func (l *Librato) buildGauges(pt *client.Point) ([]*Gauge, error) { + gauges := []*Gauge{} + for fieldName, value := range pt.Fields() { + gauge := &Gauge{ + Name: pt.Name() + "_" + fieldName, + MeasureTime: pt.Time().Unix(), + } + if err := gauge.setValue(value); err != nil { + return gauges, fmt.Errorf("unable to extract value from Fields, %s\n", + err.Error()) + } + if l.SourceTag != "" { + if source, ok := pt.Tags()[l.SourceTag]; ok { + gauge.Source = source + } else { + return gauges, + fmt.Errorf("undeterminable Source type from Field, %s\n", + l.SourceTag) + } } } - return gauge, nil + return gauges, nil } func (g *Gauge) setValue(v interface{}) error { diff --git a/outputs/librato/librato_test.go b/plugins/outputs/librato/librato_test.go similarity index 88% rename from outputs/librato/librato_test.go rename to plugins/outputs/librato/librato_test.go index 71d726769..25418baa5 100644 --- a/outputs/librato/librato_test.go +++ b/plugins/outputs/librato/librato_test.go @@ -9,9 +9,9 @@ import ( "testing" "time" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" - "github.com/influxdb/influxdb/client/v2" + "github.com/influxdata/influxdb/client/v2" "github.com/stretchr/testify/require" ) @@ -142,15 +142,20 @@ func TestBuildGauge(t *testing.T) { l := NewLibrato(fakeUrl) for _, gt := range gaugeTests { - gauge, err := l.buildGauge(gt.ptIn) + gauges, err := l.buildGauges(gt.ptIn) if err != nil && gt.err == nil { t.Errorf("%s: unexpected error, %+v\n", gt.ptIn.Name(), err) } if gt.err != nil && err == nil { - t.Errorf("%s: expected an error (%s) but none returned", gt.ptIn.Name(), gt.err.Error()) + t.Errorf("%s: expected an error (%s) but none returned", + gt.ptIn.Name(), gt.err.Error()) } - if !reflect.DeepEqual(gauge, gt.outGauge) && gt.err == nil { - t.Errorf("%s: \nexpected %+v\ngot %+v\n", gt.ptIn.Name(), gt.outGauge, gauge) + if len(gauges) == 0 { + continue + } + if gt.err == nil && !reflect.DeepEqual(gauges[0], gt.outGauge) { + t.Errorf("%s: \nexpected %+v\ngot %+v\n", + gt.ptIn.Name(), gt.outGauge, gauges[0]) } } } @@ -198,15 +203,18 @@ func TestBuildGaugeWithSource(t *testing.T) { l := NewLibrato(fakeUrl) l.SourceTag = "hostname" for _, gt := range gaugeTests { - gauge, err := l.buildGauge(gt.ptIn) + gauges, err := l.buildGauges(gt.ptIn) if err != nil && gt.err == nil { t.Errorf("%s: unexpected error, %+v\n", gt.ptIn.Name(), err) } if gt.err != nil && err == nil { t.Errorf("%s: expected an error (%s) but none returned", gt.ptIn.Name(), gt.err.Error()) } - if !reflect.DeepEqual(gauge, gt.outGauge) && gt.err == nil { - t.Errorf("%s: \nexpected %+v\ngot %+v\n", gt.ptIn.Name(), gt.outGauge, gauge) + if len(gauges) == 0 { + continue + } + if gt.err == nil && !reflect.DeepEqual(gauges[0], gt.outGauge) { + t.Errorf("%s: \nexpected %+v\ngot %+v\n", gt.ptIn.Name(), gt.outGauge, gauges[0]) } } } diff --git a/outputs/mqtt/mqtt.go b/plugins/outputs/mqtt/mqtt.go similarity index 96% rename from outputs/mqtt/mqtt.go rename to plugins/outputs/mqtt/mqtt.go index 
aa6e17bc7..7c47cf741 100644 --- a/outputs/mqtt/mqtt.go +++ b/plugins/outputs/mqtt/mqtt.go @@ -10,9 +10,9 @@ import ( "sync" paho "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git" - "github.com/influxdb/influxdb/client/v2" - "github.com/influxdb/telegraf/internal" - "github.com/influxdb/telegraf/outputs" + "github.com/influxdata/influxdb/client/v2" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/outputs" ) const MaxClientIdLen = 8 diff --git a/outputs/mqtt/mqtt_test.go b/plugins/outputs/mqtt/mqtt_test.go similarity index 92% rename from outputs/mqtt/mqtt_test.go rename to plugins/outputs/mqtt/mqtt_test.go index 0922b83ed..f25f4497f 100644 --- a/outputs/mqtt/mqtt_test.go +++ b/plugins/outputs/mqtt/mqtt_test.go @@ -3,7 +3,7 @@ package mqtt import ( "testing" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) diff --git a/outputs/nsq/README.md b/plugins/outputs/nsq/README.md similarity index 100% rename from outputs/nsq/README.md rename to plugins/outputs/nsq/README.md diff --git a/outputs/nsq/nsq.go b/plugins/outputs/nsq/nsq.go similarity index 92% rename from outputs/nsq/nsq.go rename to plugins/outputs/nsq/nsq.go index 65b139c77..79818ec5c 100644 --- a/outputs/nsq/nsq.go +++ b/plugins/outputs/nsq/nsq.go @@ -2,8 +2,8 @@ package nsq import ( "fmt" - "github.com/influxdb/influxdb/client/v2" - "github.com/influxdb/telegraf/outputs" + "github.com/influxdata/influxdb/client/v2" + "github.com/influxdata/telegraf/plugins/outputs" "github.com/nsqio/go-nsq" ) diff --git a/outputs/nsq/nsq_test.go b/plugins/outputs/nsq/nsq_test.go similarity index 92% rename from outputs/nsq/nsq_test.go rename to plugins/outputs/nsq/nsq_test.go index 4448383f4..b2d703a70 100644 --- a/outputs/nsq/nsq_test.go +++ b/plugins/outputs/nsq/nsq_test.go @@ -3,7 +3,7 @@ package nsq import ( "testing" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) diff --git a/outputs/opentsdb/README.md b/plugins/outputs/opentsdb/README.md similarity index 100% rename from outputs/opentsdb/README.md rename to plugins/outputs/opentsdb/README.md diff --git a/outputs/opentsdb/opentsdb.go b/plugins/outputs/opentsdb/opentsdb.go similarity index 76% rename from outputs/opentsdb/opentsdb.go rename to plugins/outputs/opentsdb/opentsdb.go index 22c8c91fc..6e9f3e26a 100644 --- a/outputs/opentsdb/opentsdb.go +++ b/plugins/outputs/opentsdb/opentsdb.go @@ -8,8 +8,8 @@ import ( "strings" "time" - "github.com/influxdb/influxdb/client/v2" - "github.com/influxdb/telegraf/outputs" + "github.com/influxdata/influxdb/client/v2" + "github.com/influxdata/telegraf/plugins/outputs" ) type OpenTSDB struct { @@ -62,7 +62,8 @@ func (o *OpenTSDB) Write(points []*client.Point) error { if len(points) == 0 { return nil } - var timeNow = time.Now() + now := time.Now() + // Send Data with telnet / socket communication uri := fmt.Sprintf("%s:%d", o.Host, o.Port) tcpAddr, _ := net.ResolveTCPAddr("tcp", uri) @@ -70,32 +71,21 @@ func (o *OpenTSDB) Write(points []*client.Point) error { if err != nil { return fmt.Errorf("OpenTSDB: Telnet connect fail") } + defer connection.Close() + for _, pt := range points { - metric := &MetricLine{ - Metric: fmt.Sprintf("%s%s", o.Prefix, pt.Name()), - Timestamp: timeNow.Unix(), - } - - metricValue, buildError := buildValue(pt) - if buildError != nil { - fmt.Printf("OpenTSDB: %s\n", buildError.Error()) - continue - } - metric.Value = metricValue - 
- tagsSlice := buildTags(pt.Tags()) - metric.Tags = fmt.Sprint(strings.Join(tagsSlice, " ")) - - messageLine := fmt.Sprintf("put %s %v %s %s\n", metric.Metric, metric.Timestamp, metric.Value, metric.Tags) - if o.Debug { - fmt.Print(messageLine) - } - _, err := connection.Write([]byte(messageLine)) - if err != nil { - return fmt.Errorf("OpenTSDB: Telnet writing error %s", err.Error()) + for _, metric := range buildMetrics(pt, now, o.Prefix) { + messageLine := fmt.Sprintf("put %s %v %s %s\n", + metric.Metric, metric.Timestamp, metric.Value, metric.Tags) + if o.Debug { + fmt.Print(messageLine) + } + _, err := connection.Write([]byte(messageLine)) + if err != nil { + return fmt.Errorf("OpenTSDB: Telnet writing error %s", err.Error()) + } } } - defer connection.Close() return nil } @@ -111,9 +101,29 @@ func buildTags(ptTags map[string]string) []string { return tags } -func buildValue(pt *client.Point) (string, error) { +func buildMetrics(pt *client.Point, now time.Time, prefix string) []*MetricLine { + ret := []*MetricLine{} + for fieldName, value := range pt.Fields() { + metric := &MetricLine{ + Metric: fmt.Sprintf("%s%s_%s", prefix, pt.Name(), fieldName), + Timestamp: now.Unix(), + } + + metricValue, buildError := buildValue(value) + if buildError != nil { + fmt.Printf("OpenTSDB: %s\n", buildError.Error()) + continue + } + metric.Value = metricValue + tagsSlice := buildTags(pt.Tags()) + metric.Tags = fmt.Sprint(strings.Join(tagsSlice, " ")) + ret = append(ret, metric) + } + return ret +} + +func buildValue(v interface{}) (string, error) { var retv string - var v = pt.Fields()["value"] switch p := v.(type) { case int64: retv = IntToString(int64(p)) diff --git a/outputs/opentsdb/opentsdb_test.go b/plugins/outputs/opentsdb/opentsdb_test.go similarity index 97% rename from outputs/opentsdb/opentsdb_test.go rename to plugins/outputs/opentsdb/opentsdb_test.go index f75bd7205..92df3fb52 100644 --- a/outputs/opentsdb/opentsdb_test.go +++ b/plugins/outputs/opentsdb/opentsdb_test.go @@ -4,7 +4,7 @@ import ( "reflect" "testing" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) diff --git a/outputs/prometheus_client/README.md b/plugins/outputs/prometheus_client/README.md similarity index 100% rename from outputs/prometheus_client/README.md rename to plugins/outputs/prometheus_client/README.md diff --git a/outputs/prometheus_client/prometheus_client.go b/plugins/outputs/prometheus_client/prometheus_client.go similarity index 96% rename from outputs/prometheus_client/prometheus_client.go rename to plugins/outputs/prometheus_client/prometheus_client.go index 6df69a0a4..4e429722a 100644 --- a/outputs/prometheus_client/prometheus_client.go +++ b/plugins/outputs/prometheus_client/prometheus_client.go @@ -5,8 +5,8 @@ import ( "log" "net/http" - "github.com/influxdb/influxdb/client/v2" - "github.com/influxdb/telegraf/outputs" + "github.com/influxdata/influxdb/client/v2" + "github.com/influxdata/telegraf/plugins/outputs" "github.com/prometheus/client_golang/prometheus" ) diff --git a/outputs/prometheus_client/prometheus_client_test.go b/plugins/outputs/prometheus_client/prometheus_client_test.go similarity index 83% rename from outputs/prometheus_client/prometheus_client_test.go rename to plugins/outputs/prometheus_client/prometheus_client_test.go index 6bb1ec614..73163ee1d 100644 --- a/outputs/prometheus_client/prometheus_client_test.go +++ b/plugins/outputs/prometheus_client/prometheus_client_test.go @@ -3,11 +3,11 @@ package 
prometheus_client import ( "testing" - "github.com/influxdb/influxdb/client/v2" - "github.com/influxdb/telegraf/plugins/prometheus" - "github.com/influxdb/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/influxdb/client/v2" + "github.com/influxdata/telegraf/plugins/inputs/prometheus" + "github.com/influxdata/telegraf/testutil" ) var pTesting *PrometheusClient @@ -48,7 +48,8 @@ func TestPrometheusWritePointEmptyTag(t *testing.T) { require.NoError(t, p.Gather(&acc)) for _, e := range expected { - assert.NoError(t, acc.ValidateValue(e.name, e.value)) + acc.AssertContainsFields(t, "prometheus_"+e.name, + map[string]interface{}{"value": e.value}) } } @@ -88,7 +89,8 @@ func TestPrometheusWritePointTag(t *testing.T) { require.NoError(t, p.Gather(&acc)) for _, e := range expected { - assert.True(t, acc.CheckTaggedValue(e.name, e.value, tags)) + acc.AssertContainsFields(t, "prometheus_"+e.name, + map[string]interface{}{"value": e.value}) } } diff --git a/outputs/registry.go b/plugins/outputs/registry.go similarity index 96% rename from outputs/registry.go rename to plugins/outputs/registry.go index d7ea30492..d4c6ba1e5 100644 --- a/outputs/registry.go +++ b/plugins/outputs/registry.go @@ -1,7 +1,7 @@ package outputs import ( - "github.com/influxdb/influxdb/client/v2" + "github.com/influxdata/influxdb/client/v2" ) type Output interface { diff --git a/outputs/riemann/riemann.go b/plugins/outputs/riemann/riemann.go similarity index 64% rename from outputs/riemann/riemann.go rename to plugins/outputs/riemann/riemann.go index 1a02216e6..c1b22ec46 100644 --- a/outputs/riemann/riemann.go +++ b/plugins/outputs/riemann/riemann.go @@ -6,8 +6,8 @@ import ( "os" "github.com/amir/raidman" - "github.com/influxdb/influxdb/client/v2" - "github.com/influxdb/telegraf/outputs" + "github.com/influxdata/influxdb/client/v2" + "github.com/influxdata/telegraf/plugins/outputs" ) type Riemann struct { @@ -55,8 +55,10 @@ func (r *Riemann) Write(points []*client.Point) error { var events []*raidman.Event for _, p := range points { - ev := buildEvent(p) - events = append(events, ev) + evs := buildEvents(p) + for _, ev := range evs { + events = append(events, ev) + } } var senderr = r.client.SendMulti(events) @@ -68,24 +70,28 @@ func (r *Riemann) Write(points []*client.Point) error { return nil } -func buildEvent(p *client.Point) *raidman.Event { - host, ok := p.Tags()["host"] - if !ok { - hostname, err := os.Hostname() - if err != nil { - host = "unknown" - } else { - host = hostname +func buildEvents(p *client.Point) []*raidman.Event { + events := []*raidman.Event{} + for fieldName, value := range p.Fields() { + host, ok := p.Tags()["host"] + if !ok { + hostname, err := os.Hostname() + if err != nil { + host = "unknown" + } else { + host = hostname + } } + + event := &raidman.Event{ + Host: host, + Service: p.Name() + "_" + fieldName, + Metric: value, + } + events = append(events, event) } - var event = &raidman.Event{ - Host: host, - Service: p.Name(), - Metric: p.Fields()["value"], - } - - return event + return events } func init() { diff --git a/outputs/riemann/riemann_test.go b/plugins/outputs/riemann/riemann_test.go similarity index 90% rename from outputs/riemann/riemann_test.go rename to plugins/outputs/riemann/riemann_test.go index 31e9478b1..8b3f27ac0 100644 --- a/outputs/riemann/riemann_test.go +++ b/plugins/outputs/riemann/riemann_test.go @@ -3,7 +3,7 @@ package riemann import ( "testing" - "github.com/influxdb/telegraf/testutil" + 
"github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) diff --git a/plugins/phpfpm/README.md b/plugins/phpfpm/README.md deleted file mode 100644 index d2e52534c..000000000 --- a/plugins/phpfpm/README.md +++ /dev/null @@ -1,85 +0,0 @@ -# Telegraf plugin: phpfpm - -Get phpfpm stat using either HTTP status page or fpm socket. - -# Measurements - -Meta: - -- tags: `url= pool=poolname` - -Measurement names: - -- accepted_conn -- listen_queue -- max_listen_queue -- listen_queue_len -- idle_processes -- active_processes -- total_processes -- max_active_processes -- max_children_reached -- slow_requests - -# Example output - -Using this configuration: - -``` -[phpfpm] - # An array of address to gather stats about. Specify an ip on hostname - # with optional port and path. ie localhost, 10.10.3.33/server-status, etc. - # - # We can configure in three modes: - # - unixsocket: the string is the path to fpm socket like - # /var/run/php5-fpm.sock - # - http: the URL has to start with http:// or https:// - # - fcgi: the URL has to start with fcgi:// or cgi://, and socket port must present - # - # If no servers are specified, then default to 127.0.0.1/server-status - urls = ["http://localhost/status", "10.0.0.12:/var/run/php5-fpm-www2.sock", "fcgi://10.0.0.12:9000/status"] -``` - -When run with: - -``` -./telegraf -config telegraf.conf -filter phpfpm -test -``` - -It produces: - -``` -* Plugin: phpfpm, Collection 1 -> [url="10.0.0.12" pool="www"] phpfpm_idle_processes value=1 -> [url="10.0.0.12" pool="www"] phpfpm_total_processes value=2 -> [url="10.0.0.12" pool="www"] phpfpm_max_children_reached value=0 -> [url="10.0.0.12" pool="www"] phpfpm_max_listen_queue value=0 -> [url="10.0.0.12" pool="www"] phpfpm_listen_queue value=0 -> [url="10.0.0.12" pool="www"] phpfpm_listen_queue_len value=0 -> [url="10.0.0.12" pool="www"] phpfpm_active_processes value=1 -> [url="10.0.0.12" pool="www"] phpfpm_max_active_processes value=2 -> [url="10.0.0.12" pool="www"] phpfpm_slow_requests value=0 -> [url="10.0.0.12" pool="www"] phpfpm_accepted_conn value=305 - -> [url="localhost" pool="www2"] phpfpm_max_children_reached value=0 -> [url="localhost" pool="www2"] phpfpm_slow_requests value=0 -> [url="localhost" pool="www2"] phpfpm_max_listen_queue value=0 -> [url="localhost" pool="www2"] phpfpm_active_processes value=1 -> [url="localhost" pool="www2"] phpfpm_listen_queue_len value=0 -> [url="localhost" pool="www2"] phpfpm_idle_processes value=1 -> [url="localhost" pool="www2"] phpfpm_total_processes value=2 -> [url="localhost" pool="www2"] phpfpm_max_active_processes value=2 -> [url="localhost" pool="www2"] phpfpm_accepted_conn value=306 -> [url="localhost" pool="www2"] phpfpm_listen_queue value=0 - -> [url="10.0.0.12:9000" pool="www3"] phpfpm_max_children_reached value=0 -> [url="10.0.0.12:9000" pool="www3"] phpfpm_slow_requests value=1 -> [url="10.0.0.12:9000" pool="www3"] phpfpm_max_listen_queue value=0 -> [url="10.0.0.12:9000" pool="www3"] phpfpm_active_processes value=1 -> [url="10.0.0.12:9000" pool="www3"] phpfpm_listen_queue_len value=0 -> [url="10.0.0.12:9000" pool="www3"] phpfpm_idle_processes value=2 -> [url="10.0.0.12:9000" pool="www3"] phpfpm_total_processes value=2 -> [url="10.0.0.12:9000" pool="www3"] phpfpm_max_active_processes value=2 -> [url="10.0.0.12:9000" pool="www3"] phpfpm_accepted_conn value=307 -> [url="10.0.0.12:9000" pool="www3"] phpfpm_listen_queue value=0 -``` diff --git a/plugins/phpfpm/phpfpm.go b/plugins/phpfpm/phpfpm.go deleted file mode 100644 index 
8408c86f7..000000000 --- a/plugins/phpfpm/phpfpm.go +++ /dev/null @@ -1,213 +0,0 @@ -package phpfpm - -import ( - "bufio" - "bytes" - "fmt" - "io" - "net/http" - "net/url" - "strconv" - "strings" - "sync" - - "github.com/influxdb/telegraf/plugins" -) - -const ( - PF_POOL = "pool" - PF_PROCESS_MANAGER = "process manager" - PF_ACCEPTED_CONN = "accepted conn" - PF_LISTEN_QUEUE = "listen queue" - PF_MAX_LISTEN_QUEUE = "max listen queue" - PF_LISTEN_QUEUE_LEN = "listen queue len" - PF_IDLE_PROCESSES = "idle processes" - PF_ACTIVE_PROCESSES = "active processes" - PF_TOTAL_PROCESSES = "total processes" - PF_MAX_ACTIVE_PROCESSES = "max active processes" - PF_MAX_CHILDREN_REACHED = "max children reached" - PF_SLOW_REQUESTS = "slow requests" -) - -type metric map[string]int64 -type poolStat map[string]metric - -type phpfpm struct { - Urls []string - - client *http.Client -} - -var sampleConfig = ` - # An array of addresses to gather stats about. Specify an ip or hostname - # with optional port and path. - # - # Plugin can be configured in three modes (all can be used): - # - http: the URL must start with http:// or https://, ex: - # "http://localhost/status" - # "http://192.168.130.1/status?full" - # - unixsocket: path to fpm socket, ex: - # "/var/run/php5-fpm.sock" - # "192.168.10.10:/var/run/php5-fpm-www2.sock" - # - fcgi: the URL must start with fcgi:// or cgi://, and the port must be present, ex: - # "fcgi://10.0.0.12:9000/status" - # "cgi://10.0.10.12:9001/status" - # - # If no servers are specified, then default to 127.0.0.1/server-status - urls = ["http://localhost/status"] -` - -func (r *phpfpm) SampleConfig() string { - return sampleConfig -} - -func (r *phpfpm) Description() string { - return "Read metrics of phpfpm, via HTTP status page or socket (pending)" -} - -// Reads stats from all configured servers and accumulates stats. -// Returns one of the errors encountered while gathering stats (if any).
-func (g *phpfpm) Gather(acc plugins.Accumulator) error { - if len(g.Urls) == 0 { - return g.gatherServer("http://127.0.0.1/status", acc) - } - - var wg sync.WaitGroup - - var outerr error - - for _, serv := range g.Urls { - wg.Add(1) - go func(serv string) { - defer wg.Done() - outerr = g.gatherServer(serv, acc) - }(serv) - } - - wg.Wait() - - return outerr -} - -// Request status page to get raw stat data -func (g *phpfpm) gatherServer(addr string, acc plugins.Accumulator) error { - if g.client == nil { - - client := &http.Client{} - g.client = client - } - - if strings.HasPrefix(addr, "http://") || strings.HasPrefix(addr, "https://") { - u, err := url.Parse(addr) - if err != nil { - return fmt.Errorf("Unable to parse server address '%s': %s", addr, err) - } - - req, err := http.NewRequest("GET", fmt.Sprintf("%s://%s%s", u.Scheme, - u.Host, u.Path), nil) - res, err := g.client.Do(req) - if err != nil { - return fmt.Errorf("Unable to connect to phpfpm status page '%s': %v", - addr, err) - } - - if res.StatusCode != 200 { - return fmt.Errorf("Unable to get valid stat result from '%s': %v", - addr, err) - } - - importMetric(res.Body, acc, u.Host) - } else { - var ( - fcgi *FCGIClient - fcgiAddr string - ) - if strings.HasPrefix(addr, "fcgi://") || strings.HasPrefix(addr, "cgi://") { - u, err := url.Parse(addr) - if err != nil { - return fmt.Errorf("Unable to parse server address '%s': %s", addr, err) - } - socketAddr := strings.Split(u.Host, ":") - fcgiIp := socketAddr[0] - fcgiPort, _ := strconv.Atoi(socketAddr[1]) - fcgiAddr = u.Host - fcgi, _ = NewClient(fcgiIp, fcgiPort) - } else { - socketAddr := strings.Split(addr, ":") - fcgiAddr = socketAddr[0] - fcgi, _ = NewClient("unix", socketAddr[1]) - } - resOut, resErr, err := fcgi.Request(map[string]string{ - "SCRIPT_NAME": "/status", - "SCRIPT_FILENAME": "status", - "REQUEST_METHOD": "GET", - }, "") - - if len(resErr) == 0 && err == nil { - importMetric(bytes.NewReader(resOut), acc, fcgiAddr) - } - - } - - return nil -} - -// Import HTTP stat data into Telegraf system -func importMetric(r io.Reader, acc plugins.Accumulator, host string) (poolStat, error) { - stats := make(poolStat) - var currentPool string - - scanner := bufio.NewScanner(r) - for scanner.Scan() { - statLine := scanner.Text() - keyvalue := strings.Split(statLine, ":") - - if len(keyvalue) < 2 { - continue - } - fieldName := strings.Trim(keyvalue[0], " ") - // We start to gather data for a new pool here - if fieldName == PF_POOL { - currentPool = strings.Trim(keyvalue[1], " ") - stats[currentPool] = make(metric) - continue - } - - // Start to parse metric for current pool - switch fieldName { - case PF_ACCEPTED_CONN, - PF_LISTEN_QUEUE, - PF_MAX_LISTEN_QUEUE, - PF_LISTEN_QUEUE_LEN, - PF_IDLE_PROCESSES, - PF_ACTIVE_PROCESSES, - PF_TOTAL_PROCESSES, - PF_MAX_ACTIVE_PROCESSES, - PF_MAX_CHILDREN_REACHED, - PF_SLOW_REQUESTS: - fieldValue, err := strconv.ParseInt(strings.Trim(keyvalue[1], " "), 10, 64) - if err == nil { - stats[currentPool][fieldName] = fieldValue - } - } - } - - // Finally, we push the pool metric - for pool := range stats { - tags := map[string]string{ - "url": host, - "pool": pool, - } - for k, v := range stats[pool] { - acc.Add(strings.Replace(k, " ", "_", -1), v, tags) - } - } - - return stats, nil -} - -func init() { - plugins.Add("phpfpm", func() plugins.Plugin { - return &phpfpm{} - }) -} diff --git a/plugins/phpfpm/phpfpm_test.go b/plugins/phpfpm/phpfpm_test.go deleted file mode 100644 index 8fa192806..000000000 --- a/plugins/phpfpm/phpfpm_test.go +++ /dev/null @@
-1,85 +0,0 @@ -package phpfpm - -import ( - "fmt" - "testing" - - "github.com/influxdb/telegraf/testutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "net/http" - "net/http/httptest" -) - -func TestPhpFpmGeneratesMetrics(t *testing.T) { - //We create a fake server to return test data - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprint(w, outputSample) - })) - defer ts.Close() - - //Now we test against the above server, with our authentication data - r := &phpfpm{ - Urls: []string{ts.URL}, - } - - var acc testutil.Accumulator - - err := r.Gather(&acc) - require.NoError(t, err) - - tags := map[string]string{ - "url": ts.Listener.Addr().String(), - "pool": "www", - } - assert.NoError(t, acc.ValidateTaggedValue("accepted_conn", int64(3), tags)) - - checkInt := []struct { - name string - value int64 - }{ - {"accepted_conn", 3}, - {"listen_queue", 1}, - {"max_listen_queue", 0}, - {"listen_queue_len", 0}, - {"idle_processes", 1}, - {"active_processes", 1}, - {"total_processes", 2}, - {"max_active_processes", 1}, - {"max_children_reached", 2}, - {"slow_requests", 1}, - } - - for _, c := range checkInt { - assert.Equal(t, true, acc.CheckValue(c.name, c.value)) - } -} - -//When not passing server config, we default to localhost -//We just want to make sure we did request stats from localhost -func TestHaproxyDefaultGetFromLocalhost(t *testing.T) { - r := &phpfpm{} - - var acc testutil.Accumulator - - err := r.Gather(&acc) - require.Error(t, err) - assert.Contains(t, err.Error(), "127.0.0.1/status") -} - -const outputSample = ` -pool: www -process manager: dynamic -start time: 11/Oct/2015:23:38:51 +0000 -start since: 1991 -accepted conn: 3 -listen queue: 1 -max listen queue: 0 -listen queue len: 0 -idle processes: 1 -active processes: 1 -total processes: 2 -max active processes: 1 -max children reached: 2 -slow requests: 1 -` diff --git a/plugins/puppetagent/puppetagent_test.go b/plugins/puppetagent/puppetagent_test.go deleted file mode 100644 index 4d6a4c5f4..000000000 --- a/plugins/puppetagent/puppetagent_test.go +++ /dev/null @@ -1,74 +0,0 @@ -package puppetagent - -import ( - "github.com/influxdb/telegraf/testutil" - "github.com/stretchr/testify/assert" - "testing" -) - -func TestGather(t *testing.T) { - var acc testutil.Accumulator - - pa := PuppetAgent{ - Location: "last_run_summary.yaml", - } - pa.Gather(&acc) - - checkInt := []struct { - name string - value int64 - }{ - {"events_failure", 0}, - {"events_total", 0}, - {"events_success", 0}, - {"resources_failed", 0}, - {"resources_scheduled", 0}, - {"resources_changed", 0}, - {"resources_skipped", 0}, - {"resources_total", 109}, - {"resources_failedtorestart", 0}, - {"resources_restarted", 0}, - {"resources_outofsync", 0}, - {"changes_total", 0}, - {"time_lastrun", 1444936531}, - {"version_config", 1444936521}, - } - - for _, c := range checkInt { - assert.Equal(t, true, acc.CheckValue(c.name, c.value)) - } - - checkFloat := []struct { - name string - value float64 - }{ - {"time_user", 0.004331}, - {"time_schedule", 0.001123}, - {"time_filebucket", 0.000353}, - {"time_file", 0.441472}, - {"time_exec", 0.508123}, - {"time_anchor", 0.000555}, - {"time_sshauthorizedkey", 0.000764}, - {"time_service", 1.807795}, - {"time_package", 1.325788}, - {"time_total", 8.85354707064819}, - {"time_configretrieval", 4.75567007064819}, - {"time_cron", 0.000584}, - } - - for _, f := range checkFloat { - assert.Equal(t, true, acc.CheckValue(f.name, f.value)) - } - - checkString :=
[]struct { - name string - value string - }{ - {"version_puppet", "3.7.5"}, - } - - for _, s := range checkString { - assert.Equal(t, true, acc.CheckValue(s.name, s.value)) - } - -} diff --git a/plugins/system/docker.go b/plugins/system/docker.go deleted file mode 100644 index 94f8ad059..000000000 --- a/plugins/system/docker.go +++ /dev/null @@ -1,86 +0,0 @@ -// +build linux - -package system - -import ( - "fmt" - - "github.com/influxdb/telegraf/plugins" -) - -type DockerStats struct { - ps PS -} - -func (_ *DockerStats) Description() string { - return "Read metrics about docker containers" -} - -func (_ *DockerStats) SampleConfig() string { return "" } - -func (s *DockerStats) Gather(acc plugins.Accumulator) error { - containers, err := s.ps.DockerStat() - if err != nil { - return fmt.Errorf("error getting docker info: %s", err) - } - - for _, cont := range containers { - tags := map[string]string{ - "id": cont.Id, - "name": cont.Name, - "command": cont.Command, - } - for k, v := range cont.Labels { - tags[k] = v - } - - cts := cont.CPU - - acc.Add("user", cts.User, tags) - acc.Add("system", cts.System, tags) - acc.Add("idle", cts.Idle, tags) - acc.Add("nice", cts.Nice, tags) - acc.Add("iowait", cts.Iowait, tags) - acc.Add("irq", cts.Irq, tags) - acc.Add("softirq", cts.Softirq, tags) - acc.Add("steal", cts.Steal, tags) - acc.Add("guest", cts.Guest, tags) - acc.Add("guest_nice", cts.GuestNice, tags) - - acc.Add("cache", cont.Mem.Cache, tags) - acc.Add("rss", cont.Mem.RSS, tags) - acc.Add("rss_huge", cont.Mem.RSSHuge, tags) - acc.Add("mapped_file", cont.Mem.MappedFile, tags) - acc.Add("swap_in", cont.Mem.Pgpgin, tags) - acc.Add("swap_out", cont.Mem.Pgpgout, tags) - acc.Add("page_fault", cont.Mem.Pgfault, tags) - acc.Add("page_major_fault", cont.Mem.Pgmajfault, tags) - acc.Add("inactive_anon", cont.Mem.InactiveAnon, tags) - acc.Add("active_anon", cont.Mem.ActiveAnon, tags) - acc.Add("inactive_file", cont.Mem.InactiveFile, tags) - acc.Add("active_file", cont.Mem.ActiveFile, tags) - acc.Add("unevictable", cont.Mem.Unevictable, tags) - acc.Add("memory_limit", cont.Mem.HierarchicalMemoryLimit, tags) - acc.Add("total_cache", cont.Mem.TotalCache, tags) - acc.Add("total_rss", cont.Mem.TotalRSS, tags) - acc.Add("total_rss_huge", cont.Mem.TotalRSSHuge, tags) - acc.Add("total_mapped_file", cont.Mem.TotalMappedFile, tags) - acc.Add("total_swap_in", cont.Mem.TotalPgpgIn, tags) - acc.Add("total_swap_out", cont.Mem.TotalPgpgOut, tags) - acc.Add("total_page_fault", cont.Mem.TotalPgFault, tags) - acc.Add("total_page_major_fault", cont.Mem.TotalPgMajFault, tags) - acc.Add("total_inactive_anon", cont.Mem.TotalInactiveAnon, tags) - acc.Add("total_active_anon", cont.Mem.TotalActiveAnon, tags) - acc.Add("total_inactive_file", cont.Mem.TotalInactiveFile, tags) - acc.Add("total_active_file", cont.Mem.TotalActiveFile, tags) - acc.Add("total_unevictable", cont.Mem.TotalUnevictable, tags) - } - - return nil -} - -func init() { - plugins.Add("docker", func() plugins.Plugin { - return &DockerStats{ps: &systemPS{}} - }) -} diff --git a/plugins/system/docker_test.go b/plugins/system/docker_test.go deleted file mode 100644 index 1fbf76d10..000000000 --- a/plugins/system/docker_test.go +++ /dev/null @@ -1,116 +0,0 @@ -// +build linux - -package system - -import ( - "testing" - - "github.com/influxdb/telegraf/testutil" - "github.com/shirou/gopsutil/cpu" - "github.com/shirou/gopsutil/docker" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestDockerStats_GenerateStats(t *testing.T) 
{ - var mps MockPS - var acc testutil.Accumulator - - ds := &DockerContainerStat{ - Name: "blah", - CPU: &cpu.CPUTimesStat{ - CPU: "all", - User: 3.1, - System: 8.2, - Idle: 80.1, - Nice: 1.3, - Iowait: 0.2, - Irq: 0.1, - Softirq: 0.11, - Steal: 0.0001, - Guest: 8.1, - GuestNice: 0.324, - }, - Mem: &docker.CgroupMemStat{ - ContainerID: "blah", - Cache: 1, - RSS: 2, - RSSHuge: 3, - MappedFile: 4, - Pgpgin: 5, - Pgpgout: 6, - Pgfault: 7, - Pgmajfault: 8, - InactiveAnon: 9, - ActiveAnon: 10, - InactiveFile: 11, - ActiveFile: 12, - Unevictable: 13, - HierarchicalMemoryLimit: 14, - TotalCache: 15, - TotalRSS: 16, - TotalRSSHuge: 17, - TotalMappedFile: 18, - TotalPgpgIn: 19, - TotalPgpgOut: 20, - TotalPgFault: 21, - TotalPgMajFault: 22, - TotalInactiveAnon: 23, - TotalActiveAnon: 24, - TotalInactiveFile: 25, - TotalActiveFile: 26, - TotalUnevictable: 27, - }, - } - - mps.On("DockerStat").Return([]*DockerContainerStat{ds}, nil) - - err := (&DockerStats{&mps}).Gather(&acc) - require.NoError(t, err) - - dockertags := map[string]string{ - "name": "blah", - "id": "", - "command": "", - } - - assert.True(t, acc.CheckTaggedValue("user", 3.1, dockertags)) - assert.True(t, acc.CheckTaggedValue("system", 8.2, dockertags)) - assert.True(t, acc.CheckTaggedValue("idle", 80.1, dockertags)) - assert.True(t, acc.CheckTaggedValue("nice", 1.3, dockertags)) - assert.True(t, acc.CheckTaggedValue("iowait", 0.2, dockertags)) - assert.True(t, acc.CheckTaggedValue("irq", 0.1, dockertags)) - assert.True(t, acc.CheckTaggedValue("softirq", 0.11, dockertags)) - assert.True(t, acc.CheckTaggedValue("steal", 0.0001, dockertags)) - assert.True(t, acc.CheckTaggedValue("guest", 8.1, dockertags)) - assert.True(t, acc.CheckTaggedValue("guest_nice", 0.324, dockertags)) - - assert.True(t, acc.CheckTaggedValue("cache", uint64(1), dockertags)) - assert.True(t, acc.CheckTaggedValue("rss", uint64(2), dockertags)) - assert.True(t, acc.CheckTaggedValue("rss_huge", uint64(3), dockertags)) - assert.True(t, acc.CheckTaggedValue("mapped_file", uint64(4), dockertags)) - assert.True(t, acc.CheckTaggedValue("swap_in", uint64(5), dockertags)) - assert.True(t, acc.CheckTaggedValue("swap_out", uint64(6), dockertags)) - assert.True(t, acc.CheckTaggedValue("page_fault", uint64(7), dockertags)) - assert.True(t, acc.CheckTaggedValue("page_major_fault", uint64(8), dockertags)) - assert.True(t, acc.CheckTaggedValue("inactive_anon", uint64(9), dockertags)) - assert.True(t, acc.CheckTaggedValue("active_anon", uint64(10), dockertags)) - assert.True(t, acc.CheckTaggedValue("inactive_file", uint64(11), dockertags)) - assert.True(t, acc.CheckTaggedValue("active_file", uint64(12), dockertags)) - assert.True(t, acc.CheckTaggedValue("unevictable", uint64(13), dockertags)) - assert.True(t, acc.CheckTaggedValue("memory_limit", uint64(14), dockertags)) - assert.True(t, acc.CheckTaggedValue("total_cache", uint64(15), dockertags)) - assert.True(t, acc.CheckTaggedValue("total_rss", uint64(16), dockertags)) - assert.True(t, acc.CheckTaggedValue("total_rss_huge", uint64(17), dockertags)) - assert.True(t, acc.CheckTaggedValue("total_mapped_file", uint64(18), dockertags)) - assert.True(t, acc.CheckTaggedValue("total_swap_in", uint64(19), dockertags)) - assert.True(t, acc.CheckTaggedValue("total_swap_out", uint64(20), dockertags)) - assert.True(t, acc.CheckTaggedValue("total_page_fault", uint64(21), dockertags)) - assert.True(t, acc.CheckTaggedValue("total_page_major_fault", uint64(22), dockertags)) - assert.True(t, acc.CheckTaggedValue("total_inactive_anon", uint64(23), 
dockertags)) - assert.True(t, acc.CheckTaggedValue("total_active_anon", uint64(24), dockertags)) - assert.True(t, acc.CheckTaggedValue("total_inactive_file", uint64(25), dockertags)) - assert.True(t, acc.CheckTaggedValue("total_active_file", uint64(26), dockertags)) - assert.True(t, acc.CheckTaggedValue("total_unevictable", uint64(27), dockertags)) -} diff --git a/plugins/system/memory.go b/plugins/system/memory.go deleted file mode 100644 index 11e7afbb1..000000000 --- a/plugins/system/memory.go +++ /dev/null @@ -1,77 +0,0 @@ -package system - -import ( - "fmt" - - "github.com/influxdb/telegraf/plugins" -) - -type MemStats struct { - ps PS -} - -func (_ *MemStats) Description() string { - return "Read metrics about memory usage" -} - -func (_ *MemStats) SampleConfig() string { return "" } - -func (s *MemStats) Gather(acc plugins.Accumulator) error { - vm, err := s.ps.VMStat() - if err != nil { - return fmt.Errorf("error getting virtual memory info: %s", err) - } - - vmtags := map[string]string(nil) - - acc.Add("total", vm.Total, vmtags) - acc.Add("available", vm.Available, vmtags) - acc.Add("used", vm.Used, vmtags) - acc.Add("free", vm.Free, vmtags) - acc.Add("cached", vm.Cached, vmtags) - acc.Add("buffered", vm.Buffers, vmtags) - acc.Add("used_percent", 100*float64(vm.Used)/float64(vm.Total), vmtags) - acc.Add("available_percent", - 100*float64(vm.Available)/float64(vm.Total), - vmtags) - - return nil -} - -type SwapStats struct { - ps PS -} - -func (_ *SwapStats) Description() string { - return "Read metrics about swap memory usage" -} - -func (_ *SwapStats) SampleConfig() string { return "" } - -func (s *SwapStats) Gather(acc plugins.Accumulator) error { - swap, err := s.ps.SwapStat() - if err != nil { - return fmt.Errorf("error getting swap memory info: %s", err) - } - - swaptags := map[string]string(nil) - - acc.Add("total", swap.Total, swaptags) - acc.Add("used", swap.Used, swaptags) - acc.Add("free", swap.Free, swaptags) - acc.Add("used_percent", swap.UsedPercent, swaptags) - acc.Add("in", swap.Sin, swaptags) - acc.Add("out", swap.Sout, swaptags) - - return nil -} - -func init() { - plugins.Add("mem", func() plugins.Plugin { - return &MemStats{ps: &systemPS{}} - }) - - plugins.Add("swap", func() plugins.Plugin { - return &SwapStats{ps: &systemPS{}} - }) -} diff --git a/plugins/system/system_test.go b/plugins/system/system_test.go deleted file mode 100644 index fca1d2c35..000000000 --- a/plugins/system/system_test.go +++ /dev/null @@ -1,426 +0,0 @@ -package system - -import ( - "fmt" - "reflect" - "syscall" - "testing" - - "github.com/influxdb/telegraf/testutil" - "github.com/shirou/gopsutil/cpu" - "github.com/shirou/gopsutil/disk" - "github.com/shirou/gopsutil/mem" - "github.com/shirou/gopsutil/net" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestSystemStats_GenerateStats(t *testing.T) { - var mps MockPS - - defer mps.AssertExpectations(t) - - var acc testutil.Accumulator - - cts := cpu.CPUTimesStat{ - CPU: "cpu0", - User: 3.1, - System: 8.2, - Idle: 80.1, - Nice: 1.3, - Iowait: 0.2, - Irq: 0.1, - Softirq: 0.11, - Steal: 0.0511, - Guest: 8.1, - GuestNice: 0.324, - } - - cts2 := cpu.CPUTimesStat{ - CPU: "cpu0", - User: 11.4, // increased by 8.3 - System: 10.9, // increased by 2.7 - Idle: 158.8699, // increased by 78.7699 (for total increase of 100) - Nice: 2.5, // increased by 1.2 - Iowait: 0.7, // increased by 0.5 - Irq: 1.2, // increased by 1.1 - Softirq: 0.31, // increased by 0.2 - Steal: 0.2812, // increased by 0.0001 - Guest: 12.9, 
// increased by 4.8 - GuestNice: 2.524, // increased by 2.2 - } - - mps.On("CPUTimes").Return([]cpu.CPUTimesStat{cts}, nil) - - du := []*disk.DiskUsageStat{ - { - Path: "/", - Fstype: "ext4", - Total: 128, - Free: 23, - InodesTotal: 1234, - InodesFree: 234, - }, - { - Path: "/home", - Fstype: "ext4", - Total: 256, - Free: 46, - InodesTotal: 2468, - InodesFree: 468, - }, - } - - mps.On("DiskUsage").Return(du, nil) - - diskio1 := disk.DiskIOCountersStat{ - - ReadCount: 888, - WriteCount: 5341, - ReadBytes: 100000, - WriteBytes: 200000, - ReadTime: 7123, - WriteTime: 9087, - Name: "sda1", - IoTime: 123552, - SerialNumber: "ab-123-ad", - } - diskio2 := disk.DiskIOCountersStat{ - ReadCount: 444, - WriteCount: 2341, - ReadBytes: 200000, - WriteBytes: 400000, - ReadTime: 3123, - WriteTime: 6087, - Name: "sdb1", - IoTime: 246552, - SerialNumber: "bb-123-ad", - } - - mps.On("DiskIO").Return(map[string]disk.DiskIOCountersStat{"sda1": diskio1, "sdb1": diskio2}, nil) - - netio := net.NetIOCountersStat{ - Name: "eth0", - BytesSent: 1123, - BytesRecv: 8734422, - PacketsSent: 781, - PacketsRecv: 23456, - Errin: 832, - Errout: 8, - Dropin: 7, - Dropout: 1, - } - - mps.On("NetIO").Return([]net.NetIOCountersStat{netio}, nil) - - netprotos := []net.NetProtoCountersStat{ - net.NetProtoCountersStat{ - Protocol: "Udp", - Stats: map[string]int64{ - "InDatagrams": 4655, - "NoPorts": 892592, - }, - }, - } - mps.On("NetProto").Return(netprotos, nil) - - vms := &mem.VirtualMemoryStat{ - Total: 12400, - Available: 7600, - Used: 5000, - Free: 1235, - // Active: 8134, - // Inactive: 1124, - // Buffers: 771, - // Cached: 4312, - // Wired: 134, - // Shared: 2142, - } - - mps.On("VMStat").Return(vms, nil) - - sms := &mem.SwapMemoryStat{ - Total: 8123, - Used: 1232, - Free: 6412, - UsedPercent: 12.2, - Sin: 7, - Sout: 830, - } - - mps.On("SwapStat").Return(sms, nil) - - netstats := []net.NetConnectionStat{ - net.NetConnectionStat{ - Type: syscall.SOCK_DGRAM, - }, - net.NetConnectionStat{ - Status: "ESTABLISHED", - }, - net.NetConnectionStat{ - Status: "ESTABLISHED", - }, - net.NetConnectionStat{ - Status: "CLOSE", - }, - } - - mps.On("NetConnections").Return(netstats, nil) - - cs := NewCPUStats(&mps) - - cputags := map[string]string{ - "cpu": "cpu0", - } - - preCPUPoints := len(acc.Points) - err := cs.Gather(&acc) - require.NoError(t, err) - numCPUPoints := len(acc.Points) - preCPUPoints - - expectedCPUPoints := 10 - assert.Equal(t, expectedCPUPoints, numCPUPoints) - - // Computed values are checked with delta > 0 because of floating point arithmetic - // imprecision - assertContainsTaggedFloat(t, &acc, "time_user", 3.1, 0, cputags) - assertContainsTaggedFloat(t, &acc, "time_system", 8.2, 0, cputags) - assertContainsTaggedFloat(t, &acc, "time_idle", 80.1, 0, cputags) - assertContainsTaggedFloat(t, &acc, "time_nice", 1.3, 0, cputags) - assertContainsTaggedFloat(t, &acc, "time_iowait", 0.2, 0, cputags) - assertContainsTaggedFloat(t, &acc, "time_irq", 0.1, 0, cputags) - assertContainsTaggedFloat(t, &acc, "time_softirq", 0.11, 0, cputags) - assertContainsTaggedFloat(t, &acc, "time_steal", 0.0511, 0, cputags) - assertContainsTaggedFloat(t, &acc, "time_guest", 8.1, 0, cputags) - assertContainsTaggedFloat(t, &acc, "time_guest_nice", 0.324, 0, cputags) - - mps2 := MockPS{} - mps2.On("CPUTimes").Return([]cpu.CPUTimesStat{cts2}, nil) - cs.ps = &mps2 - - // Should have added cpu percentages too - err = cs.Gather(&acc) - require.NoError(t, err) - - numCPUPoints = len(acc.Points) - (preCPUPoints + numCPUPoints) - expectedCPUPoints = 20
- assert.Equal(t, expectedCPUPoints, numCPUPoints) - - assertContainsTaggedFloat(t, &acc, "time_user", 11.4, 0, cputags) - assertContainsTaggedFloat(t, &acc, "time_system", 10.9, 0, cputags) - assertContainsTaggedFloat(t, &acc, "time_idle", 158.8699, 0, cputags) - assertContainsTaggedFloat(t, &acc, "time_nice", 2.5, 0, cputags) - assertContainsTaggedFloat(t, &acc, "time_iowait", 0.7, 0, cputags) - assertContainsTaggedFloat(t, &acc, "time_irq", 1.2, 0, cputags) - assertContainsTaggedFloat(t, &acc, "time_softirq", 0.31, 0, cputags) - assertContainsTaggedFloat(t, &acc, "time_steal", 0.2812, 0, cputags) - assertContainsTaggedFloat(t, &acc, "time_guest", 12.9, 0, cputags) - assertContainsTaggedFloat(t, &acc, "time_guest_nice", 2.524, 0, cputags) - - assertContainsTaggedFloat(t, &acc, "usage_user", 8.3, 0.0005, cputags) - assertContainsTaggedFloat(t, &acc, "usage_system", 2.7, 0.0005, cputags) - assertContainsTaggedFloat(t, &acc, "usage_idle", 78.7699, 0.0005, cputags) - assertContainsTaggedFloat(t, &acc, "usage_nice", 1.2, 0.0005, cputags) - assertContainsTaggedFloat(t, &acc, "usage_iowait", 0.5, 0.0005, cputags) - assertContainsTaggedFloat(t, &acc, "usage_irq", 1.1, 0.0005, cputags) - assertContainsTaggedFloat(t, &acc, "usage_softirq", 0.2, 0.0005, cputags) - assertContainsTaggedFloat(t, &acc, "usage_steal", 0.2301, 0.0005, cputags) - assertContainsTaggedFloat(t, &acc, "usage_guest", 4.8, 0.0005, cputags) - assertContainsTaggedFloat(t, &acc, "usage_guest_nice", 2.2, 0.0005, cputags) - - preDiskPoints := len(acc.Points) - - err = (&DiskStats{ps: &mps}).Gather(&acc) - require.NoError(t, err) - - numDiskPoints := len(acc.Points) - preDiskPoints - expectedAllDiskPoints := 12 - assert.Equal(t, expectedAllDiskPoints, numDiskPoints) - - tags1 := map[string]string{ - "path": "/", - "fstype": "ext4", - } - tags2 := map[string]string{ - "path": "/home", - "fstype": "ext4", - } - - assert.True(t, acc.CheckTaggedValue("total", uint64(128), tags1)) - assert.True(t, acc.CheckTaggedValue("used", uint64(105), tags1)) - assert.True(t, acc.CheckTaggedValue("free", uint64(23), tags1)) - assert.True(t, acc.CheckTaggedValue("inodes_total", uint64(1234), tags1)) - assert.True(t, acc.CheckTaggedValue("inodes_free", uint64(234), tags1)) - assert.True(t, acc.CheckTaggedValue("inodes_used", uint64(1000), tags1)) - assert.True(t, acc.CheckTaggedValue("total", uint64(256), tags2)) - assert.True(t, acc.CheckTaggedValue("used", uint64(210), tags2)) - assert.True(t, acc.CheckTaggedValue("free", uint64(46), tags2)) - assert.True(t, acc.CheckTaggedValue("inodes_total", uint64(2468), tags2)) - assert.True(t, acc.CheckTaggedValue("inodes_free", uint64(468), tags2)) - assert.True(t, acc.CheckTaggedValue("inodes_used", uint64(2000), tags2)) - - // We expect 6 more DiskPoints to show up with an explicit match on "/" - // and /home not matching the /dev in Mountpoints - err = (&DiskStats{ps: &mps, Mountpoints: []string{"/", "/dev"}}).Gather(&acc) - assert.Equal(t, preDiskPoints+expectedAllDiskPoints+6, len(acc.Points)) - - // We should see all the diskpoints as Mountpoints includes both - // / and /home - err = (&DiskStats{ps: &mps, Mountpoints: []string{"/", "/home"}}).Gather(&acc) - assert.Equal(t, preDiskPoints+2*expectedAllDiskPoints+6, len(acc.Points)) - - err = (&NetIOStats{ps: &mps, skipChecks: true}).Gather(&acc) - require.NoError(t, err) - - ntags := map[string]string{ - "interface": "eth0", - } - - assert.NoError(t, acc.ValidateTaggedValue("bytes_sent", uint64(1123), ntags)) - assert.NoError(t, 
acc.ValidateTaggedValue("bytes_recv", uint64(8734422), ntags)) - assert.NoError(t, acc.ValidateTaggedValue("packets_sent", uint64(781), ntags)) - assert.NoError(t, acc.ValidateTaggedValue("packets_recv", uint64(23456), ntags)) - assert.NoError(t, acc.ValidateTaggedValue("err_in", uint64(832), ntags)) - assert.NoError(t, acc.ValidateTaggedValue("err_out", uint64(8), ntags)) - assert.NoError(t, acc.ValidateTaggedValue("drop_in", uint64(7), ntags)) - assert.NoError(t, acc.ValidateTaggedValue("drop_out", uint64(1), ntags)) - assert.NoError(t, acc.ValidateValue("udp_noports", int64(892592))) - assert.NoError(t, acc.ValidateValue("udp_indatagrams", int64(4655))) - - preDiskIOPoints := len(acc.Points) - - err = (&DiskIOStats{ps: &mps}).Gather(&acc) - require.NoError(t, err) - - numDiskIOPoints := len(acc.Points) - preDiskIOPoints - expectedAllDiskIOPoints := 14 - assert.Equal(t, expectedAllDiskIOPoints, numDiskIOPoints) - - dtags1 := map[string]string{ - "name": "sda1", - "serial": "ab-123-ad", - } - dtags2 := map[string]string{ - "name": "sdb1", - "serial": "bb-123-ad", - } - - assert.True(t, acc.CheckTaggedValue("reads", uint64(888), dtags1)) - assert.True(t, acc.CheckTaggedValue("writes", uint64(5341), dtags1)) - assert.True(t, acc.CheckTaggedValue("read_bytes", uint64(100000), dtags1)) - assert.True(t, acc.CheckTaggedValue("write_bytes", uint64(200000), dtags1)) - assert.True(t, acc.CheckTaggedValue("read_time", uint64(7123), dtags1)) - assert.True(t, acc.CheckTaggedValue("write_time", uint64(9087), dtags1)) - assert.True(t, acc.CheckTaggedValue("io_time", uint64(123552), dtags1)) - assert.True(t, acc.CheckTaggedValue("reads", uint64(444), dtags2)) - assert.True(t, acc.CheckTaggedValue("writes", uint64(2341), dtags2)) - assert.True(t, acc.CheckTaggedValue("read_bytes", uint64(200000), dtags2)) - assert.True(t, acc.CheckTaggedValue("write_bytes", uint64(400000), dtags2)) - assert.True(t, acc.CheckTaggedValue("read_time", uint64(3123), dtags2)) - assert.True(t, acc.CheckTaggedValue("write_time", uint64(6087), dtags2)) - assert.True(t, acc.CheckTaggedValue("io_time", uint64(246552), dtags2)) - - // We expect 7 more DiskIOPoints to show up with an explicit match on "sdb1" - // and serial should be missing from the tags with SkipSerialNumber set - err = (&DiskIOStats{ps: &mps, Devices: []string{"sdb1"}, SkipSerialNumber: true}).Gather(&acc) - assert.Equal(t, preDiskIOPoints+expectedAllDiskIOPoints+7, len(acc.Points)) - - dtags3 := map[string]string{ - "name": "sdb1", - } - - assert.True(t, acc.CheckTaggedValue("reads", uint64(444), dtags3)) - assert.True(t, acc.CheckTaggedValue("writes", uint64(2341), dtags3)) - assert.True(t, acc.CheckTaggedValue("read_bytes", uint64(200000), dtags3)) - assert.True(t, acc.CheckTaggedValue("write_bytes", uint64(400000), dtags3)) - assert.True(t, acc.CheckTaggedValue("read_time", uint64(3123), dtags3)) - assert.True(t, acc.CheckTaggedValue("write_time", uint64(6087), dtags3)) - assert.True(t, acc.CheckTaggedValue("io_time", uint64(246552), dtags3)) - - err = (&MemStats{&mps}).Gather(&acc) - require.NoError(t, err) - - vmtags := map[string]string(nil) - - assert.True(t, acc.CheckTaggedValue("total", uint64(12400), vmtags)) - assert.True(t, acc.CheckTaggedValue("available", uint64(7600), vmtags)) - assert.True(t, acc.CheckTaggedValue("used", uint64(5000), vmtags)) - assert.True(t, acc.CheckTaggedValue("available_percent", - float64(7600)/float64(12400)*100, - vmtags)) - assert.True(t, acc.CheckTaggedValue("used_percent", - float64(5000)/float64(12400)*100, - vmtags)) 
- assert.True(t, acc.CheckTaggedValue("free", uint64(1235), vmtags)) - - acc.Points = nil - - err = (&SwapStats{&mps}).Gather(&acc) - require.NoError(t, err) - - swaptags := map[string]string(nil) - - assert.NoError(t, acc.ValidateTaggedValue("total", uint64(8123), swaptags)) - assert.NoError(t, acc.ValidateTaggedValue("used", uint64(1232), swaptags)) - assert.NoError(t, acc.ValidateTaggedValue("used_percent", float64(12.2), swaptags)) - assert.NoError(t, acc.ValidateTaggedValue("free", uint64(6412), swaptags)) - assert.NoError(t, acc.ValidateTaggedValue("in", uint64(7), swaptags)) - assert.NoError(t, acc.ValidateTaggedValue("out", uint64(830), swaptags)) - - acc.Points = nil - - err = (&NetStats{&mps}).Gather(&acc) - require.NoError(t, err) - netstattags := map[string]string(nil) - - assert.NoError(t, acc.ValidateTaggedValue("tcp_established", 2, netstattags)) - assert.NoError(t, acc.ValidateTaggedValue("tcp_close", 1, netstattags)) - assert.NoError(t, acc.ValidateTaggedValue("udp_socket", 1, netstattags)) - -} - -// Asserts that a given accumulator contains a measurment of type float64 with -// specific tags within a certain distance of a given expected value. Asserts a failure -// if the measurement is of the wrong type, or if no matching measurements are found -// -// Paramaters: -// t *testing.T : Testing object to use -// acc testutil.Accumulator: Accumulator to examine -// measurement string : Name of the measurement to examine -// expectedValue float64 : Value to search for within the measurement -// delta float64 : Maximum acceptable distance of an accumulated value -// from the expectedValue parameter. Useful when -// floating-point arithmatic imprecision makes looking -// for an exact match impractical -// tags map[string]string : Tag set the found measurement must have. Set to nil to -// ignore the tag set. 
-func assertContainsTaggedFloat( - t *testing.T, - acc *testutil.Accumulator, - measurement string, - expectedValue float64, - delta float64, - tags map[string]string, -) { - var actualValue float64 - for _, pt := range acc.Points { - if pt.Measurement == measurement { - if (tags == nil) || reflect.DeepEqual(pt.Tags, tags) { - if value, ok := pt.Fields["value"].(float64); ok { - actualValue = value - if (value >= expectedValue-delta) && (value <= expectedValue+delta) { - // Found the point, return without failing - return - } - } else { - assert.Fail(t, fmt.Sprintf("Measurement \"%s\" does not have type float64", - measurement)) - } - - } - } - } - msg := fmt.Sprintf("Could not find measurement \"%s\" with requested tags within %f of %f, Actual: %f", - measurement, delta, expectedValue, actualValue) - assert.Fail(t, msg) -} diff --git a/plugins/twemproxy/twemproxy_test.go b/plugins/twemproxy/twemproxy_test.go deleted file mode 100644 index c941cc197..000000000 --- a/plugins/twemproxy/twemproxy_test.go +++ /dev/null @@ -1,134 +0,0 @@ -package twemproxy - -import ( - "encoding/json" - "net" - "testing" - - "github.com/influxdb/telegraf/testutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -const sampleAddr = "127.0.0.1:22222" - -const sampleStats = `{ - "total_connections": 276448, - "uptime": 160657, - "version": "0.4.1", - "service": "nutcracker", - "curr_connections": 1322, - "source": "server1.website.com", - "demo": { - "client_connections": 1305, - "forward_error": 11684, - "client_err": 147942, - "server_ejects": 0, - "fragments": 0, - "client_eof": 126813, - "10.16.29.1:6379": { - "requests": 43604566, - "server_eof": 0, - "out_queue": 0, - "server_err": 0, - "out_queue_bytes": 0, - "in_queue": 0, - "server_timedout": 24, - "request_bytes": 2775840400, - "server_connections": 1, - "response_bytes": 7663182096, - "in_queue_bytes": 0, - "server_ejected_at": 0, - "responses": 43603900 - }, - "10.16.29.2:6379": { - "requests": 37870211, - "server_eof": 0, - "out_queue": 0, - "server_err": 0, - "out_queue_bytes": 0, - "in_queue": 0, - "server_timedout": 25, - "request_bytes": 2412114759, - "server_connections": 1, - "response_bytes": 5228980582, - "in_queue_bytes": 0, - "server_ejected_at": 0, - "responses": 37869551 - } - }, - "timestamp": 1447312436 -}` - -func mockTwemproxyServer() (net.Listener, error) { - listener, err := net.Listen("tcp", sampleAddr) - if err != nil { - return nil, err - } - go func(l net.Listener) { - for { - conn, _ := l.Accept() - conn.Write([]byte(sampleStats)) - conn.Close() - break - } - }(listener) - - return listener, nil -} - -func TestGather(t *testing.T) { - mockServer, err := mockTwemproxyServer() - if err != nil { - panic(err) - } - defer mockServer.Close() - - twemproxy := &Twemproxy{ - Instances: []TwemproxyInstance{ - TwemproxyInstance{ - Addr: sampleAddr, - Pools: []string{"demo"}, - }, - }, - } - - var acc testutil.Accumulator - err = twemproxy.Instances[0].Gather(&acc) - require.NoError(t, err) - - var sourceData map[string]interface{} - if err := json.Unmarshal([]byte(sampleStats), &sourceData); err != nil { - panic(err) - } - - metrics := []string{"total_connections", "curr_connections", "timestamp"} - tags := map[string]string{ - "twemproxy": sampleAddr, - "source": sourceData["source"].(string), - } - for _, m := range metrics { - assert.NoError(t, acc.ValidateTaggedValue(m, sourceData[m].(float64), tags)) - } - - poolName := "demo" - poolMetrics := []string{ - "client_connections", "forward_error", 
"client_err", "server_ejects", - "fragments", "client_eof", - } - tags["pool"] = poolName - poolData := sourceData[poolName].(map[string]interface{}) - for _, m := range poolMetrics { - measurement := poolName + "_" + m - assert.NoError(t, acc.ValidateTaggedValue(measurement, poolData[m].(float64), tags)) - } - poolServers := []string{"10.16.29.1:6379", "10.16.29.2:6379"} - for _, s := range poolServers { - tags["server"] = s - serverData := poolData[s].(map[string]interface{}) - for k, v := range serverData { - measurement := poolName + "_" + k - assert.NoError(t, acc.ValidateTaggedValue(measurement, v, tags)) - } - } -} diff --git a/plugins/zfs/zfs_test.go b/plugins/zfs/zfs_test.go deleted file mode 100644 index c81e4889a..000000000 --- a/plugins/zfs/zfs_test.go +++ /dev/null @@ -1,717 +0,0 @@ -package zfs - -import ( - "io/ioutil" - "os" - "testing" - - "github.com/influxdb/telegraf/testutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -const arcstatsContents = `5 1 0x01 86 4128 23617128247 12081618582809582 -name type data -hits 4 5968846374 -misses 4 1659178751 -demand_data_hits 4 4860247322 -demand_data_misses 4 501499535 -demand_metadata_hits 4 708608325 -demand_metadata_misses 4 156591375 -prefetch_data_hits 4 367047144 -prefetch_data_misses 4 974529898 -prefetch_metadata_hits 4 32943583 -prefetch_metadata_misses 4 26557943 -mru_hits 4 301176811 -mru_ghost_hits 4 47066067 -mfu_hits 4 5520612438 -mfu_ghost_hits 4 45784009 -deleted 4 1718937704 -recycle_miss 4 481222994 -mutex_miss 4 20575623 -evict_skip 4 14655903906543 -evict_l2_cached 4 145310202998272 -evict_l2_eligible 4 16345402777088 -evict_l2_ineligible 4 7437226893312 -hash_elements 4 36617980 -hash_elements_max 4 36618318 -hash_collisions 4 554145157 -hash_chains 4 4187651 -hash_chain_max 4 26 -p 4 13963222064 -c 4 16381258376 -c_min 4 4194304 -c_max 4 16884125696 -size 4 16319887096 -hdr_size 4 42567864 -data_size 4 60066304 -meta_size 4 1701534208 -other_size 4 1661543168 -anon_size 4 94720 -anon_evict_data 4 0 -anon_evict_metadata 4 0 -mru_size 4 973099008 -mru_evict_data 4 9175040 -mru_evict_metadata 4 32768 -mru_ghost_size 4 32768 -mru_ghost_evict_data 4 0 -mru_ghost_evict_metadata 4 32768 -mfu_size 4 788406784 -mfu_evict_data 4 50881024 -mfu_evict_metadata 4 81920 -mfu_ghost_size 4 0 -mfu_ghost_evict_data 4 0 -mfu_ghost_evict_metadata 4 0 -l2_hits 4 573868618 -l2_misses 4 1085309718 -l2_feeds 4 12182087 -l2_rw_clash 4 9610 -l2_read_bytes 4 32695938336768 -l2_write_bytes 4 2826774778880 -l2_writes_sent 4 4267687 -l2_writes_done 4 4267687 -l2_writes_error 4 0 -l2_writes_hdr_miss 4 164 -l2_evict_lock_retry 4 5 -l2_evict_reading 4 0 -l2_free_on_write 4 1606914 -l2_cdata_free_on_write 4 1775 -l2_abort_lowmem 4 83462 -l2_cksum_bad 4 393860640 -l2_io_error 4 53881460 -l2_size 4 2471466648576 -l2_asize 4 2461690072064 -l2_hdr_size 4 12854175552 -l2_compress_successes 4 12184849 -l2_compress_zeros 4 0 -l2_compress_failures 4 0 -memory_throttle_count 4 0 -duplicate_buffers 4 0 -duplicate_buffers_size 4 0 -duplicate_reads 4 0 -memory_direct_count 4 5159942 -memory_indirect_count 4 3034640 -arc_no_grow 4 0 -arc_tempreserve 4 0 -arc_loaned_bytes 4 0 -arc_prune 4 114554259559 -arc_meta_used 4 16259820792 -arc_meta_limit 4 12663094272 -arc_meta_max 4 18327165696 -` - -const zfetchstatsContents = `3 1 0x01 11 528 23607270446 12081656848148208 -name type data -hits 4 7812959060 -misses 4 4154484207 -colinear_hits 4 1366368 -colinear_misses 4 4153117839 -stride_hits 4 7309776732 -stride_misses 4 
222766182
-reclaim_successes 4 107788388
-reclaim_failures 4 4045329451
-streams_resets 4 20989756
-streams_noresets 4 503182328
-bogus_streams 4 0
-`
-const vdev_cache_statsContents = `7 1 0x01 3 144 23617323692 12081684236238879
-name type data
-delegations 4 0
-hits 4 0
-misses 4 0
-`
-const pool_ioContents = `11 3 0x00 1 80 2225326830828 32953476980628
-nread nwritten reads writes wtime wlentime wupdate rtime rlentime rupdate wcnt rcnt
-1884160 6450688 22 978 272187126 2850519036 2263669418655 424226814 2850519036 2263669871823 0 0
-`
-
-var testKstatPath = os.TempDir() + "/telegraf/proc/spl/kstat/zfs"
-
-type metrics struct {
-    name  string
-    value int64
-}
-
-func TestZfsPoolMetrics(t *testing.T) {
-    err := os.MkdirAll(testKstatPath, 0755)
-    require.NoError(t, err)
-
-    err = os.MkdirAll(testKstatPath+"/HOME", 0755)
-    require.NoError(t, err)
-
-    err = ioutil.WriteFile(testKstatPath+"/HOME/io", []byte(pool_ioContents), 0644)
-    require.NoError(t, err)
-
-    err = ioutil.WriteFile(testKstatPath+"/arcstats", []byte(arcstatsContents), 0644)
-    require.NoError(t, err)
-
-    poolMetrics := getPoolMetrics()
-
-    var acc testutil.Accumulator
-
-    //one pool, all metrics
-    tags := map[string]string{
-        "pool": "HOME",
-    }
-
-    z := &Zfs{KstatPath: testKstatPath, KstatMetrics: []string{"arcstats"}}
-    err = z.Gather(&acc)
-    require.NoError(t, err)
-
-    for _, metric := range poolMetrics {
-        assert.True(t, !acc.HasIntValue(metric.name), metric.name)
-        assert.True(t, !acc.CheckTaggedValue(metric.name, metric.value, tags))
-    }
-
-    z = &Zfs{KstatPath: testKstatPath, KstatMetrics: []string{"arcstats"}, PoolMetrics: true}
-    err = z.Gather(&acc)
-    require.NoError(t, err)
-
-    for _, metric := range poolMetrics {
-        assert.True(t, acc.HasIntValue(metric.name), metric.name)
-        assert.True(t, acc.CheckTaggedValue(metric.name, metric.value, tags))
-    }
-
-    err = os.RemoveAll(os.TempDir() + "/telegraf")
-    require.NoError(t, err)
-}
-
-func TestZfsGeneratesMetrics(t *testing.T) {
-    err := os.MkdirAll(testKstatPath, 0755)
-    require.NoError(t, err)
-
-    err = os.MkdirAll(testKstatPath+"/HOME", 0755)
-    require.NoError(t, err)
-
-    err = ioutil.WriteFile(testKstatPath+"/HOME/io", []byte(""), 0644)
-    require.NoError(t, err)
-
-    err = ioutil.WriteFile(testKstatPath+"/arcstats", []byte(arcstatsContents), 0644)
-    require.NoError(t, err)
-
-    err = ioutil.WriteFile(testKstatPath+"/zfetchstats", []byte(zfetchstatsContents), 0644)
-    require.NoError(t, err)
-
-    err = ioutil.WriteFile(testKstatPath+"/vdev_cache_stats", []byte(vdev_cache_statsContents), 0644)
-    require.NoError(t, err)
-
-    intMetrics := getKstatMetricsAll()
-
-    var acc testutil.Accumulator
-
-    //one pool, all metrics
-    tags := map[string]string{
-        "pools": "HOME",
-    }
-
-    z := &Zfs{KstatPath: testKstatPath}
-    err = z.Gather(&acc)
-    require.NoError(t, err)
-
-    for _, metric := range intMetrics {
-        assert.True(t, acc.HasIntValue(metric.name), metric.name)
-        assert.True(t, acc.CheckTaggedValue(metric.name, metric.value, tags))
-    }
-
-    //two pools, all metrics
-    err = os.MkdirAll(testKstatPath+"/STORAGE", 0755)
-    require.NoError(t, err)
-
-    err = ioutil.WriteFile(testKstatPath+"/STORAGE/io", []byte(""), 0644)
-    require.NoError(t, err)
-
-    tags = map[string]string{
-        "pools": "HOME::STORAGE",
-    }
-
-    z = &Zfs{KstatPath: testKstatPath}
-    acc = testutil.Accumulator{}
-    err = z.Gather(&acc)
-    require.NoError(t, err)
-
-    for _, metric := range intMetrics {
-        assert.True(t, acc.HasIntValue(metric.name), metric.name)
-        assert.True(t, acc.CheckTaggedValue(metric.name, metric.value, tags))
-    }
-
-    intMetrics = getKstatMetricsArcOnly()
-
-    //two pools, one metric
-    z = &Zfs{KstatPath: testKstatPath, KstatMetrics: []string{"arcstats"}}
-    acc = testutil.Accumulator{}
-    err = z.Gather(&acc)
-    require.NoError(t, err)
-
-    for _, metric := range intMetrics {
-        assert.True(t, acc.HasIntValue(metric.name), metric.name)
-        assert.True(t, acc.CheckTaggedValue(metric.name, metric.value, tags))
-    }
-
-    err = os.RemoveAll(os.TempDir() + "/telegraf")
-    require.NoError(t, err)
-}
-
-func getKstatMetricsArcOnly() []*metrics {
-    return []*metrics{
-        {name: "arcstats_hits", value: 5968846374},
-        {name: "arcstats_misses", value: 1659178751},
-        {name: "arcstats_demand_data_hits", value: 4860247322},
-        {name: "arcstats_demand_data_misses", value: 501499535},
-        {name: "arcstats_demand_metadata_hits", value: 708608325},
-        {name: "arcstats_demand_metadata_misses", value: 156591375},
-        {name: "arcstats_prefetch_data_hits", value: 367047144},
-        {name: "arcstats_prefetch_data_misses", value: 974529898},
-        {name: "arcstats_prefetch_metadata_hits", value: 32943583},
-        {name: "arcstats_prefetch_metadata_misses", value: 26557943},
-        {name: "arcstats_mru_hits", value: 301176811},
-        {name: "arcstats_mru_ghost_hits", value: 47066067},
-        {name: "arcstats_mfu_hits", value: 5520612438},
-        {name: "arcstats_mfu_ghost_hits", value: 45784009},
-        {name: "arcstats_deleted", value: 1718937704},
-        {name: "arcstats_recycle_miss", value: 481222994},
-        {name: "arcstats_mutex_miss", value: 20575623},
-        {name: "arcstats_evict_skip", value: 14655903906543},
-        {name: "arcstats_evict_l2_cached", value: 145310202998272},
-        {name: "arcstats_evict_l2_eligible", value: 16345402777088},
-        {name: "arcstats_evict_l2_ineligible", value: 7437226893312},
-        {name: "arcstats_hash_elements", value: 36617980},
-        {name: "arcstats_hash_elements_max", value: 36618318},
-        {name: "arcstats_hash_collisions", value: 554145157},
-        {name: "arcstats_hash_chains", value: 4187651},
-        {name: "arcstats_hash_chain_max", value: 26},
-        {name: "arcstats_p", value: 13963222064},
-        {name: "arcstats_c", value: 16381258376},
-        {name: "arcstats_c_min", value: 4194304},
-        {name: "arcstats_c_max", value: 16884125696},
-        {name: "arcstats_size", value: 16319887096},
-        {name: "arcstats_hdr_size", value: 42567864},
-        {name: "arcstats_data_size", value: 60066304},
-        {name: "arcstats_meta_size", value: 1701534208},
-        {name: "arcstats_other_size", value: 1661543168},
-        {name: "arcstats_anon_size", value: 94720},
-        {name: "arcstats_anon_evict_data", value: 0},
-        {name: "arcstats_anon_evict_metadata", value: 0},
-        {name: "arcstats_mru_size", value: 973099008},
-        {name: "arcstats_mru_evict_data", value: 9175040},
-        {name: "arcstats_mru_evict_metadata", value: 32768},
-        {name: "arcstats_mru_ghost_size", value: 32768},
-        {name: "arcstats_mru_ghost_evict_data", value: 0},
-        {name: "arcstats_mru_ghost_evict_metadata", value: 32768},
-        {name: "arcstats_mfu_size", value: 788406784},
-        {name: "arcstats_mfu_evict_data", value: 50881024},
-        {name: "arcstats_mfu_evict_metadata", value: 81920},
-        {name: "arcstats_mfu_ghost_size", value: 0},
-        {name: "arcstats_mfu_ghost_evict_data", value: 0},
-        {name: "arcstats_mfu_ghost_evict_metadata", value: 0},
-        {name: "arcstats_l2_hits", value: 573868618},
-        {name: "arcstats_l2_misses", value: 1085309718},
-        {name: "arcstats_l2_feeds", value: 12182087},
-        {name: "arcstats_l2_rw_clash", value: 9610},
-        {name: "arcstats_l2_read_bytes", value: 32695938336768},
-        {name: "arcstats_l2_write_bytes", value: 2826774778880},
-        {name: "arcstats_l2_writes_sent", value: 4267687},
-        {name: "arcstats_l2_writes_done", value: 4267687},
-        {name: "arcstats_l2_writes_error", value: 0},
-        {name: "arcstats_l2_writes_hdr_miss", value: 164},
-        {name: "arcstats_l2_evict_lock_retry", value: 5},
-        {name: "arcstats_l2_evict_reading", value: 0},
-        {name: "arcstats_l2_free_on_write", value: 1606914},
-        {name: "arcstats_l2_cdata_free_on_write", value: 1775},
-        {name: "arcstats_l2_abort_lowmem", value: 83462},
-        {name: "arcstats_l2_cksum_bad", value: 393860640},
-        {name: "arcstats_l2_io_error", value: 53881460},
-        {name: "arcstats_l2_size", value: 2471466648576},
-        {name: "arcstats_l2_asize", value: 2461690072064},
-        {name: "arcstats_l2_hdr_size", value: 12854175552},
-        {name: "arcstats_l2_compress_successes", value: 12184849},
-        {name: "arcstats_l2_compress_zeros", value: 0},
-        {name: "arcstats_l2_compress_failures", value: 0},
-        {name: "arcstats_memory_throttle_count", value: 0},
-        {name: "arcstats_duplicate_buffers", value: 0},
-        {name: "arcstats_duplicate_buffers_size", value: 0},
-        {name: "arcstats_duplicate_reads", value: 0},
-        {name: "arcstats_memory_direct_count", value: 5159942},
-        {name: "arcstats_memory_indirect_count", value: 3034640},
-        {name: "arcstats_arc_no_grow", value: 0},
-        {name: "arcstats_arc_tempreserve", value: 0},
-        {name: "arcstats_arc_loaned_bytes", value: 0},
-        {name: "arcstats_arc_prune", value: 114554259559},
-        {name: "arcstats_arc_meta_used", value: 16259820792},
-        {name: "arcstats_arc_meta_limit", value: 12663094272},
-        {name: "arcstats_arc_meta_max", value: 18327165696},
-    }
-}
-
-func getKstatMetricsAll() []*metrics {
-    otherMetrics := []*metrics{
-        {name: "zfetchstats_hits", value: 7812959060},
-        {name: "zfetchstats_misses", value: 4154484207},
-        {name: "zfetchstats_colinear_hits", value: 1366368},
-        {name: "zfetchstats_colinear_misses", value: 4153117839},
-        {name: "zfetchstats_stride_hits", value: 7309776732},
-        {name: "zfetchstats_stride_misses", value: 222766182},
-        {name: "zfetchstats_reclaim_successes", value: 107788388},
-        {name: "zfetchstats_reclaim_failures", value: 4045329451},
-        {name: "zfetchstats_streams_resets", value: 20989756},
-        {name: "zfetchstats_streams_noresets", value: 503182328},
-        {name: "zfetchstats_bogus_streams", value: 0},
-        {name: "vdev_cache_stats_delegations", value: 0},
-        {name: "vdev_cache_stats_hits", value: 0},
-        {name: "vdev_cache_stats_misses", value: 0},
-    }
-
-    return append(getKstatMetricsArcOnly(), otherMetrics...)
-}
-
-func getPoolMetrics() []*metrics {
-    return []*metrics{
-        {name: "nread", value: 1884160},
-        {name: "nwritten", value: 6450688},
-        {name: "reads", value: 22},
-        {name: "writes", value: 978},
-        {name: "wtime", value: 272187126},
-        {name: "wlentime", value: 2850519036},
-        {name: "wupdate", value: 2263669418655},
-        {name: "rtime", value: 424226814},
-        {name: "rlentime", value: 2850519036},
-        {name: "rupdate", value: 2263669871823},
-        {name: "wcnt", value: 0},
-        {name: "rcnt", value: 0},
-    }
-}
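For contrast with the deleted helpers, here is a minimal sketch of the same pool check written against the reworked testutil API that appears later in this diff. Only the helper signature is taken from this change; the measurement name "zfs_pool" is an assumption for illustration, and the field names and values come from the pool_ioContents fixture above.

```go
// Sketch only: "zfs_pool" is a hypothetical measurement name.
// AssertContainsTaggedFields does a reflect.DeepEqual over the complete
// field map of a point, so the old per-metric HasIntValue/CheckTaggedValue
// loop collapses into a single call with the full field set.
func TestZfsPoolMetricsSketch(t *testing.T) {
	var acc testutil.Accumulator

	z := &Zfs{KstatPath: testKstatPath, PoolMetrics: true}
	require.NoError(t, z.Gather(&acc))

	acc.AssertContainsTaggedFields(t, "zfs_pool", map[string]interface{}{
		"nread":    int64(1884160),
		"nwritten": int64(6450688),
		"reads":    int64(22),
		"writes":   int64(978),
		"wtime":    int64(272187126),
		"wlentime": int64(2850519036),
		"wupdate":  int64(2263669418655),
		"rtime":    int64(424226814),
		"rlentime": int64(2850519036),
		"rupdate":  int64(2263669871823),
		"wcnt":     int64(0),
		"rcnt":     int64(0),
	}, map[string]string{"pool": "HOME"})
}
```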
diff --git a/scripts/Vagrantfile b/scripts/Vagrantfile
index 3c0199bdb..a04450d6d 100644
--- a/scripts/Vagrantfile
+++ b/scripts/Vagrantfile
@@ -7,7 +7,7 @@ VAGRANTFILE_API_VERSION = "2"
 Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
   config.vm.box = "ubuntu/trusty64"
 
-  config.vm.synced_folder "..", "/home/vagrant/go/src/github.com/influxdb/telegraf",
+  config.vm.synced_folder "..", "/home/vagrant/go/src/github.com/influxdata/telegraf",
     type: "rsync",
     rsync__args: ["--verbose", "--archive", "--delete", "-z", "--safe-links"],
     rsync__exclude: ["./telegraf", ".vagrant/"]
@@ -26,7 +26,7 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
     gvm use go1.4.2 --default
     echo "export PATH=$PATH:$GOPATH/bin" >> "$HOME/.bashrc"
     echo 'export GOPATH=/home/vagrant/go' >> "$HOME/.bashrc"
-    cd "$HOME/go/src/github.com/influxdb/telegraf" &&\
+    cd "$HOME/go/src/github.com/influxdata/telegraf" &&\
     rm -rf Godeps/_workspace/pkg &&\
     GOPATH="$HOME/go" make
   SHELL
diff --git a/scripts/circle-test.sh b/scripts/circle-test.sh
index f00ac7d00..bbad51506 100755
--- a/scripts/circle-test.sh
+++ b/scripts/circle-test.sh
@@ -34,7 +34,7 @@ export GOPATH=$BUILD_DIR
 # Turning off GOGC speeds up build times
 export GOGC=off
 export PATH=$GOPATH/bin:$PATH
-exit_if_fail mkdir -p $GOPATH/src/github.com/influxdb
+exit_if_fail mkdir -p $GOPATH/src/github.com/influxdata
 
 # Dump some test config to the log.
 echo "Test configuration"
@@ -44,8 +44,8 @@ echo "\$GOPATH: $GOPATH"
 echo "\$CIRCLE_BRANCH: $CIRCLE_BRANCH"
 
 # Move the checked-out source to a better location
-exit_if_fail mv $HOME/telegraf $GOPATH/src/github.com/influxdb
-exit_if_fail cd $GOPATH/src/github.com/influxdb/telegraf
+exit_if_fail mv $HOME/telegraf $GOPATH/src/github.com/influxdata
+exit_if_fail cd $GOPATH/src/github.com/influxdata/telegraf
 
 # Verify that go fmt has been run
 check_go_fmt
@@ -66,6 +66,6 @@ exit_if_fail "./telegraf -version | grep $VERSION"
 tmpdir=$(mktemp -d)
 ./telegraf -sample-config > $tmpdir/config.toml
 exit_if_fail ./telegraf -config $tmpdir/config.toml \
-    -test -filter cpu:mem
+    -test -input-filter cpu:mem
 
 exit $rc
diff --git a/scripts/init.sh b/scripts/init.sh
index 91e9b47b3..81932bb48 100755
--- a/scripts/init.sh
+++ b/scripts/init.sh
@@ -51,7 +51,6 @@ if [ ! -f "$STDERR" ]; then
     mkdir -p `dirname $STDERR`
 fi
 
-
 OPEN_FILE_LIMIT=65536
 
 function pidofproc() {
@@ -98,7 +97,7 @@ function log_success_msg() {
 name=telegraf
 
 # Daemon name, where is the actual executable
-daemon=/opt/telegraf/telegraf
+daemon=/usr/bin/telegraf
 
 # pid file for the daemon
 pidfile=/var/run/telegraf/telegraf.pid
@@ -106,12 +105,12 @@ piddir=`dirname $pidfile`
 
 if [ ! -d "$piddir" ]; then
     mkdir -p $piddir
-    chown $GROUP:$USER $piddir
+    chown $USER:$GROUP $piddir
 fi
 
 # Configuration file
-config=/etc/opt/telegraf/telegraf.conf
-confdir=/etc/opt/telegraf/telegraf.d
+config=/etc/telegraf/telegraf.conf
+confdir=/etc/telegraf/telegraf.d
 
 # If the daemon is not there, then exit.
 [ -x $daemon ] || exit 5
@@ -137,9 +136,9 @@ case $1 in
             log_success_msg "Starting the process" "$name"
             if which start-stop-daemon > /dev/null 2>&1; then
-                start-stop-daemon --chuid $GROUP:$USER --start --quiet --pidfile $pidfile --exec $daemon -- -pidfile $pidfile -config $config -configdirectory $confdir $TELEGRAF_OPTS >>$STDOUT 2>>$STDERR &
+                start-stop-daemon --chuid $USER:$GROUP --start --quiet --pidfile $pidfile --exec $daemon -- -pidfile $pidfile -config $config -config-directory $confdir $TELEGRAF_OPTS >>$STDOUT 2>>$STDERR &
             else
-                nohup $daemon -pidfile $pidfile -config $config -configdirectory $confdir $TELEGRAF_OPTS >>$STDOUT 2>>$STDERR &
+                nohup $daemon -pidfile $pidfile -config $config -config-directory $confdir $TELEGRAF_OPTS >>$STDOUT 2>>$STDERR &
            fi
            log_success_msg "$name process was started"
            ;;
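The net effect of the init-script changes: on a sysv host the daemon is now launched as `telegraf -pidfile /var/run/telegraf/telegraf.pid -config /etc/telegraf/telegraf.conf -config-directory /etc/telegraf/telegraf.d` (plus any `$TELEGRAF_OPTS`), with the hyphenated `-config-directory` flag replacing the old `-configdirectory` and all paths moved out of `/opt` and `/etc/opt`.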
diff --git a/scripts/package.sh b/scripts/package.sh
deleted file mode 100755
index fbbf39eb8..000000000
--- a/scripts/package.sh
+++ /dev/null
@@ -1,365 +0,0 @@
-#!/usr/bin/env bash
-
-###########################################################################
-# Packaging script which creates debian and RPM packages. It optionally
-# tags the repo with the given version.
-#
-# Requirements: GOPATH must be set. 'fpm' must be on the path, and the AWS
-# CLI tools must also be installed.
-#
-#    https://github.com/jordansissel/fpm
-#    http://aws.amazon.com/cli/
-#
-# Packaging process: to package a build, simply execute:
-#
-#    package.sh
-#
-# The script will automatically determine the version number from git using
-# `git describe --always --tags`
-#
-# AWS upload: the script will also offer to upload the packages to S3. If
-# this option is selected, the credentials should be present in the file
-# ~/aws.conf. The contents should be of the form:
-#
-#    [default]
-#    aws_access_key_id=
-#    aws_secret_access_key=
-#    region = us-east-1
-#
-# Trim the leading spaces when creating the file. The script will exit if
-# S3 upload is requested, but this file does not exist.
-
-AWS_FILE=~/aws.conf
-
-INSTALL_ROOT_DIR=/opt/telegraf
-TELEGRAF_LOG_DIR=/var/log/telegraf
-CONFIG_ROOT_DIR=/etc/opt/telegraf
-CONFIG_D_DIR=/etc/opt/telegraf/telegraf.d
-LOGROTATE_DIR=/etc/logrotate.d
-
-SAMPLE_CONFIGURATION=etc/telegraf.conf
-LOGROTATE_CONFIGURATION=etc/logrotate.d/telegraf
-INITD_SCRIPT=scripts/init.sh
-SYSTEMD_SCRIPT=scripts/telegraf.service
-
-TMP_WORK_DIR=`mktemp -d`
-POST_INSTALL_PATH=`mktemp`
-ARCH=`uname -i`
-LICENSE=MIT
-URL=influxdb.com
-MAINTAINER=support@influxdb.com
-VENDOR=InfluxDB
-DESCRIPTION="InfluxDB Telegraf agent"
-PKG_DEPS=(coreutils)
-GO_VERSION="go1.5"
-GOPATH_INSTALL=
-BINS=(
-    telegraf
-    )
-
-###########################################################################
-# Helper functions.
-
-# usage prints simple usage information.
-usage() {
-    echo -e "$0\n"
-    cleanup_exit $1
-}
-
-# make_dir_tree creates the directory structure within the packages.
-make_dir_tree() {
-    work_dir=$1
-    version=$2
-    mkdir -p $work_dir/$INSTALL_ROOT_DIR/versions/$version/scripts
-    if [ $? -ne 0 ]; then
-        echo "Failed to create installation directory -- aborting."
-        cleanup_exit 1
-    fi
-    mkdir -p $work_dir/$CONFIG_ROOT_DIR
-    if [ $? -ne 0 ]; then
-        echo "Failed to create configuration directory -- aborting."
-        cleanup_exit 1
-    fi
-    mkdir -p $work_dir/$CONFIG_D_DIR
-    if [ $? -ne 0 ]; then
-        echo "Failed to create configuration subdirectory -- aborting."
-        cleanup_exit 1
-    fi
-    mkdir -p $work_dir/$LOGROTATE_DIR
-    if [ $? -ne 0 ]; then
-        echo "Failed to create logrotate directory -- aborting."
-        cleanup_exit 1
-    fi
-
-}
-
-# cleanup_exit removes all resources created during the process and exits with
-# the supplied returned code.
-cleanup_exit() {
-    rm -r $TMP_WORK_DIR
-    rm $POST_INSTALL_PATH
-    exit $1
-}
-
-# check_gopath sanity checks the value of the GOPATH env variable, and determines
-# the path where build artifacts are installed. GOPATH may be a colon-delimited
-# list of directories.
-check_gopath() {
-    [ -z "$GOPATH" ] && echo "GOPATH is not set." && cleanup_exit 1
-    GOPATH_INSTALL=`echo $GOPATH | cut -d ':' -f 1`
-    [ ! -d "$GOPATH_INSTALL" ] && echo "GOPATH_INSTALL is not a directory." && cleanup_exit 1
-    echo "GOPATH ($GOPATH) looks sane, using $GOPATH_INSTALL for installation."
-}
-
-# check_clean_tree ensures that no source file is locally modified.
-check_clean_tree() {
-    modified=$(git ls-files --modified | wc -l)
-    if [ $modified -ne 0 ]; then
-        echo "The source tree is not clean -- aborting."
-        cleanup_exit 1
-    fi
-    echo "Git tree is clean."
-}
-
-# do_build builds the code. The version and commit must be passed in.
-do_build() {
-    version=$1
-    commit=`git rev-parse HEAD`
-    if [ $? -ne 0 ]; then
-        echo "Unable to retrieve current commit -- aborting"
-        cleanup_exit 1
-    fi
-
-    for b in ${BINS[*]}; do
-        rm -f $GOPATH_INSTALL/bin/$b
-    done
-
-    gdm restore
-    go install -ldflags="-X main.Version $version" ./...
-    if [ $? -ne 0 ]; then
-        echo "Build failed, unable to create package -- aborting"
-        cleanup_exit 1
-    fi
-    echo "Build completed successfully."
-}
-
-# generate_postinstall_script creates the post-install script for the
-# package. It must be passed the version.
-generate_postinstall_script() {
-    version=$1
-    cat <<EOF >$POST_INSTALL_PATH
-#!/bin/sh
-rm -f $INSTALL_ROOT_DIR/telegraf
-rm -f $INSTALL_ROOT_DIR/init.sh
-ln -sfn $INSTALL_ROOT_DIR/versions/$version/telegraf $INSTALL_ROOT_DIR/telegraf
-
-if ! id telegraf >/dev/null 2>&1; then
-    useradd --help 2>&1| grep -- --system > /dev/null 2>&1
-    old_useradd=\$?
-    if [ \$old_useradd -eq 0 ]
-    then
-        useradd --system -U -M telegraf
-    else
-        groupadd telegraf && useradd -M -g telegraf telegraf
-    fi
-fi
-
-# Systemd
-if which systemctl > /dev/null 2>&1 ; then
-    cp $INSTALL_ROOT_DIR/versions/$version/scripts/telegraf.service \
-        /lib/systemd/system/telegraf.service
-    systemctl enable telegraf
-
-    # restart on upgrade of package
-    if [ "$#" -eq 2 ]; then
-        systemctl restart telegraf
-    fi
-
-# Sysv
-else
-    ln -sfn $INSTALL_ROOT_DIR/versions/$version/scripts/init.sh \
-        $INSTALL_ROOT_DIR/init.sh
-    rm -f /etc/init.d/telegraf
-    ln -sfn $INSTALL_ROOT_DIR/init.sh /etc/init.d/telegraf
-    chmod +x /etc/init.d/telegraf
-    # update-rc.d sysv service:
-    if which update-rc.d > /dev/null 2>&1 ; then
-        update-rc.d -f telegraf remove
-        update-rc.d telegraf defaults
-    # CentOS-style sysv:
-    else
-        chkconfig --add telegraf
-    fi
-
-    # restart on upgrade of package
-    if [ "$#" -eq 2 ]; then
-        /etc/init.d/telegraf restart
-    fi
-
-    mkdir -p $TELEGRAF_LOG_DIR
-    chown -R -L telegraf:telegraf $TELEGRAF_LOG_DIR
-fi
-
-chown -R -L telegraf:telegraf $INSTALL_ROOT_DIR
-chmod -R a+rX $INSTALL_ROOT_DIR
-
-EOF
-    echo "Post-install script created successfully at $POST_INSTALL_PATH"
-}
-
-###########################################################################
-# Start the packaging process.
- -if [ "$1" == "-h" ]; then - usage 0 -elif [ "$1" == "" ]; then - VERSION=`git describe --always --tags | tr -d v` -else - VERSION="$1" -fi - -cd `git rev-parse --show-toplevel` -echo -e "\nStarting package process, version: $VERSION\n" - -check_gopath -do_build $VERSION -make_dir_tree $TMP_WORK_DIR $VERSION - -########################################################################### -# Copy the assets to the installation directories. - -for b in ${BINS[*]}; do - cp $GOPATH_INSTALL/bin/$b $TMP_WORK_DIR/$INSTALL_ROOT_DIR/versions/$VERSION - if [ $? -ne 0 ]; then - echo "Failed to copy binaries to packaging directory -- aborting." - cleanup_exit 1 - fi -done - -echo "${BINS[*]} copied to $TMP_WORK_DIR/$INSTALL_ROOT_DIR/versions/$VERSION" - -cp $INITD_SCRIPT $TMP_WORK_DIR/$INSTALL_ROOT_DIR/versions/$VERSION/scripts -if [ $? -ne 0 ]; then - echo "Failed to copy init.d script to packaging directory -- aborting." - cleanup_exit 1 -fi -echo "$INITD_SCRIPT copied to $TMP_WORK_DIR/$INSTALL_ROOT_DIR/versions/$VERSION/scripts" - -cp $SYSTEMD_SCRIPT $TMP_WORK_DIR/$INSTALL_ROOT_DIR/versions/$VERSION/scripts -if [ $? -ne 0 ]; then - echo "Failed to copy systemd file to packaging directory -- aborting." - cleanup_exit 1 -fi -echo "$SYSTEMD_SCRIPT copied to $TMP_WORK_DIR/$INSTALL_ROOT_DIR/versions/$VERSION/scripts" - -cp $SAMPLE_CONFIGURATION $TMP_WORK_DIR/$CONFIG_ROOT_DIR/telegraf.conf -if [ $? -ne 0 ]; then - echo "Failed to copy $SAMPLE_CONFIGURATION to packaging directory -- aborting." - cleanup_exit 1 -fi - -cp $LOGROTATE_CONFIGURATION $TMP_WORK_DIR/$LOGROTATE_DIR/telegraf -if [ $? -ne 0 ]; then - echo "Failed to copy $LOGROTATE_CONFIGURATION to packaging directory -- aborting." - cleanup_exit 1 -fi - -generate_postinstall_script $VERSION - -########################################################################### -# Create the actual packages. - -if [ "$CIRCLE_BRANCH" == "" ]; then - echo -n "Commence creation of $ARCH packages, version $VERSION? [Y/n] " - read response - response=`echo $response | tr 'A-Z' 'a-z'` - if [ "x$response" == "xn" ]; then - echo "Packaging aborted." - cleanup_exit 1 - fi -fi - -if [ $ARCH == "i386" ]; then - rpm_package=telegraf-$VERSION-1.i686.rpm - debian_package=telegraf_${VERSION}_i686.deb - deb_args="-a i686" - rpm_args="setarch i686" -elif [ $ARCH == "arm" ]; then - rpm_package=telegraf-$VERSION-1.armel.rpm - debian_package=telegraf_${VERSION}_armel.deb -else - rpm_package=telegraf-$VERSION-1.x86_64.rpm - debian_package=telegraf_${VERSION}_amd64.deb -fi - -COMMON_FPM_ARGS="-C $TMP_WORK_DIR --vendor $VENDOR --url $URL --license $LICENSE \ - --maintainer $MAINTAINER --after-install $POST_INSTALL_PATH \ - --name telegraf --provides telegraf --version $VERSION --config-files $CONFIG_ROOT_DIR ." -$rpm_args fpm -s dir -t rpm --description "$DESCRIPTION" $COMMON_FPM_ARGS -if [ $? -ne 0 ]; then - echo "Failed to create RPM package -- aborting." - cleanup_exit 1 -fi -echo "RPM package created successfully." - -fpm -s dir -t deb $deb_args --description "$DESCRIPTION" $COMMON_FPM_ARGS -if [ $? -ne 0 ]; then - echo "Failed to create Debian package -- aborting." - cleanup_exit 1 -fi -echo "Debian package created successfully." - -########################################################################### -# Offer to publish the packages. - -if [ "$CIRCLE_BRANCH" == "" ]; then - echo -n "Publish packages to S3? [y/N] " - read response - response=`echo $response | tr 'A-Z' 'a-z'` - if [ "x$response" == "xy" ]; then - echo "Publishing packages to S3." - if [ ! 
-e "$AWS_FILE" ]; then - echo "$AWS_FILE does not exist -- aborting." - cleanup_exit 1 - fi - - # Upload .deb and .rpm packages - for filepath in `ls *.{deb,rpm}`; do - echo "Uploading $filepath to S3" - filename=`basename $filepath` - echo "Uploading $filename to s3://get.influxdb.org/telegraf/$filename" - AWS_CONFIG_FILE=$AWS_FILE aws s3 cp $filepath \ - s3://get.influxdb.org/telegraf/$filename \ - --acl public-read --region us-east-1 - if [ $? -ne 0 ]; then - echo "Upload failed -- aborting". - cleanup_exit 1 - fi - rm $filepath - done - - # Make and upload linux amd64, 386, and arm - make build-linux-bins - for b in `ls telegraf_*`; do - zippedbin=${b}_${VERSION}.tar.gz - # Zip the binary - tar -zcf $TMP_WORK_DIR/$zippedbin ./$b - echo "Uploading binary: $zippedbin to S3" - AWS_CONFIG_FILE=$AWS_FILE aws s3 cp $TMP_WORK_DIR/$zippedbin \ - s3://get.influxdb.org/telegraf/$zippedbin \ - --acl public-read --region us-east-1 - if [ $? -ne 0 ]; then - echo "Binary upload failed -- aborting". - cleanup_exit 1 - fi - done - else - echo "Not publishing packages to S3." - fi -fi - -########################################################################### -# All done. - -echo -e "\nPackaging process complete." -cleanup_exit 0 diff --git a/scripts/post-install.sh b/scripts/post-install.sh new file mode 100644 index 000000000..bb4803f8d --- /dev/null +++ b/scripts/post-install.sh @@ -0,0 +1,69 @@ +#!/bin/bash + +BIN_DIR=/usr/bin +LOG_DIR=/var/log/telegraf +SCRIPT_DIR=/usr/lib/telegraf/scripts +LOGROTATE_DIR=/etc/logrotate.d + +function install_init { + cp -f $SCRIPT_DIR/init.sh /etc/init.d/telegraf + chmod +x /etc/init.d/telegraf +} + +function install_systemd { + cp -f $SCRIPT_DIR/telegraf.service /lib/systemd/system/telegraf.service + systemctl enable telegraf +} + +function install_update_rcd { + update-rc.d telegraf defaults +} + +function install_chkconfig { + chkconfig --add telegraf +} + +id telegraf &>/dev/null +if [[ $? -ne 0 ]]; then + useradd --system -U -M telegraf -s /bin/false -d /etc/telegraf +fi + +chown -R -L telegraf:telegraf $LOG_DIR + +# Remove legacy symlink, if it exists +if [[ -L /etc/init.d/telegraf ]]; then + rm -f /etc/init.d/telegraf +fi + +# Add defaults file, if it doesn't exist +if [[ ! -f /etc/default/telegraf ]]; then + touch /etc/default/telegraf +fi + +# Add .d configuration directory +if [[ ! -d /etc/telegraf/telegraf.d ]]; then + mkdir -p /etc/telegraf/telegraf.d +fi + +# Distribution-specific logic +if [[ -f /etc/redhat-release ]]; then + # RHEL-variant logic + which systemctl &>/dev/null + if [[ $? -eq 0 ]]; then + install_systemd + else + # Assuming sysv + install_init + install_chkconfig + fi +elif [[ -f /etc/debian_version ]]; then + # Debian/Ubuntu logic + which systemctl &>/dev/null + if [[ $? -eq 0 ]]; then + install_systemd + else + # Assuming sysv + install_init + install_update_rcd + fi +fi diff --git a/scripts/pre-install.sh b/scripts/pre-install.sh new file mode 100644 index 000000000..443d6bc87 --- /dev/null +++ b/scripts/pre-install.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +if [[ -f /etc/opt/telegraf/telegraf.conf ]]; then + # Legacy configuration found + if [[ ! -d /etc/telegraf ]]; then + # New configuration does not exist, move legacy configuration to new location + echo -e "Please note, Telegraf's configuration is now located at '/etc/telegraf' (previously '/etc/opt/telegraf')." 
+        mv /etc/opt/telegraf /etc/telegraf
+
+        backup_name="telegraf.conf.$(date +%s).backup"
+        echo "A backup of your current configuration can be found at: /etc/telegraf/$backup_name"
+        cp -a /etc/telegraf/telegraf.conf /etc/telegraf/$backup_name
+    fi
+fi
diff --git a/scripts/telegraf.service b/scripts/telegraf.service
index d5e46b124..6f4450402 100644
--- a/scripts/telegraf.service
+++ b/scripts/telegraf.service
@@ -1,12 +1,12 @@
 [Unit]
 Description=The plugin-driven server agent for reporting metrics into InfluxDB
-Documentation=https://github.com/influxdb/telegraf
+Documentation=https://github.com/influxdata/telegraf
 After=network.target
 
 [Service]
 EnvironmentFile=-/etc/default/telegraf
 User=telegraf
-ExecStart=/opt/telegraf/telegraf -config /etc/opt/telegraf/telegraf.conf -configdirectory /etc/opt/telegraf/telegraf.d $TELEGRAF_OPTS
+ExecStart=/usr/bin/telegraf -config /etc/telegraf/telegraf.conf -config-directory /etc/telegraf/telegraf.d ${TELEGRAF_OPTS}
 Restart=on-failure
 KillMode=process
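Because the unit loads `EnvironmentFile=-/etc/default/telegraf` (created empty by post-install.sh above), extra flags can be supplied without editing the unit file: a single line such as `TELEGRAF_OPTS="-input-filter cpu:mem"` in /etc/default/telegraf is expanded into the `ExecStart` command via `${TELEGRAF_OPTS}` (the flag name here is borrowed from circle-test.sh above, purely as an illustration).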
diff --git a/testutil/accumulator.go b/testutil/accumulator.go
index d31c71ef5..7cdfb4155 100644
--- a/testutil/accumulator.go
+++ b/testutil/accumulator.go
@@ -1,10 +1,14 @@
 package testutil
 
 import (
+    "encoding/json"
     "fmt"
     "reflect"
     "sync"
+    "testing"
     "time"
+
+    "github.com/stretchr/testify/assert"
 )
 
 // Point defines a single point measurement
@@ -22,7 +26,9 @@ func (p *Point) String() string {
 // Accumulator defines a mocked out accumulator
 type Accumulator struct {
     sync.Mutex
+
     Points []*Point
+    debug  bool
 }
 
 // Add adds a measurement point to the accumulator
@@ -49,6 +55,10 @@ func (a *Accumulator) AddFields(
         tags = map[string]string{}
     }
 
+    if len(fields) == 0 {
+        return
+    }
+
     var t time.Time
     if len(timestamp) > 0 {
         t = timestamp[0]
@@ -56,6 +66,14 @@ func (a *Accumulator) AddFields(
         t = time.Now()
     }
 
+    if a.debug {
+        pretty, _ := json.MarshalIndent(fields, "", " ")
+        prettyTags, _ := json.MarshalIndent(tags, "", " ")
+        msg := fmt.Sprintf("Adding Measurement [%s]\nFields:%s\nTags:%s\n",
+            measurement, string(pretty), string(prettyTags))
+        fmt.Print(msg)
+    }
+
     p := &Point{
         Measurement: measurement,
         Fields:      fields,
@@ -63,10 +81,7 @@ func (a *Accumulator) AddFields(
         Time:        t,
     }
 
-    a.Points = append(
-        a.Points,
-        p,
-    )
+    a.Points = append(a.Points, p)
 }
 
 func (a *Accumulator) SetDefaultTags(tags map[string]string) {
@@ -88,11 +103,12 @@ func (a *Accumulator) SetPrefix(prefix string) {
 
 func (a *Accumulator) Debug() bool {
     // stub for implementing Accumulator interface.
-    return true
+    return a.debug
 }
 
 func (a *Accumulator) SetDebug(debug bool) {
     // stub for implementing Accumulator interface.
+    a.debug = debug
 }
 
 // Get gets the specified measurement point from the accumulator
@@ -106,71 +122,24 @@ func (a *Accumulator) Get(measurement string) (*Point, bool) {
     return nil, false
 }
 
-// CheckValue calls CheckFieldsValue passing a single-value map as fields
-func (a *Accumulator) CheckValue(measurement string, val interface{}) bool {
-    return a.CheckFieldsValue(measurement, map[string]interface{}{"value": val})
-}
-
-// CheckValue checks that the accumulators point for the given measurement
-// is the same as the given value.
-func (a *Accumulator) CheckFieldsValue(measurement string, fields map[string]interface{}) bool {
-    for _, p := range a.Points {
-        if p.Measurement == measurement {
-            if reflect.DeepEqual(fields, p.Fields) {
-                return true
-            } else {
-                fmt.Printf("Measurement %s Failure, expected: %v, got %v\n",
-                    measurement, fields, p.Fields)
-                return false
-            }
+// NFields returns the total number of fields in the accumulator, across all
+// measurements
+func (a *Accumulator) NFields() int {
+    counter := 0
+    for _, pt := range a.Points {
+        for _, _ = range pt.Fields {
+            counter++
         }
     }
-    fmt.Printf("Measurement %s, fields %s not found\n", measurement, fields)
-    return false
+    return counter
 }
 
-// CheckTaggedValue calls ValidateTaggedValue
-func (a *Accumulator) CheckTaggedValue(
-    measurement string,
-    val interface{},
-    tags map[string]string,
-) bool {
-    return a.ValidateTaggedValue(measurement, val, tags) == nil
-}
-
-// ValidateTaggedValue calls ValidateTaggedFieldsValue passing a single-value map as fields
-func (a *Accumulator) ValidateTaggedValue(
-    measurement string,
-    val interface{},
-    tags map[string]string,
-) error {
-    return a.ValidateTaggedFieldsValue(measurement, map[string]interface{}{"value": val}, tags)
-}
-
-// ValidateValue calls ValidateTaggedValue
-func (a *Accumulator) ValidateValue(measurement string, val interface{}) error {
-    return a.ValidateTaggedValue(measurement, val, nil)
-}
-
-// CheckTaggedFieldsValue calls ValidateTaggedFieldsValue
-func (a *Accumulator) CheckTaggedFieldsValue(
+func (a *Accumulator) AssertContainsTaggedFields(
+    t *testing.T,
     measurement string,
     fields map[string]interface{},
    tags map[string]string,
-) bool {
-    return a.ValidateTaggedFieldsValue(measurement, fields, tags) == nil
-}
-
-// ValidateTaggedValue validates that the given measurement and value exist
-// in the accumulator and with the given tags.
-func (a *Accumulator) ValidateTaggedFieldsValue(
-    measurement string,
-    fields map[string]interface{},
-    tags map[string]string,
-) error {
-    if tags == nil {
-        tags = map[string]string{}
-    }
+) {
     for _, p := range a.Points {
         if !reflect.DeepEqual(tags, p.Tags) {
             continue
@@ -178,53 +147,50 @@ func (a *Accumulator) ValidateTaggedFieldsValue(
 
         if p.Measurement == measurement {
             if !reflect.DeepEqual(fields, p.Fields) {
-                return fmt.Errorf("%v != %v ", fields, p.Fields)
+                pActual, _ := json.MarshalIndent(p.Fields, "", " ")
+                pExp, _ := json.MarshalIndent(fields, "", " ")
+                msg := fmt.Sprintf("Actual:\n%s\n(%T) \nExpected:\n%s\n(%T)",
+                    string(pActual), p.Fields, string(pExp), fields)
+                assert.Fail(t, msg)
             }
-            return nil
+            return
         }
     }
-
-    return fmt.Errorf("unknown measurement %s with tags %v", measurement, tags)
+    msg := fmt.Sprintf("unknown measurement %s with tags %v", measurement, tags)
+    assert.Fail(t, msg)
 }
 
-// ValidateFieldsValue calls ValidateTaggedFieldsValue
-func (a *Accumulator) ValidateFieldsValue(
+func (a *Accumulator) AssertContainsFields(
+    t *testing.T,
     measurement string,
     fields map[string]interface{},
-) error {
-    return a.ValidateTaggedValue(measurement, fields, nil)
-}
-
-func (a *Accumulator) ValidateTaggedFields(
-    measurement string,
-    fields map[string]interface{},
-    tags map[string]string,
-) error {
-    if tags == nil {
-        tags = map[string]string{}
-    }
+) {
     for _, p := range a.Points {
-        if !reflect.DeepEqual(tags, p.Tags) {
-            continue
-        }
-
         if p.Measurement == measurement {
             if !reflect.DeepEqual(fields, p.Fields) {
-                return fmt.Errorf("%v (%T) != %v (%T)",
-                    p.Fields, p.Fields, fields, fields)
+                pActual, _ := json.MarshalIndent(p.Fields, "", " ")
+                pExp, _ := json.MarshalIndent(fields, "", " ")
+                msg := fmt.Sprintf("Actual:\n%s\n(%T) \nExpected:\n%s\n(%T)",
+                    string(pActual), p.Fields, string(pExp), fields)
+                assert.Fail(t, msg)
            }
-            return nil
+            return
        }
    }
-    return fmt.Errorf("unknown measurement %s with tags %v", measurement, tags)
+    msg := fmt.Sprintf("unknown measurement %s", measurement)
+    assert.Fail(t, msg)
 }
 
 // HasIntValue returns true if the measurement has an Int value
-func (a *Accumulator) HasIntValue(measurement string) bool {
+func (a *Accumulator) HasIntField(measurement string, field string) bool {
     for _, p := range a.Points {
         if p.Measurement == measurement {
-            _, ok := p.Fields["value"].(int64)
-            return ok
+            for fieldname, value := range p.Fields {
+                if fieldname == field {
+                    _, ok := value.(int64)
+                    return ok
+                }
+            }
         }
     }
 
@@ -232,11 +198,15 @@ func (a *Accumulator) HasIntValue(measurement string) bool {
 }
 
 // HasUIntValue returns true if the measurement has a UInt value
-func (a *Accumulator) HasUIntValue(measurement string) bool {
+func (a *Accumulator) HasUIntField(measurement string, field string) bool {
     for _, p := range a.Points {
         if p.Measurement == measurement {
-            _, ok := p.Fields["value"].(uint64)
-            return ok
+            for fieldname, value := range p.Fields {
+                if fieldname == field {
+                    _, ok := value.(uint64)
+                    return ok
+                }
+            }
         }
     }
 
@@ -244,11 +214,15 @@ func (a *Accumulator) HasUIntValue(measurement string) bool {
 }
 
 // HasFloatValue returns true if the given measurement has a float value
-func (a *Accumulator) HasFloatValue(measurement string) bool {
+func (a *Accumulator) HasFloatField(measurement string, field string) bool {
     for _, p := range a.Points {
         if p.Measurement == measurement {
-            _, ok := p.Fields["value"].(float64)
-            return ok
+            for fieldname, value := range p.Fields {
+                if fieldname == field {
+                    _, ok := value.(float64)
+                    return ok
+                }
+            }
         }
     }
 
diff --git a/testutil/testutil.go b/testutil/testutil.go
index 581220299..436b57361 100644
--- a/testutil/testutil.go
+++ b/testutil/testutil.go
@@ -6,7 +6,7 @@ import (
     "os"
     "time"
 
-    "github.com/influxdb/influxdb/client/v2"
+    "github.com/influxdata/influxdb/client/v2"
 )
 
 var localhost = "localhost"
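Taken together, the new helpers change the shape of a typical plugin test: per-field type checks replace the old single-"value" lookups, and whole points are asserted in one call. A minimal, self-contained sketch — the "mem" measurement, its fields, and their values are illustrative stand-ins; only the helper signatures are taken from this diff:

```go
package example_test

import (
	"testing"

	"github.com/influxdata/telegraf/testutil"
)

func TestAccumulatorHelpers(t *testing.T) {
	var acc testutil.Accumulator

	// A plugin's Gather would normally do this; done by hand here.
	acc.AddFields("mem", map[string]interface{}{
		"total": int64(12400),
		"free":  int64(6412),
	}, map[string]string{"host": "example"})

	// Field-level type check: replaces the old HasIntValue("mem").
	if !acc.HasIntField("mem", "total") {
		t.Error("expected mem.total to be an int64 field")
	}

	// NFields counts fields across all accumulated points.
	if acc.NFields() != 2 {
		t.Errorf("expected 2 fields, got %d", acc.NFields())
	}

	// Asserts the complete field map and tag set of the point at once
	// (reflect.DeepEqual under the hood), failing via testify on mismatch.
	acc.AssertContainsTaggedFields(t, "mem", map[string]interface{}{
		"total": int64(12400),
		"free":  int64(6412),
	}, map[string]string{"host": "example"})
}
```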