Compare commits
28 Commits
v0.10.1
...
v0.3.0-bet
| Author | SHA1 | Date |
|---|---|---|
|  | 069cb9766b |  |
|  | 8b54c73ae4 |  |
|  | c9ef073fba |  |
|  | 15f66d7d1b |  |
|  | b0f79f43ec |  |
|  | c584129758 |  |
|  | d1930c90b5 |  |
|  | 1e76e36df2 |  |
|  | a73b5257dc |  |
|  | c16be04ca7 |  |
|  | 5513275f2c |  |
|  | 3a7b1688a3 |  |
|  | 35d5c7bae3 |  |
|  | 60b6693ae3 |  |
|  | c1e1f2ace4 |  |
|  | 6698d195d8 |  |
|  | 23b21ca86a |  |
|  | 56e14e4731 |  |
|  | 7deb339b76 |  |
|  | 0e55c371b7 |  |
|  | f284c8c154 |  |
|  | e3b314cacb |  |
|  | 9fce094b36 |  |
|  | 319c363c8e |  |
|  | 40d84accee |  |
|  | 3fc43df84e |  |
|  | 59f804d77a |  |
`.gitignore` (vendored) — 4 changes

@@ -1,6 +1,4 @@
tivan
.vagrant
/telegraf
telegraf
.idea
*~
*#
`CHANGELOG.md` — 375 changes
@@ -1,122 +1,51 @@
|
||||
## v0.10.2 [unreleased]
|
||||
## v0.3.0 [unreleased]
|
||||
|
||||
### Release Notes
|
||||
|
||||
### Features
|
||||
|
||||
### Bugfixes
|
||||
|
||||
## v0.10.1 [2016-01-27]
|
||||
|
||||
### Release Notes
|
||||
|
||||
- Telegraf now keeps a fixed-length buffer of metrics per-output. This buffer
|
||||
defaults to 10,000 metrics, and is adjustable. The buffer is cleared when a
|
||||
successful write to that output occurs.
|
||||
- The docker plugin has been significantly overhauled to add more metrics
|
||||
and allow for docker-machine (incl OSX) support.
|
||||
[See the readme](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/docker/README.md)
|
||||
for the latest measurements, fields, and tags. There is also now support for
|
||||
specifying a docker endpoint to get metrics from.
|
||||
|
||||
### Features
|
||||
- [#509](https://github.com/influxdata/telegraf/pull/509): Flatten JSON arrays with indices. Thanks @psilva261!
|
||||
- [#512](https://github.com/influxdata/telegraf/pull/512): Python 3 build script, add lsof dep to package. Thanks @Ormod!
|
||||
- [#475](https://github.com/influxdata/telegraf/pull/475): Add response time to httpjson plugin. Thanks @titilambert!
|
||||
- [#519](https://github.com/influxdata/telegraf/pull/519): Added a sensors input based on lm-sensors. Thanks @md14454!
|
||||
- [#467](https://github.com/influxdata/telegraf/issues/467): Add option to disable statsd measurement name conversion.
|
||||
- [#534](https://github.com/influxdata/telegraf/pull/534): NSQ input plugin. Thanks @allingeek!
|
||||
- [#494](https://github.com/influxdata/telegraf/pull/494): Graphite output plugin. Thanks @titilambert!
|
||||
- AMQP SSL support. Thanks @ekini!
|
||||
- [#539](https://github.com/influxdata/telegraf/pull/539): Reload config on SIGHUP. Thanks @titilambert!
|
||||
- [#522](https://github.com/influxdata/telegraf/pull/522): Phusion passenger input plugin. Thanks @kureikain!
|
||||
- [#541](https://github.com/influxdata/telegraf/pull/541): Kafka output TLS cert support. Thanks @Ormod!
|
||||
- [#551](https://github.com/influxdata/telegraf/pull/551): Statsd UDP read packet size now defaults to 1500 bytes, and is configurable.
|
||||
- [#552](https://github.com/influxdata/telegraf/pull/552): Support for collection interval jittering.
|
||||
- [#484](https://github.com/influxdata/telegraf/issues/484): Include usage percent with procstat metrics.
|
||||
- [#553](https://github.com/influxdata/telegraf/pull/553): Amazon CloudWatch output. thanks @skwong2!
|
||||
- [#503](https://github.com/influxdata/telegraf/pull/503): Support docker endpoint configuration.
|
||||
- [#563](https://github.com/influxdata/telegraf/pull/563): Docker plugin overhaul.
|
||||
- [#285](https://github.com/influxdata/telegraf/issues/285): Fixed-size buffer of points.
|
||||
- [#546](https://github.com/influxdata/telegraf/pull/546): SNMP Input plugin. Thanks @titilambert!
|
||||
- [#589](https://github.com/influxdata/telegraf/pull/589): Microsoft SQL Server input plugin. Thanks @zensqlmonitor!
|
||||
- [#573](https://github.com/influxdata/telegraf/pull/573): Github webhooks consumer input. Thanks @jackzampolin!
|
||||
- [#471](https://github.com/influxdata/telegraf/pull/471): httpjson request headers. Thanks @asosso!
|
||||
|
||||
### Bugfixes
|
||||
- [#506](https://github.com/influxdata/telegraf/pull/506): Ping input doesn't return response time metric when timeout. Thanks @titilambert!
|
||||
- [#508](https://github.com/influxdata/telegraf/pull/508): Fix prometheus cardinality issue with the `net` plugin
|
||||
- [#499](https://github.com/influxdata/telegraf/issues/499) & [#502](https://github.com/influxdata/telegraf/issues/502): php fpm unix socket and other fixes, thanks @kureikain!
|
||||
- [#543](https://github.com/influxdata/telegraf/issues/543): Statsd Packet size sometimes truncated.
|
||||
- [#440](https://github.com/influxdata/telegraf/issues/440): Don't query filtered devices for disk stats.
|
||||
- [#463](https://github.com/influxdata/telegraf/issues/463): Docker plugin not working on AWS Linux
|
||||
- [#568](https://github.com/influxdata/telegraf/issues/568): Multiple output race condition.
|
||||
- [#585](https://github.com/influxdata/telegraf/pull/585): Log stack trace and continue on Telegraf panic. Thanks @wutaizeng!
|
||||
|
||||
## v0.10.0 [2016-01-12]
|
||||
|
||||
### Release Notes
|
||||
- Linux packages have been taken out of `opt`, the binary is now in `/usr/bin`
|
||||
and configuration files are in `/etc/telegraf`
|
||||
- **breaking change** `plugins` have been renamed to `inputs`. This was done because
|
||||
`plugins` is too generic, as there are now also "output plugins", and will likely
|
||||
be "aggregator plugins" and "filter plugins" in the future. Additionally,
|
||||
`inputs/` and `outputs/` directories have been placed in the root-level `plugins/`
|
||||
directory.
|
||||
- **breaking change** the `io` plugin has been renamed `diskio`
|
||||
- **breaking change** plugin measurements aggregated into a single measurement.
|
||||
- **breaking change** Plugin measurements aggregated into a single measurement.
|
||||
- **breaking change** `jolokia` plugin: must use global tag/drop/pass parameters
|
||||
for configuration.
|
||||
- **breaking change** `twemproxy` plugin: `prefix` option removed.
|
||||
- **breaking change** `procstat` cpu measurements are now prepended with `cpu_time_`
|
||||
instead of only `cpu_`
|
||||
- **breaking change** some command-line flags have been renamed to separate words.
|
||||
`-configdirectory` -> `-config-directory`, `-filter` -> `-input-filter`,
|
||||
`-outputfilter` -> `-output-filter`
|
||||
- `twemproxy` plugin: `prefix` option removed.
|
||||
- `procstat` cpu measurements are now prepended with `cpu_time_` instead of
|
||||
only `cpu_`
|
||||
- The prometheus plugin schema has not been changed (measurements have not been
|
||||
aggregated).
|
||||
|
||||
### Packaging change note:
|
||||
|
||||
RHEL/CentOS users upgrading from 0.2.x to 0.10.0 will probably have their
|
||||
configurations overwritten by the upgrade. There is a backup stored at
|
||||
/etc/telegraf/telegraf.conf.$(date +%s).backup.
|
||||
|
||||
### Features
|
||||
- Plugin measurements aggregated into a single measurement.
|
||||
- Added ability to specify per-plugin tags
|
||||
- Added ability to specify per-plugin measurement suffix and prefix.
|
||||
(`name_prefix` and `name_suffix`)
|
||||
- Added ability to override base plugin measurement name. (`name_override`)
|
||||
- Added ability to override base plugin name. (`name_override`)
|
||||
|
||||
### Bugfixes
|
||||
|
||||
## v0.2.5 [unreleased]
|
||||
|
||||
### Features
|
||||
- [#427](https://github.com/influxdata/telegraf/pull/427): zfs plugin: pool stats added. Thanks @allenpetersen!
|
||||
- [#428](https://github.com/influxdata/telegraf/pull/428): Amazon Kinesis output. Thanks @jimmystewpot!
|
||||
- [#449](https://github.com/influxdata/telegraf/pull/449): influxdb plugin, thanks @mark-rushakoff
|
||||
- [#427](https://github.com/influxdb/telegraf/pull/427): zfs plugin: pool stats added. Thanks @allenpetersen!
|
||||
- [#428](https://github.com/influxdb/telegraf/pull/428): Amazon Kinesis output. Thanks @jimmystewpot!
|
||||
- [#449](https://github.com/influxdb/telegraf/pull/449): influxdb plugin, thanks @mark-rushakoff
|
||||
|
||||
### Bugfixes
|
||||
- [#430](https://github.com/influxdata/telegraf/issues/430): Network statistics removed in elasticsearch 2.1. Thanks @jipperinbham!
|
||||
- [#452](https://github.com/influxdata/telegraf/issues/452): Elasticsearch open file handles error. Thanks @jipperinbham!
|
||||
- [#430](https://github.com/influxdb/telegraf/issues/430): Network statistics removed in elasticsearch 2.1. Thanks @jipperinbham!
|
||||
- [#452](https://github.com/influxdb/telegraf/issues/452): Elasticsearch open file handles error. Thanks @jipperinbham!
|
||||
|
||||
## v0.2.4 [2015-12-08]
|
||||
|
||||
### Features
|
||||
- [#412](https://github.com/influxdata/telegraf/pull/412): Additional memcached stats. Thanks @mgresser!
|
||||
- [#410](https://github.com/influxdata/telegraf/pull/410): Additional redis metrics. Thanks @vlaadbrain!
|
||||
- [#414](https://github.com/influxdata/telegraf/issues/414): Jolokia plugin auth parameters
|
||||
- [#415](https://github.com/influxdata/telegraf/issues/415): memcached plugin: support unix sockets
|
||||
- [#418](https://github.com/influxdata/telegraf/pull/418): memcached plugin additional unit tests.
|
||||
- [#408](https://github.com/influxdata/telegraf/pull/408): MailChimp plugin.
|
||||
- [#382](https://github.com/influxdata/telegraf/pull/382): Add system wide network protocol stats to `net` plugin.
|
||||
- [#401](https://github.com/influxdata/telegraf/pull/401): Support pass/drop/tagpass/tagdrop for outputs. Thanks @oldmantaiter!
|
||||
- [#412](https://github.com/influxdb/telegraf/pull/412): Additional memcached stats. Thanks @mgresser!
|
||||
- [#410](https://github.com/influxdb/telegraf/pull/410): Additional redis metrics. Thanks @vlaadbrain!
|
||||
- [#414](https://github.com/influxdb/telegraf/issues/414): Jolokia plugin auth parameters
|
||||
- [#415](https://github.com/influxdb/telegraf/issues/415): memcached plugin: support unix sockets
|
||||
- [#418](https://github.com/influxdb/telegraf/pull/418): memcached plugin additional unit tests.
|
||||
- [#408](https://github.com/influxdb/telegraf/pull/408): MailChimp plugin.
|
||||
- [#382](https://github.com/influxdb/telegraf/pull/382): Add system wide network protocol stats to `net` plugin.
|
||||
- [#401](https://github.com/influxdb/telegraf/pull/401): Support pass/drop/tagpass/tagdrop for outputs. Thanks @oldmantaiter!
|
||||
|
||||
### Bugfixes
|
||||
- [#405](https://github.com/influxdata/telegraf/issues/405): Prometheus output cardinality issue
|
||||
- [#388](https://github.com/influxdata/telegraf/issues/388): Fix collection hangup when cpu times decrement.
|
||||
- [#405](https://github.com/influxdb/telegraf/issues/405): Prometheus output cardinality issue
|
||||
- [#388](https://github.com/influxdb/telegraf/issues/388): Fix collection hangup when cpu times decrement.
|
||||
|
||||
## v0.2.3 [2015-11-30]
|
||||
|
||||
@@ -131,11 +60,11 @@ functional.
|
||||
same type can be specified, like this:
|
||||
|
||||
```
|
||||
[[inputs.cpu]]
|
||||
[[plugins.cpu]]
|
||||
percpu = false
|
||||
totalcpu = true
|
||||
|
||||
[[inputs.cpu]]
|
||||
[[plugins.cpu]]
|
||||
percpu = true
|
||||
totalcpu = false
|
||||
drop = ["cpu_time"]
|
||||
@@ -145,15 +74,15 @@ same type can be specified, like this:
|
||||
- Aerospike plugin: tag changed from `host` -> `aerospike_host`
|
||||
|
||||
### Features
|
||||
- [#379](https://github.com/influxdata/telegraf/pull/379): Riemann output, thanks @allenj!
|
||||
- [#375](https://github.com/influxdata/telegraf/pull/375): kafka_consumer service plugin.
|
||||
- [#392](https://github.com/influxdata/telegraf/pull/392): Procstat plugin can now accept pgrep -f pattern, thanks @ecarreras!
|
||||
- [#383](https://github.com/influxdata/telegraf/pull/383): Specify plugins as a list.
|
||||
- [#354](https://github.com/influxdata/telegraf/pull/354): Add ability to specify multiple metrics in one statsd line. Thanks @MerlinDMC!
|
||||
- [#379](https://github.com/influxdb/telegraf/pull/379): Riemann output, thanks @allenj!
|
||||
- [#375](https://github.com/influxdb/telegraf/pull/375): kafka_consumer service plugin.
|
||||
- [#392](https://github.com/influxdb/telegraf/pull/392): Procstat plugin can now accept pgrep -f pattern, thanks @ecarreras!
|
||||
- [#383](https://github.com/influxdb/telegraf/pull/383): Specify plugins as a list.
|
||||
- [#354](https://github.com/influxdb/telegraf/pull/354): Add ability to specify multiple metrics in one statsd line. Thanks @MerlinDMC!
|
||||
|
||||
### Bugfixes
|
||||
- [#371](https://github.com/influxdata/telegraf/issues/371): Kafka consumer plugin not functioning.
|
||||
- [#389](https://github.com/influxdata/telegraf/issues/389): NaN value panic
|
||||
- [#371](https://github.com/influxdb/telegraf/issues/371): Kafka consumer plugin not functioning.
|
||||
- [#389](https://github.com/influxdb/telegraf/issues/389): NaN value panic
|
||||
|
||||
## v0.2.2 [2015-11-18]
|
||||
|
||||
@@ -162,7 +91,7 @@ same type can be specified, like this:
|
||||
lists of servers/URLs. 0.2.2 is being released solely to fix that bug
|
||||
|
||||
### Bugfixes
|
||||
- [#377](https://github.com/influxdata/telegraf/pull/377): Fix for duplicate slices in inputs.
|
||||
- [#377](https://github.com/influxdb/telegraf/pull/377): Fix for duplicate slices in plugins.
|
||||
|
||||
## v0.2.1 [2015-11-16]
|
||||
|
||||
@@ -179,22 +108,22 @@ changed to just run docker commands in the Makefile. See `make docker-run` and
|
||||
same type.
|
||||
|
||||
### Features
|
||||
- [#325](https://github.com/influxdata/telegraf/pull/325): NSQ output. Thanks @jrxFive!
|
||||
- [#318](https://github.com/influxdata/telegraf/pull/318): Prometheus output. Thanks @oldmantaiter!
|
||||
- [#338](https://github.com/influxdata/telegraf/pull/338): Restart Telegraf on package upgrade. Thanks @linsomniac!
|
||||
- [#337](https://github.com/influxdata/telegraf/pull/337): Jolokia plugin, thanks @saiello!
|
||||
- [#350](https://github.com/influxdata/telegraf/pull/350): Amon output.
|
||||
- [#365](https://github.com/influxdata/telegraf/pull/365): Twemproxy plugin by @codeb2cc
|
||||
- [#317](https://github.com/influxdata/telegraf/issues/317): ZFS plugin, thanks @cornerot!
|
||||
- [#364](https://github.com/influxdata/telegraf/pull/364): Support InfluxDB UDP output.
|
||||
- [#370](https://github.com/influxdata/telegraf/pull/370): Support specifying multiple outputs, as lists.
|
||||
- [#372](https://github.com/influxdata/telegraf/pull/372): Remove gosigar and update go-dockerclient for FreeBSD support. Thanks @MerlinDMC!
|
||||
- [#325](https://github.com/influxdb/telegraf/pull/325): NSQ output. Thanks @jrxFive!
|
||||
- [#318](https://github.com/influxdb/telegraf/pull/318): Prometheus output. Thanks @oldmantaiter!
|
||||
- [#338](https://github.com/influxdb/telegraf/pull/338): Restart Telegraf on package upgrade. Thanks @linsomniac!
|
||||
- [#337](https://github.com/influxdb/telegraf/pull/337): Jolokia plugin, thanks @saiello!
|
||||
- [#350](https://github.com/influxdb/telegraf/pull/350): Amon output.
|
||||
- [#365](https://github.com/influxdb/telegraf/pull/365): Twemproxy plugin by @codeb2cc
|
||||
- [#317](https://github.com/influxdb/telegraf/issues/317): ZFS plugin, thanks @cornerot!
|
||||
- [#364](https://github.com/influxdb/telegraf/pull/364): Support InfluxDB UDP output.
|
||||
- [#370](https://github.com/influxdb/telegraf/pull/370): Support specifying multiple outputs, as lists.
|
||||
- [#372](https://github.com/influxdb/telegraf/pull/372): Remove gosigar and update go-dockerclient for FreeBSD support. Thanks @MerlinDMC!
|
||||
|
||||
### Bugfixes
|
||||
- [#331](https://github.com/influxdata/telegraf/pull/331): Dont overwrite host tag in redis plugin.
|
||||
- [#336](https://github.com/influxdata/telegraf/pull/336): Mongodb plugin should take 2 measurements.
|
||||
- [#351](https://github.com/influxdata/telegraf/issues/317): Fix continual "CREATE DATABASE" in writes
|
||||
- [#360](https://github.com/influxdata/telegraf/pull/360): Apply prefix before ShouldPass check. Thanks @sotfo!
|
||||
- [#331](https://github.com/influxdb/telegraf/pull/331): Dont overwrite host tag in redis plugin.
|
||||
- [#336](https://github.com/influxdb/telegraf/pull/336): Mongodb plugin should take 2 measurements.
|
||||
- [#351](https://github.com/influxdb/telegraf/issues/317): Fix continual "CREATE DATABASE" in writes
|
||||
- [#360](https://github.com/influxdb/telegraf/pull/360): Apply prefix before ShouldPass check. Thanks @sotfo!
|
||||
|
||||
## v0.2.0 [2015-10-27]
|
||||
|
||||
@@ -215,38 +144,38 @@ be controlled via the `round_interval` and `flush_jitter` config options.
|
||||
- Telegraf will now retry metric flushes twice
|
||||
|
||||
### Features
|
||||
- [#205](https://github.com/influxdata/telegraf/issues/205): Include per-db redis keyspace info
|
||||
- [#226](https://github.com/influxdata/telegraf/pull/226): Add timestamps to points in Kafka/AMQP outputs. Thanks @ekini
|
||||
- [#90](https://github.com/influxdata/telegraf/issues/90): Add Docker labels to tags in docker plugin
|
||||
- [#223](https://github.com/influxdata/telegraf/pull/223): Add port tag to nginx plugin. Thanks @neezgee!
|
||||
- [#227](https://github.com/influxdata/telegraf/pull/227): Add command intervals to exec plugin. Thanks @jpalay!
|
||||
- [#241](https://github.com/influxdata/telegraf/pull/241): MQTT Output. Thanks @shirou!
|
||||
- [#205](https://github.com/influxdb/telegraf/issues/205): Include per-db redis keyspace info
|
||||
- [#226](https://github.com/influxdb/telegraf/pull/226): Add timestamps to points in Kafka/AMQP outputs. Thanks @ekini
|
||||
- [#90](https://github.com/influxdb/telegraf/issues/90): Add Docker labels to tags in docker plugin
|
||||
- [#223](https://github.com/influxdb/telegraf/pull/223): Add port tag to nginx plugin. Thanks @neezgee!
|
||||
- [#227](https://github.com/influxdb/telegraf/pull/227): Add command intervals to exec plugin. Thanks @jpalay!
|
||||
- [#241](https://github.com/influxdb/telegraf/pull/241): MQTT Output. Thanks @shirou!
|
||||
- Memory plugin: cached and buffered measurements re-added
|
||||
- Logging: additional logging for each collection interval, track the number
|
||||
of metrics collected and from how many inputs.
|
||||
- [#240](https://github.com/influxdata/telegraf/pull/240): procstat plugin, thanks @ranjib!
|
||||
- [#244](https://github.com/influxdata/telegraf/pull/244): netstat plugin, thanks @shirou!
|
||||
- [#262](https://github.com/influxdata/telegraf/pull/262): zookeeper plugin, thanks @jrxFive!
|
||||
- [#237](https://github.com/influxdata/telegraf/pull/237): statsd service plugin, thanks @sparrc
|
||||
- [#273](https://github.com/influxdata/telegraf/pull/273): puppet agent plugin, thats @jrxFive!
|
||||
- [#280](https://github.com/influxdata/telegraf/issues/280): Use InfluxDB client v2.
|
||||
- [#281](https://github.com/influxdata/telegraf/issues/281): Eliminate need to deep copy Batch Points.
|
||||
- [#286](https://github.com/influxdata/telegraf/issues/286): bcache plugin, thanks @cornerot!
|
||||
- [#287](https://github.com/influxdata/telegraf/issues/287): Batch AMQP output, thanks @ekini!
|
||||
- [#301](https://github.com/influxdata/telegraf/issues/301): Collect on even intervals
|
||||
- [#298](https://github.com/influxdata/telegraf/pull/298): Support retrying output writes
|
||||
- [#300](https://github.com/influxdata/telegraf/issues/300): aerospike plugin. Thanks @oldmantaiter!
|
||||
- [#322](https://github.com/influxdata/telegraf/issues/322): Librato output. Thanks @jipperinbham!
|
||||
of metrics collected and from how many plugins.
|
||||
- [#240](https://github.com/influxdb/telegraf/pull/240): procstat plugin, thanks @ranjib!
|
||||
- [#244](https://github.com/influxdb/telegraf/pull/244): netstat plugin, thanks @shirou!
|
||||
- [#262](https://github.com/influxdb/telegraf/pull/262): zookeeper plugin, thanks @jrxFive!
|
||||
- [#237](https://github.com/influxdb/telegraf/pull/237): statsd service plugin, thanks @sparrc
|
||||
- [#273](https://github.com/influxdb/telegraf/pull/273): puppet agent plugin, thats @jrxFive!
|
||||
- [#280](https://github.com/influxdb/telegraf/issues/280): Use InfluxDB client v2.
|
||||
- [#281](https://github.com/influxdb/telegraf/issues/281): Eliminate need to deep copy Batch Points.
|
||||
- [#286](https://github.com/influxdb/telegraf/issues/286): bcache plugin, thanks @cornerot!
|
||||
- [#287](https://github.com/influxdb/telegraf/issues/287): Batch AMQP output, thanks @ekini!
|
||||
- [#301](https://github.com/influxdb/telegraf/issues/301): Collect on even intervals
|
||||
- [#298](https://github.com/influxdb/telegraf/pull/298): Support retrying output writes
|
||||
- [#300](https://github.com/influxdb/telegraf/issues/300): aerospike plugin. Thanks @oldmantaiter!
|
||||
- [#322](https://github.com/influxdb/telegraf/issues/322): Librato output. Thanks @jipperinbham!
|
||||
|
||||
### Bugfixes
|
||||
- [#228](https://github.com/influxdata/telegraf/pull/228): New version of package will replace old one. Thanks @ekini!
|
||||
- [#232](https://github.com/influxdata/telegraf/pull/232): Fix bashism run during deb package installation. Thanks @yankcrime!
|
||||
- [#261](https://github.com/influxdata/telegraf/issues/260): RabbitMQ panics if wrong credentials given. Thanks @ekini!
|
||||
- [#245](https://github.com/influxdata/telegraf/issues/245): Document Exec plugin example. Thanks @ekini!
|
||||
- [#264](https://github.com/influxdata/telegraf/issues/264): logrotate config file fixes. Thanks @linsomniac!
|
||||
- [#290](https://github.com/influxdata/telegraf/issues/290): Fix some plugins sending their values as strings.
|
||||
- [#289](https://github.com/influxdata/telegraf/issues/289): Fix accumulator panic on nil tags.
|
||||
- [#302](https://github.com/influxdata/telegraf/issues/302): Fix `[tags]` getting applied, thanks @gotyaoi!
|
||||
- [#228](https://github.com/influxdb/telegraf/pull/228): New version of package will replace old one. Thanks @ekini!
|
||||
- [#232](https://github.com/influxdb/telegraf/pull/232): Fix bashism run during deb package installation. Thanks @yankcrime!
|
||||
- [#261](https://github.com/influxdb/telegraf/issues/260): RabbitMQ panics if wrong credentials given. Thanks @ekini!
|
||||
- [#245](https://github.com/influxdb/telegraf/issues/245): Document Exec plugin example. Thanks @ekini!
|
||||
- [#264](https://github.com/influxdb/telegraf/issues/264): logrotate config file fixes. Thanks @linsomniac!
|
||||
- [#290](https://github.com/influxdb/telegraf/issues/290): Fix some plugins sending their values as strings.
|
||||
- [#289](https://github.com/influxdb/telegraf/issues/289): Fix accumulator panic on nil tags.
|
||||
- [#302](https://github.com/influxdb/telegraf/issues/302): Fix `[tags]` getting applied, thanks @gotyaoi!
|
||||
|
||||
## v0.1.9 [2015-09-22]
|
||||
|
||||
@@ -256,7 +185,7 @@ will still be backwards compatible if only `url` is specified.
|
||||
- The -test flag will now output two metric collections
|
||||
- Support for filtering telegraf outputs on the CLI -- Telegraf will now
|
||||
allow filtering of output sinks on the command-line using the `-outputfilter`
|
||||
flag, much like how the `-filter` flag works for inputs.
|
||||
flag, much like how the `-filter` flag works for plugins.
|
||||
- Support for filtering on config-file creation -- Telegraf now supports
|
||||
filtering to -sample-config command. You can now run
|
||||
`telegraf -sample-config -filter cpu -outputfilter influxdb` to get a config
|
||||
@@ -272,27 +201,27 @@ have been renamed for consistency. Some measurements have also been removed from
|
||||
re-added in a "verbose" mode if there is demand for it.
|
||||
|
||||
### Features
|
||||
- [#143](https://github.com/influxdata/telegraf/issues/143): InfluxDB clustering support
|
||||
- [#181](https://github.com/influxdata/telegraf/issues/181): Makefile GOBIN support. Thanks @Vye!
|
||||
- [#203](https://github.com/influxdata/telegraf/pull/200): AMQP output. Thanks @ekini!
|
||||
- [#182](https://github.com/influxdata/telegraf/pull/182): OpenTSDB output. Thanks @rplessl!
|
||||
- [#187](https://github.com/influxdata/telegraf/pull/187): Retry output sink connections on startup.
|
||||
- [#220](https://github.com/influxdata/telegraf/pull/220): Add port tag to apache plugin. Thanks @neezgee!
|
||||
- [#217](https://github.com/influxdata/telegraf/pull/217): Add filtering for output sinks
|
||||
- [#143](https://github.com/influxdb/telegraf/issues/143): InfluxDB clustering support
|
||||
- [#181](https://github.com/influxdb/telegraf/issues/181): Makefile GOBIN support. Thanks @Vye!
|
||||
- [#203](https://github.com/influxdb/telegraf/pull/200): AMQP output. Thanks @ekini!
|
||||
- [#182](https://github.com/influxdb/telegraf/pull/182): OpenTSDB output. Thanks @rplessl!
|
||||
- [#187](https://github.com/influxdb/telegraf/pull/187): Retry output sink connections on startup.
|
||||
- [#220](https://github.com/influxdb/telegraf/pull/220): Add port tag to apache plugin. Thanks @neezgee!
|
||||
- [#217](https://github.com/influxdb/telegraf/pull/217): Add filtering for output sinks
|
||||
and filtering when specifying a config file.
|
||||
|
||||
### Bugfixes
|
||||
- [#170](https://github.com/influxdata/telegraf/issues/170): Systemd support
|
||||
- [#175](https://github.com/influxdata/telegraf/issues/175): Set write precision before gathering metrics
|
||||
- [#178](https://github.com/influxdata/telegraf/issues/178): redis plugin, multiple server thread hang bug
|
||||
- [#170](https://github.com/influxdb/telegraf/issues/170): Systemd support
|
||||
- [#175](https://github.com/influxdb/telegraf/issues/175): Set write precision before gathering metrics
|
||||
- [#178](https://github.com/influxdb/telegraf/issues/178): redis plugin, multiple server thread hang bug
|
||||
- Fix net plugin on darwin
|
||||
- [#84](https://github.com/influxdata/telegraf/issues/84): Fix docker plugin on CentOS. Thanks @neezgee!
|
||||
- [#189](https://github.com/influxdata/telegraf/pull/189): Fix mem_used_perc. Thanks @mced!
|
||||
- [#192](https://github.com/influxdata/telegraf/issues/192): Increase compatibility of postgresql plugin. Now supports versions 8.1+
|
||||
- [#203](https://github.com/influxdata/telegraf/issues/203): EL5 rpm support. Thanks @ekini!
|
||||
- [#206](https://github.com/influxdata/telegraf/issues/206): CPU steal/guest values wrong on linux.
|
||||
- [#212](https://github.com/influxdata/telegraf/issues/212): Add hashbang to postinstall script. Thanks @ekini!
|
||||
- [#212](https://github.com/influxdata/telegraf/issues/212): Fix makefile warning. Thanks @ekini!
|
||||
- [#84](https://github.com/influxdb/telegraf/issues/84): Fix docker plugin on CentOS. Thanks @neezgee!
|
||||
- [#189](https://github.com/influxdb/telegraf/pull/189): Fix mem_used_perc. Thanks @mced!
|
||||
- [#192](https://github.com/influxdb/telegraf/issues/192): Increase compatibility of postgresql plugin. Now supports versions 8.1+
|
||||
- [#203](https://github.com/influxdb/telegraf/issues/203): EL5 rpm support. Thanks @ekini!
|
||||
- [#206](https://github.com/influxdb/telegraf/issues/206): CPU steal/guest values wrong on linux.
|
||||
- [#212](https://github.com/influxdb/telegraf/issues/212): Add hashbang to postinstall script. Thanks @ekini!
|
||||
- [#212](https://github.com/influxdb/telegraf/issues/212): Fix makefile warning. Thanks @ekini!
|
||||
|
||||
## v0.1.8 [2015-09-04]
|
||||
|
||||
@@ -301,106 +230,106 @@ and filtering when specifying a config file.
|
||||
- Now using Go 1.5 to build telegraf
|
||||
|
||||
### Features
|
||||
- [#150](https://github.com/influxdata/telegraf/pull/150): Add Host Uptime metric to system plugin
|
||||
- [#158](https://github.com/influxdata/telegraf/pull/158): Apache Plugin. Thanks @KPACHbIuLLIAnO4
|
||||
- [#159](https://github.com/influxdata/telegraf/pull/159): Use second precision for InfluxDB writes
|
||||
- [#165](https://github.com/influxdata/telegraf/pull/165): Add additional metrics to mysql plugin. Thanks @nickscript0
|
||||
- [#162](https://github.com/influxdata/telegraf/pull/162): Write UTC by default, provide option
|
||||
- [#166](https://github.com/influxdata/telegraf/pull/166): Upload binaries to S3
|
||||
- [#169](https://github.com/influxdata/telegraf/pull/169): Ping plugin
|
||||
- [#150](https://github.com/influxdb/telegraf/pull/150): Add Host Uptime metric to system plugin
|
||||
- [#158](https://github.com/influxdb/telegraf/pull/158): Apache Plugin. Thanks @KPACHbIuLLIAnO4
|
||||
- [#159](https://github.com/influxdb/telegraf/pull/159): Use second precision for InfluxDB writes
|
||||
- [#165](https://github.com/influxdb/telegraf/pull/165): Add additional metrics to mysql plugin. Thanks @nickscript0
|
||||
- [#162](https://github.com/influxdb/telegraf/pull/162): Write UTC by default, provide option
|
||||
- [#166](https://github.com/influxdb/telegraf/pull/166): Upload binaries to S3
|
||||
- [#169](https://github.com/influxdb/telegraf/pull/169): Ping plugin
|
||||
|
||||
### Bugfixes
|
||||
|
||||
## v0.1.7 [2015-08-28]
|
||||
|
||||
### Features
|
||||
- [#38](https://github.com/influxdata/telegraf/pull/38): Kafka output producer.
|
||||
- [#133](https://github.com/influxdata/telegraf/pull/133): Add plugin.Gather error logging. Thanks @nickscript0!
|
||||
- [#136](https://github.com/influxdata/telegraf/issues/136): Add a -usage flag for printing usage of a single plugin.
|
||||
- [#137](https://github.com/influxdata/telegraf/issues/137): Memcached: fix when a value contains a space
|
||||
- [#138](https://github.com/influxdata/telegraf/issues/138): MySQL server address tag.
|
||||
- [#142](https://github.com/influxdata/telegraf/pull/142): Add Description and SampleConfig funcs to output interface
|
||||
- [#38](https://github.com/influxdb/telegraf/pull/38): Kafka output producer.
|
||||
- [#133](https://github.com/influxdb/telegraf/pull/133): Add plugin.Gather error logging. Thanks @nickscript0!
|
||||
- [#136](https://github.com/influxdb/telegraf/issues/136): Add a -usage flag for printing usage of a single plugin.
|
||||
- [#137](https://github.com/influxdb/telegraf/issues/137): Memcached: fix when a value contains a space
|
||||
- [#138](https://github.com/influxdb/telegraf/issues/138): MySQL server address tag.
|
||||
- [#142](https://github.com/influxdb/telegraf/pull/142): Add Description and SampleConfig funcs to output interface
|
||||
- Indent the toml config file for readability
|
||||
|
||||
### Bugfixes
|
||||
- [#128](https://github.com/influxdata/telegraf/issues/128): system_load measurement missing.
|
||||
- [#129](https://github.com/influxdata/telegraf/issues/129): Latest pkg url fix.
|
||||
- [#131](https://github.com/influxdata/telegraf/issues/131): Fix memory reporting on linux & darwin. Thanks @subhachandrachandra!
|
||||
- [#140](https://github.com/influxdata/telegraf/issues/140): Memory plugin prec->perc typo fix. Thanks @brunoqc!
|
||||
- [#128](https://github.com/influxdb/telegraf/issues/128): system_load measurement missing.
|
||||
- [#129](https://github.com/influxdb/telegraf/issues/129): Latest pkg url fix.
|
||||
- [#131](https://github.com/influxdb/telegraf/issues/131): Fix memory reporting on linux & darwin. Thanks @subhachandrachandra!
|
||||
- [#140](https://github.com/influxdb/telegraf/issues/140): Memory plugin prec->perc typo fix. Thanks @brunoqc!
|
||||
|
||||
## v0.1.6 [2015-08-20]
|
||||
|
||||
### Features
|
||||
- [#112](https://github.com/influxdata/telegraf/pull/112): Datadog output. Thanks @jipperinbham!
|
||||
- [#116](https://github.com/influxdata/telegraf/pull/116): Use godep to vendor all dependencies
|
||||
- [#120](https://github.com/influxdata/telegraf/pull/120): Httpjson plugin. Thanks @jpalay & @alvaromorales!
|
||||
- [#112](https://github.com/influxdb/telegraf/pull/112): Datadog output. Thanks @jipperinbham!
|
||||
- [#116](https://github.com/influxdb/telegraf/pull/116): Use godep to vendor all dependencies
|
||||
- [#120](https://github.com/influxdb/telegraf/pull/120): Httpjson plugin. Thanks @jpalay & @alvaromorales!
|
||||
|
||||
### Bugfixes
|
||||
- [#113](https://github.com/influxdata/telegraf/issues/113): Update README with Telegraf/InfluxDB compatibility
|
||||
- [#118](https://github.com/influxdata/telegraf/pull/118): Fix for disk usage stats in Windows. Thanks @srfraser!
|
||||
- [#122](https://github.com/influxdata/telegraf/issues/122): Fix for DiskUsage segv fault. Thanks @srfraser!
|
||||
- [#126](https://github.com/influxdata/telegraf/issues/126): Nginx plugin not catching net.SplitHostPort error
|
||||
- [#113](https://github.com/influxdb/telegraf/issues/113): Update README with Telegraf/InfluxDB compatibility
|
||||
- [#118](https://github.com/influxdb/telegraf/pull/118): Fix for disk usage stats in Windows. Thanks @srfraser!
|
||||
- [#122](https://github.com/influxdb/telegraf/issues/122): Fix for DiskUsage segv fault. Thanks @srfraser!
|
||||
- [#126](https://github.com/influxdb/telegraf/issues/126): Nginx plugin not catching net.SplitHostPort error
|
||||
|
||||
## v0.1.5 [2015-08-13]
|
||||
|
||||
### Features
|
||||
- [#54](https://github.com/influxdata/telegraf/pull/54): MongoDB plugin. Thanks @jipperinbham!
|
||||
- [#55](https://github.com/influxdata/telegraf/pull/55): Elasticsearch plugin. Thanks @brocaar!
|
||||
- [#71](https://github.com/influxdata/telegraf/pull/71): HAProxy plugin. Thanks @kureikain!
|
||||
- [#72](https://github.com/influxdata/telegraf/pull/72): Adding TokuDB metrics to MySQL. Thanks vadimtk!
|
||||
- [#73](https://github.com/influxdata/telegraf/pull/73): RabbitMQ plugin. Thanks @ianunruh!
|
||||
- [#77](https://github.com/influxdata/telegraf/issues/77): Automatically create database.
|
||||
- [#79](https://github.com/influxdata/telegraf/pull/56): Nginx plugin. Thanks @codeb2cc!
|
||||
- [#86](https://github.com/influxdata/telegraf/pull/86): Lustre2 plugin. Thanks srfraser!
|
||||
- [#91](https://github.com/influxdata/telegraf/pull/91): Unit testing
|
||||
- [#92](https://github.com/influxdata/telegraf/pull/92): Exec plugin. Thanks @alvaromorales!
|
||||
- [#98](https://github.com/influxdata/telegraf/pull/98): LeoFS plugin. Thanks @mocchira!
|
||||
- [#103](https://github.com/influxdata/telegraf/pull/103): Filter by metric tags. Thanks @srfraser!
|
||||
- [#106](https://github.com/influxdata/telegraf/pull/106): Options to filter plugins on startup. Thanks @zepouet!
|
||||
- [#107](https://github.com/influxdata/telegraf/pull/107): Multiple outputs beyong influxdb. Thanks @jipperinbham!
|
||||
- [#108](https://github.com/influxdata/telegraf/issues/108): Support setting per-CPU and total-CPU gathering.
|
||||
- [#111](https://github.com/influxdata/telegraf/pull/111): Report CPU Usage in cpu plugin. Thanks @jpalay!
|
||||
- [#54](https://github.com/influxdb/telegraf/pull/54): MongoDB plugin. Thanks @jipperinbham!
|
||||
- [#55](https://github.com/influxdb/telegraf/pull/55): Elasticsearch plugin. Thanks @brocaar!
|
||||
- [#71](https://github.com/influxdb/telegraf/pull/71): HAProxy plugin. Thanks @kureikain!
|
||||
- [#72](https://github.com/influxdb/telegraf/pull/72): Adding TokuDB metrics to MySQL. Thanks vadimtk!
|
||||
- [#73](https://github.com/influxdb/telegraf/pull/73): RabbitMQ plugin. Thanks @ianunruh!
|
||||
- [#77](https://github.com/influxdb/telegraf/issues/77): Automatically create database.
|
||||
- [#79](https://github.com/influxdb/telegraf/pull/56): Nginx plugin. Thanks @codeb2cc!
|
||||
- [#86](https://github.com/influxdb/telegraf/pull/86): Lustre2 plugin. Thanks srfraser!
|
||||
- [#91](https://github.com/influxdb/telegraf/pull/91): Unit testing
|
||||
- [#92](https://github.com/influxdb/telegraf/pull/92): Exec plugin. Thanks @alvaromorales!
|
||||
- [#98](https://github.com/influxdb/telegraf/pull/98): LeoFS plugin. Thanks @mocchira!
|
||||
- [#103](https://github.com/influxdb/telegraf/pull/103): Filter by metric tags. Thanks @srfraser!
|
||||
- [#106](https://github.com/influxdb/telegraf/pull/106): Options to filter plugins on startup. Thanks @zepouet!
|
||||
- [#107](https://github.com/influxdb/telegraf/pull/107): Multiple outputs beyong influxdb. Thanks @jipperinbham!
|
||||
- [#108](https://github.com/influxdb/telegraf/issues/108): Support setting per-CPU and total-CPU gathering.
|
||||
- [#111](https://github.com/influxdb/telegraf/pull/111): Report CPU Usage in cpu plugin. Thanks @jpalay!
|
||||
|
||||
### Bugfixes
|
||||
- [#85](https://github.com/influxdata/telegraf/pull/85): Fix GetLocalHost testutil function for mac users
|
||||
- [#89](https://github.com/influxdata/telegraf/pull/89): go fmt fixes
|
||||
- [#94](https://github.com/influxdata/telegraf/pull/94): Fix for issue #93, explicitly call sarama.v1 -> sarama
|
||||
- [#101](https://github.com/influxdata/telegraf/issues/101): switch back from master branch if building locally
|
||||
- [#99](https://github.com/influxdata/telegraf/issues/99): update integer output to new InfluxDB line protocol format
|
||||
- [#85](https://github.com/influxdb/telegraf/pull/85): Fix GetLocalHost testutil function for mac users
|
||||
- [#89](https://github.com/influxdb/telegraf/pull/89): go fmt fixes
|
||||
- [#94](https://github.com/influxdb/telegraf/pull/94): Fix for issue #93, explicitly call sarama.v1 -> sarama
|
||||
- [#101](https://github.com/influxdb/telegraf/issues/101): switch back from master branch if building locally
|
||||
- [#99](https://github.com/influxdb/telegraf/issues/99): update integer output to new InfluxDB line protocol format
|
||||
|
||||
## v0.1.4 [2015-07-09]
|
||||
|
||||
### Features
|
||||
- [#56](https://github.com/influxdata/telegraf/pull/56): Update README for Kafka plugin. Thanks @EmilS!
|
||||
- [#56](https://github.com/influxdb/telegraf/pull/56): Update README for Kafka plugin. Thanks @EmilS!
|
||||
|
||||
### Bugfixes
|
||||
- [#50](https://github.com/influxdata/telegraf/pull/50): Fix init.sh script to use telegraf directory. Thanks @jseriff!
|
||||
- [#52](https://github.com/influxdata/telegraf/pull/52): Update CHANGELOG to reference updated directory. Thanks @benfb!
|
||||
- [#50](https://github.com/influxdb/telegraf/pull/50): Fix init.sh script to use telegraf directory. Thanks @jseriff!
|
||||
- [#52](https://github.com/influxdb/telegraf/pull/52): Update CHANGELOG to reference updated directory. Thanks @benfb!
|
||||
|
||||
## v0.1.3 [2015-07-05]
|
||||
|
||||
### Features
|
||||
- [#35](https://github.com/influxdata/telegraf/pull/35): Add Kafka plugin. Thanks @EmilS!
|
||||
- [#47](https://github.com/influxdata/telegraf/pull/47): Add RethinkDB plugin. Thanks @jipperinbham!
|
||||
- [#35](https://github.com/influxdb/telegraf/pull/35): Add Kafka plugin. Thanks @EmilS!
|
||||
- [#47](https://github.com/influxdb/telegraf/pull/47): Add RethinkDB plugin. Thanks @jipperinbham!
|
||||
|
||||
### Bugfixes
|
||||
- [#45](https://github.com/influxdata/telegraf/pull/45): Skip disk tags that don't have a value. Thanks @jhofeditz!
|
||||
- [#43](https://github.com/influxdata/telegraf/pull/43): Fix bug in MySQL plugin. Thanks @marcosnils!
|
||||
- [#45](https://github.com/influxdb/telegraf/pull/45): Skip disk tags that don't have a value. Thanks @jhofeditz!
|
||||
- [#43](https://github.com/influxdb/telegraf/pull/43): Fix bug in MySQL plugin. Thanks @marcosnils!
|
||||
|
||||
## v0.1.2 [2015-07-01]
|
||||
|
||||
### Features
|
||||
- [#12](https://github.com/influxdata/telegraf/pull/12): Add Linux/ARM to the list of built binaries. Thanks @voxxit!
|
||||
- [#14](https://github.com/influxdata/telegraf/pull/14): Clarify the S3 buckets that Telegraf is pushed to.
|
||||
- [#16](https://github.com/influxdata/telegraf/pull/16): Convert Redis to use URI, support Redis AUTH. Thanks @jipperinbham!
|
||||
- [#21](https://github.com/influxdata/telegraf/pull/21): Add memcached plugin. Thanks @Yukki!
|
||||
- [#12](https://github.com/influxdb/telegraf/pull/12): Add Linux/ARM to the list of built binaries. Thanks @voxxit!
|
||||
- [#14](https://github.com/influxdb/telegraf/pull/14): Clarify the S3 buckets that Telegraf is pushed to.
|
||||
- [#16](https://github.com/influxdb/telegraf/pull/16): Convert Redis to use URI, support Redis AUTH. Thanks @jipperinbham!
|
||||
- [#21](https://github.com/influxdb/telegraf/pull/21): Add memcached plugin. Thanks @Yukki!
|
||||
|
||||
### Bugfixes
|
||||
- [#13](https://github.com/influxdata/telegraf/pull/13): Fix the packaging script.
|
||||
- [#19](https://github.com/influxdata/telegraf/pull/19): Add host name to metric tags. Thanks @sherifzain!
|
||||
- [#20](https://github.com/influxdata/telegraf/pull/20): Fix race condition with accumulator mutex. Thanks @nkatsaros!
|
||||
- [#23](https://github.com/influxdata/telegraf/pull/23): Change name of folder for packages. Thanks @colinrymer!
|
||||
- [#32](https://github.com/influxdata/telegraf/pull/32): Fix spelling of memoory -> memory. Thanks @tylernisonoff!
|
||||
- [#13](https://github.com/influxdb/telegraf/pull/13): Fix the packaging script.
|
||||
- [#19](https://github.com/influxdb/telegraf/pull/19): Add host name to metric tags. Thanks @sherifzain!
|
||||
- [#20](https://github.com/influxdb/telegraf/pull/20): Fix race condition with accumulator mutex. Thanks @nkatsaros!
|
||||
- [#23](https://github.com/influxdb/telegraf/pull/23): Change name of folder for packages. Thanks @colinrymer!
|
||||
- [#32](https://github.com/influxdb/telegraf/pull/32): Fix spelling of memoory -> memory. Thanks @tylernisonoff!
|
||||
|
||||
## v0.1.1 [2015-06-19]
|
||||
|
||||
|
||||
`CONFIGURATION.md` — 123 changes

@@ -1,74 +1,33 @@
# Telegraf Configuration

## Generating a Configuration File
## Plugin Configuration

A default Telegraf config file can be generated using the -sample-config flag:
`telegraf -sample-config > telegraf.conf`

To generate a file with specific inputs and outputs, you can use the
-input-filter and -output-filter flags:
`telegraf -sample-config -input-filter cpu:mem:net:swap -output-filter influxdb:kafka`

## `[tags]` Configuration

Global tags can be specified in the `[tags]` section of the config file in
key="value" format. All metrics being gathered on this host will be tagged
with the tags specified here.
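For illustration only, a minimal `[tags]` block might look like the following. The tag names and values are examples (the `dc = "denver-1"` tag reappears in the full config example later in this file; `rack` is hypothetical):

```toml
# Global tags applied to every metric gathered on this host
[tags]
  dc = "denver-1"
  rack = "1a"
```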
## `[agent]` Configuration

Telegraf has a few options you can configure under the `agent` section of the
config.

* **interval**: Default data collection interval for all inputs
* **round_interval**: Rounds collection interval to 'interval',
  ie, if interval="10s" then always collect on :00, :10, :20, etc.
* **metric_buffer_limit**: Telegraf will cache metric_buffer_limit metrics
  for each output, and will flush this buffer on a successful write.
* **collection_jitter**: Collection jitter is used to jitter the collection
  by a random amount. Each plugin will sleep for a random time within jitter
  before collecting. This can be used to avoid many plugins querying things
  like sysfs at the same time, which can have a measurable effect on the system.
* **flush_interval**: Default data flushing interval for all outputs.
  You should not set this below interval.
  Maximum flush_interval will be flush_interval + flush_jitter
* **flush_jitter**: Jitter the flush interval by a random amount. This is
  primarily to avoid large write spikes for users running a large number of
  telegraf instances. ie, a jitter of 5s and flush_interval 10s means flushes
  will happen every 10-15s.
* **debug**: Run telegraf in debug mode.
* **quiet**: Run telegraf in quiet mode.
* **hostname**: Override default hostname, if empty use os.Hostname().
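A sketch of an `[agent]` section exercising the options above. The key names come from the list; the values are illustrative, not defaults, except for the 10,000-metric buffer and the 10s/5s jitter figures mentioned in the text:

```toml
[agent]
  # Collect from all inputs every 10s, aligned to :00, :10, :20, ...
  interval = "10s"
  round_interval = true
  # Buffer up to 10,000 metrics per output between successful writes
  metric_buffer_limit = 10000
  # Stagger collection by up to 5s so plugins don't all hit sysfs at once
  collection_jitter = "5s"
  # Flush every 10s, jittered by up to 5s, i.e. flushes land every 10-15s
  flush_interval = "10s"
  flush_jitter = "5s"
  debug = false
  quiet = false
  # If empty, os.Hostname() is used
  hostname = ""
```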
## `[inputs.xxx]` Configuration

There are some configuration options that are configurable per input:
There are some configuration options that are configurable per plugin:

* **name_override**: Override the base name of the measurement.
  (Default is the name of the input).
  (Default is the name of the plugin).
* **name_prefix**: Specifies a prefix to attach to the measurement name.
* **name_suffix**: Specifies a suffix to attach to the measurement name.
* **tags**: A map of tags to apply to a specific input's measurements.
* **interval**: How often to gather this metric. Normal plugins use a single
  global interval, but if one particular input should be run less or more often,
  you can configure that here.
* **tags**: A map of tags to apply to a specific plugin's measurements.

#### Input Filters
### Plugin Filters

There are also filters that can be configured per input:
There are also filters that can be configured per plugin:

* **pass**: An array of strings that is used to filter metrics generated by the
  current input. Each string in the array is tested as a glob match against field names
  current plugin. Each string in the array is tested as a glob match against field names
  and if it matches, the field is emitted.
* **drop**: The inverse of pass, if a field name matches, it is not emitted.
* **tagpass**: tag names and arrays of strings that are used to filter
  measurements by the current input. Each string in the array is tested as a glob
  measurements by the current plugin. Each string in the array is tested as a glob
  match against the tag name, and if it matches the measurement is emitted.
* **tagdrop**: The inverse of tagpass. If a tag matches, the measurement is not
  emitted. This is tested on measurements that have passed the tagpass test.
* **tagdrop**: The inverse of tagpass. If a tag matches, the measurement is not emitted.
  This is tested on measurements that have passed the tagpass test.
* **interval**: How often to gather this metric. Normal plugins use a single
  global interval, but if one particular plugin should be run less or more often,
  you can configure that here.

#### Input Configuration Examples
### Plugin Configuration Examples

This is a full working config that will output CPU data to an InfluxDB instance
at 192.168.59.103:8086, tagging measurements with dc="denver-1". It will output

@@ -83,32 +42,35 @@ fields which begin with `time_`.
  interval = "10s"

# OUTPUTS
[outputs]
[[outputs.influxdb]]
  url = "http://192.168.59.103:8086" # required.
  database = "telegraf" # required.
  precision = "s"

# INPUTS
[[inputs.cpu]]
# PLUGINS
[plugins]
[[plugins.cpu]]
  percpu = true
  totalcpu = false
  # filter all fields beginning with 'time_'
  drop = ["time_*"]
```

#### Input Config: tagpass and tagdrop
### Plugin Config: tagpass and tagdrop

```toml
[[inputs.cpu]]
[plugins]
[[plugins.cpu]]
  percpu = true
  totalcpu = false
  drop = ["cpu_time"]
  # Don't collect CPU data for cpu6 & cpu7
  [inputs.cpu.tagdrop]
  [plugins.cpu.tagdrop]
    cpu = [ "cpu6", "cpu7" ]

[[inputs.disk]]
  [inputs.disk.tagpass]
[[plugins.disk]]
  [plugins.disk.tagpass]
    # tagpass conditions are OR, not AND.
    # If the (filesystem is ext4 or xfs) OR (the path is /opt or /home)
    # then the metric passes
@@ -117,26 +79,26 @@ fields which begin with `time_`.
    path = [ "/opt", "/home*" ]
```

#### Input Config: pass and drop
### Plugin Config: pass and drop

```toml
# Drop all metrics for guest & steal CPU usage
[[inputs.cpu]]
[[plugins.cpu]]
  percpu = false
  totalcpu = true
  drop = ["usage_guest", "usage_steal"]

# Only store inode related metrics for disks
[[inputs.disk]]
[[plugins.disk]]
  pass = ["inodes*"]
```

#### Input config: prefix, suffix, and override
### Plugin config: prefix, suffix, and override

This plugin will emit measurements with the name `cpu_total`

```toml
[[inputs.cpu]]
[[plugins.cpu]]
  name_suffix = "_total"
  percpu = false
  totalcpu = true
@@ -145,53 +107,50 @@ This plugin will emit measurements with the name `cpu_total`

This will emit measurements with the name `foobar`

```toml
[[inputs.cpu]]
[[plugins.cpu]]
  name_override = "foobar"
  percpu = false
  totalcpu = true
```

#### Input config: tags
### Plugin config: tags

This plugin will emit measurements with two additional tags: `tag1=foo` and
`tag2=bar`

```toml
[[inputs.cpu]]
[[plugins.cpu]]
  percpu = false
  totalcpu = true
  [inputs.cpu.tags]
  [plugins.cpu.tags]
    tag1 = "foo"
    tag2 = "bar"
```

#### Multiple inputs of the same type
### Multiple plugins of the same type

Additional inputs (or outputs) of the same type can be specified,
just define more instances in the config file. It is highly recommended that
you utilize `name_override`, `name_prefix`, or `name_suffix` config options
to avoid measurement collisions:
Additional plugins (or outputs) of the same type can be specified,
just define more instances in the config file:

```toml
[[inputs.cpu]]
[[plugins.cpu]]
  percpu = false
  totalcpu = true

[[inputs.cpu]]
[[plugins.cpu]]
  percpu = true
  totalcpu = false
  name_override = "percpu_usage"
  drop = ["cpu_time*"]
```

## `[outputs.xxx]` Configuration
## Output Configuration

Telegraf also supports specifying multiple output sinks to send data to,
configuring each output sink is different, but examples can be
found by running `telegraf -sample-config`.

Outputs also support the same configurable options as inputs
(pass, drop, tagpass, tagdrop)
Outputs also support the same configurable options as plugins
(pass, drop, tagpass, tagdrop), added in 0.2.4

```toml
[[outputs.influxdb]]
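# NOTE (editor): the diff is cut off inside this output example, so what
# follows is a hedged sketch only, not content from the file. It illustrates
# the point just made: an output carrying the same pass/drop/tagpass/tagdrop
# filters as an input. The url/database/precision values repeat the earlier
# full example; the tagpass table syntax is assumed by analogy with
# [inputs.cpu.tagdrop] above, and the cpu filter value is hypothetical.
  url = "http://192.168.59.103:8086" # required
  database = "telegraf"              # required
  precision = "s"
  # Only write metrics tagged cpu = "cpu0" to this output
  [outputs.influxdb.tagpass]
    cpu = ["cpu0"]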
`CONTRIBUTING.md` — 112 changes

@@ -1,55 +1,35 @@
## Steps for Contributing:

1. [Sign the CLA](http://influxdb.com/community/cla.html)
1. Make changes or write plugin (see below for details)
1. Add your plugin to `plugins/inputs/all/all.go` or `plugins/outputs/all/all.go`
1. If your plugin requires a new Go package,
   [add it](https://github.com/influxdata/telegraf/blob/master/CONTRIBUTING.md#adding-a-dependency)
1. Write a README for your plugin, if it's an input plugin, it should be structured
   like the [input example here](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/EXAMPLE_README.md).
   Output plugins READMEs are less structured,
   but any information you can provide on how the data will look is appreciated.
   See the [OpenTSDB output](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/opentsdb)
   for a good example.

## Sign the CLA

Before we can merge a pull request, you will need to sign the CLA,
which can be found [on our website](http://influxdb.com/community/cla.html)

## Adding a dependency
## Plugins

Assuming you can already build the project, run these in the telegraf directory:

1. `go get github.com/sparrc/gdm`
1. `gdm restore`
1. `gdm save`

## Input Plugins

This section is for developers who want to create new collection inputs.
This section is for developers who want to create new collection plugins.
Telegraf is entirely plugin driven. This interface allows for operators to
pick and choose what is gathered as well as makes it easy for developers
to create new ways of generating metrics.

Plugin authorship is kept as simple as possible to promote people to develop
and submit new inputs.
and submit new plugins.

### Input Plugin Guidelines
### Plugin Guidelines

* A plugin must conform to the `inputs.Input` interface.
* Input Plugins should call `inputs.Add` in their `init` function to register themselves.
* A plugin must conform to the `plugins.Plugin` interface.
* Each generated metric automatically has the name of the plugin that generated
  it prepended. This is to keep plugins honest.
* Plugins should call `plugins.Add` in their `init` function to register themselves.
  See below for a quick example.
* Input Plugins must be added to the
  `github.com/influxdata/telegraf/plugins/inputs/all/all.go` file.
* To be available within Telegraf itself, plugins must add themselves to the
  `github.com/influxdb/telegraf/plugins/all/all.go` file.
* The `SampleConfig` function should return valid toml that describes how the
  plugin can be configured. This is included in `telegraf -sample-config`.
* The `Description` function should say in one line what this plugin does.

### Input interface
### Plugin interface

```go
type Input interface {
type Plugin interface {
	SampleConfig() string
	Description() string
	Gather(Accumulator) error
@@ -72,32 +52,52 @@ type Accumulator interface {

The way that a plugin emits metrics is by interacting with the Accumulator.

The `Add` function takes 3 arguments:
* **measurement**: A string description of the metric. For instance `bytes_read` or `
  faults`.
* **measurement**: A string description of the metric. For instance `bytes_read` or `faults`.
* **value**: A value for the metric. This accepts 5 different types of value:
  * **int**: The most common type. All int types are accepted but favor using `int64`
    Useful for counters, etc.
  * **float**: Favor `float64`, useful for gauges, percentages, etc.
  * **bool**: `true` or `false`, useful to indicate the presence of a state. `light_on`,
    etc.
  * **string**: Typically used to indicate a message, or some kind of freeform
    information.
  * **time.Time**: Useful for indicating when a state last occurred, for instance `
    light_on_since`.
  * **bool**: `true` or `false`, useful to indicate the presence of a state. `light_on`, etc.
  * **string**: Typically used to indicate a message, or some kind of freeform information.
  * **time.Time**: Useful for indicating when a state last occurred, for instance `light_on_since`.
* **tags**: This is a map of strings to strings to describe the where or who
  about the metric. For instance, the `net` plugin adds a tag named `"interface"`
  set to the name of the network interface, like `"eth0"`.

The `AddFieldsWithTime` allows multiple values for a point to be passed. The values
used are the same type profile as **value** above. The **timestamp** argument
allows a point to be registered as having occurred at an arbitrary time.

Let's say you've written a plugin that emits metrics about processes on the current host.

### Input Plugin Example
```go

type Process struct {
	CPUTime float64
	MemoryBytes int64
	PID int
}

func Gather(acc plugins.Accumulator) error {
	for _, process := range system.Processes() {
		tags := map[string]string {
			"pid": fmt.Sprintf("%d", process.Pid),
		}

		acc.Add("cpu", process.CPUTime, tags, time.Now())
		acc.Add("memory", process.MemoryBytes, tags, time.Now())
	}
}
```
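The example above uses `Add` for single values. No example of the `AddFieldsWithTime` call described earlier appears in this diff, so the following is a hedged sketch only: the argument order (measurement, fields, tags, timestamp) is assumed from the prose description, and imports (`fmt`, `time`) are omitted to match the fragment style of the example above.

```go
// Hypothetical companion to the example above: report CPU time and memory
// for one process as a single point with an explicit timestamp.
// The AddFieldsWithTime signature shown here is an assumption, not taken
// from the telegraf source.
func gatherProcess(acc plugins.Accumulator, process Process) {
	tags := map[string]string{
		"pid": fmt.Sprintf("%d", process.PID),
	}
	fields := map[string]interface{}{
		"cpu_time":     process.CPUTime,
		"memory_bytes": process.MemoryBytes,
	}
	acc.AddFieldsWithTime("process", fields, tags, time.Now())
}
```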
### Plugin Example

```go
package simple

// simple.go

import "github.com/influxdata/telegraf/plugins/inputs"
import "github.com/influxdb/telegraf/plugins"

type Simple struct {
    Ok bool
@@ -111,7 +111,7 @@ func (s *Simple) SampleConfig() string {
    return "ok = true # indicate if everything is fine"
}

func (s *Simple) Gather(acc inputs.Accumulator) error {
func (s *Simple) Gather(acc plugins.Accumulator) error {
    if s.Ok {
        acc.Add("state", "pretty good", nil)
    } else {
@@ -122,19 +122,19 @@ func (s *Simple) Gather(acc inputs.Accumulator) error {
}

func init() {
    inputs.Add("simple", func() inputs.Input { return &Simple{} })
    plugins.Add("simple", func() plugins.Plugin { return &Simple{} })
}
```

## Service Input Plugins
## Service Plugins

This section is for developers who want to create new "service" collection
inputs. A service plugin differs from a regular plugin in that it operates
plugins. A service plugin differs from a regular plugin in that it operates
a background service while Telegraf is running. One example would be the `statsd`
plugin, which operates a statsd server.

Service Input Plugins are substantially more complicated than regular plugins, as they
will require threads and locks to ensure data integrity. Service Input Plugins should
Service Plugins are substantially more complicated than regular plugins, as they
will require threads and locks to ensure data integrity. Service Plugins should
be avoided unless there is no way to create their behavior with a regular plugin.

Their interface is quite similar to a regular plugin, with the addition of `Start()`
@@ -143,7 +143,7 @@ and `Stop()` methods.
### Service Plugin Guidelines

* Same as the `Plugin` guidelines, except that they must conform to the
  `inputs.ServiceInput` interface.
  `plugins.ServicePlugin` interface.

### Service Plugin interface

@@ -157,19 +157,19 @@ type ServicePlugin interface {
}
```

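To make the threads-and-locks point above concrete, here is a minimal sketch of what a service plugin of this shape could look like. The background loop and its ticker are hypothetical; only the `Start()`/`Stop()` pair and the locking around shared state follow from the guidelines above. Imports (`sync`, `time`) are omitted for brevity, as in the examples above.

```go
type SimpleService struct {
    sync.Mutex           // guards the buffered values below
    values []string      // filled by the background goroutine
    done   chan struct{} // closed by Stop() to end the background loop
}

func (s *SimpleService) Description() string  { return "a sketch of a service plugin" }
func (s *SimpleService) SampleConfig() string { return "" }

// Start launches the background service.
func (s *SimpleService) Start() error {
    s.done = make(chan struct{})
    go s.loop()
    return nil
}

// Stop shuts the background service down.
func (s *SimpleService) Stop() {
    close(s.done)
}

// loop is the (hypothetical) background service: it buffers a value once
// per second until Stop() is called.
func (s *SimpleService) loop() {
    ticker := time.NewTicker(time.Second)
    defer ticker.Stop()
    for {
        select {
        case <-s.done:
            return
        case <-ticker.C:
            s.Lock()
            s.values = append(s.values, "tick")
            s.Unlock()
        }
    }
}

// Gather hands anything the service has buffered to the accumulator.
func (s *SimpleService) Gather(acc plugins.Accumulator) error {
    s.Lock()
    defer s.Unlock()
    for _, v := range s.values {
        acc.Add("state", v, nil)
    }
    s.values = nil
    return nil
}
```
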
## Output Plugins
## Outputs

This section is for developers who want to create a new output sink. Outputs
are created in a similar manner to collection plugins, and their interface has
similar constructs.

### Output Plugin Guidelines
### Output Guidelines

* An output must conform to the `outputs.Output` interface.
* Outputs should call `outputs.Add` in their `init` function to register themselves.
  See below for a quick example.
* To be available within Telegraf itself, plugins must add themselves to the
  `github.com/influxdata/telegraf/plugins/outputs/all/all.go` file.
  `github.com/influxdb/telegraf/outputs/all/all.go` file.
* The `SampleConfig` function should return valid toml that describes how the
  output can be configured. This is included in `telegraf -sample-config`.
* The `Description` function should say in one line what this output does.
@@ -193,7 +193,7 @@ package simpleoutput

// simpleoutput.go

import "github.com/influxdata/telegraf/plugins/outputs"
import "github.com/influxdb/telegraf/outputs"

type Simple struct {
    Ok bool
@@ -230,7 +230,7 @@ func init() {

```

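Because the diff above only preserves fragments of `simpleoutput.go`, here is a minimal sketch of a complete output of this shape. The `Connect`/`Close`/`Write([]*client.Point)` method set is inferred from the `outputs.Output` guidelines and from the `agent.go` code further down (which calls `Output.Write` with a slice of points); it is not copied from the original example.

```go
package simpleoutput

// simpleoutput.go (sketch)

import (
    "github.com/influxdb/influxdb/client/v2"
    "github.com/influxdb/telegraf/outputs"
)

type Simple struct {
    Ok bool
}

func (s *Simple) Description() string  { return "a simple output sketch" }
func (s *Simple) SampleConfig() string { return "ok = true # indicate if everything is fine" }

// Connect opens any connection the sink needs; nothing to do here.
func (s *Simple) Connect() error { return nil }

// Close tears that connection down again.
func (s *Simple) Close() error { return nil }

// Write receives a batch of points and is expected to deliver all of them.
func (s *Simple) Write(points []*client.Point) error {
    for _, pt := range points {
        // deliver pt to the sink here
        _ = pt
    }
    return nil
}

func init() {
    outputs.Add("simpleoutput", func() outputs.Output { return &Simple{} })
}
```
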
## Service Output Plugins
## Service Outputs

This section is for developers who want to create new "service" outputs. A
service output differs from a regular output in that it operates a background service
@@ -243,7 +243,7 @@ and `Stop()` methods.
### Service Output Guidelines

* Same as the `Output` guidelines, except that they must conform to the
  `output.ServiceOutput` interface.
  `plugins.ServiceOutput` interface.

### Service Output interface

@@ -274,7 +274,7 @@ which would take some time to replicate.
To overcome this situation we've decided to use docker containers to provide a
fast and reproducible environment to test those services which require it.
For other situations
(e.g. https://github.com/influxdata/telegraf/blob/master/plugins/redis/redis_test.go)
(e.g. https://github.com/influxdb/telegraf/blob/master/plugins/redis/redis_test.go)
a simple mock will suffice.

To execute Telegraf tests, follow these simple steps:

59
Godeps
@@ -1,59 +1,52 @@
|
||||
git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git dbd8d5c40a582eb9adacde36b47932b3a3ad0034
|
||||
github.com/Shopify/sarama d37c73f2b2bce85f7fa16b6a550d26c5372892ef
|
||||
github.com/Sirupsen/logrus f7f79f729e0fbe2fcc061db48a9ba0263f588252
|
||||
github.com/Shopify/sarama 159e9990b0796511607dd0d7aaa3eb37d1829d16
|
||||
github.com/Sirupsen/logrus 446d1c146faa8ed3f4218f056fcd165f6bcfda81
|
||||
github.com/amir/raidman 6a8e089bbe32e6b907feae5ba688841974b3c339
|
||||
github.com/armon/go-metrics 345426c77237ece5dab0e1605c3e4b35c3f54757
|
||||
github.com/aws/aws-sdk-go 87b1e60a50b09e4812dee560b33a238f67305804
|
||||
github.com/armon/go-metrics 06b60999766278efd6d2b5d8418a58c3d5b99e87
|
||||
github.com/aws/aws-sdk-go 999b1591218c36d5050d1ba7266eba956e65965f
|
||||
github.com/beorn7/perks b965b613227fddccbfffe13eae360ed3fa822f8d
|
||||
github.com/boltdb/bolt ee4a0888a9abe7eefe5a0992ca4cb06864839873
|
||||
github.com/boltdb/bolt b34b35ea8d06bb9ae69d9a349119252e4c1d8ee0
|
||||
github.com/cenkalti/backoff 4dc77674aceaabba2c7e3da25d4c823edfb73f99
|
||||
github.com/dancannon/gorethink 6f088135ff288deb9d5546f4c71919207f891a70
|
||||
github.com/dancannon/gorethink a124c9663325ed9f7fb669d17c69961b59151e6e
|
||||
github.com/davecgh/go-spew 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d
|
||||
github.com/eapache/go-resiliency b86b1ec0dd4209a588dc1285cdd471e73525c0b3
|
||||
github.com/eapache/go-resiliency f341fb4dca45128e4aa86389fa6a675d55fe25e1
|
||||
github.com/eapache/queue ded5959c0d4e360646dc9e9908cff48666781367
|
||||
github.com/fsouza/go-dockerclient 7b651349f9479f5114913eefbfd3c4eeddd79ab4
|
||||
github.com/go-ini/ini afbd495e5aaea13597b5e14fe514ddeaa4d76fc3
|
||||
github.com/go-sql-driver/mysql 7c7f556282622f94213bc028b4d0a7b6151ba239
|
||||
github.com/gogo/protobuf e8904f58e872a473a5b91bc9bf3377d223555263
|
||||
github.com/golang/protobuf 6aaa8d47701fa6cf07e914ec01fde3d4a1fe79c3
|
||||
github.com/fsouza/go-dockerclient 7177a9e3543b0891a5d91dbf7051e0f71455c8ef
|
||||
github.com/go-ini/ini 9314fb0ef64171d6a3d0a4fa570dfa33441cba05
|
||||
github.com/go-sql-driver/mysql d512f204a577a4ab037a1816604c48c9c13210be
|
||||
github.com/gogo/protobuf e492fd34b12d0230755c45aa5fb1e1eea6a84aa9
|
||||
github.com/golang/protobuf 68415e7123da32b07eab49c96d2c4d6158360e9b
|
||||
github.com/golang/snappy 723cc1e459b8eea2dea4583200fd60757d40097a
|
||||
github.com/gonuts/go-shellquote e842a11b24c6abfb3dd27af69a17f482e4b483c2
|
||||
github.com/gorilla/context 1c83b3eabd45b6d76072b66b746c20815fb2872d
|
||||
github.com/gorilla/mux 26a6070f849969ba72b72256e9f14cf519751690
|
||||
github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478
|
||||
github.com/hailocab/go-hostpool 0637eae892be221164aff5fcbccc57171aea6406
|
||||
github.com/hashicorp/go-msgpack fa3f63826f7c23912c15263591e65d54d080b458
|
||||
github.com/hashicorp/raft 057b893fd996696719e98b6c44649ea14968c811
|
||||
github.com/hashicorp/raft d136cd15dfb7876fd7c89cad1995bc4f19ceb294
|
||||
github.com/hashicorp/raft-boltdb d1e82c1ec3f15ee991f7cc7ffd5b67ff6f5bbaee
|
||||
github.com/influxdata/config bae7cb98197d842374d3b8403905924094930f24
|
||||
github.com/influxdata/influxdb 697f48b4e62e514e701ffec39978b864a3c666e6
|
||||
github.com/influxdb/influxdb 697f48b4e62e514e701ffec39978b864a3c666e6
|
||||
github.com/influxdb/influxdb 69a7664f2d4b75aec300b7cbfc7e57c971721f04
|
||||
github.com/jmespath/go-jmespath c01cf91b011868172fdcd9f41838e80c9d716264
|
||||
github.com/klauspost/crc32 999f3125931f6557b991b2f8472172bdfa578d38
|
||||
github.com/lib/pq 8ad2b298cadd691a77015666a5372eae5dbfac8f
|
||||
github.com/klauspost/crc32 0aff1ea9c20474c3901672b5b6ead0ac611156de
|
||||
github.com/lib/pq 11fc39a580a008f1f39bb3d11d984fb34ed778d9
|
||||
github.com/matttproud/golang_protobuf_extensions d0c3fe89de86839aecf2e0579c40ba3bb336a453
|
||||
github.com/mreiferson/go-snappystream 028eae7ab5c4c9e2d1cb4c4ca1e53259bbe7e504
|
||||
github.com/naoina/go-stringutil 6b638e95a32d0c1131db0e7fe83775cbea4a0d0b
|
||||
github.com/naoina/toml 751171607256bb66e64c9f0220c00662420c38e9
|
||||
github.com/nsqio/go-nsq 2118015c120962edc5d03325c680daf3163a8b5f
|
||||
github.com/pborman/uuid dee7705ef7b324f27ceb85a121c61f2c2e8ce988
|
||||
github.com/pmezard/go-difflib 792786c7400a136282c1664665ae0a8db921c6c2
|
||||
github.com/pborman/uuid cccd189d45f7ac3368a0d127efb7f4d08ae0b655
|
||||
github.com/pmezard/go-difflib e8554b8641db39598be7f6342874b958f12ae1d4
|
||||
github.com/prometheus/client_golang 67994f177195311c3ea3d4407ed0175e34a4256f
|
||||
github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
|
||||
github.com/prometheus/common 14ca1097bbe21584194c15e391a9dab95ad42a59
|
||||
github.com/prometheus/common 56b90312e937d43b930f06a59bf0d6a4ae1944bc
|
||||
github.com/prometheus/procfs 406e5b7bfd8201a36e2bb5f7bdae0b03380c2ce8
|
||||
github.com/samuel/go-zookeeper 218e9c81c0dd8b3b18172b2bbfad92cc7d6db55f
|
||||
github.com/shirou/gopsutil 85bf0974ed06e4e668595ae2b4de02e772a2819b
|
||||
github.com/soniah/gosnmp b1b4f885b12c5dcbd021c5cee1c904110de6db7d
|
||||
github.com/shirou/gopsutil fc932d9090f13a84fb4b3cb8baa124610cab184c
|
||||
github.com/streadway/amqp b4f3ceab0337f013208d31348b578d83c0064744
|
||||
github.com/stretchr/objx 1a9d0bb9f541897e62256577b352fdbc1fb4fd94
|
||||
github.com/stretchr/testify f390dcf405f7b83c997eac1b06768bb9f44dec18
|
||||
github.com/stretchr/testify e3a8ff8ce36581f87a15341206f205b1da467059
|
||||
github.com/wvanbergen/kafka 1a8639a45164fcc245d5c7b4bd3ccfbd1a0ffbf3
|
||||
github.com/wvanbergen/kazoo-go 0f768712ae6f76454f987c3356177e138df258f8
|
||||
github.com/zensqlmonitor/go-mssqldb ffe5510c6fa5e15e6d983210ab501c815b56b363
|
||||
golang.org/x/crypto 1f22c0103821b9390939b6776727195525381532
|
||||
golang.org/x/net 04b9de9b512f58addf28c9853d50ebef61c3953e
|
||||
golang.org/x/text 6d3c22c4525a4da167968fa2479be5524d2e8bd0
|
||||
gopkg.in/dancannon/gorethink.v1 6f088135ff288deb9d5546f4c71919207f891a70
|
||||
golang.org/x/crypto 7b85b097bf7527677d54d3220065e966a0e3b613
|
||||
golang.org/x/net 1796f9b8b7178e3c7587dff118d3bb9d37f9b0b3
|
||||
gopkg.in/dancannon/gorethink.v1 a124c9663325ed9f7fb669d17c69961b59151e6e
|
||||
gopkg.in/fatih/pool.v2 cba550ebf9bce999a02e963296d4bc7a486cb715
|
||||
gopkg.in/mgo.v2 03c9f3ee4c14c8e51ee521a6a7d0425658dd6f64
|
||||
gopkg.in/mgo.v2 e30de8ac9ae3b30df7065f766c71f88bba7d4e49
|
||||
gopkg.in/yaml.v2 f7716cbe52baa25d2e9b0d0da546fcf909fc16b4
|
||||
|
||||
19
Makefile
@@ -21,8 +21,21 @@ dev: prepare
|
||||
"-X main.Version=$(VERSION)" \
|
||||
./cmd/telegraf/telegraf.go
|
||||
|
||||
# Build linux 64-bit, 32-bit and arm architectures
|
||||
build-linux-bins: prepare
|
||||
GOARCH=amd64 GOOS=linux go build -o telegraf_linux_amd64 \
|
||||
-ldflags "-X main.Version=$(VERSION)" \
|
||||
./cmd/telegraf/telegraf.go
|
||||
GOARCH=386 GOOS=linux go build -o telegraf_linux_386 \
|
||||
-ldflags "-X main.Version=$(VERSION)" \
|
||||
./cmd/telegraf/telegraf.go
|
||||
GOARCH=arm GOOS=linux go build -o telegraf_linux_arm \
|
||||
-ldflags "-X main.Version=$(VERSION)" \
|
||||
./cmd/telegraf/telegraf.go
|
||||
|
||||
# Get dependencies and use gdm to checkout changesets
|
||||
prepare:
|
||||
go get ./...
|
||||
go get github.com/sparrc/gdm
|
||||
gdm restore
|
||||
|
||||
@@ -52,7 +65,6 @@ endif
|
||||
docker run --name nsq -p "4150:4150" -d nsqio/nsq /nsqd
|
||||
docker run --name mqtt -p "1883:1883" -d ncarlier/mqtt
|
||||
docker run --name riemann -p "5555:5555" -d blalor/riemann
|
||||
docker run --name snmp -p "31161:31161/udp" -d titilambert/snmpsim
|
||||
|
||||
# Run docker containers necessary for CircleCI unit tests
|
||||
docker-run-circle:
|
||||
@@ -66,12 +78,11 @@ docker-run-circle:
|
||||
docker run --name nsq -p "4150:4150" -d nsqio/nsq /nsqd
|
||||
docker run --name mqtt -p "1883:1883" -d ncarlier/mqtt
|
||||
docker run --name riemann -p "5555:5555" -d blalor/riemann
|
||||
docker run --name snmp -p "31161:31161/udp" -d titilambert/snmpsim
|
||||
|
||||
# Kill all docker containers, ignore errors
|
||||
docker-kill:
|
||||
-docker kill nsq aerospike redis opentsdb rabbitmq postgres memcached mysql kafka mqtt riemann snmp
|
||||
-docker rm nsq aerospike redis opentsdb rabbitmq postgres memcached mysql kafka mqtt riemann snmp
|
||||
-docker kill nsq aerospike redis opentsdb rabbitmq postgres memcached mysql kafka mqtt riemann
|
||||
-docker rm nsq aerospike redis opentsdb rabbitmq postgres memcached mysql kafka mqtt riemann
|
||||
|
||||
# Run full unit tests using docker containers (includes setup and teardown)
|
||||
test: docker-kill docker-run
|
||||
|
||||
152
README.md
@@ -1,73 +1,48 @@
|
||||
# Telegraf [](https://circleci.com/gh/influxdata/telegraf)
|
||||
# Telegraf - A native agent for InfluxDB [](https://circleci.com/gh/influxdb/telegraf)
|
||||
|
||||
Telegraf is an agent written in Go for collecting metrics from the system it's
|
||||
running on, or from other services, and writing them into InfluxDB or other
|
||||
[outputs](https://github.com/influxdata/telegraf#supported-output-plugins).
|
||||
running on, or from other services, and writing them into InfluxDB.
|
||||
|
||||
Design goals are to have a minimal memory footprint with a plugin system so
|
||||
that developers in the community can easily add support for collecting metrics
|
||||
from well known services (like Hadoop, Postgres, or Redis) and third party
|
||||
APIs (like Mailchimp, AWS CloudWatch, or Google Analytics).
|
||||
|
||||
New input and output plugins are designed to be easy to contribute,
|
||||
we'll eagerly accept pull
|
||||
requests and will manage the set of plugins that Telegraf supports.
|
||||
See the [contributing guide](CONTRIBUTING.md) for instructions on writing
|
||||
new plugins.
|
||||
We'll eagerly accept pull requests for new plugins and will manage the set of
|
||||
plugins that Telegraf supports. See the
|
||||
[contributing guide](CONTRIBUTING.md) for instructions on
|
||||
writing new plugins.
|
||||
|
||||
## Installation:
|
||||
|
||||
NOTE: Telegraf 0.10.x is **not** backwards-compatible with previous versions
|
||||
of telegraf, both in the database layout and the configuration file. 0.2.x
|
||||
will continue to be supported, see below for download links.
|
||||
|
||||
For more details on the differences between Telegraf 0.2.x and 0.10.x, see
|
||||
the [release blog post](https://influxdata.com/blog/announcing-telegraf-0-10-0/).
|
||||
|
||||
### Linux deb and rpm packages:
|
||||
|
||||
Latest:
|
||||
* http://get.influxdb.org/telegraf/telegraf_0.10.1-1_amd64.deb
|
||||
* http://get.influxdb.org/telegraf/telegraf-0.10.1-1.x86_64.rpm
|
||||
|
||||
0.2.x:
|
||||
* http://get.influxdb.org/telegraf/telegraf_0.2.4_amd64.deb
|
||||
* http://get.influxdb.org/telegraf/telegraf-0.2.4-1.x86_64.rpm
|
||||
|
||||
##### Package instructions:
|
||||
|
||||
* Telegraf binary is installed in `/usr/bin/telegraf`
|
||||
* Telegraf daemon configuration file is in `/etc/telegraf/telegraf.conf`
|
||||
* Telegraf binary is installed in `/opt/telegraf/telegraf`
|
||||
* Telegraf daemon configuration file is in `/etc/opt/telegraf/telegraf.conf`
|
||||
* On sysv systems, the telegraf daemon can be controlled via
|
||||
`service telegraf [action]`
|
||||
* On systemd systems (such as Ubuntu 15+), the telegraf daemon can be
|
||||
controlled via `systemctl [action] telegraf`
|
||||
|
||||
### Linux tarballs:
|
||||
### Linux binaries:
|
||||
|
||||
Latest:
|
||||
* http://get.influxdb.org/telegraf/telegraf-0.10.1-1_linux_amd64.tar.gz
|
||||
* http://get.influxdb.org/telegraf/telegraf-0.10.1-1_linux_386.tar.gz
|
||||
* http://get.influxdb.org/telegraf/telegraf-0.10.1-1_linux_arm.tar.gz
|
||||
|
||||
0.2.x:
|
||||
* http://get.influxdb.org/telegraf/telegraf_linux_amd64_0.2.4.tar.gz
|
||||
* http://get.influxdb.org/telegraf/telegraf_linux_386_0.2.4.tar.gz
|
||||
* http://get.influxdb.org/telegraf/telegraf_linux_arm_0.2.4.tar.gz
|
||||
|
||||
##### tarball instructions:
|
||||
##### Binary instructions:
|
||||
|
||||
To install the full directory structure with config file, run:
|
||||
|
||||
```
|
||||
sudo tar -C / -xvf ./telegraf-v0.10.1-1_linux_amd64.tar.gz
|
||||
```
|
||||
|
||||
To extract only the binary, run:
|
||||
|
||||
```
|
||||
tar -zxvf telegraf-v0.10.1-1_linux_amd64.tar.gz --strip-components=3 ./usr/bin/telegraf
|
||||
```
|
||||
These are standalone binaries that can be unpacked and executed on any Linux
system. The binary can be moved to a location such as
`/usr/local/bin` for convenience. A config file will need to be generated,
|
||||
see "How to use it" below.
|
||||
|
||||
### OSX via Homebrew:
|
||||
|
||||
@@ -80,73 +55,57 @@ brew install telegraf
|
||||
|
||||
Telegraf manages dependencies via [gdm](https://github.com/sparrc/gdm),
|
||||
which gets installed via the Makefile
|
||||
if you don't have it already. You also must build with golang version 1.5+.
|
||||
if you don't have it already. You also must build with golang version 1.4+.
|
||||
|
||||
1. [Install Go](https://golang.org/doc/install)
|
||||
2. [Setup your GOPATH](https://golang.org/doc/code.html#GOPATH)
|
||||
3. Run `go get github.com/influxdata/telegraf`
|
||||
4. Run `cd $GOPATH/src/github.com/influxdata/telegraf`
|
||||
3. Run `go get github.com/influxdb/telegraf`
|
||||
4. Run `cd $GOPATH/src/github.com/influxdb/telegraf`
|
||||
5. Run `make`
|
||||
|
||||
### How to use it:
|
||||
|
||||
```console
|
||||
$ telegraf -help
|
||||
Telegraf, The plugin-driven server agent for collecting and reporting metrics.
|
||||
* Run `telegraf -sample-config > telegraf.conf` to create an initial configuration.
|
||||
* Or run `telegraf -sample-config -filter cpu:mem -outputfilter influxdb > telegraf.conf`
|
||||
to create a config file with only CPU and memory plugins defined, and InfluxDB
|
||||
output defined.
|
||||
* Edit the configuration to match your needs.
|
||||
* Run `telegraf -config telegraf.conf -test` to output one full measurement
|
||||
sample to STDOUT. NOTE: if you are using the Linux packages, you may want to run as
the telegraf user: `sudo -u telegraf telegraf -config telegraf.conf -test`
|
||||
* Run `telegraf -config telegraf.conf` to gather and send metrics to configured outputs.
|
||||
* Run `telegraf -config telegraf.conf -filter system:swap`
|
||||
to run telegraf with only the system & swap plugins defined in the config.
|
||||
|
||||
Usage:
|
||||
## Telegraf Options
|
||||
|
||||
telegraf <flags>
|
||||
Telegraf has a few options you can configure under the `agent` section of the
|
||||
config.
|
||||
|
||||
The flags are:
|
||||
|
||||
-config <file> configuration file to load
|
||||
-test gather metrics once, print them to stdout, and exit
|
||||
-sample-config print out full sample configuration to stdout
|
||||
-config-directory directory containing additional *.conf files
|
||||
-input-filter filter the input plugins to enable, separator is :
|
||||
-output-filter filter the output plugins to enable, separator is :
|
||||
-usage print usage for a plugin, ie, 'telegraf -usage mysql'
|
||||
-debug print metrics as they're generated to stdout
|
||||
-quiet run in quiet mode
|
||||
-version print the version to stdout
|
||||
|
||||
Examples:
|
||||
|
||||
# generate a telegraf config file:
|
||||
telegraf -sample-config > telegraf.conf
|
||||
|
||||
# generate config with only cpu input & influxdb output plugins defined
|
||||
telegraf -sample-config -input-filter cpu -output-filter influxdb
|
||||
|
||||
# run a single telegraf collection, outputting metrics to stdout
|
||||
telegraf -config telegraf.conf -test
|
||||
|
||||
# run telegraf with all plugins defined in config file
|
||||
telegraf -config telegraf.conf
|
||||
|
||||
# run telegraf, enabling the cpu & memory input, and influxdb output plugins
|
||||
telegraf -config telegraf.conf -input-filter cpu:mem -output-filter influxdb
|
||||
```
|
||||
* **hostname**: The hostname is passed as a tag. By default this will be
|
||||
the value returned by `hostname` on the machine running Telegraf.
|
||||
You can override that value here.
|
||||
* **interval**: How often to gather metrics. Uses a simple number +
|
||||
unit parser, e.g. "10s" for 10 seconds or "5m" for 5 minutes.
|
||||
* **debug**: Set to true to gather and send metrics to STDOUT as well as
|
||||
InfluxDB.
|
||||
|
||||
## Configuration
|
||||
|
||||
See the [configuration guide](CONFIGURATION.md) for a rundown of the more advanced
|
||||
configuration options.
|
||||
|
||||
## Supported Input Plugins
|
||||
## Supported Plugins
|
||||
|
||||
Telegraf currently has support for collecting metrics from many sources. For
|
||||
more information on each, please look at the directory of the same name in
|
||||
`plugins/inputs`.
|
||||
**You can view usage instructions for each plugin by running**
|
||||
`telegraf -usage <pluginname>`.
|
||||
|
||||
Currently implemented sources:
|
||||
Telegraf currently has support for collecting metrics from:
|
||||
|
||||
* aerospike
|
||||
* apache
|
||||
* bcache
|
||||
* disque
|
||||
* docker
|
||||
* elasticsearch
|
||||
* exec (generic JSON-emitting executable plugin)
|
||||
* haproxy
|
||||
@@ -160,9 +119,7 @@ Currently implemented sources:
|
||||
* mongodb
|
||||
* mysql
|
||||
* nginx
|
||||
* nsq
|
||||
* phpfpm
|
||||
* phusion passenger
|
||||
* ping
|
||||
* postgresql
|
||||
* procstat
|
||||
@@ -171,12 +128,9 @@ Currently implemented sources:
|
||||
* rabbitmq
|
||||
* redis
|
||||
* rethinkdb
|
||||
* sql server (microsoft)
|
||||
* twemproxy
|
||||
* zfs
|
||||
* zookeeper
|
||||
* sensors
|
||||
* snmp
|
||||
* system
|
||||
* cpu
|
||||
* mem
|
||||
@@ -186,30 +140,28 @@ Currently implemented sources:
|
||||
* diskio
|
||||
* swap
|
||||
|
||||
Telegraf can also collect metrics via the following service plugins:
|
||||
## Supported Service Plugins
|
||||
|
||||
Telegraf can collect metrics via the following services:
|
||||
|
||||
* statsd
|
||||
* kafka_consumer
|
||||
* github_webhooks
|
||||
|
||||
We'll be adding support for many more over the coming months. Read on if you
|
||||
want to add support for another service or third-party API.
|
||||
|
||||
## Supported Output Plugins
|
||||
## Supported Outputs
|
||||
|
||||
* influxdb
|
||||
* amon
|
||||
* amqp
|
||||
* aws kinesis
|
||||
* aws cloudwatch
|
||||
* datadog
|
||||
* graphite
|
||||
* kafka
|
||||
* librato
|
||||
* mqtt
|
||||
* nsq
|
||||
* kafka
|
||||
* datadog
|
||||
* opentsdb
|
||||
* amqp (rabbitmq)
|
||||
* mqtt
|
||||
* librato
|
||||
* prometheus
|
||||
* amon
|
||||
* riemann
|
||||
|
||||
## Contributing
|
||||
|
||||
@@ -7,9 +7,9 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/telegraf/internal/models"
|
||||
"github.com/influxdb/telegraf/internal/config"
|
||||
|
||||
"github.com/influxdata/influxdb/client/v2"
|
||||
"github.com/influxdb/influxdb/client/v2"
|
||||
)
|
||||
|
||||
type Accumulator interface {
|
||||
@@ -29,12 +29,12 @@ type Accumulator interface {
|
||||
}
|
||||
|
||||
func NewAccumulator(
|
||||
inputConfig *models.InputConfig,
|
||||
pluginConfig *config.PluginConfig,
|
||||
points chan *client.Point,
|
||||
) Accumulator {
|
||||
acc := accumulator{}
|
||||
acc.points = points
|
||||
acc.inputConfig = inputConfig
|
||||
acc.pluginConfig = pluginConfig
|
||||
return &acc
|
||||
}
|
||||
|
||||
@@ -47,7 +47,7 @@ type accumulator struct {
|
||||
|
||||
debug bool
|
||||
|
||||
inputConfig *models.InputConfig
|
||||
pluginConfig *config.PluginConfig
|
||||
|
||||
prefix string
|
||||
}
|
||||
@@ -69,31 +69,27 @@ func (ac *accumulator) AddFields(
|
||||
tags map[string]string,
|
||||
t ...time.Time,
|
||||
) {
|
||||
if len(fields) == 0 || len(measurement) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
if !ac.inputConfig.Filter.ShouldTagsPass(tags) {
|
||||
if !ac.pluginConfig.Filter.ShouldTagsPass(tags) {
|
||||
return
|
||||
}
|
||||
|
||||
// Override measurement name if set
|
||||
if len(ac.inputConfig.NameOverride) != 0 {
|
||||
measurement = ac.inputConfig.NameOverride
|
||||
if len(ac.pluginConfig.NameOverride) != 0 {
|
||||
measurement = ac.pluginConfig.NameOverride
|
||||
}
|
||||
// Apply measurement prefix and suffix if set
|
||||
if len(ac.inputConfig.MeasurementPrefix) != 0 {
|
||||
measurement = ac.inputConfig.MeasurementPrefix + measurement
|
||||
if len(ac.pluginConfig.MeasurementPrefix) != 0 {
|
||||
measurement = ac.pluginConfig.MeasurementPrefix + measurement
|
||||
}
|
||||
if len(ac.inputConfig.MeasurementSuffix) != 0 {
|
||||
measurement = measurement + ac.inputConfig.MeasurementSuffix
|
||||
if len(ac.pluginConfig.MeasurementSuffix) != 0 {
|
||||
measurement = measurement + ac.pluginConfig.MeasurementSuffix
|
||||
}
|
||||
|
||||
if tags == nil {
|
||||
tags = make(map[string]string)
|
||||
}
|
||||
// Apply plugin-wide tags if set
|
||||
for k, v := range ac.inputConfig.Tags {
|
||||
for k, v := range ac.pluginConfig.Tags {
|
||||
if _, ok := tags[k]; !ok {
|
||||
tags[k] = v
|
||||
}
|
||||
@@ -108,8 +104,8 @@ func (ac *accumulator) AddFields(
|
||||
result := make(map[string]interface{})
|
||||
for k, v := range fields {
|
||||
// Filter out any filtered fields
|
||||
if ac.inputConfig != nil {
|
||||
if !ac.inputConfig.Filter.ShouldPass(k) {
|
||||
if ac.pluginConfig != nil {
|
||||
if !ac.pluginConfig.Filter.ShouldPass(k) {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
232
agent.go
@@ -1,22 +1,19 @@
|
||||
package telegraf
|
||||
|
||||
import (
|
||||
cryptorand "crypto/rand"
|
||||
"crypto/rand"
|
||||
"fmt"
|
||||
"log"
|
||||
"math/big"
|
||||
"math/rand"
|
||||
"os"
|
||||
"runtime"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/telegraf/internal/config"
|
||||
"github.com/influxdata/telegraf/internal/models"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
"github.com/influxdata/telegraf/plugins/outputs"
|
||||
"github.com/influxdb/telegraf/internal/config"
|
||||
"github.com/influxdb/telegraf/outputs"
|
||||
"github.com/influxdb/telegraf/plugins"
|
||||
|
||||
"github.com/influxdata/influxdb/client/v2"
|
||||
"github.com/influxdb/influxdb/client/v2"
|
||||
)
|
||||
|
||||
// Agent runs telegraf and collects data based on the given config
|
||||
@@ -61,7 +58,7 @@ func (a *Agent) Connect() error {
|
||||
}
|
||||
err := o.Output.Connect()
|
||||
if err != nil {
|
||||
log.Printf("Failed to connect to output %s, retrying in 15s, error was '%s' \n", o.Name, err)
|
||||
log.Printf("Failed to connect to output %s, retrying in 15s\n", o.Name)
|
||||
time.Sleep(15 * time.Second)
|
||||
err = o.Output.Connect()
|
||||
if err != nil {
|
||||
@@ -88,57 +85,33 @@ func (a *Agent) Close() error {
|
||||
return err
|
||||
}
|
||||
|
||||
func panicRecover(input *models.RunningInput) {
|
||||
if err := recover(); err != nil {
|
||||
trace := make([]byte, 2048)
|
||||
runtime.Stack(trace, true)
|
||||
log.Printf("FATAL: Input [%s] panicked: %s, Stack:\n%s\n",
|
||||
input.Name, err, trace)
|
||||
log.Println("PLEASE REPORT THIS PANIC ON GITHUB with " +
|
||||
"stack trace, configuration, and OS information: " +
|
||||
"https://github.com/influxdata/telegraf/issues/new")
|
||||
}
|
||||
}
|
||||
|
||||
// gatherParallel runs the inputs that are using the same reporting interval
|
||||
// gatherParallel runs the plugins that are using the same reporting interval
|
||||
// as the telegraf agent.
|
||||
func (a *Agent) gatherParallel(pointChan chan *client.Point) error {
|
||||
var wg sync.WaitGroup
|
||||
|
||||
start := time.Now()
|
||||
counter := 0
|
||||
jitter := a.Config.Agent.CollectionJitter.Duration.Nanoseconds()
|
||||
for _, input := range a.Config.Inputs {
|
||||
if input.Config.Interval != 0 {
|
||||
for _, plugin := range a.Config.Plugins {
|
||||
if plugin.Config.Interval != 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
wg.Add(1)
|
||||
counter++
|
||||
go func(input *models.RunningInput) {
|
||||
defer panicRecover(input)
|
||||
go func(plugin *config.RunningPlugin) {
|
||||
defer wg.Done()
|
||||
|
||||
acc := NewAccumulator(input.Config, pointChan)
|
||||
acc := NewAccumulator(plugin.Config, pointChan)
|
||||
acc.SetDebug(a.Config.Agent.Debug)
|
||||
// acc.SetPrefix(plugin.Name + "_")
|
||||
acc.SetDefaultTags(a.Config.Tags)
|
||||
|
||||
if jitter != 0 {
|
||||
nanoSleep := rand.Int63n(jitter)
|
||||
d, err := time.ParseDuration(fmt.Sprintf("%dns", nanoSleep))
|
||||
if err != nil {
|
||||
log.Printf("Jittering collection interval failed for plugin %s",
|
||||
input.Name)
|
||||
} else {
|
||||
time.Sleep(d)
|
||||
}
|
||||
if err := plugin.Plugin.Gather(acc); err != nil {
|
||||
log.Printf("Error in plugin [%s]: %s", plugin.Name, err)
|
||||
}
|
||||
|
||||
if err := input.Input.Gather(acc); err != nil {
|
||||
log.Printf("Error in input [%s]: %s", input.Name, err)
|
||||
}
|
||||
|
||||
}(input)
|
||||
}(plugin)
|
||||
}
|
||||
|
||||
if counter == 0 {
|
||||
@@ -148,41 +121,36 @@ func (a *Agent) gatherParallel(pointChan chan *client.Point) error {
|
||||
wg.Wait()
|
||||
|
||||
elapsed := time.Since(start)
|
||||
if !a.Config.Agent.Quiet {
|
||||
log.Printf("Gathered metrics, (%s interval), from %d inputs in %s\n",
|
||||
a.Config.Agent.Interval.Duration, counter, elapsed)
|
||||
}
|
||||
log.Printf("Gathered metrics, (%s interval), from %d plugins in %s\n",
|
||||
a.Config.Agent.Interval, counter, elapsed)
|
||||
return nil
|
||||
}
|
||||
|
||||
// gatherSeparate runs the inputs that have been configured with their own
|
||||
// gatherSeparate runs the plugins that have been configured with their own
|
||||
// reporting interval.
|
||||
func (a *Agent) gatherSeparate(
|
||||
shutdown chan struct{},
|
||||
input *models.RunningInput,
|
||||
plugin *config.RunningPlugin,
|
||||
pointChan chan *client.Point,
|
||||
) error {
|
||||
defer panicRecover(input)
|
||||
|
||||
ticker := time.NewTicker(input.Config.Interval)
|
||||
ticker := time.NewTicker(plugin.Config.Interval)
|
||||
|
||||
for {
|
||||
var outerr error
|
||||
start := time.Now()
|
||||
|
||||
acc := NewAccumulator(input.Config, pointChan)
|
||||
acc := NewAccumulator(plugin.Config, pointChan)
|
||||
acc.SetDebug(a.Config.Agent.Debug)
|
||||
// acc.SetPrefix(plugin.Name + "_")
|
||||
acc.SetDefaultTags(a.Config.Tags)
|
||||
|
||||
if err := input.Input.Gather(acc); err != nil {
|
||||
log.Printf("Error in input [%s]: %s", input.Name, err)
|
||||
if err := plugin.Plugin.Gather(acc); err != nil {
|
||||
log.Printf("Error in plugin [%s]: %s", plugin.Name, err)
|
||||
}
|
||||
|
||||
elapsed := time.Since(start)
|
||||
if !a.Config.Agent.Quiet {
|
||||
log.Printf("Gathered metrics, (separate %s interval), from %s in %s\n",
|
||||
input.Config.Interval, input.Name, elapsed)
|
||||
}
|
||||
log.Printf("Gathered metrics, (separate %s interval), from %s in %s\n",
|
||||
plugin.Config.Interval, plugin.Name, elapsed)
|
||||
|
||||
if outerr != nil {
|
||||
return outerr
|
||||
@@ -197,7 +165,7 @@ func (a *Agent) gatherSeparate(
|
||||
}
|
||||
}
|
||||
|
||||
// Test verifies that we can 'Gather' from all inputs with their configured
|
||||
// Test verifies that we can 'Gather' from all plugins with their configured
|
||||
// Config struct
|
||||
func (a *Agent) Test() error {
|
||||
shutdown := make(chan struct{})
|
||||
@@ -216,26 +184,27 @@ func (a *Agent) Test() error {
|
||||
}
|
||||
}()
|
||||
|
||||
for _, input := range a.Config.Inputs {
|
||||
acc := NewAccumulator(input.Config, pointChan)
|
||||
for _, plugin := range a.Config.Plugins {
|
||||
acc := NewAccumulator(plugin.Config, pointChan)
|
||||
acc.SetDebug(true)
|
||||
// acc.SetPrefix(plugin.Name + "_")
|
||||
|
||||
fmt.Printf("* Plugin: %s, Collection 1\n", input.Name)
|
||||
if input.Config.Interval != 0 {
|
||||
fmt.Printf("* Internal: %s\n", input.Config.Interval)
|
||||
fmt.Printf("* Plugin: %s, Collection 1\n", plugin.Name)
|
||||
if plugin.Config.Interval != 0 {
|
||||
fmt.Printf("* Internal: %s\n", plugin.Config.Interval)
|
||||
}
|
||||
|
||||
if err := input.Input.Gather(acc); err != nil {
|
||||
if err := plugin.Plugin.Gather(acc); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Special instructions for some inputs. cpu, for example, needs to be
|
||||
// Special instructions for some plugins. cpu, for example, needs to be
|
||||
// run twice in order to return cpu usage percentages.
|
||||
switch input.Name {
|
||||
case "cpu", "mongodb", "procstat":
|
||||
switch plugin.Name {
|
||||
case "cpu", "mongodb":
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
fmt.Printf("* Plugin: %s, Collection 2\n", input.Name)
|
||||
if err := input.Input.Gather(acc); err != nil {
|
||||
fmt.Printf("* Plugin: %s, Collection 2\n", plugin.Name)
|
||||
if err := plugin.Plugin.Gather(acc); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
@@ -244,45 +213,91 @@ func (a *Agent) Test() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// flush writes a list of points to all configured outputs
|
||||
func (a *Agent) flush() {
|
||||
var wg sync.WaitGroup
|
||||
|
||||
wg.Add(len(a.Config.Outputs))
|
||||
for _, o := range a.Config.Outputs {
|
||||
go func(output *models.RunningOutput) {
|
||||
defer wg.Done()
|
||||
err := output.Write()
|
||||
if err != nil {
|
||||
log.Printf("Error writing to output [%s]: %s\n",
|
||||
output.Name, err.Error())
|
||||
}
|
||||
}(o)
|
||||
// writeOutput writes a list of points to a single output, with retries.
|
||||
// Optionally takes a `done` channel to indicate that it is done writing.
|
||||
func (a *Agent) writeOutput(
|
||||
points []*client.Point,
|
||||
ro *config.RunningOutput,
|
||||
shutdown chan struct{},
|
||||
wg *sync.WaitGroup,
|
||||
) {
|
||||
defer wg.Done()
|
||||
if len(points) == 0 {
|
||||
return
|
||||
}
|
||||
retry := 0
|
||||
retries := a.Config.Agent.FlushRetries
|
||||
start := time.Now()
|
||||
|
||||
wg.Wait()
|
||||
for {
|
||||
filtered := ro.FilterPoints(points)
|
||||
err := ro.Output.Write(filtered)
|
||||
if err == nil {
|
||||
// Write successful
|
||||
elapsed := time.Since(start)
|
||||
log.Printf("Flushed %d metrics to output %s in %s\n",
|
||||
len(filtered), ro.Name, elapsed)
|
||||
return
|
||||
}
|
||||
|
||||
select {
|
||||
case <-shutdown:
|
||||
return
|
||||
default:
|
||||
if retry >= retries {
|
||||
// No more retries
|
||||
msg := "FATAL: Write to output [%s] failed %d times, dropping" +
|
||||
" %d metrics\n"
|
||||
log.Printf(msg, ro.Name, retries+1, len(points))
|
||||
return
|
||||
} else if err != nil {
|
||||
// Sleep for a retry
|
||||
log.Printf("Error in output [%s]: %s, retrying in %s",
|
||||
ro.Name, err.Error(), a.Config.Agent.FlushInterval.Duration)
|
||||
time.Sleep(a.Config.Agent.FlushInterval.Duration)
|
||||
}
|
||||
}
|
||||
|
||||
retry++
|
||||
}
|
||||
}
|
||||
|
||||
// flush writes a list of points to all configured outputs
|
||||
func (a *Agent) flush(
|
||||
points []*client.Point,
|
||||
shutdown chan struct{},
|
||||
wait bool,
|
||||
) {
|
||||
var wg sync.WaitGroup
|
||||
for _, o := range a.Config.Outputs {
|
||||
wg.Add(1)
|
||||
go a.writeOutput(points, o, shutdown, &wg)
|
||||
}
|
||||
if wait {
|
||||
wg.Wait()
|
||||
}
|
||||
}
|
||||
|
||||
// flusher monitors the points input channel and flushes on the minimum interval
|
||||
func (a *Agent) flusher(shutdown chan struct{}, pointChan chan *client.Point) error {
|
||||
// Inelegant, but this sleep is to allow the Gather threads to run, so that
|
||||
// the flusher will flush after metrics are collected.
|
||||
time.Sleep(time.Millisecond * 200)
|
||||
time.Sleep(time.Millisecond * 100)
|
||||
|
||||
ticker := time.NewTicker(a.Config.Agent.FlushInterval.Duration)
|
||||
points := make([]*client.Point, 0)
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-shutdown:
|
||||
log.Println("Hang on, flushing any cached points before shutdown")
|
||||
a.flush()
|
||||
a.flush(points, shutdown, true)
|
||||
return nil
|
||||
case <-ticker.C:
|
||||
a.flush()
|
||||
a.flush(points, shutdown, false)
|
||||
points = make([]*client.Point, 0)
|
||||
case pt := <-pointChan:
|
||||
for _, o := range a.Config.Outputs {
|
||||
o.AddPoint(pt)
|
||||
}
|
||||
points = append(points, pt)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -294,7 +309,7 @@ func jitterInterval(ininterval, injitter time.Duration) time.Duration {
|
||||
outinterval := ininterval
|
||||
if injitter.Nanoseconds() != 0 {
|
||||
maxjitter := big.NewInt(injitter.Nanoseconds())
|
||||
if j, err := cryptorand.Int(cryptorand.Reader, maxjitter); err == nil {
|
||||
if j, err := rand.Int(rand.Reader, maxjitter); err == nil {
|
||||
jitter = j.Int64()
|
||||
}
|
||||
outinterval = time.Duration(jitter + ininterval.Nanoseconds())
|
||||
@@ -312,16 +327,15 @@ func jitterInterval(ininterval, injitter time.Duration) time.Duration {
|
||||
func (a *Agent) Run(shutdown chan struct{}) error {
|
||||
var wg sync.WaitGroup
|
||||
|
||||
a.Config.Agent.FlushInterval.Duration = jitterInterval(
|
||||
a.Config.Agent.FlushInterval.Duration,
|
||||
a.Config.Agent.FlushInterval.Duration = jitterInterval(a.Config.Agent.FlushInterval.Duration,
|
||||
a.Config.Agent.FlushJitter.Duration)
|
||||
|
||||
log.Printf("Agent Config: Interval:%s, Debug:%#v, Quiet:%#v, Hostname:%#v, "+
|
||||
"Flush Interval:%s \n",
|
||||
a.Config.Agent.Interval.Duration, a.Config.Agent.Debug, a.Config.Agent.Quiet,
|
||||
a.Config.Agent.Hostname, a.Config.Agent.FlushInterval.Duration)
|
||||
log.Printf("Agent Config: Interval:%s, Debug:%#v, Hostname:%#v, "+
|
||||
"Flush Interval:%s\n",
|
||||
a.Config.Agent.Interval, a.Config.Agent.Debug,
|
||||
a.Config.Agent.Hostname, a.Config.Agent.FlushInterval)
|
||||
|
||||
// channel shared between all input threads for accumulating points
|
||||
// channel shared between all plugin threads for accumulating points
|
||||
pointChan := make(chan *client.Point, 1000)
|
||||
|
||||
// Round collection to nearest interval by sleeping
|
||||
@@ -340,29 +354,29 @@ func (a *Agent) Run(shutdown chan struct{}) error {
|
||||
}
|
||||
}()
|
||||
|
||||
for _, input := range a.Config.Inputs {
|
||||
for _, plugin := range a.Config.Plugins {
|
||||
|
||||
// Start service of any ServicePlugins
|
||||
switch p := input.Input.(type) {
|
||||
case inputs.ServiceInput:
|
||||
switch p := plugin.Plugin.(type) {
|
||||
case plugins.ServicePlugin:
|
||||
if err := p.Start(); err != nil {
|
||||
log.Printf("Service for input %s failed to start, exiting\n%s\n",
|
||||
input.Name, err.Error())
|
||||
log.Printf("Service for plugin %s failed to start, exiting\n%s\n",
|
||||
plugin.Name, err.Error())
|
||||
return err
|
||||
}
|
||||
defer p.Stop()
|
||||
}
|
||||
|
||||
// Special handling for inputs that have their own collection interval
|
||||
// Special handling for plugins that have their own collection interval
|
||||
// configured. Default intervals are handled below with gatherParallel
|
||||
if input.Config.Interval != 0 {
|
||||
if plugin.Config.Interval != 0 {
|
||||
wg.Add(1)
|
||||
go func(input *models.RunningInput) {
|
||||
go func(plugin *config.RunningPlugin) {
|
||||
defer wg.Done()
|
||||
if err := a.gatherSeparate(shutdown, input, pointChan); err != nil {
|
||||
if err := a.gatherSeparate(shutdown, plugin, pointChan); err != nil {
|
||||
log.Printf(err.Error())
|
||||
}
|
||||
}(input)
|
||||
}(plugin)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -5,99 +5,80 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/telegraf/internal/config"
|
||||
"github.com/influxdb/telegraf/internal/config"
|
||||
|
||||
// needing to load the plugins
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/all"
|
||||
_ "github.com/influxdb/telegraf/plugins/all"
|
||||
// needing to load the outputs
|
||||
_ "github.com/influxdata/telegraf/plugins/outputs/all"
|
||||
_ "github.com/influxdb/telegraf/outputs/all"
|
||||
)
|
||||
|
||||
func TestAgent_LoadPlugin(t *testing.T) {
|
||||
c := config.NewConfig()
|
||||
c.InputFilters = []string{"mysql"}
|
||||
err := c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
|
||||
assert.NoError(t, err)
|
||||
c.PluginFilters = []string{"mysql"}
|
||||
c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
|
||||
a, _ := NewAgent(c)
|
||||
assert.Equal(t, 1, len(a.Config.Inputs))
|
||||
assert.Equal(t, 1, len(a.Config.Plugins))
|
||||
|
||||
c = config.NewConfig()
|
||||
c.InputFilters = []string{"foo"}
|
||||
err = c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
|
||||
assert.NoError(t, err)
|
||||
c.PluginFilters = []string{"foo"}
|
||||
c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
|
||||
a, _ = NewAgent(c)
|
||||
assert.Equal(t, 0, len(a.Config.Inputs))
|
||||
assert.Equal(t, 0, len(a.Config.Plugins))
|
||||
|
||||
c = config.NewConfig()
|
||||
c.InputFilters = []string{"mysql", "foo"}
|
||||
err = c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
|
||||
assert.NoError(t, err)
|
||||
c.PluginFilters = []string{"mysql", "foo"}
|
||||
c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
|
||||
a, _ = NewAgent(c)
|
||||
assert.Equal(t, 1, len(a.Config.Inputs))
|
||||
assert.Equal(t, 1, len(a.Config.Plugins))
|
||||
|
||||
c = config.NewConfig()
|
||||
c.InputFilters = []string{"mysql", "redis"}
|
||||
err = c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
|
||||
assert.NoError(t, err)
|
||||
c.PluginFilters = []string{"mysql", "redis"}
|
||||
c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
|
||||
a, _ = NewAgent(c)
|
||||
assert.Equal(t, 2, len(a.Config.Inputs))
|
||||
assert.Equal(t, 2, len(a.Config.Plugins))
|
||||
|
||||
c = config.NewConfig()
|
||||
c.InputFilters = []string{"mysql", "foo", "redis", "bar"}
|
||||
err = c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
|
||||
assert.NoError(t, err)
|
||||
c.PluginFilters = []string{"mysql", "foo", "redis", "bar"}
|
||||
c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
|
||||
a, _ = NewAgent(c)
|
||||
assert.Equal(t, 2, len(a.Config.Inputs))
|
||||
assert.Equal(t, 2, len(a.Config.Plugins))
|
||||
}
|
||||
|
||||
func TestAgent_LoadOutput(t *testing.T) {
|
||||
c := config.NewConfig()
|
||||
c.OutputFilters = []string{"influxdb"}
|
||||
err := c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
|
||||
assert.NoError(t, err)
|
||||
c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
|
||||
a, _ := NewAgent(c)
|
||||
assert.Equal(t, 2, len(a.Config.Outputs))
|
||||
|
||||
c = config.NewConfig()
|
||||
c.OutputFilters = []string{"kafka"}
|
||||
err = c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
|
||||
assert.NoError(t, err)
|
||||
a, _ = NewAgent(c)
|
||||
assert.Equal(t, 1, len(a.Config.Outputs))
|
||||
|
||||
c = config.NewConfig()
|
||||
c.OutputFilters = []string{}
|
||||
err = c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
|
||||
assert.NoError(t, err)
|
||||
c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
|
||||
a, _ = NewAgent(c)
|
||||
assert.Equal(t, 3, len(a.Config.Outputs))
|
||||
|
||||
c = config.NewConfig()
|
||||
c.OutputFilters = []string{"foo"}
|
||||
err = c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
|
||||
assert.NoError(t, err)
|
||||
c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
|
||||
a, _ = NewAgent(c)
|
||||
assert.Equal(t, 0, len(a.Config.Outputs))
|
||||
|
||||
c = config.NewConfig()
|
||||
c.OutputFilters = []string{"influxdb", "foo"}
|
||||
err = c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
|
||||
assert.NoError(t, err)
|
||||
c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
|
||||
a, _ = NewAgent(c)
|
||||
assert.Equal(t, 2, len(a.Config.Outputs))
|
||||
|
||||
c = config.NewConfig()
|
||||
c.OutputFilters = []string{"influxdb", "kafka"}
|
||||
err = c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 3, len(c.Outputs))
|
||||
c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
|
||||
a, _ = NewAgent(c)
|
||||
assert.Equal(t, 3, len(a.Config.Outputs))
|
||||
|
||||
c = config.NewConfig()
|
||||
c.OutputFilters = []string{"influxdb", "foo", "kafka", "bar"}
|
||||
err = c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
|
||||
assert.NoError(t, err)
|
||||
c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
|
||||
a, _ = NewAgent(c)
|
||||
assert.Equal(t, 3, len(a.Config.Outputs))
|
||||
}
|
||||
|
||||
713
build.py
@@ -1,713 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
#
|
||||
# This is the Telegraf build script.
|
||||
#
|
||||
# Current caveats:
|
||||
# - Does not checkout the correct commit/branch (for now, you will need to do so manually)
|
||||
# - Has external dependencies for packaging (fpm) and uploading (boto)
|
||||
#
|
||||
|
||||
import sys
|
||||
import os
|
||||
import subprocess
|
||||
import time
|
||||
import datetime
|
||||
import shutil
|
||||
import tempfile
|
||||
import hashlib
|
||||
import re
|
||||
|
||||
debug = False
|
||||
|
||||
# PACKAGING VARIABLES
|
||||
INSTALL_ROOT_DIR = "/usr/bin"
|
||||
LOG_DIR = "/var/log/telegraf"
|
||||
SCRIPT_DIR = "/usr/lib/telegraf/scripts"
|
||||
CONFIG_DIR = "/etc/telegraf"
|
||||
LOGROTATE_DIR = "/etc/logrotate.d"
|
||||
|
||||
INIT_SCRIPT = "scripts/init.sh"
|
||||
SYSTEMD_SCRIPT = "scripts/telegraf.service"
|
||||
LOGROTATE_SCRIPT = "etc/logrotate.d/telegraf"
|
||||
DEFAULT_CONFIG = "etc/telegraf.conf"
|
||||
POSTINST_SCRIPT = "scripts/post-install.sh"
|
||||
PREINST_SCRIPT = "scripts/pre-install.sh"
|
||||
|
||||
# META-PACKAGE VARIABLES
|
||||
PACKAGE_LICENSE = "MIT"
|
||||
PACKAGE_URL = "https://github.com/influxdata/telegraf"
|
||||
MAINTAINER = "support@influxdb.com"
|
||||
VENDOR = "InfluxData"
|
||||
DESCRIPTION = "Plugin-driven server agent for reporting metrics into InfluxDB."
|
||||
|
||||
# SCRIPT START
|
||||
prereqs = [ 'git', 'go' ]
|
||||
optional_prereqs = [ 'gvm', 'fpm', 'rpmbuild' ]
|
||||
|
||||
fpm_common_args = "-f -s dir --log error \
|
||||
--vendor {} \
|
||||
--url {} \
|
||||
--license {} \
|
||||
--maintainer {} \
|
||||
--config-files {} \
|
||||
--config-files {} \
|
||||
--after-install {} \
|
||||
--before-install {} \
|
||||
--description \"{}\"".format(
|
||||
VENDOR,
|
||||
PACKAGE_URL,
|
||||
PACKAGE_LICENSE,
|
||||
MAINTAINER,
|
||||
CONFIG_DIR + '/telegraf.conf',
|
||||
LOGROTATE_DIR + '/telegraf',
|
||||
POSTINST_SCRIPT,
|
||||
PREINST_SCRIPT,
|
||||
DESCRIPTION)
|
||||
|
||||
targets = {
|
||||
'telegraf' : './cmd/telegraf/telegraf.go',
|
||||
}
|
||||
|
||||
supported_builds = {
|
||||
'darwin': [ "amd64", "i386" ],
|
||||
'windows': [ "amd64", "i386", "arm" ],
|
||||
'linux': [ "amd64", "i386", "arm" ]
|
||||
}
|
||||
supported_packages = {
|
||||
"darwin": [ "tar", "zip" ],
|
||||
"linux": [ "deb", "rpm", "tar", "zip" ],
|
||||
"windows": [ "tar", "zip" ],
|
||||
}
|
||||
|
||||
def run(command, allow_failure=False, shell=False):
|
||||
out = None
|
||||
if debug:
|
||||
print("[DEBUG] {}".format(command))
|
||||
try:
|
||||
if shell:
|
||||
out = subprocess.check_output(command, stderr=subprocess.STDOUT, shell=shell)
|
||||
else:
|
||||
out = subprocess.check_output(command.split(), stderr=subprocess.STDOUT)
|
||||
out = out.decode("utf8")
|
||||
except subprocess.CalledProcessError as e:
|
||||
print("")
|
||||
print("")
|
||||
print("Executed command failed!")
|
||||
print("-- Command run was: {}".format(command))
|
||||
print("-- Failure was: {}".format(e.output))
|
||||
if allow_failure:
|
||||
print("Continuing...")
|
||||
return None
|
||||
else:
|
||||
print("")
|
||||
print("Stopping.")
|
||||
sys.exit(1)
|
||||
except OSError as e:
|
||||
print("")
|
||||
print("")
|
||||
print("Invalid command!")
|
||||
print("-- Command run was: {}".format(command))
|
||||
print("-- Failure was: {}".format(e))
|
||||
if allow_failure:
|
||||
print("Continuing...")
|
||||
return out
|
||||
else:
|
||||
print("")
|
||||
print("Stopping.")
|
||||
sys.exit(1)
|
||||
else:
|
||||
return out
|
||||
|
||||
def create_temp_dir(prefix=None):
|
||||
if prefix is None:
|
||||
return tempfile.mkdtemp(prefix="telegraf-build.")
|
||||
else:
|
||||
return tempfile.mkdtemp(prefix=prefix)
|
||||
|
||||
def get_current_version():
|
||||
command = "git describe --always --tags --abbrev=0"
|
||||
out = run(command)
|
||||
return out.strip()
|
||||
|
||||
def get_current_commit(short=False):
|
||||
command = None
|
||||
if short:
|
||||
command = "git log --pretty=format:'%h' -n 1"
|
||||
else:
|
||||
command = "git rev-parse HEAD"
|
||||
out = run(command)
|
||||
return out.strip('\'\n\r ')
|
||||
|
||||
def get_current_branch():
|
||||
command = "git rev-parse --abbrev-ref HEAD"
|
||||
out = run(command)
|
||||
return out.strip()
|
||||
|
||||
def get_system_arch():
|
||||
arch = os.uname()[4]
|
||||
if arch == "x86_64":
|
||||
arch = "amd64"
|
||||
return arch
|
||||
|
||||
def get_system_platform():
|
||||
if sys.platform.startswith("linux"):
|
||||
return "linux"
|
||||
else:
|
||||
return sys.platform
|
||||
|
||||
def get_go_version():
|
||||
out = run("go version")
|
||||
matches = re.search('go version go(\S+)', out)
|
||||
if matches is not None:
|
||||
return matches.groups()[0].strip()
|
||||
return None
|
||||
|
||||
def check_path_for(b):
|
||||
def is_exe(fpath):
|
||||
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
|
||||
|
||||
for path in os.environ["PATH"].split(os.pathsep):
|
||||
path = path.strip('"')
|
||||
full_path = os.path.join(path, b)
|
||||
if os.path.isfile(full_path) and os.access(full_path, os.X_OK):
|
||||
return full_path
|
||||
|
||||
def check_environ(build_dir = None):
|
||||
print("\nChecking environment:")
|
||||
for v in [ "GOPATH", "GOBIN", "GOROOT" ]:
|
||||
print("\t- {} -> {}".format(v, os.environ.get(v)))
|
||||
|
||||
cwd = os.getcwd()
|
||||
if build_dir == None and os.environ.get("GOPATH") and os.environ.get("GOPATH") not in cwd:
|
||||
print("\n!! WARNING: Your current directory is not under your GOPATH. This may lead to build failures.")
|
||||
|
||||
def check_prereqs():
|
||||
print("\nChecking for dependencies:")
|
||||
for req in prereqs:
|
||||
path = check_path_for(req)
|
||||
if path is None:
|
||||
path = '?'
|
||||
print("\t- {} -> {}".format(req, path))
|
||||
for req in optional_prereqs:
|
||||
path = check_path_for(req)
|
||||
if path is None:
|
||||
path = '?'
|
||||
print("\t- {} (optional) -> {}".format(req, path))
|
||||
print("")
|
||||
|
||||
def upload_packages(packages, bucket_name=None, nightly=False):
|
||||
if debug:
|
||||
print("[DEBUG] upload_packags: {}".format(packages))
|
||||
try:
|
||||
import boto
|
||||
from boto.s3.key import Key
|
||||
except ImportError:
|
||||
print "!! Cannot upload packages without the 'boto' python library."
|
||||
return 1
|
||||
print("Uploading packages to S3...")
|
||||
print("")
|
||||
c = boto.connect_s3()
|
||||
if bucket_name is None:
|
||||
bucket_name = 'get.influxdb.org/telegraf'
|
||||
bucket = c.get_bucket(bucket_name.split('/')[0])
|
||||
print("\t - Using bucket: {}".format(bucket_name))
|
||||
for p in packages:
|
||||
if '/' in bucket_name:
|
||||
# Allow for nested paths within the bucket name (ex:
|
||||
# bucket/telegraf). Assuming forward-slashes as path
|
||||
# delimiter.
|
||||
name = os.path.join('/'.join(bucket_name.split('/')[1:]),
|
||||
os.path.basename(p))
|
||||
else:
|
||||
name = os.path.basename(p)
|
||||
if bucket.get_key(name) is None or nightly:
|
||||
print("\t - Uploading {} to {}...".format(name, bucket_name))
|
||||
k = Key(bucket)
|
||||
k.key = name
|
||||
if nightly:
|
||||
n = k.set_contents_from_filename(p, replace=True)
|
||||
else:
|
||||
n = k.set_contents_from_filename(p, replace=False)
|
||||
k.make_public()
|
||||
else:
|
||||
print("\t - Not uploading {}, already exists.".format(p))
|
||||
print("")
|
||||
|
||||
def run_tests(race, parallel, timeout, no_vet):
|
||||
get_command = "go get -d -t ./..."
|
||||
print("Retrieving Go dependencies...")
|
||||
sys.stdout.flush()
|
||||
run(get_command)
|
||||
print("Running tests:")
|
||||
print("\tRace: ", race)
|
||||
if parallel is not None:
|
||||
print("\tParallel:", parallel)
|
||||
if timeout is not None:
|
||||
print("\tTimeout:", timeout)
|
||||
sys.stdout.flush()
|
||||
p = subprocess.Popen(["go", "fmt", "./..."], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||
out, err = p.communicate()
|
||||
if len(out) > 0 or len(err) > 0:
|
||||
print("Code not formatted. Please use 'go fmt ./...' to fix formatting errors.")
|
||||
print(out)
|
||||
print(err)
|
||||
return False
|
||||
if not no_vet:
|
||||
p = subprocess.Popen(["go", "tool", "vet", "-composites=false", "./"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||
out, err = p.communicate()
|
||||
if len(out) > 0 or len(err) > 0:
|
||||
print("Go vet failed. Please run 'go vet ./...' and fix any errors.")
|
||||
print(out)
|
||||
print(err)
|
||||
return False
|
||||
else:
|
||||
print("Skipping go vet ...")
|
||||
sys.stdout.flush()
|
||||
test_command = "go test -v"
|
||||
if race:
|
||||
test_command += " -race"
|
||||
if parallel is not None:
|
||||
test_command += " -parallel {}".format(parallel)
|
||||
if timeout is not None:
|
||||
test_command += " -timeout {}".format(timeout)
|
||||
test_command += " ./..."
|
||||
code = os.system(test_command)
|
||||
if code != 0:
|
||||
print("Tests Failed")
|
||||
return False
|
||||
else:
|
||||
print("Tests Passed")
|
||||
return True
|
||||
|
||||
def build(version=None,
|
||||
branch=None,
|
||||
commit=None,
|
||||
platform=None,
|
||||
arch=None,
|
||||
nightly=False,
|
||||
rc=None,
|
||||
race=False,
|
||||
clean=False,
|
||||
outdir=".",
|
||||
goarm_version="6"):
|
||||
print("-------------------------")
|
||||
print("")
|
||||
print("Build plan:")
|
||||
print("\t- version: {}".format(version))
|
||||
if rc:
|
||||
print("\t- release candidate: {}".format(rc))
|
||||
print("\t- commit: {}".format(commit))
|
||||
print("\t- branch: {}".format(branch))
|
||||
print("\t- platform: {}".format(platform))
|
||||
print("\t- arch: {}".format(arch))
|
||||
if arch == 'arm' and goarm_version:
|
||||
print("\t- ARM version: {}".format(goarm_version))
|
||||
print("\t- nightly? {}".format(str(nightly).lower()))
|
||||
print("\t- race enabled? {}".format(str(race).lower()))
|
||||
print("")
|
||||
|
||||
if not os.path.exists(outdir):
|
||||
os.makedirs(outdir)
|
||||
elif clean and outdir != '/':
|
||||
print("Cleaning build directory...")
|
||||
shutil.rmtree(outdir)
|
||||
os.makedirs(outdir)
|
||||
|
||||
if rc:
|
||||
# If a release candidate, update the version information accordingly
|
||||
version = "{}rc{}".format(version, rc)
|
||||
|
||||
# Set architecture to something that Go expects
|
||||
if arch == 'i386':
|
||||
arch = '386'
|
||||
elif arch == 'x86_64':
|
||||
arch = 'amd64'
|
||||
|
||||
print("Starting build...")
|
||||
for b, c in targets.items():
|
||||
print("\t- Building '{}'...".format(os.path.join(outdir, b)))
|
||||
build_command = ""
|
||||
build_command += "GOOS={} GOARCH={} ".format(platform, arch)
|
||||
if arch == "arm" and goarm_version:
|
||||
if goarm_version not in ["5", "6", "7", "arm64"]:
|
||||
print("!! Invalid ARM build version: {}".format(goarm_version))
|
||||
build_command += "GOARM={} ".format(goarm_version)
|
||||
build_command += "go build -o {} ".format(os.path.join(outdir, b))
|
||||
if race:
|
||||
build_command += "-race "
|
||||
go_version = get_go_version()
|
||||
if "1.4" in go_version:
|
||||
build_command += "-ldflags=\"-X main.buildTime '{}' ".format(datetime.datetime.utcnow().isoformat())
|
||||
build_command += "-X main.Version {} ".format(version)
|
||||
build_command += "-X main.Branch {} ".format(get_current_branch())
|
||||
build_command += "-X main.Commit {}\" ".format(get_current_commit())
|
||||
else:
|
||||
build_command += "-ldflags=\"-X main.buildTime='{}' ".format(datetime.datetime.utcnow().isoformat())
|
||||
build_command += "-X main.Version={} ".format(version)
|
||||
build_command += "-X main.Branch={} ".format(get_current_branch())
|
||||
build_command += "-X main.Commit={}\" ".format(get_current_commit())
|
||||
build_command += c
|
||||
run(build_command, shell=True)
|
||||
print("")
|
||||
|
||||
def create_dir(path):
|
||||
try:
|
||||
os.makedirs(path)
|
||||
except OSError as e:
|
||||
print(e)
|
||||
|
||||
def rename_file(fr, to):
|
||||
try:
|
||||
os.rename(fr, to)
|
||||
except OSError as e:
|
||||
print(e)
|
||||
# Return the original filename
|
||||
return fr
|
||||
else:
|
||||
# Return the new filename
|
||||
return to
|
||||
|
||||
def copy_file(fr, to):
|
||||
try:
|
||||
shutil.copy(fr, to)
|
||||
except OSError as e:
|
||||
print(e)
|
||||
|
||||
def create_package_fs(build_root):
|
||||
print("\t- Creating a filesystem hierarchy from directory: {}".format(build_root))
|
||||
# Strip the leading '/' from the path names because they are absolute
|
||||
# (os.path.join discards earlier components when it is given an absolute path)
|
||||
dirs = [ INSTALL_ROOT_DIR[1:], LOG_DIR[1:], SCRIPT_DIR[1:], CONFIG_DIR[1:], LOGROTATE_DIR[1:] ]
|
||||
for d in dirs:
|
||||
create_dir(os.path.join(build_root, d))
|
||||
os.chmod(os.path.join(build_root, d), 0o755)
|
||||
|
||||
def package_scripts(build_root):
|
||||
print("\t- Copying scripts and sample configuration to build directory")
|
||||
shutil.copyfile(INIT_SCRIPT, os.path.join(build_root, SCRIPT_DIR[1:], INIT_SCRIPT.split('/')[1]))
|
||||
os.chmod(os.path.join(build_root, SCRIPT_DIR[1:], INIT_SCRIPT.split('/')[1]), 0o644)
|
||||
shutil.copyfile(SYSTEMD_SCRIPT, os.path.join(build_root, SCRIPT_DIR[1:], SYSTEMD_SCRIPT.split('/')[1]))
|
||||
os.chmod(os.path.join(build_root, SCRIPT_DIR[1:], SYSTEMD_SCRIPT.split('/')[1]), 0o644)
|
||||
shutil.copyfile(LOGROTATE_SCRIPT, os.path.join(build_root, LOGROTATE_DIR[1:], "telegraf"))
|
||||
os.chmod(os.path.join(build_root, LOGROTATE_DIR[1:], "telegraf"), 0o644)
|
||||
shutil.copyfile(DEFAULT_CONFIG, os.path.join(build_root, CONFIG_DIR[1:], "telegraf.conf"))
|
||||
os.chmod(os.path.join(build_root, CONFIG_DIR[1:], "telegraf.conf"), 0o644)
|
||||
|
||||
def go_get(update=False):
|
||||
get_command = None
|
||||
if update:
|
||||
get_command = "go get -u -f -d ./..."
|
||||
else:
|
||||
get_command = "go get -d ./..."
|
||||
print("Retrieving Go dependencies...")
|
||||
run(get_command)
|
||||
|
||||
def generate_md5_from_file(path):
|
||||
m = hashlib.md5()
|
||||
with open(path, 'rb') as f:
|
||||
while True:
|
||||
data = f.read(4096)
|
||||
if not data:
|
||||
break
|
||||
m.update(data)
|
||||
return m.hexdigest()
|
||||
|
||||
def build_packages(build_output, version, nightly=False, rc=None, iteration=1):
|
||||
outfiles = []
|
||||
tmp_build_dir = create_temp_dir()
|
||||
if debug:
|
||||
print("[DEBUG] build_output = {}".format(build_output))
|
||||
try:
|
||||
print("-------------------------")
|
||||
print("")
|
||||
print("Packaging...")
|
||||
for p in build_output:
|
||||
# Create top-level folder displaying which platform (linux, etc)
|
||||
create_dir(os.path.join(tmp_build_dir, p))
|
||||
for a in build_output[p]:
|
||||
current_location = build_output[p][a]
|
||||
# Create second-level directory displaying the architecture (amd64, etc)
|
||||
build_root = os.path.join(tmp_build_dir, p, a)
|
||||
# Create directory tree to mimic file system of package
|
||||
create_dir(build_root)
|
||||
create_package_fs(build_root)
|
||||
# Copy in packaging and miscellaneous scripts
|
||||
package_scripts(build_root)
|
||||
# Copy newly-built binaries to packaging directory
|
||||
for b in targets:
|
||||
if p == 'windows':
|
||||
b = b + '.exe'
|
||||
fr = os.path.join(current_location, b)
|
||||
to = os.path.join(build_root, INSTALL_ROOT_DIR[1:], b)
|
||||
print("\t- [{}][{}] - Moving from '{}' to '{}'".format(p, a, fr, to))
|
||||
copy_file(fr, to)
|
||||
# Package the directory structure
|
||||
for package_type in supported_packages[p]:
|
||||
print("\t- Packaging directory '{}' as '{}'...".format(build_root, package_type))
|
||||
name = "telegraf"
|
||||
# Reset version, iteration, and current location on each run
|
||||
# since they may be modified below.
|
||||
package_version = version
|
||||
package_iteration = iteration
|
||||
current_location = build_output[p][a]
|
||||
|
||||
if package_type in ['zip', 'tar']:
|
||||
if nightly:
|
||||
name = '{}-nightly_{}_{}'.format(name, p, a)
|
||||
else:
|
||||
name = '{}-{}-{}_{}_{}'.format(name, package_version, package_iteration, p, a)
|
||||
if package_type == 'tar':
|
||||
# Add `tar.gz` to path to reduce package size
|
||||
current_location = os.path.join(current_location, name + '.tar.gz')
|
||||
if rc is not None:
|
||||
package_iteration = "0.rc{}".format(rc)
|
||||
if a == '386':
|
||||
a = 'i386'
|
||||
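# fpm stages the build_root tree into the requested package format and prints the
# path of the artifact it wrote, which is parsed out of its output below.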
fpm_command = "fpm {} --name {} -a {} -t {} --version {} --iteration {} -C {} -p {} ".format(
|
||||
fpm_common_args,
|
||||
name,
|
||||
a,
|
||||
package_type,
|
||||
package_version,
|
||||
package_iteration,
|
||||
build_root,
|
||||
current_location)
|
||||
if package_type == "rpm":
|
||||
fpm_command += "--depends coreutils "
|
||||
fpm_command += "--depends lsof"
|
||||
out = run(fpm_command, shell=True)
|
||||
matches = re.search(':path=>"(.*)"', out)
|
||||
outfile = None
|
||||
if matches is not None:
|
||||
outfile = matches.groups()[0]
|
||||
if outfile is None:
|
||||
print("[ COULD NOT DETERMINE OUTPUT ]")
|
||||
else:
|
||||
# Strip nightly version (the unix epoch) from filename
|
||||
if nightly and package_type in ['deb', 'rpm']:
|
||||
outfile = rename_file(outfile, outfile.replace("{}-{}".format(version, iteration), "nightly"))
|
||||
outfiles.append(os.path.join(os.getcwd(), outfile))
|
||||
# Display MD5 hash for generated package
|
||||
print("\t\tMD5 = {}".format(generate_md5_from_file(outfile)))
|
||||
print("")
|
||||
if debug:
|
||||
print("[DEBUG] package outfiles: {}".format(outfiles))
|
||||
return outfiles
|
||||
finally:
|
||||
# Cleanup
|
||||
shutil.rmtree(tmp_build_dir)
|
||||
|
||||
def print_usage():
|
||||
print("Usage: ./build.py [options]")
|
||||
print("")
|
||||
print("Options:")
|
||||
print("\t --outdir=<path> \n\t\t- Send build output to a specified path. Defaults to ./build.")
|
||||
print("\t --arch=<arch> \n\t\t- Build for specified architecture. Acceptable values: x86_64|amd64, 386, arm, or all")
|
||||
print("\t --goarm=<arm version> \n\t\t- Build for specified ARM version (when building for ARM). Default value is: 6")
|
||||
print("\t --platform=<platform> \n\t\t- Build for specified platform. Acceptable values: linux, windows, darwin, or all")
|
||||
print("\t --version=<version> \n\t\t- Version information to apply to build metadata. If not specified, will be pulled from repo tag.")
|
||||
print("\t --commit=<commit> \n\t\t- Use specific commit for build (currently a NOOP).")
|
||||
print("\t --branch=<branch> \n\t\t- Build from a specific branch (currently a NOOP).")
|
||||
print("\t --rc=<rc number> \n\t\t- Release candidate number for this build (affects version information).")
|
||||
print("\t --iteration=<iteration number> \n\t\t- The iteration to display on the package output (defaults to 0 for RCs, and 1 otherwise).")
|
||||
print("\t --race \n\t\t- Whether the produced build should have race detection enabled.")
|
||||
print("\t --package \n\t\t- Whether the produced builds should be packaged for the target platform(s).")
|
||||
print("\t --nightly \n\t\t- Whether the produced build is a nightly (affects version information).")
|
||||
print("\t --update \n\t\t- Whether dependencies should be updated prior to building.")
|
||||
print("\t --test \n\t\t- Run Go tests. Will not produce a build.")
|
||||
print("\t --parallel \n\t\t- Run Go tests in parallel up to the count specified.")
|
||||
print("\t --timeout \n\t\t- Timeout for Go tests. Defaults to 480s.")
|
||||
print("\t --clean \n\t\t- Clean the build output directory prior to creating build.")
|
||||
print("\t --no-get \n\t\t- Do not run `go get` before building.")
|
||||
print("\t --bucket=<S3 bucket>\n\t\t- Full path of the bucket to upload packages to (must also specify --upload).")
|
||||
print("\t --debug \n\t\t- Displays debug output.")
|
||||
print("")
|
||||
|
||||
def print_package_summary(packages):
|
||||
print(packages)
|
||||
|
||||
def main():
|
||||
# Command-line arguments
|
||||
outdir = "build"
|
||||
commit = None
|
||||
target_platform = None
|
||||
target_arch = None
|
||||
nightly = False
|
||||
race = False
|
||||
branch = None
|
||||
version = get_current_version()
|
||||
rc = None
|
||||
package = False
|
||||
update = False
|
||||
clean = False
|
||||
upload = False
|
||||
test = False
|
||||
parallel = None
|
||||
timeout = None
|
||||
iteration = 1
|
||||
no_vet = False
|
||||
goarm_version = "6"
|
||||
run_get = True
|
||||
upload_bucket = None
|
||||
global debug
|
||||
|
||||
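# Minimal hand-rolled argument parser: flags are matched by substring, so values must be given as --flag=value.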
for arg in sys.argv[1:]:
|
||||
if '--outdir' in arg:
|
||||
# Output directory. If none is specified, builds are placed in ./build.
|
||||
outdir = arg.split("=")[1]
|
||||
elif '--commit' in arg:
|
||||
# Commit to build from. If none is specified, then it will build from the most recent commit.
|
||||
commit = arg.split("=")[1]
|
||||
elif '--branch' in arg:
|
||||
# Branch to build from. If none is specified, then it will build from the current branch.
|
||||
branch = arg.split("=")[1]
|
||||
elif '--arch' in arg:
|
||||
# Target architecture. If none is specified, then it will build for the current arch.
|
||||
target_arch = arg.split("=")[1]
|
||||
elif '--platform' in arg:
|
||||
# Target platform. If none is specified, then it will build for the current platform.
|
||||
target_platform = arg.split("=")[1]
|
||||
elif '--version' in arg:
|
||||
# Version to assign to this build (0.9.5, etc)
|
||||
version = arg.split("=")[1]
|
||||
elif '--rc' in arg:
|
||||
# Signifies that this is a release candidate build.
|
||||
rc = arg.split("=")[1]
|
||||
elif '--race' in arg:
|
||||
# Signifies that race detection should be enabled.
|
||||
race = True
|
||||
elif '--package' in arg:
|
||||
# Signifies that packages should be built.
|
||||
package = True
|
||||
elif '--nightly' in arg:
|
||||
# Signifies that this is a nightly build.
|
||||
nightly = True
|
||||
elif '--update' in arg:
|
||||
# Signifies that dependencies should be updated.
|
||||
update = True
|
||||
elif '--upload' in arg:
|
||||
# Signifies that the resulting packages should be uploaded to S3
|
||||
upload = True
|
||||
elif '--test' in arg:
|
||||
# Run tests and exit
|
||||
test = True
|
||||
elif '--parallel' in arg:
|
||||
# Set parallel for tests.
|
||||
parallel = int(arg.split("=")[1])
|
||||
elif '--timeout' in arg:
|
||||
# Set timeout for tests.
|
||||
timeout = arg.split("=")[1]
|
||||
elif '--clean' in arg:
|
||||
# Signifies that the outdir should be deleted before building
|
||||
clean = True
|
||||
elif '--iteration' in arg:
|
||||
iteration = arg.split("=")[1]
|
||||
elif '--no-vet' in arg:
|
||||
no_vet = True
|
||||
elif '--goarm' in arg:
|
||||
# Signifies GOARM flag to pass to build command when compiling for ARM
|
||||
goarm_version = arg.split("=")[1]
|
||||
elif '--bucket' in arg:
|
||||
# The bucket to upload the packages to, relies on boto
|
||||
upload_bucket = arg.split("=")[1]
|
||||
elif '--no-get' in arg:
|
||||
run_get = False
|
||||
elif '--debug' in arg:
|
||||
print("[DEBUG] Using debug output")
|
||||
debug = True
|
||||
elif '--help' in arg:
|
||||
print_usage()
|
||||
return 0
|
||||
else:
|
||||
print("!! Unknown argument: {}".format(arg))
|
||||
print_usage()
|
||||
return 1
|
||||
|
||||
if nightly:
|
||||
if rc:
|
||||
print("!! Cannot be both nightly and a release candidate! Stopping.")
|
||||
return 1
|
||||
# In order to support nightly builds on the repository, we are adding the epoch timestamp
|
||||
# to the version so that version numbers are always greater than the previous nightly.
|
||||
version = "{}.n{}".format(version, int(time.time()))
|
||||
|
||||
# Pre-build checks
|
||||
check_environ()
|
||||
check_prereqs()
|
||||
|
||||
if not commit:
|
||||
commit = get_current_commit(short=True)
|
||||
if not branch:
|
||||
branch = get_current_branch()
|
||||
if not target_arch:
|
||||
if 'arm' in get_system_arch():
|
||||
# Normalize ARM variants reported by uname (e.g. 'armv7l') to plain 'arm'
|
||||
target_arch = "arm"
|
||||
else:
|
||||
target_arch = get_system_arch()
|
||||
if not target_platform:
|
||||
target_platform = get_system_platform()
|
||||
if rc or nightly:
|
||||
# If a release candidate or nightly, set iteration to 0 (instead of 1)
|
||||
iteration = 0
|
||||
|
||||
if target_arch == '386':
|
||||
target_arch = 'i386'
|
||||
elif target_arch == 'x86_64':
|
||||
target_arch = 'amd64'
|
||||
|
||||
build_output = {}
|
||||
if test:
|
||||
if not run_tests(race, parallel, timeout, no_vet):
|
||||
return 1
|
||||
return 0
|
||||
|
||||
if run_get:
|
||||
go_get(update=update)
|
||||
|
||||
platforms = []
|
||||
single_build = True
|
||||
if target_platform == 'all':
|
||||
platforms = list(supported_builds.keys())
|
||||
single_build = False
|
||||
else:
|
||||
platforms = [target_platform]
|
||||
|
||||
for platform in platforms:
|
||||
build_output.update( { platform : {} } )
|
||||
archs = []
|
||||
if target_arch == "all":
|
||||
single_build = False
|
||||
archs = supported_builds.get(platform)
|
||||
else:
|
||||
archs = [target_arch]
|
||||
for arch in archs:
|
||||
od = outdir
|
||||
if not single_build:
|
||||
od = os.path.join(outdir, platform, arch)
|
||||
build(version=version,
|
||||
branch=branch,
|
||||
commit=commit,
|
||||
platform=platform,
|
||||
arch=arch,
|
||||
nightly=nightly,
|
||||
rc=rc,
|
||||
race=race,
|
||||
clean=clean,
|
||||
outdir=od,
|
||||
goarm_version=goarm_version)
|
||||
build_output.get(platform).update( { arch : od } )
|
||||
|
||||
# Build packages
|
||||
if package:
|
||||
if not check_path_for("fpm"):
|
||||
print("!! Cannot package without command 'fpm'. Stopping.")
|
||||
return 1
|
||||
packages = build_packages(build_output, version, nightly=nightly, rc=rc, iteration=iteration)
|
||||
# Optionally upload to S3
|
||||
if upload:
|
||||
upload_packages(packages, bucket_name=upload_bucket, nightly=nightly)
|
||||
return 0
|
||||
|
||||
if __name__ == '__main__':
|
||||
sys.exit(main())
|
||||
@@ -4,12 +4,14 @@ machine:
|
||||
post:
|
||||
- sudo service zookeeper stop
|
||||
- go version
|
||||
- go version | grep 1.5.2 || sudo rm -rf /usr/local/go
|
||||
- wget https://storage.googleapis.com/golang/go1.5.2.linux-amd64.tar.gz
|
||||
- sudo tar -C /usr/local -xzf go1.5.2.linux-amd64.tar.gz
|
||||
- go version | grep 1.5.1 || sudo rm -rf /usr/local/go
|
||||
- wget https://storage.googleapis.com/golang/go1.5.1.linux-amd64.tar.gz
|
||||
- sudo tar -C /usr/local -xzf go1.5.1.linux-amd64.tar.gz
|
||||
- go version
|
||||
|
||||
dependencies:
|
||||
cache_directories:
|
||||
- "~/telegraf-build/src"
|
||||
override:
|
||||
- docker info
|
||||
|
||||
|
||||
@@ -7,235 +7,146 @@ import (
|
||||
"os"
|
||||
"os/signal"
|
||||
"strings"
|
||||
"syscall"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/internal/config"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/all"
|
||||
_ "github.com/influxdata/telegraf/plugins/outputs/all"
|
||||
"github.com/influxdb/telegraf"
|
||||
"github.com/influxdb/telegraf/internal/config"
|
||||
_ "github.com/influxdb/telegraf/outputs/all"
|
||||
_ "github.com/influxdb/telegraf/plugins/all"
|
||||
)
|
||||
|
||||
var fDebug = flag.Bool("debug", false,
|
||||
"show metrics as they're generated to stdout")
|
||||
var fQuiet = flag.Bool("quiet", false,
|
||||
"run in quiet mode")
|
||||
var fTest = flag.Bool("test", false, "gather metrics, print them out, and exit")
|
||||
var fConfig = flag.String("config", "", "configuration file to load")
|
||||
var fConfigDirectory = flag.String("config-directory", "",
|
||||
var fConfigDirectory = flag.String("configdirectory", "",
|
||||
"directory containing additional *.conf files")
|
||||
var fVersion = flag.Bool("version", false, "display the version")
|
||||
var fSampleConfig = flag.Bool("sample-config", false,
|
||||
"print out full sample configuration")
|
||||
var fPidfile = flag.String("pidfile", "", "file to write our pid to")
|
||||
var fInputFilters = flag.String("input-filter", "",
|
||||
"filter the inputs to enable, separator is :")
|
||||
var fOutputFilters = flag.String("output-filter", "",
|
||||
var fPLuginFilters = flag.String("filter", "",
|
||||
"filter the plugins to enable, separator is :")
|
||||
var fOutputFilters = flag.String("outputfilter", "",
|
||||
"filter the outputs to enable, separator is :")
|
||||
var fUsage = flag.String("usage", "",
|
||||
"print usage for a plugin, ie, 'telegraf -usage mysql'")
|
||||
|
||||
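// Legacy flag names kept as aliases so command lines written for older Telegraf releases keep working.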
var fInputFiltersLegacy = flag.String("filter", "",
|
||||
"filter the inputs to enable, separator is :")
|
||||
var fOutputFiltersLegacy = flag.String("outputfilter", "",
|
||||
"filter the outputs to enable, separator is :")
|
||||
var fConfigDirectoryLegacy = flag.String("configdirectory", "",
|
||||
"directory containing additional *.conf files")
|
||||
|
||||
// Telegraf version
|
||||
// -ldflags "-X main.Version=`git describe --always --tags`"
|
||||
var Version string
|
||||
|
||||
const usage = `Telegraf, The plugin-driven server agent for collecting and reporting metrics.
|
||||
|
||||
Usage:
|
||||
|
||||
telegraf <flags>
|
||||
|
||||
The flags are:
|
||||
|
||||
-config <file> configuration file to load
|
||||
-test gather metrics once, print them to stdout, and exit
|
||||
-sample-config print out full sample configuration to stdout
|
||||
-config-directory directory containing additional *.conf files
|
||||
-input-filter filter the input plugins to enable, separator is :
|
||||
-output-filter filter the output plugins to enable, separator is :
|
||||
-usage print usage for a plugin, ie, 'telegraf -usage mysql'
|
||||
-debug print metrics as they're generated to stdout
|
||||
-quiet run in quiet mode
|
||||
-version print the version to stdout
|
||||
|
||||
Examples:
|
||||
|
||||
# generate a telegraf config file:
|
||||
telegraf -sample-config > telegraf.conf
|
||||
|
||||
# generate config with only cpu input & influxdb output plugins defined
|
||||
telegraf -sample-config -input-filter cpu -output-filter influxdb
|
||||
|
||||
# run a single telegraf collection, outputting metrics to stdout
|
||||
telegraf -config telegraf.conf -test
|
||||
|
||||
# run telegraf with all plugins defined in config file
|
||||
telegraf -config telegraf.conf
|
||||
|
||||
# run telegraf, enabling the cpu & memory input, and influxdb output plugins
|
||||
telegraf -config telegraf.conf -input-filter cpu:mem -output-filter influxdb
|
||||
`
|
||||
|
||||
func main() {
|
||||
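// The agent runs inside this loop; the SIGHUP handler below re-queues true on the
// reload channel so the config is re-read and the agent restarted.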
reload := make(chan bool, 1)
|
||||
reload <- true
|
||||
for <-reload {
|
||||
reload <- false
|
||||
flag.Usage = usageExit
|
||||
flag.Parse()
|
||||
flag.Parse()
|
||||
|
||||
if flag.NFlag() == 0 {
|
||||
usageExit()
|
||||
}
|
||||
|
||||
var inputFilters []string
|
||||
if *fInputFiltersLegacy != "" {
|
||||
inputFilter := strings.TrimSpace(*fInputFiltersLegacy)
|
||||
inputFilters = strings.Split(":"+inputFilter+":", ":")
|
||||
}
|
||||
if *fInputFilters != "" {
|
||||
inputFilter := strings.TrimSpace(*fInputFilters)
|
||||
inputFilters = strings.Split(":"+inputFilter+":", ":")
|
||||
}
|
||||
|
||||
var outputFilters []string
|
||||
if *fOutputFiltersLegacy != "" {
|
||||
outputFilter := strings.TrimSpace(*fOutputFiltersLegacy)
|
||||
outputFilters = strings.Split(":"+outputFilter+":", ":")
|
||||
}
|
||||
if *fOutputFilters != "" {
|
||||
outputFilter := strings.TrimSpace(*fOutputFilters)
|
||||
outputFilters = strings.Split(":"+outputFilter+":", ":")
|
||||
}
|
||||
|
||||
if *fVersion {
|
||||
v := fmt.Sprintf("Telegraf - Version %s", Version)
|
||||
fmt.Println(v)
|
||||
return
|
||||
}
|
||||
|
||||
if *fSampleConfig {
|
||||
config.PrintSampleConfig(inputFilters, outputFilters)
|
||||
return
|
||||
}
|
||||
|
||||
if *fUsage != "" {
|
||||
if err := config.PrintInputConfig(*fUsage); err != nil {
|
||||
if err2 := config.PrintOutputConfig(*fUsage); err2 != nil {
|
||||
log.Fatalf("%s and %s", err, err2)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
var (
|
||||
c *config.Config
|
||||
err error
|
||||
)
|
||||
|
||||
if *fConfig != "" {
|
||||
c = config.NewConfig()
|
||||
c.OutputFilters = outputFilters
|
||||
c.InputFilters = inputFilters
|
||||
err = c.LoadConfig(*fConfig)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
} else {
|
||||
fmt.Println("Usage: Telegraf")
|
||||
flag.PrintDefaults()
|
||||
return
|
||||
}
|
||||
|
||||
if *fConfigDirectoryLegacy != "" {
|
||||
err = c.LoadDirectory(*fConfigDirectoryLegacy)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
if *fConfigDirectory != "" {
|
||||
err = c.LoadDirectory(*fConfigDirectory)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
if len(c.Outputs) == 0 {
|
||||
log.Fatalf("Error: no outputs found, did you provide a valid config file?")
|
||||
}
|
||||
if len(c.Inputs) == 0 {
|
||||
log.Fatalf("Error: no inputs found, did you provide a valid config file?")
|
||||
}
|
||||
|
||||
ag, err := telegraf.NewAgent(c)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if *fDebug {
|
||||
ag.Config.Agent.Debug = true
|
||||
}
|
||||
|
||||
if *fQuiet {
|
||||
ag.Config.Agent.Quiet = true
|
||||
}
|
||||
|
||||
if *fTest {
|
||||
err = ag.Test()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
err = ag.Connect()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
shutdown := make(chan struct{})
|
||||
signals := make(chan os.Signal)
|
||||
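// Interrupt shuts the agent down for good; SIGHUP re-arms the reload loop in main
// and then stops the current run so it can be restarted with fresh config.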
signal.Notify(signals, os.Interrupt, syscall.SIGHUP)
|
||||
go func() {
|
||||
sig := <-signals
|
||||
if sig == os.Interrupt {
|
||||
close(shutdown)
|
||||
}
|
||||
if sig == syscall.SIGHUP {
|
||||
log.Printf("Reloading Telegraf config\n")
|
||||
<-reload
|
||||
reload <- true
|
||||
close(shutdown)
|
||||
}
|
||||
}()
|
||||
|
||||
log.Printf("Starting Telegraf (version %s)\n", Version)
|
||||
log.Printf("Loaded outputs: %s", strings.Join(c.OutputNames(), " "))
|
||||
log.Printf("Loaded inputs: %s", strings.Join(c.InputNames(), " "))
|
||||
log.Printf("Tags enabled: %s", c.ListTags())
|
||||
|
||||
if *fPidfile != "" {
|
||||
f, err := os.Create(*fPidfile)
|
||||
if err != nil {
|
||||
log.Fatalf("Unable to create pidfile: %s", err)
|
||||
}
|
||||
|
||||
fmt.Fprintf(f, "%d\n", os.Getpid())
|
||||
|
||||
f.Close()
|
||||
}
|
||||
|
||||
ag.Run(shutdown)
|
||||
var pluginFilters []string
|
||||
if *fPLuginFilters != "" {
|
||||
pluginsFilter := strings.TrimSpace(*fPLuginFilters)
|
||||
pluginFilters = strings.Split(":"+pluginsFilter+":", ":")
|
||||
}
|
||||
}
|
||||
|
||||
func usageExit() {
|
||||
fmt.Println(usage)
|
||||
os.Exit(0)
|
||||
var outputFilters []string
|
||||
if *fOutputFilters != "" {
|
||||
outputFilter := strings.TrimSpace(*fOutputFilters)
|
||||
outputFilters = strings.Split(":"+outputFilter+":", ":")
|
||||
}
|
||||
|
||||
if *fVersion {
|
||||
v := fmt.Sprintf("Telegraf - Version %s", Version)
|
||||
fmt.Println(v)
|
||||
return
|
||||
}
|
||||
|
||||
if *fSampleConfig {
|
||||
config.PrintSampleConfig(pluginFilters, outputFilters)
|
||||
return
|
||||
}
|
||||
|
||||
if *fUsage != "" {
|
||||
if err := config.PrintPluginConfig(*fUsage); err != nil {
|
||||
if err2 := config.PrintOutputConfig(*fUsage); err2 != nil {
|
||||
log.Fatalf("%s and %s", err, err2)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
var (
|
||||
c *config.Config
|
||||
err error
|
||||
)
|
||||
|
||||
if *fConfig != "" {
|
||||
c = config.NewConfig()
|
||||
c.OutputFilters = outputFilters
|
||||
c.PluginFilters = pluginFilters
|
||||
err = c.LoadConfig(*fConfig)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
} else {
|
||||
fmt.Println("Usage: Telegraf")
|
||||
flag.PrintDefaults()
|
||||
return
|
||||
}
|
||||
|
||||
if *fConfigDirectory != "" {
|
||||
err = c.LoadDirectory(*fConfigDirectory)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
if len(c.Outputs) == 0 {
|
||||
log.Fatalf("Error: no outputs found, did you provide a valid config file?")
|
||||
}
|
||||
if len(c.Plugins) == 0 {
|
||||
log.Fatalf("Error: no plugins found, did you provide a valid config file?")
|
||||
}
|
||||
|
||||
ag, err := telegraf.NewAgent(c)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if *fDebug {
|
||||
ag.Config.Agent.Debug = true
|
||||
}
|
||||
|
||||
if *fTest {
|
||||
err = ag.Test()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
err = ag.Connect()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
shutdown := make(chan struct{})
|
||||
signals := make(chan os.Signal)
|
||||
signal.Notify(signals, os.Interrupt)
|
||||
go func() {
|
||||
<-signals
|
||||
close(shutdown)
|
||||
}()
|
||||
|
||||
log.Printf("Starting Telegraf (version %s)\n", Version)
|
||||
log.Printf("Loaded outputs: %s", strings.Join(c.OutputNames(), " "))
|
||||
log.Printf("Loaded plugins: %s", strings.Join(c.PluginNames(), " "))
|
||||
log.Printf("Tags enabled: %s", c.ListTags())
|
||||
|
||||
if *fPidfile != "" {
|
||||
f, err := os.Create(*fPidfile)
|
||||
if err != nil {
|
||||
log.Fatalf("Unable to create pidfile: %s", err)
|
||||
}
|
||||
|
||||
fmt.Fprintf(f, "%d\n", os.Getpid())
|
||||
|
||||
f.Close()
|
||||
}
|
||||
|
||||
ag.Run(shutdown)
|
||||
}
|
||||
|
||||
@@ -1,18 +1,27 @@
|
||||
# Telegraf configuration
|
||||
|
||||
# Telegraf is entirely plugin driven. All metrics are gathered from the
|
||||
# declared inputs, and sent to the declared outputs.
|
||||
# declared plugins.
|
||||
|
||||
# Plugins must be declared in here to be active.
|
||||
# To deactivate a plugin, comment out the name and any variables.
|
||||
# Even if a plugin has no configuration, it must be declared in here
|
||||
# to be active. Declaring a plugin means just specifying the name
|
||||
# as a section with no variables. To deactivate a plugin, comment
|
||||
# out the name and any variables.
|
||||
|
||||
# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
|
||||
# Use 'telegraf -config telegraf.toml -test' to see what metrics a config
|
||||
# file would generate.
|
||||
|
||||
# Global tags can be specified here in key="value" format.
|
||||
# One rule that plugins conform to is wherever a connection string
|
||||
# can be passed, the values '' and 'localhost' are treated specially.
|
||||
# They tell the plugin to use its own builtin configuration to
|
||||
# connect to the local system.
|
||||
|
||||
# NOTE: The configuration has a few required parameters. They are marked
|
||||
# with 'required'. Be sure to edit those to make this configuration work.
|
||||
|
||||
# Tags can also be specified via a normal map, but only one form at a time:
|
||||
[tags]
|
||||
# dc = "us-east-1" # will tag all metrics with dc=us-east-1
|
||||
# rack = "1a"
|
||||
# dc = "us-east-1"
|
||||
|
||||
# Configuration for telegraf agent
|
||||
[agent]
|
||||
@@ -40,6 +49,8 @@
|
||||
# OUTPUTS #
|
||||
###############################################################################
|
||||
|
||||
[outputs]
|
||||
|
||||
# Configuration for influxdb server to send metrics to
|
||||
[[outputs.influxdb]]
|
||||
# The full HTTP or UDP endpoint URL for your InfluxDB instance.
|
||||
@@ -65,46 +76,48 @@
|
||||
|
||||
|
||||
###############################################################################
|
||||
# INPUTS #
|
||||
# PLUGINS #
|
||||
###############################################################################
|
||||
|
||||
[plugins]
|
||||
|
||||
# Read metrics about cpu usage
|
||||
[[inputs.cpu]]
|
||||
[[plugins.cpu]]
|
||||
# Whether to report per-cpu stats or not
|
||||
percpu = true
|
||||
# Whether to report total system cpu stats or not
|
||||
totalcpu = true
|
||||
# Comment this line if you want the raw CPU time metrics
|
||||
drop = ["time_*"]
|
||||
drop = ["cpu_time"]
|
||||
|
||||
# Read metrics about disk usage by mount point
|
||||
[[inputs.disk]]
|
||||
[[plugins.disk]]
|
||||
# By default, telegraf gathers stats for all mountpoints.
|
||||
# Setting mountpoints will restrict the stats to the specified mountpoints.
|
||||
# mount_points=["/"]
|
||||
# Mountpoints=["/"]
|
||||
|
||||
# Read metrics about disk IO by device
|
||||
[[inputs.diskio]]
|
||||
[[plugins.diskio]]
|
||||
# By default, telegraf will gather stats for all devices including
|
||||
# disk partitions.
|
||||
# Setting devices will restrict the stats to the specified devices.
|
||||
# devices = ["sda", "sdb"]
|
||||
# Setting devices will restrict the stats to the specified devices.
|
||||
# Devices=["sda","sdb"]
|
||||
# Uncomment the following line if you do not need disk serial numbers.
|
||||
# skip_serial_number = true
|
||||
# SkipSerialNumber = true
|
||||
|
||||
# Read metrics about memory usage
|
||||
[[inputs.mem]]
|
||||
[[plugins.mem]]
|
||||
# no configuration
|
||||
|
||||
# Read metrics about swap memory usage
|
||||
[[inputs.swap]]
|
||||
[[plugins.swap]]
|
||||
# no configuration
|
||||
|
||||
# Read metrics about system load & uptime
|
||||
[[inputs.system]]
|
||||
[[plugins.system]]
|
||||
# no configuration
|
||||
|
||||
|
||||
###############################################################################
|
||||
# SERVICE INPUTS #
|
||||
# SERVICE PLUGINS #
|
||||
###############################################################################
|
||||
|
||||
@@ -10,13 +10,14 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/telegraf/internal"
|
||||
"github.com/influxdata/telegraf/internal/models"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
"github.com/influxdata/telegraf/plugins/outputs"
|
||||
"github.com/influxdb/telegraf/internal"
|
||||
"github.com/influxdb/telegraf/outputs"
|
||||
"github.com/influxdb/telegraf/plugins"
|
||||
|
||||
"github.com/influxdata/config"
|
||||
"github.com/naoina/toml"
|
||||
"github.com/naoina/toml/ast"
|
||||
|
||||
"github.com/influxdb/influxdb/client/v2"
|
||||
)
|
||||
|
||||
// Config specifies the URL/user/password for the database that telegraf
|
||||
@@ -24,12 +25,12 @@ import (
|
||||
// specified
|
||||
type Config struct {
|
||||
Tags map[string]string
|
||||
InputFilters []string
|
||||
PluginFilters []string
|
||||
OutputFilters []string
|
||||
|
||||
Agent *AgentConfig
|
||||
Inputs []*models.RunningInput
|
||||
Outputs []*models.RunningOutput
|
||||
Plugins []*RunningPlugin
|
||||
Outputs []*RunningOutput
|
||||
}
|
||||
|
||||
func NewConfig() *Config {
|
||||
@@ -39,13 +40,14 @@ func NewConfig() *Config {
|
||||
Interval: internal.Duration{Duration: 10 * time.Second},
|
||||
RoundInterval: true,
|
||||
FlushInterval: internal.Duration{Duration: 10 * time.Second},
|
||||
FlushRetries: 2,
|
||||
FlushJitter: internal.Duration{Duration: 5 * time.Second},
|
||||
},
|
||||
|
||||
Tags: make(map[string]string),
|
||||
Inputs: make([]*models.RunningInput, 0),
|
||||
Outputs: make([]*models.RunningOutput, 0),
|
||||
InputFilters: make([]string, 0),
|
||||
Plugins: make([]*RunningPlugin, 0),
|
||||
Outputs: make([]*RunningOutput, 0),
|
||||
PluginFilters: make([]string, 0),
|
||||
OutputFilters: make([]string, 0),
|
||||
}
|
||||
return c
|
||||
@@ -59,25 +61,14 @@ type AgentConfig struct {
|
||||
// ie, if Interval=10s then always collect on :00, :10, :20, etc.
|
||||
RoundInterval bool
|
||||
|
||||
// CollectionJitter is used to jitter the collection by a random amount.
|
||||
// Each plugin will sleep for a random time within jitter before collecting.
|
||||
// This can be used to avoid many plugins querying things like sysfs at the
|
||||
// same time, which can have a measurable effect on the system.
|
||||
CollectionJitter internal.Duration
|
||||
|
||||
// Interval at which to flush data
|
||||
FlushInterval internal.Duration
|
||||
|
||||
// FlushJitter jitters the flush interval by a random amount.
|
||||
// This is primarily to avoid large write spikes for users running a large
|
||||
// number of telegraf instances.
|
||||
// ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
|
||||
FlushJitter internal.Duration
|
||||
// FlushRetries is the number of times to retry each data flush
|
||||
FlushRetries int
|
||||
|
||||
// MetricBufferLimit is the max number of metrics that each output plugin
|
||||
// will cache. The buffer is cleared when a successful write occurs. When
|
||||
// full, the oldest metrics will be overwritten.
|
||||
MetricBufferLimit int
|
||||
// FlushJitter jitters the flush interval by a random amount
|
||||
FlushJitter internal.Duration
|
||||
|
||||
// TODO(cam): Remove UTC and Precision parameters, they are no longer
|
||||
// valid for the agent config. Leaving them here for now for backwards-
|
||||
@@ -85,24 +76,144 @@ type AgentConfig struct {
|
||||
UTC bool `toml:"utc"`
|
||||
Precision string
|
||||
|
||||
// Debug is the option for running in debug mode
|
||||
Debug bool
|
||||
|
||||
// Quiet is the option for running in quiet mode
|
||||
Quiet bool
|
||||
// Option for running in debug mode
|
||||
Debug bool
|
||||
Hostname string
|
||||
}
|
||||
|
||||
// InputNames returns a list of strings of the configured inputs.
|
||||
func (c *Config) InputNames() []string {
|
||||
// TagFilter is the name of a tag, and the values on which to filter
|
||||
type TagFilter struct {
|
||||
Name string
|
||||
Filter []string
|
||||
}
|
||||
|
||||
type RunningOutput struct {
|
||||
Name string
|
||||
Output outputs.Output
|
||||
Config *OutputConfig
|
||||
}
|
||||
|
||||
type RunningPlugin struct {
|
||||
Name string
|
||||
Plugin plugins.Plugin
|
||||
Config *PluginConfig
|
||||
}
|
||||
|
||||
// Filter containing drop/pass and tagdrop/tagpass rules
|
||||
type Filter struct {
|
||||
Drop []string
|
||||
Pass []string
|
||||
|
||||
TagDrop []TagFilter
|
||||
TagPass []TagFilter
|
||||
|
||||
IsActive bool
|
||||
}
|
||||
|
||||
// PluginConfig containing a name, interval, and filter
|
||||
type PluginConfig struct {
|
||||
Name string
|
||||
NameOverride string
|
||||
MeasurementPrefix string
|
||||
MeasurementSuffix string
|
||||
Tags map[string]string
|
||||
Filter Filter
|
||||
Interval time.Duration
|
||||
}
|
||||
|
||||
// OutputConfig containing name and filter
|
||||
type OutputConfig struct {
|
||||
Name string
|
||||
Filter Filter
|
||||
}
|
||||
|
||||
// FilterPoints returns a filtered slice of client.Points based on whether filters
|
||||
// are active for this RunningOutput.
|
||||
func (ro *RunningOutput) FilterPoints(points []*client.Point) []*client.Point {
|
||||
if !ro.Config.Filter.IsActive {
|
||||
return points
|
||||
}
|
||||
|
||||
var filteredPoints []*client.Point
|
||||
for i := range points {
|
||||
if !ro.Config.Filter.ShouldPass(points[i].Name()) || !ro.Config.Filter.ShouldTagsPass(points[i].Tags()) {
|
||||
continue
|
||||
}
|
||||
filteredPoints = append(filteredPoints, points[i])
|
||||
}
|
||||
return filteredPoints
|
||||
}
|
||||
|
||||
// ShouldPass returns true if the metric should pass, false if should drop
|
||||
// based on the drop/pass filter parameters
|
||||
func (f Filter) ShouldPass(fieldkey string) bool {
|
||||
if f.Pass != nil {
|
||||
for _, pat := range f.Pass {
|
||||
// TODO remove HasPrefix check, leaving it for now for legacy support.
|
||||
// Cam, 2015-12-07
|
||||
if strings.HasPrefix(fieldkey, pat) || internal.Glob(pat, fieldkey) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
if f.Drop != nil {
|
||||
for _, pat := range f.Drop {
|
||||
// TODO remove HasPrefix check, leaving it for now for legacy support.
|
||||
// Cam, 2015-12-07
|
||||
if strings.HasPrefix(fieldkey, pat) || internal.Glob(pat, fieldkey) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// ShouldTagsPass returns true if the metric should pass, false if should drop
|
||||
// based on the tagdrop/tagpass filter parameters
|
||||
func (f Filter) ShouldTagsPass(tags map[string]string) bool {
|
||||
if f.TagPass != nil {
|
||||
for _, pat := range f.TagPass {
|
||||
if tagval, ok := tags[pat.Name]; ok {
|
||||
for _, filter := range pat.Filter {
|
||||
if internal.Glob(filter, tagval) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
if f.TagDrop != nil {
|
||||
for _, pat := range f.TagDrop {
|
||||
if tagval, ok := tags[pat.Name]; ok {
|
||||
for _, filter := range pat.Filter {
|
||||
if internal.Glob(filter, tagval) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
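The pass/drop precedence implemented by ShouldPass and ShouldTagsPass above is exercised by the config tests further down in this diff. As a condensed, standalone illustration only, the following Go sketch reproduces the same precedence rules, using the standard library's path.Match as a stand-in for internal.Glob (an assumption made so the snippet runs outside this repository; the legacy HasPrefix shortcut in the original is also ignored here):

package main

import (
    "fmt"
    "path"
)

// shouldPass mirrors Filter.ShouldPass: Pass patterns are checked first and win
// outright; otherwise Drop patterns can reject; an empty filter passes everything.
func shouldPass(pass, drop []string, key string) bool {
    if pass != nil {
        for _, pat := range pass {
            if ok, _ := path.Match(pat, key); ok {
                return true
            }
        }
        return false
    }
    if drop != nil {
        for _, pat := range drop {
            if ok, _ := path.Match(pat, key); ok {
                return false
            }
        }
    }
    return true
}

func main() {
    pass := []string{"foo*", "cpu_usage_idle"}
    drop := []string{"foo*", "cpu_usage_idle"}
    fmt.Println(shouldPass(pass, nil, "foo_bar"))        // true  (matches "foo*")
    fmt.Println(shouldPass(pass, nil, "cpu_usage_busy")) // false (no Pass pattern matches)
    fmt.Println(shouldPass(nil, drop, "foo.bar"))        // false (matches a Drop pattern)
    fmt.Println(shouldPass(nil, nil, "anything"))        // true  (empty filter)
}

Pass patterns always take priority over Drop patterns, and an empty filter lets every measurement through, which matches the behaviour asserted in TestFilter_Empty, TestFilter_Pass, and TestFilter_Drop below.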
|
||||
// Plugins returns a list of strings of the configured plugins.
|
||||
func (c *Config) PluginNames() []string {
|
||||
var name []string
|
||||
for _, input := range c.Inputs {
|
||||
name = append(name, input.Name)
|
||||
for _, plugin := range c.Plugins {
|
||||
name = append(name, plugin.Name)
|
||||
}
|
||||
return name
|
||||
}
|
||||
|
||||
// OutputNames returns a list of strings of the configured outputs.
|
||||
// Outputs returns a list of strings of the configured plugins.
|
||||
func (c *Config) OutputNames() []string {
|
||||
var name []string
|
||||
for _, output := range c.Outputs {
|
||||
@@ -128,37 +239,36 @@ func (c *Config) ListTags() string {
|
||||
var header = `# Telegraf configuration
|
||||
|
||||
# Telegraf is entirely plugin driven. All metrics are gathered from the
|
||||
# declared inputs, and sent to the declared outputs.
|
||||
# declared plugins.
|
||||
|
||||
# Plugins must be declared in here to be active.
|
||||
# To deactivate a plugin, comment out the name and any variables.
|
||||
# Even if a plugin has no configuration, it must be declared in here
|
||||
# to be active. Declaring a plugin means just specifying the name
|
||||
# as a section with no variables. To deactivate a plugin, comment
|
||||
# out the name and any variables.
|
||||
|
||||
# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
|
||||
# Use 'telegraf -config telegraf.toml -test' to see what metrics a config
|
||||
# file would generate.
|
||||
|
||||
# Global tags can be specified here in key="value" format.
|
||||
# One rule that plugins conform to is wherever a connection string
|
||||
# can be passed, the values '' and 'localhost' are treated specially.
|
||||
# They tell the plugin to use its own builtin configuration to
|
||||
# connect to the local system.
|
||||
|
||||
# NOTE: The configuration has a few required parameters. They are marked
|
||||
# with 'required'. Be sure to edit those to make this configuration work.
|
||||
|
||||
# Tags can also be specified via a normal map, but only one form at a time:
|
||||
[tags]
|
||||
# dc = "us-east-1" # will tag all metrics with dc=us-east-1
|
||||
# rack = "1a"
|
||||
# dc = "us-east-1"
|
||||
|
||||
# Configuration for telegraf agent
|
||||
[agent]
|
||||
# Default data collection interval for all inputs
|
||||
# Default data collection interval for all plugins
|
||||
interval = "10s"
|
||||
# Rounds collection interval to 'interval'
|
||||
# ie, if interval="10s" then always collect on :00, :10, :20, etc.
|
||||
round_interval = true
|
||||
|
||||
# Telegraf will cache metric_buffer_limit metrics for each output, and will
|
||||
# flush this buffer on a successful write.
|
||||
metric_buffer_limit = 10000
|
||||
|
||||
# Collection jitter is used to jitter the collection by a random amount.
|
||||
# Each plugin will sleep for a random time within jitter before collecting.
|
||||
# This can be used to avoid many plugins querying things like sysfs at the
|
||||
# same time, which can have a measurable effect on the system.
|
||||
collection_jitter = "0s"
|
||||
|
||||
# Default data flushing interval for all outputs. You should not set this below
|
||||
# interval. Maximum flush_interval will be flush_interval + flush_jitter
|
||||
flush_interval = "10s"
|
||||
@@ -169,8 +279,6 @@ var header = `# Telegraf configuration
|
||||
|
||||
# Run telegraf in debug mode
|
||||
debug = false
|
||||
# Run telegraf in quiet mode
|
||||
quiet = false
|
||||
# Override default hostname, if empty use os.Hostname()
|
||||
hostname = ""
|
||||
|
||||
@@ -179,20 +287,22 @@ var header = `# Telegraf configuration
|
||||
# OUTPUTS #
|
||||
###############################################################################
|
||||
|
||||
[outputs]
|
||||
`
|
||||
|
||||
var pluginHeader = `
|
||||
|
||||
###############################################################################
|
||||
# INPUTS #
|
||||
# PLUGINS #
|
||||
###############################################################################
|
||||
|
||||
[plugins]
|
||||
`
|
||||
|
||||
var serviceInputHeader = `
|
||||
var servicePluginHeader = `
|
||||
|
||||
###############################################################################
|
||||
# SERVICE INPUTS #
|
||||
# SERVICE PLUGINS #
|
||||
###############################################################################
|
||||
`
|
||||
|
||||
@@ -216,35 +326,35 @@ func PrintSampleConfig(pluginFilters []string, outputFilters []string) {
|
||||
printConfig(oname, output, "outputs")
|
||||
}
|
||||
|
||||
// Filter inputs
|
||||
// Filter plugins
|
||||
var pnames []string
|
||||
for pname := range inputs.Inputs {
|
||||
for pname := range plugins.Plugins {
|
||||
if len(pluginFilters) == 0 || sliceContains(pname, pluginFilters) {
|
||||
pnames = append(pnames, pname)
|
||||
}
|
||||
}
|
||||
sort.Strings(pnames)
|
||||
|
||||
// Print Inputs
|
||||
// Print Plugins
|
||||
fmt.Printf(pluginHeader)
|
||||
servInputs := make(map[string]inputs.ServiceInput)
|
||||
servPlugins := make(map[string]plugins.ServicePlugin)
|
||||
for _, pname := range pnames {
|
||||
creator := inputs.Inputs[pname]
|
||||
input := creator()
|
||||
creator := plugins.Plugins[pname]
|
||||
plugin := creator()
|
||||
|
||||
switch p := input.(type) {
|
||||
case inputs.ServiceInput:
|
||||
servInputs[pname] = p
|
||||
switch p := plugin.(type) {
|
||||
case plugins.ServicePlugin:
|
||||
servPlugins[pname] = p
|
||||
continue
|
||||
}
|
||||
|
||||
printConfig(pname, input, "inputs")
|
||||
printConfig(pname, plugin, "plugins")
|
||||
}
|
||||
|
||||
// Print Service Inputs
|
||||
fmt.Printf(serviceInputHeader)
|
||||
for name, input := range servInputs {
|
||||
printConfig(name, input, "inputs")
|
||||
// Print Service Plugins
|
||||
fmt.Printf(servicePluginHeader)
|
||||
for name, plugin := range servPlugins {
|
||||
printConfig(name, plugin, "plugins")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -272,12 +382,12 @@ func sliceContains(name string, list []string) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// PrintInputConfig prints the config usage of a single input.
|
||||
func PrintInputConfig(name string) error {
|
||||
if creator, ok := inputs.Inputs[name]; ok {
|
||||
printConfig(name, creator(), "inputs")
|
||||
// PrintPluginConfig prints the config usage of a single plugin.
|
||||
func PrintPluginConfig(name string) error {
|
||||
if creator, ok := plugins.Plugins[name]; ok {
|
||||
printConfig(name, creator(), "plugins")
|
||||
} else {
|
||||
return errors.New(fmt.Sprintf("Input %s not found", name))
|
||||
return errors.New(fmt.Sprintf("Plugin %s not found", name))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -315,7 +425,12 @@ func (c *Config) LoadDirectory(path string) error {
|
||||
|
||||
// LoadConfig loads the given config file and applies it to c
|
||||
func (c *Config) LoadConfig(path string) error {
|
||||
tbl, err := config.ParseFile(path)
|
||||
data, err := ioutil.ReadFile(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
tbl, err := toml.Parse(data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -328,25 +443,43 @@ func (c *Config) LoadConfig(path string) error {
|
||||
|
||||
switch name {
|
||||
case "agent":
|
||||
if err = config.UnmarshalTable(subTable, c.Agent); err != nil {
|
||||
if err = toml.UnmarshalTable(subTable, c.Agent); err != nil {
|
||||
log.Printf("Could not parse [agent] config\n")
|
||||
return err
|
||||
}
|
||||
case "tags":
|
||||
if err = config.UnmarshalTable(subTable, c.Tags); err != nil {
|
||||
if err = toml.UnmarshalTable(subTable, c.Tags); err != nil {
|
||||
log.Printf("Could not parse [tags] config\n")
|
||||
return err
|
||||
}
|
||||
case "outputs":
|
||||
for outputName, outputVal := range subTable.Fields {
|
||||
switch outputSubTable := outputVal.(type) {
|
||||
case *ast.Table:
|
||||
if err = c.addOutput(outputName, outputSubTable); err != nil {
|
||||
return err
|
||||
}
|
||||
case []*ast.Table:
|
||||
for _, t := range outputSubTable {
|
||||
if err = c.addOutput(outputName, t); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
default:
|
||||
return fmt.Errorf("Unsupported config format: %s",
|
||||
outputName)
|
||||
}
|
||||
}
|
||||
case "plugins":
|
||||
for pluginName, pluginVal := range subTable.Fields {
|
||||
switch pluginSubTable := pluginVal.(type) {
|
||||
case *ast.Table:
|
||||
if err = c.addOutput(pluginName, pluginSubTable); err != nil {
|
||||
if err = c.addPlugin(pluginName, pluginSubTable); err != nil {
|
||||
return err
|
||||
}
|
||||
case []*ast.Table:
|
||||
for _, t := range pluginSubTable {
|
||||
if err = c.addOutput(pluginName, t); err != nil {
|
||||
if err = c.addPlugin(pluginName, t); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
@@ -355,28 +488,10 @@ func (c *Config) LoadConfig(path string) error {
|
||||
pluginName)
|
||||
}
|
||||
}
|
||||
case "inputs", "plugins":
|
||||
for pluginName, pluginVal := range subTable.Fields {
|
||||
switch pluginSubTable := pluginVal.(type) {
|
||||
case *ast.Table:
|
||||
if err = c.addInput(pluginName, pluginSubTable); err != nil {
|
||||
return err
|
||||
}
|
||||
case []*ast.Table:
|
||||
for _, t := range pluginSubTable {
|
||||
if err = c.addInput(pluginName, t); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
default:
|
||||
return fmt.Errorf("Unsupported config format: %s",
|
||||
pluginName)
|
||||
}
|
||||
}
|
||||
// Assume it's an input for legacy config file support if no other
|
||||
// Assume it's a plugin for legacy config file support if no other
|
||||
// identifiers are present
|
||||
default:
|
||||
if err = c.addInput(name, subTable); err != nil {
|
||||
if err = c.addPlugin(name, subTable); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
@@ -399,57 +514,57 @@ func (c *Config) addOutput(name string, table *ast.Table) error {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := config.UnmarshalTable(table, output); err != nil {
|
||||
if err := toml.UnmarshalTable(table, output); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ro := models.NewRunningOutput(name, output, outputConfig)
|
||||
if c.Agent.MetricBufferLimit > 0 {
|
||||
ro.PointBufferLimit = c.Agent.MetricBufferLimit
|
||||
ro := &RunningOutput{
|
||||
Name: name,
|
||||
Output: output,
|
||||
Config: outputConfig,
|
||||
}
|
||||
ro.Quiet = c.Agent.Quiet
|
||||
c.Outputs = append(c.Outputs, ro)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Config) addInput(name string, table *ast.Table) error {
|
||||
if len(c.InputFilters) > 0 && !sliceContains(name, c.InputFilters) {
|
||||
func (c *Config) addPlugin(name string, table *ast.Table) error {
|
||||
if len(c.PluginFilters) > 0 && !sliceContains(name, c.PluginFilters) {
|
||||
return nil
|
||||
}
|
||||
// Legacy support renaming io input to diskio
|
||||
// Legacy support renaming io plugin to diskio
|
||||
if name == "io" {
|
||||
name = "diskio"
|
||||
}
|
||||
|
||||
creator, ok := inputs.Inputs[name]
|
||||
creator, ok := plugins.Plugins[name]
|
||||
if !ok {
|
||||
return fmt.Errorf("Undefined but requested input: %s", name)
|
||||
return fmt.Errorf("Undefined but requested plugin: %s", name)
|
||||
}
|
||||
input := creator()
|
||||
plugin := creator()
|
||||
|
||||
pluginConfig, err := buildInput(name, table)
|
||||
pluginConfig, err := buildPlugin(name, table)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := config.UnmarshalTable(table, input); err != nil {
|
||||
if err := toml.UnmarshalTable(table, plugin); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
rp := &models.RunningInput{
|
||||
rp := &RunningPlugin{
|
||||
Name: name,
|
||||
Input: input,
|
||||
Plugin: plugin,
|
||||
Config: pluginConfig,
|
||||
}
|
||||
c.Inputs = append(c.Inputs, rp)
|
||||
c.Plugins = append(c.Plugins, rp)
|
||||
return nil
|
||||
}
|
||||
|
||||
// buildFilter builds a Filter (tagpass/tagdrop/pass/drop) to
|
||||
// be inserted into the models.OutputConfig/models.InputConfig to be used for prefix
|
||||
// be inserted into the OutputConfig/PluginConfig to be used for prefix
|
||||
// filtering on tags and measurements
|
||||
func buildFilter(tbl *ast.Table) models.Filter {
|
||||
f := models.Filter{}
|
||||
func buildFilter(tbl *ast.Table) Filter {
|
||||
f := Filter{}
|
||||
|
||||
if node, ok := tbl.Fields["pass"]; ok {
|
||||
if kv, ok := node.(*ast.KeyValue); ok {
|
||||
@@ -481,7 +596,7 @@ func buildFilter(tbl *ast.Table) models.Filter {
|
||||
if subtbl, ok := node.(*ast.Table); ok {
|
||||
for name, val := range subtbl.Fields {
|
||||
if kv, ok := val.(*ast.KeyValue); ok {
|
||||
tagfilter := &models.TagFilter{Name: name}
|
||||
tagfilter := &TagFilter{Name: name}
|
||||
if ary, ok := kv.Value.(*ast.Array); ok {
|
||||
for _, elem := range ary.Value {
|
||||
if str, ok := elem.(*ast.String); ok {
|
||||
@@ -500,7 +615,7 @@ func buildFilter(tbl *ast.Table) models.Filter {
|
||||
if subtbl, ok := node.(*ast.Table); ok {
|
||||
for name, val := range subtbl.Fields {
|
||||
if kv, ok := val.(*ast.KeyValue); ok {
|
||||
tagfilter := &models.TagFilter{Name: name}
|
||||
tagfilter := &TagFilter{Name: name}
|
||||
if ary, ok := kv.Value.(*ast.Array); ok {
|
||||
for _, elem := range ary.Value {
|
||||
if str, ok := elem.(*ast.String); ok {
|
||||
@@ -522,11 +637,11 @@ func buildFilter(tbl *ast.Table) models.Filter {
|
||||
return f
|
||||
}
|
||||
|
||||
// buildInput parses input specific items from the ast.Table,
|
||||
// buildPlugin parses plugin specific items from the ast.Table,
|
||||
// builds the filter and returns a
|
||||
// models.InputConfig to be inserted into models.RunningInput
|
||||
func buildInput(name string, tbl *ast.Table) (*models.InputConfig, error) {
|
||||
cp := &models.InputConfig{Name: name}
|
||||
// PluginConfig to be inserted into RunningPlugin
|
||||
func buildPlugin(name string, tbl *ast.Table) (*PluginConfig, error) {
|
||||
cp := &PluginConfig{Name: name}
|
||||
if node, ok := tbl.Fields["interval"]; ok {
|
||||
if kv, ok := node.(*ast.KeyValue); ok {
|
||||
if str, ok := kv.Value.(*ast.String); ok {
|
||||
@@ -567,8 +682,8 @@ func buildInput(name string, tbl *ast.Table) (*models.InputConfig, error) {
|
||||
cp.Tags = make(map[string]string)
|
||||
if node, ok := tbl.Fields["tags"]; ok {
|
||||
if subtbl, ok := node.(*ast.Table); ok {
|
||||
if err := config.UnmarshalTable(subtbl, cp.Tags); err != nil {
|
||||
log.Printf("Could not parse tags for input %s\n", name)
|
||||
if err := toml.UnmarshalTable(subtbl, cp.Tags); err != nil {
|
||||
log.Printf("Could not parse tags for plugin %s\n", name)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -583,10 +698,10 @@ func buildInput(name string, tbl *ast.Table) (*models.InputConfig, error) {
|
||||
}
|
||||
|
||||
// buildOutput parses output-specific items from the ast.Table, builds the filter and returns a
|
||||
// models.OutputConfig to be inserted into models.RunningOutput
|
||||
// OutputConfig to be inserted into RunningPlugin
|
||||
// Note: error exists in the return for future calls that might require error
|
||||
func buildOutput(name string, tbl *ast.Table) (*models.OutputConfig, error) {
|
||||
oc := &models.OutputConfig{
|
||||
func buildOutput(name string, tbl *ast.Table) (*OutputConfig, error) {
|
||||
oc := &OutputConfig{
|
||||
Name: name,
|
||||
Filter: buildFilter(tbl),
|
||||
}
|
||||
|
||||
@@ -4,34 +4,33 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/telegraf/internal/models"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
"github.com/influxdata/telegraf/plugins/inputs/exec"
|
||||
"github.com/influxdata/telegraf/plugins/inputs/memcached"
|
||||
"github.com/influxdata/telegraf/plugins/inputs/procstat"
|
||||
"github.com/influxdb/telegraf/plugins"
|
||||
"github.com/influxdb/telegraf/plugins/exec"
|
||||
"github.com/influxdb/telegraf/plugins/memcached"
|
||||
"github.com/influxdb/telegraf/plugins/procstat"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestConfig_LoadSingleInput(t *testing.T) {
|
||||
func TestConfig_LoadSinglePlugin(t *testing.T) {
|
||||
c := NewConfig()
|
||||
c.LoadConfig("./testdata/single_plugin.toml")
|
||||
|
||||
memcached := inputs.Inputs["memcached"]().(*memcached.Memcached)
|
||||
memcached := plugins.Plugins["memcached"]().(*memcached.Memcached)
|
||||
memcached.Servers = []string{"localhost"}
|
||||
|
||||
mConfig := &models.InputConfig{
|
||||
mConfig := &PluginConfig{
|
||||
Name: "memcached",
|
||||
Filter: models.Filter{
|
||||
Filter: Filter{
|
||||
Drop: []string{"other", "stuff"},
|
||||
Pass: []string{"some", "strings"},
|
||||
TagDrop: []models.TagFilter{
|
||||
models.TagFilter{
|
||||
TagDrop: []TagFilter{
|
||||
TagFilter{
|
||||
Name: "badtag",
|
||||
Filter: []string{"othertag"},
|
||||
},
|
||||
},
|
||||
TagPass: []models.TagFilter{
|
||||
models.TagFilter{
|
||||
TagPass: []TagFilter{
|
||||
TagFilter{
|
||||
Name: "goodtag",
|
||||
Filter: []string{"mytag"},
|
||||
},
|
||||
@@ -40,11 +39,10 @@ func TestConfig_LoadSingleInput(t *testing.T) {
|
||||
},
|
||||
Interval: 5 * time.Second,
|
||||
}
|
||||
mConfig.Tags = make(map[string]string)
|
||||
|
||||
assert.Equal(t, memcached, c.Inputs[0].Input,
|
||||
assert.Equal(t, memcached, c.Plugins[0].Plugin,
|
||||
"Testdata did not produce a correct memcached struct.")
|
||||
assert.Equal(t, mConfig, c.Inputs[0].Config,
|
||||
assert.Equal(t, mConfig, c.Plugins[0].Config,
|
||||
"Testdata did not produce correct memcached metadata.")
|
||||
}
|
||||
|
||||
@@ -59,22 +57,22 @@ func TestConfig_LoadDirectory(t *testing.T) {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
memcached := inputs.Inputs["memcached"]().(*memcached.Memcached)
|
||||
memcached := plugins.Plugins["memcached"]().(*memcached.Memcached)
|
||||
memcached.Servers = []string{"localhost"}
|
||||
|
||||
mConfig := &models.InputConfig{
|
||||
mConfig := &PluginConfig{
|
||||
Name: "memcached",
|
||||
Filter: models.Filter{
|
||||
Filter: Filter{
|
||||
Drop: []string{"other", "stuff"},
|
||||
Pass: []string{"some", "strings"},
|
||||
TagDrop: []models.TagFilter{
|
||||
models.TagFilter{
|
||||
TagDrop: []TagFilter{
|
||||
TagFilter{
|
||||
Name: "badtag",
|
||||
Filter: []string{"othertag"},
|
||||
},
|
||||
},
|
||||
TagPass: []models.TagFilter{
|
||||
models.TagFilter{
|
||||
TagPass: []TagFilter{
|
||||
TagFilter{
|
||||
Name: "goodtag",
|
||||
Filter: []string{"mytag"},
|
||||
},
|
||||
@@ -83,39 +81,216 @@ func TestConfig_LoadDirectory(t *testing.T) {
|
||||
},
|
||||
Interval: 5 * time.Second,
|
||||
}
|
||||
mConfig.Tags = make(map[string]string)
|
||||
|
||||
assert.Equal(t, memcached, c.Inputs[0].Input,
|
||||
assert.Equal(t, memcached, c.Plugins[0].Plugin,
|
||||
"Testdata did not produce a correct memcached struct.")
|
||||
assert.Equal(t, mConfig, c.Inputs[0].Config,
|
||||
assert.Equal(t, mConfig, c.Plugins[0].Config,
|
||||
"Testdata did not produce correct memcached metadata.")
|
||||
|
||||
ex := inputs.Inputs["exec"]().(*exec.Exec)
|
||||
ex.Command = "/usr/bin/myothercollector --foo=bar"
|
||||
eConfig := &models.InputConfig{
|
||||
Name: "exec",
|
||||
MeasurementSuffix: "_myothercollector",
|
||||
ex := plugins.Plugins["exec"]().(*exec.Exec)
|
||||
ex.Commands = []*exec.Command{
|
||||
&exec.Command{
|
||||
Command: "/usr/bin/myothercollector --foo=bar",
|
||||
Name: "myothercollector",
|
||||
},
|
||||
}
|
||||
eConfig.Tags = make(map[string]string)
|
||||
assert.Equal(t, ex, c.Inputs[1].Input,
|
||||
eConfig := &PluginConfig{Name: "exec"}
|
||||
assert.Equal(t, ex, c.Plugins[1].Plugin,
|
||||
"Merged Testdata did not produce a correct exec struct.")
|
||||
assert.Equal(t, eConfig, c.Inputs[1].Config,
|
||||
assert.Equal(t, eConfig, c.Plugins[1].Config,
|
||||
"Merged Testdata did not produce correct exec metadata.")
|
||||
|
||||
memcached.Servers = []string{"192.168.1.1"}
|
||||
assert.Equal(t, memcached, c.Inputs[2].Input,
|
||||
assert.Equal(t, memcached, c.Plugins[2].Plugin,
|
||||
"Testdata did not produce a correct memcached struct.")
|
||||
assert.Equal(t, mConfig, c.Inputs[2].Config,
|
||||
assert.Equal(t, mConfig, c.Plugins[2].Config,
|
||||
"Testdata did not produce correct memcached metadata.")
|
||||
|
||||
pstat := inputs.Inputs["procstat"]().(*procstat.Procstat)
|
||||
pstat.PidFile = "/var/run/grafana-server.pid"
|
||||
pstat := plugins.Plugins["procstat"]().(*procstat.Procstat)
|
||||
pstat.Specifications = []*procstat.Specification{
|
||||
&procstat.Specification{
|
||||
PidFile: "/var/run/grafana-server.pid",
|
||||
},
|
||||
&procstat.Specification{
|
||||
PidFile: "/var/run/influxdb/influxd.pid",
|
||||
},
|
||||
}
|
||||
|
||||
pConfig := &models.InputConfig{Name: "procstat"}
|
||||
pConfig.Tags = make(map[string]string)
|
||||
pConfig := &PluginConfig{Name: "procstat"}
|
||||
|
||||
assert.Equal(t, pstat, c.Inputs[3].Input,
|
||||
assert.Equal(t, pstat, c.Plugins[3].Plugin,
|
||||
"Merged Testdata did not produce a correct procstat struct.")
|
||||
assert.Equal(t, pConfig, c.Inputs[3].Config,
|
||||
assert.Equal(t, pConfig, c.Plugins[3].Config,
|
||||
"Merged Testdata did not produce correct procstat metadata.")
|
||||
}
|
||||
|
||||
func TestFilter_Empty(t *testing.T) {
|
||||
f := Filter{}
|
||||
|
||||
measurements := []string{
|
||||
"foo",
|
||||
"bar",
|
||||
"barfoo",
|
||||
"foo_bar",
|
||||
"foo.bar",
|
||||
"foo-bar",
|
||||
"supercalifradjulisticexpialidocious",
|
||||
}
|
||||
|
||||
for _, measurement := range measurements {
|
||||
if !f.ShouldPass(measurement) {
|
||||
t.Errorf("Expected measurement %s to pass", measurement)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFilter_Pass(t *testing.T) {
|
||||
f := Filter{
|
||||
Pass: []string{"foo*", "cpu_usage_idle"},
|
||||
}
|
||||
|
||||
passes := []string{
|
||||
"foo",
|
||||
"foo_bar",
|
||||
"foo.bar",
|
||||
"foo-bar",
|
||||
"cpu_usage_idle",
|
||||
}
|
||||
|
||||
drops := []string{
|
||||
"bar",
|
||||
"barfoo",
|
||||
"bar_foo",
|
||||
"cpu_usage_busy",
|
||||
}
|
||||
|
||||
for _, measurement := range passes {
|
||||
if !f.ShouldPass(measurement) {
|
||||
t.Errorf("Expected measurement %s to pass", measurement)
|
||||
}
|
||||
}
|
||||
|
||||
for _, measurement := range drops {
|
||||
if f.ShouldPass(measurement) {
|
||||
t.Errorf("Expected measurement %s to drop", measurement)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFilter_Drop(t *testing.T) {
|
||||
f := Filter{
|
||||
Drop: []string{"foo*", "cpu_usage_idle"},
|
||||
}
|
||||
|
||||
drops := []string{
|
||||
"foo",
|
||||
"foo_bar",
|
||||
"foo.bar",
|
||||
"foo-bar",
|
||||
"cpu_usage_idle",
|
||||
}
|
||||
|
||||
passes := []string{
|
||||
"bar",
|
||||
"barfoo",
|
||||
"bar_foo",
|
||||
"cpu_usage_busy",
|
||||
}
|
||||
|
||||
for _, measurement := range passes {
|
||||
if !f.ShouldPass(measurement) {
|
||||
t.Errorf("Expected measurement %s to pass", measurement)
|
||||
}
|
||||
}
|
||||
|
||||
for _, measurement := range drops {
|
||||
if f.ShouldPass(measurement) {
|
||||
t.Errorf("Expected measurement %s to drop", measurement)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFilter_TagPass(t *testing.T) {
|
||||
filters := []TagFilter{
|
||||
TagFilter{
|
||||
Name: "cpu",
|
||||
Filter: []string{"cpu-*"},
|
||||
},
|
||||
TagFilter{
|
||||
Name: "mem",
|
||||
Filter: []string{"mem_free"},
|
||||
}}
|
||||
f := Filter{
|
||||
TagPass: filters,
|
||||
}
|
||||
|
||||
passes := []map[string]string{
|
||||
{"cpu": "cpu-total"},
|
||||
{"cpu": "cpu-0"},
|
||||
{"cpu": "cpu-1"},
|
||||
{"cpu": "cpu-2"},
|
||||
{"mem": "mem_free"},
|
||||
}
|
||||
|
||||
drops := []map[string]string{
|
||||
{"cpu": "cputotal"},
|
||||
{"cpu": "cpu0"},
|
||||
{"cpu": "cpu1"},
|
||||
{"cpu": "cpu2"},
|
||||
{"mem": "mem_used"},
|
||||
}
|
||||
|
||||
for _, tags := range passes {
|
||||
if !f.ShouldTagsPass(tags) {
|
||||
t.Errorf("Expected tags %v to pass", tags)
|
||||
}
|
||||
}
|
||||
|
||||
for _, tags := range drops {
|
||||
if f.ShouldTagsPass(tags) {
|
||||
t.Errorf("Expected tags %v to drop", tags)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFilter_TagDrop(t *testing.T) {
|
||||
filters := []TagFilter{
|
||||
TagFilter{
|
||||
Name: "cpu",
|
||||
Filter: []string{"cpu-*"},
|
||||
},
|
||||
TagFilter{
|
||||
Name: "mem",
|
||||
Filter: []string{"mem_free"},
|
||||
}}
|
||||
f := Filter{
|
||||
TagDrop: filters,
|
||||
}
|
||||
|
||||
drops := []map[string]string{
|
||||
{"cpu": "cpu-total"},
|
||||
{"cpu": "cpu-0"},
|
||||
{"cpu": "cpu-1"},
|
||||
{"cpu": "cpu-2"},
|
||||
{"mem": "mem_free"},
|
||||
}
|
||||
|
||||
passes := []map[string]string{
|
||||
{"cpu": "cputotal"},
|
||||
{"cpu": "cpu0"},
|
||||
{"cpu": "cpu1"},
|
||||
{"cpu": "cpu2"},
|
||||
{"mem": "mem_used"},
|
||||
}
|
||||
|
||||
for _, tags := range passes {
|
||||
if !f.ShouldTagsPass(tags) {
|
||||
t.Errorf("Expected tags %v to pass", tags)
|
||||
}
|
||||
}
|
||||
|
||||
for _, tags := range drops {
|
||||
if f.ShouldTagsPass(tags) {
|
||||
t.Errorf("Expected tags %v to drop", tags)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
6 internal/config/testdata/single_plugin.toml (vendored)
@@ -1,9 +1,9 @@
[[inputs.memcached]]
[[plugins.memcached]]
servers = ["localhost"]
pass = ["some", "strings"]
drop = ["other", "stuff"]
interval = "5s"
[inputs.memcached.tagpass]
[plugins.memcached.tagpass]
goodtag = ["mytag"]
[inputs.memcached.tagdrop]
[plugins.memcached.tagdrop]
badtag = ["othertag"]
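The tagpass/tagdrop tables in the testdata above are what the config tests earlier in this compare expect to surface as TagFilter values. The following is a minimal, self-contained sketch of how such a tag filter can be evaluated against a point's tags; it mirrors the ShouldTagsPass logic that appears later in this compare, but substitutes the standard library's path.Match for Telegraf's internal.Glob helper, so treat it as an illustration rather than the repository's implementation.

```go
package main

import (
	"fmt"
	"path"
)

// TagFilter mirrors the shape used in the config above: a tag name plus the
// glob patterns its value may match.
type TagFilter struct {
	Name   string
	Filter []string
}

// shouldTagsPass is a simplified stand-in for Filter.ShouldTagsPass from this
// compare; path.Match is used here instead of Telegraf's internal.Glob.
func shouldTagsPass(tagPass []TagFilter, tags map[string]string) bool {
	for _, tf := range tagPass {
		if val, ok := tags[tf.Name]; ok {
			for _, pattern := range tf.Filter {
				if matched, _ := path.Match(pattern, val); matched {
					return true
				}
			}
		}
	}
	return false
}

func main() {
	// [plugins.memcached.tagpass] goodtag = ["mytag"]
	tagPass := []TagFilter{{Name: "goodtag", Filter: []string{"mytag"}}}
	fmt.Println(shouldTagsPass(tagPass, map[string]string{"goodtag": "mytag"}))    // true
	fmt.Println(shouldTagsPass(tagPass, map[string]string{"goodtag": "othertag"})) // false
}
```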
8 internal/config/testdata/subconfig/exec.conf (vendored)
@@ -1,4 +1,8 @@
[[inputs.exec]]
[[plugins.exec]]
# specify commands via an array of tables
[[plugins.exec.commands]]
# the command to run
command = "/usr/bin/myothercollector --foo=bar"
name_suffix = "_myothercollector"

# name of the command (used as a prefix for measurements)
name = "myothercollector"
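The exec.conf testdata above configures commands through a TOML array of tables, which the config tests decode into a slice of Command structs. Below is a hedged sketch of that decoding using github.com/BurntSushi/toml; the library choice and the struct tags are assumptions made for illustration, not necessarily what Telegraf's config loader uses.

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

// Command mirrors the fields used in the exec.conf testdata above.
type Command struct {
	Command string `toml:"command"`
	Name    string `toml:"name"`
}

type execConfig struct {
	Commands []Command `toml:"commands"`
}

type root struct {
	Plugins struct {
		Exec []execConfig `toml:"exec"`
	} `toml:"plugins"`
}

const conf = `
[[plugins.exec]]
  [[plugins.exec.commands]]
  command = "/usr/bin/myothercollector --foo=bar"
  name = "myothercollector"
`

func main() {
	var cfg root
	if _, err := toml.Decode(conf, &cfg); err != nil {
		panic(err)
	}
	// Prints the decoded []Command slice, matching the exec test expectations.
	fmt.Printf("%+v\n", cfg.Plugins.Exec[0].Commands)
}
```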
@@ -1,9 +1,9 @@
[[inputs.memcached]]
[[plugins.memcached]]
servers = ["192.168.1.1"]
pass = ["some", "strings"]
drop = ["other", "stuff"]
interval = "5s"
[inputs.memcached.tagpass]
[plugins.memcached.tagpass]
goodtag = ["mytag"]
[inputs.memcached.tagdrop]
[plugins.memcached.tagdrop]
badtag = ["othertag"]
@@ -1,2 +1,5 @@
[[inputs.procstat]]
[[plugins.procstat]]
[[plugins.procstat.specifications]]
pid_file = "/var/run/grafana-server.pid"
[[plugins.procstat.specifications]]
pid_file = "/var/run/influxdb/influxd.pid"
121 internal/config/testdata/telegraf-agent.toml (vendored)
@@ -1,7 +1,7 @@
|
||||
# Telegraf configuration
|
||||
|
||||
# Telegraf is entirely plugin driven. All metrics are gathered from the
|
||||
# declared inputs.
|
||||
# declared plugins.
|
||||
|
||||
# Even if a plugin has no configuration, it must be declared in here
|
||||
# to be active. Declaring a plugin means just specifying the name
|
||||
@@ -21,13 +21,20 @@
|
||||
|
||||
# Tags can also be specified via a normal map, but only one form at a time:
|
||||
[tags]
|
||||
dc = "us-east-1"
|
||||
# dc = "us-east-1"
|
||||
|
||||
# Configuration for telegraf agent
|
||||
[agent]
|
||||
# Default data collection interval for all plugins
|
||||
interval = "10s"
|
||||
|
||||
# If utc = false, uses local time (utc is highly recommended)
|
||||
utc = true
|
||||
|
||||
# Precision of writes, valid values are n, u, ms, s, m, and h
|
||||
# note: using second precision greatly helps InfluxDB compression
|
||||
precision = "s"
|
||||
|
||||
# run telegraf in debug mode
|
||||
debug = false
|
||||
|
||||
@@ -39,6 +46,8 @@
|
||||
# OUTPUTS #
|
||||
###############################################################################
|
||||
|
||||
[outputs]
|
||||
|
||||
# Configuration for influxdb server to send metrics to
|
||||
[[outputs.influxdb]]
|
||||
# The full HTTP endpoint URL for your InfluxDB instance
|
||||
@@ -49,6 +58,17 @@
|
||||
# The target database for metrics. This database must already exist
|
||||
database = "telegraf" # required.
|
||||
|
||||
# Connection timeout (for the connection with InfluxDB), formatted as a string.
|
||||
# Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
|
||||
# If not provided, will default to 0 (no timeout)
|
||||
# timeout = "5s"
|
||||
|
||||
# username = "telegraf"
|
||||
# password = "metricsmetricsmetricsmetrics"
|
||||
|
||||
# Set the user agent for the POSTs (can be useful for log differentiation)
|
||||
# user_agent = "telegraf"
|
||||
|
||||
[[outputs.influxdb]]
|
||||
urls = ["udp://localhost:8089"]
|
||||
database = "udp-telegraf"
|
||||
@@ -68,13 +88,15 @@
|
||||
# PLUGINS #
|
||||
###############################################################################
|
||||
|
||||
[plugins]
|
||||
|
||||
# Read Apache status information (mod_status)
|
||||
[[inputs.apache]]
|
||||
# An array of Apache status URI to gather stats.
|
||||
urls = ["http://localhost/server-status?auto"]
|
||||
[[plugins.apache]]
|
||||
# An array of Apache status URI to gather stats.
|
||||
urls = ["http://localhost/server-status?auto"]
|
||||
|
||||
# Read metrics about cpu usage
|
||||
[[inputs.cpu]]
|
||||
[[plugins.cpu]]
|
||||
# Whether to report per-cpu stats or not
|
||||
percpu = true
|
||||
# Whether to report total system cpu stats or not
|
||||
@@ -83,11 +105,11 @@
|
||||
drop = ["cpu_time"]
|
||||
|
||||
# Read metrics about disk usage by mount point
|
||||
[[inputs.diskio]]
|
||||
[[plugins.diskio]]
|
||||
# no configuration
|
||||
|
||||
# Read metrics from one or many disque servers
|
||||
[[inputs.disque]]
|
||||
[[plugins.disque]]
|
||||
# An array of URI to gather stats about. Specify an ip or hostname
|
||||
# with optional port and password. ie disque://localhost, disque://10.10.3.33:18832,
|
||||
# 10.0.0.1:10000, etc.
|
||||
@@ -96,7 +118,7 @@
|
||||
servers = ["localhost"]
|
||||
|
||||
# Read stats from one or more Elasticsearch servers or clusters
|
||||
[[inputs.elasticsearch]]
|
||||
[[plugins.elasticsearch]]
|
||||
# specify a list of one or more Elasticsearch servers
|
||||
servers = ["http://localhost:9200"]
|
||||
|
||||
@@ -105,13 +127,17 @@
|
||||
local = true
|
||||
|
||||
# Read flattened metrics from one or more commands that output JSON to stdout
|
||||
[[inputs.exec]]
|
||||
[[plugins.exec]]
|
||||
# specify commands via an array of tables
|
||||
[[exec.commands]]
|
||||
# the command to run
|
||||
command = "/usr/bin/mycollector --foo=bar"
|
||||
name_suffix = "_mycollector"
|
||||
|
||||
# name of the command (used as a prefix for measurements)
|
||||
name = "mycollector"
|
||||
|
||||
# Read metrics of haproxy, via socket or csv stats page
|
||||
[[inputs.haproxy]]
|
||||
[[plugins.haproxy]]
|
||||
# An array of address to gather stats about. Specify an ip on hostname
|
||||
# with optional port. ie localhost, 10.10.3.33:1936, etc.
|
||||
#
|
||||
@@ -121,30 +147,33 @@
|
||||
# servers = ["socket:/run/haproxy/admin.sock"]
|
||||
|
||||
# Read flattened metrics from one or more JSON HTTP endpoints
|
||||
[[inputs.httpjson]]
|
||||
# a name for the service being polled
|
||||
name = "webserver_stats"
|
||||
[[plugins.httpjson]]
|
||||
# Specify services via an array of tables
|
||||
[[httpjson.services]]
|
||||
|
||||
# URL of each server in the service's cluster
|
||||
servers = [
|
||||
"http://localhost:9999/stats/",
|
||||
"http://localhost:9998/stats/",
|
||||
]
|
||||
# a name for the service being polled
|
||||
name = "webserver_stats"
|
||||
|
||||
# HTTP method to use (case-sensitive)
|
||||
method = "GET"
|
||||
# URL of each server in the service's cluster
|
||||
servers = [
|
||||
"http://localhost:9999/stats/",
|
||||
"http://localhost:9998/stats/",
|
||||
]
|
||||
|
||||
# HTTP parameters (all values must be strings)
|
||||
[httpjson.parameters]
|
||||
event_type = "cpu_spike"
|
||||
threshold = "0.75"
|
||||
# HTTP method to use (case-sensitive)
|
||||
method = "GET"
|
||||
|
||||
# HTTP parameters (all values must be strings)
|
||||
[httpjson.services.parameters]
|
||||
event_type = "cpu_spike"
|
||||
threshold = "0.75"
|
||||
|
||||
# Read metrics about disk IO by device
|
||||
[[inputs.diskio]]
|
||||
[[plugins.io]]
|
||||
# no configuration
|
||||
|
||||
# read metrics from a Kafka topic
|
||||
[[inputs.kafka_consumer]]
|
||||
[[plugins.kafka_consumer]]
|
||||
# topic(s) to consume
|
||||
topics = ["telegraf"]
|
||||
# an array of Zookeeper connection strings
|
||||
@@ -157,7 +186,7 @@
|
||||
offset = "oldest"
|
||||
|
||||
# Read metrics from a LeoFS Server via SNMP
|
||||
[[inputs.leofs]]
|
||||
[[plugins.leofs]]
|
||||
# An array of URI to gather stats about LeoFS.
|
||||
# Specify an ip or hostname with port. ie 127.0.0.1:4020
|
||||
#
|
||||
@@ -165,7 +194,7 @@
|
||||
servers = ["127.0.0.1:4021"]
|
||||
|
||||
# Read metrics from local Lustre service on OST, MDS
|
||||
[[inputs.lustre2]]
|
||||
[[plugins.lustre2]]
|
||||
# An array of /proc globs to search for Lustre stats
|
||||
# If not specified, the default will work on Lustre 2.5.x
|
||||
#
|
||||
@@ -173,11 +202,11 @@
|
||||
# mds_procfiles = ["/proc/fs/lustre/mdt/*/md_stats"]
|
||||
|
||||
# Read metrics about memory usage
|
||||
[[inputs.mem]]
|
||||
[[plugins.mem]]
|
||||
# no configuration
|
||||
|
||||
# Read metrics from one or many memcached servers
|
||||
[[inputs.memcached]]
|
||||
[[plugins.memcached]]
|
||||
# An array of address to gather stats about. Specify an ip on hostname
|
||||
# with optional port. ie localhost, 10.0.0.1:11211, etc.
|
||||
#
|
||||
@@ -185,7 +214,7 @@
|
||||
servers = ["localhost"]
|
||||
|
||||
# Read metrics from one or many MongoDB servers
|
||||
[[inputs.mongodb]]
|
||||
[[plugins.mongodb]]
|
||||
# An array of URI to gather stats about. Specify an ip or hostname
|
||||
# with optional port add password. ie mongodb://user:auth_key@10.10.3.30:27017,
|
||||
# mongodb://10.10.3.33:18832, 10.0.0.1:10000, etc.
|
||||
@@ -194,7 +223,7 @@
|
||||
servers = ["127.0.0.1:27017"]
|
||||
|
||||
# Read metrics from one or many mysql servers
|
||||
[[inputs.mysql]]
|
||||
[[plugins.mysql]]
|
||||
# specify servers via a url matching:
|
||||
# [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify]]
|
||||
# e.g.
|
||||
@@ -205,7 +234,7 @@
|
||||
servers = ["localhost"]
|
||||
|
||||
# Read metrics about network interface usage
|
||||
[[inputs.net]]
|
||||
[[plugins.net]]
|
||||
# By default, telegraf gathers stats from any up interface (excluding loopback)
|
||||
# Setting interfaces will tell it to gather these explicit interfaces,
|
||||
# regardless of status.
|
||||
@@ -213,12 +242,12 @@
|
||||
# interfaces = ["eth0", ... ]
|
||||
|
||||
# Read Nginx's basic status information (ngx_http_stub_status_module)
|
||||
[[inputs.nginx]]
|
||||
[[plugins.nginx]]
|
||||
# An array of Nginx stub_status URI to gather stats.
|
||||
urls = ["http://localhost/status"]
|
||||
|
||||
# Ping given url(s) and return statistics
|
||||
[[inputs.ping]]
|
||||
[[plugins.ping]]
|
||||
# urls to ping
|
||||
urls = ["www.google.com"] # required
|
||||
# number of pings to send (ping -c <COUNT>)
|
||||
@@ -231,7 +260,10 @@
|
||||
interface = ""
|
||||
|
||||
# Read metrics from one or many postgresql servers
|
||||
[[inputs.postgresql]]
|
||||
[[plugins.postgresql]]
|
||||
# specify servers via an array of tables
|
||||
[[postgresql.servers]]
|
||||
|
||||
# specify address via a url matching:
|
||||
# postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full]
|
||||
# or a simple string:
|
||||
@@ -258,13 +290,14 @@
|
||||
# address = "influx@remoteserver"
|
||||
|
||||
# Read metrics from one or many prometheus clients
|
||||
[[inputs.prometheus]]
|
||||
[[plugins.prometheus]]
|
||||
# An array of urls to scrape metrics from.
|
||||
urls = ["http://localhost:9100/metrics"]
|
||||
|
||||
# Read metrics from one or many RabbitMQ servers via the management API
|
||||
[[inputs.rabbitmq]]
|
||||
[[plugins.rabbitmq]]
|
||||
# Specify servers via an array of tables
|
||||
[[rabbitmq.servers]]
|
||||
# name = "rmq-server-1" # optional tag
|
||||
# url = "http://localhost:15672"
|
||||
# username = "guest"
|
||||
@@ -275,7 +308,7 @@
|
||||
# nodes = ["rabbit@node1", "rabbit@node2"]
|
||||
|
||||
# Read metrics from one or many redis servers
|
||||
[[inputs.redis]]
|
||||
[[plugins.redis]]
|
||||
# An array of URI to gather stats about. Specify an ip or hostname
|
||||
# with optional port add password. ie redis://localhost, redis://10.10.3.33:18832,
|
||||
# 10.0.0.1:10000, etc.
|
||||
@@ -284,7 +317,7 @@
|
||||
servers = ["localhost"]
|
||||
|
||||
# Read metrics from one or many RethinkDB servers
|
||||
[[inputs.rethinkdb]]
|
||||
[[plugins.rethinkdb]]
|
||||
# An array of URI to gather stats about. Specify an ip or hostname
|
||||
# with optional port add password. ie rethinkdb://user:auth_key@10.10.3.30:28105,
|
||||
# rethinkdb://10.10.3.33:18832, 10.0.0.1:10000, etc.
|
||||
@@ -293,9 +326,9 @@
|
||||
servers = ["127.0.0.1:28015"]
|
||||
|
||||
# Read metrics about swap memory usage
|
||||
[[inputs.swap]]
|
||||
[[plugins.swap]]
|
||||
# no configuration
|
||||
|
||||
# Read metrics about system load & uptime
|
||||
[[inputs.system]]
|
||||
[[plugins.system]]
|
||||
# no configuration
|
||||
|
||||
@@ -5,7 +5,6 @@ import (
"errors"
"fmt"
"os"
"strconv"
"strings"
"time"
)
@@ -50,17 +49,9 @@ func (f *JSONFlattener) FlattenJSON(
return err
}
}
case []interface{}:
for i, v := range t {
k := strconv.Itoa(i)
err := f.FlattenJSON(fieldname+"_"+k+"_", v)
if err != nil {
return nil
}
}
case float64:
f.Fields[fieldname] = t
case bool, string, nil:
case bool, string, []interface{}:
// ignored types
return nil
default:
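The hunk above is where the JSON flattener's handling of arrays changes between the two versions: in one, array elements are flattened with numeric index suffixes; in the other, arrays are ignored. The sketch below shows the index-suffixed approach in a self-contained form; it is an illustration of the technique, not the Telegraf code itself.

```go
package main

import (
	"encoding/json"
	"fmt"
	"strconv"
)

// flatten walks arbitrary decoded JSON and records every numeric leaf under a
// key built from its path, with array elements suffixed by their index, e.g.
// "latency_0". Bools, strings and nulls are skipped, as in the hunk above.
func flatten(prefix string, v interface{}, fields map[string]float64) {
	switch t := v.(type) {
	case map[string]interface{}:
		for k, val := range t {
			flatten(prefix+k+"_", val, fields)
		}
	case []interface{}:
		for i, val := range t {
			flatten(prefix+strconv.Itoa(i)+"_", val, fields)
		}
	case float64:
		fields[prefix[:len(prefix)-1]] = t // strip the trailing "_"
	default:
		// ignored types
	}
}

func main() {
	var data interface{}
	_ = json.Unmarshal([]byte(`{"latency":[1.5, 2.5], "status":{"up":1}}`), &data)
	fields := make(map[string]float64)
	flatten("", data, fields)
	fmt.Println(fields) // map[latency_0:1.5 latency_1:2.5 status_up:1]
}
```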
@@ -1,92 +0,0 @@
|
||||
package models
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/influxdata/influxdb/client/v2"
|
||||
"github.com/influxdata/telegraf/internal"
|
||||
)
|
||||
|
||||
// TagFilter is the name of a tag, and the values on which to filter
|
||||
type TagFilter struct {
|
||||
Name string
|
||||
Filter []string
|
||||
}
|
||||
|
||||
// Filter containing drop/pass and tagdrop/tagpass rules
|
||||
type Filter struct {
|
||||
Drop []string
|
||||
Pass []string
|
||||
|
||||
TagDrop []TagFilter
|
||||
TagPass []TagFilter
|
||||
|
||||
IsActive bool
|
||||
}
|
||||
|
||||
func (f Filter) ShouldPointPass(point *client.Point) bool {
|
||||
if f.ShouldPass(point.Name()) && f.ShouldTagsPass(point.Tags()) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// ShouldPass returns true if the metric should pass, false if should drop
|
||||
// based on the drop/pass filter parameters
|
||||
func (f Filter) ShouldPass(key string) bool {
|
||||
if f.Pass != nil {
|
||||
for _, pat := range f.Pass {
|
||||
// TODO remove HasPrefix check, leaving it for now for legacy support.
|
||||
// Cam, 2015-12-07
|
||||
if strings.HasPrefix(key, pat) || internal.Glob(pat, key) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
if f.Drop != nil {
|
||||
for _, pat := range f.Drop {
|
||||
// TODO remove HasPrefix check, leaving it for now for legacy support.
|
||||
// Cam, 2015-12-07
|
||||
if strings.HasPrefix(key, pat) || internal.Glob(pat, key) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// ShouldTagsPass returns true if the metric should pass, false if should drop
|
||||
// based on the tagdrop/tagpass filter parameters
|
||||
func (f Filter) ShouldTagsPass(tags map[string]string) bool {
|
||||
if f.TagPass != nil {
|
||||
for _, pat := range f.TagPass {
|
||||
if tagval, ok := tags[pat.Name]; ok {
|
||||
for _, filter := range pat.Filter {
|
||||
if internal.Glob(filter, tagval) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
if f.TagDrop != nil {
|
||||
for _, pat := range f.TagDrop {
|
||||
if tagval, ok := tags[pat.Name]; ok {
|
||||
for _, filter := range pat.Filter {
|
||||
if internal.Glob(filter, tagval) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
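The Filter type in the file above decides whether a measurement passes based on the Pass and Drop glob lists, and the filter tests in this compare pin down the expected semantics: Pass acts as an allow-list checked first, Drop as a deny-list, and an empty filter passes everything. Below is a standalone sketch of those semantics, with the standard library's path.Match standing in for internal.Glob, so it is an illustration rather than the repository's code.

```go
package main

import (
	"fmt"
	"path"
)

// shouldPass reproduces the Pass/Drop behaviour exercised by the filter tests
// in this compare: Pass is an allow-list checked first, Drop a deny-list, and
// an empty filter lets everything through.
func shouldPass(pass, drop []string, key string) bool {
	if len(pass) > 0 {
		for _, pat := range pass {
			if ok, _ := path.Match(pat, key); ok {
				return true
			}
		}
		return false
	}
	for _, pat := range drop {
		if ok, _ := path.Match(pat, key); ok {
			return false
		}
	}
	return true
}

func main() {
	patterns := []string{"foo*", "cpu_usage_idle"}
	fmt.Println(shouldPass(patterns, nil, "foo_bar"))        // true
	fmt.Println(shouldPass(patterns, nil, "cpu_usage_busy")) // false
	fmt.Println(shouldPass(nil, patterns, "foo_bar"))        // false: same patterns used as a Drop list
}
```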
@@ -1,177 +0,0 @@
|
||||
package models
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestFilter_Empty(t *testing.T) {
|
||||
f := Filter{}
|
||||
|
||||
measurements := []string{
|
||||
"foo",
|
||||
"bar",
|
||||
"barfoo",
|
||||
"foo_bar",
|
||||
"foo.bar",
|
||||
"foo-bar",
|
||||
"supercalifradjulisticexpialidocious",
|
||||
}
|
||||
|
||||
for _, measurement := range measurements {
|
||||
if !f.ShouldPass(measurement) {
|
||||
t.Errorf("Expected measurement %s to pass", measurement)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFilter_Pass(t *testing.T) {
|
||||
f := Filter{
|
||||
Pass: []string{"foo*", "cpu_usage_idle"},
|
||||
}
|
||||
|
||||
passes := []string{
|
||||
"foo",
|
||||
"foo_bar",
|
||||
"foo.bar",
|
||||
"foo-bar",
|
||||
"cpu_usage_idle",
|
||||
}
|
||||
|
||||
drops := []string{
|
||||
"bar",
|
||||
"barfoo",
|
||||
"bar_foo",
|
||||
"cpu_usage_busy",
|
||||
}
|
||||
|
||||
for _, measurement := range passes {
|
||||
if !f.ShouldPass(measurement) {
|
||||
t.Errorf("Expected measurement %s to pass", measurement)
|
||||
}
|
||||
}
|
||||
|
||||
for _, measurement := range drops {
|
||||
if f.ShouldPass(measurement) {
|
||||
t.Errorf("Expected measurement %s to drop", measurement)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFilter_Drop(t *testing.T) {
|
||||
f := Filter{
|
||||
Drop: []string{"foo*", "cpu_usage_idle"},
|
||||
}
|
||||
|
||||
drops := []string{
|
||||
"foo",
|
||||
"foo_bar",
|
||||
"foo.bar",
|
||||
"foo-bar",
|
||||
"cpu_usage_idle",
|
||||
}
|
||||
|
||||
passes := []string{
|
||||
"bar",
|
||||
"barfoo",
|
||||
"bar_foo",
|
||||
"cpu_usage_busy",
|
||||
}
|
||||
|
||||
for _, measurement := range passes {
|
||||
if !f.ShouldPass(measurement) {
|
||||
t.Errorf("Expected measurement %s to pass", measurement)
|
||||
}
|
||||
}
|
||||
|
||||
for _, measurement := range drops {
|
||||
if f.ShouldPass(measurement) {
|
||||
t.Errorf("Expected measurement %s to drop", measurement)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFilter_TagPass(t *testing.T) {
|
||||
filters := []TagFilter{
|
||||
TagFilter{
|
||||
Name: "cpu",
|
||||
Filter: []string{"cpu-*"},
|
||||
},
|
||||
TagFilter{
|
||||
Name: "mem",
|
||||
Filter: []string{"mem_free"},
|
||||
}}
|
||||
f := Filter{
|
||||
TagPass: filters,
|
||||
}
|
||||
|
||||
passes := []map[string]string{
|
||||
{"cpu": "cpu-total"},
|
||||
{"cpu": "cpu-0"},
|
||||
{"cpu": "cpu-1"},
|
||||
{"cpu": "cpu-2"},
|
||||
{"mem": "mem_free"},
|
||||
}
|
||||
|
||||
drops := []map[string]string{
|
||||
{"cpu": "cputotal"},
|
||||
{"cpu": "cpu0"},
|
||||
{"cpu": "cpu1"},
|
||||
{"cpu": "cpu2"},
|
||||
{"mem": "mem_used"},
|
||||
}
|
||||
|
||||
for _, tags := range passes {
|
||||
if !f.ShouldTagsPass(tags) {
|
||||
t.Errorf("Expected tags %v to pass", tags)
|
||||
}
|
||||
}
|
||||
|
||||
for _, tags := range drops {
|
||||
if f.ShouldTagsPass(tags) {
|
||||
t.Errorf("Expected tags %v to drop", tags)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFilter_TagDrop(t *testing.T) {
|
||||
filters := []TagFilter{
|
||||
TagFilter{
|
||||
Name: "cpu",
|
||||
Filter: []string{"cpu-*"},
|
||||
},
|
||||
TagFilter{
|
||||
Name: "mem",
|
||||
Filter: []string{"mem_free"},
|
||||
}}
|
||||
f := Filter{
|
||||
TagDrop: filters,
|
||||
}
|
||||
|
||||
drops := []map[string]string{
|
||||
{"cpu": "cpu-total"},
|
||||
{"cpu": "cpu-0"},
|
||||
{"cpu": "cpu-1"},
|
||||
{"cpu": "cpu-2"},
|
||||
{"mem": "mem_free"},
|
||||
}
|
||||
|
||||
passes := []map[string]string{
|
||||
{"cpu": "cputotal"},
|
||||
{"cpu": "cpu0"},
|
||||
{"cpu": "cpu1"},
|
||||
{"cpu": "cpu2"},
|
||||
{"mem": "mem_used"},
|
||||
}
|
||||
|
||||
for _, tags := range passes {
|
||||
if !f.ShouldTagsPass(tags) {
|
||||
t.Errorf("Expected tags %v to pass", tags)
|
||||
}
|
||||
}
|
||||
|
||||
for _, tags := range drops {
|
||||
if f.ShouldTagsPass(tags) {
|
||||
t.Errorf("Expected tags %v to drop", tags)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,24 +0,0 @@
|
||||
package models
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
)
|
||||
|
||||
type RunningInput struct {
|
||||
Name string
|
||||
Input inputs.Input
|
||||
Config *InputConfig
|
||||
}
|
||||
|
||||
// InputConfig containing a name, interval, and filter
|
||||
type InputConfig struct {
|
||||
Name string
|
||||
NameOverride string
|
||||
MeasurementPrefix string
|
||||
MeasurementSuffix string
|
||||
Tags map[string]string
|
||||
Filter Filter
|
||||
Interval time.Duration
|
||||
}
|
||||
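The InputConfig struct removed above carries NameOverride, MeasurementPrefix and MeasurementSuffix. Judging from the config test earlier in this compare (where name_suffix = "_myothercollector" becomes MeasurementSuffix), these fields rename a plugin's measurements; the sketch below is a guess at that behaviour based on the field names, not code taken from the repository.

```go
package main

import "fmt"

// measurementName applies the naming fields of an InputConfig-style struct to
// a plugin's default measurement name. The behaviour is inferred from the
// field names and the config testdata in this compare; it is an assumption,
// not the actual implementation.
func measurementName(defaultName, nameOverride, prefix, suffix string) string {
	name := defaultName
	if nameOverride != "" {
		name = nameOverride
	}
	return prefix + name + suffix
}

func main() {
	fmt.Println(measurementName("exec", "", "", "_myothercollector")) // exec_myothercollector
}
```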
@@ -1,77 +0,0 @@
|
||||
package models
|
||||
|
||||
import (
|
||||
"log"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/telegraf/plugins/outputs"
|
||||
|
||||
"github.com/influxdata/influxdb/client/v2"
|
||||
)
|
||||
|
||||
const DEFAULT_POINT_BUFFER_LIMIT = 10000
|
||||
|
||||
type RunningOutput struct {
|
||||
Name string
|
||||
Output outputs.Output
|
||||
Config *OutputConfig
|
||||
Quiet bool
|
||||
PointBufferLimit int
|
||||
|
||||
points []*client.Point
|
||||
overwriteCounter int
|
||||
}
|
||||
|
||||
func NewRunningOutput(
|
||||
name string,
|
||||
output outputs.Output,
|
||||
conf *OutputConfig,
|
||||
) *RunningOutput {
|
||||
ro := &RunningOutput{
|
||||
Name: name,
|
||||
points: make([]*client.Point, 0),
|
||||
Output: output,
|
||||
Config: conf,
|
||||
PointBufferLimit: DEFAULT_POINT_BUFFER_LIMIT,
|
||||
}
|
||||
return ro
|
||||
}
|
||||
|
||||
func (ro *RunningOutput) AddPoint(point *client.Point) {
|
||||
if ro.Config.Filter.IsActive {
|
||||
if !ro.Config.Filter.ShouldPointPass(point) {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if len(ro.points) < ro.PointBufferLimit {
|
||||
ro.points = append(ro.points, point)
|
||||
} else {
|
||||
if ro.overwriteCounter == len(ro.points) {
|
||||
ro.overwriteCounter = 0
|
||||
}
|
||||
ro.points[ro.overwriteCounter] = point
|
||||
ro.overwriteCounter++
|
||||
}
|
||||
}
|
||||
|
||||
func (ro *RunningOutput) Write() error {
|
||||
start := time.Now()
|
||||
err := ro.Output.Write(ro.points)
|
||||
elapsed := time.Since(start)
|
||||
if err == nil {
|
||||
if !ro.Quiet {
|
||||
log.Printf("Wrote %d metrics to output %s in %s\n",
|
||||
len(ro.points), ro.Name, elapsed)
|
||||
}
|
||||
ro.points = make([]*client.Point, 0)
|
||||
ro.overwriteCounter = 0
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// OutputConfig containing name and filter
|
||||
type OutputConfig struct {
|
||||
Name string
|
||||
Filter Filter
|
||||
}
|
||||
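RunningOutput above buffers at most PointBufferLimit points and, once the buffer is full, overwrites existing slots via overwriteCounter instead of growing. The stripped-down sketch below reproduces that overwrite-when-full behaviour with plain strings instead of client.Point so it stays self-contained; it is an illustration of the buffering logic, not the type above.

```go
package main

import "fmt"

// pointBuffer keeps at most limit entries; once full, new entries overwrite
// existing slots in round-robin fashion, mirroring RunningOutput.AddPoint.
type pointBuffer struct {
	limit            int
	points           []string
	overwriteCounter int
}

func (b *pointBuffer) add(p string) {
	if len(b.points) < b.limit {
		b.points = append(b.points, p)
		return
	}
	if b.overwriteCounter == len(b.points) {
		b.overwriteCounter = 0
	}
	b.points[b.overwriteCounter] = p
	b.overwriteCounter++
}

func main() {
	b := &pointBuffer{limit: 3}
	for _, p := range []string{"m1", "m2", "m3", "m4", "m5"} {
		b.add(p)
	}
	fmt.Println(b.points) // [m4 m5 m3]: the oldest entries were overwritten first
}
```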
16 outputs/all/all.go (Normal file)
@@ -0,0 +1,16 @@
package all

import (
	_ "github.com/influxdb/telegraf/outputs/amon"
	_ "github.com/influxdb/telegraf/outputs/amqp"
	_ "github.com/influxdb/telegraf/outputs/datadog"
	_ "github.com/influxdb/telegraf/outputs/influxdb"
	_ "github.com/influxdb/telegraf/outputs/kafka"
	_ "github.com/influxdb/telegraf/outputs/kinesis"
	_ "github.com/influxdb/telegraf/outputs/librato"
	_ "github.com/influxdb/telegraf/outputs/mqtt"
	_ "github.com/influxdb/telegraf/outputs/nsq"
	_ "github.com/influxdb/telegraf/outputs/opentsdb"
	_ "github.com/influxdb/telegraf/outputs/prometheus_client"
	_ "github.com/influxdb/telegraf/outputs/riemann"
)
@@ -8,9 +8,9 @@ import (
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/influxdata/influxdb/client/v2"
|
||||
"github.com/influxdata/telegraf/internal"
|
||||
"github.com/influxdata/telegraf/plugins/outputs"
|
||||
"github.com/influxdb/influxdb/client/v2"
|
||||
"github.com/influxdb/telegraf/internal"
|
||||
"github.com/influxdb/telegraf/outputs"
|
||||
)
|
||||
|
||||
type Amon struct {
|
||||
@@ -6,9 +6,9 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
"github.com/influxdb/telegraf/testutil"
|
||||
|
||||
"github.com/influxdata/influxdb/client/v2"
|
||||
"github.com/influxdb/influxdb/client/v2"
|
||||
)
|
||||
|
||||
func TestBuildPoint(t *testing.T) {
|
||||
@@ -18,7 +18,7 @@ func TestBuildPoint(t *testing.T) {
|
||||
err error
|
||||
}{
|
||||
{
|
||||
testutil.TestPoint(float64(0.0), "testpt"),
|
||||
testutil.TestPoint(float64(0.0)),
|
||||
Point{
|
||||
float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
|
||||
0.0,
|
||||
@@ -26,7 +26,7 @@ func TestBuildPoint(t *testing.T) {
|
||||
nil,
|
||||
},
|
||||
{
|
||||
testutil.TestPoint(float64(1.0), "testpt"),
|
||||
testutil.TestPoint(float64(1.0)),
|
||||
Point{
|
||||
float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
|
||||
1.0,
|
||||
@@ -34,7 +34,7 @@ func TestBuildPoint(t *testing.T) {
|
||||
nil,
|
||||
},
|
||||
{
|
||||
testutil.TestPoint(int(10), "testpt"),
|
||||
testutil.TestPoint(int(10)),
|
||||
Point{
|
||||
float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
|
||||
10.0,
|
||||
@@ -42,7 +42,7 @@ func TestBuildPoint(t *testing.T) {
|
||||
nil,
|
||||
},
|
||||
{
|
||||
testutil.TestPoint(int32(112345), "testpt"),
|
||||
testutil.TestPoint(int32(112345)),
|
||||
Point{
|
||||
float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
|
||||
112345.0,
|
||||
@@ -50,7 +50,7 @@ func TestBuildPoint(t *testing.T) {
|
||||
nil,
|
||||
},
|
||||
{
|
||||
testutil.TestPoint(int64(112345), "testpt"),
|
||||
testutil.TestPoint(int64(112345)),
|
||||
Point{
|
||||
float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
|
||||
112345.0,
|
||||
@@ -58,7 +58,7 @@ func TestBuildPoint(t *testing.T) {
|
||||
nil,
|
||||
},
|
||||
{
|
||||
testutil.TestPoint(float32(11234.5), "testpt"),
|
||||
testutil.TestPoint(float32(11234.5)),
|
||||
Point{
|
||||
float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
|
||||
11234.5,
|
||||
@@ -66,7 +66,7 @@ func TestBuildPoint(t *testing.T) {
|
||||
nil,
|
||||
},
|
||||
{
|
||||
testutil.TestPoint("11234.5", "testpt"),
|
||||
testutil.TestPoint("11234.5"),
|
||||
Point{
|
||||
float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
|
||||
11234.5,
|
||||
@@ -75,16 +75,15 @@ func TestBuildPoint(t *testing.T) {
|
||||
},
|
||||
}
|
||||
for _, tt := range tagtests {
|
||||
pt, err := buildPoints(tt.ptIn)
|
||||
pt, err := buildPoint(tt.ptIn)
|
||||
if err != nil && tt.err == nil {
|
||||
t.Errorf("%s: unexpected error, %+v\n", tt.ptIn.Name(), err)
|
||||
}
|
||||
if tt.err != nil && err == nil {
|
||||
t.Errorf("%s: expected an error (%s) but none returned", tt.ptIn.Name(), tt.err.Error())
|
||||
}
|
||||
if !reflect.DeepEqual(pt["value"], tt.outPt) && tt.err == nil {
|
||||
t.Errorf("%s: \nexpected %+v\ngot %+v\n",
|
||||
tt.ptIn.Name(), tt.outPt, pt["value"])
|
||||
if !reflect.DeepEqual(pt, tt.outPt) && tt.err == nil {
|
||||
t.Errorf("%s: \nexpected %+v\ngot %+v\n", tt.ptIn.Name(), tt.outPt, pt)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -2,16 +2,13 @@ package amqp
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/influxdb/client/v2"
|
||||
"github.com/influxdata/telegraf/plugins/outputs"
|
||||
"github.com/influxdb/influxdb/client/v2"
|
||||
"github.com/influxdb/telegraf/outputs"
|
||||
"github.com/streadway/amqp"
|
||||
)
|
||||
|
||||
@@ -20,12 +17,6 @@ type AMQP struct {
|
||||
URL string
|
||||
// AMQP exchange
|
||||
Exchange string
|
||||
// path to CA file
|
||||
SslCa string
|
||||
// path to host cert file
|
||||
SslCert string
|
||||
// path to cert key file
|
||||
SslKey string
|
||||
// Routing Key Tag
|
||||
RoutingTag string `toml:"routing_tag"`
|
||||
// InfluxDB database
|
||||
@@ -55,11 +46,6 @@ var sampleConfig = `
|
||||
# ie, if this tag exists, it's value will be used as the routing key
|
||||
routing_tag = "host"
|
||||
|
||||
# Use ssl
|
||||
#ssl_ca = "/etc/telegraf/ca.pem"
|
||||
#ssl_cert = "/etc/telegraf/cert.pem"
|
||||
#ssl_key = "/etc/telegraf/key.pem"
|
||||
|
||||
# InfluxDB retention policy
|
||||
#retention_policy = "default"
|
||||
# InfluxDB database
|
||||
@@ -78,32 +64,7 @@ func (q *AMQP) Connect() error {
|
||||
"retention_policy": q.RetentionPolicy,
|
||||
}
|
||||
|
||||
var connection *amqp.Connection
|
||||
var err error
|
||||
if q.SslCert != "" && q.SslKey != "" {
|
||||
// make new tls config
|
||||
cfg := new(tls.Config)
|
||||
if q.SslCa != "" {
|
||||
// create ca pool
|
||||
cfg.RootCAs = x509.NewCertPool()
|
||||
|
||||
// add self-signed cert
|
||||
if ca, err := ioutil.ReadFile(q.SslCa); err == nil {
|
||||
cfg.RootCAs.AppendCertsFromPEM(ca)
|
||||
} else {
|
||||
log.Println(err)
|
||||
}
|
||||
}
|
||||
if cert, err := tls.LoadX509KeyPair(q.SslCert, q.SslKey); err == nil {
|
||||
cfg.Certificates = append(cfg.Certificates, cert)
|
||||
} else {
|
||||
log.Println(err)
|
||||
}
|
||||
connection, err = amqp.DialTLS(q.URL, cfg)
|
||||
|
||||
} else {
|
||||
connection, err = amqp.Dial(q.URL)
|
||||
}
|
||||
connection, err := amqp.Dial(q.URL)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
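The branch removed in the hunk above builds a tls.Config from optional CA, certificate and key files before dialing with amqp.DialTLS, falling back to a plain Dial otherwise. Below is a condensed, self-contained version of that pattern; the URL in main is a placeholder assumption and the file paths are taken from the sample config in this diff.

```go
package main

import (
	"crypto/tls"
	"crypto/x509"
	"io/ioutil"
	"log"

	"github.com/streadway/amqp"
)

// dialAMQP mirrors the TLS branch removed in the hunk above: load an optional
// CA into a root pool, load the client cert/key pair, and dial with DialTLS;
// without cert material it falls back to a plain Dial.
func dialAMQP(url, sslCA, sslCert, sslKey string) (*amqp.Connection, error) {
	if sslCert == "" || sslKey == "" {
		return amqp.Dial(url)
	}
	cfg := &tls.Config{}
	if sslCA != "" {
		cfg.RootCAs = x509.NewCertPool()
		if ca, err := ioutil.ReadFile(sslCA); err == nil {
			cfg.RootCAs.AppendCertsFromPEM(ca)
		} else {
			log.Println(err)
		}
	}
	cert, err := tls.LoadX509KeyPair(sslCert, sslKey)
	if err != nil {
		return nil, err
	}
	cfg.Certificates = append(cfg.Certificates, cert)
	return amqp.DialTLS(url, cfg)
}

func main() {
	// Placeholder endpoint; the cert paths match the sample config above.
	conn, err := dialAMQP("amqps://localhost:5671/",
		"/etc/telegraf/ca.pem", "/etc/telegraf/cert.pem", "/etc/telegraf/key.pem")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```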
@@ -3,7 +3,7 @@ package amqp
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
"github.com/influxdb/telegraf/testutil"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
@@ -10,9 +10,9 @@ import (
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/influxdata/influxdb/client/v2"
|
||||
"github.com/influxdata/telegraf/internal"
|
||||
"github.com/influxdata/telegraf/plugins/outputs"
|
||||
"github.com/influxdb/influxdb/client/v2"
|
||||
"github.com/influxdb/telegraf/internal"
|
||||
"github.com/influxdb/telegraf/outputs"
|
||||
)
|
||||
|
||||
type Datadog struct {
|
||||
@@ -9,9 +9,9 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
"github.com/influxdb/telegraf/testutil"
|
||||
|
||||
"github.com/influxdata/influxdb/client/v2"
|
||||
"github.com/influxdb/influxdb/client/v2"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
@@ -162,16 +162,15 @@ func TestBuildPoint(t *testing.T) {
|
||||
},
|
||||
}
|
||||
for _, tt := range tagtests {
|
||||
pt, err := buildPoints(tt.ptIn)
|
||||
pt, err := buildPoint(tt.ptIn)
|
||||
if err != nil && tt.err == nil {
|
||||
t.Errorf("%s: unexpected error, %+v\n", tt.ptIn.Name(), err)
|
||||
}
|
||||
if tt.err != nil && err == nil {
|
||||
t.Errorf("%s: expected an error (%s) but none returned", tt.ptIn.Name(), tt.err.Error())
|
||||
}
|
||||
if !reflect.DeepEqual(pt["value"], tt.outPt) && tt.err == nil {
|
||||
t.Errorf("%s: \nexpected %+v\ngot %+v\n",
|
||||
tt.ptIn.Name(), tt.outPt, pt["value"])
|
||||
if !reflect.DeepEqual(pt, tt.outPt) && tt.err == nil {
|
||||
t.Errorf("%s: \nexpected %+v\ngot %+v\n", tt.ptIn.Name(), tt.outPt, pt)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -9,9 +9,9 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/influxdb/client/v2"
|
||||
"github.com/influxdata/telegraf/internal"
|
||||
"github.com/influxdata/telegraf/plugins/outputs"
|
||||
"github.com/influxdb/influxdb/client/v2"
|
||||
"github.com/influxdb/telegraf/internal"
|
||||
"github.com/influxdb/telegraf/outputs"
|
||||
)
|
||||
|
||||
type InfluxDB struct {
|
||||
@@ -6,7 +6,7 @@ import (
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
"github.com/influxdb/telegraf/testutil"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
85 outputs/kafka/kafka.go (Normal file)
@@ -0,0 +1,85 @@
|
||||
package kafka
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/Shopify/sarama"
|
||||
"github.com/influxdb/influxdb/client/v2"
|
||||
"github.com/influxdb/telegraf/outputs"
|
||||
)
|
||||
|
||||
type Kafka struct {
|
||||
// Kafka brokers to send metrics to
|
||||
Brokers []string
|
||||
// Kafka topic
|
||||
Topic string
|
||||
// Routing Key Tag
|
||||
RoutingTag string `toml:"routing_tag"`
|
||||
|
||||
producer sarama.SyncProducer
|
||||
}
|
||||
|
||||
var sampleConfig = `
|
||||
# URLs of kafka brokers
|
||||
brokers = ["localhost:9092"]
|
||||
# Kafka topic for producer messages
|
||||
topic = "telegraf"
|
||||
# Telegraf tag to use as a routing key
|
||||
# ie, if this tag exists, it's value will be used as the routing key
|
||||
routing_tag = "host"
|
||||
`
|
||||
|
||||
func (k *Kafka) Connect() error {
|
||||
producer, err := sarama.NewSyncProducer(k.Brokers, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
k.producer = producer
|
||||
return nil
|
||||
}
|
||||
|
||||
func (k *Kafka) Close() error {
|
||||
return k.producer.Close()
|
||||
}
|
||||
|
||||
func (k *Kafka) SampleConfig() string {
|
||||
return sampleConfig
|
||||
}
|
||||
|
||||
func (k *Kafka) Description() string {
|
||||
return "Configuration for the Kafka server to send metrics to"
|
||||
}
|
||||
|
||||
func (k *Kafka) Write(points []*client.Point) error {
|
||||
if len(points) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, p := range points {
|
||||
// Combine tags from Point and BatchPoints and grab the resulting
|
||||
// line-protocol output string to write to Kafka
|
||||
value := p.String()
|
||||
|
||||
m := &sarama.ProducerMessage{
|
||||
Topic: k.Topic,
|
||||
Value: sarama.StringEncoder(value),
|
||||
}
|
||||
if h, ok := p.Tags()[k.RoutingTag]; ok {
|
||||
m.Key = sarama.StringEncoder(h)
|
||||
}
|
||||
|
||||
_, _, err := k.producer.SendMessage(m)
|
||||
if err != nil {
|
||||
return errors.New(fmt.Sprintf("FAILED to send kafka message: %s\n",
|
||||
err))
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
outputs.Add("kafka", func() outputs.Output {
|
||||
return &Kafka{}
|
||||
})
|
||||
}
|
||||
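A possible way to drive the Kafka output defined above: build the struct, Connect, and Write a batch of points. The snippet assumes a broker listening on localhost:9092 and uses the influxdb client/v2 NewPoint constructor that the rest of this compare imports; it is a usage sketch, not part of the repository.

```go
package main

import (
	"log"
	"time"

	"github.com/influxdb/influxdb/client/v2"
	"github.com/influxdb/telegraf/outputs/kafka"
)

func main() {
	// Assumes a Kafka broker is reachable on localhost:9092; import paths
	// follow the layout used in this compare.
	k := &kafka.Kafka{
		Brokers:    []string{"localhost:9092"},
		Topic:      "telegraf",
		RoutingTag: "host",
	}
	if err := k.Connect(); err != nil {
		log.Fatal(err)
	}
	defer k.Close()

	pt, err := client.NewPoint(
		"cpu_usage_idle",
		map[string]string{"host": "server01"}, // used as the Kafka routing key
		map[string]interface{}{"value": 91.5},
		time.Now(),
	)
	if err != nil {
		log.Fatal(err)
	}
	if err := k.Write([]*client.Point{pt}); err != nil {
		log.Fatal(err)
	}
}
```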
@@ -3,7 +3,7 @@ package kafka
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
"github.com/influxdb/telegraf/testutil"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package kinesis
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
@@ -14,8 +15,8 @@ import (
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
"github.com/aws/aws-sdk-go/service/kinesis"
|
||||
|
||||
"github.com/influxdata/influxdb/client/v2"
|
||||
"github.com/influxdata/telegraf/plugins/outputs"
|
||||
"github.com/influxdb/influxdb/client/v2"
|
||||
"github.com/influxdb/telegraf/outputs"
|
||||
)
|
||||
|
||||
type KinesisOutput struct {
|
||||
@@ -100,7 +101,7 @@ func (k *KinesisOutput) Connect() error {
|
||||
}
|
||||
|
||||
func (k *KinesisOutput) Close() error {
|
||||
return nil
|
||||
return errors.New("Error")
|
||||
}
|
||||
|
||||
func FormatMetric(k *KinesisOutput, point *client.Point) (string, error) {
|
||||
@@ -1,7 +1,7 @@
|
||||
package kinesis
|
||||
|
||||
import (
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
"github.com/influxdb/telegraf/testutil"
|
||||
"github.com/stretchr/testify/require"
|
||||
"testing"
|
||||
)
|
||||
@@ -7,9 +7,9 @@ import (
|
||||
"log"
|
||||
"net/http"
|
||||
|
||||
"github.com/influxdata/influxdb/client/v2"
|
||||
"github.com/influxdata/telegraf/internal"
|
||||
"github.com/influxdata/telegraf/plugins/outputs"
|
||||
"github.com/influxdb/influxdb/client/v2"
|
||||
"github.com/influxdb/telegraf/internal"
|
||||
"github.com/influxdb/telegraf/outputs"
|
||||
)
|
||||
|
||||
type Librato struct {
|
||||
@@ -9,9 +9,9 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
"github.com/influxdb/telegraf/testutil"
|
||||
|
||||
"github.com/influxdata/influxdb/client/v2"
|
||||
"github.com/influxdb/influxdb/client/v2"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
@@ -142,20 +142,15 @@ func TestBuildGauge(t *testing.T) {
|
||||
|
||||
l := NewLibrato(fakeUrl)
|
||||
for _, gt := range gaugeTests {
|
||||
gauges, err := l.buildGauges(gt.ptIn)
|
||||
gauge, err := l.buildGauge(gt.ptIn)
|
||||
if err != nil && gt.err == nil {
|
||||
t.Errorf("%s: unexpected error, %+v\n", gt.ptIn.Name(), err)
|
||||
}
|
||||
if gt.err != nil && err == nil {
|
||||
t.Errorf("%s: expected an error (%s) but none returned",
|
||||
gt.ptIn.Name(), gt.err.Error())
|
||||
t.Errorf("%s: expected an error (%s) but none returned", gt.ptIn.Name(), gt.err.Error())
|
||||
}
|
||||
if len(gauges) == 0 {
|
||||
continue
|
||||
}
|
||||
if gt.err == nil && !reflect.DeepEqual(gauges[0], gt.outGauge) {
|
||||
t.Errorf("%s: \nexpected %+v\ngot %+v\n",
|
||||
gt.ptIn.Name(), gt.outGauge, gauges[0])
|
||||
if !reflect.DeepEqual(gauge, gt.outGauge) && gt.err == nil {
|
||||
t.Errorf("%s: \nexpected %+v\ngot %+v\n", gt.ptIn.Name(), gt.outGauge, gauge)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -203,18 +198,15 @@ func TestBuildGaugeWithSource(t *testing.T) {
|
||||
l := NewLibrato(fakeUrl)
|
||||
l.SourceTag = "hostname"
|
||||
for _, gt := range gaugeTests {
|
||||
gauges, err := l.buildGauges(gt.ptIn)
|
||||
gauge, err := l.buildGauge(gt.ptIn)
|
||||
if err != nil && gt.err == nil {
|
||||
t.Errorf("%s: unexpected error, %+v\n", gt.ptIn.Name(), err)
|
||||
}
|
||||
if gt.err != nil && err == nil {
|
||||
t.Errorf("%s: expected an error (%s) but none returned", gt.ptIn.Name(), gt.err.Error())
|
||||
}
|
||||
if len(gauges) == 0 {
|
||||
continue
|
||||
}
|
||||
if gt.err == nil && !reflect.DeepEqual(gauges[0], gt.outGauge) {
|
||||
t.Errorf("%s: \nexpected %+v\ngot %+v\n", gt.ptIn.Name(), gt.outGauge, gauges[0])
|
||||
if !reflect.DeepEqual(gauge, gt.outGauge) && gt.err == nil {
|
||||
t.Errorf("%s: \nexpected %+v\ngot %+v\n", gt.ptIn.Name(), gt.outGauge, gauge)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -10,9 +10,9 @@ import (
|
||||
"sync"
|
||||
|
||||
paho "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git"
|
||||
"github.com/influxdata/influxdb/client/v2"
|
||||
"github.com/influxdata/telegraf/internal"
|
||||
"github.com/influxdata/telegraf/plugins/outputs"
|
||||
"github.com/influxdb/influxdb/client/v2"
|
||||
"github.com/influxdb/telegraf/internal"
|
||||
"github.com/influxdb/telegraf/outputs"
|
||||
)
|
||||
|
||||
const MaxClientIdLen = 8
|
||||
@@ -3,7 +3,7 @@ package mqtt
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
"github.com/influxdb/telegraf/testutil"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
@@ -2,8 +2,8 @@ package nsq
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/influxdata/influxdb/client/v2"
|
||||
"github.com/influxdata/telegraf/plugins/outputs"
|
||||
"github.com/influxdb/influxdb/client/v2"
|
||||
"github.com/influxdb/telegraf/outputs"
|
||||
"github.com/nsqio/go-nsq"
|
||||
)
|
||||
|
||||
@@ -3,7 +3,7 @@ package nsq
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
"github.com/influxdb/telegraf/testutil"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
@@ -8,8 +8,8 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/influxdb/client/v2"
|
||||
"github.com/influxdata/telegraf/plugins/outputs"
|
||||
"github.com/influxdb/influxdb/client/v2"
|
||||
"github.com/influxdb/telegraf/outputs"
|
||||
)
|
||||
|
||||
type OpenTSDB struct {
|
||||
@@ -4,7 +4,7 @@ import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
"github.com/influxdb/telegraf/testutil"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
@@ -5,8 +5,8 @@ import (
|
||||
"log"
|
||||
"net/http"
|
||||
|
||||
"github.com/influxdata/influxdb/client/v2"
|
||||
"github.com/influxdata/telegraf/plugins/outputs"
|
||||
"github.com/influxdb/influxdb/client/v2"
|
||||
"github.com/influxdb/telegraf/outputs"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
@@ -3,11 +3,11 @@ package prometheus_client
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/influxdb/influxdb/client/v2"
|
||||
"github.com/influxdb/telegraf/plugins/prometheus"
|
||||
"github.com/influxdb/telegraf/testutil"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/influxdata/influxdb/client/v2"
|
||||
"github.com/influxdata/telegraf/plugins/inputs/prometheus"
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
)
|
||||
|
||||
var pTesting *PrometheusClient
|
||||
@@ -48,8 +48,7 @@ func TestPrometheusWritePointEmptyTag(t *testing.T) {
|
||||
|
||||
require.NoError(t, p.Gather(&acc))
|
||||
for _, e := range expected {
|
||||
acc.AssertContainsFields(t, "prometheus_"+e.name,
|
||||
map[string]interface{}{"value": e.value})
|
||||
assert.NoError(t, acc.ValidateValue(e.name, e.value))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -89,8 +88,7 @@ func TestPrometheusWritePointTag(t *testing.T) {
|
||||
|
||||
require.NoError(t, p.Gather(&acc))
|
||||
for _, e := range expected {
|
||||
acc.AssertContainsFields(t, "prometheus_"+e.name,
|
||||
map[string]interface{}{"value": e.value})
|
||||
assert.True(t, acc.CheckTaggedValue(e.name, e.value, tags))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
package outputs
|
||||
|
||||
import (
|
||||
"github.com/influxdata/influxdb/client/v2"
|
||||
"github.com/influxdb/influxdb/client/v2"
|
||||
)
|
||||
|
||||
type Output interface {
|
||||
@@ -6,8 +6,8 @@ import (
|
||||
"os"
|
||||
|
||||
"github.com/amir/raidman"
|
||||
"github.com/influxdata/influxdb/client/v2"
|
||||
"github.com/influxdata/telegraf/plugins/outputs"
|
||||
"github.com/influxdb/influxdb/client/v2"
|
||||
"github.com/influxdb/telegraf/outputs"
|
||||
)
|
||||
|
||||
type Riemann struct {
|
||||
@@ -3,7 +3,7 @@ package riemann
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
"github.com/influxdb/telegraf/testutil"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
@@ -4,7 +4,7 @@ import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
"github.com/influxdb/telegraf/plugins"
|
||||
"net"
|
||||
"strconv"
|
||||
"strings"
|
||||
@@ -119,7 +119,7 @@ func (a *Aerospike) Description() string {
|
||||
return "Read stats from an aerospike server"
|
||||
}
|
||||
|
||||
func (a *Aerospike) Gather(acc inputs.Accumulator) error {
|
||||
func (a *Aerospike) Gather(acc plugins.Accumulator) error {
|
||||
if len(a.Servers) == 0 {
|
||||
return a.gatherServer("127.0.0.1:3000", acc)
|
||||
}
|
||||
@@ -140,7 +140,7 @@ func (a *Aerospike) Gather(acc inputs.Accumulator) error {
|
||||
return outerr
|
||||
}
|
||||
|
||||
func (a *Aerospike) gatherServer(host string, acc inputs.Accumulator) error {
|
||||
func (a *Aerospike) gatherServer(host string, acc plugins.Accumulator) error {
|
||||
aerospikeInfo, err := getMap(STATISTICS_COMMAND, host)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Aerospike info failed: %s", err)
|
||||
@@ -249,7 +249,7 @@ func get(key []byte, host string) (map[string]string, error) {
|
||||
|
||||
func readAerospikeStats(
|
||||
stats map[string]string,
|
||||
acc inputs.Accumulator,
|
||||
acc plugins.Accumulator,
|
||||
host string,
|
||||
namespace string,
|
||||
) {
|
||||
@@ -336,7 +336,7 @@ func msgLenFromBytes(buf [6]byte) int64 {
|
||||
}
|
||||
|
||||
func init() {
|
||||
inputs.Add("aerospike", func() inputs.Input {
|
||||
plugins.Add("aerospike", func() plugins.Plugin {
|
||||
return &Aerospike{}
|
||||
})
|
||||
}
|
||||
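The init function above registers a factory under a plugin name (plugins.Add in one version, inputs.Add in the other). The sketch below shows that registry pattern written from scratch with a minimal interface rather than against Telegraf's actual Plugin/Input interfaces, so it is illustrative only.

```go
package main

import "fmt"

// Plugin is a stand-in for the plugin interface; the real Telegraf interface
// has more methods (SampleConfig, Gather, and so on).
type Plugin interface {
	Description() string
}

// registry maps a plugin name to a factory, mirroring the
// plugins.Add("aerospike", func() plugins.Plugin { ... }) call above.
var registry = map[string]func() Plugin{}

func Add(name string, factory func() Plugin) {
	registry[name] = factory
}

type Aerospike struct{}

func (a *Aerospike) Description() string { return "Read stats from an aerospike server" }

func init() {
	Add("aerospike", func() Plugin { return &Aerospike{} })
}

func main() {
	p := registry["aerospike"]()
	fmt.Println(p.Description())
}
```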
@@ -1,12 +1,11 @@
|
||||
package aerospike
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
"github.com/influxdb/telegraf/testutil"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestAerospikeStatistics(t *testing.T) {
|
||||
@@ -32,7 +31,7 @@ func TestAerospikeStatistics(t *testing.T) {
|
||||
}
|
||||
|
||||
for _, metric := range asMetrics {
|
||||
assert.True(t, acc.HasIntField("aerospike", metric), metric)
|
||||
assert.True(t, acc.HasIntValue(metric), metric)
|
||||
}
|
||||
|
||||
}
|
||||
@@ -50,16 +49,13 @@ func TestReadAerospikeStatsNoNamespace(t *testing.T) {
|
||||
"stat_read_reqs": "12345",
|
||||
}
|
||||
readAerospikeStats(stats, &acc, "host1", "")
|
||||
|
||||
fields := map[string]interface{}{
|
||||
"stat_write_errs": int64(12345),
|
||||
"stat_read_reqs": int64(12345),
|
||||
for k := range stats {
|
||||
if k == "stat-write-errs" {
|
||||
k = "stat_write_errs"
|
||||
}
|
||||
assert.True(t, acc.HasMeasurement(k))
|
||||
assert.True(t, acc.CheckValue(k, int64(12345)))
|
||||
}
|
||||
tags := map[string]string{
|
||||
"aerospike_host": "host1",
|
||||
"namespace": "_service",
|
||||
}
|
||||
acc.AssertContainsTaggedFields(t, "aerospike", fields, tags)
|
||||
}
|
||||
|
||||
func TestReadAerospikeStatsNamespace(t *testing.T) {
|
||||
@@ -70,15 +66,13 @@ func TestReadAerospikeStatsNamespace(t *testing.T) {
|
||||
}
|
||||
readAerospikeStats(stats, &acc, "host1", "test")
|
||||
|
||||
fields := map[string]interface{}{
|
||||
"stat_write_errs": int64(12345),
|
||||
"stat_read_reqs": int64(12345),
|
||||
}
|
||||
tags := map[string]string{
|
||||
"aerospike_host": "host1",
|
||||
"namespace": "test",
|
||||
}
|
||||
acc.AssertContainsTaggedFields(t, "aerospike", fields, tags)
|
||||
for k := range stats {
|
||||
assert.True(t, acc.ValidateTaggedValue(k, int64(12345), tags) == nil)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAerospikeUnmarshalList(t *testing.T) {
|
||||
37 plugins/all/all.go (Normal file)
@@ -0,0 +1,37 @@
package all

import (
	_ "github.com/influxdb/telegraf/plugins/aerospike"
	_ "github.com/influxdb/telegraf/plugins/apache"
	_ "github.com/influxdb/telegraf/plugins/bcache"
	_ "github.com/influxdb/telegraf/plugins/disque"
	_ "github.com/influxdb/telegraf/plugins/elasticsearch"
	_ "github.com/influxdb/telegraf/plugins/exec"
	_ "github.com/influxdb/telegraf/plugins/haproxy"
	_ "github.com/influxdb/telegraf/plugins/httpjson"
	_ "github.com/influxdb/telegraf/plugins/influxdb"
	_ "github.com/influxdb/telegraf/plugins/jolokia"
	_ "github.com/influxdb/telegraf/plugins/kafka_consumer"
	_ "github.com/influxdb/telegraf/plugins/leofs"
	_ "github.com/influxdb/telegraf/plugins/lustre2"
	_ "github.com/influxdb/telegraf/plugins/mailchimp"
	_ "github.com/influxdb/telegraf/plugins/memcached"
	_ "github.com/influxdb/telegraf/plugins/mongodb"
	_ "github.com/influxdb/telegraf/plugins/mysql"
	_ "github.com/influxdb/telegraf/plugins/nginx"
	_ "github.com/influxdb/telegraf/plugins/phpfpm"
	_ "github.com/influxdb/telegraf/plugins/ping"
	_ "github.com/influxdb/telegraf/plugins/postgresql"
	_ "github.com/influxdb/telegraf/plugins/procstat"
	_ "github.com/influxdb/telegraf/plugins/prometheus"
	_ "github.com/influxdb/telegraf/plugins/puppetagent"
	_ "github.com/influxdb/telegraf/plugins/rabbitmq"
	_ "github.com/influxdb/telegraf/plugins/redis"
	_ "github.com/influxdb/telegraf/plugins/rethinkdb"
	_ "github.com/influxdb/telegraf/plugins/statsd"
	_ "github.com/influxdb/telegraf/plugins/system"
	_ "github.com/influxdb/telegraf/plugins/trig"
	_ "github.com/influxdb/telegraf/plugins/twemproxy"
	_ "github.com/influxdb/telegraf/plugins/zfs"
	_ "github.com/influxdb/telegraf/plugins/zookeeper"
)
@@ -11,7 +11,7 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
"github.com/influxdb/telegraf/plugins"
|
||||
)
|
||||
|
||||
type Apache struct {
|
||||
@@ -31,7 +31,7 @@ func (n *Apache) Description() string {
|
||||
return "Read Apache status information (mod_status)"
|
||||
}
|
||||
|
||||
func (n *Apache) Gather(acc inputs.Accumulator) error {
|
||||
func (n *Apache) Gather(acc plugins.Accumulator) error {
|
||||
var wg sync.WaitGroup
|
||||
var outerr error
|
||||
|
||||
@@ -59,7 +59,7 @@ var tr = &http.Transport{
|
||||
|
||||
var client = &http.Client{Transport: tr}
|
||||
|
||||
func (n *Apache) gatherUrl(addr *url.URL, acc inputs.Accumulator) error {
|
||||
func (n *Apache) gatherUrl(addr *url.URL, acc plugins.Accumulator) error {
|
||||
resp, err := client.Get(addr.String())
|
||||
if err != nil {
|
||||
return fmt.Errorf("error making HTTP request to %s: %s", addr.String(), err)
|
||||
@@ -164,7 +164,7 @@ func getTags(addr *url.URL) map[string]string {
|
||||
}
|
||||
|
||||
func init() {
|
||||
inputs.Add("apache", func() inputs.Input {
|
||||
plugins.Add("apache", func() plugins.Plugin {
|
||||
return &Apache{}
|
||||
})
|
||||
}
|
||||
@@ -6,8 +6,9 @@ import (
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
"github.com/influxdb/telegraf/testutil"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
@@ -43,31 +44,37 @@ func TestHTTPApache(t *testing.T) {
|
||||
err := a.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
|
||||
fields := map[string]interface{}{
|
||||
"TotalAccesses": float64(1.29811861e+08),
|
||||
"TotalkBytes": float64(5.213701865e+09),
|
||||
"CPULoad": float64(6.51929),
|
||||
"Uptime": float64(941553),
|
||||
"ReqPerSec": float64(137.87),
|
||||
"BytesPerSec": float64(5.67024e+06),
|
||||
"BytesPerReq": float64(41127.4),
|
||||
"BusyWorkers": float64(270),
|
||||
"IdleWorkers": float64(630),
|
||||
"ConnsTotal": float64(1451),
|
||||
"ConnsAsyncWriting": float64(32),
|
||||
"ConnsAsyncKeepAlive": float64(945),
|
||||
"ConnsAsyncClosing": float64(205),
|
||||
"scboard_waiting": float64(630),
|
||||
"scboard_starting": float64(0),
|
||||
"scboard_reading": float64(157),
|
||||
"scboard_sending": float64(113),
|
||||
"scboard_keepalive": float64(0),
|
||||
"scboard_dnslookup": float64(0),
|
||||
"scboard_closing": float64(0),
|
||||
"scboard_logging": float64(0),
|
||||
"scboard_finishing": float64(0),
|
||||
"scboard_idle_cleanup": float64(0),
|
||||
"scboard_open": float64(2850),
|
||||
testInt := []struct {
|
||||
measurement string
|
||||
value float64
|
||||
}{
|
||||
{"TotalAccesses", 1.29811861e+08},
|
||||
{"TotalkBytes", 5.213701865e+09},
|
||||
{"CPULoad", 6.51929},
|
||||
{"Uptime", 941553},
|
||||
{"ReqPerSec", 137.87},
|
||||
{"BytesPerSec", 5.67024e+06},
|
||||
{"BytesPerReq", 41127.4},
|
||||
{"BusyWorkers", 270},
|
||||
{"IdleWorkers", 630},
|
||||
{"ConnsTotal", 1451},
|
||||
{"ConnsAsyncWriting", 32},
|
||||
{"ConnsAsyncKeepAlive", 945},
|
||||
{"ConnsAsyncClosing", 205},
|
||||
{"scboard_waiting", 630},
|
||||
{"scboard_starting", 0},
|
||||
{"scboard_reading", 157},
|
||||
{"scboard_sending", 113},
|
||||
{"scboard_keepalive", 0},
|
||||
{"scboard_dnslookup", 0},
|
||||
{"scboard_closing", 0},
|
||||
{"scboard_logging", 0},
|
||||
{"scboard_finishing", 0},
|
||||
{"scboard_idle_cleanup", 0},
|
||||
{"scboard_open", 2850},
|
||||
}
|
||||
|
||||
for _, test := range testInt {
|
||||
assert.True(t, acc.CheckValue(test.measurement, test.value))
|
||||
}
|
||||
acc.AssertContainsFields(t, "apache", fields)
|
||||
}
|
||||
@@ -26,27 +26,27 @@ Measurement names:
|
||||
dirty_data
|
||||
Amount of dirty data for this backing device in the cache. Continuously
|
||||
updated unlike the cache set's version, but may be slightly off.
|
||||
|
||||
|
||||
bypassed
|
||||
Amount of IO (both reads and writes) that has bypassed the cache
|
||||
|
||||
|
||||
|
||||
cache_bypass_hits
|
||||
cache_bypass_misses
|
||||
Hits and misses for IO that is intended to skip the cache are still counted,
|
||||
but broken out here.
|
||||
|
||||
|
||||
cache_hits
|
||||
cache_misses
|
||||
cache_hit_ratio
|
||||
Hits and misses are counted per individual IO as bcache sees them; a
|
||||
partial hit is counted as a miss.
|
||||
|
||||
|
||||
cache_miss_collisions
|
||||
Counts instances where data was going to be inserted into the cache from a
|
||||
cache miss, but raced with a write and data was already present (usually 0
|
||||
since the synchronization for cache misses was rewritten)
|
||||
|
||||
|
||||
cache_readaheads
|
||||
Count of times readahead occurred.
|
||||
```
|
||||
@@ -70,7 +70,7 @@ Using this configuration:
|
||||
When run with:
|
||||
|
||||
```
|
||||
./telegraf -config telegraf.conf -input-filter bcache -test
|
||||
./telegraf -config telegraf.conf -filter bcache -test
|
||||
```
|
||||
|
||||
It produces:
|
||||
@@ -8,7 +8,7 @@ import (
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
"github.com/influxdb/telegraf/plugins"
|
||||
)
|
||||
|
||||
type Bcache struct {
|
||||
@@ -69,7 +69,7 @@ func prettyToBytes(v string) uint64 {
|
||||
return uint64(result)
|
||||
}
|
||||
|
||||
func (b *Bcache) gatherBcache(bdev string, acc inputs.Accumulator) error {
|
||||
func (b *Bcache) gatherBcache(bdev string, acc plugins.Accumulator) error {
|
||||
tags := getTags(bdev)
|
||||
metrics, err := filepath.Glob(bdev + "/stats_total/*")
|
||||
if len(metrics) == 0 {
|
||||
@@ -104,7 +104,7 @@ func (b *Bcache) gatherBcache(bdev string, acc inputs.Accumulator) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *Bcache) Gather(acc inputs.Accumulator) error {
|
||||
func (b *Bcache) Gather(acc plugins.Accumulator) error {
|
||||
bcacheDevsChecked := make(map[string]bool)
|
||||
var restrictDevs bool
|
||||
if len(b.BcacheDevs) != 0 {
|
||||
@@ -135,7 +135,7 @@ func (b *Bcache) Gather(acc inputs.Accumulator) error {
|
||||
}
|
||||
|
||||
func init() {
|
||||
inputs.Add("bcache", func() inputs.Input {
|
||||
plugins.Add("bcache", func() plugins.Plugin {
|
||||
return &Bcache{}
|
||||
})
|
||||
}
|
||||
@@ -5,7 +5,8 @@ import (
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
"github.com/influxdb/telegraf/testutil"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
@@ -28,6 +29,11 @@ var (
|
||||
testBcacheBackingDevPath = os.TempDir() + "/telegraf/sys/devices/virtual/block/md10"
|
||||
)
|
||||
|
||||
type metrics struct {
|
||||
name string
|
||||
value uint64
|
||||
}
|
||||
|
||||
func TestBcacheGeneratesMetrics(t *testing.T) {
|
||||
err := os.MkdirAll(testBcacheUuidPath, 0755)
|
||||
require.NoError(t, err)
|
||||
@@ -47,52 +53,70 @@ func TestBcacheGeneratesMetrics(t *testing.T) {
|
||||
err = os.MkdirAll(testBcacheUuidPath+"/bdev0/stats_total", 0755)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/dirty_data",
|
||||
[]byte(dirty_data), 0644)
|
||||
err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/dirty_data", []byte(dirty_data), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/bypassed",
|
||||
[]byte(bypassed), 0644)
|
||||
err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/bypassed", []byte(bypassed), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_bypass_hits",
|
||||
[]byte(cache_bypass_hits), 0644)
|
||||
err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_bypass_hits", []byte(cache_bypass_hits), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_bypass_misses",
|
||||
[]byte(cache_bypass_misses), 0644)
|
||||
err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_bypass_misses", []byte(cache_bypass_misses), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_hit_ratio",
|
||||
[]byte(cache_hit_ratio), 0644)
|
||||
err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_hit_ratio", []byte(cache_hit_ratio), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_hits",
|
||||
[]byte(cache_hits), 0644)
|
||||
err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_hits", []byte(cache_hits), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_miss_collisions",
|
||||
[]byte(cache_miss_collisions), 0644)
|
||||
err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_miss_collisions", []byte(cache_miss_collisions), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_misses",
|
||||
[]byte(cache_misses), 0644)
|
||||
err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_misses", []byte(cache_misses), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_readaheads",
|
||||
[]byte(cache_readaheads), 0644)
|
||||
err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_readaheads", []byte(cache_readaheads), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
fields := map[string]interface{}{
|
||||
"dirty_data": uint64(1610612736),
|
||||
"bypassed": uint64(5167704440832),
|
||||
"cache_bypass_hits": uint64(146155333),
|
||||
"cache_bypass_misses": uint64(0),
|
||||
"cache_hit_ratio": uint64(90),
|
||||
"cache_hits": uint64(511469583),
|
||||
"cache_miss_collisions": uint64(157567),
|
||||
"cache_misses": uint64(50616331),
|
||||
"cache_readaheads": uint64(2),
|
||||
intMetrics := []*metrics{
|
||||
{
|
||||
name: "dirty_data",
|
||||
value: 1610612736,
|
||||
},
|
||||
{
|
||||
name: "bypassed",
|
||||
value: 5167704440832,
|
||||
},
|
||||
{
|
||||
name: "cache_bypass_hits",
|
||||
value: 146155333,
|
||||
},
|
||||
{
|
||||
name: "cache_bypass_misses",
|
||||
value: 0,
|
||||
},
|
||||
{
|
||||
name: "cache_hit_ratio",
|
||||
value: 90,
|
||||
},
|
||||
{
|
||||
name: "cache_hits",
|
||||
value: 511469583,
|
||||
},
|
||||
{
|
||||
name: "cache_miss_collisions",
|
||||
value: 157567,
|
||||
},
|
||||
{
|
||||
name: "cache_misses",
|
||||
value: 50616331,
|
||||
},
|
||||
{
|
||||
name: "cache_readaheads",
|
||||
value: 2,
|
||||
},
|
||||
}
|
||||
|
||||
tags := map[string]string{
|
||||
@@ -102,19 +126,27 @@ func TestBcacheGeneratesMetrics(t *testing.T) {
|
||||
|
||||
var acc testutil.Accumulator
|
||||
|
||||
// all devs
|
||||
//all devs
|
||||
b := &Bcache{BcachePath: testBcachePath}
|
||||
|
||||
err = b.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
acc.AssertContainsTaggedFields(t, "bcache", fields, tags)
|
||||
|
||||
// one exist dev
|
||||
for _, metric := range intMetrics {
|
||||
assert.True(t, acc.HasUIntValue(metric.name), metric.name)
|
||||
assert.True(t, acc.CheckTaggedValue(metric.name, metric.value, tags))
|
||||
}
|
||||
|
||||
//one exist dev
|
||||
b = &Bcache{BcachePath: testBcachePath, BcacheDevs: []string{"bcache0"}}
|
||||
|
||||
err = b.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
acc.AssertContainsTaggedFields(t, "bcache", fields, tags)
|
||||
|
||||
for _, metric := range intMetrics {
|
||||
assert.True(t, acc.HasUIntValue(metric.name), metric.name)
|
||||
assert.True(t, acc.CheckTaggedValue(metric.name, metric.value, tags))
|
||||
}
|
||||
|
||||
err = os.RemoveAll(os.TempDir() + "/telegraf")
|
||||
require.NoError(t, err)
|
||||
@@ -10,7 +10,7 @@ import (
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
"github.com/influxdb/telegraf/plugins"
|
||||
)
|
||||
|
||||
type Disque struct {
|
||||
@@ -61,7 +61,7 @@ var ErrProtocolError = errors.New("disque protocol error")
|
||||
|
||||
// Reads stats from all configured servers and accumulates them.
// Returns one of the errors encountered while gathering stats (if any).
|
||||
func (g *Disque) Gather(acc inputs.Accumulator) error {
|
||||
func (g *Disque) Gather(acc plugins.Accumulator) error {
|
||||
if len(g.Servers) == 0 {
|
||||
url := &url.URL{
|
||||
Host: ":7711",
|
||||
@@ -98,7 +98,7 @@ func (g *Disque) Gather(acc inputs.Accumulator) error {
|
||||
|
||||
const defaultPort = "7711"
|
||||
|
||||
func (g *Disque) gatherServer(addr *url.URL, acc inputs.Accumulator) error {
|
||||
func (g *Disque) gatherServer(addr *url.URL, acc plugins.Accumulator) error {
|
||||
if g.c == nil {
|
||||
|
||||
_, _, err := net.SplitHostPort(addr.Host)
|
||||
@@ -198,7 +198,7 @@ func (g *Disque) gatherServer(addr *url.URL, acc inputs.Accumulator) error {
|
||||
}
|
||||
|
||||
func init() {
|
||||
inputs.Add("disque", func() inputs.Input {
|
||||
plugins.Add("disque", func() plugins.Plugin {
|
||||
return &Disque{}
|
||||
})
|
||||
}
|
||||
@@ -6,7 +6,8 @@ import (
|
||||
"net"
|
||||
"testing"
|
||||
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
"github.com/influxdb/telegraf/testutil"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
@@ -54,26 +55,42 @@ func TestDisqueGeneratesMetrics(t *testing.T) {
|
||||
err = r.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
|
||||
fields := map[string]interface{}{
|
||||
"uptime": uint64(1452705),
|
||||
"clients": uint64(31),
|
||||
"blocked_clients": uint64(13),
|
||||
"used_memory": uint64(1840104),
|
||||
"used_memory_rss": uint64(3227648),
|
||||
"used_memory_peak": uint64(89603656),
|
||||
"total_connections_received": uint64(5062777),
|
||||
"total_commands_processed": uint64(12308396),
|
||||
"instantaneous_ops_per_sec": uint64(18),
|
||||
"latest_fork_usec": uint64(1644),
|
||||
"registered_jobs": uint64(360),
|
||||
"registered_queues": uint64(12),
|
||||
"mem_fragmentation_ratio": float64(1.75),
|
||||
"used_cpu_sys": float64(19585.73),
|
||||
"used_cpu_user": float64(11255.96),
|
||||
"used_cpu_sys_children": float64(1.75),
|
||||
"used_cpu_user_children": float64(1.91),
|
||||
checkInt := []struct {
|
||||
name string
|
||||
value uint64
|
||||
}{
|
||||
{"uptime", 1452705},
|
||||
{"clients", 31},
|
||||
{"blocked_clients", 13},
|
||||
{"used_memory", 1840104},
|
||||
{"used_memory_rss", 3227648},
|
||||
{"used_memory_peak", 89603656},
|
||||
{"total_connections_received", 5062777},
|
||||
{"total_commands_processed", 12308396},
|
||||
{"instantaneous_ops_per_sec", 18},
|
||||
{"latest_fork_usec", 1644},
|
||||
{"registered_jobs", 360},
|
||||
{"registered_queues", 12},
|
||||
}
|
||||
|
||||
for _, c := range checkInt {
|
||||
assert.True(t, acc.CheckValue(c.name, c.value))
|
||||
}
|
||||
|
||||
checkFloat := []struct {
|
||||
name string
|
||||
value float64
|
||||
}{
|
||||
{"mem_fragmentation_ratio", 1.75},
|
||||
{"used_cpu_sys", 19585.73},
|
||||
{"used_cpu_user", 11255.96},
|
||||
{"used_cpu_sys_children", 1.75},
|
||||
{"used_cpu_user_children", 1.91},
|
||||
}
|
||||
|
||||
for _, c := range checkFloat {
|
||||
assert.True(t, acc.CheckValue(c.name, c.value))
|
||||
}
|
||||
acc.AssertContainsFields(t, "disque", fields)
|
||||
}
|
||||
|
||||
func TestDisqueCanPullStatsFromMultipleServers(t *testing.T) {
|
||||
@@ -120,26 +137,42 @@ func TestDisqueCanPullStatsFromMultipleServers(t *testing.T) {
|
||||
err = r.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
|
||||
fields := map[string]interface{}{
|
||||
"uptime": uint64(1452705),
|
||||
"clients": uint64(31),
|
||||
"blocked_clients": uint64(13),
|
||||
"used_memory": uint64(1840104),
|
||||
"used_memory_rss": uint64(3227648),
|
||||
"used_memory_peak": uint64(89603656),
|
||||
"total_connections_received": uint64(5062777),
|
||||
"total_commands_processed": uint64(12308396),
|
||||
"instantaneous_ops_per_sec": uint64(18),
|
||||
"latest_fork_usec": uint64(1644),
|
||||
"registered_jobs": uint64(360),
|
||||
"registered_queues": uint64(12),
|
||||
"mem_fragmentation_ratio": float64(1.75),
|
||||
"used_cpu_sys": float64(19585.73),
|
||||
"used_cpu_user": float64(11255.96),
|
||||
"used_cpu_sys_children": float64(1.75),
|
||||
"used_cpu_user_children": float64(1.91),
|
||||
checkInt := []struct {
|
||||
name string
|
||||
value uint64
|
||||
}{
|
||||
{"uptime", 1452705},
|
||||
{"clients", 31},
|
||||
{"blocked_clients", 13},
|
||||
{"used_memory", 1840104},
|
||||
{"used_memory_rss", 3227648},
|
||||
{"used_memory_peak", 89603656},
|
||||
{"total_connections_received", 5062777},
|
||||
{"total_commands_processed", 12308396},
|
||||
{"instantaneous_ops_per_sec", 18},
|
||||
{"latest_fork_usec", 1644},
|
||||
{"registered_jobs", 360},
|
||||
{"registered_queues", 12},
|
||||
}
|
||||
|
||||
for _, c := range checkInt {
|
||||
assert.True(t, acc.CheckValue(c.name, c.value))
|
||||
}
|
||||
|
||||
checkFloat := []struct {
|
||||
name string
|
||||
value float64
|
||||
}{
|
||||
{"mem_fragmentation_ratio", 1.75},
|
||||
{"used_cpu_sys", 19585.73},
|
||||
{"used_cpu_user", 11255.96},
|
||||
{"used_cpu_sys_children", 1.75},
|
||||
{"used_cpu_user_children", 1.91},
|
||||
}
|
||||
|
||||
for _, c := range checkFloat {
|
||||
assert.True(t, acc.CheckValue(c.name, c.value))
|
||||
}
|
||||
acc.AssertContainsFields(t, "disque", fields)
|
||||
}
|
||||
|
||||
const testOutput = `# Server
|
||||
@@ -2,15 +2,12 @@ package elasticsearch
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/telegraf/internal"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
"github.com/influxdb/telegraf/internal"
|
||||
"github.com/influxdb/telegraf/plugins"
|
||||
)
|
||||
|
||||
const statsPath = "/_nodes/stats"
|
||||
@@ -95,45 +92,25 @@ func (e *Elasticsearch) Description() string {
|
||||
|
||||
// Gather reads the stats from Elasticsearch and writes them to the
// Accumulator.
|
||||
func (e *Elasticsearch) Gather(acc inputs.Accumulator) error {
|
||||
errChan := make(chan error, len(e.Servers))
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(len(e.Servers))
|
||||
|
||||
func (e *Elasticsearch) Gather(acc plugins.Accumulator) error {
|
||||
for _, serv := range e.Servers {
|
||||
go func(s string, acc inputs.Accumulator) {
|
||||
defer wg.Done()
|
||||
var url string
|
||||
if e.Local {
|
||||
url = s + statsPathLocal
|
||||
} else {
|
||||
url = s + statsPath
|
||||
}
|
||||
if err := e.gatherNodeStats(url, acc); err != nil {
|
||||
errChan <- err
|
||||
return
|
||||
}
|
||||
if e.ClusterHealth {
|
||||
e.gatherClusterStats(fmt.Sprintf("%s/_cluster/health?level=indices", s), acc)
|
||||
}
|
||||
}(serv, acc)
|
||||
var url string
|
||||
if e.Local {
|
||||
url = serv + statsPathLocal
|
||||
} else {
|
||||
url = serv + statsPath
|
||||
}
|
||||
if err := e.gatherNodeStats(url, acc); err != nil {
|
||||
return err
|
||||
}
|
||||
if e.ClusterHealth {
|
||||
e.gatherClusterStats(fmt.Sprintf("%s/_cluster/health?level=indices", serv), acc)
|
||||
}
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
close(errChan)
|
||||
// Get all errors and return them as one giant error
|
||||
errStrings := []string{}
|
||||
for err := range errChan {
|
||||
errStrings = append(errStrings, err.Error())
|
||||
}
|
||||
|
||||
if len(errStrings) == 0 {
|
||||
return nil
|
||||
}
|
||||
return errors.New(strings.Join(errStrings, "\n"))
|
||||
return nil
|
||||
}
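One side of this hunk gathers from every server concurrently, funneling failures through a buffered error channel and joining them into a single error. A self-contained sketch of that fan-out pattern (the server list and `gather` callback are placeholders, not the plugin's API):

```go
package main

import (
	"errors"
	"fmt"
	"strings"
	"sync"
)

// gatherAll fans out one goroutine per server, collects any errors on a
// buffered channel, and joins them into a single error, mirroring the
// concurrent Gather shown in the hunk above.
func gatherAll(servers []string, gather func(string) error) error {
	errChan := make(chan error, len(servers))
	var wg sync.WaitGroup
	wg.Add(len(servers))

	for _, s := range servers {
		go func(s string) {
			defer wg.Done()
			if err := gather(s); err != nil {
				errChan <- err
			}
		}(s)
	}

	wg.Wait()
	close(errChan)

	var msgs []string
	for err := range errChan {
		msgs = append(msgs, err.Error())
	}
	if len(msgs) == 0 {
		return nil
	}
	return errors.New(strings.Join(msgs, "\n"))
}

func main() {
	err := gatherAll([]string{"http://es1:9200", "http://es2:9200"}, func(s string) error {
		return fmt.Errorf("%s: unreachable", s) // placeholder gather that always fails
	})
	fmt.Println(err)
}
```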
|
||||
|
||||
func (e *Elasticsearch) gatherNodeStats(url string, acc inputs.Accumulator) error {
|
||||
func (e *Elasticsearch) gatherNodeStats(url string, acc plugins.Accumulator) error {
|
||||
nodeStats := &struct {
|
||||
ClusterName string `json:"cluster_name"`
|
||||
Nodes map[string]*node `json:"nodes"`
|
||||
@@ -178,7 +155,7 @@ func (e *Elasticsearch) gatherNodeStats(url string, acc inputs.Accumulator) erro
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *Elasticsearch) gatherClusterStats(url string, acc inputs.Accumulator) error {
|
||||
func (e *Elasticsearch) gatherClusterStats(url string, acc plugins.Accumulator) error {
|
||||
clusterStats := &clusterHealth{}
|
||||
if err := e.gatherData(url, clusterStats); err != nil {
|
||||
return err
|
||||
@@ -243,7 +220,7 @@ func (e *Elasticsearch) gatherData(url string, v interface{}) error {
|
||||
}
|
||||
|
||||
func init() {
|
||||
inputs.Add("elasticsearch", func() inputs.Input {
|
||||
plugins.Add("elasticsearch", func() plugins.Plugin {
|
||||
return NewElasticsearch()
|
||||
})
|
||||
}
|
||||
@@ -6,8 +6,8 @@ import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
|
||||
"github.com/influxdb/telegraf/testutil"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
@@ -52,15 +52,23 @@ func TestElasticsearch(t *testing.T) {
|
||||
"node_host": "test",
|
||||
}
|
||||
|
||||
acc.AssertContainsTaggedFields(t, "elasticsearch_indices", indicesExpected, tags)
|
||||
acc.AssertContainsTaggedFields(t, "elasticsearch_os", osExpected, tags)
|
||||
acc.AssertContainsTaggedFields(t, "elasticsearch_process", processExpected, tags)
|
||||
acc.AssertContainsTaggedFields(t, "elasticsearch_jvm", jvmExpected, tags)
|
||||
acc.AssertContainsTaggedFields(t, "elasticsearch_thread_pool", threadPoolExpected, tags)
|
||||
acc.AssertContainsTaggedFields(t, "elasticsearch_fs", fsExpected, tags)
|
||||
acc.AssertContainsTaggedFields(t, "elasticsearch_transport", transportExpected, tags)
|
||||
acc.AssertContainsTaggedFields(t, "elasticsearch_http", httpExpected, tags)
|
||||
acc.AssertContainsTaggedFields(t, "elasticsearch_breakers", breakersExpected, tags)
|
||||
testTables := []map[string]float64{
|
||||
indicesExpected,
|
||||
osExpected,
|
||||
processExpected,
|
||||
jvmExpected,
|
||||
threadPoolExpected,
|
||||
fsExpected,
|
||||
transportExpected,
|
||||
httpExpected,
|
||||
breakersExpected,
|
||||
}
|
||||
|
||||
for _, testTable := range testTables {
|
||||
for k, v := range testTable {
|
||||
assert.NoError(t, acc.ValidateTaggedValue(k, v, tags))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestGatherClusterStats(t *testing.T) {
|
||||
@@ -72,15 +80,29 @@ func TestGatherClusterStats(t *testing.T) {
|
||||
var acc testutil.Accumulator
|
||||
require.NoError(t, es.Gather(&acc))
|
||||
|
||||
acc.AssertContainsTaggedFields(t, "elasticsearch_cluster_health",
|
||||
clusterHealthExpected,
|
||||
map[string]string{"name": "elasticsearch_telegraf"})
|
||||
var clusterHealthTests = []struct {
|
||||
measurement string
|
||||
fields map[string]interface{}
|
||||
tags map[string]string
|
||||
}{
|
||||
{
|
||||
"cluster_health",
|
||||
clusterHealthExpected,
|
||||
map[string]string{"name": "elasticsearch_telegraf"},
|
||||
},
|
||||
{
|
||||
"indices",
|
||||
v1IndexExpected,
|
||||
map[string]string{"index": "v1"},
|
||||
},
|
||||
{
|
||||
"indices",
|
||||
v2IndexExpected,
|
||||
map[string]string{"index": "v2"},
|
||||
},
|
||||
}
|
||||
|
||||
acc.AssertContainsTaggedFields(t, "elasticsearch_indices",
|
||||
v1IndexExpected,
|
||||
map[string]string{"index": "v1"})
|
||||
|
||||
acc.AssertContainsTaggedFields(t, "elasticsearch_indices",
|
||||
v2IndexExpected,
|
||||
map[string]string{"index": "v2"})
|
||||
for _, exp := range clusterHealthTests {
|
||||
assert.NoError(t, acc.ValidateTaggedFields(exp.measurement, exp.fields, exp.tags))
|
||||
}
|
||||
}
|
||||
759
plugins/elasticsearch/testdata_test.go
Normal file
@@ -0,0 +1,759 @@
|
||||
package elasticsearch
|
||||
|
||||
const clusterResponse = `
|
||||
{
|
||||
"cluster_name": "elasticsearch_telegraf",
|
||||
"status": "green",
|
||||
"timed_out": false,
|
||||
"number_of_nodes": 3,
|
||||
"number_of_data_nodes": 3,
|
||||
"active_primary_shards": 5,
|
||||
"active_shards": 15,
|
||||
"relocating_shards": 0,
|
||||
"initializing_shards": 0,
|
||||
"unassigned_shards": 0,
|
||||
"indices": {
|
||||
"v1": {
|
||||
"status": "green",
|
||||
"number_of_shards": 10,
|
||||
"number_of_replicas": 1,
|
||||
"active_primary_shards": 10,
|
||||
"active_shards": 20,
|
||||
"relocating_shards": 0,
|
||||
"initializing_shards": 0,
|
||||
"unassigned_shards": 0
|
||||
},
|
||||
"v2": {
|
||||
"status": "red",
|
||||
"number_of_shards": 10,
|
||||
"number_of_replicas": 1,
|
||||
"active_primary_shards": 0,
|
||||
"active_shards": 0,
|
||||
"relocating_shards": 0,
|
||||
"initializing_shards": 0,
|
||||
"unassigned_shards": 20
|
||||
}
|
||||
}
|
||||
}
|
||||
`
|
||||
|
||||
var clusterHealthExpected = map[string]interface{}{
|
||||
"status": "green",
|
||||
"timed_out": false,
|
||||
"number_of_nodes": 3,
|
||||
"number_of_data_nodes": 3,
|
||||
"active_primary_shards": 5,
|
||||
"active_shards": 15,
|
||||
"relocating_shards": 0,
|
||||
"initializing_shards": 0,
|
||||
"unassigned_shards": 0,
|
||||
}
|
||||
|
||||
var v1IndexExpected = map[string]interface{}{
|
||||
"status": "green",
|
||||
"number_of_shards": 10,
|
||||
"number_of_replicas": 1,
|
||||
"active_primary_shards": 10,
|
||||
"active_shards": 20,
|
||||
"relocating_shards": 0,
|
||||
"initializing_shards": 0,
|
||||
"unassigned_shards": 0,
|
||||
}
|
||||
|
||||
var v2IndexExpected = map[string]interface{}{
|
||||
"status": "red",
|
||||
"number_of_shards": 10,
|
||||
"number_of_replicas": 1,
|
||||
"active_primary_shards": 0,
|
||||
"active_shards": 0,
|
||||
"relocating_shards": 0,
|
||||
"initializing_shards": 0,
|
||||
"unassigned_shards": 20,
|
||||
}
|
||||
|
||||
const statsResponse = `
|
||||
{
|
||||
"cluster_name": "es-testcluster",
|
||||
"nodes": {
|
||||
"SDFsfSDFsdfFSDSDfSFDSDF": {
|
||||
"timestamp": 1436365550135,
|
||||
"name": "test.host.com",
|
||||
"transport_address": "inet[/127.0.0.1:9300]",
|
||||
"host": "test",
|
||||
"ip": [
|
||||
"inet[/127.0.0.1:9300]",
|
||||
"NONE"
|
||||
],
|
||||
"attributes": {
|
||||
"master": "true"
|
||||
},
|
||||
"indices": {
|
||||
"docs": {
|
||||
"count": 29652,
|
||||
"deleted": 5229
|
||||
},
|
||||
"store": {
|
||||
"size_in_bytes": 37715234,
|
||||
"throttle_time_in_millis": 215
|
||||
},
|
||||
"indexing": {
|
||||
"index_total": 84790,
|
||||
"index_time_in_millis": 29680,
|
||||
"index_current": 0,
|
||||
"delete_total": 13879,
|
||||
"delete_time_in_millis": 1139,
|
||||
"delete_current": 0,
|
||||
"noop_update_total": 0,
|
||||
"is_throttled": false,
|
||||
"throttle_time_in_millis": 0
|
||||
},
|
||||
"get": {
|
||||
"total": 1,
|
||||
"time_in_millis": 2,
|
||||
"exists_total": 0,
|
||||
"exists_time_in_millis": 0,
|
||||
"missing_total": 1,
|
||||
"missing_time_in_millis": 2,
|
||||
"current": 0
|
||||
},
|
||||
"search": {
|
||||
"open_contexts": 0,
|
||||
"query_total": 1452,
|
||||
"query_time_in_millis": 5695,
|
||||
"query_current": 0,
|
||||
"fetch_total": 414,
|
||||
"fetch_time_in_millis": 146,
|
||||
"fetch_current": 0
|
||||
},
|
||||
"merges": {
|
||||
"current": 0,
|
||||
"current_docs": 0,
|
||||
"current_size_in_bytes": 0,
|
||||
"total": 133,
|
||||
"total_time_in_millis": 21060,
|
||||
"total_docs": 203672,
|
||||
"total_size_in_bytes": 142900226
|
||||
},
|
||||
"refresh": {
|
||||
"total": 1076,
|
||||
"total_time_in_millis": 20078
|
||||
},
|
||||
"flush": {
|
||||
"total": 115,
|
||||
"total_time_in_millis": 2401
|
||||
},
|
||||
"warmer": {
|
||||
"current": 0,
|
||||
"total": 2319,
|
||||
"total_time_in_millis": 448
|
||||
},
|
||||
"filter_cache": {
|
||||
"memory_size_in_bytes": 7384,
|
||||
"evictions": 0
|
||||
},
|
||||
"id_cache": {
|
||||
"memory_size_in_bytes": 0
|
||||
},
|
||||
"fielddata": {
|
||||
"memory_size_in_bytes": 12996,
|
||||
"evictions": 0
|
||||
},
|
||||
"percolate": {
|
||||
"total": 0,
|
||||
"time_in_millis": 0,
|
||||
"current": 0,
|
||||
"memory_size_in_bytes": -1,
|
||||
"memory_size": "-1b",
|
||||
"queries": 0
|
||||
},
|
||||
"completion": {
|
||||
"size_in_bytes": 0
|
||||
},
|
||||
"segments": {
|
||||
"count": 134,
|
||||
"memory_in_bytes": 1285212,
|
||||
"index_writer_memory_in_bytes": 0,
|
||||
"index_writer_max_memory_in_bytes": 172368955,
|
||||
"version_map_memory_in_bytes": 611844,
|
||||
"fixed_bit_set_memory_in_bytes": 0
|
||||
},
|
||||
"translog": {
|
||||
"operations": 17702,
|
||||
"size_in_bytes": 17
|
||||
},
|
||||
"suggest": {
|
||||
"total": 0,
|
||||
"time_in_millis": 0,
|
||||
"current": 0
|
||||
},
|
||||
"query_cache": {
|
||||
"memory_size_in_bytes": 0,
|
||||
"evictions": 0,
|
||||
"hit_count": 0,
|
||||
"miss_count": 0
|
||||
},
|
||||
"recovery": {
|
||||
"current_as_source": 0,
|
||||
"current_as_target": 0,
|
||||
"throttle_time_in_millis": 0
|
||||
}
|
||||
},
|
||||
"os": {
|
||||
"timestamp": 1436460392944,
|
||||
"load_average": [
|
||||
0.01,
|
||||
0.04,
|
||||
0.05
|
||||
],
|
||||
"mem": {
|
||||
"free_in_bytes": 477761536,
|
||||
"used_in_bytes": 1621868544,
|
||||
"free_percent": 74,
|
||||
"used_percent": 25,
|
||||
"actual_free_in_bytes": 1565470720,
|
||||
"actual_used_in_bytes": 534159360
|
||||
},
|
||||
"swap": {
|
||||
"used_in_bytes": 0,
|
||||
"free_in_bytes": 487997440
|
||||
}
|
||||
},
|
||||
"process": {
|
||||
"timestamp": 1436460392945,
|
||||
"open_file_descriptors": 160,
|
||||
"cpu": {
|
||||
"percent": 2,
|
||||
"sys_in_millis": 1870,
|
||||
"user_in_millis": 13610,
|
||||
"total_in_millis": 15480
|
||||
},
|
||||
"mem": {
|
||||
"total_virtual_in_bytes": 4747890688
|
||||
}
|
||||
},
|
||||
"jvm": {
|
||||
"timestamp": 1436460392945,
|
||||
"uptime_in_millis": 202245,
|
||||
"mem": {
|
||||
"heap_used_in_bytes": 52709568,
|
||||
"heap_used_percent": 5,
|
||||
"heap_committed_in_bytes": 259522560,
|
||||
"heap_max_in_bytes": 1038876672,
|
||||
"non_heap_used_in_bytes": 39634576,
|
||||
"non_heap_committed_in_bytes": 40841216,
|
||||
"pools": {
|
||||
"young": {
|
||||
"used_in_bytes": 32685760,
|
||||
"max_in_bytes": 279183360,
|
||||
"peak_used_in_bytes": 71630848,
|
||||
"peak_max_in_bytes": 279183360
|
||||
},
|
||||
"survivor": {
|
||||
"used_in_bytes": 8912880,
|
||||
"max_in_bytes": 34865152,
|
||||
"peak_used_in_bytes": 8912888,
|
||||
"peak_max_in_bytes": 34865152
|
||||
},
|
||||
"old": {
|
||||
"used_in_bytes": 11110928,
|
||||
"max_in_bytes": 724828160,
|
||||
"peak_used_in_bytes": 14354608,
|
||||
"peak_max_in_bytes": 724828160
|
||||
}
|
||||
}
|
||||
},
|
||||
"threads": {
|
||||
"count": 44,
|
||||
"peak_count": 45
|
||||
},
|
||||
"gc": {
|
||||
"collectors": {
|
||||
"young": {
|
||||
"collection_count": 2,
|
||||
"collection_time_in_millis": 98
|
||||
},
|
||||
"old": {
|
||||
"collection_count": 1,
|
||||
"collection_time_in_millis": 24
|
||||
}
|
||||
}
|
||||
},
|
||||
"buffer_pools": {
|
||||
"direct": {
|
||||
"count": 40,
|
||||
"used_in_bytes": 6304239,
|
||||
"total_capacity_in_bytes": 6304239
|
||||
},
|
||||
"mapped": {
|
||||
"count": 0,
|
||||
"used_in_bytes": 0,
|
||||
"total_capacity_in_bytes": 0
|
||||
}
|
||||
}
|
||||
},
|
||||
"thread_pool": {
|
||||
"percolate": {
|
||||
"threads": 123,
|
||||
"queue": 23,
|
||||
"active": 13,
|
||||
"rejected": 235,
|
||||
"largest": 23,
|
||||
"completed": 33
|
||||
},
|
||||
"fetch_shard_started": {
|
||||
"threads": 3,
|
||||
"queue": 1,
|
||||
"active": 5,
|
||||
"rejected": 6,
|
||||
"largest": 4,
|
||||
"completed": 54
|
||||
},
|
||||
"listener": {
|
||||
"threads": 1,
|
||||
"queue": 2,
|
||||
"active": 4,
|
||||
"rejected": 8,
|
||||
"largest": 1,
|
||||
"completed": 1
|
||||
},
|
||||
"index": {
|
||||
"threads": 6,
|
||||
"queue": 8,
|
||||
"active": 4,
|
||||
"rejected": 2,
|
||||
"largest": 3,
|
||||
"completed": 6
|
||||
},
|
||||
"refresh": {
|
||||
"threads": 23,
|
||||
"queue": 7,
|
||||
"active": 3,
|
||||
"rejected": 4,
|
||||
"largest": 8,
|
||||
"completed": 3
|
||||
},
|
||||
"suggest": {
|
||||
"threads": 2,
|
||||
"queue": 7,
|
||||
"active": 2,
|
||||
"rejected": 1,
|
||||
"largest": 8,
|
||||
"completed": 3
|
||||
},
|
||||
"generic": {
|
||||
"threads": 1,
|
||||
"queue": 4,
|
||||
"active": 6,
|
||||
"rejected": 3,
|
||||
"largest": 2,
|
||||
"completed": 27
|
||||
},
|
||||
"warmer": {
|
||||
"threads": 2,
|
||||
"queue": 7,
|
||||
"active": 3,
|
||||
"rejected": 2,
|
||||
"largest": 3,
|
||||
"completed": 1
|
||||
},
|
||||
"search": {
|
||||
"threads": 5,
|
||||
"queue": 7,
|
||||
"active": 2,
|
||||
"rejected": 7,
|
||||
"largest": 2,
|
||||
"completed": 4
|
||||
},
|
||||
"flush": {
|
||||
"threads": 3,
|
||||
"queue": 8,
|
||||
"active": 0,
|
||||
"rejected": 1,
|
||||
"largest": 5,
|
||||
"completed": 3
|
||||
},
|
||||
"optimize": {
|
||||
"threads": 3,
|
||||
"queue": 4,
|
||||
"active": 1,
|
||||
"rejected": 2,
|
||||
"largest": 7,
|
||||
"completed": 3
|
||||
},
|
||||
"fetch_shard_store": {
|
||||
"threads": 1,
|
||||
"queue": 7,
|
||||
"active": 4,
|
||||
"rejected": 2,
|
||||
"largest": 4,
|
||||
"completed": 1
|
||||
},
|
||||
"management": {
|
||||
"threads": 2,
|
||||
"queue": 3,
|
||||
"active": 1,
|
||||
"rejected": 6,
|
||||
"largest": 2,
|
||||
"completed": 22
|
||||
},
|
||||
"get": {
|
||||
"threads": 1,
|
||||
"queue": 8,
|
||||
"active": 4,
|
||||
"rejected": 3,
|
||||
"largest": 2,
|
||||
"completed": 1
|
||||
},
|
||||
"merge": {
|
||||
"threads": 6,
|
||||
"queue": 4,
|
||||
"active": 5,
|
||||
"rejected": 2,
|
||||
"largest": 5,
|
||||
"completed": 1
|
||||
},
|
||||
"bulk": {
|
||||
"threads": 4,
|
||||
"queue": 5,
|
||||
"active": 7,
|
||||
"rejected": 3,
|
||||
"largest": 1,
|
||||
"completed": 4
|
||||
},
|
||||
"snapshot": {
|
||||
"threads": 8,
|
||||
"queue": 5,
|
||||
"active": 6,
|
||||
"rejected": 2,
|
||||
"largest": 1,
|
||||
"completed": 0
|
||||
}
|
||||
},
|
||||
"fs": {
|
||||
"timestamp": 1436460392946,
|
||||
"total": {
|
||||
"total_in_bytes": 19507089408,
|
||||
"free_in_bytes": 16909316096,
|
||||
"available_in_bytes": 15894814720
|
||||
},
|
||||
"data": [
|
||||
{
|
||||
"path": "/usr/share/elasticsearch/data/elasticsearch/nodes/0",
|
||||
"mount": "/usr/share/elasticsearch/data",
|
||||
"type": "ext4",
|
||||
"total_in_bytes": 19507089408,
|
||||
"free_in_bytes": 16909316096,
|
||||
"available_in_bytes": 15894814720
|
||||
}
|
||||
]
|
||||
},
|
||||
"transport": {
|
||||
"server_open": 13,
|
||||
"rx_count": 6,
|
||||
"rx_size_in_bytes": 1380,
|
||||
"tx_count": 6,
|
||||
"tx_size_in_bytes": 1380
|
||||
},
|
||||
"http": {
|
||||
"current_open": 3,
|
||||
"total_opened": 3
|
||||
},
|
||||
"breakers": {
|
||||
"fielddata": {
|
||||
"limit_size_in_bytes": 623326003,
|
||||
"limit_size": "594.4mb",
|
||||
"estimated_size_in_bytes": 0,
|
||||
"estimated_size": "0b",
|
||||
"overhead": 1.03,
|
||||
"tripped": 0
|
||||
},
|
||||
"request": {
|
||||
"limit_size_in_bytes": 415550668,
|
||||
"limit_size": "396.2mb",
|
||||
"estimated_size_in_bytes": 0,
|
||||
"estimated_size": "0b",
|
||||
"overhead": 1.0,
|
||||
"tripped": 0
|
||||
},
|
||||
"parent": {
|
||||
"limit_size_in_bytes": 727213670,
|
||||
"limit_size": "693.5mb",
|
||||
"estimated_size_in_bytes": 0,
|
||||
"estimated_size": "0b",
|
||||
"overhead": 1.0,
|
||||
"tripped": 0
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
`
|
||||
|
||||
var indicesExpected = map[string]float64{
|
||||
"indices_id_cache_memory_size_in_bytes": 0,
|
||||
"indices_completion_size_in_bytes": 0,
|
||||
"indices_suggest_total": 0,
|
||||
"indices_suggest_time_in_millis": 0,
|
||||
"indices_suggest_current": 0,
|
||||
"indices_query_cache_memory_size_in_bytes": 0,
|
||||
"indices_query_cache_evictions": 0,
|
||||
"indices_query_cache_hit_count": 0,
|
||||
"indices_query_cache_miss_count": 0,
|
||||
"indices_store_size_in_bytes": 37715234,
|
||||
"indices_store_throttle_time_in_millis": 215,
|
||||
"indices_merges_current_docs": 0,
|
||||
"indices_merges_current_size_in_bytes": 0,
|
||||
"indices_merges_total": 133,
|
||||
"indices_merges_total_time_in_millis": 21060,
|
||||
"indices_merges_total_docs": 203672,
|
||||
"indices_merges_total_size_in_bytes": 142900226,
|
||||
"indices_merges_current": 0,
|
||||
"indices_filter_cache_memory_size_in_bytes": 7384,
|
||||
"indices_filter_cache_evictions": 0,
|
||||
"indices_indexing_index_total": 84790,
|
||||
"indices_indexing_index_time_in_millis": 29680,
|
||||
"indices_indexing_index_current": 0,
|
||||
"indices_indexing_noop_update_total": 0,
|
||||
"indices_indexing_throttle_time_in_millis": 0,
|
||||
"indices_indexing_delete_total": 13879,
|
||||
"indices_indexing_delete_time_in_millis": 1139,
|
||||
"indices_indexing_delete_current": 0,
|
||||
"indices_get_exists_time_in_millis": 0,
|
||||
"indices_get_missing_total": 1,
|
||||
"indices_get_missing_time_in_millis": 2,
|
||||
"indices_get_current": 0,
|
||||
"indices_get_total": 1,
|
||||
"indices_get_time_in_millis": 2,
|
||||
"indices_get_exists_total": 0,
|
||||
"indices_refresh_total": 1076,
|
||||
"indices_refresh_total_time_in_millis": 20078,
|
||||
"indices_percolate_current": 0,
|
||||
"indices_percolate_memory_size_in_bytes": -1,
|
||||
"indices_percolate_queries": 0,
|
||||
"indices_percolate_total": 0,
|
||||
"indices_percolate_time_in_millis": 0,
|
||||
"indices_translog_operations": 17702,
|
||||
"indices_translog_size_in_bytes": 17,
|
||||
"indices_recovery_current_as_source": 0,
|
||||
"indices_recovery_current_as_target": 0,
|
||||
"indices_recovery_throttle_time_in_millis": 0,
|
||||
"indices_docs_count": 29652,
|
||||
"indices_docs_deleted": 5229,
|
||||
"indices_flush_total_time_in_millis": 2401,
|
||||
"indices_flush_total": 115,
|
||||
"indices_fielddata_memory_size_in_bytes": 12996,
|
||||
"indices_fielddata_evictions": 0,
|
||||
"indices_search_fetch_current": 0,
|
||||
"indices_search_open_contexts": 0,
|
||||
"indices_search_query_total": 1452,
|
||||
"indices_search_query_time_in_millis": 5695,
|
||||
"indices_search_query_current": 0,
|
||||
"indices_search_fetch_total": 414,
|
||||
"indices_search_fetch_time_in_millis": 146,
|
||||
"indices_warmer_current": 0,
|
||||
"indices_warmer_total": 2319,
|
||||
"indices_warmer_total_time_in_millis": 448,
|
||||
"indices_segments_count": 134,
|
||||
"indices_segments_memory_in_bytes": 1285212,
|
||||
"indices_segments_index_writer_memory_in_bytes": 0,
|
||||
"indices_segments_index_writer_max_memory_in_bytes": 172368955,
|
||||
"indices_segments_version_map_memory_in_bytes": 611844,
|
||||
"indices_segments_fixed_bit_set_memory_in_bytes": 0,
|
||||
}
|
||||
|
||||
var osExpected = map[string]float64{
|
||||
"os_swap_used_in_bytes": 0,
|
||||
"os_swap_free_in_bytes": 487997440,
|
||||
"os_timestamp": 1436460392944,
|
||||
"os_mem_free_percent": 74,
|
||||
"os_mem_used_percent": 25,
|
||||
"os_mem_actual_free_in_bytes": 1565470720,
|
||||
"os_mem_actual_used_in_bytes": 534159360,
|
||||
"os_mem_free_in_bytes": 477761536,
|
||||
"os_mem_used_in_bytes": 1621868544,
|
||||
}
|
||||
|
||||
var processExpected = map[string]float64{
|
||||
"process_mem_total_virtual_in_bytes": 4747890688,
|
||||
"process_timestamp": 1436460392945,
|
||||
"process_open_file_descriptors": 160,
|
||||
"process_cpu_total_in_millis": 15480,
|
||||
"process_cpu_percent": 2,
|
||||
"process_cpu_sys_in_millis": 1870,
|
||||
"process_cpu_user_in_millis": 13610,
|
||||
}
|
||||
|
||||
var jvmExpected = map[string]float64{
|
||||
"jvm_timestamp": 1436460392945,
|
||||
"jvm_uptime_in_millis": 202245,
|
||||
"jvm_mem_non_heap_used_in_bytes": 39634576,
|
||||
"jvm_mem_non_heap_committed_in_bytes": 40841216,
|
||||
"jvm_mem_pools_young_max_in_bytes": 279183360,
|
||||
"jvm_mem_pools_young_peak_used_in_bytes": 71630848,
|
||||
"jvm_mem_pools_young_peak_max_in_bytes": 279183360,
|
||||
"jvm_mem_pools_young_used_in_bytes": 32685760,
|
||||
"jvm_mem_pools_survivor_peak_used_in_bytes": 8912888,
|
||||
"jvm_mem_pools_survivor_peak_max_in_bytes": 34865152,
|
||||
"jvm_mem_pools_survivor_used_in_bytes": 8912880,
|
||||
"jvm_mem_pools_survivor_max_in_bytes": 34865152,
|
||||
"jvm_mem_pools_old_peak_max_in_bytes": 724828160,
|
||||
"jvm_mem_pools_old_used_in_bytes": 11110928,
|
||||
"jvm_mem_pools_old_max_in_bytes": 724828160,
|
||||
"jvm_mem_pools_old_peak_used_in_bytes": 14354608,
|
||||
"jvm_mem_heap_used_in_bytes": 52709568,
|
||||
"jvm_mem_heap_used_percent": 5,
|
||||
"jvm_mem_heap_committed_in_bytes": 259522560,
|
||||
"jvm_mem_heap_max_in_bytes": 1038876672,
|
||||
"jvm_threads_peak_count": 45,
|
||||
"jvm_threads_count": 44,
|
||||
"jvm_gc_collectors_young_collection_count": 2,
|
||||
"jvm_gc_collectors_young_collection_time_in_millis": 98,
|
||||
"jvm_gc_collectors_old_collection_count": 1,
|
||||
"jvm_gc_collectors_old_collection_time_in_millis": 24,
|
||||
"jvm_buffer_pools_direct_count": 40,
|
||||
"jvm_buffer_pools_direct_used_in_bytes": 6304239,
|
||||
"jvm_buffer_pools_direct_total_capacity_in_bytes": 6304239,
|
||||
"jvm_buffer_pools_mapped_count": 0,
|
||||
"jvm_buffer_pools_mapped_used_in_bytes": 0,
|
||||
"jvm_buffer_pools_mapped_total_capacity_in_bytes": 0,
|
||||
}
|
||||
|
||||
var threadPoolExpected = map[string]float64{
|
||||
"thread_pool_merge_threads": 6,
|
||||
"thread_pool_merge_queue": 4,
|
||||
"thread_pool_merge_active": 5,
|
||||
"thread_pool_merge_rejected": 2,
|
||||
"thread_pool_merge_largest": 5,
|
||||
"thread_pool_merge_completed": 1,
|
||||
"thread_pool_bulk_threads": 4,
|
||||
"thread_pool_bulk_queue": 5,
|
||||
"thread_pool_bulk_active": 7,
|
||||
"thread_pool_bulk_rejected": 3,
|
||||
"thread_pool_bulk_largest": 1,
|
||||
"thread_pool_bulk_completed": 4,
|
||||
"thread_pool_warmer_threads": 2,
|
||||
"thread_pool_warmer_queue": 7,
|
||||
"thread_pool_warmer_active": 3,
|
||||
"thread_pool_warmer_rejected": 2,
|
||||
"thread_pool_warmer_largest": 3,
|
||||
"thread_pool_warmer_completed": 1,
|
||||
"thread_pool_get_largest": 2,
|
||||
"thread_pool_get_completed": 1,
|
||||
"thread_pool_get_threads": 1,
|
||||
"thread_pool_get_queue": 8,
|
||||
"thread_pool_get_active": 4,
|
||||
"thread_pool_get_rejected": 3,
|
||||
"thread_pool_index_threads": 6,
|
||||
"thread_pool_index_queue": 8,
|
||||
"thread_pool_index_active": 4,
|
||||
"thread_pool_index_rejected": 2,
|
||||
"thread_pool_index_largest": 3,
|
||||
"thread_pool_index_completed": 6,
|
||||
"thread_pool_suggest_threads": 2,
|
||||
"thread_pool_suggest_queue": 7,
|
||||
"thread_pool_suggest_active": 2,
|
||||
"thread_pool_suggest_rejected": 1,
|
||||
"thread_pool_suggest_largest": 8,
|
||||
"thread_pool_suggest_completed": 3,
|
||||
"thread_pool_fetch_shard_store_queue": 7,
|
||||
"thread_pool_fetch_shard_store_active": 4,
|
||||
"thread_pool_fetch_shard_store_rejected": 2,
|
||||
"thread_pool_fetch_shard_store_largest": 4,
|
||||
"thread_pool_fetch_shard_store_completed": 1,
|
||||
"thread_pool_fetch_shard_store_threads": 1,
|
||||
"thread_pool_management_threads": 2,
|
||||
"thread_pool_management_queue": 3,
|
||||
"thread_pool_management_active": 1,
|
||||
"thread_pool_management_rejected": 6,
|
||||
"thread_pool_management_largest": 2,
|
||||
"thread_pool_management_completed": 22,
|
||||
"thread_pool_percolate_queue": 23,
|
||||
"thread_pool_percolate_active": 13,
|
||||
"thread_pool_percolate_rejected": 235,
|
||||
"thread_pool_percolate_largest": 23,
|
||||
"thread_pool_percolate_completed": 33,
|
||||
"thread_pool_percolate_threads": 123,
|
||||
"thread_pool_listener_active": 4,
|
||||
"thread_pool_listener_rejected": 8,
|
||||
"thread_pool_listener_largest": 1,
|
||||
"thread_pool_listener_completed": 1,
|
||||
"thread_pool_listener_threads": 1,
|
||||
"thread_pool_listener_queue": 2,
|
||||
"thread_pool_search_rejected": 7,
|
||||
"thread_pool_search_largest": 2,
|
||||
"thread_pool_search_completed": 4,
|
||||
"thread_pool_search_threads": 5,
|
||||
"thread_pool_search_queue": 7,
|
||||
"thread_pool_search_active": 2,
|
||||
"thread_pool_fetch_shard_started_threads": 3,
|
||||
"thread_pool_fetch_shard_started_queue": 1,
|
||||
"thread_pool_fetch_shard_started_active": 5,
|
||||
"thread_pool_fetch_shard_started_rejected": 6,
|
||||
"thread_pool_fetch_shard_started_largest": 4,
|
||||
"thread_pool_fetch_shard_started_completed": 54,
|
||||
"thread_pool_refresh_rejected": 4,
|
||||
"thread_pool_refresh_largest": 8,
|
||||
"thread_pool_refresh_completed": 3,
|
||||
"thread_pool_refresh_threads": 23,
|
||||
"thread_pool_refresh_queue": 7,
|
||||
"thread_pool_refresh_active": 3,
|
||||
"thread_pool_optimize_threads": 3,
|
||||
"thread_pool_optimize_queue": 4,
|
||||
"thread_pool_optimize_active": 1,
|
||||
"thread_pool_optimize_rejected": 2,
|
||||
"thread_pool_optimize_largest": 7,
|
||||
"thread_pool_optimize_completed": 3,
|
||||
"thread_pool_snapshot_largest": 1,
|
||||
"thread_pool_snapshot_completed": 0,
|
||||
"thread_pool_snapshot_threads": 8,
|
||||
"thread_pool_snapshot_queue": 5,
|
||||
"thread_pool_snapshot_active": 6,
|
||||
"thread_pool_snapshot_rejected": 2,
|
||||
"thread_pool_generic_threads": 1,
|
||||
"thread_pool_generic_queue": 4,
|
||||
"thread_pool_generic_active": 6,
|
||||
"thread_pool_generic_rejected": 3,
|
||||
"thread_pool_generic_largest": 2,
|
||||
"thread_pool_generic_completed": 27,
|
||||
"thread_pool_flush_threads": 3,
|
||||
"thread_pool_flush_queue": 8,
|
||||
"thread_pool_flush_active": 0,
|
||||
"thread_pool_flush_rejected": 1,
|
||||
"thread_pool_flush_largest": 5,
|
||||
"thread_pool_flush_completed": 3,
|
||||
}
|
||||
|
||||
var fsExpected = map[string]float64{
|
||||
"fs_timestamp": 1436460392946,
|
||||
"fs_total_free_in_bytes": 16909316096,
|
||||
"fs_total_available_in_bytes": 15894814720,
|
||||
"fs_total_total_in_bytes": 19507089408,
|
||||
}
|
||||
|
||||
var transportExpected = map[string]float64{
|
||||
"transport_server_open": 13,
|
||||
"transport_rx_count": 6,
|
||||
"transport_rx_size_in_bytes": 1380,
|
||||
"transport_tx_count": 6,
|
||||
"transport_tx_size_in_bytes": 1380,
|
||||
}
|
||||
|
||||
var httpExpected = map[string]float64{
|
||||
"http_current_open": 3,
|
||||
"http_total_opened": 3,
|
||||
}
|
||||
|
||||
var breakersExpected = map[string]float64{
|
||||
"breakers_fielddata_estimated_size_in_bytes": 0,
|
||||
"breakers_fielddata_overhead": 1.03,
|
||||
"breakers_fielddata_tripped": 0,
|
||||
"breakers_fielddata_limit_size_in_bytes": 623326003,
|
||||
"breakers_request_estimated_size_in_bytes": 0,
|
||||
"breakers_request_overhead": 1.0,
|
||||
"breakers_request_tripped": 0,
|
||||
"breakers_request_limit_size_in_bytes": 415550668,
|
||||
"breakers_parent_overhead": 1.0,
|
||||
"breakers_parent_tripped": 0,
|
||||
"breakers_parent_limit_size_in_bytes": 727213670,
|
||||
"breakers_parent_estimated_size_in_bytes": 0,
|
||||
}
|
||||
42
plugins/exec/README.md
Normal file
@@ -0,0 +1,42 @@
|
||||
# Exec Plugin
|
||||
|
||||
The exec plugin executes arbitrary commands that output JSON, flattens the JSON, and collects
every numeric value it finds, treating them as floats.
|
||||
|
||||
For example, if you have a JSON-returning command called mycollector, you could
set up the exec plugin with:
|
||||
|
||||
```
|
||||
[[exec.commands]]
|
||||
command = "/usr/bin/mycollector --output=json"
|
||||
name = "mycollector"
|
||||
interval = 10
|
||||
```
|
||||
|
||||
The name is used as a prefix for the measurements.
|
||||
|
||||
The interval determines how often a particular command should be run: each time the exec
plugin runs, it executes a command only if at least `interval` seconds have passed since
that command was last run (see the sketch below).
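A hedged sketch of that per-command interval check; the field and method names are illustrative (the plugin's own `Command` type with `Interval` and `lastRunAt` appears in the test file later in this diff):

```go
package main

import (
	"fmt"
	"time"
)

// command carries the per-command state the check needs: how many seconds must
// pass between runs, and when the command last ran.
type command struct {
	Interval  int
	lastRunAt time.Time
}

// shouldRun reports whether at least Interval seconds have passed since lastRunAt.
func (c *command) shouldRun(now time.Time) bool {
	return now.Sub(c.lastRunAt) >= time.Duration(c.Interval)*time.Second
}

func main() {
	c := command{Interval: 10, lastRunAt: time.Now().Add(-5 * time.Second)}
	fmt.Println(c.shouldRun(time.Now())) // false: only ~5s have elapsed
}
```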
|
||||
|
||||
|
||||
# Sample
|
||||
|
||||
Let's say that we have a command named "mycollector", which gives the following output:
|
||||
```json
|
||||
{
|
||||
"a": 0.5,
|
||||
"b": {
|
||||
"c": "some text",
|
||||
"d": 0.1,
|
||||
"e": 5
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
The collected metrics will be:
|
||||
```
|
||||
exec_mycollector_a value=0.5
|
||||
exec_mycollector_b_d value=0.1
|
||||
exec_mycollector_b_e value=5
|
||||
```
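A hedged Go sketch of the flattening described above: nested keys are joined with underscores and only numeric leaves are kept (array handling is omitted; this is an illustration, not the plugin's exact code):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// flatten walks arbitrary decoded JSON and keeps only numeric leaves,
// joining nested keys with underscores, e.g. "b" -> "d" becomes "b_d".
func flatten(prefix string, v interface{}, out map[string]float64) {
	switch val := v.(type) {
	case map[string]interface{}:
		for k, child := range val {
			key := k
			if prefix != "" {
				key = prefix + "_" + k
			}
			flatten(key, child, out)
		}
	case float64: // encoding/json decodes all JSON numbers as float64
		out[prefix] = val
	}
}

func main() {
	raw := []byte(`{"a": 0.5, "b": {"c": "some text", "d": 0.1, "e": 5}}`)
	var parsed interface{}
	if err := json.Unmarshal(raw, &parsed); err != nil {
		panic(err)
	}
	fields := map[string]float64{}
	flatten("", parsed, fields)
	for k, v := range fields {
		fmt.Printf("exec_mycollector_%s value=%v\n", k, v)
	}
}
```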
|
||||
@@ -8,23 +8,21 @@ import (
|
||||
|
||||
"github.com/gonuts/go-shellquote"
|
||||
|
||||
"github.com/influxdata/telegraf/internal"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
"github.com/influxdb/telegraf/internal"
|
||||
"github.com/influxdb/telegraf/plugins"
|
||||
)
|
||||
|
||||
const sampleConfig = `
|
||||
# NOTE: This plugin only reads numerical measurements; strings and booleans
# will be ignored.
|
||||
|
||||
# the command to run
|
||||
command = "/usr/bin/mycollector --foo=bar"
|
||||
|
||||
# measurement name suffix (for separating different commands)
|
||||
name_suffix = "_mycollector"
|
||||
# name of the command (used as a prefix for measurements)
|
||||
name = "mycollector"
|
||||
`
|
||||
|
||||
type Exec struct {
|
||||
Command string
|
||||
Name string
|
||||
|
||||
runner Runner
|
||||
}
|
||||
@@ -64,7 +62,7 @@ func (e *Exec) Description() string {
|
||||
return "Read flattened metrics from one or more commands that output JSON to stdout"
|
||||
}
|
||||
|
||||
func (e *Exec) Gather(acc inputs.Accumulator) error {
|
||||
func (e *Exec) Gather(acc plugins.Accumulator) error {
|
||||
out, err := e.runner.Run(e)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -83,12 +81,18 @@ func (e *Exec) Gather(acc inputs.Accumulator) error {
|
||||
return err
|
||||
}
|
||||
|
||||
acc.AddFields("exec", f.Fields, nil)
|
||||
var msrmnt_name string
|
||||
if e.Name == "" {
|
||||
msrmnt_name = "exec"
|
||||
} else {
|
||||
msrmnt_name = "exec_" + e.Name
|
||||
}
|
||||
acc.AddFields(msrmnt_name, f.Fields, nil)
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
inputs.Add("exec", func() inputs.Input {
|
||||
plugins.Add("exec", func() plugins.Plugin {
|
||||
return NewExec()
|
||||
})
|
||||
}
|
||||
262
plugins/exec/exec_test.go
Normal file
@@ -0,0 +1,262 @@
|
||||
package exec
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/influxdb/telegraf/testutil"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"math"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Midnight 9/22/2015
|
||||
const baseTimeSeconds = 1442905200
|
||||
|
||||
const validJson = `
|
||||
{
|
||||
"status": "green",
|
||||
"num_processes": 82,
|
||||
"cpu": {
|
||||
"status": "red",
|
||||
"nil_status": null,
|
||||
"used": 8234,
|
||||
"free": 32
|
||||
},
|
||||
"percent": 0.81,
|
||||
"users": [0, 1, 2, 3]
|
||||
}`
|
||||
|
||||
const malformedJson = `
|
||||
{
|
||||
"status": "green",
|
||||
`
|
||||
|
||||
type runnerMock struct {
|
||||
out []byte
|
||||
err error
|
||||
}
|
||||
|
||||
type clockMock struct {
|
||||
now time.Time
|
||||
}
|
||||
|
||||
func newRunnerMock(out []byte, err error) Runner {
|
||||
return &runnerMock{
|
||||
out: out,
|
||||
err: err,
|
||||
}
|
||||
}
|
||||
|
||||
func (r runnerMock) Run(command *Command) ([]byte, error) {
|
||||
if r.err != nil {
|
||||
return nil, r.err
|
||||
}
|
||||
return r.out, nil
|
||||
}
|
||||
|
||||
func newClockMock(now time.Time) Clock {
|
||||
return &clockMock{now: now}
|
||||
}
|
||||
|
||||
func (c clockMock) Now() time.Time {
|
||||
return c.now
|
||||
}
|
||||
|
||||
func TestExec(t *testing.T) {
|
||||
runner := newRunnerMock([]byte(validJson), nil)
|
||||
clock := newClockMock(time.Unix(baseTimeSeconds+20, 0))
|
||||
command := Command{
|
||||
Command: "testcommand arg1",
|
||||
Name: "mycollector",
|
||||
Interval: 10,
|
||||
lastRunAt: time.Unix(baseTimeSeconds, 0),
|
||||
}
|
||||
|
||||
e := &Exec{
|
||||
runner: runner,
|
||||
clock: clock,
|
||||
Commands: []*Command{&command},
|
||||
}
|
||||
|
||||
var acc testutil.Accumulator
|
||||
initialPoints := len(acc.Points)
|
||||
err := e.Gather(&acc)
|
||||
deltaPoints := len(acc.Points) - initialPoints
|
||||
require.NoError(t, err)
|
||||
|
||||
checkFloat := []struct {
|
||||
name string
|
||||
value float64
|
||||
}{
|
||||
{"mycollector_num_processes", 82},
|
||||
{"mycollector_cpu_used", 8234},
|
||||
{"mycollector_cpu_free", 32},
|
||||
{"mycollector_percent", 0.81},
|
||||
}
|
||||
|
||||
for _, c := range checkFloat {
|
||||
assert.True(t, acc.CheckValue(c.name, c.value))
|
||||
}
|
||||
|
||||
assert.Equal(t, deltaPoints, 4, "non-numeric measurements should be ignored")
|
||||
}
|
||||
|
||||
func TestExecMalformed(t *testing.T) {
|
||||
runner := newRunnerMock([]byte(malformedJson), nil)
|
||||
clock := newClockMock(time.Unix(baseTimeSeconds+20, 0))
|
||||
command := Command{
|
||||
Command: "badcommand arg1",
|
||||
Name: "mycollector",
|
||||
Interval: 10,
|
||||
lastRunAt: time.Unix(baseTimeSeconds, 0),
|
||||
}
|
||||
|
||||
e := &Exec{
|
||||
runner: runner,
|
||||
clock: clock,
|
||||
Commands: []*Command{&command},
|
||||
}
|
||||
|
||||
var acc testutil.Accumulator
|
||||
initialPoints := len(acc.Points)
|
||||
err := e.Gather(&acc)
|
||||
deltaPoints := len(acc.Points) - initialPoints
|
||||
require.Error(t, err)
|
||||
|
||||
assert.Equal(t, deltaPoints, 0, "No new points should have been added")
|
||||
}
|
||||
|
||||
func TestCommandError(t *testing.T) {
|
||||
runner := newRunnerMock(nil, fmt.Errorf("exit status code 1"))
|
||||
clock := newClockMock(time.Unix(baseTimeSeconds+20, 0))
|
||||
command := Command{
|
||||
Command: "badcommand",
|
||||
Name: "mycollector",
|
||||
Interval: 10,
|
||||
lastRunAt: time.Unix(baseTimeSeconds, 0),
|
||||
}
|
||||
|
||||
e := &Exec{
|
||||
runner: runner,
|
||||
clock: clock,
|
||||
Commands: []*Command{&command},
|
||||
}
|
||||
|
||||
var acc testutil.Accumulator
|
||||
initialPoints := len(acc.Points)
|
||||
err := e.Gather(&acc)
|
||||
deltaPoints := len(acc.Points) - initialPoints
|
||||
require.Error(t, err)
|
||||
|
||||
assert.Equal(t, deltaPoints, 0, "No new points should have been added")
|
||||
}
|
||||
|
||||
func TestExecNotEnoughTime(t *testing.T) {
|
||||
runner := newRunnerMock([]byte(validJson), nil)
|
||||
clock := newClockMock(time.Unix(baseTimeSeconds+5, 0))
|
||||
command := Command{
|
||||
Command: "testcommand arg1",
|
||||
Name: "mycollector",
|
||||
Interval: 10,
|
||||
lastRunAt: time.Unix(baseTimeSeconds, 0),
|
||||
}
|
||||
|
||||
e := &Exec{
|
||||
runner: runner,
|
||||
clock: clock,
|
||||
Commands: []*Command{&command},
|
||||
}
|
||||
|
||||
var acc testutil.Accumulator
|
||||
initialPoints := len(acc.Points)
|
||||
err := e.Gather(&acc)
|
||||
deltaPoints := len(acc.Points) - initialPoints
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, deltaPoints, 0, "No new points should have been added")
|
||||
}
|
||||
|
||||
func TestExecUninitializedLastRunAt(t *testing.T) {
|
||||
runner := newRunnerMock([]byte(validJson), nil)
|
||||
clock := newClockMock(time.Unix(baseTimeSeconds, 0))
|
||||
command := Command{
|
||||
Command: "testcommand arg1",
|
||||
Name: "mycollector",
|
||||
Interval: math.MaxInt32,
|
||||
// Uninitialized lastRunAt should default to time.Unix(0, 0), so this should
|
||||
// run no matter what the interval is
|
||||
}
|
||||
|
||||
e := &Exec{
|
||||
runner: runner,
|
||||
clock: clock,
|
||||
Commands: []*Command{&command},
|
||||
}
|
||||
|
||||
var acc testutil.Accumulator
|
||||
initialPoints := len(acc.Points)
|
||||
err := e.Gather(&acc)
|
||||
deltaPoints := len(acc.Points) - initialPoints
|
||||
require.NoError(t, err)
|
||||
|
||||
checkFloat := []struct {
|
||||
name string
|
||||
value float64
|
||||
}{
|
||||
{"mycollector_num_processes", 82},
|
||||
{"mycollector_cpu_used", 8234},
|
||||
{"mycollector_cpu_free", 32},
|
||||
{"mycollector_percent", 0.81},
|
||||
}
|
||||
|
||||
for _, c := range checkFloat {
|
||||
assert.True(t, acc.CheckValue(c.name, c.value))
|
||||
}
|
||||
|
||||
assert.Equal(t, deltaPoints, 4, "non-numeric measurements should be ignored")
|
||||
}
|
||||
func TestExecOneNotEnoughTimeAndOneEnoughTime(t *testing.T) {
|
||||
runner := newRunnerMock([]byte(validJson), nil)
|
||||
clock := newClockMock(time.Unix(baseTimeSeconds+5, 0))
|
||||
notEnoughTimeCommand := Command{
|
||||
Command: "testcommand arg1",
|
||||
Name: "mycollector",
|
||||
Interval: 10,
|
||||
lastRunAt: time.Unix(baseTimeSeconds, 0),
|
||||
}
|
||||
enoughTimeCommand := Command{
|
||||
Command: "testcommand arg1",
|
||||
Name: "mycollector",
|
||||
Interval: 3,
|
||||
lastRunAt: time.Unix(baseTimeSeconds, 0),
|
||||
}
|
||||
|
||||
e := &Exec{
|
||||
runner: runner,
|
||||
clock: clock,
|
||||
Commands: []*Command{¬EnoughTimeCommand, &enoughTimeCommand},
|
||||
}
|
||||
|
||||
var acc testutil.Accumulator
|
||||
initialPoints := len(acc.Points)
|
||||
err := e.Gather(&acc)
|
||||
deltaPoints := len(acc.Points) - initialPoints
|
||||
require.NoError(t, err)
|
||||
|
||||
checkFloat := []struct {
|
||||
name string
|
||||
value float64
|
||||
}{
|
||||
{"mycollector_num_processes", 82},
|
||||
{"mycollector_cpu_used", 8234},
|
||||
{"mycollector_cpu_free", 32},
|
||||
{"mycollector_percent", 0.81},
|
||||
}
|
||||
|
||||
for _, c := range checkFloat {
|
||||
assert.True(t, acc.CheckValue(c.name, c.value))
|
||||
}
|
||||
|
||||
assert.Equal(t, deltaPoints, 4, "Only one command should have been run")
|
||||
}
|
||||
@@ -3,7 +3,7 @@ package haproxy
|
||||
import (
|
||||
"encoding/csv"
|
||||
"fmt"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
"github.com/influxdb/telegraf/plugins"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
@@ -91,7 +91,7 @@ var sampleConfig = `
|
||||
# If no servers are specified, then default to 127.0.0.1:1936
|
||||
servers = ["http://myhaproxy.com:1936", "http://anotherhaproxy.com:1936"]
|
||||
# Or you can also use a local socket (not working yet)
|
||||
# servers = ["socket://run/haproxy/admin.sock"]
|
||||
# servers = ["socket:/run/haproxy/admin.sock"]
|
||||
`
|
||||
|
||||
func (r *haproxy) SampleConfig() string {
|
||||
@@ -104,7 +104,7 @@ func (r *haproxy) Description() string {
|
||||
|
||||
// Reads stats from all configured servers and accumulates them.
// Returns one of the errors encountered while gathering stats (if any).
|
||||
func (g *haproxy) Gather(acc inputs.Accumulator) error {
|
||||
func (g *haproxy) Gather(acc plugins.Accumulator) error {
|
||||
if len(g.Servers) == 0 {
|
||||
return g.gatherServer("http://127.0.0.1:1936", acc)
|
||||
}
|
||||
@@ -126,7 +126,7 @@ func (g *haproxy) Gather(acc inputs.Accumulator) error {
|
||||
return outerr
|
||||
}
|
||||
|
||||
func (g *haproxy) gatherServer(addr string, acc inputs.Accumulator) error {
|
||||
func (g *haproxy) gatherServer(addr string, acc plugins.Accumulator) error {
|
||||
if g.client == nil {
|
||||
|
||||
client := &http.Client{}
|
||||
@@ -156,7 +156,7 @@ func (g *haproxy) gatherServer(addr string, acc inputs.Accumulator) error {
|
||||
return importCsvResult(res.Body, acc, u.Host)
|
||||
}
|
||||
|
||||
func importCsvResult(r io.Reader, acc inputs.Accumulator, host string) error {
|
||||
func importCsvResult(r io.Reader, acc plugins.Accumulator, host string) error {
|
||||
csv := csv.NewReader(r)
|
||||
result, err := csv.ReadAll()
|
||||
now := time.Now()
|
||||
@@ -358,7 +358,7 @@ func importCsvResult(r io.Reader, acc inputs.Accumulator, host string) error {
|
||||
}
|
||||
|
||||
func init() {
|
||||
inputs.Add("haproxy", func() inputs.Input {
|
||||
plugins.Add("haproxy", func() plugins.Plugin {
|
||||
return &haproxy{}
|
||||
})
|
||||
}
|
||||
@@ -5,7 +5,7 @@ import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
"github.com/influxdb/telegraf/testutil"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"net/http"
|
||||
@@ -47,39 +47,52 @@ func TestHaproxyGeneratesMetricsWithAuthentication(t *testing.T) {
|
||||
"sv": "host0",
|
||||
}
|
||||
|
||||
fields := map[string]interface{}{
|
||||
"active_servers": uint64(1),
|
||||
"backup_servers": uint64(0),
|
||||
"bin": uint64(510913516),
|
||||
"bout": uint64(2193856571),
|
||||
"check_duration": uint64(10),
|
||||
"cli_abort": uint64(73),
|
||||
"ctime": uint64(2),
|
||||
"downtime": uint64(0),
|
||||
"dresp": uint64(0),
|
||||
"econ": uint64(0),
|
||||
"eresp": uint64(1),
|
||||
"http_response.1xx": uint64(0),
|
||||
"http_response.2xx": uint64(119534),
|
||||
"http_response.3xx": uint64(48051),
|
||||
"http_response.4xx": uint64(2345),
|
||||
"http_response.5xx": uint64(1056),
|
||||
"lbtot": uint64(171013),
|
||||
"qcur": uint64(0),
|
||||
"qmax": uint64(0),
|
||||
"qtime": uint64(0),
|
||||
"rate": uint64(3),
|
||||
"rate_max": uint64(12),
|
||||
"rtime": uint64(312),
|
||||
"scur": uint64(1),
|
||||
"smax": uint64(32),
|
||||
"srv_abort": uint64(1),
|
||||
"stot": uint64(171014),
|
||||
"ttime": uint64(2341),
|
||||
"wredis": uint64(0),
|
||||
"wretr": uint64(1),
|
||||
assert.NoError(t, acc.ValidateTaggedValue("stot", uint64(171014), tags))
|
||||
|
||||
checkInt := []struct {
|
||||
name string
|
||||
value uint64
|
||||
}{
|
||||
|
||||
{"qmax", 81},
|
||||
{"scur", 288},
|
||||
{"smax", 713},
|
||||
{"bin", 5557055817},
|
||||
{"bout", 24096715169},
|
||||
{"dreq", 1102},
|
||||
{"dresp", 80},
|
||||
{"ereq", 95740},
|
||||
{"econ", 0},
|
||||
{"eresp", 0},
|
||||
{"wretr", 17},
|
||||
{"wredis", 19},
|
||||
{"active_servers", 1},
|
||||
{"backup_servers", 0},
|
||||
{"downtime", 0},
|
||||
{"throttle", 13},
|
||||
{"lbtot", 114},
|
||||
{"rate", 18},
|
||||
{"rate_max", 102},
|
||||
{"check_duration", 1},
|
||||
{"http_response.1xx", 0},
|
||||
{"http_response.2xx", 1314093},
|
||||
{"http_response.3xx", 537036},
|
||||
{"http_response.4xx", 123452},
|
||||
{"http_response.5xx", 11966},
|
||||
{"req_rate", 35},
|
||||
{"req_rate_max", 140},
|
||||
{"req_tot", 1987928},
|
||||
{"cli_abort", 0},
|
||||
{"srv_abort", 0},
|
||||
{"qtime", 0},
|
||||
{"ctime", 2},
|
||||
{"rtime", 23},
|
||||
{"ttime", 545},
|
||||
}
|
||||
|
||||
for _, c := range checkInt {
|
||||
assert.Equal(t, true, acc.CheckValue(c.name, c.value))
|
||||
}
|
||||
acc.AssertContainsTaggedFields(t, "haproxy", fields, tags)
|
||||
|
||||
//Here, we should get error because we don't pass authentication data
|
||||
r = &haproxy{
|
||||
@@ -111,39 +124,10 @@ func TestHaproxyGeneratesMetricsWithoutAuthentication(t *testing.T) {
|
||||
"sv": "host0",
|
||||
}
|
||||
|
||||
fields := map[string]interface{}{
|
||||
"active_servers": uint64(1),
|
||||
"backup_servers": uint64(0),
|
||||
"bin": uint64(510913516),
|
||||
"bout": uint64(2193856571),
|
||||
"check_duration": uint64(10),
|
||||
"cli_abort": uint64(73),
|
||||
"ctime": uint64(2),
|
||||
"downtime": uint64(0),
|
||||
"dresp": uint64(0),
|
||||
"econ": uint64(0),
|
||||
"eresp": uint64(1),
|
||||
"http_response.1xx": uint64(0),
|
||||
"http_response.2xx": uint64(119534),
|
||||
"http_response.3xx": uint64(48051),
|
||||
"http_response.4xx": uint64(2345),
|
||||
"http_response.5xx": uint64(1056),
|
||||
"lbtot": uint64(171013),
|
||||
"qcur": uint64(0),
|
||||
"qmax": uint64(0),
|
||||
"qtime": uint64(0),
|
||||
"rate": uint64(3),
|
||||
"rate_max": uint64(12),
|
||||
"rtime": uint64(312),
|
||||
"scur": uint64(1),
|
||||
"smax": uint64(32),
|
||||
"srv_abort": uint64(1),
|
||||
"stot": uint64(171014),
|
||||
"ttime": uint64(2341),
|
||||
"wredis": uint64(0),
|
||||
"wretr": uint64(1),
|
||||
}
|
||||
acc.AssertContainsTaggedFields(t, "haproxy", fields, tags)
|
||||
assert.NoError(t, acc.ValidateTaggedValue("stot", uint64(171014), tags))
|
||||
assert.NoError(t, acc.ValidateTaggedValue("scur", uint64(1), tags))
|
||||
assert.NoError(t, acc.ValidateTaggedValue("rate", uint64(3), tags))
|
||||
assert.Equal(t, true, acc.CheckValue("bin", uint64(5557055817)))
|
||||
}
|
||||
|
||||
//When not passing server config, we default to localhost
|
||||
@@ -45,16 +45,6 @@ You can also specify additional request parameters for the service:

```

You can also specify additional request header parameters for the service:

```
[[httpjson.services]]
  ...

  [httpjson.services.headers]
    X-Auth-Token = "my-xauth-token"
    apiVersion = "v1"
```

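As an aside (not part of the original README): a minimal, self-contained sketch of how a configured header map like the one above ends up on the outgoing HTTP request, following the `req.Header.Add` loop that appears in httpjson.go later in this diff. The `addHeaders` helper and the example URL are hypothetical.

```go
package main

import (
	"fmt"
	"net/http"
)

// addHeaders copies a configured header map onto an outgoing request,
// the same way the plugin loops over h.Headers with req.Header.Add.
func addHeaders(req *http.Request, headers map[string]string) {
	for k, v := range headers {
		req.Header.Add(k, v)
	}
}

func main() {
	req, err := http.NewRequest("GET", "http://server1.example.com/metrics/", nil)
	if err != nil {
		panic(err)
	}
	addHeaders(req, map[string]string{
		"X-Auth-Token": "my-xauth-token",
		"apiVersion":   "v1",
	})
	fmt.Println(req.Header)
}
```
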
# Example:

@@ -9,10 +9,9 @@ import (
|
||||
"net/url"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/telegraf/internal"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
"github.com/influxdb/telegraf/internal"
|
||||
"github.com/influxdb/telegraf/plugins"
|
||||
)
|
||||
|
||||
type HttpJson struct {
|
||||
@@ -21,7 +20,6 @@ type HttpJson struct {
|
||||
Method string
|
||||
TagKeys []string
|
||||
Parameters map[string]string
|
||||
Headers map[string]string
|
||||
client HTTPClient
|
||||
}
|
||||
|
||||
@@ -46,9 +44,6 @@ func (c RealHTTPClient) MakeRequest(req *http.Request) (*http.Response, error) {
|
||||
}
|
||||
|
||||
var sampleConfig = `
|
||||
# NOTE This plugin only reads numerical measurements, strings and booleans
|
||||
# will be ignored.
|
||||
|
||||
# a name for the service being polled
|
||||
name = "webserver_stats"
|
||||
|
||||
@@ -68,15 +63,9 @@ var sampleConfig = `
|
||||
# ]
|
||||
|
||||
# HTTP parameters (all values must be strings)
|
||||
[inputs.httpjson.parameters]
|
||||
[plugins.httpjson.parameters]
|
||||
event_type = "cpu_spike"
|
||||
threshold = "0.75"
|
||||
|
||||
# HTTP Header parameters (all values must be strings)
|
||||
# [inputs.httpjson.headers]
|
||||
# X-Auth-Token = "my-xauth-token"
|
||||
# apiVersion = "v1"
|
||||
|
||||
`
|
||||
|
||||
func (h *HttpJson) SampleConfig() string {
|
||||
@@ -88,7 +77,7 @@ func (h *HttpJson) Description() string {
|
||||
}
|
||||
|
||||
// Gathers data for all servers.
|
||||
func (h *HttpJson) Gather(acc inputs.Accumulator) error {
|
||||
func (h *HttpJson) Gather(acc plugins.Accumulator) error {
|
||||
var wg sync.WaitGroup
|
||||
|
||||
errorChannel := make(chan error, len(h.Servers))
|
||||
@@ -127,11 +116,10 @@ func (h *HttpJson) Gather(acc inputs.Accumulator) error {
|
||||
// Returns:
|
||||
// error: Any error that may have occurred
|
||||
func (h *HttpJson) gatherServer(
|
||||
acc inputs.Accumulator,
|
||||
acc plugins.Accumulator,
|
||||
serverURL string,
|
||||
) error {
|
||||
resp, responseTime, err := h.sendRequest(serverURL)
|
||||
|
||||
resp, err := h.sendRequest(serverURL)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -153,9 +141,6 @@ func (h *HttpJson) gatherServer(
|
||||
delete(jsonOut, tag)
|
||||
}
|
||||
|
||||
if responseTime >= 0 {
|
||||
jsonOut["response_time"] = responseTime
|
||||
}
|
||||
f := internal.JSONFlattener{}
|
||||
err = f.FlattenJSON("", jsonOut)
|
||||
if err != nil {
|
||||
@@ -168,7 +153,7 @@ func (h *HttpJson) gatherServer(
|
||||
} else {
|
||||
msrmnt_name = "httpjson_" + h.Name
|
||||
}
|
||||
acc.AddFields(msrmnt_name, f.Fields, tags)
|
||||
acc.AddFields(msrmnt_name, f.Fields, nil)
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -179,11 +164,11 @@ func (h *HttpJson) gatherServer(
|
||||
// Returns:
|
||||
// string: body of the response
|
||||
// error : Any error that may have occurred
|
||||
func (h *HttpJson) sendRequest(serverURL string) (string, float64, error) {
|
||||
func (h *HttpJson) sendRequest(serverURL string) (string, error) {
|
||||
// Prepare URL
|
||||
requestURL, err := url.Parse(serverURL)
|
||||
if err != nil {
|
||||
return "", -1, fmt.Errorf("Invalid server URL \"%s\"", serverURL)
|
||||
return "", fmt.Errorf("Invalid server URL \"%s\"", serverURL)
|
||||
}
|
||||
|
||||
params := url.Values{}
|
||||
@@ -195,26 +180,19 @@ func (h *HttpJson) sendRequest(serverURL string) (string, float64, error) {
|
||||
// Create + send request
|
||||
req, err := http.NewRequest(h.Method, requestURL.String(), nil)
|
||||
if err != nil {
|
||||
return "", -1, err
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Add header parameters
|
||||
for k, v := range h.Headers {
|
||||
req.Header.Add(k, v)
|
||||
}
|
||||
|
||||
start := time.Now()
|
||||
resp, err := h.client.MakeRequest(req)
|
||||
if err != nil {
|
||||
return "", -1, err
|
||||
return "", err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
defer resp.Body.Close()
|
||||
responseTime := time.Since(start).Seconds()
|
||||
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return string(body), responseTime, err
|
||||
return string(body), err
|
||||
}
|
||||
|
||||
// Process response
|
||||
@@ -225,14 +203,14 @@ func (h *HttpJson) sendRequest(serverURL string) (string, float64, error) {
|
||||
http.StatusText(resp.StatusCode),
|
||||
http.StatusOK,
|
||||
http.StatusText(http.StatusOK))
|
||||
return string(body), responseTime, err
|
||||
return string(body), err
|
||||
}
|
||||
|
||||
return string(body), responseTime, err
|
||||
return string(body), err
|
||||
}
|
||||
|
||||
func init() {
|
||||
inputs.Add("httpjson", func() inputs.Input {
|
||||
plugins.Add("httpjson", func() plugins.Plugin {
|
||||
return &HttpJson{client: RealHTTPClient{client: &http.Client{}}}
|
||||
})
|
||||
}
|
||||
@@ -1,12 +1,13 @@
|
||||
package httpjson
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
"github.com/influxdb/telegraf/testutil"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
@@ -14,17 +15,17 @@ import (
|
||||
const validJSON = `
|
||||
{
|
||||
"parent": {
|
||||
"child": 3.0,
|
||||
"child": 3,
|
||||
"ignored_child": "hi"
|
||||
},
|
||||
"ignored_null": null,
|
||||
"integer": 4,
|
||||
"list": [3, 4],
|
||||
"ignored_list": [3, 4],
|
||||
"ignored_parent": {
|
||||
"another_ignored_list": [4],
|
||||
"another_ignored_null": null,
|
||||
"ignored_string": "hello, world!"
|
||||
},
|
||||
"another_list": [4]
|
||||
}
|
||||
}`
|
||||
|
||||
const validJSONTags = `
|
||||
@@ -34,14 +35,6 @@ const validJSONTags = `
|
||||
"build": "123"
|
||||
}`
|
||||
|
||||
var expectedFields = map[string]interface{}{
|
||||
"parent_child": float64(3),
|
||||
"list_0": float64(3),
|
||||
"list_1": float64(4),
|
||||
"another_list_0": float64(4),
|
||||
"integer": float64(4),
|
||||
}
|
||||
|
||||
const invalidJSON = "I don't think this is JSON"
|
||||
|
||||
const empty = ""
|
||||
@@ -83,44 +76,37 @@ func (c mockHTTPClient) MakeRequest(req *http.Request) (*http.Response, error) {
|
||||
//
|
||||
// Returns:
|
||||
// *HttpJson: Pointer to an HttpJson object that uses the generated mock HTTP client
|
||||
func genMockHttpJson(response string, statusCode int) []*HttpJson {
|
||||
return []*HttpJson{
|
||||
&HttpJson{
|
||||
client: mockHTTPClient{responseBody: response, statusCode: statusCode},
|
||||
Servers: []string{
|
||||
"http://server1.example.com/metrics/",
|
||||
"http://server2.example.com/metrics/",
|
||||
func genMockHttpJson(response string, statusCode int) *HttpJson {
|
||||
return &HttpJson{
|
||||
client: mockHTTPClient{responseBody: response, statusCode: statusCode},
|
||||
Services: []Service{
|
||||
Service{
|
||||
Servers: []string{
|
||||
"http://server1.example.com/metrics/",
|
||||
"http://server2.example.com/metrics/",
|
||||
},
|
||||
Name: "my_webapp",
|
||||
Method: "GET",
|
||||
Parameters: map[string]string{
|
||||
"httpParam1": "12",
|
||||
"httpParam2": "the second parameter",
|
||||
},
|
||||
},
|
||||
Name: "my_webapp",
|
||||
Method: "GET",
|
||||
Parameters: map[string]string{
|
||||
"httpParam1": "12",
|
||||
"httpParam2": "the second parameter",
|
||||
},
|
||||
Headers: map[string]string{
|
||||
"X-Auth-Token": "the-first-parameter",
|
||||
"apiVersion": "v1",
|
||||
},
|
||||
},
|
||||
&HttpJson{
|
||||
client: mockHTTPClient{responseBody: response, statusCode: statusCode},
|
||||
Servers: []string{
|
||||
"http://server3.example.com/metrics/",
|
||||
"http://server4.example.com/metrics/",
|
||||
},
|
||||
Name: "other_webapp",
|
||||
Method: "POST",
|
||||
Parameters: map[string]string{
|
||||
"httpParam1": "12",
|
||||
"httpParam2": "the second parameter",
|
||||
},
|
||||
Headers: map[string]string{
|
||||
"X-Auth-Token": "the-first-parameter",
|
||||
"apiVersion": "v1",
|
||||
},
|
||||
TagKeys: []string{
|
||||
"role",
|
||||
"build",
|
||||
Service{
|
||||
Servers: []string{
|
||||
"http://server3.example.com/metrics/",
|
||||
"http://server4.example.com/metrics/",
|
||||
},
|
||||
Name: "other_webapp",
|
||||
Method: "POST",
|
||||
Parameters: map[string]string{
|
||||
"httpParam1": "12",
|
||||
"httpParam2": "the second parameter",
|
||||
},
|
||||
TagKeys: []string{
|
||||
"role",
|
||||
"build",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -130,21 +116,28 @@ func genMockHttpJson(response string, statusCode int) []*HttpJson {
|
||||
func TestHttpJson200(t *testing.T) {
|
||||
httpjson := genMockHttpJson(validJSON, 200)
|
||||
|
||||
for _, service := range httpjson {
|
||||
var acc testutil.Accumulator
|
||||
err := service.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 12, acc.NFields())
|
||||
// Set response_time
|
||||
for _, p := range acc.Points {
|
||||
p.Fields["response_time"] = 1.0
|
||||
}
|
||||
var acc testutil.Accumulator
|
||||
err := httpjson.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, 8, len(acc.Points))
|
||||
|
||||
for _, service := range httpjson.Services {
|
||||
for _, srv := range service.Servers {
|
||||
tags := map[string]string{"server": srv}
|
||||
mname := "httpjson_" + service.Name
|
||||
expectedFields["response_time"] = 1.0
|
||||
acc.AssertContainsTaggedFields(t, mname, expectedFields, tags)
|
||||
require.NoError(t,
|
||||
acc.ValidateTaggedValue(
|
||||
fmt.Sprintf("%s_parent_child", service.Name),
|
||||
3.0,
|
||||
map[string]string{"server": srv},
|
||||
),
|
||||
)
|
||||
require.NoError(t,
|
||||
acc.ValidateTaggedValue(
|
||||
fmt.Sprintf("%s_integer", service.Name),
|
||||
4.0,
|
||||
map[string]string{"server": srv},
|
||||
),
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -154,22 +147,28 @@ func TestHttpJson500(t *testing.T) {
|
||||
httpjson := genMockHttpJson(validJSON, 500)
|
||||
|
||||
var acc testutil.Accumulator
|
||||
err := httpjson[0].Gather(&acc)
|
||||
err := httpjson.Gather(&acc)
|
||||
|
||||
assert.NotNil(t, err)
|
||||
assert.Equal(t, 0, acc.NFields())
|
||||
// 4 error lines for (2 urls) * (2 services)
|
||||
assert.Equal(t, len(strings.Split(err.Error(), "\n")), 4)
|
||||
assert.Equal(t, 0, len(acc.Points))
|
||||
}
|
||||
|
||||
// Test response to HTTP 405
|
||||
func TestHttpJsonBadMethod(t *testing.T) {
|
||||
httpjson := genMockHttpJson(validJSON, 200)
|
||||
httpjson[0].Method = "NOT_A_REAL_METHOD"
|
||||
httpjson.Services[0].Method = "NOT_A_REAL_METHOD"
|
||||
|
||||
var acc testutil.Accumulator
|
||||
err := httpjson[0].Gather(&acc)
|
||||
err := httpjson.Gather(&acc)
|
||||
|
||||
assert.NotNil(t, err)
|
||||
assert.Equal(t, 0, acc.NFields())
|
||||
// 2 error lines for (2 urls) * (1 failed service)
|
||||
assert.Equal(t, len(strings.Split(err.Error(), "\n")), 2)
|
||||
|
||||
// (2 measurements) * (2 servers) * (1 successful service)
|
||||
assert.Equal(t, 4, len(acc.Points))
|
||||
}
|
||||
|
||||
// Test response to malformed JSON
|
||||
@@ -177,10 +176,12 @@ func TestHttpJsonBadJson(t *testing.T) {
|
||||
httpjson := genMockHttpJson(invalidJSON, 200)
|
||||
|
||||
var acc testutil.Accumulator
|
||||
err := httpjson[0].Gather(&acc)
|
||||
err := httpjson.Gather(&acc)
|
||||
|
||||
assert.NotNil(t, err)
|
||||
assert.Equal(t, 0, acc.NFields())
|
||||
// 4 error lines for (2 urls) * (2 services)
|
||||
assert.Equal(t, len(strings.Split(err.Error(), "\n")), 4)
|
||||
assert.Equal(t, 0, len(acc.Points))
|
||||
}
|
||||
|
||||
// Test response to empty string as response object
|
||||
@@ -188,31 +189,34 @@ func TestHttpJsonEmptyResponse(t *testing.T) {
|
||||
httpjson := genMockHttpJson(empty, 200)
|
||||
|
||||
var acc testutil.Accumulator
|
||||
err := httpjson[0].Gather(&acc)
|
||||
err := httpjson.Gather(&acc)
|
||||
|
||||
assert.NotNil(t, err)
|
||||
assert.Equal(t, 0, acc.NFields())
|
||||
// 4 error lines for (2 urls) * (2 services)
|
||||
assert.Equal(t, len(strings.Split(err.Error(), "\n")), 4)
|
||||
assert.Equal(t, 0, len(acc.Points))
|
||||
}
|
||||
|
||||
// Test that the proper values are ignored or collected
|
||||
func TestHttpJson200Tags(t *testing.T) {
|
||||
httpjson := genMockHttpJson(validJSONTags, 200)
|
||||
|
||||
for _, service := range httpjson {
|
||||
var acc testutil.Accumulator
|
||||
err := httpjson.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, 4, len(acc.Points))
|
||||
|
||||
for _, service := range httpjson.Services {
|
||||
if service.Name == "other_webapp" {
|
||||
var acc testutil.Accumulator
|
||||
err := service.Gather(&acc)
|
||||
// Set response_time
|
||||
for _, p := range acc.Points {
|
||||
p.Fields["response_time"] = 1.0
|
||||
}
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 4, acc.NFields())
|
||||
for _, srv := range service.Servers {
|
||||
tags := map[string]string{"server": srv, "role": "master", "build": "123"}
|
||||
fields := map[string]interface{}{"value": float64(15), "response_time": float64(1)}
|
||||
mname := "httpjson_" + service.Name
|
||||
acc.AssertContainsTaggedFields(t, mname, fields, tags)
|
||||
require.NoError(t,
|
||||
acc.ValidateTaggedValue(
|
||||
fmt.Sprintf("%s_value", service.Name),
|
||||
15.0,
|
||||
map[string]string{"server": srv, "role": "master", "build": "123"},
|
||||
),
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -5,7 +5,7 @@ The influxdb plugin collects InfluxDB-formatted data from JSON endpoints.
With a configuration of:

```toml
[[inputs.influxdb]]
[[plugins.influxdb]]
  urls = [
    "http://127.0.0.1:8086/debug/vars",
    "http://192.168.2.1:8086/debug/vars"
@@ -8,7 +8,7 @@ import (
	"strings"
	"sync"

	"github.com/influxdata/telegraf/plugins/inputs"
	"github.com/influxdb/telegraf/plugins"
)

type InfluxDB struct {
@@ -32,7 +32,7 @@ func (*InfluxDB) SampleConfig() string {
`
}

func (i *InfluxDB) Gather(acc inputs.Accumulator) error {
func (i *InfluxDB) Gather(acc plugins.Accumulator) error {
	errorChannel := make(chan error, len(i.URLs))

	var wg sync.WaitGroup
@@ -77,7 +77,7 @@ type point struct {
// Returns:
// error: Any error that may have occurred
func (i *InfluxDB) gatherURL(
	acc inputs.Accumulator,
	acc plugins.Accumulator,
	url string,
) error {
	resp, err := http.Get(url)
@@ -130,7 +130,7 @@ func (i *InfluxDB) gatherURL(
		p.Tags["url"] = url

		acc.AddFields(
			"influxdb_"+p.Name,
			p.Name,
			p.Values,
			p.Tags,
		)
@@ -140,7 +140,7 @@ func (i *InfluxDB) gatherURL(
}

func init() {
	inputs.Add("influxdb", func() inputs.Input {
	plugins.Add("influxdb", func() plugins.Plugin {
		return &InfluxDB{}
	})
}
@@ -5,8 +5,8 @@ import (
	"net/http/httptest"
	"testing"

	"github.com/influxdata/telegraf/plugins/inputs/influxdb"
	"github.com/influxdata/telegraf/testutil"
	"github.com/influxdb/telegraf/plugins/influxdb"
	"github.com/influxdb/telegraf/testutil"
	"github.com/stretchr/testify/require"
)

@@ -72,26 +72,29 @@ func TestBasic(t *testing.T) {
|
||||
require.NoError(t, plugin.Gather(&acc))
|
||||
|
||||
require.Len(t, acc.Points, 2)
|
||||
fields := map[string]interface{}{
|
||||
// JSON will truncate floats to integer representations.
|
||||
// Since there's no distinction in JSON, we can't assume it's an int.
|
||||
"i": -1.0,
|
||||
"f": 0.5,
|
||||
"b": true,
|
||||
"s": "string",
|
||||
}
|
||||
tags := map[string]string{
|
||||
"id": "ex1",
|
||||
"url": fakeServer.URL + "/endpoint",
|
||||
}
|
||||
acc.AssertContainsTaggedFields(t, "influxdb_foo", fields, tags)
|
||||
|
||||
fields = map[string]interface{}{
|
||||
"x": "x",
|
||||
}
|
||||
tags = map[string]string{
|
||||
"id": "ex2",
|
||||
"url": fakeServer.URL + "/endpoint",
|
||||
}
|
||||
acc.AssertContainsTaggedFields(t, "influxdb_bar", fields, tags)
|
||||
require.NoError(t, acc.ValidateTaggedFieldsValue(
|
||||
"foo",
|
||||
map[string]interface{}{
|
||||
// JSON will truncate floats to integer representations.
|
||||
// Since there's no distinction in JSON, we can't assume it's an int.
|
||||
"i": -1.0,
|
||||
"f": 0.5,
|
||||
"b": true,
|
||||
"s": "string",
|
||||
},
|
||||
map[string]string{
|
||||
"id": "ex1",
|
||||
"url": fakeServer.URL + "/endpoint",
|
||||
},
|
||||
))
|
||||
require.NoError(t, acc.ValidateTaggedFieldsValue(
|
||||
"bar",
|
||||
map[string]interface{}{
|
||||
"x": "x",
|
||||
},
|
||||
map[string]string{
|
||||
"id": "ex2",
|
||||
"url": fakeServer.URL + "/endpoint",
|
||||
},
|
||||
))
|
||||
}
|
||||
@@ -1,39 +0,0 @@
# Example Input Plugin

The example plugin gathers metrics about example things

### Configuration:

```
# Description
[[inputs.example]]
  # SampleConfig
```

### Measurements & Fields:

<optional description>

- measurement1
    - field1 (type, unit)
    - field2 (float, percent)
- measurement2
    - field3 (integer, bytes)

### Tags:

- All measurements have the following tags:
    - tag1 (optional description)
    - tag2
- measurement2 has the following tags:
    - tag3

### Example Output:

Give an example `-test` output here

```
$ ./telegraf -config telegraf.conf -input-filter example -test
measurement1,tag1=foo,tag2=bar field1=1i,field2=2.1 1453831884664956455
measurement2,tag1=foo,tag2=bar,tag3=baz field3=1i 1453831884664956455
```
@@ -1,44 +0,0 @@
|
||||
package all
|
||||
|
||||
import (
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/aerospike"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/apache"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/bcache"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/disque"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/docker"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/elasticsearch"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/exec"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/github_webhooks"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/haproxy"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/httpjson"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/influxdb"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/jolokia"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/kafka_consumer"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/leofs"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/lustre2"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/mailchimp"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/memcached"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/mongodb"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/mysql"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/nginx"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/nsq"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/passenger"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/phpfpm"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/ping"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/postgresql"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/procstat"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/prometheus"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/puppetagent"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/rabbitmq"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/redis"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/rethinkdb"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/sensors"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/snmp"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/sqlserver"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/statsd"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/system"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/trig"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/twemproxy"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/zfs"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/zookeeper"
|
||||
)
|
||||
@@ -1,148 +0,0 @@
# Docker Input Plugin

The docker plugin uses the docker remote API to gather metrics on running
docker containers. You can read Docker's documentation for their remote API
[here](https://docs.docker.com/engine/reference/api/docker_remote_api_v1.20/#get-container-stats-based-on-resource-usage)

The docker plugin uses the excellent
[fsouza go-dockerclient](https://github.com/fsouza/go-dockerclient) library to
gather stats. Documentation for the library can be found
[here](https://godoc.org/github.com/fsouza/go-dockerclient) and documentation
for the stat structure can be found
[here](https://godoc.org/github.com/fsouza/go-dockerclient#Stats)

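For orientation only (this paragraph and sketch are not part of the original README): a minimal example of pulling one stats sample per running container with go-dockerclient, mirroring the calls made in the removed docker.go further down this diff; the endpoint and the printed field are taken from that file, everything else is an assumption.

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/fsouza/go-dockerclient"
)

func main() {
	// Connect to the local Docker daemon (the plugin's default endpoint).
	client, err := docker.NewClient("unix:///var/run/docker.sock")
	if err != nil {
		log.Fatal(err)
	}

	containers, err := client.ListContainers(docker.ListContainersOptions{})
	if err != nil {
		log.Fatal(err)
	}

	for _, c := range containers {
		// Stream: false asks for a single stats sample, delivered on statChan.
		statChan := make(chan *docker.Stats)
		done := make(chan bool)
		go client.Stats(docker.StatsOptions{
			ID:      c.ID,
			Stats:   statChan,
			Stream:  false,
			Done:    done,
			Timeout: 5 * time.Second,
		})
		stat := <-statChan
		close(done)
		if stat == nil {
			continue
		}
		fmt.Printf("%s: memory usage %d bytes\n", c.ID, stat.MemoryStats.Usage)
	}
}
```
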
### Configuration:

```
# Read metrics about docker containers
[[inputs.docker]]
  # Docker Endpoint
  # To use TCP, set endpoint = "tcp://[ip]:[port]"
  # To use environment variables (ie, docker-machine), set endpoint = "ENV"
  endpoint = "unix:///var/run/docker.sock"
  # Only collect metrics for these containers, collect all if empty
  container_names = []
```

### Measurements & Fields:

Every effort was made to preserve the names based on the JSON response from the
docker API.

Note that the docker_cpu metric may appear multiple times per collection, based
on the availability of per-cpu stats on your system.

- docker_mem
|
||||
- total_pgmafault
|
||||
- cache
|
||||
- mapped_file
|
||||
- total_inactive_file
|
||||
- pgpgout
|
||||
- rss
|
||||
- total_mapped_file
|
||||
- writeback
|
||||
- unevictable
|
||||
- pgpgin
|
||||
- total_unevictable
|
||||
- pgmajfault
|
||||
- total_rss
|
||||
- total_rss_huge
|
||||
- total_writeback
|
||||
- total_inactive_anon
|
||||
- rss_huge
|
||||
- hierarchical_memory_limit
|
||||
- total_pgfault
|
||||
- total_active_file
|
||||
- active_anon
|
||||
- total_active_anon
|
||||
- total_pgpgout
|
||||
- total_cache
|
||||
- inactive_anon
|
||||
- active_file
|
||||
- pgfault
|
||||
- inactive_file
|
||||
- total_pgpgin
|
||||
- max_usage
|
||||
- usage
|
||||
- failcnt
|
||||
- limit
|
||||
- docker_cpu
|
||||
- throttling_periods
|
||||
- throttling_throttled_periods
|
||||
- throttling_throttled_time
|
||||
- usage_in_kernelmode
|
||||
- usage_in_usermode
|
||||
- usage_system
|
||||
- usage_total
|
||||
- docker_net
|
||||
- rx_dropped
|
||||
- rx_bytes
|
||||
- rx_errors
|
||||
- tx_packets
|
||||
- tx_dropped
|
||||
- rx_packets
|
||||
- tx_errors
|
||||
- tx_bytes
|
||||
- docker_blkio
|
||||
- io_service_bytes_recursive_async
|
||||
- io_service_bytes_recursive_read
|
||||
- io_service_bytes_recursive_sync
|
||||
- io_service_bytes_recursive_total
|
||||
- io_service_bytes_recursive_write
|
||||
- io_serviced_recursive_async
|
||||
- io_serviced_recursive_read
|
||||
- io_serviced_recursive_sync
|
||||
- io_serviced_recursive_total
|
||||
- io_serviced_recursive_write
|
||||
|
||||
### Tags:

- All stats have the following tags:
    - cont_id (container ID)
    - cont_image (container image)
    - cont_name (container name)
- docker_cpu specific:
    - cpu
- docker_net specific:
    - network
- docker_blkio specific:
    - device

### Example Output:
|
||||
|
||||
```
|
||||
% ./telegraf -config ~/ws/telegraf.conf -input-filter docker -test
|
||||
* Plugin: docker, Collection 1
|
||||
> docker_mem,cont_id=5705ba8ed8fb47527410653d60a8bb2f3af5e62372297c419022a3cc6d45d848,\
|
||||
cont_image=spotify/kafka,cont_name=kafka \
|
||||
active_anon=52568064i,active_file=6926336i,cache=12038144i,fail_count=0i,\
|
||||
hierarchical_memory_limit=9223372036854771712i,inactive_anon=52707328i,\
|
||||
inactive_file=5111808i,limit=1044578304i,mapped_file=10301440i,\
|
||||
max_usage=140656640i,pgfault=63762i,pgmajfault=2837i,pgpgin=73355i,\
|
||||
pgpgout=45736i,rss=105275392i,rss_huge=4194304i,total_active_anon=52568064i,\
|
||||
total_active_file=6926336i,total_cache=12038144i,total_inactive_anon=52707328i,\
|
||||
total_inactive_file=5111808i,total_mapped_file=10301440i,total_pgfault=63762i,\
|
||||
total_pgmafault=0i,total_pgpgin=73355i,total_pgpgout=45736i,\
|
||||
total_rss=105275392i,total_rss_huge=4194304i,total_unevictable=0i,\
|
||||
total_writeback=0i,unevictable=0i,usage=117440512i,writeback=0i 1453409536840126713
|
||||
> docker_cpu,cont_id=5705ba8ed8fb47527410653d60a8bb2f3af5e62372297c419022a3cc6d45d848,\
|
||||
cont_image=spotify/kafka,cont_name=kafka,cpu=cpu-total \
|
||||
throttling_periods=0i,throttling_throttled_periods=0i,\
|
||||
throttling_throttled_time=0i,usage_in_kernelmode=440000000i,\
|
||||
usage_in_usermode=2290000000i,usage_system=84795360000000i,\
|
||||
usage_total=6628208865i 1453409536840126713
|
||||
> docker_cpu,cont_id=5705ba8ed8fb47527410653d60a8bb2f3af5e62372297c419022a3cc6d45d848,\
|
||||
cont_image=spotify/kafka,cont_name=kafka,cpu=cpu0 \
|
||||
usage_total=6628208865i 1453409536840126713
|
||||
> docker_net,cont_id=5705ba8ed8fb47527410653d60a8bb2f3af5e62372297c419022a3cc6d45d848,\
|
||||
cont_image=spotify/kafka,cont_name=kafka,network=eth0 \
|
||||
rx_bytes=7468i,rx_dropped=0i,rx_errors=0i,rx_packets=94i,tx_bytes=946i,\
|
||||
tx_dropped=0i,tx_errors=0i,tx_packets=13i 1453409536840126713
|
||||
> docker_blkio,cont_id=5705ba8ed8fb47527410653d60a8bb2f3af5e62372297c419022a3cc6d45d848,\
|
||||
cont_image=spotify/kafka,cont_name=kafka,device=8:0 \
|
||||
io_service_bytes_recursive_async=80216064i,io_service_bytes_recursive_read=79925248i,\
|
||||
io_service_bytes_recursive_sync=77824i,io_service_bytes_recursive_total=80293888i,\
|
||||
io_service_bytes_recursive_write=368640i,io_serviced_recursive_async=6562i,\
|
||||
io_serviced_recursive_read=6492i,io_serviced_recursive_sync=37i,\
|
||||
io_serviced_recursive_total=6599i,io_serviced_recursive_write=107i 1453409536840126713
|
||||
```
|
||||
@@ -1,309 +0,0 @@
|
||||
package system
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
|
||||
"github.com/fsouza/go-dockerclient"
|
||||
)
|
||||
|
||||
type Docker struct {
|
||||
Endpoint string
|
||||
ContainerNames []string
|
||||
|
||||
client *docker.Client
|
||||
}
|
||||
|
||||
var sampleConfig = `
|
||||
# Docker Endpoint
|
||||
# To use TCP, set endpoint = "tcp://[ip]:[port]"
|
||||
# To use environment variables (ie, docker-machine), set endpoint = "ENV"
|
||||
endpoint = "unix:///var/run/docker.sock"
|
||||
# Only collect metrics for these containers, collect all if empty
|
||||
container_names = []
|
||||
`
|
||||
|
||||
func (d *Docker) Description() string {
|
||||
return "Read metrics about docker containers"
|
||||
}
|
||||
|
||||
func (d *Docker) SampleConfig() string { return sampleConfig }
|
||||
|
||||
func (d *Docker) Gather(acc inputs.Accumulator) error {
|
||||
if d.client == nil {
|
||||
var c *docker.Client
|
||||
var err error
|
||||
if d.Endpoint == "ENV" {
|
||||
c, err = docker.NewClientFromEnv()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else if d.Endpoint == "" {
|
||||
c, err = docker.NewClient("unix:///var/run/docker.sock")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
c, err = docker.NewClient(d.Endpoint)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
d.client = c
|
||||
}
|
||||
|
||||
opts := docker.ListContainersOptions{}
|
||||
containers, err := d.client.ListContainers(opts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(len(containers))
|
||||
for _, container := range containers {
|
||||
go func(c docker.APIContainers) {
|
||||
defer wg.Done()
|
||||
err := d.gatherContainer(c, acc)
|
||||
if err != nil {
|
||||
fmt.Println(err.Error())
|
||||
}
|
||||
}(container)
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Docker) gatherContainer(
|
||||
container docker.APIContainers,
|
||||
acc inputs.Accumulator,
|
||||
) error {
|
||||
// Parse container name
|
||||
cname := "unknown"
|
||||
if len(container.Names) > 0 {
|
||||
// Not sure what to do with other names, just take the first.
|
||||
cname = strings.TrimPrefix(container.Names[0], "/")
|
||||
}
|
||||
|
||||
tags := map[string]string{
|
||||
"cont_id": container.ID,
|
||||
"cont_name": cname,
|
||||
"cont_image": container.Image,
|
||||
}
|
||||
if len(d.ContainerNames) > 0 {
|
||||
if !sliceContains(cname, d.ContainerNames) {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
statChan := make(chan *docker.Stats)
|
||||
done := make(chan bool)
|
||||
statOpts := docker.StatsOptions{
|
||||
Stream: false,
|
||||
ID: container.ID,
|
||||
Stats: statChan,
|
||||
Done: done,
|
||||
Timeout: time.Duration(time.Second * 5),
|
||||
}
|
||||
|
||||
go func() {
|
||||
d.client.Stats(statOpts)
|
||||
}()
|
||||
|
||||
stat := <-statChan
|
||||
close(done)
|
||||
|
||||
// Add labels to tags
|
||||
for k, v := range container.Labels {
|
||||
tags[k] = v
|
||||
}
|
||||
|
||||
gatherContainerStats(stat, acc, tags)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func gatherContainerStats(
|
||||
stat *docker.Stats,
|
||||
acc inputs.Accumulator,
|
||||
tags map[string]string,
|
||||
) {
|
||||
now := stat.Read
|
||||
|
||||
memfields := map[string]interface{}{
|
||||
"max_usage": stat.MemoryStats.MaxUsage,
|
||||
"usage": stat.MemoryStats.Usage,
|
||||
"fail_count": stat.MemoryStats.Failcnt,
|
||||
"limit": stat.MemoryStats.Limit,
|
||||
"total_pgmafault": stat.MemoryStats.Stats.TotalPgmafault,
|
||||
"cache": stat.MemoryStats.Stats.Cache,
|
||||
"mapped_file": stat.MemoryStats.Stats.MappedFile,
|
||||
"total_inactive_file": stat.MemoryStats.Stats.TotalInactiveFile,
|
||||
"pgpgout": stat.MemoryStats.Stats.Pgpgout,
|
||||
"rss": stat.MemoryStats.Stats.Rss,
|
||||
"total_mapped_file": stat.MemoryStats.Stats.TotalMappedFile,
|
||||
"writeback": stat.MemoryStats.Stats.Writeback,
|
||||
"unevictable": stat.MemoryStats.Stats.Unevictable,
|
||||
"pgpgin": stat.MemoryStats.Stats.Pgpgin,
|
||||
"total_unevictable": stat.MemoryStats.Stats.TotalUnevictable,
|
||||
"pgmajfault": stat.MemoryStats.Stats.Pgmajfault,
|
||||
"total_rss": stat.MemoryStats.Stats.TotalRss,
|
||||
"total_rss_huge": stat.MemoryStats.Stats.TotalRssHuge,
|
||||
"total_writeback": stat.MemoryStats.Stats.TotalWriteback,
|
||||
"total_inactive_anon": stat.MemoryStats.Stats.TotalInactiveAnon,
|
||||
"rss_huge": stat.MemoryStats.Stats.RssHuge,
|
||||
"hierarchical_memory_limit": stat.MemoryStats.Stats.HierarchicalMemoryLimit,
|
||||
"total_pgfault": stat.MemoryStats.Stats.TotalPgfault,
|
||||
"total_active_file": stat.MemoryStats.Stats.TotalActiveFile,
|
||||
"active_anon": stat.MemoryStats.Stats.ActiveAnon,
|
||||
"total_active_anon": stat.MemoryStats.Stats.TotalActiveAnon,
|
||||
"total_pgpgout": stat.MemoryStats.Stats.TotalPgpgout,
|
||||
"total_cache": stat.MemoryStats.Stats.TotalCache,
|
||||
"inactive_anon": stat.MemoryStats.Stats.InactiveAnon,
|
||||
"active_file": stat.MemoryStats.Stats.ActiveFile,
|
||||
"pgfault": stat.MemoryStats.Stats.Pgfault,
|
||||
"inactive_file": stat.MemoryStats.Stats.InactiveFile,
|
||||
"total_pgpgin": stat.MemoryStats.Stats.TotalPgpgin,
|
||||
}
|
||||
acc.AddFields("docker_mem", memfields, tags, now)
|
||||
|
||||
cpufields := map[string]interface{}{
|
||||
"usage_total": stat.CPUStats.CPUUsage.TotalUsage,
|
||||
"usage_in_usermode": stat.CPUStats.CPUUsage.UsageInUsermode,
|
||||
"usage_in_kernelmode": stat.CPUStats.CPUUsage.UsageInKernelmode,
|
||||
"usage_system": stat.CPUStats.SystemCPUUsage,
|
||||
"throttling_periods": stat.CPUStats.ThrottlingData.Periods,
|
||||
"throttling_throttled_periods": stat.CPUStats.ThrottlingData.ThrottledPeriods,
|
||||
"throttling_throttled_time": stat.CPUStats.ThrottlingData.ThrottledTime,
|
||||
}
|
||||
cputags := copyTags(tags)
|
||||
cputags["cpu"] = "cpu-total"
|
||||
acc.AddFields("docker_cpu", cpufields, cputags, now)
|
||||
|
||||
for i, percpu := range stat.CPUStats.CPUUsage.PercpuUsage {
|
||||
percputags := copyTags(tags)
|
||||
percputags["cpu"] = fmt.Sprintf("cpu%d", i)
|
||||
acc.AddFields("docker_cpu", map[string]interface{}{"usage_total": percpu}, percputags, now)
|
||||
}
|
||||
|
||||
for network, netstats := range stat.Networks {
|
||||
netfields := map[string]interface{}{
|
||||
"rx_dropped": netstats.RxDropped,
|
||||
"rx_bytes": netstats.RxBytes,
|
||||
"rx_errors": netstats.RxErrors,
|
||||
"tx_packets": netstats.TxPackets,
|
||||
"tx_dropped": netstats.TxDropped,
|
||||
"rx_packets": netstats.RxPackets,
|
||||
"tx_errors": netstats.TxErrors,
|
||||
"tx_bytes": netstats.TxBytes,
|
||||
}
|
||||
// Create a new network tag dictionary for the "network" tag
|
||||
nettags := copyTags(tags)
|
||||
nettags["network"] = network
|
||||
acc.AddFields("docker_net", netfields, nettags, now)
|
||||
}
|
||||
|
||||
gatherBlockIOMetrics(stat, acc, tags, now)
|
||||
}
|
||||
|
||||
func gatherBlockIOMetrics(
|
||||
stat *docker.Stats,
|
||||
acc inputs.Accumulator,
|
||||
tags map[string]string,
|
||||
now time.Time,
|
||||
) {
|
||||
blkioStats := stat.BlkioStats
|
||||
// Make a map of devices to their block io stats
|
||||
deviceStatMap := make(map[string]map[string]interface{})
|
||||
|
||||
for _, metric := range blkioStats.IOServiceBytesRecursive {
|
||||
device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
|
||||
_, ok := deviceStatMap[device]
|
||||
if !ok {
|
||||
deviceStatMap[device] = make(map[string]interface{})
|
||||
}
|
||||
|
||||
field := fmt.Sprintf("io_service_bytes_recursive_%s", strings.ToLower(metric.Op))
|
||||
deviceStatMap[device][field] = metric.Value
|
||||
}
|
||||
|
||||
for _, metric := range blkioStats.IOServicedRecursive {
|
||||
device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
|
||||
_, ok := deviceStatMap[device]
|
||||
if !ok {
|
||||
deviceStatMap[device] = make(map[string]interface{})
|
||||
}
|
||||
|
||||
field := fmt.Sprintf("io_serviced_recursive_%s", strings.ToLower(metric.Op))
|
||||
deviceStatMap[device][field] = metric.Value
|
||||
}
|
||||
|
||||
for _, metric := range blkioStats.IOQueueRecursive {
|
||||
device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
|
||||
field := fmt.Sprintf("io_queue_recursive_%s", strings.ToLower(metric.Op))
|
||||
deviceStatMap[device][field] = metric.Value
|
||||
}
|
||||
|
||||
for _, metric := range blkioStats.IOServiceTimeRecursive {
|
||||
device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
|
||||
field := fmt.Sprintf("io_service_time_recursive_%s", strings.ToLower(metric.Op))
|
||||
deviceStatMap[device][field] = metric.Value
|
||||
}
|
||||
|
||||
for _, metric := range blkioStats.IOWaitTimeRecursive {
|
||||
device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
|
||||
field := fmt.Sprintf("io_wait_time_%s", strings.ToLower(metric.Op))
|
||||
deviceStatMap[device][field] = metric.Value
|
||||
}
|
||||
|
||||
for _, metric := range blkioStats.IOMergedRecursive {
|
||||
device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
|
||||
field := fmt.Sprintf("io_merged_recursive_%s", strings.ToLower(metric.Op))
|
||||
deviceStatMap[device][field] = metric.Value
|
||||
}
|
||||
|
||||
for _, metric := range blkioStats.IOTimeRecursive {
|
||||
device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
|
||||
field := fmt.Sprintf("io_time_recursive_%s", strings.ToLower(metric.Op))
|
||||
deviceStatMap[device][field] = metric.Value
|
||||
}
|
||||
|
||||
for _, metric := range blkioStats.SectorsRecursive {
|
||||
device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
|
||||
field := fmt.Sprintf("sectors_recursive_%s", strings.ToLower(metric.Op))
|
||||
deviceStatMap[device][field] = metric.Value
|
||||
}
|
||||
|
||||
for device, fields := range deviceStatMap {
|
||||
iotags := copyTags(tags)
|
||||
iotags["device"] = device
|
||||
acc.AddFields("docker_blkio", fields, iotags, now)
|
||||
}
|
||||
}
|
||||
|
||||
func copyTags(in map[string]string) map[string]string {
|
||||
out := make(map[string]string)
|
||||
for k, v := range in {
|
||||
out[k] = v
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
func sliceContains(in string, sl []string) bool {
|
||||
for _, str := range sl {
|
||||
if str == in {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func init() {
|
||||
inputs.Add("docker", func() inputs.Input {
|
||||
return &Docker{}
|
||||
})
|
||||
}
|
||||
@@ -1,190 +0,0 @@
|
||||
package system
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
|
||||
"github.com/fsouza/go-dockerclient"
|
||||
)
|
||||
|
||||
func TestDockerGatherContainerStats(t *testing.T) {
|
||||
var acc testutil.Accumulator
|
||||
stats := testStats()
|
||||
|
||||
tags := map[string]string{
|
||||
"cont_id": "foobarbaz",
|
||||
"cont_name": "redis",
|
||||
"cont_image": "redis/image",
|
||||
}
|
||||
gatherContainerStats(stats, &acc, tags)
|
||||
|
||||
// test docker_net measurement
|
||||
netfields := map[string]interface{}{
|
||||
"rx_dropped": uint64(1),
|
||||
"rx_bytes": uint64(2),
|
||||
"rx_errors": uint64(3),
|
||||
"tx_packets": uint64(4),
|
||||
"tx_dropped": uint64(1),
|
||||
"rx_packets": uint64(2),
|
||||
"tx_errors": uint64(3),
|
||||
"tx_bytes": uint64(4),
|
||||
}
|
||||
nettags := copyTags(tags)
|
||||
nettags["network"] = "eth0"
|
||||
acc.AssertContainsTaggedFields(t, "docker_net", netfields, nettags)
|
||||
|
||||
// test docker_blkio measurement
|
||||
blkiotags := copyTags(tags)
|
||||
blkiotags["device"] = "6:0"
|
||||
blkiofields := map[string]interface{}{
|
||||
"io_service_bytes_recursive_read": uint64(100),
|
||||
"io_serviced_recursive_write": uint64(101),
|
||||
}
|
||||
acc.AssertContainsTaggedFields(t, "docker_blkio", blkiofields, blkiotags)
|
||||
|
||||
// test docker_mem measurement
|
||||
memfields := map[string]interface{}{
|
||||
"max_usage": uint64(1001),
|
||||
"usage": uint64(1111),
|
||||
"fail_count": uint64(1),
|
||||
"limit": uint64(20),
|
||||
"total_pgmafault": uint64(0),
|
||||
"cache": uint64(0),
|
||||
"mapped_file": uint64(0),
|
||||
"total_inactive_file": uint64(0),
|
||||
"pgpgout": uint64(0),
|
||||
"rss": uint64(0),
|
||||
"total_mapped_file": uint64(0),
|
||||
"writeback": uint64(0),
|
||||
"unevictable": uint64(0),
|
||||
"pgpgin": uint64(0),
|
||||
"total_unevictable": uint64(0),
|
||||
"pgmajfault": uint64(0),
|
||||
"total_rss": uint64(44),
|
||||
"total_rss_huge": uint64(444),
|
||||
"total_writeback": uint64(55),
|
||||
"total_inactive_anon": uint64(0),
|
||||
"rss_huge": uint64(0),
|
||||
"hierarchical_memory_limit": uint64(0),
|
||||
"total_pgfault": uint64(0),
|
||||
"total_active_file": uint64(0),
|
||||
"active_anon": uint64(0),
|
||||
"total_active_anon": uint64(0),
|
||||
"total_pgpgout": uint64(0),
|
||||
"total_cache": uint64(0),
|
||||
"inactive_anon": uint64(0),
|
||||
"active_file": uint64(1),
|
||||
"pgfault": uint64(2),
|
||||
"inactive_file": uint64(3),
|
||||
"total_pgpgin": uint64(4),
|
||||
}
|
||||
acc.AssertContainsTaggedFields(t, "docker_mem", memfields, tags)
|
||||
|
||||
// test docker_cpu measurement
|
||||
cputags := copyTags(tags)
|
||||
cputags["cpu"] = "cpu-total"
|
||||
cpufields := map[string]interface{}{
|
||||
"usage_total": uint64(500),
|
||||
"usage_in_usermode": uint64(100),
|
||||
"usage_in_kernelmode": uint64(200),
|
||||
"usage_system": uint64(100),
|
||||
"throttling_periods": uint64(1),
|
||||
"throttling_throttled_periods": uint64(0),
|
||||
"throttling_throttled_time": uint64(0),
|
||||
}
|
||||
acc.AssertContainsTaggedFields(t, "docker_cpu", cpufields, cputags)
|
||||
|
||||
cputags["cpu"] = "cpu0"
|
||||
cpu0fields := map[string]interface{}{
|
||||
"usage_total": uint64(1),
|
||||
}
|
||||
acc.AssertContainsTaggedFields(t, "docker_cpu", cpu0fields, cputags)
|
||||
|
||||
cputags["cpu"] = "cpu1"
|
||||
cpu1fields := map[string]interface{}{
|
||||
"usage_total": uint64(1002),
|
||||
}
|
||||
acc.AssertContainsTaggedFields(t, "docker_cpu", cpu1fields, cputags)
|
||||
}
|
||||
|
||||
func testStats() *docker.Stats {
|
||||
stats := &docker.Stats{
|
||||
Read: time.Now(),
|
||||
Networks: make(map[string]docker.NetworkStats),
|
||||
}
|
||||
|
||||
stats.CPUStats.CPUUsage.PercpuUsage = []uint64{1, 1002}
|
||||
stats.CPUStats.CPUUsage.UsageInUsermode = 100
|
||||
stats.CPUStats.CPUUsage.TotalUsage = 500
|
||||
stats.CPUStats.CPUUsage.UsageInKernelmode = 200
|
||||
stats.CPUStats.SystemCPUUsage = 100
|
||||
stats.CPUStats.ThrottlingData.Periods = 1
|
||||
|
||||
stats.MemoryStats.Stats.TotalPgmafault = 0
|
||||
stats.MemoryStats.Stats.Cache = 0
|
||||
stats.MemoryStats.Stats.MappedFile = 0
|
||||
stats.MemoryStats.Stats.TotalInactiveFile = 0
|
||||
stats.MemoryStats.Stats.Pgpgout = 0
|
||||
stats.MemoryStats.Stats.Rss = 0
|
||||
stats.MemoryStats.Stats.TotalMappedFile = 0
|
||||
stats.MemoryStats.Stats.Writeback = 0
|
||||
stats.MemoryStats.Stats.Unevictable = 0
|
||||
stats.MemoryStats.Stats.Pgpgin = 0
|
||||
stats.MemoryStats.Stats.TotalUnevictable = 0
|
||||
stats.MemoryStats.Stats.Pgmajfault = 0
|
||||
stats.MemoryStats.Stats.TotalRss = 44
|
||||
stats.MemoryStats.Stats.TotalRssHuge = 444
|
||||
stats.MemoryStats.Stats.TotalWriteback = 55
|
||||
stats.MemoryStats.Stats.TotalInactiveAnon = 0
|
||||
stats.MemoryStats.Stats.RssHuge = 0
|
||||
stats.MemoryStats.Stats.HierarchicalMemoryLimit = 0
|
||||
stats.MemoryStats.Stats.TotalPgfault = 0
|
||||
stats.MemoryStats.Stats.TotalActiveFile = 0
|
||||
stats.MemoryStats.Stats.ActiveAnon = 0
|
||||
stats.MemoryStats.Stats.TotalActiveAnon = 0
|
||||
stats.MemoryStats.Stats.TotalPgpgout = 0
|
||||
stats.MemoryStats.Stats.TotalCache = 0
|
||||
stats.MemoryStats.Stats.InactiveAnon = 0
|
||||
stats.MemoryStats.Stats.ActiveFile = 1
|
||||
stats.MemoryStats.Stats.Pgfault = 2
|
||||
stats.MemoryStats.Stats.InactiveFile = 3
|
||||
stats.MemoryStats.Stats.TotalPgpgin = 4
|
||||
|
||||
stats.MemoryStats.MaxUsage = 1001
|
||||
stats.MemoryStats.Usage = 1111
|
||||
stats.MemoryStats.Failcnt = 1
|
||||
stats.MemoryStats.Limit = 20
|
||||
|
||||
stats.Networks["eth0"] = docker.NetworkStats{
|
||||
RxDropped: 1,
|
||||
RxBytes: 2,
|
||||
RxErrors: 3,
|
||||
TxPackets: 4,
|
||||
TxDropped: 1,
|
||||
RxPackets: 2,
|
||||
TxErrors: 3,
|
||||
TxBytes: 4,
|
||||
}
|
||||
|
||||
sbr := docker.BlkioStatsEntry{
|
||||
Major: 6,
|
||||
Minor: 0,
|
||||
Op: "read",
|
||||
Value: 100,
|
||||
}
|
||||
sr := docker.BlkioStatsEntry{
|
||||
Major: 6,
|
||||
Minor: 0,
|
||||
Op: "write",
|
||||
Value: 101,
|
||||
}
|
||||
|
||||
stats.BlkioStats.IOServiceBytesRecursive = append(
|
||||
stats.BlkioStats.IOServiceBytesRecursive, sbr)
|
||||
stats.BlkioStats.IOServicedRecursive = append(
|
||||
stats.BlkioStats.IOServicedRecursive, sr)
|
||||
|
||||
return stats
|
||||
}
|
||||
@@ -1,765 +0,0 @@
|
||||
package elasticsearch
|
||||
|
||||
const clusterResponse = `
|
||||
{
|
||||
"cluster_name": "elasticsearch_telegraf",
|
||||
"status": "green",
|
||||
"timed_out": false,
|
||||
"number_of_nodes": 3,
|
||||
"number_of_data_nodes": 3,
|
||||
"active_primary_shards": 5,
|
||||
"active_shards": 15,
|
||||
"relocating_shards": 0,
|
||||
"initializing_shards": 0,
|
||||
"unassigned_shards": 0,
|
||||
"indices": {
|
||||
"v1": {
|
||||
"status": "green",
|
||||
"number_of_shards": 10,
|
||||
"number_of_replicas": 1,
|
||||
"active_primary_shards": 10,
|
||||
"active_shards": 20,
|
||||
"relocating_shards": 0,
|
||||
"initializing_shards": 0,
|
||||
"unassigned_shards": 0
|
||||
},
|
||||
"v2": {
|
||||
"status": "red",
|
||||
"number_of_shards": 10,
|
||||
"number_of_replicas": 1,
|
||||
"active_primary_shards": 0,
|
||||
"active_shards": 0,
|
||||
"relocating_shards": 0,
|
||||
"initializing_shards": 0,
|
||||
"unassigned_shards": 20
|
||||
}
|
||||
}
|
||||
}
|
||||
`
|
||||
|
||||
var clusterHealthExpected = map[string]interface{}{
|
||||
"status": "green",
|
||||
"timed_out": false,
|
||||
"number_of_nodes": 3,
|
||||
"number_of_data_nodes": 3,
|
||||
"active_primary_shards": 5,
|
||||
"active_shards": 15,
|
||||
"relocating_shards": 0,
|
||||
"initializing_shards": 0,
|
||||
"unassigned_shards": 0,
|
||||
}
|
||||
|
||||
var v1IndexExpected = map[string]interface{}{
|
||||
"status": "green",
|
||||
"number_of_shards": 10,
|
||||
"number_of_replicas": 1,
|
||||
"active_primary_shards": 10,
|
||||
"active_shards": 20,
|
||||
"relocating_shards": 0,
|
||||
"initializing_shards": 0,
|
||||
"unassigned_shards": 0,
|
||||
}
|
||||
|
||||
var v2IndexExpected = map[string]interface{}{
|
||||
"status": "red",
|
||||
"number_of_shards": 10,
|
||||
"number_of_replicas": 1,
|
||||
"active_primary_shards": 0,
|
||||
"active_shards": 0,
|
||||
"relocating_shards": 0,
|
||||
"initializing_shards": 0,
|
||||
"unassigned_shards": 20,
|
||||
}
|
||||
|
||||
const statsResponse = `
|
||||
{
|
||||
"cluster_name": "es-testcluster",
|
||||
"nodes": {
|
||||
"SDFsfSDFsdfFSDSDfSFDSDF": {
|
||||
"timestamp": 1436365550135,
|
||||
"name": "test.host.com",
|
||||
"transport_address": "inet[/127.0.0.1:9300]",
|
||||
"host": "test",
|
||||
"ip": [
|
||||
"inet[/127.0.0.1:9300]",
|
||||
"NONE"
|
||||
],
|
||||
"attributes": {
|
||||
"master": "true"
|
||||
},
|
||||
"indices": {
|
||||
"docs": {
|
||||
"count": 29652,
|
||||
"deleted": 5229
|
||||
},
|
||||
"store": {
|
||||
"size_in_bytes": 37715234,
|
||||
"throttle_time_in_millis": 215
|
||||
},
|
||||
"indexing": {
|
||||
"index_total": 84790,
|
||||
"index_time_in_millis": 29680,
|
||||
"index_current": 0,
|
||||
"delete_total": 13879,
|
||||
"delete_time_in_millis": 1139,
|
||||
"delete_current": 0,
|
||||
"noop_update_total": 0,
|
||||
"is_throttled": false,
|
||||
"throttle_time_in_millis": 0
|
||||
},
|
||||
"get": {
|
||||
"total": 1,
|
||||
"time_in_millis": 2,
|
||||
"exists_total": 0,
|
||||
"exists_time_in_millis": 0,
|
||||
"missing_total": 1,
|
||||
"missing_time_in_millis": 2,
|
||||
"current": 0
|
||||
},
|
||||
"search": {
|
||||
"open_contexts": 0,
|
||||
"query_total": 1452,
|
||||
"query_time_in_millis": 5695,
|
||||
"query_current": 0,
|
||||
"fetch_total": 414,
|
||||
"fetch_time_in_millis": 146,
|
||||
"fetch_current": 0
|
||||
},
|
||||
"merges": {
|
||||
"current": 0,
|
||||
"current_docs": 0,
|
||||
"current_size_in_bytes": 0,
|
||||
"total": 133,
|
||||
"total_time_in_millis": 21060,
|
||||
"total_docs": 203672,
|
||||
"total_size_in_bytes": 142900226
|
||||
},
|
||||
"refresh": {
|
||||
"total": 1076,
|
||||
"total_time_in_millis": 20078
|
||||
},
|
||||
"flush": {
|
||||
"total": 115,
|
||||
"total_time_in_millis": 2401
|
||||
},
|
||||
"warmer": {
|
||||
"current": 0,
|
||||
"total": 2319,
|
||||
"total_time_in_millis": 448
|
||||
},
|
||||
"filter_cache": {
|
||||
"memory_size_in_bytes": 7384,
|
||||
"evictions": 0
|
||||
},
|
||||
"id_cache": {
|
||||
"memory_size_in_bytes": 0
|
||||
},
|
||||
"fielddata": {
|
||||
"memory_size_in_bytes": 12996,
|
||||
"evictions": 0
|
||||
},
|
||||
"percolate": {
|
||||
"total": 0,
|
||||
"time_in_millis": 0,
|
||||
"current": 0,
|
||||
"memory_size_in_bytes": -1,
|
||||
"memory_size": "-1b",
|
||||
"queries": 0
|
||||
},
|
||||
"completion": {
|
||||
"size_in_bytes": 0
|
||||
},
|
||||
"segments": {
|
||||
"count": 134,
|
||||
"memory_in_bytes": 1285212,
|
||||
"index_writer_memory_in_bytes": 0,
|
||||
"index_writer_max_memory_in_bytes": 172368955,
|
||||
"version_map_memory_in_bytes": 611844,
|
||||
"fixed_bit_set_memory_in_bytes": 0
|
||||
},
|
||||
"translog": {
|
||||
"operations": 17702,
|
||||
"size_in_bytes": 17
|
||||
},
|
||||
"suggest": {
|
||||
"total": 0,
|
||||
"time_in_millis": 0,
|
||||
"current": 0
|
||||
},
|
||||
"query_cache": {
|
||||
"memory_size_in_bytes": 0,
|
||||
"evictions": 0,
|
||||
"hit_count": 0,
|
||||
"miss_count": 0
|
||||
},
|
||||
"recovery": {
|
||||
"current_as_source": 0,
|
||||
"current_as_target": 0,
|
||||
"throttle_time_in_millis": 0
|
||||
}
|
||||
},
|
||||
"os": {
|
||||
"timestamp": 1436460392944,
|
||||
"load_average": [
|
||||
0.01,
|
||||
0.04,
|
||||
0.05
|
||||
],
|
||||
"mem": {
|
||||
"free_in_bytes": 477761536,
|
||||
"used_in_bytes": 1621868544,
|
||||
"free_percent": 74,
|
||||
"used_percent": 25,
|
||||
"actual_free_in_bytes": 1565470720,
|
||||
"actual_used_in_bytes": 534159360
|
||||
},
|
||||
"swap": {
|
||||
"used_in_bytes": 0,
|
||||
"free_in_bytes": 487997440
|
||||
}
|
||||
},
|
||||
"process": {
|
||||
"timestamp": 1436460392945,
|
||||
"open_file_descriptors": 160,
|
||||
"cpu": {
|
||||
"percent": 2,
|
||||
"sys_in_millis": 1870,
|
||||
"user_in_millis": 13610,
|
||||
"total_in_millis": 15480
|
||||
},
|
||||
"mem": {
|
||||
"total_virtual_in_bytes": 4747890688
|
||||
}
|
||||
},
|
||||
"jvm": {
|
||||
"timestamp": 1436460392945,
|
||||
"uptime_in_millis": 202245,
|
||||
"mem": {
|
||||
"heap_used_in_bytes": 52709568,
|
||||
"heap_used_percent": 5,
|
||||
"heap_committed_in_bytes": 259522560,
|
||||
"heap_max_in_bytes": 1038876672,
|
||||
"non_heap_used_in_bytes": 39634576,
|
||||
"non_heap_committed_in_bytes": 40841216,
|
||||
"pools": {
|
||||
"young": {
|
||||
"used_in_bytes": 32685760,
|
||||
"max_in_bytes": 279183360,
|
||||
"peak_used_in_bytes": 71630848,
|
||||
"peak_max_in_bytes": 279183360
|
||||
},
|
||||
"survivor": {
|
||||
"used_in_bytes": 8912880,
|
||||
"max_in_bytes": 34865152,
|
||||
"peak_used_in_bytes": 8912888,
|
||||
"peak_max_in_bytes": 34865152
|
||||
},
|
||||
"old": {
|
||||
"used_in_bytes": 11110928,
|
||||
"max_in_bytes": 724828160,
|
||||
"peak_used_in_bytes": 14354608,
|
||||
"peak_max_in_bytes": 724828160
|
||||
}
|
||||
}
|
||||
},
|
||||
"threads": {
|
||||
"count": 44,
|
||||
"peak_count": 45
|
||||
},
|
||||
"gc": {
|
||||
"collectors": {
|
||||
"young": {
|
||||
"collection_count": 2,
|
||||
"collection_time_in_millis": 98
|
||||
},
|
||||
"old": {
|
||||
"collection_count": 1,
|
||||
"collection_time_in_millis": 24
|
||||
}
|
||||
}
|
||||
},
|
||||
"buffer_pools": {
|
||||
"direct": {
|
||||
"count": 40,
|
||||
"used_in_bytes": 6304239,
|
||||
"total_capacity_in_bytes": 6304239
|
||||
},
|
||||
"mapped": {
|
||||
"count": 0,
|
||||
"used_in_bytes": 0,
|
||||
"total_capacity_in_bytes": 0
|
||||
}
|
||||
}
|
||||
},
|
||||
"thread_pool": {
|
||||
"percolate": {
|
||||
"threads": 123,
|
||||
"queue": 23,
|
||||
"active": 13,
|
||||
"rejected": 235,
|
||||
"largest": 23,
|
||||
"completed": 33
|
||||
},
|
||||
"fetch_shard_started": {
|
||||
"threads": 3,
|
||||
"queue": 1,
|
||||
"active": 5,
|
||||
"rejected": 6,
|
||||
"largest": 4,
|
||||
"completed": 54
|
||||
},
|
||||
"listener": {
|
||||
"threads": 1,
|
||||
"queue": 2,
|
||||
"active": 4,
|
||||
"rejected": 8,
|
||||
"largest": 1,
|
||||
"completed": 1
|
||||
},
|
||||
"index": {
|
||||
"threads": 6,
|
||||
"queue": 8,
|
||||
"active": 4,
|
||||
"rejected": 2,
|
||||
"largest": 3,
|
||||
"completed": 6
|
||||
},
|
||||
"refresh": {
|
||||
"threads": 23,
|
||||
"queue": 7,
|
||||
"active": 3,
|
||||
"rejected": 4,
|
||||
"largest": 8,
|
||||
"completed": 3
|
||||
},
|
||||
"suggest": {
|
||||
"threads": 2,
|
||||
"queue": 7,
|
||||
"active": 2,
|
||||
"rejected": 1,
|
||||
"largest": 8,
|
||||
"completed": 3
|
||||
},
|
||||
"generic": {
|
||||
"threads": 1,
|
||||
"queue": 4,
|
||||
"active": 6,
|
||||
"rejected": 3,
|
||||
"largest": 2,
|
||||
"completed": 27
|
||||
},
|
||||
"warmer": {
|
||||
"threads": 2,
|
||||
"queue": 7,
|
||||
"active": 3,
|
||||
"rejected": 2,
|
||||
"largest": 3,
|
||||
"completed": 1
|
||||
},
|
||||
"search": {
|
||||
"threads": 5,
|
||||
"queue": 7,
|
||||
"active": 2,
|
||||
"rejected": 7,
|
||||
"largest": 2,
|
||||
"completed": 4
|
||||
},
|
||||
"flush": {
|
||||
"threads": 3,
|
||||
"queue": 8,
|
||||
"active": 0,
|
||||
"rejected": 1,
|
||||
"largest": 5,
|
||||
"completed": 3
|
||||
},
|
||||
"optimize": {
|
||||
"threads": 3,
|
||||
"queue": 4,
|
||||
"active": 1,
|
||||
"rejected": 2,
|
||||
"largest": 7,
|
||||
"completed": 3
|
||||
},
|
||||
"fetch_shard_store": {
|
||||
"threads": 1,
|
||||
"queue": 7,
|
||||
"active": 4,
|
||||
"rejected": 2,
|
||||
"largest": 4,
|
||||
"completed": 1
|
||||
},
|
||||
"management": {
|
||||
"threads": 2,
|
||||
"queue": 3,
|
||||
"active": 1,
|
||||
"rejected": 6,
|
||||
"largest": 2,
|
||||
"completed": 22
|
||||
},
|
||||
"get": {
|
||||
"threads": 1,
|
||||
"queue": 8,
|
||||
"active": 4,
|
||||
"rejected": 3,
|
||||
"largest": 2,
|
||||
"completed": 1
|
||||
},
|
||||
"merge": {
|
||||
"threads": 6,
|
||||
"queue": 4,
|
||||
"active": 5,
|
||||
"rejected": 2,
|
||||
"largest": 5,
|
||||
"completed": 1
|
||||
},
|
||||
"bulk": {
|
||||
"threads": 4,
|
||||
"queue": 5,
|
||||
"active": 7,
|
||||
"rejected": 3,
|
||||
"largest": 1,
|
||||
"completed": 4
|
||||
},
|
||||
"snapshot": {
|
||||
"threads": 8,
|
||||
"queue": 5,
|
||||
"active": 6,
|
||||
"rejected": 2,
|
||||
"largest": 1,
|
||||
"completed": 0
|
||||
}
|
||||
},
|
||||
"fs": {
|
||||
"timestamp": 1436460392946,
|
||||
"total": {
|
||||
"total_in_bytes": 19507089408,
|
||||
"free_in_bytes": 16909316096,
|
||||
"available_in_bytes": 15894814720
|
||||
},
|
||||
"data": [
|
||||
{
|
||||
"path": "/usr/share/elasticsearch/data/elasticsearch/nodes/0",
|
||||
"mount": "/usr/share/elasticsearch/data",
|
||||
"type": "ext4",
|
||||
"total_in_bytes": 19507089408,
|
||||
"free_in_bytes": 16909316096,
|
||||
"available_in_bytes": 15894814720
|
||||
}
|
||||
]
|
||||
},
|
||||
"transport": {
|
||||
"server_open": 13,
|
||||
"rx_count": 6,
|
||||
"rx_size_in_bytes": 1380,
|
||||
"tx_count": 6,
|
||||
"tx_size_in_bytes": 1380
|
||||
},
|
||||
"http": {
|
||||
"current_open": 3,
|
||||
"total_opened": 3
|
||||
},
|
||||
"breakers": {
|
||||
"fielddata": {
|
||||
"limit_size_in_bytes": 623326003,
|
||||
"limit_size": "594.4mb",
|
||||
"estimated_size_in_bytes": 0,
|
||||
"estimated_size": "0b",
|
||||
"overhead": 1.03,
|
||||
"tripped": 0
|
||||
},
|
||||
"request": {
|
||||
"limit_size_in_bytes": 415550668,
|
||||
"limit_size": "396.2mb",
|
||||
"estimated_size_in_bytes": 0,
|
||||
"estimated_size": "0b",
|
||||
"overhead": 1.0,
|
||||
"tripped": 0
|
||||
},
|
||||
"parent": {
|
||||
"limit_size_in_bytes": 727213670,
|
||||
"limit_size": "693.5mb",
|
||||
"estimated_size_in_bytes": 0,
|
||||
"estimated_size": "0b",
|
||||
"overhead": 1.0,
|
||||
"tripped": 0
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
`
|
||||
|
||||
var indicesExpected = map[string]interface{}{
|
||||
"id_cache_memory_size_in_bytes": float64(0),
|
||||
"completion_size_in_bytes": float64(0),
|
||||
"suggest_total": float64(0),
|
||||
"suggest_time_in_millis": float64(0),
|
||||
"suggest_current": float64(0),
|
||||
"query_cache_memory_size_in_bytes": float64(0),
|
||||
"query_cache_evictions": float64(0),
|
||||
"query_cache_hit_count": float64(0),
|
||||
"query_cache_miss_count": float64(0),
|
||||
"store_size_in_bytes": float64(37715234),
|
||||
"store_throttle_time_in_millis": float64(215),
|
||||
"merges_current_docs": float64(0),
|
||||
"merges_current_size_in_bytes": float64(0),
|
||||
"merges_total": float64(133),
|
||||
"merges_total_time_in_millis": float64(21060),
|
||||
"merges_total_docs": float64(203672),
|
||||
"merges_total_size_in_bytes": float64(142900226),
|
||||
"merges_current": float64(0),
|
||||
"filter_cache_memory_size_in_bytes": float64(7384),
|
||||
"filter_cache_evictions": float64(0),
|
||||
"indexing_index_total": float64(84790),
|
||||
"indexing_index_time_in_millis": float64(29680),
|
||||
"indexing_index_current": float64(0),
|
||||
"indexing_noop_update_total": float64(0),
|
||||
"indexing_throttle_time_in_millis": float64(0),
|
||||
"indexing_delete_total": float64(13879),
|
||||
"indexing_delete_time_in_millis": float64(1139),
|
||||
"indexing_delete_current": float64(0),
|
||||
"get_exists_time_in_millis": float64(0),
|
||||
"get_missing_total": float64(1),
|
||||
"get_missing_time_in_millis": float64(2),
|
||||
"get_current": float64(0),
|
||||
"get_total": float64(1),
|
||||
"get_time_in_millis": float64(2),
|
||||
"get_exists_total": float64(0),
|
||||
"refresh_total": float64(1076),
|
||||
"refresh_total_time_in_millis": float64(20078),
|
||||
"percolate_current": float64(0),
|
||||
"percolate_memory_size_in_bytes": float64(-1),
|
||||
"percolate_queries": float64(0),
|
||||
"percolate_total": float64(0),
|
||||
"percolate_time_in_millis": float64(0),
|
||||
"translog_operations": float64(17702),
|
||||
"translog_size_in_bytes": float64(17),
|
||||
"recovery_current_as_source": float64(0),
|
||||
"recovery_current_as_target": float64(0),
|
||||
"recovery_throttle_time_in_millis": float64(0),
|
||||
"docs_count": float64(29652),
|
||||
"docs_deleted": float64(5229),
|
||||
"flush_total_time_in_millis": float64(2401),
|
||||
"flush_total": float64(115),
|
||||
"fielddata_memory_size_in_bytes": float64(12996),
|
||||
"fielddata_evictions": float64(0),
|
||||
"search_fetch_current": float64(0),
|
||||
"search_open_contexts": float64(0),
|
||||
"search_query_total": float64(1452),
|
||||
"search_query_time_in_millis": float64(5695),
|
||||
"search_query_current": float64(0),
|
||||
"search_fetch_total": float64(414),
|
||||
"search_fetch_time_in_millis": float64(146),
|
||||
"warmer_current": float64(0),
|
||||
"warmer_total": float64(2319),
|
||||
"warmer_total_time_in_millis": float64(448),
|
||||
"segments_count": float64(134),
|
||||
"segments_memory_in_bytes": float64(1285212),
|
||||
"segments_index_writer_memory_in_bytes": float64(0),
|
||||
"segments_index_writer_max_memory_in_bytes": float64(172368955),
|
||||
"segments_version_map_memory_in_bytes": float64(611844),
|
||||
"segments_fixed_bit_set_memory_in_bytes": float64(0),
|
||||
}
|
||||
|
||||
var osExpected = map[string]interface{}{
|
||||
"load_average_0": float64(0.01),
|
||||
"load_average_1": float64(0.04),
|
||||
"load_average_2": float64(0.05),
|
||||
"swap_used_in_bytes": float64(0),
|
||||
"swap_free_in_bytes": float64(487997440),
|
||||
"timestamp": float64(1436460392944),
|
||||
"mem_free_percent": float64(74),
|
||||
"mem_used_percent": float64(25),
|
||||
"mem_actual_free_in_bytes": float64(1565470720),
|
||||
"mem_actual_used_in_bytes": float64(534159360),
|
||||
"mem_free_in_bytes": float64(477761536),
|
||||
"mem_used_in_bytes": float64(1621868544),
|
||||
}
|
||||
|
||||
var processExpected = map[string]interface{}{
|
||||
"mem_total_virtual_in_bytes": float64(4747890688),
|
||||
"timestamp": float64(1436460392945),
|
||||
"open_file_descriptors": float64(160),
|
||||
"cpu_total_in_millis": float64(15480),
|
||||
"cpu_percent": float64(2),
|
||||
"cpu_sys_in_millis": float64(1870),
|
||||
"cpu_user_in_millis": float64(13610),
|
||||
}
|
||||
|
||||
var jvmExpected = map[string]interface{}{
|
||||
"timestamp": float64(1436460392945),
|
||||
"uptime_in_millis": float64(202245),
|
||||
"mem_non_heap_used_in_bytes": float64(39634576),
|
||||
"mem_non_heap_committed_in_bytes": float64(40841216),
|
||||
"mem_pools_young_max_in_bytes": float64(279183360),
|
||||
"mem_pools_young_peak_used_in_bytes": float64(71630848),
|
||||
"mem_pools_young_peak_max_in_bytes": float64(279183360),
|
||||
"mem_pools_young_used_in_bytes": float64(32685760),
|
||||
"mem_pools_survivor_peak_used_in_bytes": float64(8912888),
|
||||
"mem_pools_survivor_peak_max_in_bytes": float64(34865152),
|
||||
"mem_pools_survivor_used_in_bytes": float64(8912880),
|
||||
"mem_pools_survivor_max_in_bytes": float64(34865152),
|
||||
"mem_pools_old_peak_max_in_bytes": float64(724828160),
|
||||
"mem_pools_old_used_in_bytes": float64(11110928),
|
||||
"mem_pools_old_max_in_bytes": float64(724828160),
|
||||
"mem_pools_old_peak_used_in_bytes": float64(14354608),
|
||||
"mem_heap_used_in_bytes": float64(52709568),
|
||||
"mem_heap_used_percent": float64(5),
|
||||
"mem_heap_committed_in_bytes": float64(259522560),
|
||||
"mem_heap_max_in_bytes": float64(1038876672),
|
||||
"threads_peak_count": float64(45),
|
||||
"threads_count": float64(44),
|
||||
"gc_collectors_young_collection_count": float64(2),
|
||||
"gc_collectors_young_collection_time_in_millis": float64(98),
|
||||
"gc_collectors_old_collection_count": float64(1),
|
||||
"gc_collectors_old_collection_time_in_millis": float64(24),
|
||||
"buffer_pools_direct_count": float64(40),
|
||||
"buffer_pools_direct_used_in_bytes": float64(6304239),
|
||||
"buffer_pools_direct_total_capacity_in_bytes": float64(6304239),
|
||||
"buffer_pools_mapped_count": float64(0),
|
||||
"buffer_pools_mapped_used_in_bytes": float64(0),
|
||||
"buffer_pools_mapped_total_capacity_in_bytes": float64(0),
|
||||
}
|
||||
|
||||
var threadPoolExpected = map[string]interface{}{
|
||||
"merge_threads": float64(6),
|
||||
"merge_queue": float64(4),
|
||||
"merge_active": float64(5),
|
||||
"merge_rejected": float64(2),
|
||||
"merge_largest": float64(5),
|
||||
"merge_completed": float64(1),
|
||||
"bulk_threads": float64(4),
|
||||
"bulk_queue": float64(5),
|
||||
"bulk_active": float64(7),
|
||||
"bulk_rejected": float64(3),
|
||||
"bulk_largest": float64(1),
|
||||
"bulk_completed": float64(4),
|
||||
"warmer_threads": float64(2),
|
||||
"warmer_queue": float64(7),
|
||||
"warmer_active": float64(3),
|
||||
"warmer_rejected": float64(2),
|
||||
"warmer_largest": float64(3),
|
||||
"warmer_completed": float64(1),
|
||||
"get_largest": float64(2),
|
||||
"get_completed": float64(1),
|
||||
"get_threads": float64(1),
|
||||
"get_queue": float64(8),
|
||||
"get_active": float64(4),
|
||||
"get_rejected": float64(3),
|
||||
"index_threads": float64(6),
|
||||
"index_queue": float64(8),
|
||||
"index_active": float64(4),
|
||||
"index_rejected": float64(2),
|
||||
"index_largest": float64(3),
|
||||
"index_completed": float64(6),
|
||||
"suggest_threads": float64(2),
|
||||
"suggest_queue": float64(7),
|
||||
"suggest_active": float64(2),
|
||||
"suggest_rejected": float64(1),
|
||||
"suggest_largest": float64(8),
|
||||
"suggest_completed": float64(3),
|
||||
"fetch_shard_store_queue": float64(7),
|
||||
"fetch_shard_store_active": float64(4),
|
||||
"fetch_shard_store_rejected": float64(2),
|
||||
"fetch_shard_store_largest": float64(4),
|
||||
"fetch_shard_store_completed": float64(1),
|
||||
"fetch_shard_store_threads": float64(1),
|
||||
"management_threads": float64(2),
|
||||
"management_queue": float64(3),
|
||||
"management_active": float64(1),
|
||||
"management_rejected": float64(6),
|
||||
"management_largest": float64(2),
|
||||
"management_completed": float64(22),
|
||||
"percolate_queue": float64(23),
|
||||
"percolate_active": float64(13),
|
||||
"percolate_rejected": float64(235),
|
||||
"percolate_largest": float64(23),
|
||||
"percolate_completed": float64(33),
|
||||
"percolate_threads": float64(123),
|
||||
"listener_active": float64(4),
|
||||
"listener_rejected": float64(8),
|
||||
"listener_largest": float64(1),
|
||||
"listener_completed": float64(1),
|
||||
"listener_threads": float64(1),
|
||||
"listener_queue": float64(2),
|
||||
"search_rejected": float64(7),
|
||||
"search_largest": float64(2),
|
||||
"search_completed": float64(4),
|
||||
"search_threads": float64(5),
|
||||
"search_queue": float64(7),
|
||||
"search_active": float64(2),
|
||||
"fetch_shard_started_threads": float64(3),
|
||||
"fetch_shard_started_queue": float64(1),
|
||||
"fetch_shard_started_active": float64(5),
|
||||
"fetch_shard_started_rejected": float64(6),
|
||||
"fetch_shard_started_largest": float64(4),
|
||||
"fetch_shard_started_completed": float64(54),
|
||||
"refresh_rejected": float64(4),
|
||||
"refresh_largest": float64(8),
|
||||
"refresh_completed": float64(3),
|
||||
"refresh_threads": float64(23),
|
||||
"refresh_queue": float64(7),
|
||||
"refresh_active": float64(3),
|
||||
"optimize_threads": float64(3),
|
||||
"optimize_queue": float64(4),
|
||||
"optimize_active": float64(1),
|
||||
"optimize_rejected": float64(2),
|
||||
"optimize_largest": float64(7),
|
||||
"optimize_completed": float64(3),
|
||||
"snapshot_largest": float64(1),
|
||||
"snapshot_completed": float64(0),
|
||||
"snapshot_threads": float64(8),
|
||||
"snapshot_queue": float64(5),
|
||||
"snapshot_active": float64(6),
|
||||
"snapshot_rejected": float64(2),
|
||||
"generic_threads": float64(1),
|
||||
"generic_queue": float64(4),
|
||||
"generic_active": float64(6),
|
||||
"generic_rejected": float64(3),
|
||||
"generic_largest": float64(2),
|
||||
"generic_completed": float64(27),
|
||||
"flush_threads": float64(3),
|
||||
"flush_queue": float64(8),
|
||||
"flush_active": float64(0),
|
||||
"flush_rejected": float64(1),
|
||||
"flush_largest": float64(5),
|
||||
"flush_completed": float64(3),
|
||||
}
|
||||
|
||||
var fsExpected = map[string]interface{}{
|
||||
"data_0_total_in_bytes": float64(19507089408),
|
||||
"data_0_free_in_bytes": float64(16909316096),
|
||||
"data_0_available_in_bytes": float64(15894814720),
|
||||
"timestamp": float64(1436460392946),
|
||||
"total_free_in_bytes": float64(16909316096),
|
||||
"total_available_in_bytes": float64(15894814720),
|
||||
"total_total_in_bytes": float64(19507089408),
|
||||
}
|
||||
|
||||
var transportExpected = map[string]interface{}{
|
||||
"server_open": float64(13),
|
||||
"rx_count": float64(6),
|
||||
"rx_size_in_bytes": float64(1380),
|
||||
"tx_count": float64(6),
|
||||
"tx_size_in_bytes": float64(1380),
|
||||
}
|
||||
|
||||
var httpExpected = map[string]interface{}{
|
||||
"current_open": float64(3),
|
||||
"total_opened": float64(3),
|
||||
}
|
||||
|
||||
var breakersExpected = map[string]interface{}{
|
||||
"fielddata_estimated_size_in_bytes": float64(0),
|
||||
"fielddata_overhead": float64(1.03),
|
||||
"fielddata_tripped": float64(0),
|
||||
"fielddata_limit_size_in_bytes": float64(623326003),
|
||||
"request_estimated_size_in_bytes": float64(0),
|
||||
"request_overhead": float64(1.0),
|
||||
"request_tripped": float64(0),
|
||||
"request_limit_size_in_bytes": float64(415550668),
|
||||
"parent_overhead": float64(1.0),
|
||||
"parent_tripped": float64(0),
|
||||
"parent_limit_size_in_bytes": float64(727213670),
|
||||
"parent_estimated_size_in_bytes": float64(0),
|
||||
}
|
||||
@@ -1,45 +0,0 @@
|
||||
# Exec Plugin
|
||||
|
||||
The exec plugin executes arbitrary commands that output JSON, flattens the JSON, and collects
all numeric values it finds, treating them as floats.
|
||||
|
||||
For example, if you have a JSON-returning command called mycollector, you could
set up the exec plugin with:
|
||||
|
||||
```
|
||||
[[inputs.exec]]
|
||||
command = "/usr/bin/mycollector --output=json"
|
||||
name_suffix = "_mycollector"
|
||||
interval = "10s"
|
||||
```
|
||||
|
||||
The name suffix is appended to the measurement name "exec", giving "exec_name_suffix" (for example, "exec_mycollector"), to identify the input stream.
|
||||
|
||||
The interval determines how often a particular command should be run. On each collection,
the exec plugin re-runs a command only if at least `interval` seconds have passed since it
last ran that command.
|
||||
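A minimal sketch of that gating logic, assuming a hypothetical per-command `lastRun` timestamp (this is not the plugin's actual implementation):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	interval := 2 * time.Second
	var lastRun time.Time // zero value: the command has never run

	for i := 0; i < 5; i++ {
		if time.Since(lastRun) >= interval {
			fmt.Println("tick", i, "- running the command")
			lastRun = time.Now()
		} else {
			fmt.Println("tick", i, "- skipping, interval has not elapsed yet")
		}
		time.Sleep(time.Second)
	}
}
```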
|
||||
|
||||
# Sample
|
||||
|
||||
Let's say that we have a command with the name_suffix "_mycollector", which gives the following output:
|
||||
```json
|
||||
{
|
||||
"a": 0.5,
|
||||
"b": {
|
||||
"c": 0.1,
|
||||
"d": 5
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
The collected metrics will be stored as field values under the same measurement "exec_mycollector":
|
||||
```
|
||||
exec_mycollector a=0.5,b_c=0.1,b_d=5 1452815002357578567
|
||||
```
|
||||
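For reference, the flattening rule can be reproduced with a small standalone Go sketch; `flattenJSON` is a hypothetical helper written only to illustrate how nested objects and arrays become underscore-joined field names, not the plugin's actual code:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// flattenJSON walks a decoded JSON value and records every numeric leaf under
// an underscore-joined key, so {"b": {"c": 0.1}} becomes the field b_c=0.1 and
// arrays are indexed, e.g. {"users": [7]} becomes users_0=7.
func flattenJSON(prefix string, v interface{}, out map[string]float64) {
	switch t := v.(type) {
	case map[string]interface{}:
		for k, val := range t {
			key := k
			if prefix != "" {
				key = prefix + "_" + k
			}
			flattenJSON(key, val, out)
		}
	case []interface{}:
		for i, val := range t {
			flattenJSON(fmt.Sprintf("%s_%d", prefix, i), val, out)
		}
	case float64: // encoding/json decodes every JSON number as float64
		out[prefix] = t
	}
}

func main() {
	raw := []byte(`{"a": 0.5, "b": {"c": 0.1, "d": 5}}`)
	var decoded interface{}
	if err := json.Unmarshal(raw, &decoded); err != nil {
		panic(err)
	}
	fields := map[string]float64{}
	flattenJSON("", decoded, fields)
	fmt.Println(fields) // map[a:0.5 b_c:0.1 b_d:5]
}
```

Run against the sample payload above, it yields the same field names that appear in the `exec_mycollector a=0.5,b_c=0.1,b_d=5 ...` line.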
|
||||
Other options for modifying the measurement names are:
|
||||
```
|
||||
name_override = "newname"
|
||||
name_prefix = "prefix_"
|
||||
```
|
||||
@@ -1,99 +0,0 @@
|
||||
package exec
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// Midnight 9/22/2015
|
||||
const baseTimeSeconds = 1442905200
|
||||
|
||||
const validJson = `
|
||||
{
|
||||
"status": "green",
|
||||
"num_processes": 82,
|
||||
"cpu": {
|
||||
"status": "red",
|
||||
"nil_status": null,
|
||||
"used": 8234,
|
||||
"free": 32
|
||||
},
|
||||
"percent": 0.81,
|
||||
"users": [0, 1, 2, 3]
|
||||
}`
|
||||
|
||||
const malformedJson = `
|
||||
{
|
||||
"status": "green",
|
||||
`
|
||||
|
||||
type runnerMock struct {
|
||||
out []byte
|
||||
err error
|
||||
}
|
||||
|
||||
func newRunnerMock(out []byte, err error) Runner {
|
||||
return &runnerMock{
|
||||
out: out,
|
||||
err: err,
|
||||
}
|
||||
}
|
||||
|
||||
func (r runnerMock) Run(e *Exec) ([]byte, error) {
|
||||
if r.err != nil {
|
||||
return nil, r.err
|
||||
}
|
||||
return r.out, nil
|
||||
}
|
||||
|
||||
func TestExec(t *testing.T) {
|
||||
e := &Exec{
|
||||
runner: newRunnerMock([]byte(validJson), nil),
|
||||
Command: "testcommand arg1",
|
||||
}
|
||||
|
||||
var acc testutil.Accumulator
|
||||
err := e.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, acc.NFields(), 8, "non-numeric measurements should be ignored")
|
||||
|
||||
fields := map[string]interface{}{
|
||||
"num_processes": float64(82),
|
||||
"cpu_used": float64(8234),
|
||||
"cpu_free": float64(32),
|
||||
"percent": float64(0.81),
|
||||
"users_0": float64(0),
|
||||
"users_1": float64(1),
|
||||
"users_2": float64(2),
|
||||
"users_3": float64(3),
|
||||
}
|
||||
acc.AssertContainsFields(t, "exec", fields)
|
||||
}
|
||||
|
||||
func TestExecMalformed(t *testing.T) {
|
||||
e := &Exec{
|
||||
runner: newRunnerMock([]byte(malformedJson), nil),
|
||||
Command: "badcommand arg1",
|
||||
}
|
||||
|
||||
var acc testutil.Accumulator
|
||||
err := e.Gather(&acc)
|
||||
require.Error(t, err)
|
||||
assert.Equal(t, acc.NFields(), 0, "No new points should have been added")
|
||||
}
|
||||
|
||||
func TestCommandError(t *testing.T) {
|
||||
e := &Exec{
|
||||
runner: newRunnerMock(nil, fmt.Errorf("exit status code 1")),
|
||||
Command: "badcommand",
|
||||
}
|
||||
|
||||
var acc testutil.Accumulator
|
||||
err := e.Gather(&acc)
|
||||
require.Error(t, err)
|
||||
assert.Equal(t, acc.NFields(), 0, "No new points should have been added")
|
||||
}
|
||||
@@ -1,369 +0,0 @@
|
||||
# github_webhooks
|
||||
|
||||
This is a Telegraf service plugin that listens for events sent by GitHub's Webhooks service and persists data from them into the configured outputs. To set up the listener, first generate the proper configuration:
|
||||
```sh
|
||||
$ telegraf -sample-config -input-filter github_webhooks -output-filter influxdb > config.conf.new
|
||||
```
|
||||
Change the config file to point to the InfluxDB server you are using and adjust the settings to match your environment. Once that is complete:
|
||||
```sh
|
||||
$ cp config.conf.new /etc/telegraf/telegraf.conf
|
||||
$ sudo service telegraf start
|
||||
```
|
||||
Once the server is running, configure your organization's webhooks to point at the `github_webhooks` service. Go to `github.com/{my_organization}` and click `Settings > Webhooks > Add webhook`. In the resulting menu, set `Payload URL` to `http://<my_ip>:1618`, `Content type` to `application/json`, and under `Which events would you like to trigger this webhook?` select 'Send me **everything**'. By default all events are written to the `github_webhooks` measurement; this is configurable by setting `measurement_name` in the config file.
|
||||
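Before pointing GitHub at the listener, you can exercise it locally with a short Go program. This is only a sketch: the port and route follow the defaults documented here, and the payload is fabricated, containing just the fields the plugin reads for a `commit_comment` event:

```go
package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	// Fabricated commit_comment payload with only the fields the plugin reads.
	payload := []byte(`{
		"repository": {"full_name": "example/repo", "private": false,
			"stargazers_count": 1, "forks_count": 0, "open_issues_count": 2},
		"sender": {"login": "octocat", "site_admin": false},
		"comment": {"commit_id": "abc123", "body": "nice change"}
	}`)

	req, err := http.NewRequest("POST", "http://localhost:1618/", bytes.NewReader(payload))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")
	// The listener dispatches on this header to pick the event type.
	req.Header.Set("X-Github-Event", "commit_comment")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("listener responded with", resp.Status)
}
```

If the service is running, the event is buffered and written to the `github_webhooks` measurement on the next collection interval.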
|
||||
## Events
|
||||
|
||||
The title of each section below links to the full payload and details for that event. The body lists the information from the event that is persisted, in the following format:
|
||||
```
|
||||
# TAGS
|
||||
* 'tagKey' = `tagValue` type
|
||||
# FIELDS
|
||||
* 'fieldKey' = `fieldValue` type
|
||||
```
|
||||
The tag and field values show where on the incoming JSON payload each piece of data is sourced from.
|
||||
|
||||
#### [`commit_comment` event](https://developer.github.com/v3/activity/events/types/#commitcommentevent)
|
||||
|
||||
**Tags:**
|
||||
* 'event' = `headers[X-Github-Event]` string
|
||||
* 'repository' = `event.repository.full_name` string
|
||||
* 'private' = `event.repository.private` bool
|
||||
* 'user' = `event.sender.login` string
|
||||
* 'admin' = `event.sender.site_admin` bool
|
||||
|
||||
**Fields:**
|
||||
* 'stars' = `event.repository.stargazers_count` int
|
||||
* 'forks' = `event.repository.forks_count` int
|
||||
* 'issues' = `event.repository.open_issues_count` int
|
||||
* 'commit' = `event.comment.commit_id` string
|
||||
* 'comment' = `event.comment.body` string
|
||||
|
||||
#### [`create` event](https://developer.github.com/v3/activity/events/types/#createevent)
|
||||
|
||||
**Tags:**
|
||||
* 'event' = `headers[X-Github-Event]` string
|
||||
* 'repository' = `event.repository.full_name` string
|
||||
* 'private' = `event.repository.private` bool
|
||||
* 'user' = `event.sender.login` string
|
||||
* 'admin' = `event.sender.site_admin` bool
|
||||
|
||||
**Fields:**
|
||||
* 'stars' = `event.repository.stargazers_count` int
|
||||
* 'forks' = `event.repository.forks_count` int
|
||||
* 'issues' = `event.repository.open_issues_count` int
|
||||
* 'ref' = `event.ref` string
* 'refType' = `event.ref_type` string
|
||||
|
||||
#### [`delete` event](https://developer.github.com/v3/activity/events/types/#deleteevent)
|
||||
|
||||
**Tags:**
|
||||
* 'event' = `headers[X-Github-Event]` string
|
||||
* 'repository' = `event.repository.full_name` string
|
||||
* 'private' = `event.repository.private` bool
|
||||
* 'user' = `event.sender.login` string
|
||||
* 'admin' = `event.sender.site_admin` bool
|
||||
|
||||
**Fields:**
|
||||
* 'stars' = `event.repository.stargazers_count` int
|
||||
* 'forks' = `event.repository.forks_count` int
|
||||
* 'issues' = `event.repository.open_issues_count` int
|
||||
* 'ref' = `event.ref` string
* 'refType' = `event.ref_type` string
|
||||
|
||||
#### [`deployment` event](https://developer.github.com/v3/activity/events/types/#deploymentevent)
|
||||
|
||||
**Tags:**
|
||||
* 'event' = `headers[X-Github-Event]` string
|
||||
* 'repository' = `event.repository.full_name` string
|
||||
* 'private' = `event.repository.private` bool
|
||||
* 'user' = `event.sender.login` string
|
||||
* 'admin' = `event.sender.site_admin` bool
|
||||
|
||||
**Fields:**
|
||||
* 'stars' = `event.repository.stargazers_count` int
|
||||
* 'forks' = `event.repository.forks_count` int
|
||||
* 'issues' = `event.repository.open_issues_count` int
|
||||
* 'commit' = `event.deployment.sha` string
|
||||
* 'task' = `event.deployment.task` string
|
||||
* 'environment' = `event.deployment.environment` string
|
||||
* 'description' = `event.deployment.description` string
|
||||
|
||||
#### [`deployment_status` event](https://developer.github.com/v3/activity/events/types/#deploymentstatusevent)
|
||||
|
||||
**Tags:**
|
||||
* 'event' = `headers[X-Github-Event]` string
|
||||
* 'repository' = `event.repository.full_name` string
|
||||
* 'private' = `event.repository.private` bool
|
||||
* 'user' = `event.sender.login` string
|
||||
* 'admin' = `event.sender.site_admin` bool
|
||||
|
||||
**Fields:**
|
||||
* 'stars' = `event.repository.stargazers_count` int
|
||||
* 'forks' = `event.repository.forks_count` int
|
||||
* 'issues' = `event.repository.open_issues_count` int
|
||||
* 'commit' = `event.deployment.sha` string
|
||||
* 'task' = `event.deployment.task` string
|
||||
* 'environment' = `event.deployment.environment` string
|
||||
* 'description' = `event.deployment.description` string
|
||||
* 'depState' = `event.deployment_status.state` string
|
||||
* 'depDescription' = `event.deployment_status.description` string
|
||||
|
||||
#### [`fork` event](https://developer.github.com/v3/activity/events/types/#forkevent)
|
||||
|
||||
**Tags:**
|
||||
* 'event' = `headers[X-Github-Event]` string
|
||||
* 'repository' = `event.repository.full_name` string
|
||||
* 'private' = `event.repository.private` bool
|
||||
* 'user' = `event.sender.login` string
|
||||
* 'admin' = `event.sender.site_admin` bool
|
||||
|
||||
**Fields:**
|
||||
* 'stars' = `event.repository.stargazers_count` int
|
||||
* 'forks' = `event.repository.forks_count` int
|
||||
* 'issues' = `event.repository.open_issues_count` int
|
||||
* 'forkee' = `event.forkee.repository` string
|
||||
|
||||
#### [`gollum` event](https://developer.github.com/v3/activity/events/types/#gollumevent)
|
||||
|
||||
**Tags:**
|
||||
* 'event' = `headers[X-Github-Event]` string
|
||||
* 'repository' = `event.repository.full_name` string
|
||||
* 'private' = `event.repository.private` bool
|
||||
* 'user' = `event.sender.login` string
|
||||
* 'admin' = `event.sender.site_admin` bool
|
||||
|
||||
**Fields:**
|
||||
* 'stars' = `event.repository.stargazers_count` int
|
||||
* 'forks' = `event.repository.forks_count` int
|
||||
* 'issues' = `event.repository.open_issues_count` int
|
||||
|
||||
#### [`issue_comment` event](https://developer.github.com/v3/activity/events/types/#issuecommentevent)
|
||||
|
||||
**Tags:**
|
||||
* 'event' = `headers[X-Github-Event]` string
|
||||
* 'repository' = `event.repository.full_name` string
|
||||
* 'private' = `event.repository.private` bool
|
||||
* 'user' = `event.sender.login` string
|
||||
* 'admin' = `event.sender.site_admin` bool
|
||||
* 'issue' = `event.issue.number` int
|
||||
|
||||
**Fields:**
|
||||
* 'stars' = `event.repository.stargazers_count` int
|
||||
* 'forks' = `event.repository.forks_count` int
|
||||
* 'issues' = `event.repository.open_issues_count` int
|
||||
* 'title' = `event.issue.title` string
|
||||
* 'comments' = `event.issue.comments` int
|
||||
* 'body' = `event.comment.body` string
|
||||
|
||||
#### [`issues` event](https://developer.github.com/v3/activity/events/types/#issuesevent)
|
||||
|
||||
**Tags:**
|
||||
* 'event' = `headers[X-Github-Event]` string
|
||||
* 'repository' = `event.repository.full_name` string
|
||||
* 'private' = `event.repository.private` bool
|
||||
* 'user' = `event.sender.login` string
|
||||
* 'admin' = `event.sender.site_admin` bool
|
||||
* 'issue' = `event.issue.number` int
|
||||
* 'action' = `event.action` string
|
||||
|
||||
**Fields:**
|
||||
* 'stars' = `event.repository.stargazers_count` int
|
||||
* 'forks' = `event.repository.forks_count` int
|
||||
* 'issues' = `event.repository.open_issues_count` int
|
||||
* 'title' = `event.issue.title` string
|
||||
* 'comments' = `event.issue.comments` int
|
||||
|
||||
#### [`member` event](https://developer.github.com/v3/activity/events/types/#memberevent)
|
||||
|
||||
**Tags:**
|
||||
* 'event' = `headers[X-Github-Event]` string
|
||||
* 'repository' = `event.repository.full_name` string
|
||||
* 'private' = `event.repository.private` bool
|
||||
* 'user' = `event.sender.login` string
|
||||
* 'admin' = `event.sender.site_admin` bool
|
||||
|
||||
**Fields:**
|
||||
* 'stars' = `event.repository.stargazers_count` int
|
||||
* 'forks' = `event.repository.forks_count` int
|
||||
* 'issues' = `event.repository.open_issues_count` int
|
||||
* 'newMember' = `event.sender.login` string
|
||||
* 'newMemberStatus' = `event.sender.site_admin` bool
|
||||
|
||||
#### [`membership` event](https://developer.github.com/v3/activity/events/types/#membershipevent)
|
||||
|
||||
**Tags:**
|
||||
* 'event' = `headers[X-Github-Event]` string
|
||||
* 'user' = `event.sender.login` string
|
||||
* 'admin' = `event.sender.site_admin` bool
|
||||
* 'action' = `event.action` string
|
||||
|
||||
**Fields:**
|
||||
* 'newMember' = `event.sender.login` string
|
||||
* 'newMemberStatus' = `event.sender.site_admin` bool
|
||||
|
||||
#### [`page_build` event](https://developer.github.com/v3/activity/events/types/#pagebuildevent)
|
||||
|
||||
**Tags:**
|
||||
* 'event' = `headers[X-Github-Event]` string
|
||||
* 'repository' = `event.repository.full_name` string
|
||||
* 'private' = `event.repository.private` bool
|
||||
* 'user' = `event.sender.login` string
|
||||
* 'admin' = `event.sender.site_admin` bool
|
||||
|
||||
**Fields:**
|
||||
* 'stars' = `event.repository.stargazers_count` int
|
||||
* 'forks' = `event.repository.forks_count` int
|
||||
* 'issues' = `event.repository.open_issues_count` int
|
||||
|
||||
#### [`public` event](https://developer.github.com/v3/activity/events/types/#publicevent)
|
||||
|
||||
**Tags:**
|
||||
* 'event' = `headers[X-Github-Event]` string
|
||||
* 'repository' = `event.repository.full_name` string
|
||||
* 'private' = `event.repository.private` bool
|
||||
* 'user' = `event.sender.login` string
|
||||
* 'admin' = `event.sender.site_admin` bool
|
||||
|
||||
**Fields:**
|
||||
* 'stars' = `event.repository.stargazers_count` int
|
||||
* 'forks' = `event.repository.forks_count` int
|
||||
* 'issues' = `event.repository.open_issues_count` int
|
||||
|
||||
#### [`pull_request_review_comment` event](https://developer.github.com/v3/activity/events/types/#pullrequestreviewcommentevent)
|
||||
|
||||
**Tags:**
|
||||
* 'event' = `headers[X-Github-Event]` string
|
||||
* 'action' = `event.action` string
|
||||
* 'repository' = `event.repository.full_name` string
|
||||
* 'private' = `event.repository.private` bool
|
||||
* 'user' = `event.sender.login` string
|
||||
* 'admin' = `event.sender.site_admin` bool
|
||||
* 'prNumber' = `event.pull_request.number` int
|
||||
|
||||
**Fields:**
|
||||
* 'stars' = `event.repository.stargazers_count` int
|
||||
* 'forks' = `event.repository.forks_count` int
|
||||
* 'issues' = `event.repository.open_issues_count` int
|
||||
* 'state' = `event.pull_request.state` string
|
||||
* 'title' = `event.pull_request.title` string
|
||||
* 'comments' = `event.pull_request.comments` int
|
||||
* 'commits' = `event.pull_request.commits` int
|
||||
* 'additions' = `event.pull_request.additions` int
|
||||
* 'deletions' = `event.pull_request.deletions` int
|
||||
* 'changedFiles' = `event.pull_request.changed_files` int
|
||||
* 'commentFile' = `event.comment.file` string
|
||||
* 'comment' = `event.comment.body` string
|
||||
|
||||
#### [`pull_request` event](https://developer.github.com/v3/activity/events/types/#pullrequestevent)
|
||||
|
||||
**Tags:**
|
||||
* 'event' = `headers[X-Github-Event]` string
|
||||
* 'action' = `event.action` string
|
||||
* 'repository' = `event.repository.full_name` string
|
||||
* 'private' = `event.repository.private` bool
|
||||
* 'user' = `event.sender.login` string
|
||||
* 'admin' = `event.sender.site_admin` bool
|
||||
* 'prNumber' = `event.pull_request.number` int
|
||||
|
||||
**Fields:**
|
||||
* 'stars' = `event.repository.stargazers_count` int
|
||||
* 'forks' = `event.repository.forks_count` int
|
||||
* 'issues' = `event.repository.open_issues_count` int
|
||||
* 'state' = `event.pull_request.state` string
|
||||
* 'title' = `event.pull_request.title` string
|
||||
* 'comments' = `event.pull_request.comments` int
|
||||
* 'commits' = `event.pull_request.commits` int
|
||||
* 'additions' = `event.pull_request.additions` int
|
||||
* 'deletions' = `event.pull_request.deletions` int
|
||||
* 'changedFiles' = `event.pull_request.changed_files` int
|
||||
|
||||
#### [`push` event](https://developer.github.com/v3/activity/events/types/#pushevent)
|
||||
|
||||
**Tags:**
|
||||
* 'event' = `headers[X-Github-Event]` string
|
||||
* 'repository' = `event.repository.full_name` string
|
||||
* 'private' = `event.repository.private` bool
|
||||
* 'user' = `event.sender.login` string
|
||||
* 'admin' = `event.sender.site_admin` bool
|
||||
|
||||
**Fields:**
|
||||
* 'stars' = `event.repository.stargazers_count` int
|
||||
* 'forks' = `event.repository.forks_count` int
|
||||
* 'issues' = `event.repository.open_issues_count` int
|
||||
* 'ref' = `event.ref` string
|
||||
* 'before' = `event.before` string
|
||||
* 'after' = `event.after` string
|
||||
|
||||
#### [`repository` event](https://developer.github.com/v3/activity/events/types/#repositoryevent)
|
||||
|
||||
**Tags:**
|
||||
* 'event' = `headers[X-Github-Event]` string
|
||||
* 'repository' = `event.repository.full_name` string
|
||||
* 'private' = `event.repository.private` bool
|
||||
* 'user' = `event.sender.login` string
|
||||
* 'admin' = `event.sender.site_admin` bool
|
||||
|
||||
**Fields:**
|
||||
* 'stars' = `event.repository.stargazers_count` int
|
||||
* 'forks' = `event.repository.forks_count` int
|
||||
* 'issues' = `event.repository.open_issues_count` int
|
||||
|
||||
#### [`release` event](https://developer.github.com/v3/activity/events/types/#releaseevent)
|
||||
|
||||
**Tags:**
|
||||
* 'event' = `headers[X-Github-Event]` string
|
||||
* 'repository' = `event.repository.full_name` string
|
||||
* 'private' = `event.repository.private` bool
|
||||
* 'user' = `event.sender.login` string
|
||||
* 'admin' = `event.sender.site_admin` bool
|
||||
|
||||
**Fields:**
|
||||
* 'stars' = `event.repository.stargazers_count` int
|
||||
* 'forks' = `event.repository.forks_count` int
|
||||
* 'issues' = `event.repository.open_issues_count` int
|
||||
* 'tagName' = `event.release.tag_name` string
|
||||
|
||||
#### [`status` event](https://developer.github.com/v3/activity/events/types/#statusevent)
|
||||
|
||||
**Tags:**
|
||||
* 'event' = `headers[X-Github-Event]` string
|
||||
* 'repository' = `event.repository.full_name` string
|
||||
* 'private' = `event.repository.private` bool
|
||||
* 'user' = `event.sender.login` string
|
||||
* 'admin' = `event.sender.site_admin` bool
|
||||
|
||||
**Fields:**
|
||||
* 'stars' = `event.repository.stargazers_count` int
|
||||
* 'forks' = `event.repository.forks_count` int
|
||||
* 'issues' = `event.repository.open_issues_count` int
|
||||
* 'commit' = `event.sha` string
|
||||
* 'state' = `event.state` string
|
||||
|
||||
#### [`team_add` event](https://developer.github.com/v3/activity/events/types/#teamaddevent)
|
||||
|
||||
**Tags:**
|
||||
* 'event' = `headers[X-Github-Event]` string
|
||||
* 'repository' = `event.repository.full_name` string
|
||||
* 'private' = `event.repository.private` bool
|
||||
* 'user' = `event.sender.login` string
|
||||
* 'admin' = `event.sender.site_admin` bool
|
||||
|
||||
**Fields:**
|
||||
* 'stars' = `event.repository.stargazers_count` int
|
||||
* 'forks' = `event.repository.forks_count` int
|
||||
* 'issues' = `event.repository.open_issues_count` int
|
||||
* 'teamName' = `event.team.name` string
|
||||
|
||||
#### [`watch` event](https://developer.github.com/v3/activity/events/types/#watchevent)
|
||||
|
||||
**Tags:**
|
||||
* 'event' = `headers[X-Github-Event]` string
|
||||
* 'repository' = `event.repository.full_name` string
|
||||
* 'private' = `event.repository.private` bool
|
||||
* 'user' = `event.sender.login` string
|
||||
* 'admin' = `event.sender.site_admin` bool
|
||||
|
||||
**Fields:**
|
||||
* 'stars' = `event.repository.stargazers_count` int
|
||||
* 'forks' = `event.repository.forks_count` int
|
||||
* 'issues' = `event.repository.open_issues_count` int
|
||||
@@ -1,334 +0,0 @@
|
||||
package github_webhooks
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"sync"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
)
|
||||
|
||||
func init() {
|
||||
inputs.Add("github_webhooks", func() inputs.Input { return &GithubWebhooks{} })
|
||||
}
|
||||
|
||||
type GithubWebhooks struct {
|
||||
ServiceAddress string
|
||||
// Lock for the struct
|
||||
sync.Mutex
|
||||
// Events buffer to store events between Gather calls
|
||||
events []Event
|
||||
}
|
||||
|
||||
func NewGithubWebhooks() *GithubWebhooks {
|
||||
return &GithubWebhooks{}
|
||||
}
|
||||
|
||||
func (gh *GithubWebhooks) SampleConfig() string {
|
||||
return `
|
||||
# Address and port to host Webhook listener on
|
||||
service_address = ":1618"
|
||||
`
|
||||
}
|
||||
|
||||
func (gh *GithubWebhooks) Description() string {
|
||||
return "A Github Webhook Event collector"
|
||||
}
|
||||
|
||||
// Gather adds the buffered webhook events to the Accumulator as points
|
||||
func (gh *GithubWebhooks) Gather(acc inputs.Accumulator) error {
|
||||
gh.Lock()
|
||||
defer gh.Unlock()
|
||||
for _, event := range gh.events {
|
||||
p := event.NewPoint()
|
||||
acc.AddFields("github_webhooks", p.Fields(), p.Tags(), p.Time())
|
||||
}
|
||||
gh.events = make([]Event, 0)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (gh *GithubWebhooks) Listen() {
|
||||
r := mux.NewRouter()
|
||||
r.HandleFunc("/", gh.eventHandler).Methods("POST")
|
||||
err := http.ListenAndServe(fmt.Sprintf("%s", gh.ServiceAddress), r)
|
||||
if err != nil {
|
||||
log.Printf("Error starting server: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func (gh *GithubWebhooks) Start() error {
|
||||
go gh.Listen()
|
||||
log.Printf("Started the github_webhooks service on %s\n", gh.ServiceAddress)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (gh *GithubWebhooks) Stop() {
|
||||
log.Println("Stopping the ghWebhooks service")
|
||||
}
|
||||
|
||||
// Handles the / route
|
||||
func (gh *GithubWebhooks) eventHandler(w http.ResponseWriter, r *http.Request) {
|
||||
eventType := r.Header["X-Github-Event"][0]
|
||||
data, err := ioutil.ReadAll(r.Body)
|
||||
if err != nil {
|
||||
w.WriteHeader(http.StatusBadRequest)
return // do not try to parse a body that could not be read
|
||||
}
|
||||
e, err := NewEvent(data, eventType)
|
||||
if err != nil {
|
||||
w.WriteHeader(http.StatusBadRequest)
return // do not buffer an event that failed to parse
|
||||
}
|
||||
gh.Lock()
|
||||
gh.events = append(gh.events, e)
|
||||
gh.Unlock()
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
|
||||
func newCommitComment(data []byte) (Event, error) {
|
||||
commitCommentStruct := CommitCommentEvent{}
|
||||
err := json.Unmarshal(data, &commitCommentStruct)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return commitCommentStruct, nil
|
||||
}
|
||||
|
||||
func newCreate(data []byte) (Event, error) {
|
||||
createStruct := CreateEvent{}
|
||||
err := json.Unmarshal(data, &createStruct)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return createStruct, nil
|
||||
}
|
||||
|
||||
func newDelete(data []byte) (Event, error) {
|
||||
deleteStruct := DeleteEvent{}
|
||||
err := json.Unmarshal(data, &deleteStruct)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return deleteStruct, nil
|
||||
}
|
||||
|
||||
func newDeployment(data []byte) (Event, error) {
|
||||
deploymentStruct := DeploymentEvent{}
|
||||
err := json.Unmarshal(data, &deploymentStruct)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return deploymentStruct, nil
|
||||
}
|
||||
|
||||
func newDeploymentStatus(data []byte) (Event, error) {
|
||||
deploymentStatusStruct := DeploymentStatusEvent{}
|
||||
err := json.Unmarshal(data, &deploymentStatusStruct)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return deploymentStatusStruct, nil
|
||||
}
|
||||
|
||||
func newFork(data []byte) (Event, error) {
|
||||
forkStruct := ForkEvent{}
|
||||
err := json.Unmarshal(data, &forkStruct)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return forkStruct, nil
|
||||
}
|
||||
|
||||
func newGollum(data []byte) (Event, error) {
|
||||
gollumStruct := GollumEvent{}
|
||||
err := json.Unmarshal(data, &gollumStruct)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return gollumStruct, nil
|
||||
}
|
||||
|
||||
func newIssueComment(data []byte) (Event, error) {
|
||||
issueCommentStruct := IssueCommentEvent{}
|
||||
err := json.Unmarshal(data, &issueCommentStruct)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return issueCommentStruct, nil
|
||||
}
|
||||
|
||||
func newIssues(data []byte) (Event, error) {
|
||||
issuesStruct := IssuesEvent{}
|
||||
err := json.Unmarshal(data, &issuesStruct)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return issuesStruct, nil
|
||||
}
|
||||
|
||||
func newMember(data []byte) (Event, error) {
|
||||
memberStruct := MemberEvent{}
|
||||
err := json.Unmarshal(data, &memberStruct)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return memberStruct, nil
|
||||
}
|
||||
|
||||
func newMembership(data []byte) (Event, error) {
|
||||
membershipStruct := MembershipEvent{}
|
||||
err := json.Unmarshal(data, &membershipStruct)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return membershipStruct, nil
|
||||
}
|
||||
|
||||
func newPageBuild(data []byte) (Event, error) {
|
||||
pageBuildEvent := PageBuildEvent{}
|
||||
err := json.Unmarshal(data, &pageBuildEvent)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return pageBuildEvent, nil
|
||||
}
|
||||
|
||||
func newPublic(data []byte) (Event, error) {
|
||||
publicEvent := PublicEvent{}
|
||||
err := json.Unmarshal(data, &publicEvent)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return publicEvent, nil
|
||||
}
|
||||
|
||||
func newPullRequest(data []byte) (Event, error) {
|
||||
pullRequestStruct := PullRequestEvent{}
|
||||
err := json.Unmarshal(data, &pullRequestStruct)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return pullRequestStruct, nil
|
||||
}
|
||||
|
||||
func newPullRequestReviewComment(data []byte) (Event, error) {
|
||||
pullRequestReviewCommentStruct := PullRequestReviewCommentEvent{}
|
||||
err := json.Unmarshal(data, &pullRequestReviewCommentStruct)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return pullRequestReviewCommentStruct, nil
|
||||
}
|
||||
|
||||
func newPush(data []byte) (Event, error) {
|
||||
pushStruct := PushEvent{}
|
||||
err := json.Unmarshal(data, &pushStruct)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return pushStruct, nil
|
||||
}
|
||||
|
||||
func newRelease(data []byte) (Event, error) {
|
||||
releaseStruct := ReleaseEvent{}
|
||||
err := json.Unmarshal(data, &releaseStruct)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return releaseStruct, nil
|
||||
}
|
||||
|
||||
func newRepository(data []byte) (Event, error) {
|
||||
repositoryStruct := RepositoryEvent{}
|
||||
err := json.Unmarshal(data, &repositoryStruct)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return repositoryStruct, nil
|
||||
}
|
||||
|
||||
func newStatus(data []byte) (Event, error) {
|
||||
statusStruct := StatusEvent{}
|
||||
err := json.Unmarshal(data, &statusStruct)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return statusStruct, nil
|
||||
}
|
||||
|
||||
func newTeamAdd(data []byte) (Event, error) {
|
||||
teamAddStruct := TeamAddEvent{}
|
||||
err := json.Unmarshal(data, &teamAddStruct)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return teamAddStruct, nil
|
||||
}
|
||||
|
||||
func newWatch(data []byte) (Event, error) {
|
||||
watchStruct := WatchEvent{}
|
||||
err := json.Unmarshal(data, &watchStruct)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return watchStruct, nil
|
||||
}
|
||||
|
||||
type newEventError struct {
|
||||
s string
|
||||
}
|
||||
|
||||
func (e *newEventError) Error() string {
|
||||
return e.s
|
||||
}
|
||||
|
||||
func NewEvent(r []byte, t string) (Event, error) {
|
||||
log.Printf("New %v event recieved", t)
|
||||
switch t {
|
||||
case "commit_comment":
|
||||
return newCommitComment(r)
|
||||
case "create":
|
||||
return newCreate(r)
|
||||
case "delete":
|
||||
return newDelete(r)
|
||||
case "deployment":
|
||||
return newDeployment(r)
|
||||
case "deployment_status":
|
||||
return newDeploymentStatus(r)
|
||||
case "fork":
|
||||
return newFork(r)
|
||||
case "gollum":
|
||||
return newGollum(r)
|
||||
case "issue_comment":
|
||||
return newIssueComment(r)
|
||||
case "issues":
|
||||
return newIssues(r)
|
||||
case "member":
|
||||
return newMember(r)
|
||||
case "membership":
|
||||
return newMembership(r)
|
||||
case "page_build":
|
||||
return newPageBuild(r)
|
||||
case "public":
|
||||
return newPublic(r)
|
||||
case "pull_request":
|
||||
return newPullRequest(r)
|
||||
case "pull_request_review_comment":
|
||||
return newPullRequestReviewComment(r)
|
||||
case "push":
|
||||
return newPush(r)
|
||||
case "release":
|
||||
return newRelease(r)
|
||||
case "repository":
|
||||
return newRepository(r)
|
||||
case "status":
|
||||
return newStatus(r)
|
||||
case "team_add":
|
||||
return newTeamAdd(r)
|
||||
case "watch":
|
||||
return newWatch(r)
|
||||
}
|
||||
return nil, &newEventError{"Not a recognized event type"}
|
||||
}
|
||||
@@ -1,711 +0,0 @@
|
||||
package github_webhooks
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/influxdb/client/v2"
|
||||
)
|
||||
|
||||
const meas = "github_webhooks"
|
||||
|
||||
type Event interface {
|
||||
NewPoint() *client.Point
|
||||
}
|
||||
|
||||
type Repository struct {
|
||||
Repository string `json:"full_name"`
|
||||
Private bool `json:"private"`
|
||||
Stars int `json:"stargazers_count"`
|
||||
Forks int `json:"forks_count"`
|
||||
Issues int `json:"open_issues_count"`
|
||||
}
|
||||
|
||||
type Sender struct {
|
||||
User string `json:"login"`
|
||||
Admin bool `json:"site_admin"`
|
||||
}
|
||||
|
||||
type CommitComment struct {
|
||||
Commit string `json:"commit_id"`
|
||||
Body string `json:"body"`
|
||||
}
|
||||
|
||||
type Deployment struct {
|
||||
Commit string `json:"sha"`
|
||||
Task string `json:"task"`
|
||||
Environment string `json:"environment"`
|
||||
Description string `json:"description"`
|
||||
}
|
||||
|
||||
type Page struct {
|
||||
Name string `json:"page_name"`
|
||||
Title string `json:"title"`
|
||||
Action string `json:"action"`
|
||||
}
|
||||
|
||||
type Issue struct {
|
||||
Number int `json:"number"`
|
||||
Title string `json:"title"`
|
||||
Comments int `json:"comments"`
|
||||
}
|
||||
|
||||
type IssueComment struct {
|
||||
Body string `json:"body"`
|
||||
}
|
||||
|
||||
type Team struct {
|
||||
Name string `json:"name"`
|
||||
}
|
||||
|
||||
type PullRequest struct {
|
||||
Number int `json:"number"`
|
||||
State string `json:"state"`
|
||||
Title string `json:"title"`
|
||||
Comments int `json:"comments"`
|
||||
Commits int `json:"commits"`
|
||||
Additions int `json:"additions"`
|
||||
Deletions int `json:"deletions"`
|
||||
ChangedFiles int `json:"changed_files"`
|
||||
}
|
||||
|
||||
type PullRequestReviewComment struct {
|
||||
File string `json:"path"`
|
||||
Comment string `json:"body"`
|
||||
}
|
||||
|
||||
type Release struct {
|
||||
TagName string `json:"tag_name"`
|
||||
}
|
||||
|
||||
type DeploymentStatus struct {
|
||||
State string `json:"state"`
|
||||
Description string `json:"description"`
|
||||
}
|
||||
|
||||
type CommitCommentEvent struct {
|
||||
Comment CommitComment `json:"comment"`
|
||||
Repository Repository `json:"repository"`
|
||||
Sender Sender `json:"sender"`
|
||||
}
|
||||
|
||||
func (s CommitCommentEvent) NewPoint() *client.Point {
|
||||
event := "commit_comment"
|
||||
t := map[string]string{
|
||||
"event": event,
|
||||
"repository": s.Repository.Repository,
|
||||
"private": fmt.Sprintf("%v", s.Repository.Private),
|
||||
"user": s.Sender.User,
|
||||
"admin": fmt.Sprintf("%v", s.Sender.Admin),
|
||||
}
|
||||
f := map[string]interface{}{
|
||||
"stars": s.Repository.Stars,
|
||||
"forks": s.Repository.Forks,
|
||||
"issues": s.Repository.Issues,
|
||||
"commit": s.Comment.Commit,
|
||||
"comment": s.Comment.Body,
|
||||
}
|
||||
p, err := client.NewPoint(meas, t, f, time.Now())
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to create %v event", event)
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
type CreateEvent struct {
|
||||
Ref string `json:"ref"`
|
||||
RefType string `json:"ref_type"`
|
||||
Repository Repository `json:"repository"`
|
||||
Sender Sender `json:"sender"`
|
||||
}
|
||||
|
||||
func (s CreateEvent) NewPoint() *client.Point {
|
||||
event := "create"
|
||||
t := map[string]string{
|
||||
"event": event,
|
||||
"repository": s.Repository.Repository,
|
||||
"private": fmt.Sprintf("%v", s.Repository.Private),
|
||||
"user": s.Sender.User,
|
||||
"admin": fmt.Sprintf("%v", s.Sender.Admin),
|
||||
}
|
||||
f := map[string]interface{}{
|
||||
"stars": s.Repository.Stars,
|
||||
"forks": s.Repository.Forks,
|
||||
"issues": s.Repository.Issues,
|
||||
"ref": s.Ref,
|
||||
"refType": s.RefType,
|
||||
}
|
||||
p, err := client.NewPoint(meas, t, f, time.Now())
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to create %v event", event)
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
type DeleteEvent struct {
|
||||
Ref string `json:"ref"`
|
||||
RefType string `json:"ref_type"`
|
||||
Repository Repository `json:"repository"`
|
||||
Sender Sender `json:"sender"`
|
||||
}
|
||||
|
||||
func (s DeleteEvent) NewPoint() *client.Point {
|
||||
event := "delete"
|
||||
t := map[string]string{
|
||||
"event": event,
|
||||
"repository": s.Repository.Repository,
|
||||
"private": fmt.Sprintf("%v", s.Repository.Private),
|
||||
"user": s.Sender.User,
|
||||
"admin": fmt.Sprintf("%v", s.Sender.Admin),
|
||||
}
|
||||
f := map[string]interface{}{
|
||||
"stars": s.Repository.Stars,
|
||||
"forks": s.Repository.Forks,
|
||||
"issues": s.Repository.Issues,
|
||||
"ref": s.Ref,
|
||||
"refType": s.RefType,
|
||||
}
|
||||
p, err := client.NewPoint(meas, t, f, time.Now())
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to create %v event", event)
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
type DeploymentEvent struct {
|
||||
Deployment Deployment `json:"deployment"`
|
||||
Repository Repository `json:"repository"`
|
||||
Sender Sender `json:"sender"`
|
||||
}
|
||||
|
||||
func (s DeploymentEvent) NewPoint() *client.Point {
|
||||
event := "deployment"
|
||||
t := map[string]string{
|
||||
"event": event,
|
||||
"repository": s.Repository.Repository,
|
||||
"private": fmt.Sprintf("%v", s.Repository.Private),
|
||||
"user": s.Sender.User,
|
||||
"admin": fmt.Sprintf("%v", s.Sender.Admin),
|
||||
}
|
||||
f := map[string]interface{}{
|
||||
"stars": s.Repository.Stars,
|
||||
"forks": s.Repository.Forks,
|
||||
"issues": s.Repository.Issues,
|
||||
"commit": s.Deployment.Commit,
|
||||
"task": s.Deployment.Task,
|
||||
"environment": s.Deployment.Environment,
|
||||
"description": s.Deployment.Description,
|
||||
}
|
||||
p, err := client.NewPoint(meas, t, f, time.Now())
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to create %v event", event)
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
type DeploymentStatusEvent struct {
|
||||
Deployment Deployment `json:"deployment"`
|
||||
DeploymentStatus DeploymentStatus `json:"deployment_status"`
|
||||
Repository Repository `json:"repository"`
|
||||
Sender Sender `json:"sender"`
|
||||
}
|
||||
|
||||
func (s DeploymentStatusEvent) NewPoint() *client.Point {
|
||||
event := "delete"
|
||||
t := map[string]string{
|
||||
"event": event,
|
||||
"repository": s.Repository.Repository,
|
||||
"private": fmt.Sprintf("%v", s.Repository.Private),
|
||||
"user": s.Sender.User,
|
||||
"admin": fmt.Sprintf("%v", s.Sender.Admin),
|
||||
}
|
||||
f := map[string]interface{}{
|
||||
"stars": s.Repository.Stars,
|
||||
"forks": s.Repository.Forks,
|
||||
"issues": s.Repository.Issues,
|
||||
"commit": s.Deployment.Commit,
|
||||
"task": s.Deployment.Task,
|
||||
"environment": s.Deployment.Environment,
|
||||
"description": s.Deployment.Description,
|
||||
"depState": s.DeploymentStatus.State,
|
||||
"depDescription": s.DeploymentStatus.Description,
|
||||
}
|
||||
p, err := client.NewPoint(meas, t, f, time.Now())
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to create %v event", event)
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
type ForkEvent struct {
|
||||
Forkee Repository `json:"forkee"`
|
||||
Repository Repository `json:"repository"`
|
||||
Sender Sender `json:"sender"`
|
||||
}
|
||||
|
||||
func (s ForkEvent) NewPoint() *client.Point {
|
||||
event := "fork"
|
||||
t := map[string]string{
|
||||
"event": event,
|
||||
"repository": s.Repository.Repository,
|
||||
"private": fmt.Sprintf("%v", s.Repository.Private),
|
||||
"user": s.Sender.User,
|
||||
"admin": fmt.Sprintf("%v", s.Sender.Admin),
|
||||
}
|
||||
f := map[string]interface{}{
|
||||
"stars": s.Repository.Stars,
|
||||
"forks": s.Repository.Forks,
|
||||
"issues": s.Repository.Issues,
|
||||
"fork": s.Forkee.Repository,
|
||||
}
|
||||
p, err := client.NewPoint(meas, t, f, time.Now())
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to create %v event", event)
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
type GollumEvent struct {
|
||||
Pages []Page `json:"pages"`
|
||||
Repository Repository `json:"repository"`
|
||||
Sender Sender `json:"sender"`
|
||||
}
|
||||
|
||||
// NOTE: individual wiki pages are not recorded here; only repository-level fields are collected.
|
||||
func (s GollumEvent) NewPoint() *client.Point {
|
||||
event := "gollum"
|
||||
t := map[string]string{
|
||||
"event": event,
|
||||
"repository": s.Repository.Repository,
|
||||
"private": fmt.Sprintf("%v", s.Repository.Private),
|
||||
"user": s.Sender.User,
|
||||
"admin": fmt.Sprintf("%v", s.Sender.Admin),
|
||||
}
|
||||
f := map[string]interface{}{
|
||||
"stars": s.Repository.Stars,
|
||||
"forks": s.Repository.Forks,
|
||||
"issues": s.Repository.Issues,
|
||||
}
|
||||
p, err := client.NewPoint(meas, t, f, time.Now())
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to create %v event", event)
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
type IssueCommentEvent struct {
|
||||
Issue Issue `json:"issue"`
|
||||
Comment IssueComment `json:"comment"`
|
||||
Repository Repository `json:"repository"`
|
||||
Sender Sender `json:"sender"`
|
||||
}
|
||||
|
||||
func (s IssueCommentEvent) NewPoint() *client.Point {
|
||||
event := "issue_comment"
|
||||
t := map[string]string{
|
||||
"event": event,
|
||||
"repository": s.Repository.Repository,
|
||||
"private": fmt.Sprintf("%v", s.Repository.Private),
|
||||
"user": s.Sender.User,
|
||||
"admin": fmt.Sprintf("%v", s.Sender.Admin),
|
||||
"issue": fmt.Sprintf("%v", s.Issue.Number),
|
||||
}
|
||||
f := map[string]interface{}{
|
||||
"stars": s.Repository.Stars,
|
||||
"forks": s.Repository.Forks,
|
||||
"issues": s.Repository.Issues,
|
||||
"title": s.Issue.Title,
|
||||
"comments": s.Issue.Comments,
|
||||
"body": s.Comment.Body,
|
||||
}
|
||||
p, err := client.NewPoint(meas, t, f, time.Now())
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to create %v event", event)
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
type IssuesEvent struct {
|
||||
Action string `json:"action"`
|
||||
Issue Issue `json:"issue"`
|
||||
Repository Repository `json:"repository"`
|
||||
Sender Sender `json:"sender"`
|
||||
}
|
||||
|
||||
func (s IssuesEvent) NewPoint() *client.Point {
|
||||
event := "issue"
|
||||
t := map[string]string{
|
||||
"event": event,
|
||||
"repository": s.Repository.Repository,
|
||||
"private": fmt.Sprintf("%v", s.Repository.Private),
|
||||
"user": s.Sender.User,
|
||||
"admin": fmt.Sprintf("%v", s.Sender.Admin),
|
||||
"issue": fmt.Sprintf("%v", s.Issue.Number),
|
||||
"action": s.Action,
|
||||
}
|
||||
f := map[string]interface{}{
|
||||
"stars": s.Repository.Stars,
|
||||
"forks": s.Repository.Forks,
|
||||
"issues": s.Repository.Issues,
|
||||
"title": s.Issue.Title,
|
||||
"comments": s.Issue.Comments,
|
||||
}
|
||||
p, err := client.NewPoint(meas, t, f, time.Now())
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to create %v event", event)
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
type MemberEvent struct {
|
||||
Member Sender `json:"member"`
|
||||
Repository Repository `json:"repository"`
|
||||
Sender Sender `json:"sender"`
|
||||
}
|
||||
|
||||
func (s MemberEvent) NewPoint() *client.Point {
|
||||
event := "member"
|
||||
t := map[string]string{
|
||||
"event": event,
|
||||
"repository": s.Repository.Repository,
|
||||
"private": fmt.Sprintf("%v", s.Repository.Private),
|
||||
"user": s.Sender.User,
|
||||
"admin": fmt.Sprintf("%v", s.Sender.Admin),
|
||||
}
|
||||
f := map[string]interface{}{
|
||||
"stars": s.Repository.Stars,
|
||||
"forks": s.Repository.Forks,
|
||||
"issues": s.Repository.Issues,
|
||||
"newMember": s.Member.User,
|
||||
"newMemberStatus": s.Member.Admin,
|
||||
}
|
||||
p, err := client.NewPoint(meas, t, f, time.Now())
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to create %v event", event)
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
type MembershipEvent struct {
|
||||
Action string `json:"action"`
|
||||
Member Sender `json:"member"`
|
||||
Sender Sender `json:"sender"`
|
||||
Team Team `json:"team"`
|
||||
}
|
||||
|
||||
func (s MembershipEvent) NewPoint() *client.Point {
|
||||
event := "membership"
|
||||
t := map[string]string{
|
||||
"event": event,
|
||||
"user": s.Sender.User,
|
||||
"admin": fmt.Sprintf("%v", s.Sender.Admin),
|
||||
"action": s.Action,
|
||||
}
|
||||
f := map[string]interface{}{
|
||||
"newMember": s.Member.User,
|
||||
"newMemberStatus": s.Member.Admin,
|
||||
}
|
||||
p, err := client.NewPoint(meas, t, f, time.Now())
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to create %v event", event)
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
type PageBuildEvent struct {
|
||||
Repository Repository `json:"repository"`
|
||||
Sender Sender `json:"sender"`
|
||||
}
|
||||
|
||||
func (s PageBuildEvent) NewPoint() *client.Point {
|
||||
event := "page_build"
|
||||
t := map[string]string{
|
||||
"event": event,
|
||||
"repository": s.Repository.Repository,
|
||||
"private": fmt.Sprintf("%v", s.Repository.Private),
|
||||
"user": s.Sender.User,
|
||||
"admin": fmt.Sprintf("%v", s.Sender.Admin),
|
||||
}
|
||||
f := map[string]interface{}{
|
||||
"stars": s.Repository.Stars,
|
||||
"forks": s.Repository.Forks,
|
||||
"issues": s.Repository.Issues,
|
||||
}
|
||||
p, err := client.NewPoint(meas, t, f, time.Now())
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to create %v event", event)
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
type PublicEvent struct {
|
||||
Repository Repository `json:"repository"`
|
||||
Sender Sender `json:"sender"`
|
||||
}
|
||||
|
||||
func (s PublicEvent) NewPoint() *client.Point {
|
||||
event := "public"
|
||||
t := map[string]string{
|
||||
"event": event,
|
||||
"repository": s.Repository.Repository,
|
||||
"private": fmt.Sprintf("%v", s.Repository.Private),
|
||||
"user": s.Sender.User,
|
||||
"admin": fmt.Sprintf("%v", s.Sender.Admin),
|
||||
}
|
||||
f := map[string]interface{}{
|
||||
"stars": s.Repository.Stars,
|
||||
"forks": s.Repository.Forks,
|
||||
"issues": s.Repository.Issues,
|
||||
}
|
||||
p, err := client.NewPoint(meas, t, f, time.Now())
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to create %v event", event)
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
// PullRequestEvent is the payload of a GitHub "pull_request" webhook.
type PullRequestEvent struct {
	Action string `json:"action"`
	PullRequest PullRequest `json:"pull_request"`
	Repository Repository `json:"repository"`
	Sender Sender `json:"sender"`
}

func (s PullRequestEvent) NewPoint() *client.Point {
	event := "pull_request"
	t := map[string]string{
		"event": event,
		"action": s.Action,
		"repository": s.Repository.Repository,
		"private": fmt.Sprintf("%v", s.Repository.Private),
		"user": s.Sender.User,
		"admin": fmt.Sprintf("%v", s.Sender.Admin),
		"prNumber": fmt.Sprintf("%v", s.PullRequest.Number),
	}
	f := map[string]interface{}{
		"stars": s.Repository.Stars,
		"forks": s.Repository.Forks,
		"issues": s.Repository.Issues,
		"state": s.PullRequest.State,
		"title": s.PullRequest.Title,
		"comments": s.PullRequest.Comments,
		"commits": s.PullRequest.Commits,
		"additions": s.PullRequest.Additions,
		"deletions": s.PullRequest.Deletions,
		"changedFiles": s.PullRequest.ChangedFiles,
	}
	p, err := client.NewPoint(meas, t, f, time.Now())
	if err != nil {
		log.Fatalf("Failed to create %v event", event)
	}
	return p
}

// PullRequestReviewCommentEvent is the payload of a GitHub
// "pull_request_review_comment" webhook.
type PullRequestReviewCommentEvent struct {
	Comment PullRequestReviewComment `json:"comment"`
	PullRequest PullRequest `json:"pull_request"`
	Repository Repository `json:"repository"`
	Sender Sender `json:"sender"`
}

func (s PullRequestReviewCommentEvent) NewPoint() *client.Point {
	event := "pull_request_review_comment"
	t := map[string]string{
		"event": event,
		"repository": s.Repository.Repository,
		"private": fmt.Sprintf("%v", s.Repository.Private),
		"user": s.Sender.User,
		"admin": fmt.Sprintf("%v", s.Sender.Admin),
		"prNumber": fmt.Sprintf("%v", s.PullRequest.Number),
	}
	f := map[string]interface{}{
		"stars": s.Repository.Stars,
		"forks": s.Repository.Forks,
		"issues": s.Repository.Issues,
		"state": s.PullRequest.State,
		"title": s.PullRequest.Title,
		"comments": s.PullRequest.Comments,
		"commits": s.PullRequest.Commits,
		"additions": s.PullRequest.Additions,
		"deletions": s.PullRequest.Deletions,
		"changedFiles": s.PullRequest.ChangedFiles,
		"commentFile": s.Comment.File,
		"comment": s.Comment.Comment,
	}
	p, err := client.NewPoint(meas, t, f, time.Now())
	if err != nil {
		log.Fatalf("Failed to create %v event", event)
	}
	return p
}

// PushEvent is the payload of a GitHub "push" webhook.
type PushEvent struct {
	Ref string `json:"ref"`
	Before string `json:"before"`
	After string `json:"after"`
	Repository Repository `json:"repository"`
	Sender Sender `json:"sender"`
}

func (s PushEvent) NewPoint() *client.Point {
	event := "push"
	t := map[string]string{
		"event": event,
		"repository": s.Repository.Repository,
		"private": fmt.Sprintf("%v", s.Repository.Private),
		"user": s.Sender.User,
		"admin": fmt.Sprintf("%v", s.Sender.Admin),
	}
	f := map[string]interface{}{
		"stars": s.Repository.Stars,
		"forks": s.Repository.Forks,
		"issues": s.Repository.Issues,
		"ref": s.Ref,
		"before": s.Before,
		"after": s.After,
	}
	p, err := client.NewPoint(meas, t, f, time.Now())
	if err != nil {
		log.Fatalf("Failed to create %v event", event)
	}
	return p
}

// ReleaseEvent is the payload of a GitHub "release" webhook.
type ReleaseEvent struct {
	Release Release `json:"release"`
	Repository Repository `json:"repository"`
	Sender Sender `json:"sender"`
}

func (s ReleaseEvent) NewPoint() *client.Point {
	event := "release"
	t := map[string]string{
		"event": event,
		"repository": s.Repository.Repository,
		"private": fmt.Sprintf("%v", s.Repository.Private),
		"user": s.Sender.User,
		"admin": fmt.Sprintf("%v", s.Sender.Admin),
	}
	f := map[string]interface{}{
		"stars": s.Repository.Stars,
		"forks": s.Repository.Forks,
		"issues": s.Repository.Issues,
		"tagName": s.Release.TagName,
	}
	p, err := client.NewPoint(meas, t, f, time.Now())
	if err != nil {
		log.Fatalf("Failed to create %v event", event)
	}
	return p
}

// RepositoryEvent is the payload of a GitHub "repository" webhook.
type RepositoryEvent struct {
	Repository Repository `json:"repository"`
	Sender Sender `json:"sender"`
}

func (s RepositoryEvent) NewPoint() *client.Point {
	event := "repository"
	t := map[string]string{
		"event": event,
		"repository": s.Repository.Repository,
		"private": fmt.Sprintf("%v", s.Repository.Private),
		"user": s.Sender.User,
		"admin": fmt.Sprintf("%v", s.Sender.Admin),
	}
	f := map[string]interface{}{
		"stars": s.Repository.Stars,
		"forks": s.Repository.Forks,
		"issues": s.Repository.Issues,
	}
	p, err := client.NewPoint(meas, t, f, time.Now())
	if err != nil {
		log.Fatalf("Failed to create %v event", event)
	}
	return p
}

// StatusEvent is the payload of a GitHub "status" webhook.
type StatusEvent struct {
	Commit string `json:"sha"`
	State string `json:"state"`
	Repository Repository `json:"repository"`
	Sender Sender `json:"sender"`
}

func (s StatusEvent) NewPoint() *client.Point {
	event := "status"
	t := map[string]string{
		"event": event,
		"repository": s.Repository.Repository,
		"private": fmt.Sprintf("%v", s.Repository.Private),
		"user": s.Sender.User,
		"admin": fmt.Sprintf("%v", s.Sender.Admin),
	}
	f := map[string]interface{}{
		"stars": s.Repository.Stars,
		"forks": s.Repository.Forks,
		"issues": s.Repository.Issues,
		"commit": s.Commit,
		"state": s.State,
	}
	p, err := client.NewPoint(meas, t, f, time.Now())
	if err != nil {
		log.Fatalf("Failed to create %v event", event)
	}
	return p
}

// TeamAddEvent is the payload of a GitHub "team_add" webhook.
type TeamAddEvent struct {
	Team Team `json:"team"`
	Repository Repository `json:"repository"`
	Sender Sender `json:"sender"`
}

func (s TeamAddEvent) NewPoint() *client.Point {
	event := "team_add"
	t := map[string]string{
		"event": event,
		"repository": s.Repository.Repository,
		"private": fmt.Sprintf("%v", s.Repository.Private),
		"user": s.Sender.User,
		"admin": fmt.Sprintf("%v", s.Sender.Admin),
	}
	f := map[string]interface{}{
		"stars": s.Repository.Stars,
		"forks": s.Repository.Forks,
		"issues": s.Repository.Issues,
		"teamName": s.Team.Name,
	}
	p, err := client.NewPoint(meas, t, f, time.Now())
	if err != nil {
		log.Fatalf("Failed to create %v event", event)
	}
	return p
}

// WatchEvent is the payload of a GitHub "watch" webhook.
type WatchEvent struct {
	Repository Repository `json:"repository"`
	Sender Sender `json:"sender"`
}

func (s WatchEvent) NewPoint() *client.Point {
	event := "watch"
	t := map[string]string{
		"event": event,
		"repository": s.Repository.Repository,
		"private": fmt.Sprintf("%v", s.Repository.Private),
		"user": s.Sender.User,
		"admin": fmt.Sprintf("%v", s.Sender.Admin),
	}
	f := map[string]interface{}{
		"stars": s.Repository.Stars,
		"forks": s.Repository.Forks,
		"issues": s.Repository.Issues,
	}
	p, err := client.NewPoint(meas, t, f, time.Now())
	if err != nil {
		log.Fatalf("Failed to create %v event", event)
	}
	return p
}
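
For context, here is a minimal, hypothetical sketch of how one of these payload types could be consumed: an HTTP handler decodes a webhook body into a MemberEvent and converts it with NewPoint. The handler name handleMemberEvent, the use of encoding/json, log, and net/http, and the logged line-protocol output are illustrative assumptions, not part of the code above; it also assumes client is InfluxDB's client/v2 package, whose Point type provides String().

// Hypothetical usage sketch, not part of the diff above. Assumes the
// encoding/json, log, and net/http packages are imported alongside the
// imports already used by this file.
func handleMemberEvent(w http.ResponseWriter, r *http.Request) {
	defer r.Body.Close()

	// Decode the GitHub webhook payload into the MemberEvent type defined above.
	var e MemberEvent
	if err := json.NewDecoder(r.Body).Decode(&e); err != nil {
		http.Error(w, "could not decode member event", http.StatusBadRequest)
		return
	}

	// NewPoint tags the point with repository and sender metadata and stamps
	// it with the current time; String() renders it as line protocol.
	p := e.NewPoint()
	log.Printf("member event: %s", p.String())

	w.WriteHeader(http.StatusOK)
}
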
Some files were not shown because too many files have changed in this diff.