Compare commits
109 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 434c08a357 | |
| | bd9c5b6995 | |
| | b941d270ce | |
| | 9406961125 | |
| | 0d391b66a3 | |
| | a11e07e250 | |
| | d266dad1f4 | |
| | 331b700d1b | |
| | 2163fde0a4 | |
| | 24a2aaef4b | |
| | 042cf517b2 | |
| | b97027ac9a | |
| | 4ea3f82e50 | |
| | 38c4111e6c | |
| | 338341add8 | |
| | 93bb679f9d | |
| | 40d859354f | |
| | 9e7c8df384 | |
| | f088dd7e00 | |
| | 10c4e4f63f | |
| | 962325cc40 | |
| | a9c33abfa5 | |
| | d835c19fce | |
| | 1f1384afc6 | |
| | 9d4b55be19 | |
| | c549ab907a | |
| | 9c0d14bb60 | |
| | a822d942cd | |
| | 3a64a01f91 | |
| | 6ebb6bc7ee | |
| | be95dfdd0e | |
| | 88890fa7c2 | |
| | f8930b9cbc | |
| | c10227a766 | |
| | 7e7e462de1 | |
| | a93e1ceac8 | |
| | 7f8469b66a | |
| | cf568487c8 | |
| | 4c74a2dd3a | |
| | a70452219b | |
| | 47ea2d5fb4 | |
| | 16540e35f1 | |
| | 3bfb3a9fe2 | |
| | f9517dcf24 | |
| | 7878b22b09 | |
| | e6d7e4e309 | |
| | 40d0da404e | |
| | 8675bd125a | |
| | 4e5dfa5d33 | |
| | 89f5b77550 | |
| | 5b15cd9163 | |
| | dbf1383a38 | |
| | 46b367e74b | |
| | 3da390682d | |
| | 5349a3b6d1 | |
| | f2ab5f61f5 | |
| | e910a03af4 | |
| | 4d0dc8b7c8 | |
| | e0dc1ef5bd | |
| | f24f5e98dd | |
| | 6647cfc228 | |
| | ddcd99a1ce | |
| | 55c07f23b0 | |
| | 8192572e23 | |
| | 0cdf1b07e9 | |
| | 8653bae6ac | |
| | fc1aa7d3b4 | |
| | 8bdcd6d576 | |
| | d3925fe578 | |
| | d3a5cca1bc | |
| | f3b553712a | |
| | 839651fadb | |
| | 6a50fceea4 | |
| | 7efe108686 | |
| | c313af1b24 | |
| | 1388b1b58b | |
| | 551db20657 | |
| | bc71e956a5 | |
| | 5af6974796 | |
| | a712036b56 | |
| | 37b96c192b | |
| | 8cbdf0f907 | |
| | ef5c630d3a | |
| | 6eea89f4c0 | |
| | dbbb2d9877 | |
| | c483e16d72 | |
| | 40a5bad968 | |
| | 1421bce371 | |
| | 5e7dd6d51b | |
| | 71f4e72b22 | |
| | b24e71b232 | |
| | f60c090e4c | |
| | 50334e6bac | |
| | 963a9429dd | |
| | 2eda8d64c7 | |
| | 9b96c62e46 | |
| | 378b7467a4 | |
| | c0d98ecd4b | |
| | b44644b6bf | |
| | 7bfb42946e | |
| | e8907acd28 | |
| | d6ef3b1e02 | |
| | a39a7a7a03 | |
| | 923be102b3 | |
| | 7531e218c1 | |
| | 3cc1fecb53 | |
| | 3c89847489 | |
| | fb837ca66d | |
| | 2ec1ffdc11 | |
CHANGELOG.md (369 changes)
@@ -1,4 +1,87 @@
## v0.10.0 [unreleased]
## v0.10.3 [unreleased]

### Release Notes

### Features

### Bugfixes

## v0.10.2 [2016-02-04]

### Release Notes
- Statsd timing measurements are now aggregated into a single measurement with fields.
- Graphite output now inserts tags into the bucket in alphabetical order.
- Normalized TLS/SSL support for output plugins: MQTT, AMQP, Kafka
- `verify_ssl` config option was removed from Kafka because it was actually doing the opposite of what it claimed to do (yikes). It's been replaced by `insecure_skip_verify` (see the sketch below).
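
For illustration, a minimal sketch of the normalized TLS options on the Kafka output described above. Only `insecure_skip_verify` is confirmed by the note; the `ssl_ca`/`ssl_cert`/`ssl_key` option names and the broker/topic values are assumptions:

```toml
[[outputs.kafka]]
  brokers = ["localhost:9092"]   # assumed example value
  topic = "telegraf"             # assumed example value
  ## Optional TLS configuration (option names assumed):
  # ssl_ca = "/etc/telegraf/ca.pem"
  # ssl_cert = "/etc/telegraf/cert.pem"
  # ssl_key = "/etc/telegraf/key.pem"
  ## Replaces the removed verify_ssl option; setting this to true skips
  ## chain & host verification (the opposite of what verify_ssl implied)
  insecure_skip_verify = false
```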

### Features
- [#575](https://github.com/influxdata/telegraf/pull/575): Support for collecting Windows Performance Counters. Thanks @TheFlyingCorpse!
- [#564](https://github.com/influxdata/telegraf/issues/564): features for plugin writing simplification. Internal metric data type.
- [#603](https://github.com/influxdata/telegraf/pull/603): Aggregate statsd timing measurements into fields. Thanks @marcinbunsch!
- [#601](https://github.com/influxdata/telegraf/issues/601): Warn when overwriting cached metrics.
- [#614](https://github.com/influxdata/telegraf/pull/614): PowerDNS input plugin. Thanks @Kasen!
- [#617](https://github.com/influxdata/telegraf/pull/617): exec plugin: parse influx line protocol in addition to JSON.
- [#628](https://github.com/influxdata/telegraf/pull/628): Windows perf counters: pre-Vista support

### Bugfixes
- [#595](https://github.com/influxdata/telegraf/issues/595): graphite output should include tags to separate duplicate measurements.
- [#599](https://github.com/influxdata/telegraf/issues/599): datadog plugin tags not working.
- [#600](https://github.com/influxdata/telegraf/issues/600): datadog measurement/field name parsing is wrong.
- [#602](https://github.com/influxdata/telegraf/issues/602): Fix statsd field name templating.
- [#612](https://github.com/influxdata/telegraf/pull/612): Docker input panic fix if stats received are nil.
- [#634](https://github.com/influxdata/telegraf/pull/634): Properly set host headers in httpjson. Thanks @reginaldosousa!

## v0.10.1 [2016-01-27]

### Release Notes

- Telegraf now keeps a fixed-length buffer of metrics per-output. This buffer defaults to 10,000 metrics, and is adjustable. The buffer is cleared when a successful write to that output occurs (see the sketch after these notes).
- The docker plugin has been significantly overhauled to add more metrics and allow for docker-machine (incl OSX) support. [See the readme](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/docker/README.md) for the latest measurements, fields, and tags. There is also now support for specifying a docker endpoint to get metrics from.
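
A minimal sketch of how that per-output buffer is sized, using the `metric_buffer_limit` option documented in the `[agent]` section later on this page; the value shown is the stated default:

```toml
[agent]
  ## Cache up to this many metrics for each output; the buffer is
  ## cleared on a successful write to that output.
  metric_buffer_limit = 10000
```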

### Features
- [#509](https://github.com/influxdata/telegraf/pull/509): Flatten JSON arrays with indices. Thanks @psilva261!
- [#512](https://github.com/influxdata/telegraf/pull/512): Python 3 build script, add lsof dep to package. Thanks @Ormod!
- [#475](https://github.com/influxdata/telegraf/pull/475): Add response time to httpjson plugin. Thanks @titilambert!
- [#519](https://github.com/influxdata/telegraf/pull/519): Added a sensors input based on lm-sensors. Thanks @md14454!
- [#467](https://github.com/influxdata/telegraf/issues/467): Add option to disable statsd measurement name conversion.
- [#534](https://github.com/influxdata/telegraf/pull/534): NSQ input plugin. Thanks @allingeek!
- [#494](https://github.com/influxdata/telegraf/pull/494): Graphite output plugin. Thanks @titilambert!
- AMQP SSL support. Thanks @ekini!
- [#539](https://github.com/influxdata/telegraf/pull/539): Reload config on SIGHUP. Thanks @titilambert!
- [#522](https://github.com/influxdata/telegraf/pull/522): Phusion passenger input plugin. Thanks @kureikain!
- [#541](https://github.com/influxdata/telegraf/pull/541): Kafka output TLS cert support. Thanks @Ormod!
- [#551](https://github.com/influxdata/telegraf/pull/551): Statsd UDP read packet size now defaults to 1500 bytes, and is configurable.
- [#552](https://github.com/influxdata/telegraf/pull/552): Support for collection interval jittering.
- [#484](https://github.com/influxdata/telegraf/issues/484): Include usage percent with procstat metrics.
- [#553](https://github.com/influxdata/telegraf/pull/553): Amazon CloudWatch output. Thanks @skwong2!
- [#503](https://github.com/influxdata/telegraf/pull/503): Support docker endpoint configuration.
- [#563](https://github.com/influxdata/telegraf/pull/563): Docker plugin overhaul.
- [#285](https://github.com/influxdata/telegraf/issues/285): Fixed-size buffer of points.
- [#546](https://github.com/influxdata/telegraf/pull/546): SNMP Input plugin. Thanks @titilambert!
- [#589](https://github.com/influxdata/telegraf/pull/589): Microsoft SQL Server input plugin. Thanks @zensqlmonitor!
- [#573](https://github.com/influxdata/telegraf/pull/573): Github webhooks consumer input. Thanks @jackzampolin!
- [#471](https://github.com/influxdata/telegraf/pull/471): httpjson request headers. Thanks @asosso!

### Bugfixes
- [#506](https://github.com/influxdata/telegraf/pull/506): Ping input doesn't return a response time metric on timeout. Thanks @titilambert!
- [#508](https://github.com/influxdata/telegraf/pull/508): Fix prometheus cardinality issue with the `net` plugin.
- [#499](https://github.com/influxdata/telegraf/issues/499) & [#502](https://github.com/influxdata/telegraf/issues/502): php fpm unix socket and other fixes, thanks @kureikain!
- [#543](https://github.com/influxdata/telegraf/issues/543): Statsd packet size sometimes truncated.
- [#440](https://github.com/influxdata/telegraf/issues/440): Don't query filtered devices for disk stats.
- [#463](https://github.com/influxdata/telegraf/issues/463): Docker plugin not working on AWS Linux.
- [#568](https://github.com/influxdata/telegraf/issues/568): Multiple output race condition.
- [#585](https://github.com/influxdata/telegraf/pull/585): Log stack trace and continue on Telegraf panic. Thanks @wutaizeng!

## v0.10.0 [2016-01-12]

### Release Notes
- Linux packages have been taken out of `opt`; the binary is now in `/usr/bin`
@@ -39,29 +122,29 @@ configurations overwritten by the upgrade. There is a backup stored at

## v0.2.5 [unreleased]

### Features
- [#427](https://github.com/influxdb/telegraf/pull/427): zfs plugin: pool stats added. Thanks @allenpetersen!
- [#428](https://github.com/influxdb/telegraf/pull/428): Amazon Kinesis output. Thanks @jimmystewpot!
- [#449](https://github.com/influxdb/telegraf/pull/449): influxdb plugin, thanks @mark-rushakoff
- [#427](https://github.com/influxdata/telegraf/pull/427): zfs plugin: pool stats added. Thanks @allenpetersen!
- [#428](https://github.com/influxdata/telegraf/pull/428): Amazon Kinesis output. Thanks @jimmystewpot!
- [#449](https://github.com/influxdata/telegraf/pull/449): influxdb plugin, thanks @mark-rushakoff

### Bugfixes
- [#430](https://github.com/influxdb/telegraf/issues/430): Network statistics removed in elasticsearch 2.1. Thanks @jipperinbham!
- [#452](https://github.com/influxdb/telegraf/issues/452): Elasticsearch open file handles error. Thanks @jipperinbham!
- [#430](https://github.com/influxdata/telegraf/issues/430): Network statistics removed in elasticsearch 2.1. Thanks @jipperinbham!
- [#452](https://github.com/influxdata/telegraf/issues/452): Elasticsearch open file handles error. Thanks @jipperinbham!

## v0.2.4 [2015-12-08]

### Features
- [#412](https://github.com/influxdb/telegraf/pull/412): Additional memcached stats. Thanks @mgresser!
- [#410](https://github.com/influxdb/telegraf/pull/410): Additional redis metrics. Thanks @vlaadbrain!
- [#414](https://github.com/influxdb/telegraf/issues/414): Jolokia plugin auth parameters
- [#415](https://github.com/influxdb/telegraf/issues/415): memcached plugin: support unix sockets
- [#418](https://github.com/influxdb/telegraf/pull/418): memcached plugin additional unit tests.
- [#408](https://github.com/influxdb/telegraf/pull/408): MailChimp plugin.
- [#382](https://github.com/influxdb/telegraf/pull/382): Add system wide network protocol stats to `net` plugin.
- [#401](https://github.com/influxdb/telegraf/pull/401): Support pass/drop/tagpass/tagdrop for outputs. Thanks @oldmantaiter!
- [#412](https://github.com/influxdata/telegraf/pull/412): Additional memcached stats. Thanks @mgresser!
- [#410](https://github.com/influxdata/telegraf/pull/410): Additional redis metrics. Thanks @vlaadbrain!
- [#414](https://github.com/influxdata/telegraf/issues/414): Jolokia plugin auth parameters
- [#415](https://github.com/influxdata/telegraf/issues/415): memcached plugin: support unix sockets
- [#418](https://github.com/influxdata/telegraf/pull/418): memcached plugin additional unit tests.
- [#408](https://github.com/influxdata/telegraf/pull/408): MailChimp plugin.
- [#382](https://github.com/influxdata/telegraf/pull/382): Add system wide network protocol stats to `net` plugin.
- [#401](https://github.com/influxdata/telegraf/pull/401): Support pass/drop/tagpass/tagdrop for outputs. Thanks @oldmantaiter!

### Bugfixes
- [#405](https://github.com/influxdb/telegraf/issues/405): Prometheus output cardinality issue
- [#388](https://github.com/influxdb/telegraf/issues/388): Fix collection hangup when cpu times decrement.
- [#405](https://github.com/influxdata/telegraf/issues/405): Prometheus output cardinality issue
- [#388](https://github.com/influxdata/telegraf/issues/388): Fix collection hangup when cpu times decrement.

## v0.2.3 [2015-11-30]
@@ -90,15 +173,15 @@ same type can be specified, like this:
- Aerospike plugin: tag changed from `host` -> `aerospike_host`

### Features
- [#379](https://github.com/influxdb/telegraf/pull/379): Riemann output, thanks @allenj!
- [#375](https://github.com/influxdb/telegraf/pull/375): kafka_consumer service plugin.
- [#392](https://github.com/influxdb/telegraf/pull/392): Procstat plugin can now accept pgrep -f pattern, thanks @ecarreras!
- [#383](https://github.com/influxdb/telegraf/pull/383): Specify plugins as a list.
- [#354](https://github.com/influxdb/telegraf/pull/354): Add ability to specify multiple metrics in one statsd line. Thanks @MerlinDMC!
- [#379](https://github.com/influxdata/telegraf/pull/379): Riemann output, thanks @allenj!
- [#375](https://github.com/influxdata/telegraf/pull/375): kafka_consumer service plugin.
- [#392](https://github.com/influxdata/telegraf/pull/392): Procstat plugin can now accept pgrep -f pattern, thanks @ecarreras!
- [#383](https://github.com/influxdata/telegraf/pull/383): Specify plugins as a list.
- [#354](https://github.com/influxdata/telegraf/pull/354): Add ability to specify multiple metrics in one statsd line. Thanks @MerlinDMC!

### Bugfixes
- [#371](https://github.com/influxdb/telegraf/issues/371): Kafka consumer plugin not functioning.
- [#389](https://github.com/influxdb/telegraf/issues/389): NaN value panic
- [#371](https://github.com/influxdata/telegraf/issues/371): Kafka consumer plugin not functioning.
- [#389](https://github.com/influxdata/telegraf/issues/389): NaN value panic

## v0.2.2 [2015-11-18]

@@ -107,7 +190,7 @@ same type can be specified, like this:
lists of servers/URLs. 0.2.2 is being released solely to fix that bug

### Bugfixes
- [#377](https://github.com/influxdb/telegraf/pull/377): Fix for duplicate slices in inputs.
- [#377](https://github.com/influxdata/telegraf/pull/377): Fix for duplicate slices in inputs.

## v0.2.1 [2015-11-16]
@@ -124,22 +207,22 @@ changed to just run docker commands in the Makefile. See `make docker-run` and
same type.

### Features
- [#325](https://github.com/influxdb/telegraf/pull/325): NSQ output. Thanks @jrxFive!
- [#318](https://github.com/influxdb/telegraf/pull/318): Prometheus output. Thanks @oldmantaiter!
- [#338](https://github.com/influxdb/telegraf/pull/338): Restart Telegraf on package upgrade. Thanks @linsomniac!
- [#337](https://github.com/influxdb/telegraf/pull/337): Jolokia plugin, thanks @saiello!
- [#350](https://github.com/influxdb/telegraf/pull/350): Amon output.
- [#365](https://github.com/influxdb/telegraf/pull/365): Twemproxy plugin by @codeb2cc
- [#317](https://github.com/influxdb/telegraf/issues/317): ZFS plugin, thanks @cornerot!
- [#364](https://github.com/influxdb/telegraf/pull/364): Support InfluxDB UDP output.
- [#370](https://github.com/influxdb/telegraf/pull/370): Support specifying multiple outputs, as lists.
- [#372](https://github.com/influxdb/telegraf/pull/372): Remove gosigar and update go-dockerclient for FreeBSD support. Thanks @MerlinDMC!
- [#325](https://github.com/influxdata/telegraf/pull/325): NSQ output. Thanks @jrxFive!
- [#318](https://github.com/influxdata/telegraf/pull/318): Prometheus output. Thanks @oldmantaiter!
- [#338](https://github.com/influxdata/telegraf/pull/338): Restart Telegraf on package upgrade. Thanks @linsomniac!
- [#337](https://github.com/influxdata/telegraf/pull/337): Jolokia plugin, thanks @saiello!
- [#350](https://github.com/influxdata/telegraf/pull/350): Amon output.
- [#365](https://github.com/influxdata/telegraf/pull/365): Twemproxy plugin by @codeb2cc
- [#317](https://github.com/influxdata/telegraf/issues/317): ZFS plugin, thanks @cornerot!
- [#364](https://github.com/influxdata/telegraf/pull/364): Support InfluxDB UDP output.
- [#370](https://github.com/influxdata/telegraf/pull/370): Support specifying multiple outputs, as lists.
- [#372](https://github.com/influxdata/telegraf/pull/372): Remove gosigar and update go-dockerclient for FreeBSD support. Thanks @MerlinDMC!

### Bugfixes
- [#331](https://github.com/influxdb/telegraf/pull/331): Don't overwrite host tag in redis plugin.
- [#336](https://github.com/influxdb/telegraf/pull/336): Mongodb plugin should take 2 measurements.
- [#351](https://github.com/influxdb/telegraf/issues/317): Fix continual "CREATE DATABASE" in writes
- [#360](https://github.com/influxdb/telegraf/pull/360): Apply prefix before ShouldPass check. Thanks @sotfo!
- [#331](https://github.com/influxdata/telegraf/pull/331): Don't overwrite host tag in redis plugin.
- [#336](https://github.com/influxdata/telegraf/pull/336): Mongodb plugin should take 2 measurements.
- [#351](https://github.com/influxdata/telegraf/issues/317): Fix continual "CREATE DATABASE" in writes
- [#360](https://github.com/influxdata/telegraf/pull/360): Apply prefix before ShouldPass check. Thanks @sotfo!

## v0.2.0 [2015-10-27]
@@ -160,38 +243,38 @@ be controlled via the `round_interval` and `flush_jitter` config options.
- Telegraf will now retry metric flushes twice

### Features
- [#205](https://github.com/influxdb/telegraf/issues/205): Include per-db redis keyspace info
- [#226](https://github.com/influxdb/telegraf/pull/226): Add timestamps to points in Kafka/AMQP outputs. Thanks @ekini
- [#90](https://github.com/influxdb/telegraf/issues/90): Add Docker labels to tags in docker plugin
- [#223](https://github.com/influxdb/telegraf/pull/223): Add port tag to nginx plugin. Thanks @neezgee!
- [#227](https://github.com/influxdb/telegraf/pull/227): Add command intervals to exec plugin. Thanks @jpalay!
- [#241](https://github.com/influxdb/telegraf/pull/241): MQTT Output. Thanks @shirou!
- [#205](https://github.com/influxdata/telegraf/issues/205): Include per-db redis keyspace info
- [#226](https://github.com/influxdata/telegraf/pull/226): Add timestamps to points in Kafka/AMQP outputs. Thanks @ekini
- [#90](https://github.com/influxdata/telegraf/issues/90): Add Docker labels to tags in docker plugin
- [#223](https://github.com/influxdata/telegraf/pull/223): Add port tag to nginx plugin. Thanks @neezgee!
- [#227](https://github.com/influxdata/telegraf/pull/227): Add command intervals to exec plugin. Thanks @jpalay!
- [#241](https://github.com/influxdata/telegraf/pull/241): MQTT Output. Thanks @shirou!
- Memory plugin: cached and buffered measurements re-added
- Logging: additional logging for each collection interval, track the number of metrics collected and from how many inputs.
- [#240](https://github.com/influxdb/telegraf/pull/240): procstat plugin, thanks @ranjib!
- [#244](https://github.com/influxdb/telegraf/pull/244): netstat plugin, thanks @shirou!
- [#262](https://github.com/influxdb/telegraf/pull/262): zookeeper plugin, thanks @jrxFive!
- [#237](https://github.com/influxdb/telegraf/pull/237): statsd service plugin, thanks @sparrc
- [#273](https://github.com/influxdb/telegraf/pull/273): puppet agent plugin, thanks @jrxFive!
- [#280](https://github.com/influxdb/telegraf/issues/280): Use InfluxDB client v2.
- [#281](https://github.com/influxdb/telegraf/issues/281): Eliminate need to deep copy Batch Points.
- [#286](https://github.com/influxdb/telegraf/issues/286): bcache plugin, thanks @cornerot!
- [#287](https://github.com/influxdb/telegraf/issues/287): Batch AMQP output, thanks @ekini!
- [#301](https://github.com/influxdb/telegraf/issues/301): Collect on even intervals
- [#298](https://github.com/influxdb/telegraf/pull/298): Support retrying output writes
- [#300](https://github.com/influxdb/telegraf/issues/300): aerospike plugin. Thanks @oldmantaiter!
- [#322](https://github.com/influxdb/telegraf/issues/322): Librato output. Thanks @jipperinbham!
- [#240](https://github.com/influxdata/telegraf/pull/240): procstat plugin, thanks @ranjib!
- [#244](https://github.com/influxdata/telegraf/pull/244): netstat plugin, thanks @shirou!
- [#262](https://github.com/influxdata/telegraf/pull/262): zookeeper plugin, thanks @jrxFive!
- [#237](https://github.com/influxdata/telegraf/pull/237): statsd service plugin, thanks @sparrc
- [#273](https://github.com/influxdata/telegraf/pull/273): puppet agent plugin, thanks @jrxFive!
- [#280](https://github.com/influxdata/telegraf/issues/280): Use InfluxDB client v2.
- [#281](https://github.com/influxdata/telegraf/issues/281): Eliminate need to deep copy Batch Points.
- [#286](https://github.com/influxdata/telegraf/issues/286): bcache plugin, thanks @cornerot!
- [#287](https://github.com/influxdata/telegraf/issues/287): Batch AMQP output, thanks @ekini!
- [#301](https://github.com/influxdata/telegraf/issues/301): Collect on even intervals
- [#298](https://github.com/influxdata/telegraf/pull/298): Support retrying output writes
- [#300](https://github.com/influxdata/telegraf/issues/300): aerospike plugin. Thanks @oldmantaiter!
- [#322](https://github.com/influxdata/telegraf/issues/322): Librato output. Thanks @jipperinbham!

### Bugfixes
- [#228](https://github.com/influxdb/telegraf/pull/228): New version of package will replace old one. Thanks @ekini!
- [#232](https://github.com/influxdb/telegraf/pull/232): Fix bashism run during deb package installation. Thanks @yankcrime!
- [#261](https://github.com/influxdb/telegraf/issues/260): RabbitMQ panics if wrong credentials given. Thanks @ekini!
- [#245](https://github.com/influxdb/telegraf/issues/245): Document Exec plugin example. Thanks @ekini!
- [#264](https://github.com/influxdb/telegraf/issues/264): logrotate config file fixes. Thanks @linsomniac!
- [#290](https://github.com/influxdb/telegraf/issues/290): Fix some plugins sending their values as strings.
- [#289](https://github.com/influxdb/telegraf/issues/289): Fix accumulator panic on nil tags.
- [#302](https://github.com/influxdb/telegraf/issues/302): Fix `[tags]` getting applied, thanks @gotyaoi!
- [#228](https://github.com/influxdata/telegraf/pull/228): New version of package will replace old one. Thanks @ekini!
- [#232](https://github.com/influxdata/telegraf/pull/232): Fix bashism run during deb package installation. Thanks @yankcrime!
- [#261](https://github.com/influxdata/telegraf/issues/260): RabbitMQ panics if wrong credentials given. Thanks @ekini!
- [#245](https://github.com/influxdata/telegraf/issues/245): Document Exec plugin example. Thanks @ekini!
- [#264](https://github.com/influxdata/telegraf/issues/264): logrotate config file fixes. Thanks @linsomniac!
- [#290](https://github.com/influxdata/telegraf/issues/290): Fix some plugins sending their values as strings.
- [#289](https://github.com/influxdata/telegraf/issues/289): Fix accumulator panic on nil tags.
- [#302](https://github.com/influxdata/telegraf/issues/302): Fix `[tags]` getting applied, thanks @gotyaoi!

## v0.1.9 [2015-09-22]
@@ -217,27 +300,27 @@ have been renamed for consistency. Some measurements have also been removed from
re-added in a "verbose" mode if there is demand for it.

### Features
- [#143](https://github.com/influxdb/telegraf/issues/143): InfluxDB clustering support
- [#181](https://github.com/influxdb/telegraf/issues/181): Makefile GOBIN support. Thanks @Vye!
- [#203](https://github.com/influxdb/telegraf/pull/200): AMQP output. Thanks @ekini!
- [#182](https://github.com/influxdb/telegraf/pull/182): OpenTSDB output. Thanks @rplessl!
- [#187](https://github.com/influxdb/telegraf/pull/187): Retry output sink connections on startup.
- [#220](https://github.com/influxdb/telegraf/pull/220): Add port tag to apache plugin. Thanks @neezgee!
- [#217](https://github.com/influxdb/telegraf/pull/217): Add filtering for output sinks
- [#143](https://github.com/influxdata/telegraf/issues/143): InfluxDB clustering support
- [#181](https://github.com/influxdata/telegraf/issues/181): Makefile GOBIN support. Thanks @Vye!
- [#203](https://github.com/influxdata/telegraf/pull/200): AMQP output. Thanks @ekini!
- [#182](https://github.com/influxdata/telegraf/pull/182): OpenTSDB output. Thanks @rplessl!
- [#187](https://github.com/influxdata/telegraf/pull/187): Retry output sink connections on startup.
- [#220](https://github.com/influxdata/telegraf/pull/220): Add port tag to apache plugin. Thanks @neezgee!
- [#217](https://github.com/influxdata/telegraf/pull/217): Add filtering for output sinks
and filtering when specifying a config file.

### Bugfixes
- [#170](https://github.com/influxdb/telegraf/issues/170): Systemd support
- [#175](https://github.com/influxdb/telegraf/issues/175): Set write precision before gathering metrics
- [#178](https://github.com/influxdb/telegraf/issues/178): redis plugin, multiple server thread hang bug
- [#170](https://github.com/influxdata/telegraf/issues/170): Systemd support
- [#175](https://github.com/influxdata/telegraf/issues/175): Set write precision before gathering metrics
- [#178](https://github.com/influxdata/telegraf/issues/178): redis plugin, multiple server thread hang bug
- Fix net plugin on darwin
- [#84](https://github.com/influxdb/telegraf/issues/84): Fix docker plugin on CentOS. Thanks @neezgee!
- [#189](https://github.com/influxdb/telegraf/pull/189): Fix mem_used_perc. Thanks @mced!
- [#192](https://github.com/influxdb/telegraf/issues/192): Increase compatibility of postgresql plugin. Now supports versions 8.1+
- [#203](https://github.com/influxdb/telegraf/issues/203): EL5 rpm support. Thanks @ekini!
- [#206](https://github.com/influxdb/telegraf/issues/206): CPU steal/guest values wrong on linux.
- [#212](https://github.com/influxdb/telegraf/issues/212): Add hashbang to postinstall script. Thanks @ekini!
- [#212](https://github.com/influxdb/telegraf/issues/212): Fix makefile warning. Thanks @ekini!
- [#84](https://github.com/influxdata/telegraf/issues/84): Fix docker plugin on CentOS. Thanks @neezgee!
- [#189](https://github.com/influxdata/telegraf/pull/189): Fix mem_used_perc. Thanks @mced!
- [#192](https://github.com/influxdata/telegraf/issues/192): Increase compatibility of postgresql plugin. Now supports versions 8.1+
- [#203](https://github.com/influxdata/telegraf/issues/203): EL5 rpm support. Thanks @ekini!
- [#206](https://github.com/influxdata/telegraf/issues/206): CPU steal/guest values wrong on linux.
- [#212](https://github.com/influxdata/telegraf/issues/212): Add hashbang to postinstall script. Thanks @ekini!
- [#212](https://github.com/influxdata/telegraf/issues/212): Fix makefile warning. Thanks @ekini!

## v0.1.8 [2015-09-04]
@@ -246,106 +329,106 @@ and filtering when specifying a config file.
- Now using Go 1.5 to build telegraf

### Features
- [#150](https://github.com/influxdb/telegraf/pull/150): Add Host Uptime metric to system plugin
- [#158](https://github.com/influxdb/telegraf/pull/158): Apache Plugin. Thanks @KPACHbIuLLIAnO4
- [#159](https://github.com/influxdb/telegraf/pull/159): Use second precision for InfluxDB writes
- [#165](https://github.com/influxdb/telegraf/pull/165): Add additional metrics to mysql plugin. Thanks @nickscript0
- [#162](https://github.com/influxdb/telegraf/pull/162): Write UTC by default, provide option
- [#166](https://github.com/influxdb/telegraf/pull/166): Upload binaries to S3
- [#169](https://github.com/influxdb/telegraf/pull/169): Ping plugin
- [#150](https://github.com/influxdata/telegraf/pull/150): Add Host Uptime metric to system plugin
- [#158](https://github.com/influxdata/telegraf/pull/158): Apache Plugin. Thanks @KPACHbIuLLIAnO4
- [#159](https://github.com/influxdata/telegraf/pull/159): Use second precision for InfluxDB writes
- [#165](https://github.com/influxdata/telegraf/pull/165): Add additional metrics to mysql plugin. Thanks @nickscript0
- [#162](https://github.com/influxdata/telegraf/pull/162): Write UTC by default, provide option
- [#166](https://github.com/influxdata/telegraf/pull/166): Upload binaries to S3
- [#169](https://github.com/influxdata/telegraf/pull/169): Ping plugin

### Bugfixes

## v0.1.7 [2015-08-28]

### Features
- [#38](https://github.com/influxdb/telegraf/pull/38): Kafka output producer.
- [#133](https://github.com/influxdb/telegraf/pull/133): Add plugin.Gather error logging. Thanks @nickscript0!
- [#136](https://github.com/influxdb/telegraf/issues/136): Add a -usage flag for printing usage of a single plugin.
- [#137](https://github.com/influxdb/telegraf/issues/137): Memcached: fix when a value contains a space
- [#138](https://github.com/influxdb/telegraf/issues/138): MySQL server address tag.
- [#142](https://github.com/influxdb/telegraf/pull/142): Add Description and SampleConfig funcs to output interface
- [#38](https://github.com/influxdata/telegraf/pull/38): Kafka output producer.
- [#133](https://github.com/influxdata/telegraf/pull/133): Add plugin.Gather error logging. Thanks @nickscript0!
- [#136](https://github.com/influxdata/telegraf/issues/136): Add a -usage flag for printing usage of a single plugin.
- [#137](https://github.com/influxdata/telegraf/issues/137): Memcached: fix when a value contains a space
- [#138](https://github.com/influxdata/telegraf/issues/138): MySQL server address tag.
- [#142](https://github.com/influxdata/telegraf/pull/142): Add Description and SampleConfig funcs to output interface
- Indent the toml config file for readability

### Bugfixes
- [#128](https://github.com/influxdb/telegraf/issues/128): system_load measurement missing.
- [#129](https://github.com/influxdb/telegraf/issues/129): Latest pkg url fix.
- [#131](https://github.com/influxdb/telegraf/issues/131): Fix memory reporting on linux & darwin. Thanks @subhachandrachandra!
- [#140](https://github.com/influxdb/telegraf/issues/140): Memory plugin prec->perc typo fix. Thanks @brunoqc!
- [#128](https://github.com/influxdata/telegraf/issues/128): system_load measurement missing.
- [#129](https://github.com/influxdata/telegraf/issues/129): Latest pkg url fix.
- [#131](https://github.com/influxdata/telegraf/issues/131): Fix memory reporting on linux & darwin. Thanks @subhachandrachandra!
- [#140](https://github.com/influxdata/telegraf/issues/140): Memory plugin prec->perc typo fix. Thanks @brunoqc!

## v0.1.6 [2015-08-20]

### Features
- [#112](https://github.com/influxdb/telegraf/pull/112): Datadog output. Thanks @jipperinbham!
- [#116](https://github.com/influxdb/telegraf/pull/116): Use godep to vendor all dependencies
- [#120](https://github.com/influxdb/telegraf/pull/120): Httpjson plugin. Thanks @jpalay & @alvaromorales!
- [#112](https://github.com/influxdata/telegraf/pull/112): Datadog output. Thanks @jipperinbham!
- [#116](https://github.com/influxdata/telegraf/pull/116): Use godep to vendor all dependencies
- [#120](https://github.com/influxdata/telegraf/pull/120): Httpjson plugin. Thanks @jpalay & @alvaromorales!

### Bugfixes
- [#113](https://github.com/influxdb/telegraf/issues/113): Update README with Telegraf/InfluxDB compatibility
- [#118](https://github.com/influxdb/telegraf/pull/118): Fix for disk usage stats in Windows. Thanks @srfraser!
- [#122](https://github.com/influxdb/telegraf/issues/122): Fix for DiskUsage segv fault. Thanks @srfraser!
- [#126](https://github.com/influxdb/telegraf/issues/126): Nginx plugin not catching net.SplitHostPort error
- [#113](https://github.com/influxdata/telegraf/issues/113): Update README with Telegraf/InfluxDB compatibility
- [#118](https://github.com/influxdata/telegraf/pull/118): Fix for disk usage stats in Windows. Thanks @srfraser!
- [#122](https://github.com/influxdata/telegraf/issues/122): Fix for DiskUsage segv fault. Thanks @srfraser!
- [#126](https://github.com/influxdata/telegraf/issues/126): Nginx plugin not catching net.SplitHostPort error

## v0.1.5 [2015-08-13]

### Features
- [#54](https://github.com/influxdb/telegraf/pull/54): MongoDB plugin. Thanks @jipperinbham!
- [#55](https://github.com/influxdb/telegraf/pull/55): Elasticsearch plugin. Thanks @brocaar!
- [#71](https://github.com/influxdb/telegraf/pull/71): HAProxy plugin. Thanks @kureikain!
- [#72](https://github.com/influxdb/telegraf/pull/72): Adding TokuDB metrics to MySQL. Thanks vadimtk!
- [#73](https://github.com/influxdb/telegraf/pull/73): RabbitMQ plugin. Thanks @ianunruh!
- [#77](https://github.com/influxdb/telegraf/issues/77): Automatically create database.
- [#79](https://github.com/influxdb/telegraf/pull/56): Nginx plugin. Thanks @codeb2cc!
- [#86](https://github.com/influxdb/telegraf/pull/86): Lustre2 plugin. Thanks srfraser!
- [#91](https://github.com/influxdb/telegraf/pull/91): Unit testing
- [#92](https://github.com/influxdb/telegraf/pull/92): Exec plugin. Thanks @alvaromorales!
- [#98](https://github.com/influxdb/telegraf/pull/98): LeoFS plugin. Thanks @mocchira!
- [#103](https://github.com/influxdb/telegraf/pull/103): Filter by metric tags. Thanks @srfraser!
- [#106](https://github.com/influxdb/telegraf/pull/106): Options to filter plugins on startup. Thanks @zepouet!
- [#107](https://github.com/influxdb/telegraf/pull/107): Multiple outputs beyond influxdb. Thanks @jipperinbham!
- [#108](https://github.com/influxdb/telegraf/issues/108): Support setting per-CPU and total-CPU gathering.
- [#111](https://github.com/influxdb/telegraf/pull/111): Report CPU Usage in cpu plugin. Thanks @jpalay!
- [#54](https://github.com/influxdata/telegraf/pull/54): MongoDB plugin. Thanks @jipperinbham!
- [#55](https://github.com/influxdata/telegraf/pull/55): Elasticsearch plugin. Thanks @brocaar!
- [#71](https://github.com/influxdata/telegraf/pull/71): HAProxy plugin. Thanks @kureikain!
- [#72](https://github.com/influxdata/telegraf/pull/72): Adding TokuDB metrics to MySQL. Thanks vadimtk!
- [#73](https://github.com/influxdata/telegraf/pull/73): RabbitMQ plugin. Thanks @ianunruh!
- [#77](https://github.com/influxdata/telegraf/issues/77): Automatically create database.
- [#79](https://github.com/influxdata/telegraf/pull/56): Nginx plugin. Thanks @codeb2cc!
- [#86](https://github.com/influxdata/telegraf/pull/86): Lustre2 plugin. Thanks srfraser!
- [#91](https://github.com/influxdata/telegraf/pull/91): Unit testing
- [#92](https://github.com/influxdata/telegraf/pull/92): Exec plugin. Thanks @alvaromorales!
- [#98](https://github.com/influxdata/telegraf/pull/98): LeoFS plugin. Thanks @mocchira!
- [#103](https://github.com/influxdata/telegraf/pull/103): Filter by metric tags. Thanks @srfraser!
- [#106](https://github.com/influxdata/telegraf/pull/106): Options to filter plugins on startup. Thanks @zepouet!
- [#107](https://github.com/influxdata/telegraf/pull/107): Multiple outputs beyond influxdb. Thanks @jipperinbham!
- [#108](https://github.com/influxdata/telegraf/issues/108): Support setting per-CPU and total-CPU gathering.
- [#111](https://github.com/influxdata/telegraf/pull/111): Report CPU Usage in cpu plugin. Thanks @jpalay!

### Bugfixes
- [#85](https://github.com/influxdb/telegraf/pull/85): Fix GetLocalHost testutil function for mac users
- [#89](https://github.com/influxdb/telegraf/pull/89): go fmt fixes
- [#94](https://github.com/influxdb/telegraf/pull/94): Fix for issue #93, explicitly call sarama.v1 -> sarama
- [#101](https://github.com/influxdb/telegraf/issues/101): switch back from master branch if building locally
- [#99](https://github.com/influxdb/telegraf/issues/99): update integer output to new InfluxDB line protocol format
- [#85](https://github.com/influxdata/telegraf/pull/85): Fix GetLocalHost testutil function for mac users
- [#89](https://github.com/influxdata/telegraf/pull/89): go fmt fixes
- [#94](https://github.com/influxdata/telegraf/pull/94): Fix for issue #93, explicitly call sarama.v1 -> sarama
- [#101](https://github.com/influxdata/telegraf/issues/101): switch back from master branch if building locally
- [#99](https://github.com/influxdata/telegraf/issues/99): update integer output to new InfluxDB line protocol format

## v0.1.4 [2015-07-09]

### Features
- [#56](https://github.com/influxdb/telegraf/pull/56): Update README for Kafka plugin. Thanks @EmilS!
- [#56](https://github.com/influxdata/telegraf/pull/56): Update README for Kafka plugin. Thanks @EmilS!

### Bugfixes
- [#50](https://github.com/influxdb/telegraf/pull/50): Fix init.sh script to use telegraf directory. Thanks @jseriff!
- [#52](https://github.com/influxdb/telegraf/pull/52): Update CHANGELOG to reference updated directory. Thanks @benfb!
- [#50](https://github.com/influxdata/telegraf/pull/50): Fix init.sh script to use telegraf directory. Thanks @jseriff!
- [#52](https://github.com/influxdata/telegraf/pull/52): Update CHANGELOG to reference updated directory. Thanks @benfb!

## v0.1.3 [2015-07-05]

### Features
- [#35](https://github.com/influxdb/telegraf/pull/35): Add Kafka plugin. Thanks @EmilS!
- [#47](https://github.com/influxdb/telegraf/pull/47): Add RethinkDB plugin. Thanks @jipperinbham!
- [#35](https://github.com/influxdata/telegraf/pull/35): Add Kafka plugin. Thanks @EmilS!
- [#47](https://github.com/influxdata/telegraf/pull/47): Add RethinkDB plugin. Thanks @jipperinbham!

### Bugfixes
- [#45](https://github.com/influxdb/telegraf/pull/45): Skip disk tags that don't have a value. Thanks @jhofeditz!
- [#43](https://github.com/influxdb/telegraf/pull/43): Fix bug in MySQL plugin. Thanks @marcosnils!
- [#45](https://github.com/influxdata/telegraf/pull/45): Skip disk tags that don't have a value. Thanks @jhofeditz!
- [#43](https://github.com/influxdata/telegraf/pull/43): Fix bug in MySQL plugin. Thanks @marcosnils!

## v0.1.2 [2015-07-01]

### Features
- [#12](https://github.com/influxdb/telegraf/pull/12): Add Linux/ARM to the list of built binaries. Thanks @voxxit!
- [#14](https://github.com/influxdb/telegraf/pull/14): Clarify the S3 buckets that Telegraf is pushed to.
- [#16](https://github.com/influxdb/telegraf/pull/16): Convert Redis to use URI, support Redis AUTH. Thanks @jipperinbham!
- [#21](https://github.com/influxdb/telegraf/pull/21): Add memcached plugin. Thanks @Yukki!
- [#12](https://github.com/influxdata/telegraf/pull/12): Add Linux/ARM to the list of built binaries. Thanks @voxxit!
- [#14](https://github.com/influxdata/telegraf/pull/14): Clarify the S3 buckets that Telegraf is pushed to.
- [#16](https://github.com/influxdata/telegraf/pull/16): Convert Redis to use URI, support Redis AUTH. Thanks @jipperinbham!
- [#21](https://github.com/influxdata/telegraf/pull/21): Add memcached plugin. Thanks @Yukki!

### Bugfixes
- [#13](https://github.com/influxdb/telegraf/pull/13): Fix the packaging script.
- [#19](https://github.com/influxdb/telegraf/pull/19): Add host name to metric tags. Thanks @sherifzain!
- [#20](https://github.com/influxdb/telegraf/pull/20): Fix race condition with accumulator mutex. Thanks @nkatsaros!
- [#23](https://github.com/influxdb/telegraf/pull/23): Change name of folder for packages. Thanks @colinrymer!
- [#32](https://github.com/influxdb/telegraf/pull/32): Fix spelling of memoory -> memory. Thanks @tylernisonoff!
- [#13](https://github.com/influxdata/telegraf/pull/13): Fix the packaging script.
- [#19](https://github.com/influxdata/telegraf/pull/19): Add host name to metric tags. Thanks @sherifzain!
- [#20](https://github.com/influxdata/telegraf/pull/20): Fix race condition with accumulator mutex. Thanks @nkatsaros!
- [#23](https://github.com/influxdata/telegraf/pull/23): Change name of folder for packages. Thanks @colinrymer!
- [#32](https://github.com/influxdata/telegraf/pull/32): Fix spelling of memoory -> memory. Thanks @tylernisonoff!

## v0.1.1 [2015-06-19]
@@ -2,27 +2,46 @@

## Generating a Configuration File

A default Telegraf config file can be generated using the `-sample-config` flag,
like this: `telegraf -sample-config`
A default Telegraf config file can be generated using the -sample-config flag:
`telegraf -sample-config > telegraf.conf`

To generate a file with specific inputs and outputs, you can use the
`-input-filter` and `-output-filter` flags, like this:
-input-filter and -output-filter flags:
`telegraf -sample-config -input-filter cpu:mem:net:swap -output-filter influxdb:kafka`

## Telegraf Agent Configuration
## `[tags]` Configuration

Global tags can be specified in the `[tags]` section of the config file in key="value" format. All metrics being gathered on this host will be tagged with the tags specified here.
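
A minimal sketch of such a `[tags]` section; the `dc` value mirrors the `dc="denver-1"` example used further down this page:

```toml
# Global tags in key="value" format, applied to every metric gathered on this host
[tags]
  dc = "denver-1"
```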

## `[agent]` Configuration

Telegraf has a few options you can configure under the `agent` section of the config (a sample sketch follows this list).

* **hostname**: The hostname is passed as a tag. By default this will be the value returned by `hostname` on the machine running Telegraf. You can override that value here.
* **interval**: How often to gather metrics. Uses a simple number + unit parser, e.g. "10s" for 10 seconds or "5m" for 5 minutes.
* **debug**: Set to true to gather and send metrics to STDOUT as well as InfluxDB.
* **interval**: Default data collection interval for all inputs
* **round_interval**: Rounds collection interval to 'interval', i.e. if interval="10s" then always collect on :00, :10, :20, etc.
* **metric_buffer_limit**: Telegraf will cache metric_buffer_limit metrics for each output, and will flush this buffer on a successful write.
* **collection_jitter**: Collection jitter is used to jitter the collection by a random amount. Each plugin will sleep for a random time within jitter before collecting. This can be used to avoid many plugins querying things like sysfs at the same time, which can have a measurable effect on the system.
* **flush_interval**: Default data flushing interval for all outputs. You should not set this below interval. Maximum flush_interval will be flush_interval + flush_jitter.
* **flush_jitter**: Jitter the flush interval by a random amount. This is primarily to avoid large write spikes for users running a large number of telegraf instances, i.e. a jitter of 5s and flush_interval of 10s means flushes will happen every 10-15s.
* **debug**: Run telegraf in debug mode.
* **quiet**: Run telegraf in quiet mode.
* **hostname**: Override default hostname, if empty use os.Hostname().
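
A minimal `[agent]` sketch assembled from the options listed above; every option name comes from the list, while the values are illustrative, not prescribed defaults:

```toml
[agent]
  interval = "10s"             # default collection interval for all inputs
  round_interval = true        # with "10s", always collect on :00, :10, :20, ...
  metric_buffer_limit = 10000  # metrics cached per output until a successful write
  collection_jitter = "0s"     # random sleep within jitter before each collection
  flush_interval = "10s"       # default flush interval for all outputs (not below interval)
  flush_jitter = "5s"          # 5s jitter + 10s flush_interval => flushes every 10-15s
  debug = false                # run telegraf in debug mode
  quiet = false                # run telegraf in quiet mode
  hostname = ""                # override hostname; if empty, use os.Hostname()
```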

## Input Configuration
## `[inputs.xxx]` Configuration

There are some configuration options that are configurable per input:

@@ -35,7 +54,7 @@ There are some configuration options that are configurable per input:
global interval, but if one particular input should be run less or more often, you can configure that here.

### Input Filters
#### Input Filters

There are also filters that can be configured per input (a sketch follows this list):

@@ -49,7 +68,7 @@ match against the tag name, and if it matches the measurement is emitted.
* **tagdrop**: The inverse of tagpass. If a tag matches, the measurement is not emitted. This is tested on measurements that have passed the tagpass test.
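
For illustration, a minimal tagpass sketch consistent with the `path = [ "/opt", "/home*" ]` fragment visible in the truncated example below; the `disk` input and the `[inputs.disk.tagpass]` table syntax are assumptions:

```toml
[[inputs.disk]]
  # Only emit disk measurements whose path tag matches one of these patterns
  [inputs.disk.tagpass]
    path = [ "/opt", "/home*" ]
```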

### Input Configuration Examples
#### Input Configuration Examples

This is a full working config that will output CPU data to an InfluxDB instance at 192.168.59.103:8086, tagging measurements with dc="denver-1". It will output

@@ -77,7 +96,7 @@ fields which begin with `time_`.
  drop = ["time_*"]
```

### Input Config: tagpass and tagdrop
#### Input Config: tagpass and tagdrop

```toml
[[inputs.cpu]]

@@ -98,7 +117,7 @@ fields which begin with `time_`.
    path = [ "/opt", "/home*" ]
```

### Input Config: pass and drop
#### Input Config: pass and drop

```toml
# Drop all metrics for guest & steal CPU usage

@@ -112,7 +131,7 @@ fields which begin with `time_`.
  pass = ["inodes*"]
```

### Input config: prefix, suffix, and override
#### Input config: prefix, suffix, and override

This plugin will emit measurements with the name `cpu_total`

@@ -132,7 +151,7 @@ This will emit measurements with the name `foobar`
  totalcpu = true
```
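
A hedged reconstruction of the override example this truncated hunk comes from; `totalcpu = true` and the `foobar` name appear in the context above, while the `name_override` option name is an assumption:

```toml
# Emit cpu measurements under the name "foobar" instead of "cpu"
[[inputs.cpu]]
  name_override = "foobar"  # option name assumed
  totalcpu = true
```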

### Input config: tags
#### Input config: tags

This plugin will emit measurements with two additional tags: `tag1=foo` and `tag2=bar`

@@ -146,7 +165,7 @@ This plugin will emit measurements with two additional tags: `tag1=foo` and
    tag2 = "bar"
```

### Multiple inputs of the same type
#### Multiple inputs of the same type

Additional inputs (or outputs) of the same type can be specified; just define more instances in the config file. It is highly recommended that

@@ -165,7 +184,7 @@ to avoid measurement collisions:
  drop = ["cpu_time*"]
```

## Output Configuration
## `[outputs.xxx]` Configuration

Telegraf also supports specifying multiple output sinks to send data to; configuring each output sink is different, but examples can be
@@ -1,8 +1,30 @@
## Steps for Contributing:

1. [Sign the CLA](http://influxdb.com/community/cla.html)
1. Make changes or write plugin (see below for details)
1. Add your plugin to `plugins/inputs/all/all.go` or `plugins/outputs/all/all.go`
1. If your plugin requires a new Go package, [add it](https://github.com/influxdata/telegraf/blob/master/CONTRIBUTING.md#adding-a-dependency)
1. Write a README for your plugin. If it's an input plugin, it should be structured like the [input example here](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/EXAMPLE_README.md). Output plugin READMEs are less structured, but any information you can provide on how the data will look is appreciated. See the [OpenTSDB output](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/opentsdb) for a good example.

## Sign the CLA

Before we can merge a pull request, you will need to sign the CLA, which can be found [on our website](http://influxdb.com/community/cla.html)

## Adding a dependency

Assuming you can already build the project, run these in the telegraf directory:

1. `go get github.com/sparrc/gdm`
1. `gdm restore`
1. `gdm save`

## Input Plugins

This section is for developers who want to create new collection inputs.

@@ -15,11 +37,11 @@ and submit new inputs.

### Input Plugin Guidelines

* A plugin must conform to the `inputs.Input` interface.
* A plugin must conform to the `telegraf.Input` interface.
* Input Plugins should call `inputs.Add` in their `init` function to register themselves. See below for a quick example.
* Input Plugins must be added to the
`github.com/influxdb/telegraf/plugins/inputs/all/all.go` file.
`github.com/influxdata/telegraf/plugins/inputs/all/all.go` file.
* The `SampleConfig` function should return valid toml that describes how the plugin can be configured. This is included in `telegraf -sample-config`.
* The `Description` function should say in one line what this plugin does.

@@ -75,7 +97,10 @@ package simple

// simple.go

import "github.com/influxdb/telegraf/plugins/inputs"
import (
    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/plugins/inputs"
)

type Simple struct {
    Ok bool

@@ -100,7 +125,7 @@ func (s *Simple) Gather(acc inputs.Accumulator) error {
}

func init() {
    inputs.Add("simple", func() inputs.Input { return &Simple{} })
    inputs.Add("simple", func() telegraf.Input { return &Simple{} })
}
```
@@ -147,7 +172,7 @@ similar constructs.

* Outputs should call `outputs.Add` in their `init` function to register themselves. See below for a quick example.
* To be available within Telegraf itself, plugins must add themselves to the
`github.com/influxdb/telegraf/plugins/outputs/all/all.go` file.
`github.com/influxdata/telegraf/plugins/outputs/all/all.go` file.
* The `SampleConfig` function should return valid toml that describes how the output can be configured. This is included in `telegraf -sample-config`.
* The `Description` function should say in one line what this output does.

@@ -160,7 +185,7 @@ type Output interface {
    Close() error
    Description() string
    SampleConfig() string
    Write(points []*client.Point) error
    Write(metrics []telegraf.Metric) error
}
```

@@ -171,7 +196,10 @@ package simpleoutput

// simpleoutput.go

import "github.com/influxdb/telegraf/plugins/outputs"
import (
    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/plugins/outputs"
)

type Simple struct {
    Ok bool

@@ -195,7 +223,7 @@ func (s *Simple) Close() error {
    return nil
}

func (s *Simple) Write(points []*client.Point) error {
func (s *Simple) Write(metrics []telegraf.Metric) error {
    for _, pt := range metrics {
        // write `pt` to the output sink here
    }

@@ -203,7 +231,7 @@ func (s *Simple) Write(points []*client.Point) error {
}

func init() {
    outputs.Add("simpleoutput", func() outputs.Output { return &Simple{} })
    outputs.Add("simpleoutput", func() telegraf.Output { return &Simple{} })
}
```

@@ -231,7 +259,7 @@ type ServiceOutput interface {
    Close() error
    Description() string
    SampleConfig() string
    Write(points []*client.Point) error
    Write(metrics []telegraf.Metric) error
    Start() error
    Stop()
}

@@ -252,7 +280,7 @@ which would take some time to replicate.

To overcome this situation we've decided to use docker containers to provide a fast and reproducible environment to test those services which require it. For other situations
(i.e: https://github.com/influxdb/telegraf/blob/master/plugins/redis/redis_test.go)
(i.e: https://github.com/influxdata/telegraf/blob/master/plugins/redis/redis_test.go)
a simple mock will suffice.

To execute Telegraf tests follow these simple steps:
Godeps (49 changes)
@@ -1,52 +1,59 @@
git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git dbd8d5c40a582eb9adacde36b47932b3a3ad0034
github.com/Shopify/sarama d37c73f2b2bce85f7fa16b6a550d26c5372892ef
github.com/Sirupsen/logrus 446d1c146faa8ed3f4218f056fcd165f6bcfda81
github.com/Sirupsen/logrus f7f79f729e0fbe2fcc061db48a9ba0263f588252
github.com/amir/raidman 6a8e089bbe32e6b907feae5ba688841974b3c339
github.com/armon/go-metrics 345426c77237ece5dab0e1605c3e4b35c3f54757
github.com/aws/aws-sdk-go f09322ae1e6468fe828c862542389bc45baf3c00
github.com/aws/aws-sdk-go 87b1e60a50b09e4812dee560b33a238f67305804
github.com/beorn7/perks b965b613227fddccbfffe13eae360ed3fa822f8d
github.com/boltdb/bolt 34a0fa5307f7562980fb8e7ff4723f7987edf49b
github.com/boltdb/bolt ee4a0888a9abe7eefe5a0992ca4cb06864839873
github.com/cenkalti/backoff 4dc77674aceaabba2c7e3da25d4c823edfb73f99
github.com/dancannon/gorethink a124c9663325ed9f7fb669d17c69961b59151e6e
github.com/dancannon/gorethink 6f088135ff288deb9d5546f4c71919207f891a70
github.com/davecgh/go-spew 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d
github.com/eapache/go-resiliency b86b1ec0dd4209a588dc1285cdd471e73525c0b3
github.com/eapache/queue ded5959c0d4e360646dc9e9908cff48666781367
github.com/fsouza/go-dockerclient 175e1df973274f04e9b459a62cffc49808f1a649
github.com/fsouza/go-dockerclient 7b651349f9479f5114913eefbfd3c4eeddd79ab4
github.com/go-ini/ini afbd495e5aaea13597b5e14fe514ddeaa4d76fc3
github.com/go-sql-driver/mysql 7a8740a6bd8feb6af5786ab9a9f1513970019d8c
github.com/gogo/protobuf 7b1331554dbe882cb3613ee8f1824a5583627963
github.com/golang/protobuf 2402d76f3d41f928c7902a765dfc872356dd3aad
github.com/go-sql-driver/mysql 7c7f556282622f94213bc028b4d0a7b6151ba239
github.com/gogo/protobuf e8904f58e872a473a5b91bc9bf3377d223555263
github.com/golang/protobuf 6aaa8d47701fa6cf07e914ec01fde3d4a1fe79c3
github.com/golang/snappy 723cc1e459b8eea2dea4583200fd60757d40097a
github.com/gonuts/go-shellquote e842a11b24c6abfb3dd27af69a17f482e4b483c2
github.com/hailocab/go-hostpool 50839ee41f32bfca8d03a183031aa634b2dc1c64
github.com/gorilla/context 1c83b3eabd45b6d76072b66b746c20815fb2872d
github.com/gorilla/mux 26a6070f849969ba72b72256e9f14cf519751690
github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478
github.com/hashicorp/go-msgpack fa3f63826f7c23912c15263591e65d54d080b458
github.com/hashicorp/raft d136cd15dfb7876fd7c89cad1995bc4f19ceb294
github.com/hashicorp/raft 057b893fd996696719e98b6c44649ea14968c811
github.com/hashicorp/raft-boltdb d1e82c1ec3f15ee991f7cc7ffd5b67ff6f5bbaee
github.com/influxdb/influxdb bd63489ef0faae2465ae5b1f0a28bd7e71e02e38
github.com/influxdata/config bae7cb98197d842374d3b8403905924094930f24
github.com/influxdata/influxdb 697f48b4e62e514e701ffec39978b864a3c666e6
github.com/influxdb/influxdb 697f48b4e62e514e701ffec39978b864a3c666e6
github.com/jmespath/go-jmespath c01cf91b011868172fdcd9f41838e80c9d716264
github.com/klauspost/crc32 a3b15ae34567abb20a22992b989cd76f48d09c47
github.com/lib/pq 11fc39a580a008f1f39bb3d11d984fb34ed778d9
github.com/klauspost/crc32 999f3125931f6557b991b2f8472172bdfa578d38
github.com/lib/pq 8ad2b298cadd691a77015666a5372eae5dbfac8f
github.com/matttproud/golang_protobuf_extensions d0c3fe89de86839aecf2e0579c40ba3bb336a453
github.com/mreiferson/go-snappystream 028eae7ab5c4c9e2d1cb4c4ca1e53259bbe7e504
github.com/naoina/go-stringutil 6b638e95a32d0c1131db0e7fe83775cbea4a0d0b
github.com/naoina/toml 751171607256bb66e64c9f0220c00662420c38e9
github.com/nsqio/go-nsq 2118015c120962edc5d03325c680daf3163a8b5f
github.com/pborman/uuid dee7705ef7b324f27ceb85a121c61f2c2e8ce988
github.com/pmezard/go-difflib e8554b8641db39598be7f6342874b958f12ae1d4
github.com/pmezard/go-difflib 792786c7400a136282c1664665ae0a8db921c6c2
github.com/prometheus/client_golang 67994f177195311c3ea3d4407ed0175e34a4256f
github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
github.com/prometheus/common 0a3005bb37bc411040083a55372e77c405f6464c
github.com/prometheus/common 14ca1097bbe21584194c15e391a9dab95ad42a59
github.com/prometheus/procfs 406e5b7bfd8201a36e2bb5f7bdae0b03380c2ce8
github.com/samuel/go-zookeeper 218e9c81c0dd8b3b18172b2bbfad92cc7d6db55f
github.com/shirou/gopsutil ef151b7ff7fe76308f89a389447b7b78dfa02e0f
github.com/shirou/gopsutil 85bf0974ed06e4e668595ae2b4de02e772a2819b
github.com/soniah/gosnmp b1b4f885b12c5dcbd021c5cee1c904110de6db7d
github.com/streadway/amqp b4f3ceab0337f013208d31348b578d83c0064744
github.com/stretchr/objx 1a9d0bb9f541897e62256577b352fdbc1fb4fd94
github.com/stretchr/testify c92828f29518bc633893affbce12904ba41a7cfa
github.com/stretchr/testify f390dcf405f7b83c997eac1b06768bb9f44dec18
github.com/wvanbergen/kafka 1a8639a45164fcc245d5c7b4bd3ccfbd1a0ffbf3
github.com/wvanbergen/kazoo-go 0f768712ae6f76454f987c3356177e138df258f8
golang.org/x/crypto f23ba3a5ee43012fcb4b92e1a2a405a92554f4f2
golang.org/x/net 520af5de654dc4dd4f0f65aa40e66dbbd9043df1
gopkg.in/dancannon/gorethink.v1 a124c9663325ed9f7fb669d17c69961b59151e6e
github.com/zensqlmonitor/go-mssqldb ffe5510c6fa5e15e6d983210ab501c815b56b363
golang.org/x/crypto 1f22c0103821b9390939b6776727195525381532
golang.org/x/net 04b9de9b512f58addf28c9853d50ebef61c3953e
golang.org/x/text 6d3c22c4525a4da167968fa2479be5524d2e8bd0
gopkg.in/dancannon/gorethink.v1 6f088135ff288deb9d5546f4c71919207f891a70
gopkg.in/fatih/pool.v2 cba550ebf9bce999a02e963296d4bc7a486cb715
gopkg.in/mgo.v2 e30de8ac9ae3b30df7065f766c71f88bba7d4e49
gopkg.in/mgo.v2 03c9f3ee4c14c8e51ee521a6a7d0425658dd6f64
gopkg.in/yaml.v2 f7716cbe52baa25d2e9b0d0da546fcf909fc16b4
63 Godeps_windows (Normal file)
@@ -0,0 +1,63 @@
git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git dbd8d5c40a582eb9adacde36b47932b3a3ad0034
github.com/Shopify/sarama b1da1753dedcf77d053613b7eae907b98a2ddad5
github.com/Sirupsen/logrus f7f79f729e0fbe2fcc061db48a9ba0263f588252
github.com/StackExchange/wmi f3e2bae1e0cb5aef83e319133eabfee30013a4a5
github.com/amir/raidman 6a8e089bbe32e6b907feae5ba688841974b3c339
github.com/armon/go-metrics 345426c77237ece5dab0e1605c3e4b35c3f54757
github.com/aws/aws-sdk-go 2a34ea8812f32aae75b43400f9424a0559840659
github.com/beorn7/perks b965b613227fddccbfffe13eae360ed3fa822f8d
github.com/boltdb/bolt ee4a0888a9abe7eefe5a0992ca4cb06864839873
github.com/cenkalti/backoff 4dc77674aceaabba2c7e3da25d4c823edfb73f99
github.com/dancannon/gorethink 6f088135ff288deb9d5546f4c71919207f891a70
github.com/davecgh/go-spew 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d
github.com/eapache/go-resiliency b86b1ec0dd4209a588dc1285cdd471e73525c0b3
github.com/eapache/queue ded5959c0d4e360646dc9e9908cff48666781367
github.com/fsouza/go-dockerclient 02a8beb401b20e112cff3ea740545960b667eab1
github.com/go-ini/ini afbd495e5aaea13597b5e14fe514ddeaa4d76fc3
github.com/go-ole/go-ole 50055884d646dd9434f16bbb5c9801749b9bafe4
github.com/go-sql-driver/mysql 7c7f556282622f94213bc028b4d0a7b6151ba239
github.com/gogo/protobuf e8904f58e872a473a5b91bc9bf3377d223555263
github.com/golang/protobuf 45bba206dd5270d96bac4942dcfe515726613249
github.com/golang/snappy 1963d058044b19e16595f80d5050fa54e2070438
github.com/gonuts/go-shellquote e842a11b24c6abfb3dd27af69a17f482e4b483c2
github.com/gorilla/context 1c83b3eabd45b6d76072b66b746c20815fb2872d
github.com/gorilla/mux 26a6070f849969ba72b72256e9f14cf519751690
github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478
github.com/hashicorp/go-msgpack fa3f63826f7c23912c15263591e65d54d080b458
github.com/hashicorp/raft 057b893fd996696719e98b6c44649ea14968c811
github.com/hashicorp/raft-boltdb d1e82c1ec3f15ee991f7cc7ffd5b67ff6f5bbaee
github.com/influxdata/config bae7cb98197d842374d3b8403905924094930f24
github.com/influxdata/influxdb 60df13fb566d07ff2cdd07aa23a4796a02b0df3c
github.com/influxdb/influxdb 60df13fb566d07ff2cdd07aa23a4796a02b0df3c
github.com/jmespath/go-jmespath c01cf91b011868172fdcd9f41838e80c9d716264
github.com/klauspost/crc32 999f3125931f6557b991b2f8472172bdfa578d38
github.com/lib/pq 8ad2b298cadd691a77015666a5372eae5dbfac8f
github.com/lxn/win 9a7734ea4db26bc593d52f6a8a957afdad39c5c1
github.com/matttproud/golang_protobuf_extensions d0c3fe89de86839aecf2e0579c40ba3bb336a453
github.com/mreiferson/go-snappystream 028eae7ab5c4c9e2d1cb4c4ca1e53259bbe7e504
github.com/naoina/go-stringutil 6b638e95a32d0c1131db0e7fe83775cbea4a0d0b
github.com/naoina/toml 751171607256bb66e64c9f0220c00662420c38e9
github.com/nsqio/go-nsq 2118015c120962edc5d03325c680daf3163a8b5f
github.com/pborman/uuid dee7705ef7b324f27ceb85a121c61f2c2e8ce988
github.com/pmezard/go-difflib 792786c7400a136282c1664665ae0a8db921c6c2
github.com/prometheus/client_golang 67994f177195311c3ea3d4407ed0175e34a4256f
github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
github.com/prometheus/common 14ca1097bbe21584194c15e391a9dab95ad42a59
github.com/prometheus/procfs 406e5b7bfd8201a36e2bb5f7bdae0b03380c2ce8
github.com/samuel/go-zookeeper 218e9c81c0dd8b3b18172b2bbfad92cc7d6db55f
github.com/shirou/gopsutil 9d8191d6a6e17dcf43b10a20084a11e8c1aa92e6
github.com/shirou/w32 ada3ba68f000aa1b58580e45c9d308fe0b7fc5c5
github.com/soniah/gosnmp b1b4f885b12c5dcbd021c5cee1c904110de6db7d
github.com/streadway/amqp b4f3ceab0337f013208d31348b578d83c0064744
github.com/stretchr/objx 1a9d0bb9f541897e62256577b352fdbc1fb4fd94
github.com/stretchr/testify f390dcf405f7b83c997eac1b06768bb9f44dec18
github.com/wvanbergen/kafka 1a8639a45164fcc245d5c7b4bd3ccfbd1a0ffbf3
github.com/wvanbergen/kazoo-go 0f768712ae6f76454f987c3356177e138df258f8
github.com/zensqlmonitor/go-mssqldb ffe5510c6fa5e15e6d983210ab501c815b56b363
golang.org/x/crypto 1f22c0103821b9390939b6776727195525381532
golang.org/x/net 04b9de9b512f58addf28c9853d50ebef61c3953e
golang.org/x/text 6fc2e00a0d64b1f7fc1212dae5b0c939cf6d9ac4
gopkg.in/dancannon/gorethink.v1 6f088135ff288deb9d5546f4c71919207f891a70
gopkg.in/fatih/pool.v2 cba550ebf9bce999a02e963296d4bc7a486cb715
gopkg.in/mgo.v2 03c9f3ee4c14c8e51ee521a6a7d0425658dd6f64
gopkg.in/yaml.v2 f7716cbe52baa25d2e9b0d0da546fcf909fc16b4
32 Makefile
@@ -9,36 +9,36 @@ endif
# Standard Telegraf build
default: prepare build

# Windows build
windows: prepare-windows build-windows

# Only run the build (no dependency grabbing)
build:
	go build -o telegraf -ldflags \
		"-X main.Version=$(VERSION)" \
		./cmd/telegraf/telegraf.go

build-windows:
	go build -o telegraf.exe -ldflags \
		"-X main.Version=$(VERSION)" \
		./cmd/telegraf/telegraf.go

# Build with race detector
dev: prepare
	go build -race -o telegraf -ldflags \
		"-X main.Version=$(VERSION)" \
		./cmd/telegraf/telegraf.go

# Build linux 64-bit, 32-bit and arm architectures
build-linux-bins: prepare
	GOARCH=amd64 GOOS=linux go build -o telegraf_linux_amd64 \
		-ldflags "-X main.Version=$(VERSION)" \
		./cmd/telegraf/telegraf.go
	GOARCH=386 GOOS=linux go build -o telegraf_linux_386 \
		-ldflags "-X main.Version=$(VERSION)" \
		./cmd/telegraf/telegraf.go
	GOARCH=arm GOOS=linux go build -o telegraf_linux_arm \
		-ldflags "-X main.Version=$(VERSION)" \
		./cmd/telegraf/telegraf.go

# Get dependencies and use gdm to checkout changesets
prepare:
	go get ./...
	go get github.com/sparrc/gdm
	gdm restore

# Use the windows godeps file to prepare dependencies
prepare-windows:
	go get github.com/sparrc/gdm
	gdm restore -f Godeps_windows

# Run all docker containers necessary for unit tests
docker-run:
ifeq ($(UNAME), Darwin)
@@ -65,6 +65,7 @@ endif
	docker run --name nsq -p "4150:4150" -d nsqio/nsq /nsqd
	docker run --name mqtt -p "1883:1883" -d ncarlier/mqtt
	docker run --name riemann -p "5555:5555" -d blalor/riemann
	docker run --name snmp -p "31161:31161/udp" -d titilambert/snmpsim

# Run docker containers necessary for CircleCI unit tests
docker-run-circle:
@@ -78,11 +79,12 @@ docker-run-circle:
	docker run --name nsq -p "4150:4150" -d nsqio/nsq /nsqd
	docker run --name mqtt -p "1883:1883" -d ncarlier/mqtt
	docker run --name riemann -p "5555:5555" -d blalor/riemann
	docker run --name snmp -p "31161:31161/udp" -d titilambert/snmpsim

# Kill all docker containers, ignore errors
docker-kill:
	-docker kill nsq aerospike redis opentsdb rabbitmq postgres memcached mysql kafka mqtt riemann
	-docker rm nsq aerospike redis opentsdb rabbitmq postgres memcached mysql kafka mqtt riemann
	-docker kill nsq aerospike redis opentsdb rabbitmq postgres memcached mysql kafka mqtt riemann snmp
	-docker rm nsq aerospike redis opentsdb rabbitmq postgres memcached mysql kafka mqtt riemann snmp

# Run full unit tests using docker containers (includes setup and teardown)
test: docker-kill docker-run
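The `-ldflags "-X main.Version=$(VERSION)"` recipes above rely on the Go linker's ability to overwrite a package-level string at link time. As a minimal sketch of the variable side of that contract (the variable path in the `-X` argument must match the declaration exactly; the default value here is illustrative):

```go
package main

import "fmt"

// Version is overwritten at link time by the Makefile's
// -ldflags "-X main.Version=$(VERSION)"; this default only shows up
// when the binary is built without that flag.
var Version = "unknown"

func main() {
	fmt.Println("Telegraf version:", Version)
}
```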
85 README.md
@@ -17,23 +17,24 @@ new plugins.

## Installation:

NOTE: Telegraf 0.10.x is **not** backwards-compatible with previous versions of
telegraf, both in the database layout and the configuration file. 0.2.x will
continue to be supported, see below for download links.
NOTE: Telegraf 0.10.x is **not** backwards-compatible with previous versions
of telegraf, both in the database layout and the configuration file. 0.2.x
will continue to be supported, see below for download links.

TODO: link to blog post about 0.10.x changes.
For more details on the differences between Telegraf 0.2.x and 0.10.x, see
the [release blog post](https://influxdata.com/blog/announcing-telegraf-0-10-0/).

### Linux deb and rpm packages:
### Linux deb and rpm Packages:

Latest:
* http://get.influxdb.org/telegraf/telegraf_0.10.0_amd64.deb
* http://get.influxdb.org/telegraf/telegraf-0.10.0-1.x86_64.rpm
* http://get.influxdb.org/telegraf/telegraf_0.10.2-1_amd64.deb
* http://get.influxdb.org/telegraf/telegraf-0.10.2-1.x86_64.rpm

0.2.x:
* http://get.influxdb.org/telegraf/telegraf_0.2.4_amd64.deb
* http://get.influxdb.org/telegraf/telegraf-0.2.4-1.x86_64.rpm

##### Package instructions:
##### Package Instructions:

* Telegraf binary is installed in `/usr/bin/telegraf`
* Telegraf daemon configuration file is in `/etc/telegraf/telegraf.conf`
@@ -42,24 +43,41 @@ Latest:
* On systemd systems (such as Ubuntu 15+), the telegraf daemon can be
controlled via `systemctl [action] telegraf`

### Linux binaries:
### yum/apt Repositories:

There is a yum/apt repo available for the whole InfluxData stack, see
[here](https://docs.influxdata.com/influxdb/v0.9/introduction/installation/#installation)
for instructions, replacing the `influxdb` package name with `telegraf`.

### Linux tarballs:

Latest:
* http://get.influxdb.org/telegraf/telegraf_linux_amd64_0.10.0.tar.gz
* http://get.influxdb.org/telegraf/telegraf_linux_386_0.10.0.tar.gz
* http://get.influxdb.org/telegraf/telegraf_linux_arm_0.10.0.tar.gz
* http://get.influxdb.org/telegraf/telegraf-0.10.2-1_linux_amd64.tar.gz
* http://get.influxdb.org/telegraf/telegraf-0.10.2-1_linux_i386.tar.gz
* http://get.influxdb.org/telegraf/telegraf-0.10.2-1_linux_arm.tar.gz

0.2.x:
* http://get.influxdb.org/telegraf/telegraf_linux_amd64_0.2.4.tar.gz
* http://get.influxdb.org/telegraf/telegraf_linux_386_0.2.4.tar.gz
* http://get.influxdb.org/telegraf/telegraf_linux_arm_0.2.4.tar.gz

##### Binary instructions:
##### tarball Instructions:

These are standalone binaries that can be unpacked and executed on any linux
system. They can be unpacked and renamed in a location such as
`/usr/local/bin` for convenience. A config file will need to be generated,
see "How to use it" below.
To install the full directory structure with config file, run:

```
sudo tar -C / -xvf ./telegraf-v0.10.2-1_linux_amd64.tar.gz
```

To extract only the binary, run:

```
tar -zxvf telegraf-v0.10.2-1_linux_amd64.tar.gz --strip-components=3 ./usr/bin/telegraf
```

### Ansible Role:

Ansible role: https://github.com/rossmcdonald/telegraf

### OSX via Homebrew:

@@ -72,19 +90,19 @@ brew install telegraf

Telegraf manages dependencies via [gdm](https://github.com/sparrc/gdm),
which gets installed via the Makefile
if you don't have it already. You also must build with golang version 1.4+.
if you don't have it already. You also must build with golang version 1.5+.

1. [Install Go](https://golang.org/doc/install)
2. [Setup your GOPATH](https://golang.org/doc/code.html#GOPATH)
3. Run `go get github.com/influxdb/telegraf`
4. Run `cd $GOPATH/src/github.com/influxdb/telegraf`
3. Run `go get github.com/influxdata/telegraf`
4. Run `cd $GOPATH/src/github.com/influxdata/telegraf`
5. Run `make`

### How to use it:
## How to use it:

```console
$ telegraf -help
Telegraf, The plugin-driven server agent for reporting metrics into InfluxDB
Telegraf, The plugin-driven server agent for collecting and reporting metrics.

Usage:

@@ -99,6 +117,8 @@ The flags are:
-input-filter filter the input plugins to enable, separator is :
-output-filter filter the output plugins to enable, separator is :
-usage print usage for a plugin, ie, 'telegraf -usage mysql'
-debug print metrics as they're generated to stdout
-quiet run in quiet mode
-version print the version to stdout

Examples:

@@ -126,12 +146,17 @@ configuration options.

## Supported Input Plugins

Telegraf currently has support for collecting metrics from:
Telegraf currently has support for collecting metrics from many sources. For
more information on each, please look at the directory of the same name in
`plugins/inputs`.

Currently implemented sources:

* aerospike
* apache
* bcache
* disque
* docker
* elasticsearch
* exec (generic JSON-emitting executable plugin)
* haproxy
@@ -145,18 +170,25 @@ Telegraf currently has support for collecting metrics from:
* mongodb
* mysql
* nginx
* nsq
* phpfpm
* phusion passenger
* ping
* postgresql
* powerdns
* procstat
* prometheus
* puppetagent
* rabbitmq
* redis
* rethinkdb
* sql server (microsoft)
* twemproxy
* zfs
* zookeeper
* sensors
* snmp
* win_perf_counters (windows performance counters)
* system
    * cpu
    * mem
@@ -170,6 +202,7 @@ Telegraf can also collect metrics via the following service plugins:

* statsd
* kafka_consumer
* github_webhooks

We'll be adding support for many more over the coming months. Read on if you
want to add support for another service or third-party API.
@@ -179,9 +212,11 @@ want to add support for another service or third-party API.
* influxdb
* amon
* amqp
* aws kinesis
* aws cloudwatch
* datadog
* graphite
* kafka
* amazon kinesis
* librato
* mqtt
* nsq
@@ -193,4 +228,4 @@ want to add support for another service or third-party API.

Please see the
[contributing guide](CONTRIBUTING.md)
for details on contributing a plugin or output to Telegraf.
for details on contributing a plugin to Telegraf.
191 accumulator.go
@@ -1,188 +1,21 @@
package telegraf

import (
	"fmt"
	"log"
	"math"
	"sync"
	"time"

	"github.com/influxdb/telegraf/internal/config"

	"github.com/influxdb/influxdb/client/v2"
)
import "time"

type Accumulator interface {
	Add(measurement string, value interface{},
		tags map[string]string, t ...time.Time)
	AddFields(measurement string, fields map[string]interface{},
		tags map[string]string, t ...time.Time)
	// Create a point with a value, decorating it with tags
	// NOTE: tags is expected to be owned by the caller, don't mutate
	// it after passing to Add.
	Add(measurement string,
		value interface{},
		tags map[string]string,
		t ...time.Time)

	SetDefaultTags(tags map[string]string)
	AddDefaultTag(key, value string)

	Prefix() string
	SetPrefix(prefix string)
	AddFields(measurement string,
		fields map[string]interface{},
		tags map[string]string,
		t ...time.Time)

	Debug() bool
	SetDebug(enabled bool)
}

func NewAccumulator(
	inputConfig *config.InputConfig,
	points chan *client.Point,
) Accumulator {
	acc := accumulator{}
	acc.points = points
	acc.inputConfig = inputConfig
	return &acc
}

type accumulator struct {
	sync.Mutex

	points chan *client.Point

	defaultTags map[string]string

	debug bool

	inputConfig *config.InputConfig

	prefix string
}

func (ac *accumulator) Add(
	measurement string,
	value interface{},
	tags map[string]string,
	t ...time.Time,
) {
	fields := make(map[string]interface{})
	fields["value"] = value
	ac.AddFields(measurement, fields, tags, t...)
}

func (ac *accumulator) AddFields(
	measurement string,
	fields map[string]interface{},
	tags map[string]string,
	t ...time.Time,
) {
	if len(fields) == 0 || len(measurement) == 0 {
		return
	}

	if !ac.inputConfig.Filter.ShouldTagsPass(tags) {
		return
	}

	// Override measurement name if set
	if len(ac.inputConfig.NameOverride) != 0 {
		measurement = ac.inputConfig.NameOverride
	}
	// Apply measurement prefix and suffix if set
	if len(ac.inputConfig.MeasurementPrefix) != 0 {
		measurement = ac.inputConfig.MeasurementPrefix + measurement
	}
	if len(ac.inputConfig.MeasurementSuffix) != 0 {
		measurement = measurement + ac.inputConfig.MeasurementSuffix
	}

	if tags == nil {
		tags = make(map[string]string)
	}
	// Apply plugin-wide tags if set
	for k, v := range ac.inputConfig.Tags {
		if _, ok := tags[k]; !ok {
			tags[k] = v
		}
	}
	// Apply daemon-wide tags if set
	for k, v := range ac.defaultTags {
		if _, ok := tags[k]; !ok {
			tags[k] = v
		}
	}

	result := make(map[string]interface{})
	for k, v := range fields {
		// Filter out any filtered fields
		if ac.inputConfig != nil {
			if !ac.inputConfig.Filter.ShouldPass(k) {
				continue
			}
		}
		result[k] = v

		// Validate uint64 and float64 fields
		switch val := v.(type) {
		case uint64:
			// InfluxDB does not support writing uint64
			if val < uint64(9223372036854775808) {
				result[k] = int64(val)
			} else {
				result[k] = int64(9223372036854775807)
			}
		case float64:
			// NaNs are invalid values in influxdb, skip measurement
			if math.IsNaN(val) || math.IsInf(val, 0) {
				if ac.debug {
					log.Printf("Measurement [%s] field [%s] has a NaN or Inf "+
						"field, skipping",
						measurement, k)
				}
				continue
			}
		}
	}
	fields = nil
	if len(result) == 0 {
		return
	}

	var timestamp time.Time
	if len(t) > 0 {
		timestamp = t[0]
	} else {
		timestamp = time.Now()
	}

	if ac.prefix != "" {
		measurement = ac.prefix + measurement
	}

	pt, err := client.NewPoint(measurement, tags, result, timestamp)
	if err != nil {
		log.Printf("Error adding point [%s]: %s\n", measurement, err.Error())
		return
	}
	if ac.debug {
		fmt.Println("> " + pt.String())
	}
	ac.points <- pt
}

func (ac *accumulator) SetDefaultTags(tags map[string]string) {
	ac.defaultTags = tags
}

func (ac *accumulator) AddDefaultTag(key, value string) {
	ac.defaultTags[key] = value
}

func (ac *accumulator) Prefix() string {
	return ac.prefix
}

func (ac *accumulator) SetPrefix(prefix string) {
	ac.prefix = prefix
}

func (ac *accumulator) Debug() bool {
	return ac.debug
}

func (ac *accumulator) SetDebug(debug bool) {
	ac.debug = debug
}
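The uint64 branch of AddFields above implements a simple clamp. As a standalone sketch of the same rule (the helper name is ours, not Telegraf's):

```go
package main

import (
	"fmt"
	"math"
)

// clampUint64 mirrors the uint64 case in AddFields: values that fit in
// an int64 are converted, anything larger is pinned to math.MaxInt64,
// because InfluxDB has no unsigned integer field type.
func clampUint64(v uint64) int64 {
	if v < uint64(9223372036854775808) { // 2^63, i.e. math.MaxInt64 + 1
		return int64(v)
	}
	return math.MaxInt64
}

func main() {
	fmt.Println(clampUint64(42))      // 42
	fmt.Println(clampUint64(1 << 63)) // 9223372036854775807
}
```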
163 agent/accumulator.go (Normal file)
@@ -0,0 +1,163 @@
package agent

import (
	"fmt"
	"log"
	"math"
	"sync"
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/internal/models"
)

func NewAccumulator(
	inputConfig *internal_models.InputConfig,
	metrics chan telegraf.Metric,
) *accumulator {
	acc := accumulator{}
	acc.metrics = metrics
	acc.inputConfig = inputConfig
	return &acc
}

type accumulator struct {
	sync.Mutex

	metrics chan telegraf.Metric

	defaultTags map[string]string

	debug bool

	inputConfig *internal_models.InputConfig

	prefix string
}

func (ac *accumulator) Add(
	measurement string,
	value interface{},
	tags map[string]string,
	t ...time.Time,
) {
	fields := make(map[string]interface{})
	fields["value"] = value
	ac.AddFields(measurement, fields, tags, t...)
}

func (ac *accumulator) AddFields(
	measurement string,
	fields map[string]interface{},
	tags map[string]string,
	t ...time.Time,
) {
	if len(fields) == 0 || len(measurement) == 0 {
		return
	}

	if !ac.inputConfig.Filter.ShouldTagsPass(tags) {
		return
	}

	// Override measurement name if set
	if len(ac.inputConfig.NameOverride) != 0 {
		measurement = ac.inputConfig.NameOverride
	}
	// Apply measurement prefix and suffix if set
	if len(ac.inputConfig.MeasurementPrefix) != 0 {
		measurement = ac.inputConfig.MeasurementPrefix + measurement
	}
	if len(ac.inputConfig.MeasurementSuffix) != 0 {
		measurement = measurement + ac.inputConfig.MeasurementSuffix
	}

	if tags == nil {
		tags = make(map[string]string)
	}
	// Apply plugin-wide tags if set
	for k, v := range ac.inputConfig.Tags {
		if _, ok := tags[k]; !ok {
			tags[k] = v
		}
	}
	// Apply daemon-wide tags if set
	for k, v := range ac.defaultTags {
		if _, ok := tags[k]; !ok {
			tags[k] = v
		}
	}

	result := make(map[string]interface{})
	for k, v := range fields {
		// Filter out any filtered fields
		if ac.inputConfig != nil {
			if !ac.inputConfig.Filter.ShouldPass(k) {
				continue
			}
		}
		result[k] = v

		// Validate uint64 and float64 fields
		switch val := v.(type) {
		case uint64:
			// InfluxDB does not support writing uint64
			if val < uint64(9223372036854775808) {
				result[k] = int64(val)
			} else {
				result[k] = int64(9223372036854775807)
			}
		case float64:
			// NaNs are invalid values in influxdb, skip measurement
			if math.IsNaN(val) || math.IsInf(val, 0) {
				if ac.debug {
					log.Printf("Measurement [%s] field [%s] has a NaN or Inf "+
						"field, skipping",
						measurement, k)
				}
				continue
			}
		}
	}
	fields = nil
	if len(result) == 0 {
		return
	}

	var timestamp time.Time
	if len(t) > 0 {
		timestamp = t[0]
	} else {
		timestamp = time.Now()
	}

	if ac.prefix != "" {
		measurement = ac.prefix + measurement
	}

	m, err := telegraf.NewMetric(measurement, tags, result, timestamp)
	if err != nil {
		log.Printf("Error adding point [%s]: %s\n", measurement, err.Error())
		return
	}
	if ac.debug {
		fmt.Println("> " + m.String())
	}
	ac.metrics <- m
}

func (ac *accumulator) Debug() bool {
	return ac.debug
}

func (ac *accumulator) SetDebug(debug bool) {
	ac.debug = debug
}

func (ac *accumulator) setDefaultTags(tags map[string]string) {
	ac.defaultTags = tags
}

func (ac *accumulator) addDefaultTag(key, value string) {
	ac.defaultTags[key] = value
}
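To make the new channel-of-metrics API concrete, here is a minimal sketch of a plugin-side caller of the Accumulator interface shown above; the measurement, field, and tag names are illustrative only, not from any real plugin:

```go
package example

import (
	"time"

	"github.com/influxdata/telegraf"
)

// gatherExample shows the reporting pattern a plugin's Gather method
// uses: one AddFields call per measurement, with the tag map owned by
// the caller as the interface comment above requires.
func gatherExample(acc telegraf.Accumulator) error {
	fields := map[string]interface{}{
		"connections": int64(42),
		"uptime":      12.5,
	}
	tags := map[string]string{"server": "localhost"}
	acc.AddFields("example_service", fields, tags, time.Now())
	return nil
}
```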
@@ -1,19 +1,19 @@
package telegraf
package agent

import (
	"crypto/rand"
	cryptorand "crypto/rand"
	"fmt"
	"log"
	"math/big"
	"math/rand"
	"os"
	"runtime"
	"sync"
	"time"

	"github.com/influxdb/telegraf/internal/config"
	"github.com/influxdb/telegraf/plugins/inputs"
	"github.com/influxdb/telegraf/plugins/outputs"

	"github.com/influxdb/influxdb/client/v2"
	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/internal/config"
	"github.com/influxdata/telegraf/internal/models"
)

// Agent runs telegraf and collects data based on the given config
@@ -45,7 +45,7 @@ func NewAgent(config *config.Config) (*Agent, error) {
func (a *Agent) Connect() error {
	for _, o := range a.Config.Outputs {
		switch ot := o.Output.(type) {
		case outputs.ServiceOutput:
		case telegraf.ServiceOutput:
			if err := ot.Start(); err != nil {
				log.Printf("Service for output %s failed to start, exiting\n%s\n",
					o.Name, err.Error())
@@ -58,7 +58,7 @@ func (a *Agent) Connect() error {
		}
		err := o.Output.Connect()
		if err != nil {
			log.Printf("Failed to connect to output %s, retrying in 15s\n", o.Name)
			log.Printf("Failed to connect to output %s, retrying in 15s, error was '%s' \n", o.Name, err)
			time.Sleep(15 * time.Second)
			err = o.Output.Connect()
			if err != nil {
@@ -78,20 +78,33 @@ func (a *Agent) Close() error {
	for _, o := range a.Config.Outputs {
		err = o.Output.Close()
		switch ot := o.Output.(type) {
		case outputs.ServiceOutput:
		case telegraf.ServiceOutput:
			ot.Stop()
		}
	}
	return err
}

func panicRecover(input *internal_models.RunningInput) {
	if err := recover(); err != nil {
		trace := make([]byte, 2048)
		runtime.Stack(trace, true)
		log.Printf("FATAL: Input [%s] panicked: %s, Stack:\n%s\n",
			input.Name, err, trace)
		log.Println("PLEASE REPORT THIS PANIC ON GITHUB with " +
			"stack trace, configuration, and OS information: " +
			"https://github.com/influxdata/telegraf/issues/new")
	}
}

// gatherParallel runs the inputs that are using the same reporting interval
// as the telegraf agent.
func (a *Agent) gatherParallel(pointChan chan *client.Point) error {
func (a *Agent) gatherParallel(metricC chan telegraf.Metric) error {
	var wg sync.WaitGroup

	start := time.Now()
	counter := 0
	jitter := a.Config.Agent.CollectionJitter.Duration.Nanoseconds()
	for _, input := range a.Config.Inputs {
		if input.Config.Interval != 0 {
			continue
@@ -99,13 +112,24 @@ func (a *Agent) gatherParallel(pointChan chan *client.Point) error {

		wg.Add(1)
		counter++
		go func(input *config.RunningInput) {
		go func(input *internal_models.RunningInput) {
			defer panicRecover(input)
			defer wg.Done()

			acc := NewAccumulator(input.Config, pointChan)
			acc := NewAccumulator(input.Config, metricC)
			acc.SetDebug(a.Config.Agent.Debug)
			// acc.SetPrefix(input.Name + "_")
			acc.SetDefaultTags(a.Config.Tags)
			acc.setDefaultTags(a.Config.Tags)

			if jitter != 0 {
				nanoSleep := rand.Int63n(jitter)
				d, err := time.ParseDuration(fmt.Sprintf("%dns", nanoSleep))
				if err != nil {
					log.Printf("Jittering collection interval failed for plugin %s",
						input.Name)
				} else {
					time.Sleep(d)
				}
			}

			if err := input.Input.Gather(acc); err != nil {
				log.Printf("Error in input [%s]: %s", input.Name, err)
@@ -121,8 +145,10 @@ func (a *Agent) gatherParallel(pointChan chan *client.Point) error {
	wg.Wait()

	elapsed := time.Since(start)
	log.Printf("Gathered metrics, (%s interval), from %d inputs in %s\n",
		a.Config.Agent.Interval.Duration, counter, elapsed)
	if !a.Config.Agent.Quiet {
		log.Printf("Gathered metrics, (%s interval), from %d inputs in %s\n",
			a.Config.Agent.Interval.Duration, counter, elapsed)
	}
	return nil
}

@@ -130,27 +156,30 @@ func (a *Agent) gatherParallel(pointChan chan *client.Point) error {
// reporting interval.
func (a *Agent) gatherSeparate(
	shutdown chan struct{},
	input *config.RunningInput,
	pointChan chan *client.Point,
	input *internal_models.RunningInput,
	metricC chan telegraf.Metric,
) error {
	defer panicRecover(input)

	ticker := time.NewTicker(input.Config.Interval)

	for {
		var outerr error
		start := time.Now()

		acc := NewAccumulator(input.Config, pointChan)
		acc := NewAccumulator(input.Config, metricC)
		acc.SetDebug(a.Config.Agent.Debug)
		// acc.SetPrefix(input.Name + "_")
		acc.SetDefaultTags(a.Config.Tags)
		acc.setDefaultTags(a.Config.Tags)

		if err := input.Input.Gather(acc); err != nil {
			log.Printf("Error in input [%s]: %s", input.Name, err)
		}

		elapsed := time.Since(start)
		log.Printf("Gathered metrics, (separate %s interval), from %s in %s\n",
			input.Config.Interval, input.Name, elapsed)
		if !a.Config.Agent.Quiet {
			log.Printf("Gathered metrics, (separate %s interval), from %s in %s\n",
				input.Config.Interval, input.Name, elapsed)
		}

		if outerr != nil {
			return outerr
@@ -170,13 +199,13 @@ func (a *Agent) gatherSeparate(
func (a *Agent) Test() error {
	shutdown := make(chan struct{})
	defer close(shutdown)
	pointChan := make(chan *client.Point)
	metricC := make(chan telegraf.Metric)

	// dummy receiver for the point channel
	go func() {
		for {
			select {
			case <-pointChan:
			case <-metricC:
				// do nothing
			case <-shutdown:
				return
@@ -185,9 +214,8 @@ func (a *Agent) Test() error {
	}()

	for _, input := range a.Config.Inputs {
		acc := NewAccumulator(input.Config, pointChan)
		acc := NewAccumulator(input.Config, metricC)
		acc.SetDebug(true)
		// acc.SetPrefix(input.Name + "_")

		fmt.Printf("* Plugin: %s, Collection 1\n", input.Name)
		if input.Config.Interval != 0 {
@@ -201,7 +229,7 @@ func (a *Agent) Test() error {
		// Special instructions for some inputs. cpu, for example, needs to be
		// run twice in order to return cpu usage percentages.
		switch input.Name {
		case "cpu", "mongodb":
		case "cpu", "mongodb", "procstat":
			time.Sleep(500 * time.Millisecond)
			fmt.Printf("* Plugin: %s, Collection 2\n", input.Name)
			if err := input.Input.Gather(acc); err != nil {
@@ -213,91 +241,45 @@ func (a *Agent) Test() error {
	return nil
}

// writeOutput writes a list of points to a single output, with retries.
// Optionally takes a `done` channel to indicate that it is done writing.
func (a *Agent) writeOutput(
	points []*client.Point,
	ro *config.RunningOutput,
	shutdown chan struct{},
	wg *sync.WaitGroup,
) {
	defer wg.Done()
	if len(points) == 0 {
		return
	}
	retry := 0
	retries := a.Config.Agent.FlushRetries
	start := time.Now()

	for {
		filtered := ro.FilterPoints(points)
		err := ro.Output.Write(filtered)
		if err == nil {
			// Write successful
			elapsed := time.Since(start)
			log.Printf("Flushed %d metrics to output %s in %s\n",
				len(filtered), ro.Name, elapsed)
			return
		}

		select {
		case <-shutdown:
			return
		default:
			if retry >= retries {
				// No more retries
				msg := "FATAL: Write to output [%s] failed %d times, dropping" +
					" %d metrics\n"
				log.Printf(msg, ro.Name, retries+1, len(points))
				return
			} else if err != nil {
				// Sleep for a retry
				log.Printf("Error in output [%s]: %s, retrying in %s",
					ro.Name, err.Error(), a.Config.Agent.FlushInterval.Duration)
				time.Sleep(a.Config.Agent.FlushInterval.Duration)
			}
		}

		retry++
	}
}

// flush writes a list of points to all configured outputs
func (a *Agent) flush(
	points []*client.Point,
	shutdown chan struct{},
	wait bool,
) {
func (a *Agent) flush() {
	var wg sync.WaitGroup

	wg.Add(len(a.Config.Outputs))
	for _, o := range a.Config.Outputs {
		wg.Add(1)
		go a.writeOutput(points, o, shutdown, &wg)
	}
	if wait {
		wg.Wait()
		go func(output *internal_models.RunningOutput) {
			defer wg.Done()
			err := output.Write()
			if err != nil {
				log.Printf("Error writing to output [%s]: %s\n",
					output.Name, err.Error())
			}
		}(o)
	}

	wg.Wait()
}

// flusher monitors the points input channel and flushes on the minimum interval
func (a *Agent) flusher(shutdown chan struct{}, pointChan chan *client.Point) error {
func (a *Agent) flusher(shutdown chan struct{}, metricC chan telegraf.Metric) error {
	// Inelegant, but this sleep is to allow the Gather threads to run, so that
	// the flusher will flush after metrics are collected.
	time.Sleep(time.Millisecond * 100)
	time.Sleep(time.Millisecond * 200)

	ticker := time.NewTicker(a.Config.Agent.FlushInterval.Duration)
	points := make([]*client.Point, 0)

	for {
		select {
		case <-shutdown:
			log.Println("Hang on, flushing any cached points before shutdown")
			a.flush(points, shutdown, true)
			a.flush()
			return nil
		case <-ticker.C:
			a.flush(points, shutdown, false)
			points = make([]*client.Point, 0)
		case pt := <-pointChan:
			points = append(points, pt)
			a.flush()
		case m := <-metricC:
			for _, o := range a.Config.Outputs {
				o.AddPoint(m)
			}
		}
	}
}
@@ -309,7 +291,7 @@ func jitterInterval(ininterval, injitter time.Duration) time.Duration {
	outinterval := ininterval
	if injitter.Nanoseconds() != 0 {
		maxjitter := big.NewInt(injitter.Nanoseconds())
		if j, err := rand.Int(rand.Reader, maxjitter); err == nil {
		if j, err := cryptorand.Int(cryptorand.Reader, maxjitter); err == nil {
			jitter = j.Int64()
		}
		outinterval = time.Duration(jitter + ininterval.Nanoseconds())
@@ -327,16 +309,17 @@ func jitterInterval(ininterval, injitter time.Duration) time.Duration {
func (a *Agent) Run(shutdown chan struct{}) error {
	var wg sync.WaitGroup

	a.Config.Agent.FlushInterval.Duration = jitterInterval(a.Config.Agent.FlushInterval.Duration,
	a.Config.Agent.FlushInterval.Duration = jitterInterval(
		a.Config.Agent.FlushInterval.Duration,
		a.Config.Agent.FlushJitter.Duration)

	log.Printf("Agent Config: Interval:%s, Debug:%#v, Hostname:%#v, "+
		"Flush Interval:%s\n",
		a.Config.Agent.Interval.Duration, a.Config.Agent.Debug,
	log.Printf("Agent Config: Interval:%s, Debug:%#v, Quiet:%#v, Hostname:%#v, "+
		"Flush Interval:%s \n",
		a.Config.Agent.Interval.Duration, a.Config.Agent.Debug, a.Config.Agent.Quiet,
		a.Config.Agent.Hostname, a.Config.Agent.FlushInterval.Duration)

	// channel shared between all input threads for accumulating points
	pointChan := make(chan *client.Point, 1000)
	metricC := make(chan telegraf.Metric, 1000)

	// Round collection to nearest interval by sleeping
	if a.Config.Agent.RoundInterval {
@@ -348,7 +331,7 @@ func (a *Agent) Run(shutdown chan struct{}) error {
	wg.Add(1)
	go func() {
		defer wg.Done()
		if err := a.flusher(shutdown, pointChan); err != nil {
		if err := a.flusher(shutdown, metricC); err != nil {
			log.Printf("Flusher routine failed, exiting: %s\n", err.Error())
			close(shutdown)
		}
@@ -358,7 +341,7 @@ func (a *Agent) Run(shutdown chan struct{}) error {

		// Start service of any ServicePlugins
		switch p := input.Input.(type) {
		case inputs.ServiceInput:
		case telegraf.ServiceInput:
			if err := p.Start(); err != nil {
				log.Printf("Service for input %s failed to start, exiting\n%s\n",
					input.Name, err.Error())
@@ -371,9 +354,9 @@ func (a *Agent) Run(shutdown chan struct{}) error {
		// configured. Default intervals are handled below with gatherParallel
		if input.Config.Interval != 0 {
			wg.Add(1)
			go func(input *config.RunningInput) {
			go func(input *internal_models.RunningInput) {
				defer wg.Done()
				if err := a.gatherSeparate(shutdown, input, pointChan); err != nil {
				if err := a.gatherSeparate(shutdown, input, metricC); err != nil {
					log.Printf(err.Error())
				}
			}(input)
@@ -383,7 +366,7 @@ func (a *Agent) Run(shutdown chan struct{}) error {
	defer wg.Wait()

	for {
		if err := a.gatherParallel(pointChan); err != nil {
		if err := a.gatherParallel(metricC); err != nil {
			log.Printf(err.Error())
		}
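The jitterInterval change above swaps `math/rand` for `crypto/rand` under the `cryptorand` alias. A self-contained sketch of the same logic, assuming only the standard library:

```go
package main

import (
	cryptorand "crypto/rand"
	"fmt"
	"math/big"
	"time"
)

// jitterInterval reproduces the logic shown above: a random number of
// nanoseconds in [0, jitter) is added to the flush interval so that many
// agents do not all flush at exactly the same instant.
func jitterInterval(interval, jitter time.Duration) time.Duration {
	if jitter.Nanoseconds() == 0 {
		return interval
	}
	max := big.NewInt(jitter.Nanoseconds())
	n, err := cryptorand.Int(cryptorand.Reader, max)
	if err != nil {
		return interval // fall back to the unjittered interval
	}
	return interval + time.Duration(n.Int64())
}

func main() {
	fmt.Println(jitterInterval(10*time.Second, 5*time.Second))
}
```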
@@ -1,50 +1,50 @@
package telegraf
package agent

import (
	"github.com/stretchr/testify/assert"
	"testing"
	"time"

	"github.com/influxdb/telegraf/internal/config"
	"github.com/influxdata/telegraf/internal/config"

	// needing to load the plugins
	_ "github.com/influxdb/telegraf/plugins/inputs/all"
	_ "github.com/influxdata/telegraf/plugins/inputs/all"
	// needing to load the outputs
	_ "github.com/influxdb/telegraf/plugins/outputs/all"
	_ "github.com/influxdata/telegraf/plugins/outputs/all"
)

func TestAgent_LoadPlugin(t *testing.T) {
	c := config.NewConfig()
	c.InputFilters = []string{"mysql"}
	err := c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
	err := c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
	assert.NoError(t, err)
	a, _ := NewAgent(c)
	assert.Equal(t, 1, len(a.Config.Inputs))

	c = config.NewConfig()
	c.InputFilters = []string{"foo"}
	err = c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
	err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
	assert.NoError(t, err)
	a, _ = NewAgent(c)
	assert.Equal(t, 0, len(a.Config.Inputs))

	c = config.NewConfig()
	c.InputFilters = []string{"mysql", "foo"}
	err = c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
	err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
	assert.NoError(t, err)
	a, _ = NewAgent(c)
	assert.Equal(t, 1, len(a.Config.Inputs))

	c = config.NewConfig()
	c.InputFilters = []string{"mysql", "redis"}
	err = c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
	err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
	assert.NoError(t, err)
	a, _ = NewAgent(c)
	assert.Equal(t, 2, len(a.Config.Inputs))

	c = config.NewConfig()
	c.InputFilters = []string{"mysql", "foo", "redis", "bar"}
	err = c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
	err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
	assert.NoError(t, err)
	a, _ = NewAgent(c)
	assert.Equal(t, 2, len(a.Config.Inputs))
@@ -53,42 +53,42 @@ func TestAgent_LoadPlugin(t *testing.T) {
func TestAgent_LoadOutput(t *testing.T) {
	c := config.NewConfig()
	c.OutputFilters = []string{"influxdb"}
	err := c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
	err := c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
	assert.NoError(t, err)
	a, _ := NewAgent(c)
	assert.Equal(t, 2, len(a.Config.Outputs))

	c = config.NewConfig()
	c.OutputFilters = []string{"kafka"}
	err = c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
	err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
	assert.NoError(t, err)
	a, _ = NewAgent(c)
	assert.Equal(t, 1, len(a.Config.Outputs))

	c = config.NewConfig()
	c.OutputFilters = []string{}
	err = c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
	err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
	assert.NoError(t, err)
	a, _ = NewAgent(c)
	assert.Equal(t, 3, len(a.Config.Outputs))

	c = config.NewConfig()
	c.OutputFilters = []string{"foo"}
	err = c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
	err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
	assert.NoError(t, err)
	a, _ = NewAgent(c)
	assert.Equal(t, 0, len(a.Config.Outputs))

	c = config.NewConfig()
	c.OutputFilters = []string{"influxdb", "foo"}
	err = c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
	err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
	assert.NoError(t, err)
	a, _ = NewAgent(c)
	assert.Equal(t, 2, len(a.Config.Outputs))

	c = config.NewConfig()
	c.OutputFilters = []string{"influxdb", "kafka"}
	err = c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
	err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
	assert.NoError(t, err)
	assert.Equal(t, 3, len(c.Outputs))
	a, _ = NewAgent(c)
@@ -96,7 +96,7 @@ func TestAgent_LoadOutput(t *testing.T) {

	c = config.NewConfig()
	c.OutputFilters = []string{"influxdb", "foo", "kafka", "bar"}
	err = c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
	err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
	assert.NoError(t, err)
	a, _ = NewAgent(c)
	assert.Equal(t, 3, len(a.Config.Outputs))
339 build.py
@@ -1,4 +1,4 @@
#!/usr/bin/env python2.7
#!/usr/bin/env python
#
# This is the Telegraf build script.
#
@@ -17,11 +17,7 @@ import tempfile
import hashlib
import re

try:
    import boto
    from boto.s3.key import Key
except ImportError:
    pass
debug = False

# PACKAGING VARIABLES
INSTALL_ROOT_DIR = "/usr/bin"
@@ -73,12 +69,10 @@ targets = {
}

supported_builds = {
    # TODO(rossmcdonald): Add support for multiple GOARM values
    'darwin': [ "amd64", "386" ],
    # 'windows': [ "amd64", "386", "arm", "arm64" ],
    'linux': [ "amd64", "386", "arm" ]
    'darwin': [ "amd64", "i386" ],
    'windows': [ "amd64", "i386", "arm" ],
    'linux': [ "amd64", "i386", "arm" ]
}
supported_go = [ '1.5.1' ]
supported_packages = {
    "darwin": [ "tar", "zip" ],
    "linux": [ "deb", "rpm", "tar", "zip" ],
@@ -87,42 +81,48 @@ supported_packages = {

def run(command, allow_failure=False, shell=False):
    out = None
    if debug:
        print("[DEBUG] {}".format(command))
    try:
        if shell:
            out = subprocess.check_output(command, stderr=subprocess.STDOUT, shell=shell)
        else:
            out = subprocess.check_output(command.split(), stderr=subprocess.STDOUT)
        out = out.decode("utf8")
    except subprocess.CalledProcessError as e:
        print ""
        print ""
        print "Executed command failed!"
        print "-- Command run was: {}".format(command)
        print "-- Failure was: {}".format(e.output)
        print("")
        print("")
        print("Executed command failed!")
        print("-- Command run was: {}".format(command))
        print("-- Failure was: {}".format(e.output))
        if allow_failure:
            print "Continuing..."
            print("Continuing...")
            return None
        else:
            print ""
            print "Stopping."
            print("")
            print("Stopping.")
            sys.exit(1)
    except OSError as e:
        print ""
        print ""
        print "Invalid command!"
        print "-- Command run was: {}".format(command)
        print "-- Failure was: {}".format(e)
        print("")
        print("")
        print("Invalid command!")
        print("-- Command run was: {}".format(command))
        print("-- Failure was: {}".format(e))
        if allow_failure:
            print "Continuing..."
            print("Continuing...")
            return out
        else:
            print ""
            print "Stopping."
            print("")
            print("Stopping.")
            sys.exit(1)
    else:
        return out

def create_temp_dir():
    return tempfile.mkdtemp(prefix="telegraf-build.")
def create_temp_dir(prefix=None):
    if prefix is None:
        return tempfile.mkdtemp(prefix="telegraf-build.")
    else:
        return tempfile.mkdtemp(prefix=prefix)

def get_current_version():
    command = "git describe --always --tags --abbrev=0"
@@ -173,42 +173,55 @@ def check_path_for(b):
    return full_path

def check_environ(build_dir = None):
    print "\nChecking environment:"
    print("\nChecking environment:")
    for v in [ "GOPATH", "GOBIN", "GOROOT" ]:
        print "\t- {} -> {}".format(v, os.environ.get(v))
        print("\t- {} -> {}".format(v, os.environ.get(v)))

    cwd = os.getcwd()
    if build_dir == None and os.environ.get("GOPATH") and os.environ.get("GOPATH") not in cwd:
        print "\n!! WARNING: Your current directory is not under your GOPATH. This may lead to build failures."
        print("\n!! WARNING: Your current directory is not under your GOPATH. This may lead to build failures.")

def check_prereqs():
    print "\nChecking for dependencies:"
    print("\nChecking for dependencies:")
    for req in prereqs:
        print "\t- {} ->".format(req),
        path = check_path_for(req)
        if path:
            print "{}".format(path)
        else:
            print "?"
        if path is None:
            path = '?'
        print("\t- {} -> {}".format(req, path))
    for req in optional_prereqs:
        print "\t- {} (optional) ->".format(req),
        path = check_path_for(req)
        if path:
            print "{}".format(path)
        else:
            print "?"
        print ""
        if path is None:
            path = '?'
        print("\t- {} (optional) -> {}".format(req, path))
    print("")

def upload_packages(packages, nightly=False):
    print "Uploading packages to S3..."
    print ""
def upload_packages(packages, bucket_name=None, nightly=False):
    if debug:
        print("[DEBUG] upload_packags: {}".format(packages))
    try:
        import boto
        from boto.s3.key import Key
    except ImportError:
        print "!! Cannot upload packages without the 'boto' python library."
        return 1
    print("Uploading packages to S3...")
    print("")
    c = boto.connect_s3()
    # TODO(rossmcdonald) - Set to different S3 bucket for release vs nightly
    bucket = c.get_bucket('telegraf-nightly')
    if bucket_name is None:
        bucket_name = 'get.influxdb.org/telegraf'
    bucket = c.get_bucket(bucket_name.split('/')[0])
    print("\t - Using bucket: {}".format(bucket_name))
    for p in packages:
        name = os.path.basename(p)
        if '/' in bucket_name:
            # Allow for nested paths within the bucket name (ex:
            # bucket/telegraf). Assuming forward-slashes as path
            # delimiter.
            name = os.path.join('/'.join(bucket_name.split('/')[1:]),
                                os.path.basename(p))
        else:
            name = os.path.basename(p)
        if bucket.get_key(name) is None or nightly:
            print "\t - Uploading {}...".format(name),
            print("\t - Uploading {} to {}...".format(name, bucket_name))
            k = Key(bucket)
            k.key = name
            if nightly:
@@ -216,41 +229,39 @@ def upload_packages(packages, nightly=False):
            else:
                n = k.set_contents_from_filename(p, replace=False)
            k.make_public()
            print "[ DONE ]"
        else:
            print "\t - Not uploading {}, already exists.".format(p)
    print ""
            print("\t - Not uploading {}, already exists.".format(p))
    print("")

def run_tests(race, parallel, timeout, no_vet):
    get_command = "go get -d -t ./..."
    print "Retrieving Go dependencies...",
    print("Retrieving Go dependencies...")
    sys.stdout.flush()
    run(get_command)
    print "done."
    print "Running tests:"
    print "\tRace: ", race
    print("Running tests:")
    print("\tRace: ", race)
    if parallel is not None:
        print "\tParallel:", parallel
        print("\tParallel:", parallel)
    if timeout is not None:
        print "\tTimeout:", timeout
        print("\tTimeout:", timeout)
    sys.stdout.flush()
    p = subprocess.Popen(["go", "fmt", "./..."], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = p.communicate()
    if len(out) > 0 or len(err) > 0:
        print "Code not formatted. Please use 'go fmt ./...' to fix formatting errors."
        print out
        print err
        print("Code not formatted. Please use 'go fmt ./...' to fix formatting errors.")
        print(out)
        print(err)
        return False
    if not no_vet:
        p = subprocess.Popen(["go", "tool", "vet", "-composites=false", "./"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, err = p.communicate()
        if len(out) > 0 or len(err) > 0:
            print "Go vet failed. Please run 'go vet ./...' and fix any errors."
            print out
            print err
            print("Go vet failed. Please run 'go vet ./...' and fix any errors.")
            print(out)
            print(err)
            return False
    else:
        print "Skipping go vet ..."
        print("Skipping go vet ...")
    sys.stdout.flush()
    test_command = "go test -v"
    if race:
@@ -262,10 +273,10 @@ def run_tests(race, parallel, timeout, no_vet):
    test_command += " ./..."
    code = os.system(test_command)
    if code != 0:
        print "Tests Failed"
        print("Tests Failed")
        return False
    else:
        print "Tests Passed"
        print("Tests Passed")
        return True

def build(version=None,
@@ -279,26 +290,26 @@ def build(version=None,
          clean=False,
          outdir=".",
          goarm_version="6"):
    print "-------------------------"
    print ""
    print "Build plan:"
    print "\t- version: {}".format(version)
    print("-------------------------")
    print("")
    print("Build plan:")
    print("\t- version: {}".format(version))
    if rc:
        print "\t- release candidate: {}".format(rc)
    print "\t- commit: {}".format(commit)
    print "\t- branch: {}".format(branch)
    print "\t- platform: {}".format(platform)
    print "\t- arch: {}".format(arch)
        print("\t- release candidate: {}".format(rc))
    print("\t- commit: {}".format(commit))
    print("\t- branch: {}".format(branch))
    print("\t- platform: {}".format(platform))
    print("\t- arch: {}".format(arch))
    if arch == 'arm' and goarm_version:
        print "\t- ARM version: {}".format(goarm_version)
    print "\t- nightly? {}".format(str(nightly).lower())
    print "\t- race enabled? {}".format(str(race).lower())
    print ""
        print("\t- ARM version: {}".format(goarm_version))
    print("\t- nightly? {}".format(str(nightly).lower()))
    print("\t- race enabled? {}".format(str(race).lower()))
    print("")

    if not os.path.exists(outdir):
        os.makedirs(outdir)
    elif clean and outdir != '/':
        print "Cleaning build directory..."
        print("Cleaning build directory...")
        shutil.rmtree(outdir)
        os.makedirs(outdir)

@@ -306,14 +317,20 @@ def build(version=None,
    # If a release candidate, update the version information accordingly
    version = "{}rc{}".format(version, rc)

    print "Starting build..."
    for b, c in targets.iteritems():
        print "\t- Building '{}'...".format(os.path.join(outdir, b)),
    # Set architecture to something that Go expects
    if arch == 'i386':
        arch = '386'
    elif arch == 'x86_64':
        arch = 'amd64'

    print("Starting build...")
    for b, c in targets.items():
        print("\t- Building '{}'...".format(os.path.join(outdir, b)))
        build_command = ""
        build_command += "GOOS={} GOARCH={} ".format(platform, arch)
        if arch == "arm" and goarm_version:
            if goarm_version not in ["5", "6", "7", "arm64"]:
                print "!! Invalid ARM build version: {}".format(goarm_version)
                print("!! Invalid ARM build version: {}".format(goarm_version))
            build_command += "GOARM={} ".format(goarm_version)
        build_command += "go build -o {} ".format(os.path.join(outdir, b))
        if race:
@@ -322,29 +339,28 @@ def build(version=None,
        if "1.4" in go_version:
            build_command += "-ldflags=\"-X main.buildTime '{}' ".format(datetime.datetime.utcnow().isoformat())
            build_command += "-X main.Version {} ".format(version)
            build_command += "-X main.Branch {} ".format(branch)
            build_command += "-X main.Branch {} ".format(get_current_branch())
            build_command += "-X main.Commit {}\" ".format(get_current_commit())
        else:
            build_command += "-ldflags=\"-X main.buildTime='{}' ".format(datetime.datetime.utcnow().isoformat())
            build_command += "-X main.Version={} ".format(version)
            build_command += "-X main.Branch={} ".format(branch)
            build_command += "-X main.Branch={} ".format(get_current_branch())
            build_command += "-X main.Commit={}\" ".format(get_current_commit())
        build_command += c
        run(build_command, shell=True)
        print "[ DONE ]"
    print ""
    print("")

def create_dir(path):
    try:
        os.makedirs(path)
    except OSError as e:
        print e
        print(e)

def rename_file(fr, to):
    try:
        os.rename(fr, to)
    except OSError as e:
        print e
        print(e)
        # Return the original filename
        return fr
    else:
@@ -355,27 +371,27 @@ def copy_file(fr, to):
    try:
        shutil.copy(fr, to)
    except OSError as e:
        print e
        print(e)

def create_package_fs(build_root):
    print "\t- Creating a filesystem hierarchy from directory: {}".format(build_root)
    print("\t- Creating a filesystem hierarchy from directory: {}".format(build_root))
    # Using [1:] for the path names due to them being absolute
    # (will overwrite previous paths, per 'os.path.join' documentation)
    dirs = [ INSTALL_ROOT_DIR[1:], LOG_DIR[1:], SCRIPT_DIR[1:], CONFIG_DIR[1:], LOGROTATE_DIR[1:] ]
    for d in dirs:
        create_dir(os.path.join(build_root, d))
        os.chmod(os.path.join(build_root, d), 0755)
        os.chmod(os.path.join(build_root, d), 0o755)

def package_scripts(build_root):
    print "\t- Copying scripts and sample configuration to build directory"
    print("\t- Copying scripts and sample configuration to build directory")
    shutil.copyfile(INIT_SCRIPT, os.path.join(build_root, SCRIPT_DIR[1:], INIT_SCRIPT.split('/')[1]))
    os.chmod(os.path.join(build_root, SCRIPT_DIR[1:], INIT_SCRIPT.split('/')[1]), 0644)
    os.chmod(os.path.join(build_root, SCRIPT_DIR[1:], INIT_SCRIPT.split('/')[1]), 0o644)
    shutil.copyfile(SYSTEMD_SCRIPT, os.path.join(build_root, SCRIPT_DIR[1:], SYSTEMD_SCRIPT.split('/')[1]))
    os.chmod(os.path.join(build_root, SCRIPT_DIR[1:], SYSTEMD_SCRIPT.split('/')[1]), 0644)
    os.chmod(os.path.join(build_root, SCRIPT_DIR[1:], SYSTEMD_SCRIPT.split('/')[1]), 0o644)
    shutil.copyfile(LOGROTATE_SCRIPT, os.path.join(build_root, LOGROTATE_DIR[1:], "telegraf"))
    os.chmod(os.path.join(build_root, LOGROTATE_DIR[1:], "telegraf"), 0644)
    os.chmod(os.path.join(build_root, LOGROTATE_DIR[1:], "telegraf"), 0o644)
    shutil.copyfile(DEFAULT_CONFIG, os.path.join(build_root, CONFIG_DIR[1:], "telegraf.conf"))
    os.chmod(os.path.join(build_root, CONFIG_DIR[1:], "telegraf.conf"), 0644)
    os.chmod(os.path.join(build_root, CONFIG_DIR[1:], "telegraf.conf"), 0o644)
|
||||
|
||||
def go_get(update=False):
|
||||
get_command = None
|
||||
@@ -383,24 +399,28 @@ def go_get(update=False):
|
||||
get_command = "go get -u -f -d ./..."
|
||||
else:
|
||||
get_command = "go get -d ./..."
|
||||
print "Retrieving Go dependencies...",
|
||||
print("Retrieving Go dependencies...")
|
||||
run(get_command)
|
||||
print "done.\n"
|
||||
|
||||
def generate_md5_from_file(path):
|
||||
m = hashlib.md5()
|
||||
with open(path, 'rb') as f:
|
||||
for chunk in iter(lambda: f.read(4096), b""):
|
||||
m.update(chunk)
|
||||
while True:
|
||||
data = f.read(4096)
|
||||
if not data:
|
||||
break
|
||||
m.update(data)
|
||||
return m.hexdigest()
|
||||
|
||||
def build_packages(build_output, version, nightly=False, rc=None, iteration=1):
|
||||
outfiles = []
|
||||
tmp_build_dir = create_temp_dir()
|
||||
if debug:
|
||||
print("[DEBUG] build_output = {}".format(build_output))
|
||||
try:
|
||||
print "-------------------------"
|
||||
print ""
|
||||
print "Packaging..."
|
||||
print("-------------------------")
|
||||
print("")
|
||||
print("Packaging...")
|
||||
for p in build_output:
|
||||
# Create top-level folder displaying which platform (linux, etc)
|
||||
create_dir(os.path.join(tmp_build_dir, p))
|
||||
@@ -419,24 +439,30 @@ def build_packages(build_output, version, nightly=False, rc=None, iteration=1):
|
||||
b = b + '.exe'
|
||||
fr = os.path.join(current_location, b)
|
||||
to = os.path.join(build_root, INSTALL_ROOT_DIR[1:], b)
|
||||
print "\t- [{}][{}] - Moving from '{}' to '{}'".format(p, a, fr, to)
|
||||
print("\t- [{}][{}] - Moving from '{}' to '{}'".format(p, a, fr, to))
|
||||
copy_file(fr, to)
|
||||
# Package the directory structure
|
||||
for package_type in supported_packages[p]:
|
||||
print "\t- Packaging directory '{}' as '{}'...".format(build_root, package_type),
|
||||
print("\t- Packaging directory '{}' as '{}'...".format(build_root, package_type))
|
||||
name = "telegraf"
|
||||
# Reset version, iteration, and current location on each run
|
||||
# since they may be modified below.
|
||||
package_version = version
|
||||
package_iteration = iteration
|
||||
current_location = build_output[p][a]
|
||||
|
||||
if package_type in ['zip', 'tar']:
|
||||
if nightly:
|
||||
name = '{}-nightly_{}_{}'.format(name, p, a)
|
||||
else:
|
||||
name = '{}-{}_{}_{}'.format(name, version, p, a)
|
||||
name = '{}-{}-{}_{}_{}'.format(name, package_version, package_iteration, p, a)
|
||||
if package_type == 'tar':
|
||||
# Add `tar.gz` to path to reduce package size
|
||||
current_location = os.path.join(current_location, name + '.tar.gz')
|
||||
if rc is not None:
|
||||
package_iteration = "0.rc{}".format(rc)
|
||||
if a == '386':
|
||||
a = 'i386'
|
||||
fpm_command = "fpm {} --name {} -a {} -t {} --version {} --iteration {} -C {} -p {} ".format(
|
||||
fpm_common_args,
|
||||
name,
|
||||
@@ -448,52 +474,57 @@ def build_packages(build_output, version, nightly=False, rc=None, iteration=1):
|
||||
current_location)
|
||||
if package_type == "rpm":
|
||||
fpm_command += "--depends coreutils "
|
||||
fpm_command += "--depends lsof"
|
||||
out = run(fpm_command, shell=True)
|
||||
matches = re.search(':path=>"(.*)"', out)
|
||||
outfile = None
|
||||
if matches is not None:
|
||||
outfile = matches.groups()[0]
|
||||
if outfile is None:
|
||||
print "[ COULD NOT DETERMINE OUTPUT ]"
|
||||
print("[ COULD NOT DETERMINE OUTPUT ]")
|
||||
else:
|
||||
# Strip nightly version (the unix epoch) from filename
|
||||
if nightly and package_type in ['deb', 'rpm']:
|
||||
outfile = rename_file(outfile, outfile.replace("{}-{}".format(version, iteration), "nightly"))
|
||||
outfiles.append(os.path.join(os.getcwd(), outfile))
|
||||
print "[ DONE ]"
|
||||
# Display MD5 hash for generated package
|
||||
print "\t\tMD5 = {}".format(generate_md5_from_file(outfile))
|
||||
print ""
|
||||
print("\t\tMD5 = {}".format(generate_md5_from_file(outfile)))
|
||||
print("")
|
||||
if debug:
|
||||
print("[DEBUG] package outfiles: {}".format(outfiles))
|
||||
return outfiles
|
||||
finally:
|
||||
# Cleanup
|
||||
shutil.rmtree(tmp_build_dir)
|
||||
|
||||
def print_usage():
|
||||
print "Usage: ./build.py [options]"
|
||||
print ""
|
||||
print "Options:"
|
||||
print "\t --outdir=<path> \n\t\t- Send build output to a specified path. Defaults to ./build."
|
||||
print "\t --arch=<arch> \n\t\t- Build for specified architecture. Acceptable values: x86_64|amd64, 386, arm, or all"
|
||||
print "\t --goarm=<arm version> \n\t\t- Build for specified ARM version (when building for ARM). Default value is: 6"
|
||||
print "\t --platform=<platform> \n\t\t- Build for specified platform. Acceptable values: linux, windows, darwin, or all"
|
||||
print "\t --version=<version> \n\t\t- Version information to apply to build metadata. If not specified, will be pulled from repo tag."
|
||||
print "\t --commit=<commit> \n\t\t- Use specific commit for build (currently a NOOP)."
|
||||
print "\t --branch=<branch> \n\t\t- Build from a specific branch (currently a NOOP)."
|
||||
print "\t --rc=<rc number> \n\t\t- Whether or not the build is a release candidate (affects version information)."
|
||||
print "\t --iteration=<iteration number> \n\t\t- The iteration to display on the package output (defaults to 0 for RC's, and 1 otherwise)."
|
||||
print "\t --race \n\t\t- Whether the produced build should have race detection enabled."
|
||||
print "\t --package \n\t\t- Whether the produced builds should be packaged for the target platform(s)."
|
||||
print "\t --nightly \n\t\t- Whether the produced build is a nightly (affects version information)."
|
||||
print "\t --update \n\t\t- Whether dependencies should be updated prior to building."
|
||||
print "\t --test \n\t\t- Run Go tests. Will not produce a build."
|
||||
print "\t --parallel \n\t\t- Run Go tests in parallel up to the count specified."
|
||||
print "\t --timeout \n\t\t- Timeout for Go tests. Defaults to 480s."
|
||||
print "\t --clean \n\t\t- Clean the build output directory prior to creating build."
|
||||
print ""
|
||||
print("Usage: ./build.py [options]")
|
||||
print("")
|
||||
print("Options:")
|
||||
print("\t --outdir=<path> \n\t\t- Send build output to a specified path. Defaults to ./build.")
|
||||
print("\t --arch=<arch> \n\t\t- Build for specified architecture. Acceptable values: x86_64|amd64, 386, arm, or all")
|
||||
print("\t --goarm=<arm version> \n\t\t- Build for specified ARM version (when building for ARM). Default value is: 6")
|
||||
print("\t --platform=<platform> \n\t\t- Build for specified platform. Acceptable values: linux, windows, darwin, or all")
|
||||
print("\t --version=<version> \n\t\t- Version information to apply to build metadata. If not specified, will be pulled from repo tag.")
|
||||
print("\t --commit=<commit> \n\t\t- Use specific commit for build (currently a NOOP).")
|
||||
print("\t --branch=<branch> \n\t\t- Build from a specific branch (currently a NOOP).")
|
||||
print("\t --rc=<rc number> \n\t\t- Whether or not the build is a release candidate (affects version information).")
|
||||
print("\t --iteration=<iteration number> \n\t\t- The iteration to display on the package output (defaults to 0 for RC's, and 1 otherwise).")
|
||||
print("\t --race \n\t\t- Whether the produced build should have race detection enabled.")
|
||||
print("\t --package \n\t\t- Whether the produced builds should be packaged for the target platform(s).")
|
||||
print("\t --nightly \n\t\t- Whether the produced build is a nightly (affects version information).")
|
||||
print("\t --update \n\t\t- Whether dependencies should be updated prior to building.")
|
||||
print("\t --test \n\t\t- Run Go tests. Will not produce a build.")
|
||||
print("\t --parallel \n\t\t- Run Go tests in parallel up to the count specified.")
|
||||
print("\t --timeout \n\t\t- Timeout for Go tests. Defaults to 480s.")
|
||||
print("\t --clean \n\t\t- Clean the build output directory prior to creating build.")
|
||||
print("\t --no-get \n\t\t- Do not run `go get` before building.")
|
||||
print("\t --bucket=<S3 bucket>\n\t\t- Full path of the bucket to upload packages to (must also specify --upload).")
|
||||
print("\t --debug \n\t\t- Displays debug output.")
|
||||
print("")
|
||||
|
||||
def print_package_summary(packages):
|
||||
print packages
|
||||
print(packages)
|
||||
|
||||
def main():
|
||||
# Command-line arguments
|
||||
@@ -516,6 +547,9 @@ def main():
|
||||
iteration = 1
|
||||
no_vet = False
|
||||
goarm_version = "6"
|
||||
run_get = True
|
||||
upload_bucket = None
|
||||
global debug
|
||||
|
||||
for arg in sys.argv[1:]:
|
||||
if '--outdir' in arg:
|
||||
@@ -573,17 +607,25 @@ def main():
|
||||
elif '--goarm' in arg:
|
||||
# Signifies GOARM flag to pass to build command when compiling for ARM
|
||||
goarm_version = arg.split("=")[1]
|
||||
elif '--bucket' in arg:
|
||||
# The bucket to upload the packages to, relies on boto
|
||||
upload_bucket = arg.split("=")[1]
|
||||
elif '--no-get' in arg:
|
||||
run_get = False
|
||||
elif '--debug' in arg:
|
||||
print "[DEBUG] Using debug output"
|
||||
debug = True
|
||||
elif '--help' in arg:
|
||||
print_usage()
|
||||
return 0
|
||||
else:
|
||||
print "!! Unknown argument: {}".format(arg)
|
||||
print("!! Unknown argument: {}".format(arg))
|
||||
print_usage()
|
||||
return 1
|
||||
|
||||
if nightly:
|
||||
if rc:
|
||||
print "!! Cannot be both nightly and a release candidate! Stopping."
|
||||
print("!! Cannot be both nightly and a release candidate! Stopping.")
|
||||
return 1
|
||||
# In order to support nightly builds on the repository, we are adding the epoch timestamp
|
||||
# to the version so that version numbers are always greater than the previous nightly.
|
||||
@@ -609,20 +651,24 @@ def main():
|
||||
# If a release candidate or nightly, set iteration to 0 (instead of 1)
|
||||
iteration = 0
|
||||
|
||||
if target_arch == '386':
|
||||
target_arch = 'i386'
|
||||
elif target_arch == 'x86_64':
|
||||
target_arch = 'amd64'
|
||||
|
||||
build_output = {}
|
||||
# TODO(rossmcdonald): Prepare git repo for build (checking out correct branch/commit, etc.)
|
||||
# prepare(branch=branch, commit=commit)
|
||||
if test:
|
||||
if not run_tests(race, parallel, timeout, no_vet):
|
||||
return 1
|
||||
return 0
|
||||
|
||||
go_get(update=update)
|
||||
if run_get:
|
||||
go_get(update=update)
|
||||
|
||||
platforms = []
|
||||
single_build = True
|
||||
if target_platform == 'all':
|
||||
platforms = supported_builds.keys()
|
||||
platforms = list(supported_builds.keys())
|
||||
single_build = False
|
||||
else:
|
||||
platforms = [target_platform]
|
||||
@@ -655,16 +701,13 @@ def main():
|
||||
# Build packages
|
||||
if package:
|
||||
if not check_path_for("fpm"):
|
||||
print "!! Cannot package without command 'fpm'. Stopping."
|
||||
print("!! Cannot package without command 'fpm'. Stopping.")
|
||||
return 1
|
||||
packages = build_packages(build_output, version, nightly=nightly, rc=rc, iteration=iteration)
|
||||
# TODO(rossmcdonald): Add nice output for print_package_summary()
|
||||
# print_package_summary(packages)
|
||||
# Optionally upload to S3
|
||||
if upload:
|
||||
upload_packages(packages, nightly=nightly)
|
||||
upload_packages(packages, bucket_name=upload_bucket, nightly=nightly)
|
||||
return 0
|
||||
|
||||
if __name__ == '__main__':
|
||||
sys.exit(main())
|
||||
|
||||
|
||||
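For reference, the `-X main.*` flags the script assembles only land somewhere if the Go `main` package declares matching string variables. A minimal sketch of that variable set, inferred from the flag names here since their actual declaration site is not part of this diff:

```go
package main

// Populated at link time by the build script's ldflags. Go 1.4 takes the
// space-separated "-X name value" form, newer toolchains "-X name=value",
// which is why the script above emits both. A plain `go build` without the
// script leaves these at their zero values.
var (
	Version   string
	Branch    string
	Commit    string
	buildTime string
)
```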
@@ -7,15 +7,18 @@ import (
 	"os"
 	"os/signal"
 	"strings"
+	"syscall"

-	"github.com/influxdb/telegraf"
-	"github.com/influxdb/telegraf/internal/config"
-	_ "github.com/influxdb/telegraf/plugins/inputs/all"
-	_ "github.com/influxdb/telegraf/plugins/outputs/all"
+	"github.com/influxdata/telegraf/agent"
+	"github.com/influxdata/telegraf/internal/config"
+	_ "github.com/influxdata/telegraf/plugins/inputs/all"
+	_ "github.com/influxdata/telegraf/plugins/outputs/all"
 )

 var fDebug = flag.Bool("debug", false,
 	"show metrics as they're generated to stdout")
+var fQuiet = flag.Bool("quiet", false,
+	"run in quiet mode")
 var fTest = flag.Bool("test", false, "gather metrics, print them out, and exit")
 var fConfig = flag.String("config", "", "configuration file to load")
 var fConfigDirectory = flag.String("config-directory", "",
@@ -25,14 +28,14 @@ var fSampleConfig = flag.Bool("sample-config", false,
 	"print out full sample configuration")
 var fPidfile = flag.String("pidfile", "", "file to write our pid to")
 var fInputFilters = flag.String("input-filter", "",
-	"filter the plugins to enable, separator is :")
+	"filter the inputs to enable, separator is :")
 var fOutputFilters = flag.String("output-filter", "",
 	"filter the outputs to enable, separator is :")
 var fUsage = flag.String("usage", "",
 	"print usage for a plugin, ie, 'telegraf -usage mysql'")

 var fInputFiltersLegacy = flag.String("filter", "",
-	"filter the plugins to enable, separator is :")
+	"filter the inputs to enable, separator is :")
 var fOutputFiltersLegacy = flag.String("outputfilter", "",
 	"filter the outputs to enable, separator is :")
 var fConfigDirectoryLegacy = flag.String("configdirectory", "",
@@ -57,6 +60,8 @@ The flags are:
 	-input-filter     filter the input plugins to enable, separator is :
 	-output-filter    filter the output plugins to enable, separator is :
 	-usage            print usage for a plugin, ie, 'telegraf -usage mysql'
 	-debug            print metrics as they're generated to stdout
+	-quiet            run in quiet mode
 	-version          print the version to stdout

 Examples:
@@ -78,142 +83,158 @@ Examples:
 `

 func main() {
-	flag.Usage = usageExit
-	flag.Parse()
+	reload := make(chan bool, 1)
+	reload <- true
+	for <-reload {
+		reload <- false
+		flag.Usage = func() { usageExit(0) }
+		flag.Parse()

-	if flag.NFlag() == 0 {
-		usageExit()
-	}
+		if flag.NFlag() == 0 {
+			usageExit(0)
+		}

-	var inputFilters []string
-	if *fInputFiltersLegacy != "" {
-		inputFilter := strings.TrimSpace(*fInputFiltersLegacy)
-		inputFilters = strings.Split(":"+inputFilter+":", ":")
-	}
-	if *fInputFilters != "" {
-		inputFilter := strings.TrimSpace(*fInputFilters)
-		inputFilters = strings.Split(":"+inputFilter+":", ":")
-	}
+		var inputFilters []string
+		if *fInputFiltersLegacy != "" {
+			inputFilter := strings.TrimSpace(*fInputFiltersLegacy)
+			inputFilters = strings.Split(":"+inputFilter+":", ":")
+		}
+		if *fInputFilters != "" {
+			inputFilter := strings.TrimSpace(*fInputFilters)
+			inputFilters = strings.Split(":"+inputFilter+":", ":")
+		}

-	var outputFilters []string
-	if *fOutputFiltersLegacy != "" {
-		outputFilter := strings.TrimSpace(*fOutputFiltersLegacy)
-		outputFilters = strings.Split(":"+outputFilter+":", ":")
-	}
-	if *fOutputFilters != "" {
-		outputFilter := strings.TrimSpace(*fOutputFilters)
-		outputFilters = strings.Split(":"+outputFilter+":", ":")
-	}
+		var outputFilters []string
+		if *fOutputFiltersLegacy != "" {
+			outputFilter := strings.TrimSpace(*fOutputFiltersLegacy)
+			outputFilters = strings.Split(":"+outputFilter+":", ":")
+		}
+		if *fOutputFilters != "" {
+			outputFilter := strings.TrimSpace(*fOutputFilters)
+			outputFilters = strings.Split(":"+outputFilter+":", ":")
+		}

-	if *fVersion {
-		v := fmt.Sprintf("Telegraf - Version %s", Version)
-		fmt.Println(v)
-		return
-	}
+		if *fVersion {
+			v := fmt.Sprintf("Telegraf - Version %s", Version)
+			fmt.Println(v)
+			return
+		}

-	if *fSampleConfig {
-		config.PrintSampleConfig(inputFilters, outputFilters)
-		return
-	}
+		if *fSampleConfig {
+			config.PrintSampleConfig(inputFilters, outputFilters)
+			return
+		}

-	if *fUsage != "" {
-		if err := config.PrintInputConfig(*fUsage); err != nil {
-			if err2 := config.PrintOutputConfig(*fUsage); err2 != nil {
-				log.Fatalf("%s and %s", err, err2)
-			}
-		}
-		return
-	}
+		if *fUsage != "" {
+			if err := config.PrintInputConfig(*fUsage); err != nil {
+				if err2 := config.PrintOutputConfig(*fUsage); err2 != nil {
+					log.Fatalf("%s and %s", err, err2)
+				}
+			}
+			return
+		}

-	var (
-		c   *config.Config
-		err error
-	)
-
-	if *fConfig != "" {
-		c = config.NewConfig()
-		c.OutputFilters = outputFilters
-		c.InputFilters = inputFilters
-		err = c.LoadConfig(*fConfig)
-		if err != nil {
-			log.Fatal(err)
-		}
-	} else {
-		fmt.Println("Usage: Telegraf")
-		flag.PrintDefaults()
-		return
-	}
-
-	if *fConfigDirectoryLegacy != "" {
-		err = c.LoadDirectory(*fConfigDirectoryLegacy)
-		if err != nil {
-			log.Fatal(err)
-		}
-	}
-
-	if *fConfigDirectory != "" {
-		err = c.LoadDirectory(*fConfigDirectory)
-		if err != nil {
-			log.Fatal(err)
-		}
-	}
-	if len(c.Outputs) == 0 {
-		log.Fatalf("Error: no outputs found, did you provide a valid config file?")
-	}
-	if len(c.Inputs) == 0 {
-		log.Fatalf("Error: no plugins found, did you provide a valid config file?")
-	}
-
-	ag, err := telegraf.NewAgent(c)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	if *fDebug {
-		ag.Config.Agent.Debug = true
-	}
-
-	if *fTest {
-		err = ag.Test()
-		if err != nil {
-			log.Fatal(err)
-		}
-		return
-	}
-
-	err = ag.Connect()
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	shutdown := make(chan struct{})
-	signals := make(chan os.Signal)
-	signal.Notify(signals, os.Interrupt)
-	go func() {
-		<-signals
-		close(shutdown)
-	}()
-
-	log.Printf("Starting Telegraf (version %s)\n", Version)
-	log.Printf("Loaded outputs: %s", strings.Join(c.OutputNames(), " "))
-	log.Printf("Loaded plugins: %s", strings.Join(c.InputNames(), " "))
-	log.Printf("Tags enabled: %s", c.ListTags())
-
-	if *fPidfile != "" {
-		f, err := os.Create(*fPidfile)
-		if err != nil {
-			log.Fatalf("Unable to create pidfile: %s", err)
-		}
-
-		fmt.Fprintf(f, "%d\n", os.Getpid())
-
-		f.Close()
-	}
-
-	ag.Run(shutdown)
-}
+		var (
+			c   *config.Config
+			err error
+		)
+
+		if *fConfig != "" {
+			c = config.NewConfig()
+			c.OutputFilters = outputFilters
+			c.InputFilters = inputFilters
+			err = c.LoadConfig(*fConfig)
+			if err != nil {
+				log.Fatal(err)
+			}
+		} else {
+			fmt.Println("You must specify a config file. See telegraf --help")
+			os.Exit(1)
+		}
+
+		if *fConfigDirectoryLegacy != "" {
+			err = c.LoadDirectory(*fConfigDirectoryLegacy)
+			if err != nil {
+				log.Fatal(err)
+			}
+		}
+
+		if *fConfigDirectory != "" {
+			err = c.LoadDirectory(*fConfigDirectory)
+			if err != nil {
+				log.Fatal(err)
+			}
+		}
+		if len(c.Outputs) == 0 {
+			log.Fatalf("Error: no outputs found, did you provide a valid config file?")
+		}
+		if len(c.Inputs) == 0 {
+			log.Fatalf("Error: no inputs found, did you provide a valid config file?")
+		}
+
+		ag, err := agent.NewAgent(c)
+		if err != nil {
+			log.Fatal(err)
+		}
+
+		if *fDebug {
+			ag.Config.Agent.Debug = true
+		}
+
+		if *fQuiet {
+			ag.Config.Agent.Quiet = true
+		}
+
+		if *fTest {
+			err = ag.Test()
+			if err != nil {
+				log.Fatal(err)
+			}
+			return
+		}
+
+		err = ag.Connect()
+		if err != nil {
+			log.Fatal(err)
+		}
+
+		shutdown := make(chan struct{})
+		signals := make(chan os.Signal)
+		signal.Notify(signals, os.Interrupt, syscall.SIGHUP)
+		go func() {
+			sig := <-signals
+			if sig == os.Interrupt {
+				close(shutdown)
+			}
+			if sig == syscall.SIGHUP {
+				log.Printf("Reloading Telegraf config\n")
+				<-reload
+				reload <- true
+				close(shutdown)
+			}
+		}()
+
+		log.Printf("Starting Telegraf (version %s)\n", Version)
+		log.Printf("Loaded outputs: %s", strings.Join(c.OutputNames(), " "))
+		log.Printf("Loaded inputs: %s", strings.Join(c.InputNames(), " "))
+		log.Printf("Tags enabled: %s", c.ListTags())
+
+		if *fPidfile != "" {
+			f, err := os.Create(*fPidfile)
+			if err != nil {
+				log.Fatalf("Unable to create pidfile: %s", err)
+			}
+
+			fmt.Fprintf(f, "%d\n", os.Getpid())
+
+			f.Close()
+		}
+
+		ag.Run(shutdown)
+	}
+}

-func usageExit() {
+func usageExit(rc int) {
 	fmt.Println(usage)
-	os.Exit(0)
+	os.Exit(rc)
 }
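The reload flow added to `main` above is easier to see in isolation. Below is a standalone sketch of the same pattern under simplified assumptions, with the agent pass reduced to a single blocking read: the buffered channel primes one iteration, and the SIGHUP handler swaps the queued `false` for a `true` before closing `shutdown`, so the outer loop immediately runs again with a freshly loaded config.

```go
package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	reload := make(chan bool, 1)
	reload <- true // prime the first pass
	for <-reload {
		reload <- false // default: exit after this pass

		shutdown := make(chan struct{})
		signals := make(chan os.Signal, 1)
		signal.Notify(signals, os.Interrupt, syscall.SIGHUP)
		go func() {
			if sig := <-signals; sig == syscall.SIGHUP {
				// Re-arm the loop so the outer condition reads true.
				<-reload
				reload <- true
			}
			close(shutdown)
		}()

		fmt.Println("running; SIGHUP reloads, SIGINT exits")
		<-shutdown // stands in for ag.Run(shutdown)
	}
}
```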
@@ -1,27 +1,18 @@
 # Telegraf configuration

 # Telegraf is entirely plugin driven. All metrics are gathered from the
-# declared inputs.
+# declared inputs, and sent to the declared outputs.

-# Even if a plugin has no configuration, it must be declared in here
-# to be active. Declaring a plugin means just specifying the name
-# as a section with no variables. To deactivate a plugin, comment
-# out the name and any variables.
+# Plugins must be declared in here to be active.
+# To deactivate a plugin, comment out the name and any variables.

-# Use 'telegraf -config telegraf.toml -test' to see what metrics a config
+# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
 # file would generate.

-# One rule that plugins conform to is wherever a connection string
-# can be passed, the values '' and 'localhost' are treated specially.
-# They indicate to the plugin to use their own builtin configuration to
-# connect to the local system.
-
-# NOTE: The configuration has a few required parameters. They are marked
-# with 'required'. Be sure to edit those to make this configuration work.
-
-# Tags can also be specified via a normal map, but only one form at a time:
+# Global tags can be specified here in key="value" format.
 [tags]
-  # dc = "us-east-1"
+  # dc = "us-east-1" # will tag all metrics with dc=us-east-1
+  # rack = "1a"

 # Configuration for telegraf agent
 [agent]
@@ -84,22 +75,22 @@
   # Whether to report total system cpu stats or not
   totalcpu = true
   # Comment this line if you want the raw CPU time metrics
-  drop = ["cpu_time"]
+  drop = ["time_*"]

 # Read metrics about disk usage by mount point
 [[inputs.disk]]
   # By default, telegraf gather stats for all mountpoints.
   # Setting mountpoints will restrict the stats to the specified mountpoints.
-  # Mountpoints=["/"]
+  # mount_points=["/"]

 # Read metrics about disk IO by device
 [[inputs.diskio]]
   # By default, telegraf will gather stats for all devices including
   # disk partitions.
   # Setting devices will restrict the stats to the specified devices.
-  # Devices=["sda","sdb"]
+  # devices = ["sda", "sdb"]
   # Uncomment the following line if you do not need disk serial numbers.
-  # SkipSerialNumber = true
+  # skip_serial_number = true

 # Read metrics about memory usage
 [[inputs.mem]]
input.go (new file, 31 lines)
@@ -0,0 +1,31 @@
+package telegraf
+
+type Input interface {
+	// SampleConfig returns the default configuration of the Input
+	SampleConfig() string
+
+	// Description returns a one-sentence description on the Input
+	Description() string
+
+	// Gather takes in an accumulator and adds the metrics that the Input
+	// gathers. This is called every "interval"
+	Gather(Accumulator) error
+}
+
+type ServiceInput interface {
+	// SampleConfig returns the default configuration of the Input
+	SampleConfig() string
+
+	// Description returns a one-sentence description on the Input
+	Description() string
+
+	// Gather takes in an accumulator and adds the metrics that the Input
+	// gathers. This is called every "interval"
+	Gather(Accumulator) error
+
+	// Start starts the ServiceInput's service, whatever that may be
+	Start() error
+
+	// Stop stops the services and closes any necessary channels and connections
+	Stop()
+}
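A hypothetical plugin implementing the new `telegraf.Input` interface might look like the sketch below. The `AddFields` accumulator call is an assumption based on the accumulator API of this release, and the `example` names are invented for illustration:

```go
package example

import "github.com/influxdata/telegraf"

// Example is a made-up input used only to illustrate the contract.
type Example struct {
	Ok bool // would be populated from the plugin's TOML table
}

func (e *Example) Description() string { return "a demo input" }

func (e *Example) SampleConfig() string {
	return "  ok = true # sample boolean option\n"
}

// Gather is invoked once per collection interval.
func (e *Example) Gather(acc telegraf.Accumulator) error {
	acc.AddFields("example",
		map[string]interface{}{"ok": e.Ok},
		map[string]string{"source": "example"})
	return nil
}

// Compile-time check that Example satisfies the interface above.
var _ telegraf.Input = &Example{}
```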
@@ -10,14 +10,14 @@ import (
 	"strings"
 	"time"

-	"github.com/influxdb/telegraf/internal"
-	"github.com/influxdb/telegraf/plugins/inputs"
-	"github.com/influxdb/telegraf/plugins/outputs"
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/internal"
+	"github.com/influxdata/telegraf/internal/models"
+	"github.com/influxdata/telegraf/plugins/inputs"
+	"github.com/influxdata/telegraf/plugins/outputs"

-	"github.com/naoina/toml"
+	"github.com/influxdata/config"
 	"github.com/naoina/toml/ast"

-	"github.com/influxdb/influxdb/client/v2"
 )

 // Config specifies the URL/user/password for the database that telegraf
@@ -29,8 +29,8 @@ type Config struct {
 	OutputFilters []string

 	Agent   *AgentConfig
-	Inputs  []*RunningInput
-	Outputs []*RunningOutput
+	Inputs  []*internal_models.RunningInput
+	Outputs []*internal_models.RunningOutput
 }

 func NewConfig() *Config {
@@ -40,13 +40,12 @@ func NewConfig() *Config {
 			Interval:      internal.Duration{Duration: 10 * time.Second},
 			RoundInterval: true,
 			FlushInterval: internal.Duration{Duration: 10 * time.Second},
-			FlushRetries:  2,
 			FlushJitter:   internal.Duration{Duration: 5 * time.Second},
 		},

 		Tags:          make(map[string]string),
-		Inputs:        make([]*RunningInput, 0),
-		Outputs:       make([]*RunningOutput, 0),
+		Inputs:        make([]*internal_models.RunningInput, 0),
+		Outputs:       make([]*internal_models.RunningOutput, 0),
 		InputFilters:  make([]string, 0),
 		OutputFilters: make([]string, 0),
 	}
@@ -61,149 +60,40 @@ type AgentConfig struct {
 	// ie, if Interval=10s then always collect on :00, :10, :20, etc.
 	RoundInterval bool

+	// CollectionJitter is used to jitter the collection by a random amount.
+	// Each plugin will sleep for a random time within jitter before collecting.
+	// This can be used to avoid many plugins querying things like sysfs at the
+	// same time, which can have a measurable effect on the system.
+	CollectionJitter internal.Duration
+
 	// Interval at which to flush data
 	FlushInterval internal.Duration

-	// FlushRetries is the number of times to retry each data flush
-	FlushRetries int
-
-	// FlushJitter tells
+	// FlushJitter Jitters the flush interval by a random amount.
+	// This is primarily to avoid large write spikes for users running a large
+	// number of telegraf instances.
+	// ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
 	FlushJitter internal.Duration

+	// MetricBufferLimit is the max number of metrics that each output plugin
+	// will cache. The buffer is cleared when a successful write occurs. When
+	// full, the oldest metrics will be overwritten.
+	MetricBufferLimit int
+
 	// TODO(cam): Remove UTC and Precision parameters, they are no longer
 	// valid for the agent config. Leaving them here for now for backwards-
 	// compatability
 	UTC bool `toml:"utc"`
 	Precision string

-	// Option for running in debug mode
-	Debug bool
+	// Debug is the option for running in debug mode
+	Debug bool

+	// Quiet is the option for running in quiet mode
+	Quiet bool
 	Hostname string
 }

-// TagFilter is the name of a tag, and the values on which to filter
-type TagFilter struct {
-	Name   string
-	Filter []string
-}
-
-type RunningOutput struct {
-	Name   string
-	Output outputs.Output
-	Config *OutputConfig
-}
-
-type RunningInput struct {
-	Name   string
-	Input  inputs.Input
-	Config *InputConfig
-}
-
-// Filter containing drop/pass and tagdrop/tagpass rules
-type Filter struct {
-	Drop []string
-	Pass []string
-
-	TagDrop []TagFilter
-	TagPass []TagFilter
-
-	IsActive bool
-}
-
-// InputConfig containing a name, interval, and filter
-type InputConfig struct {
-	Name              string
-	NameOverride      string
-	MeasurementPrefix string
-	MeasurementSuffix string
-	Tags              map[string]string
-	Filter            Filter
-	Interval          time.Duration
-}
-
-// OutputConfig containing name and filter
-type OutputConfig struct {
-	Name   string
-	Filter Filter
-}
-
-// Filter returns filtered slice of client.Points based on whether filters
-// are active for this RunningOutput.
-func (ro *RunningOutput) FilterPoints(points []*client.Point) []*client.Point {
-	if !ro.Config.Filter.IsActive {
-		return points
-	}
-
-	var filteredPoints []*client.Point
-	for i := range points {
-		if !ro.Config.Filter.ShouldPass(points[i].Name()) || !ro.Config.Filter.ShouldTagsPass(points[i].Tags()) {
-			continue
-		}
-		filteredPoints = append(filteredPoints, points[i])
-	}
-	return filteredPoints
-}
-
-// ShouldPass returns true if the metric should pass, false if should drop
-// based on the drop/pass filter parameters
-func (f Filter) ShouldPass(fieldkey string) bool {
-	if f.Pass != nil {
-		for _, pat := range f.Pass {
-			// TODO remove HasPrefix check, leaving it for now for legacy support.
-			// Cam, 2015-12-07
-			if strings.HasPrefix(fieldkey, pat) || internal.Glob(pat, fieldkey) {
-				return true
-			}
-		}
-		return false
-	}
-
-	if f.Drop != nil {
-		for _, pat := range f.Drop {
-			// TODO remove HasPrefix check, leaving it for now for legacy support.
-			// Cam, 2015-12-07
-			if strings.HasPrefix(fieldkey, pat) || internal.Glob(pat, fieldkey) {
-				return false
-			}
-		}
-
-		return true
-	}
-	return true
-}
-
-// ShouldTagsPass returns true if the metric should pass, false if should drop
-// based on the tagdrop/tagpass filter parameters
-func (f Filter) ShouldTagsPass(tags map[string]string) bool {
-	if f.TagPass != nil {
-		for _, pat := range f.TagPass {
-			if tagval, ok := tags[pat.Name]; ok {
-				for _, filter := range pat.Filter {
-					if internal.Glob(filter, tagval) {
-						return true
-					}
-				}
-			}
-		}
-		return false
-	}
-
-	if f.TagDrop != nil {
-		for _, pat := range f.TagDrop {
-			if tagval, ok := tags[pat.Name]; ok {
-				for _, filter := range pat.Filter {
-					if internal.Glob(filter, tagval) {
-						return false
-					}
-				}
-			}
-		}
-		return true
-	}
-
-	return true
-}
-
 // Inputs returns a list of strings of the configured inputs.
 func (c *Config) InputNames() []string {
 	var name []string
@@ -239,27 +129,18 @@ func (c *Config) ListTags() string {
 var header = `# Telegraf configuration

 # Telegraf is entirely plugin driven. All metrics are gathered from the
-# declared inputs.
+# declared inputs, and sent to the declared outputs.

-# Even if a plugin has no configuration, it must be declared in here
-# to be active. Declaring a plugin means just specifying the name
-# as a section with no variables. To deactivate a plugin, comment
-# out the name and any variables.
+# Plugins must be declared in here to be active.
+# To deactivate a plugin, comment out the name and any variables.

-# Use 'telegraf -config telegraf.toml -test' to see what metrics a config
+# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
 # file would generate.

-# One rule that plugins conform to is wherever a connection string
-# can be passed, the values '' and 'localhost' are treated specially.
-# They indicate to the plugin to use their own builtin configuration to
-# connect to the local system.
-
-# NOTE: The configuration has a few required parameters. They are marked
-# with 'required'. Be sure to edit those to make this configuration work.
-
-# Tags can also be specified via a normal map, but only one form at a time:
+# Global tags can be specified here in key="value" format.
 [tags]
-  # dc = "us-east-1"
+  # dc = "us-east-1" # will tag all metrics with dc=us-east-1
+  # rack = "1a"

 # Configuration for telegraf agent
 [agent]
@@ -269,6 +150,16 @@ var header = `# Telegraf configuration
   # ie, if interval="10s" then always collect on :00, :10, :20, etc.
   round_interval = true

+  # Telegraf will cache metric_buffer_limit metrics for each output, and will
+  # flush this buffer on a successful write.
+  metric_buffer_limit = 10000
+
+  # Collection jitter is used to jitter the collection by a random amount.
+  # Each plugin will sleep for a random time within jitter before collecting.
+  # This can be used to avoid many plugins querying things like sysfs at the
+  # same time, which can have a measurable effect on the system.
+  collection_jitter = "0s"
+
   # Default data flushing interval for all outputs. You should not set this below
   # interval. Maximum flush_interval will be flush_interval + flush_jitter
   flush_interval = "10s"
@@ -279,6 +170,8 @@ var header = `# Telegraf configuration

   # Run telegraf in debug mode
   debug = false
+  # Run telegraf in quiet mode
+  quiet = false
   # Override default hostname, if empty use os.Hostname()
   hostname = ""

@@ -335,13 +228,13 @@ func PrintSampleConfig(pluginFilters []string, outputFilters []string) {

 	// Print Inputs
 	fmt.Printf(pluginHeader)
-	servInputs := make(map[string]inputs.ServiceInput)
+	servInputs := make(map[string]telegraf.ServiceInput)
 	for _, pname := range pnames {
 		creator := inputs.Inputs[pname]
 		input := creator()

 		switch p := input.(type) {
-		case inputs.ServiceInput:
+		case telegraf.ServiceInput:
 			servInputs[pname] = p
 			continue
 		}
@@ -423,12 +316,7 @@ func (c *Config) LoadDirectory(path string) error {

 // LoadConfig loads the given config file and applies it to c
 func (c *Config) LoadConfig(path string) error {
-	data, err := ioutil.ReadFile(path)
-	if err != nil {
-		return err
-	}
-
-	tbl, err := toml.Parse(data)
+	tbl, err := config.ParseFile(path)
 	if err != nil {
 		return err
 	}
@@ -441,12 +329,12 @@ func (c *Config) LoadConfig(path string) error {

 		switch name {
 		case "agent":
-			if err = toml.UnmarshalTable(subTable, c.Agent); err != nil {
+			if err = config.UnmarshalTable(subTable, c.Agent); err != nil {
 				log.Printf("Could not parse [agent] config\n")
 				return err
 			}
 		case "tags":
-			if err = toml.UnmarshalTable(subTable, c.Tags); err != nil {
+			if err = config.UnmarshalTable(subTable, c.Tags); err != nil {
 				log.Printf("Could not parse [tags] config\n")
 				return err
 			}
@@ -512,15 +400,15 @@ func (c *Config) addOutput(name string, table *ast.Table) error {
 		return err
 	}

-	if err := toml.UnmarshalTable(table, output); err != nil {
+	if err := config.UnmarshalTable(table, output); err != nil {
 		return err
 	}

-	ro := &RunningOutput{
-		Name:   name,
-		Output: output,
-		Config: outputConfig,
-	}
+	ro := internal_models.NewRunningOutput(name, output, outputConfig)
+	if c.Agent.MetricBufferLimit > 0 {
+		ro.PointBufferLimit = c.Agent.MetricBufferLimit
+	}
+	ro.Quiet = c.Agent.Quiet
 	c.Outputs = append(c.Outputs, ro)
 	return nil
 }
@@ -545,11 +433,11 @@ func (c *Config) addInput(name string, table *ast.Table) error {
 		return err
 	}

-	if err := toml.UnmarshalTable(table, input); err != nil {
+	if err := config.UnmarshalTable(table, input); err != nil {
 		return err
 	}

-	rp := &RunningInput{
+	rp := &internal_models.RunningInput{
 		Name:   name,
 		Input:  input,
 		Config: pluginConfig,
@@ -559,10 +447,10 @@ func (c *Config) addInput(name string, table *ast.Table) error {
 }

 // buildFilter builds a Filter (tagpass/tagdrop/pass/drop) to
-// be inserted into the OutputConfig/InputConfig to be used for prefix
+// be inserted into the internal_models.OutputConfig/internal_models.InputConfig to be used for prefix
 // filtering on tags and measurements
-func buildFilter(tbl *ast.Table) Filter {
-	f := Filter{}
+func buildFilter(tbl *ast.Table) internal_models.Filter {
+	f := internal_models.Filter{}

 	if node, ok := tbl.Fields["pass"]; ok {
 		if kv, ok := node.(*ast.KeyValue); ok {
@@ -594,7 +482,7 @@ func buildFilter(tbl *ast.Table) Filter {
 	if subtbl, ok := node.(*ast.Table); ok {
 		for name, val := range subtbl.Fields {
 			if kv, ok := val.(*ast.KeyValue); ok {
-				tagfilter := &TagFilter{Name: name}
+				tagfilter := &internal_models.TagFilter{Name: name}
 				if ary, ok := kv.Value.(*ast.Array); ok {
 					for _, elem := range ary.Value {
 						if str, ok := elem.(*ast.String); ok {
@@ -613,7 +501,7 @@ func buildFilter(tbl *ast.Table) Filter {
 	if subtbl, ok := node.(*ast.Table); ok {
 		for name, val := range subtbl.Fields {
 			if kv, ok := val.(*ast.KeyValue); ok {
-				tagfilter := &TagFilter{Name: name}
+				tagfilter := &internal_models.TagFilter{Name: name}
 				if ary, ok := kv.Value.(*ast.Array); ok {
 					for _, elem := range ary.Value {
 						if str, ok := elem.(*ast.String); ok {
@@ -637,9 +525,9 @@ func buildFilter(tbl *ast.Table) Filter {

 // buildInput parses input specific items from the ast.Table,
 // builds the filter and returns a
-// InputConfig to be inserted into RunningInput
-func buildInput(name string, tbl *ast.Table) (*InputConfig, error) {
-	cp := &InputConfig{Name: name}
+// internal_models.InputConfig to be inserted into internal_models.RunningInput
+func buildInput(name string, tbl *ast.Table) (*internal_models.InputConfig, error) {
+	cp := &internal_models.InputConfig{Name: name}
 	if node, ok := tbl.Fields["interval"]; ok {
 		if kv, ok := node.(*ast.KeyValue); ok {
 			if str, ok := kv.Value.(*ast.String); ok {
@@ -680,7 +568,7 @@ func buildInput(name string, tbl *ast.Table) (*InputConfig, error) {
 	cp.Tags = make(map[string]string)
 	if node, ok := tbl.Fields["tags"]; ok {
 		if subtbl, ok := node.(*ast.Table); ok {
-			if err := toml.UnmarshalTable(subtbl, cp.Tags); err != nil {
+			if err := config.UnmarshalTable(subtbl, cp.Tags); err != nil {
 				log.Printf("Could not parse tags for input %s\n", name)
 			}
 		}
@@ -696,10 +584,10 @@ func buildInput(name string, tbl *ast.Table) (*InputConfig, error) {
 }

 // buildOutput parses output specific items from the ast.Table, builds the filter and returns an
-// OutputConfig to be inserted into RunningInput
+// internal_models.OutputConfig to be inserted into internal_models.RunningInput
 // Note: error exists in the return for future calls that might require error
-func buildOutput(name string, tbl *ast.Table) (*OutputConfig, error) {
-	oc := &OutputConfig{
+func buildOutput(name string, tbl *ast.Table) (*internal_models.OutputConfig, error) {
+	oc := &internal_models.OutputConfig{
 		Name:   name,
 		Filter: buildFilter(tbl),
 	}
@@ -4,10 +4,11 @@ import (
 	"testing"
 	"time"

-	"github.com/influxdb/telegraf/plugins/inputs"
-	"github.com/influxdb/telegraf/plugins/inputs/exec"
-	"github.com/influxdb/telegraf/plugins/inputs/memcached"
-	"github.com/influxdb/telegraf/plugins/inputs/procstat"
+	"github.com/influxdata/telegraf/internal/models"
+	"github.com/influxdata/telegraf/plugins/inputs"
+	"github.com/influxdata/telegraf/plugins/inputs/exec"
+	"github.com/influxdata/telegraf/plugins/inputs/memcached"
+	"github.com/influxdata/telegraf/plugins/inputs/procstat"
 	"github.com/stretchr/testify/assert"
 )

@@ -18,19 +19,19 @@ func TestConfig_LoadSingleInput(t *testing.T) {
 	memcached := inputs.Inputs["memcached"]().(*memcached.Memcached)
 	memcached.Servers = []string{"localhost"}

-	mConfig := &InputConfig{
+	mConfig := &internal_models.InputConfig{
 		Name: "memcached",
-		Filter: Filter{
+		Filter: internal_models.Filter{
 			Drop: []string{"other", "stuff"},
 			Pass: []string{"some", "strings"},
-			TagDrop: []TagFilter{
-				TagFilter{
+			TagDrop: []internal_models.TagFilter{
+				internal_models.TagFilter{
 					Name:   "badtag",
 					Filter: []string{"othertag"},
 				},
 			},
-			TagPass: []TagFilter{
-				TagFilter{
+			TagPass: []internal_models.TagFilter{
+				internal_models.TagFilter{
 					Name:   "goodtag",
 					Filter: []string{"mytag"},
 				},
@@ -61,19 +62,19 @@ func TestConfig_LoadDirectory(t *testing.T) {
 	memcached := inputs.Inputs["memcached"]().(*memcached.Memcached)
 	memcached.Servers = []string{"localhost"}

-	mConfig := &InputConfig{
+	mConfig := &internal_models.InputConfig{
 		Name: "memcached",
-		Filter: Filter{
+		Filter: internal_models.Filter{
 			Drop: []string{"other", "stuff"},
 			Pass: []string{"some", "strings"},
-			TagDrop: []TagFilter{
-				TagFilter{
+			TagDrop: []internal_models.TagFilter{
+				internal_models.TagFilter{
 					Name:   "badtag",
 					Filter: []string{"othertag"},
 				},
 			},
-			TagPass: []TagFilter{
-				TagFilter{
+			TagPass: []internal_models.TagFilter{
+				internal_models.TagFilter{
 					Name:   "goodtag",
 					Filter: []string{"mytag"},
 				},
@@ -91,7 +92,7 @@ func TestConfig_LoadDirectory(t *testing.T) {

 	ex := inputs.Inputs["exec"]().(*exec.Exec)
 	ex.Command = "/usr/bin/myothercollector --foo=bar"
-	eConfig := &InputConfig{
+	eConfig := &internal_models.InputConfig{
 		Name:              "exec",
 		MeasurementSuffix: "_myothercollector",
 	}
@@ -110,7 +111,7 @@ func TestConfig_LoadDirectory(t *testing.T) {
 	pstat := inputs.Inputs["procstat"]().(*procstat.Procstat)
 	pstat.PidFile = "/var/run/grafana-server.pid"

-	pConfig := &InputConfig{Name: "procstat"}
+	pConfig := &internal_models.InputConfig{Name: "procstat"}
 	pConfig.Tags = make(map[string]string)

 	assert.Equal(t, pstat, c.Inputs[3].Input,
@@ -118,175 +119,3 @@ func TestConfig_LoadDirectory(t *testing.T) {
 	assert.Equal(t, pConfig, c.Inputs[3].Config,
 		"Merged Testdata did not produce correct procstat metadata.")
 }
-
-func TestFilter_Empty(t *testing.T) {
-	f := Filter{}
-
-	measurements := []string{
-		"foo",
-		"bar",
-		"barfoo",
-		"foo_bar",
-		"foo.bar",
-		"foo-bar",
-		"supercalifradjulisticexpialidocious",
-	}
-
-	for _, measurement := range measurements {
-		if !f.ShouldPass(measurement) {
-			t.Errorf("Expected measurement %s to pass", measurement)
-		}
-	}
-}
-
-func TestFilter_Pass(t *testing.T) {
-	f := Filter{
-		Pass: []string{"foo*", "cpu_usage_idle"},
-	}
-
-	passes := []string{
-		"foo",
-		"foo_bar",
-		"foo.bar",
-		"foo-bar",
-		"cpu_usage_idle",
-	}
-
-	drops := []string{
-		"bar",
-		"barfoo",
-		"bar_foo",
-		"cpu_usage_busy",
-	}
-
-	for _, measurement := range passes {
-		if !f.ShouldPass(measurement) {
-			t.Errorf("Expected measurement %s to pass", measurement)
-		}
-	}
-
-	for _, measurement := range drops {
-		if f.ShouldPass(measurement) {
-			t.Errorf("Expected measurement %s to drop", measurement)
-		}
-	}
-}
-
-func TestFilter_Drop(t *testing.T) {
-	f := Filter{
-		Drop: []string{"foo*", "cpu_usage_idle"},
-	}
-
-	drops := []string{
-		"foo",
-		"foo_bar",
-		"foo.bar",
-		"foo-bar",
-		"cpu_usage_idle",
-	}
-
-	passes := []string{
-		"bar",
-		"barfoo",
-		"bar_foo",
-		"cpu_usage_busy",
-	}
-
-	for _, measurement := range passes {
-		if !f.ShouldPass(measurement) {
-			t.Errorf("Expected measurement %s to pass", measurement)
-		}
-	}
-
-	for _, measurement := range drops {
-		if f.ShouldPass(measurement) {
-			t.Errorf("Expected measurement %s to drop", measurement)
-		}
-	}
-}
-
-func TestFilter_TagPass(t *testing.T) {
-	filters := []TagFilter{
-		TagFilter{
-			Name:   "cpu",
-			Filter: []string{"cpu-*"},
-		},
-		TagFilter{
-			Name:   "mem",
-			Filter: []string{"mem_free"},
-		}}
-	f := Filter{
-		TagPass: filters,
-	}
-
-	passes := []map[string]string{
-		{"cpu": "cpu-total"},
-		{"cpu": "cpu-0"},
-		{"cpu": "cpu-1"},
-		{"cpu": "cpu-2"},
-		{"mem": "mem_free"},
-	}
-
-	drops := []map[string]string{
-		{"cpu": "cputotal"},
-		{"cpu": "cpu0"},
-		{"cpu": "cpu1"},
-		{"cpu": "cpu2"},
-		{"mem": "mem_used"},
-	}
-
-	for _, tags := range passes {
-		if !f.ShouldTagsPass(tags) {
-			t.Errorf("Expected tags %v to pass", tags)
-		}
-	}
-
-	for _, tags := range drops {
-		if f.ShouldTagsPass(tags) {
-			t.Errorf("Expected tags %v to drop", tags)
-		}
-	}
-}
-
-func TestFilter_TagDrop(t *testing.T) {
-	filters := []TagFilter{
-		TagFilter{
-			Name:   "cpu",
-			Filter: []string{"cpu-*"},
-		},
-		TagFilter{
-			Name:   "mem",
-			Filter: []string{"mem_free"},
-		}}
-	f := Filter{
-		TagDrop: filters,
-	}
-
-	drops := []map[string]string{
-		{"cpu": "cpu-total"},
-		{"cpu": "cpu-0"},
-		{"cpu": "cpu-1"},
-		{"cpu": "cpu-2"},
-		{"mem": "mem_free"},
-	}
-
-	passes := []map[string]string{
-		{"cpu": "cputotal"},
-		{"cpu": "cpu0"},
-		{"cpu": "cpu1"},
-		{"cpu": "cpu2"},
-		{"mem": "mem_used"},
-	}
-
-	for _, tags := range passes {
-		if !f.ShouldTagsPass(tags) {
-			t.Errorf("Expected tags %v to pass", tags)
-		}
-	}
-
-	for _, tags := range drops {
-		if f.ShouldTagsPass(tags) {
-			t.Errorf("Expected tags %v to drop", tags)
-		}
-	}
-}
@@ -2,13 +2,20 @@ package internal

 import (
 	"bufio"
+	"crypto/rand"
+	"crypto/tls"
+	"crypto/x509"
+	"errors"
+	"fmt"
+	"io/ioutil"
 	"os"
+	"strconv"
 	"strings"
 	"time"
 )

+const alphanum string = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
+
 // Duration just wraps time.Duration
 type Duration struct {
 	Duration time.Duration
@@ -49,9 +56,17 @@ func (f *JSONFlattener) FlattenJSON(
 				return err
 			}
 		}
+	case []interface{}:
+		for i, v := range t {
+			k := strconv.Itoa(i)
+			err := f.FlattenJSON(fieldname+"_"+k+"_", v)
+			if err != nil {
+				return nil
+			}
+		}
 	case float64:
 		f.Fields[fieldname] = t
-	case bool, string, []interface{}, nil:
+	case bool, string, nil:
 		// ignored types
 		return nil
 	default:
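The new `[]interface{}` case means JSON arrays are now flattened element by element, each index becoming part of the field key, instead of being silently ignored. A rough illustration, with the printed key names approximate since the surrounding separator handling is not shown in this hunk:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/influxdata/telegraf/internal"
)

func main() {
	var parsed interface{}
	if err := json.Unmarshal([]byte(`{"latency": [1.5, 2.5]}`), &parsed); err != nil {
		panic(err)
	}

	f := internal.JSONFlattener{Fields: map[string]interface{}{}}
	if err := f.FlattenJSON("", parsed); err != nil {
		panic(err)
	}
	// Previously {"latency": [...]} produced no fields at all; now each
	// element lands under an index-suffixed key (roughly latency_0, latency_1).
	fmt.Println(f.Fields)
}
```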
@@ -96,6 +111,57 @@ func ReadLinesOffsetN(filename string, offset uint, n int) ([]string, error) {
 	return ret, nil
 }

+// RandomString returns a random string of alpha-numeric characters
+func RandomString(n int) string {
+	var bytes = make([]byte, n)
+	rand.Read(bytes)
+	for i, b := range bytes {
+		bytes[i] = alphanum[b%byte(len(alphanum))]
+	}
+	return string(bytes)
+}
+
+// GetTLSConfig gets a tls.Config object from the given certs, key, and CA files.
+// you must give the full path to the files.
+// If all files are blank and InsecureSkipVerify=false, returns a nil pointer.
+func GetTLSConfig(
+	SSLCert, SSLKey, SSLCA string,
+	InsecureSkipVerify bool,
+) (*tls.Config, error) {
+	t := &tls.Config{}
+	if SSLCert != "" && SSLKey != "" && SSLCA != "" {
+		cert, err := tls.LoadX509KeyPair(SSLCert, SSLKey)
+		if err != nil {
+			return nil, errors.New(fmt.Sprintf(
+				"Could not load TLS client key/certificate: %s",
+				err))
+		}
+
+		caCert, err := ioutil.ReadFile(SSLCA)
+		if err != nil {
+			return nil, errors.New(fmt.Sprintf("Could not load TLS CA: %s",
+				err))
+		}
+
+		caCertPool := x509.NewCertPool()
+		caCertPool.AppendCertsFromPEM(caCert)
+
+		t = &tls.Config{
+			Certificates:       []tls.Certificate{cert},
+			RootCAs:            caCertPool,
+			InsecureSkipVerify: InsecureSkipVerify,
+		}
+	} else {
+		if InsecureSkipVerify {
+			t.InsecureSkipVerify = true
+		} else {
+			return nil, nil
+		}
+	}
+	// will be nil by default if nothing is provided
+	return t, nil
+}
+
 // Glob will test a string pattern, potentially containing globs, against a
 // subject string. The result is a simple true/false, determining whether or
 // not the glob pattern matched the subject text.
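`GetTLSConfig` gives the output plugins one normalized path from `ssl_cert`/`ssl_key`/`ssl_ca` plus `insecure_skip_verify` to a `*tls.Config`. A minimal sketch of a caller, where the parameter values are placeholders for whatever a user sets in the plugin's TOML table:

```go
package example

import (
	"crypto/tls"

	"github.com/influxdata/telegraf/internal"
)

// tlsFor maps a plugin's SSL options onto a client TLS config.
func tlsFor(sslCert, sslKey, sslCA string, insecureSkipVerify bool) (*tls.Config, error) {
	tlsCfg, err := internal.GetTLSConfig(sslCert, sslKey, sslCA, insecureSkipVerify)
	if err != nil {
		return nil, err
	}
	// A nil, nil return means "no TLS requested": no files were given and
	// verification was left on, so callers fall back to plain defaults.
	return tlsCfg, nil
}
```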
internal/models/filter.go (new file, 92 lines)
@@ -0,0 +1,92 @@
|
||||
package internal_models
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/internal"
|
||||
)
|
||||
|
||||
// TagFilter is the name of a tag, and the values on which to filter
|
||||
type TagFilter struct {
|
||||
Name string
|
||||
Filter []string
|
||||
}
|
||||
|
||||
// Filter containing drop/pass and tagdrop/tagpass rules
|
||||
type Filter struct {
|
||||
Drop []string
|
||||
Pass []string
|
||||
|
||||
TagDrop []TagFilter
|
||||
TagPass []TagFilter
|
||||
|
||||
IsActive bool
|
||||
}
|
||||
|
||||
func (f Filter) ShouldMetricPass(metric telegraf.Metric) bool {
|
||||
if f.ShouldPass(metric.Name()) && f.ShouldTagsPass(metric.Tags()) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// ShouldPass returns true if the metric should pass, false if should drop
|
||||
// based on the drop/pass filter parameters
|
||||
func (f Filter) ShouldPass(key string) bool {
|
||||
if f.Pass != nil {
|
||||
for _, pat := range f.Pass {
|
||||
// TODO remove HasPrefix check, leaving it for now for legacy support.
|
||||
// Cam, 2015-12-07
|
||||
if strings.HasPrefix(key, pat) || internal.Glob(pat, key) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
if f.Drop != nil {
|
||||
for _, pat := range f.Drop {
|
||||
// TODO remove HasPrefix check, leaving it for now for legacy support.
|
||||
// Cam, 2015-12-07
|
||||
if strings.HasPrefix(key, pat) || internal.Glob(pat, key) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// ShouldTagsPass returns true if the metric should pass, false if should drop
|
||||
// based on the tagdrop/tagpass filter parameters
|
||||
func (f Filter) ShouldTagsPass(tags map[string]string) bool {
|
||||
if f.TagPass != nil {
|
||||
for _, pat := range f.TagPass {
|
||||
if tagval, ok := tags[pat.Name]; ok {
|
||||
for _, filter := range pat.Filter {
|
||||
if internal.Glob(filter, tagval) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
if f.TagDrop != nil {
|
||||
for _, pat := range f.TagDrop {
|
||||
if tagval, ok := tags[pat.Name]; ok {
|
||||
for _, filter := range pat.Filter {
|
||||
if internal.Glob(filter, tagval) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
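A quick sketch of how these rules compose, written as if inside this package (it lives under internal/ and cannot be imported from outside the repo); the metric and tag names are invented:

func exampleFilter() {
    f := Filter{
        Pass: []string{"cpu*"},
        TagPass: []TagFilter{
            {Name: "cpu", Filter: []string{"cpu-*"}},
        },
        IsActive: true,
    }

    f.ShouldPass("cpu_usage_idle")                         // true: matches "cpu*"
    f.ShouldPass("mem_free")                               // false: no Pass glob matches
    f.ShouldTagsPass(map[string]string{"cpu": "cpu-0"})    // true: matches "cpu-*"
    f.ShouldTagsPass(map[string]string{"cpu": "cputotal"}) // false
}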
internal/models/filter_test.go (new file, 177 lines)
@@ -0,0 +1,177 @@
package internal_models

import (
    "testing"
)

func TestFilter_Empty(t *testing.T) {
    f := Filter{}

    measurements := []string{
        "foo",
        "bar",
        "barfoo",
        "foo_bar",
        "foo.bar",
        "foo-bar",
        "supercalifradjulisticexpialidocious",
    }

    for _, measurement := range measurements {
        if !f.ShouldPass(measurement) {
            t.Errorf("Expected measurement %s to pass", measurement)
        }
    }
}

func TestFilter_Pass(t *testing.T) {
    f := Filter{
        Pass: []string{"foo*", "cpu_usage_idle"},
    }

    passes := []string{
        "foo",
        "foo_bar",
        "foo.bar",
        "foo-bar",
        "cpu_usage_idle",
    }

    drops := []string{
        "bar",
        "barfoo",
        "bar_foo",
        "cpu_usage_busy",
    }

    for _, measurement := range passes {
        if !f.ShouldPass(measurement) {
            t.Errorf("Expected measurement %s to pass", measurement)
        }
    }

    for _, measurement := range drops {
        if f.ShouldPass(measurement) {
            t.Errorf("Expected measurement %s to drop", measurement)
        }
    }
}

func TestFilter_Drop(t *testing.T) {
    f := Filter{
        Drop: []string{"foo*", "cpu_usage_idle"},
    }

    drops := []string{
        "foo",
        "foo_bar",
        "foo.bar",
        "foo-bar",
        "cpu_usage_idle",
    }

    passes := []string{
        "bar",
        "barfoo",
        "bar_foo",
        "cpu_usage_busy",
    }

    for _, measurement := range passes {
        if !f.ShouldPass(measurement) {
            t.Errorf("Expected measurement %s to pass", measurement)
        }
    }

    for _, measurement := range drops {
        if f.ShouldPass(measurement) {
            t.Errorf("Expected measurement %s to drop", measurement)
        }
    }
}

func TestFilter_TagPass(t *testing.T) {
    filters := []TagFilter{
        TagFilter{
            Name:   "cpu",
            Filter: []string{"cpu-*"},
        },
        TagFilter{
            Name:   "mem",
            Filter: []string{"mem_free"},
        }}
    f := Filter{
        TagPass: filters,
    }

    passes := []map[string]string{
        {"cpu": "cpu-total"},
        {"cpu": "cpu-0"},
        {"cpu": "cpu-1"},
        {"cpu": "cpu-2"},
        {"mem": "mem_free"},
    }

    drops := []map[string]string{
        {"cpu": "cputotal"},
        {"cpu": "cpu0"},
        {"cpu": "cpu1"},
        {"cpu": "cpu2"},
        {"mem": "mem_used"},
    }

    for _, tags := range passes {
        if !f.ShouldTagsPass(tags) {
            t.Errorf("Expected tags %v to pass", tags)
        }
    }

    for _, tags := range drops {
        if f.ShouldTagsPass(tags) {
            t.Errorf("Expected tags %v to drop", tags)
        }
    }
}

func TestFilter_TagDrop(t *testing.T) {
    filters := []TagFilter{
        TagFilter{
            Name:   "cpu",
            Filter: []string{"cpu-*"},
        },
        TagFilter{
            Name:   "mem",
            Filter: []string{"mem_free"},
        }}
    f := Filter{
        TagDrop: filters,
    }

    drops := []map[string]string{
        {"cpu": "cpu-total"},
        {"cpu": "cpu-0"},
        {"cpu": "cpu-1"},
        {"cpu": "cpu-2"},
        {"mem": "mem_free"},
    }

    passes := []map[string]string{
        {"cpu": "cputotal"},
        {"cpu": "cpu0"},
        {"cpu": "cpu1"},
        {"cpu": "cpu2"},
        {"mem": "mem_used"},
    }

    for _, tags := range passes {
        if !f.ShouldTagsPass(tags) {
            t.Errorf("Expected tags %v to pass", tags)
        }
    }

    for _, tags := range drops {
        if f.ShouldTagsPass(tags) {
            t.Errorf("Expected tags %v to drop", tags)
        }
    }
}
internal/models/running_input.go (new file, 24 lines)
@@ -0,0 +1,24 @@
package internal_models

import (
    "time"

    "github.com/influxdata/telegraf"
)

type RunningInput struct {
    Name   string
    Input  telegraf.Input
    Config *InputConfig
}

// InputConfig containing a name, interval, and filter
type InputConfig struct {
    Name              string
    NameOverride      string
    MeasurementPrefix string
    MeasurementSuffix string
    Tags              map[string]string
    Filter            Filter
    Interval          time.Duration
}
internal/models/running_output.go (new file, 78 lines)
@@ -0,0 +1,78 @@
package internal_models

import (
    "log"
    "time"

    "github.com/influxdata/telegraf"
)

const DEFAULT_POINT_BUFFER_LIMIT = 10000

type RunningOutput struct {
    Name             string
    Output           telegraf.Output
    Config           *OutputConfig
    Quiet            bool
    PointBufferLimit int

    metrics          []telegraf.Metric
    overwriteCounter int
}

func NewRunningOutput(
    name string,
    output telegraf.Output,
    conf *OutputConfig,
) *RunningOutput {
    ro := &RunningOutput{
        Name:             name,
        metrics:          make([]telegraf.Metric, 0),
        Output:           output,
        Config:           conf,
        PointBufferLimit: DEFAULT_POINT_BUFFER_LIMIT,
    }
    return ro
}

func (ro *RunningOutput) AddPoint(point telegraf.Metric) {
    if ro.Config.Filter.IsActive {
        if !ro.Config.Filter.ShouldMetricPass(point) {
            return
        }
    }

    if len(ro.metrics) < ro.PointBufferLimit {
        ro.metrics = append(ro.metrics, point)
    } else {
        log.Printf("WARNING: overwriting cached metrics, you may want to " +
            "increase the metric_buffer_limit setting in your [agent] config " +
            "if you do not wish to overwrite metrics.\n")
        if ro.overwriteCounter == len(ro.metrics) {
            ro.overwriteCounter = 0
        }
        ro.metrics[ro.overwriteCounter] = point
        ro.overwriteCounter++
    }
}

func (ro *RunningOutput) Write() error {
    start := time.Now()
    err := ro.Output.Write(ro.metrics)
    elapsed := time.Since(start)
    if err == nil {
        if !ro.Quiet {
            log.Printf("Wrote %d metrics to output %s in %s\n",
                len(ro.metrics), ro.Name, elapsed)
        }
        ro.metrics = make([]telegraf.Metric, 0)
        ro.overwriteCounter = 0
    }
    return err
}

// OutputConfig containing name and filter
type OutputConfig struct {
    Name   string
    Filter Filter
}
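The overwrite branch in AddPoint is what produces the new "overwriting cached metrics" warning from #601. A sketch of the behavior with a stub Output (the stub type and this driver function are illustrative, not part of this changeset):

type nullOutput struct{}

func (n *nullOutput) Connect() error                        { return nil }
func (n *nullOutput) Close() error                          { return nil }
func (n *nullOutput) Description() string                   { return "discard all metrics" }
func (n *nullOutput) SampleConfig() string                  { return "" }
func (n *nullOutput) Write(metrics []telegraf.Metric) error { return nil }

func exampleOverwrite() {
    ro := NewRunningOutput("null", &nullOutput{}, &OutputConfig{})
    ro.PointBufferLimit = 2 // tiny buffer to force the overwrite path

    m1, _ := telegraf.NewMetric("m1", nil, map[string]interface{}{"v": 1.0})
    m2, _ := telegraf.NewMetric("m2", nil, map[string]interface{}{"v": 2.0})
    m3, _ := telegraf.NewMetric("m3", nil, map[string]interface{}{"v": 3.0})

    ro.AddPoint(m1)
    ro.AddPoint(m2)
    ro.AddPoint(m3) // buffer full: logs the warning and overwrites m1 in place
    _ = ro.Write()  // on success the buffer and overwrite counter reset
}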
metric.go (new file, 115 lines)
@@ -0,0 +1,115 @@
package telegraf

import (
    "bytes"
    "time"

    "github.com/influxdata/influxdb/client/v2"
    "github.com/influxdata/influxdb/models"
)

type Metric interface {
    // Name returns the measurement name of the metric
    Name() string

    // Tags returns the tags associated with the metric
    Tags() map[string]string

    // Time returns the timestamp for the metric
    Time() time.Time

    // UnixNano returns the unix nano time of the metric
    UnixNano() int64

    // Fields returns the fields for the metric
    Fields() map[string]interface{}

    // String returns a line-protocol string of the metric
    String() string

    // PrecisionString returns a line-protocol string of the metric, at precision
    PrecisionString(precision string) string

    // Point returns an influxdb client.Point object
    Point() *client.Point
}

// metric is a wrapper of the influxdb client.Point struct
type metric struct {
    pt *client.Point
}

// NewMetric returns a metric with the given timestamp. If a timestamp is not
// given, then data is sent to the database without a timestamp, in which case
// the server will assign local time upon reception. NOTE: it is recommended to
// send data with a timestamp.
func NewMetric(
    name string,
    tags map[string]string,
    fields map[string]interface{},
    t ...time.Time,
) (Metric, error) {
    var T time.Time
    if len(t) > 0 {
        T = t[0]
    }

    pt, err := client.NewPoint(name, tags, fields, T)
    if err != nil {
        return nil, err
    }
    return &metric{
        pt: pt,
    }, nil
}

// ParseMetrics returns a slice of Metrics from a text representation of a
// metric (in line-protocol format), with each metric separated by newlines.
// If any metrics fail to parse, a non-nil error will be returned in addition
// to the metrics that parsed successfully.
func ParseMetrics(buf []byte) ([]Metric, error) {
    // parse even if the buffer begins with a newline
    buf = bytes.TrimPrefix(buf, []byte("\n"))
    points, err := models.ParsePoints(buf)
    metrics := make([]Metric, len(points))
    for i, point := range points {
        // Ignore error here because it's impossible that a models.Point
        // wouldn't parse into client.Point properly
        metrics[i], _ = NewMetric(point.Name(), point.Tags(),
            point.Fields(), point.Time())
    }
    return metrics, err
}

func (m *metric) Name() string {
    return m.pt.Name()
}

func (m *metric) Tags() map[string]string {
    return m.pt.Tags()
}

func (m *metric) Time() time.Time {
    return m.pt.Time()
}

func (m *metric) UnixNano() int64 {
    return m.pt.UnixNano()
}

func (m *metric) Fields() map[string]interface{} {
    return m.pt.Fields()
}

func (m *metric) String() string {
    return m.pt.String()
}

func (m *metric) PrecisionString(precision string) string {
    return m.pt.PrecisionString(precision)
}

func (m *metric) Point() *client.Point {
    return m.pt
}
metric_test.go (new file, 135 lines)
@@ -0,0 +1,135 @@
package telegraf

import (
    "fmt"
    "math"
    "testing"
    "time"

    "github.com/stretchr/testify/assert"
)

const validMs = `
cpu,cpu=cpu0,host=foo,datacenter=us-east usage_idle=99,usage_busy=1 1454105876344540456
`

const invalidMs = `
cpu, cpu=cpu0,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,host=foo usage_idle
cpu,host usage_idle=99
cpu,host=foo usage_idle=99 very bad metric
`

const validInvalidMs = `
cpu,cpu=cpu0,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu1,host=foo,datacenter=us-east usage_idle=51,usage_busy=49
cpu,cpu=cpu2,host=foo,datacenter=us-east usage_idle=60,usage_busy=40
cpu,host usage_idle=99
`

func TestParseValidMetrics(t *testing.T) {
    metrics, err := ParseMetrics([]byte(validMs))
    assert.NoError(t, err)
    assert.Len(t, metrics, 1)
    m := metrics[0]

    tags := map[string]string{
        "host":       "foo",
        "datacenter": "us-east",
        "cpu":        "cpu0",
    }
    fields := map[string]interface{}{
        "usage_idle": float64(99),
        "usage_busy": float64(1),
    }

    assert.Equal(t, tags, m.Tags())
    assert.Equal(t, fields, m.Fields())
    assert.Equal(t, "cpu", m.Name())
    assert.Equal(t, int64(1454105876344540456), m.UnixNano())
}

func TestParseInvalidMetrics(t *testing.T) {
    metrics, err := ParseMetrics([]byte(invalidMs))
    assert.Error(t, err)
    assert.Len(t, metrics, 0)
}

func TestParseValidAndInvalidMetrics(t *testing.T) {
    metrics, err := ParseMetrics([]byte(validInvalidMs))
    assert.Error(t, err)
    assert.Len(t, metrics, 3)
}

func TestNewMetric(t *testing.T) {
    now := time.Now()

    tags := map[string]string{
        "host":       "localhost",
        "datacenter": "us-east-1",
    }
    fields := map[string]interface{}{
        "usage_idle": float64(99),
        "usage_busy": float64(1),
    }
    m, err := NewMetric("cpu", tags, fields, now)
    assert.NoError(t, err)

    assert.Equal(t, tags, m.Tags())
    assert.Equal(t, fields, m.Fields())
    assert.Equal(t, "cpu", m.Name())
    assert.Equal(t, now, m.Time())
    assert.Equal(t, now.UnixNano(), m.UnixNano())
}

func TestNewMetricString(t *testing.T) {
    now := time.Now()

    tags := map[string]string{
        "host": "localhost",
    }
    fields := map[string]interface{}{
        "usage_idle": float64(99),
    }
    m, err := NewMetric("cpu", tags, fields, now)
    assert.NoError(t, err)

    lineProto := fmt.Sprintf("cpu,host=localhost usage_idle=99 %d",
        now.UnixNano())
    assert.Equal(t, lineProto, m.String())

    lineProtoPrecision := fmt.Sprintf("cpu,host=localhost usage_idle=99 %d",
        now.Unix())
    assert.Equal(t, lineProtoPrecision, m.PrecisionString("s"))
}

func TestNewMetricStringNoTime(t *testing.T) {
    tags := map[string]string{
        "host": "localhost",
    }
    fields := map[string]interface{}{
        "usage_idle": float64(99),
    }
    m, err := NewMetric("cpu", tags, fields)
    assert.NoError(t, err)

    lineProto := fmt.Sprintf("cpu,host=localhost usage_idle=99")
    assert.Equal(t, lineProto, m.String())

    lineProtoPrecision := fmt.Sprintf("cpu,host=localhost usage_idle=99")
    assert.Equal(t, lineProtoPrecision, m.PrecisionString("s"))
}

func TestNewMetricFailNaN(t *testing.T) {
    now := time.Now()

    tags := map[string]string{
        "host": "localhost",
    }
    fields := map[string]interface{}{
        "usage_idle": math.NaN(),
    }

    _, err := NewMetric("cpu", tags, fields, now)
    assert.Error(t, err)
}
output.go (new file, 31 lines)
@@ -0,0 +1,31 @@
package telegraf

type Output interface {
    // Connect to the Output
    Connect() error
    // Close any connections to the Output
    Close() error
    // Description returns a one-sentence description on the Output
    Description() string
    // SampleConfig returns the default configuration of the Output
    SampleConfig() string
    // Write takes in group of points to be written to the Output
    Write(metrics []Metric) error
}

type ServiceOutput interface {
    // Connect to the Output
    Connect() error
    // Close any connections to the Output
    Close() error
    // Description returns a one-sentence description on the Output
    Description() string
    // SampleConfig returns the default configuration of the Output
    SampleConfig() string
    // Write takes in group of points to be written to the Output
    Write(metrics []Metric) error
    // Start the "service" that will provide an Output
    Start() error
    // Stop the "service" that will provide an Output
    Stop()
}
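Both interfaces are deliberately small. A minimal sketch of a type satisfying Output; the Printer name and package are invented for illustration:

package example

import (
    "fmt"

    "github.com/influxdata/telegraf"
)

// Printer is a hypothetical Output that writes metrics to stdout.
type Printer struct{}

func (p *Printer) Connect() error       { return nil }
func (p *Printer) Close() error         { return nil }
func (p *Printer) Description() string  { return "Print all metrics to stdout" }
func (p *Printer) SampleConfig() string { return "" }

func (p *Printer) Write(metrics []telegraf.Metric) error {
    for _, m := range metrics {
        // Metric.String() renders InfluxDB line protocol (see metric.go above)
        fmt.Println(m.String())
    }
    return nil
}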
plugins/inputs/EXAMPLE_README.md (new file, 39 lines)
@@ -0,0 +1,39 @@
# Example Input Plugin

The example plugin gathers metrics about example things

### Configuration:

```
# Description
[[inputs.example]]
  # SampleConfig
```

### Measurements & Fields:

<optional description>

- measurement1
    - field1 (type, unit)
    - field2 (float, percent)
- measurement2
    - field3 (integer, bytes)

### Tags:

- All measurements have the following tags:
    - tag1 (optional description)
    - tag2
- measurement2 has the following tags:
    - tag3

### Example Output:

Give an example `-test` output here

```
$ ./telegraf -config telegraf.conf -input-filter example -test
measurement1,tag1=foo,tag2=bar field1=1i,field2=2.1 1453831884664956455
measurement2,tag1=foo,tag2=bar,tag3=baz field3=1i 1453831884664956455
```
plugins/inputs/aerospike/aerospike.go
@@ -4,7 +4,8 @@ import (
     "bytes"
     "encoding/binary"
     "fmt"
-    "github.com/influxdb/telegraf/plugins/inputs"
+    "github.com/influxdata/telegraf"
+    "github.com/influxdata/telegraf/plugins/inputs"
     "net"
     "strconv"
     "strings"
@@ -119,7 +120,7 @@ func (a *Aerospike) Description() string {
     return "Read stats from an aerospike server"
 }

-func (a *Aerospike) Gather(acc inputs.Accumulator) error {
+func (a *Aerospike) Gather(acc telegraf.Accumulator) error {
     if len(a.Servers) == 0 {
         return a.gatherServer("127.0.0.1:3000", acc)
     }
@@ -140,7 +141,7 @@ func (a *Aerospike) Gather(acc inputs.Accumulator) error {
     return outerr
 }

-func (a *Aerospike) gatherServer(host string, acc inputs.Accumulator) error {
+func (a *Aerospike) gatherServer(host string, acc telegraf.Accumulator) error {
     aerospikeInfo, err := getMap(STATISTICS_COMMAND, host)
     if err != nil {
         return fmt.Errorf("Aerospike info failed: %s", err)
@@ -249,7 +250,7 @@ func get(key []byte, host string) (map[string]string, error) {

 func readAerospikeStats(
     stats map[string]string,
-    acc inputs.Accumulator,
+    acc telegraf.Accumulator,
     host string,
     namespace string,
 ) {
@@ -336,7 +337,7 @@ func msgLenFromBytes(buf [6]byte) int64 {
 }

 func init() {
-    inputs.Add("aerospike", func() inputs.Input {
+    inputs.Add("aerospike", func() telegraf.Input {
         return &Aerospike{}
     })
 }
plugins/inputs/aerospike/aerospike_test.go
@@ -4,7 +4,7 @@ import (
     "reflect"
     "testing"

-    "github.com/influxdb/telegraf/testutil"
+    "github.com/influxdata/telegraf/testutil"
     "github.com/stretchr/testify/assert"
     "github.com/stretchr/testify/require"
 )
plugins/inputs/all/all.go
@@ -1,37 +1,46 @@
 package all

 import (
-    _ "github.com/influxdb/telegraf/plugins/inputs/aerospike"
-    _ "github.com/influxdb/telegraf/plugins/inputs/apache"
-    _ "github.com/influxdb/telegraf/plugins/inputs/bcache"
-    _ "github.com/influxdb/telegraf/plugins/inputs/disque"
-    _ "github.com/influxdb/telegraf/plugins/inputs/elasticsearch"
-    _ "github.com/influxdb/telegraf/plugins/inputs/exec"
-    _ "github.com/influxdb/telegraf/plugins/inputs/haproxy"
-    _ "github.com/influxdb/telegraf/plugins/inputs/httpjson"
-    _ "github.com/influxdb/telegraf/plugins/inputs/influxdb"
-    _ "github.com/influxdb/telegraf/plugins/inputs/jolokia"
-    _ "github.com/influxdb/telegraf/plugins/inputs/kafka_consumer"
-    _ "github.com/influxdb/telegraf/plugins/inputs/leofs"
-    _ "github.com/influxdb/telegraf/plugins/inputs/lustre2"
-    _ "github.com/influxdb/telegraf/plugins/inputs/mailchimp"
-    _ "github.com/influxdb/telegraf/plugins/inputs/memcached"
-    _ "github.com/influxdb/telegraf/plugins/inputs/mongodb"
-    _ "github.com/influxdb/telegraf/plugins/inputs/mysql"
-    _ "github.com/influxdb/telegraf/plugins/inputs/nginx"
-    _ "github.com/influxdb/telegraf/plugins/inputs/phpfpm"
-    _ "github.com/influxdb/telegraf/plugins/inputs/ping"
-    _ "github.com/influxdb/telegraf/plugins/inputs/postgresql"
-    _ "github.com/influxdb/telegraf/plugins/inputs/procstat"
-    _ "github.com/influxdb/telegraf/plugins/inputs/prometheus"
-    _ "github.com/influxdb/telegraf/plugins/inputs/puppetagent"
-    _ "github.com/influxdb/telegraf/plugins/inputs/rabbitmq"
-    _ "github.com/influxdb/telegraf/plugins/inputs/redis"
-    _ "github.com/influxdb/telegraf/plugins/inputs/rethinkdb"
-    _ "github.com/influxdb/telegraf/plugins/inputs/statsd"
-    _ "github.com/influxdb/telegraf/plugins/inputs/system"
-    _ "github.com/influxdb/telegraf/plugins/inputs/trig"
-    _ "github.com/influxdb/telegraf/plugins/inputs/twemproxy"
-    _ "github.com/influxdb/telegraf/plugins/inputs/zfs"
-    _ "github.com/influxdb/telegraf/plugins/inputs/zookeeper"
+    _ "github.com/influxdata/telegraf/plugins/inputs/aerospike"
+    _ "github.com/influxdata/telegraf/plugins/inputs/apache"
+    _ "github.com/influxdata/telegraf/plugins/inputs/bcache"
+    _ "github.com/influxdata/telegraf/plugins/inputs/disque"
+    _ "github.com/influxdata/telegraf/plugins/inputs/docker"
+    _ "github.com/influxdata/telegraf/plugins/inputs/elasticsearch"
+    _ "github.com/influxdata/telegraf/plugins/inputs/exec"
+    _ "github.com/influxdata/telegraf/plugins/inputs/github_webhooks"
+    _ "github.com/influxdata/telegraf/plugins/inputs/haproxy"
+    _ "github.com/influxdata/telegraf/plugins/inputs/httpjson"
+    _ "github.com/influxdata/telegraf/plugins/inputs/influxdb"
+    _ "github.com/influxdata/telegraf/plugins/inputs/jolokia"
+    _ "github.com/influxdata/telegraf/plugins/inputs/kafka_consumer"
+    _ "github.com/influxdata/telegraf/plugins/inputs/leofs"
+    _ "github.com/influxdata/telegraf/plugins/inputs/lustre2"
+    _ "github.com/influxdata/telegraf/plugins/inputs/mailchimp"
+    _ "github.com/influxdata/telegraf/plugins/inputs/memcached"
+    _ "github.com/influxdata/telegraf/plugins/inputs/mongodb"
+    _ "github.com/influxdata/telegraf/plugins/inputs/mysql"
+    _ "github.com/influxdata/telegraf/plugins/inputs/nginx"
+    _ "github.com/influxdata/telegraf/plugins/inputs/nsq"
+    _ "github.com/influxdata/telegraf/plugins/inputs/passenger"
+    _ "github.com/influxdata/telegraf/plugins/inputs/phpfpm"
+    _ "github.com/influxdata/telegraf/plugins/inputs/ping"
+    _ "github.com/influxdata/telegraf/plugins/inputs/postgresql"
+    _ "github.com/influxdata/telegraf/plugins/inputs/powerdns"
+    _ "github.com/influxdata/telegraf/plugins/inputs/procstat"
+    _ "github.com/influxdata/telegraf/plugins/inputs/prometheus"
+    _ "github.com/influxdata/telegraf/plugins/inputs/puppetagent"
+    _ "github.com/influxdata/telegraf/plugins/inputs/rabbitmq"
+    _ "github.com/influxdata/telegraf/plugins/inputs/redis"
+    _ "github.com/influxdata/telegraf/plugins/inputs/rethinkdb"
+    _ "github.com/influxdata/telegraf/plugins/inputs/sensors"
+    _ "github.com/influxdata/telegraf/plugins/inputs/snmp"
+    _ "github.com/influxdata/telegraf/plugins/inputs/sqlserver"
+    _ "github.com/influxdata/telegraf/plugins/inputs/statsd"
+    _ "github.com/influxdata/telegraf/plugins/inputs/system"
+    _ "github.com/influxdata/telegraf/plugins/inputs/trig"
+    _ "github.com/influxdata/telegraf/plugins/inputs/twemproxy"
+    _ "github.com/influxdata/telegraf/plugins/inputs/win_perf_counters"
+    _ "github.com/influxdata/telegraf/plugins/inputs/zfs"
+    _ "github.com/influxdata/telegraf/plugins/inputs/zookeeper"
 )
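This registry file works entirely through blank imports: pulling in a package runs its init function, which registers a constructor under the plugin's name, as the aerospike hunk above shows. A sketch of the pattern for a hypothetical plugin called "mything" (not part of this changeset):

package mything

import (
    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/plugins/inputs"
)

type MyThing struct{}

func (m *MyThing) Description() string                    { return "An example input" }
func (m *MyThing) SampleConfig() string                   { return "" }
func (m *MyThing) Gather(acc telegraf.Accumulator) error  { return nil }

// init runs when the package is imported, even blank-imported from all.go,
// which makes the plugin discoverable by name in the config loader.
func init() {
    inputs.Add("mything", func() telegraf.Input {
        return &MyThing{}
    })
}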
plugins/inputs/apache/apache.go
@@ -11,7 +11,8 @@ import (
     "sync"
     "time"

-    "github.com/influxdb/telegraf/plugins/inputs"
+    "github.com/influxdata/telegraf"
+    "github.com/influxdata/telegraf/plugins/inputs"
 )

 type Apache struct {
@@ -31,7 +32,7 @@ func (n *Apache) Description() string {
     return "Read Apache status information (mod_status)"
 }

-func (n *Apache) Gather(acc inputs.Accumulator) error {
+func (n *Apache) Gather(acc telegraf.Accumulator) error {
     var wg sync.WaitGroup
     var outerr error

@@ -59,7 +60,7 @@ var tr = &http.Transport{

 var client = &http.Client{Transport: tr}

-func (n *Apache) gatherUrl(addr *url.URL, acc inputs.Accumulator) error {
+func (n *Apache) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error {
     resp, err := client.Get(addr.String())
     if err != nil {
         return fmt.Errorf("error making HTTP request to %s: %s", addr.String(), err)
@@ -164,7 +165,7 @@ func getTags(addr *url.URL) map[string]string {
 }

 func init() {
-    inputs.Add("apache", func() inputs.Input {
+    inputs.Add("apache", func() telegraf.Input {
         return &Apache{}
     })
 }
plugins/inputs/apache/apache_test.go
@@ -6,7 +6,7 @@ import (
     "net/http/httptest"
     "testing"

-    "github.com/influxdb/telegraf/testutil"
+    "github.com/influxdata/telegraf/testutil"

     "github.com/stretchr/testify/require"
 )
plugins/inputs/bcache/bcache.go
@@ -8,7 +8,8 @@ import (
     "strconv"
     "strings"

-    "github.com/influxdb/telegraf/plugins/inputs"
+    "github.com/influxdata/telegraf"
+    "github.com/influxdata/telegraf/plugins/inputs"
 )

 type Bcache struct {
@@ -69,7 +70,7 @@ func prettyToBytes(v string) uint64 {
     return uint64(result)
 }

-func (b *Bcache) gatherBcache(bdev string, acc inputs.Accumulator) error {
+func (b *Bcache) gatherBcache(bdev string, acc telegraf.Accumulator) error {
     tags := getTags(bdev)
     metrics, err := filepath.Glob(bdev + "/stats_total/*")
     if len(metrics) < 0 {
@@ -104,7 +105,7 @@ func (b *Bcache) gatherBcache(bdev string, acc inputs.Accumulator) error {
     return nil
 }

-func (b *Bcache) Gather(acc inputs.Accumulator) error {
+func (b *Bcache) Gather(acc telegraf.Accumulator) error {
     bcacheDevsChecked := make(map[string]bool)
     var restrictDevs bool
     if len(b.BcacheDevs) != 0 {
@@ -135,7 +136,7 @@ func (b *Bcache) Gather(acc inputs.Accumulator) error {
 }

 func init() {
-    inputs.Add("bcache", func() inputs.Input {
+    inputs.Add("bcache", func() telegraf.Input {
         return &Bcache{}
     })
 }
plugins/inputs/bcache/bcache_test.go
@@ -5,7 +5,7 @@ import (
     "os"
     "testing"

-    "github.com/influxdb/telegraf/testutil"
+    "github.com/influxdata/telegraf/testutil"
     "github.com/stretchr/testify/require"
 )
plugins/inputs/disque/disque.go
@@ -10,7 +10,8 @@ import (
     "strings"
     "sync"

-    "github.com/influxdb/telegraf/plugins/inputs"
+    "github.com/influxdata/telegraf"
+    "github.com/influxdata/telegraf/plugins/inputs"
 )

 type Disque struct {
@@ -61,7 +62,7 @@ var ErrProtocolError = errors.New("disque protocol error")

 // Reads stats from all configured servers accumulates stats.
 // Returns one of the errors encountered while gather stats (if any).
-func (g *Disque) Gather(acc inputs.Accumulator) error {
+func (g *Disque) Gather(acc telegraf.Accumulator) error {
     if len(g.Servers) == 0 {
         url := &url.URL{
             Host: ":7711",
@@ -98,7 +99,7 @@ func (g *Disque) Gather(acc inputs.Accumulator) error {

 const defaultPort = "7711"

-func (g *Disque) gatherServer(addr *url.URL, acc inputs.Accumulator) error {
+func (g *Disque) gatherServer(addr *url.URL, acc telegraf.Accumulator) error {
     if g.c == nil {

         _, _, err := net.SplitHostPort(addr.Host)
@@ -198,7 +199,7 @@ func (g *Disque) gatherServer(addr *url.URL, acc inputs.Accumulator) error {
 }

 func init() {
-    inputs.Add("disque", func() inputs.Input {
+    inputs.Add("disque", func() telegraf.Input {
         return &Disque{}
     })
 }
plugins/inputs/disque/disque_test.go
@@ -6,7 +6,7 @@ import (
     "net"
     "testing"

-    "github.com/influxdb/telegraf/testutil"
+    "github.com/influxdata/telegraf/testutil"
     "github.com/stretchr/testify/require"
 )
plugins/inputs/docker/README.md (new file, 148 lines)
@@ -0,0 +1,148 @@
# Docker Input Plugin

The docker plugin uses the docker remote API to gather metrics on running
docker containers. You can read Docker's documentation for their remote API
[here](https://docs.docker.com/engine/reference/api/docker_remote_api_v1.20/#get-container-stats-based-on-resource-usage)

The docker plugin uses the excellent
[fsouza go-dockerclient](https://github.com/fsouza/go-dockerclient) library to
gather stats. Documentation for the library can be found
[here](https://godoc.org/github.com/fsouza/go-dockerclient) and documentation
for the stat structure can be found
[here](https://godoc.org/github.com/fsouza/go-dockerclient#Stats)

### Configuration:

```
# Read metrics about docker containers
[[inputs.docker]]
  # Docker Endpoint
  #   To use TCP, set endpoint = "tcp://[ip]:[port]"
  #   To use environment variables (ie, docker-machine), set endpoint = "ENV"
  endpoint = "unix:///var/run/docker.sock"
  # Only collect metrics for these containers, collect all if empty
  container_names = []
```

### Measurements & Fields:

Every effort was made to preserve the names based on the JSON response from the
docker API.

Note that the docker_cpu metric may appear multiple times per collection, based
on the availability of per-cpu stats on your system.

- docker_mem
    - total_pgmafault
    - cache
    - mapped_file
    - total_inactive_file
    - pgpgout
    - rss
    - total_mapped_file
    - writeback
    - unevictable
    - pgpgin
    - total_unevictable
    - pgmajfault
    - total_rss
    - total_rss_huge
    - total_writeback
    - total_inactive_anon
    - rss_huge
    - hierarchical_memory_limit
    - total_pgfault
    - total_active_file
    - active_anon
    - total_active_anon
    - total_pgpgout
    - total_cache
    - inactive_anon
    - active_file
    - pgfault
    - inactive_file
    - total_pgpgin
    - max_usage
    - usage
    - failcnt
    - limit
- docker_cpu
    - throttling_periods
    - throttling_throttled_periods
    - throttling_throttled_time
    - usage_in_kernelmode
    - usage_in_usermode
    - usage_system
    - usage_total
- docker_net
    - rx_dropped
    - rx_bytes
    - rx_errors
    - tx_packets
    - tx_dropped
    - rx_packets
    - tx_errors
    - tx_bytes
- docker_blkio
    - io_service_bytes_recursive_async
    - io_service_bytes_recursive_read
    - io_service_bytes_recursive_sync
    - io_service_bytes_recursive_total
    - io_service_bytes_recursive_write
    - io_serviced_recursive_async
    - io_serviced_recursive_read
    - io_serviced_recursive_sync
    - io_serviced_recursive_total
    - io_serviced_recursive_write

### Tags:

- All stats have the following tags:
    - cont_id (container ID)
    - cont_image (container image)
    - cont_name (container name)
- docker_cpu specific:
    - cpu
- docker_net specific:
    - network
- docker_blkio specific:
    - device

### Example Output:

```
% ./telegraf -config ~/ws/telegraf.conf -input-filter docker -test
* Plugin: docker, Collection 1
> docker_mem,cont_id=5705ba8ed8fb47527410653d60a8bb2f3af5e62372297c419022a3cc6d45d848,\
cont_image=spotify/kafka,cont_name=kafka \
active_anon=52568064i,active_file=6926336i,cache=12038144i,fail_count=0i,\
hierarchical_memory_limit=9223372036854771712i,inactive_anon=52707328i,\
inactive_file=5111808i,limit=1044578304i,mapped_file=10301440i,\
max_usage=140656640i,pgfault=63762i,pgmajfault=2837i,pgpgin=73355i,\
pgpgout=45736i,rss=105275392i,rss_huge=4194304i,total_active_anon=52568064i,\
total_active_file=6926336i,total_cache=12038144i,total_inactive_anon=52707328i,\
total_inactive_file=5111808i,total_mapped_file=10301440i,total_pgfault=63762i,\
total_pgmafault=0i,total_pgpgin=73355i,total_pgpgout=45736i,\
total_rss=105275392i,total_rss_huge=4194304i,total_unevictable=0i,\
total_writeback=0i,unevictable=0i,usage=117440512i,writeback=0i 1453409536840126713
> docker_cpu,cont_id=5705ba8ed8fb47527410653d60a8bb2f3af5e62372297c419022a3cc6d45d848,\
cont_image=spotify/kafka,cont_name=kafka,cpu=cpu-total \
throttling_periods=0i,throttling_throttled_periods=0i,\
throttling_throttled_time=0i,usage_in_kernelmode=440000000i,\
usage_in_usermode=2290000000i,usage_system=84795360000000i,\
usage_total=6628208865i 1453409536840126713
> docker_cpu,cont_id=5705ba8ed8fb47527410653d60a8bb2f3af5e62372297c419022a3cc6d45d848,\
cont_image=spotify/kafka,cont_name=kafka,cpu=cpu0 \
usage_total=6628208865i 1453409536840126713
> docker_net,cont_id=5705ba8ed8fb47527410653d60a8bb2f3af5e62372297c419022a3cc6d45d848,\
cont_image=spotify/kafka,cont_name=kafka,network=eth0 \
rx_bytes=7468i,rx_dropped=0i,rx_errors=0i,rx_packets=94i,tx_bytes=946i,\
tx_dropped=0i,tx_errors=0i,tx_packets=13i 1453409536840126713
> docker_blkio,cont_id=5705ba8ed8fb47527410653d60a8bb2f3af5e62372297c419022a3cc6d45d848,\
cont_image=spotify/kafka,cont_name=kafka,device=8:0 \
io_service_bytes_recursive_async=80216064i,io_service_bytes_recursive_read=79925248i,\
io_service_bytes_recursive_sync=77824i,io_service_bytes_recursive_total=80293888i,\
io_service_bytes_recursive_write=368640i,io_serviced_recursive_async=6562i,\
io_serviced_recursive_read=6492i,io_serviced_recursive_sync=37i,\
io_serviced_recursive_total=6599i,io_serviced_recursive_write=107i 1453409536840126713
```
plugins/inputs/docker/docker.go (new file, 318 lines)
@@ -0,0 +1,318 @@
package system

import (
    "fmt"
    "log"
    "strings"
    "sync"
    "time"

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/plugins/inputs"

    "github.com/fsouza/go-dockerclient"
)

type Docker struct {
    Endpoint       string
    ContainerNames []string

    client *docker.Client
}

var sampleConfig = `
  # Docker Endpoint
  #   To use TCP, set endpoint = "tcp://[ip]:[port]"
  #   To use environment variables (ie, docker-machine), set endpoint = "ENV"
  endpoint = "unix:///var/run/docker.sock"
  # Only collect metrics for these containers, collect all if empty
  container_names = []
`

func (d *Docker) Description() string {
    return "Read metrics about docker containers"
}

func (d *Docker) SampleConfig() string { return sampleConfig }

func (d *Docker) Gather(acc telegraf.Accumulator) error {
    if d.client == nil {
        var c *docker.Client
        var err error
        if d.Endpoint == "ENV" {
            c, err = docker.NewClientFromEnv()
            if err != nil {
                return err
            }
        } else if d.Endpoint == "" {
            c, err = docker.NewClient("unix:///var/run/docker.sock")
            if err != nil {
                return err
            }
        } else {
            c, err = docker.NewClient(d.Endpoint)
            if err != nil {
                return err
            }
        }
        d.client = c
    }

    opts := docker.ListContainersOptions{}
    containers, err := d.client.ListContainers(opts)
    if err != nil {
        return err
    }

    var wg sync.WaitGroup
    wg.Add(len(containers))
    for _, container := range containers {
        go func(c docker.APIContainers) {
            defer wg.Done()
            err := d.gatherContainer(c, acc)
            if err != nil {
                fmt.Println(err.Error())
            }
        }(container)
    }
    wg.Wait()

    return nil
}

func (d *Docker) gatherContainer(
    container docker.APIContainers,
    acc telegraf.Accumulator,
) error {
    // Parse container name
    cname := "unknown"
    if len(container.Names) > 0 {
        // Not sure what to do with other names, just take the first.
        cname = strings.TrimPrefix(container.Names[0], "/")
    }

    tags := map[string]string{
        "cont_id":    container.ID,
        "cont_name":  cname,
        "cont_image": container.Image,
    }
    if len(d.ContainerNames) > 0 {
        if !sliceContains(cname, d.ContainerNames) {
            return nil
        }
    }

    statChan := make(chan *docker.Stats)
    done := make(chan bool)
    statOpts := docker.StatsOptions{
        Stream:  false,
        ID:      container.ID,
        Stats:   statChan,
        Done:    done,
        Timeout: time.Duration(time.Second * 5),
    }

    go func() {
        err := d.client.Stats(statOpts)
        if err != nil {
            log.Printf("Error getting docker stats: %s\n", err.Error())
        }
    }()

    stat := <-statChan
    close(done)

    if stat == nil {
        return nil
    }

    // Add labels to tags
    for k, v := range container.Labels {
        tags[k] = v
    }

    gatherContainerStats(stat, acc, tags)

    return nil
}

func gatherContainerStats(
    stat *docker.Stats,
    acc telegraf.Accumulator,
    tags map[string]string,
) {
    now := stat.Read

    memfields := map[string]interface{}{
        "max_usage":                 stat.MemoryStats.MaxUsage,
        "usage":                     stat.MemoryStats.Usage,
        "fail_count":                stat.MemoryStats.Failcnt,
        "limit":                     stat.MemoryStats.Limit,
        "total_pgmafault":           stat.MemoryStats.Stats.TotalPgmafault,
        "cache":                     stat.MemoryStats.Stats.Cache,
        "mapped_file":               stat.MemoryStats.Stats.MappedFile,
        "total_inactive_file":       stat.MemoryStats.Stats.TotalInactiveFile,
        "pgpgout":                   stat.MemoryStats.Stats.Pgpgout,
        "rss":                       stat.MemoryStats.Stats.Rss,
        "total_mapped_file":         stat.MemoryStats.Stats.TotalMappedFile,
        "writeback":                 stat.MemoryStats.Stats.Writeback,
        "unevictable":               stat.MemoryStats.Stats.Unevictable,
        "pgpgin":                    stat.MemoryStats.Stats.Pgpgin,
        "total_unevictable":         stat.MemoryStats.Stats.TotalUnevictable,
        "pgmajfault":                stat.MemoryStats.Stats.Pgmajfault,
        "total_rss":                 stat.MemoryStats.Stats.TotalRss,
        "total_rss_huge":            stat.MemoryStats.Stats.TotalRssHuge,
        "total_writeback":           stat.MemoryStats.Stats.TotalWriteback,
        "total_inactive_anon":       stat.MemoryStats.Stats.TotalInactiveAnon,
        "rss_huge":                  stat.MemoryStats.Stats.RssHuge,
        "hierarchical_memory_limit": stat.MemoryStats.Stats.HierarchicalMemoryLimit,
        "total_pgfault":             stat.MemoryStats.Stats.TotalPgfault,
        "total_active_file":         stat.MemoryStats.Stats.TotalActiveFile,
        "active_anon":               stat.MemoryStats.Stats.ActiveAnon,
        "total_active_anon":         stat.MemoryStats.Stats.TotalActiveAnon,
        "total_pgpgout":             stat.MemoryStats.Stats.TotalPgpgout,
        "total_cache":               stat.MemoryStats.Stats.TotalCache,
        "inactive_anon":             stat.MemoryStats.Stats.InactiveAnon,
        "active_file":               stat.MemoryStats.Stats.ActiveFile,
        "pgfault":                   stat.MemoryStats.Stats.Pgfault,
        "inactive_file":             stat.MemoryStats.Stats.InactiveFile,
        "total_pgpgin":              stat.MemoryStats.Stats.TotalPgpgin,
    }
    acc.AddFields("docker_mem", memfields, tags, now)

    cpufields := map[string]interface{}{
        "usage_total":                  stat.CPUStats.CPUUsage.TotalUsage,
        "usage_in_usermode":            stat.CPUStats.CPUUsage.UsageInUsermode,
        "usage_in_kernelmode":          stat.CPUStats.CPUUsage.UsageInKernelmode,
        "usage_system":                 stat.CPUStats.SystemCPUUsage,
        "throttling_periods":           stat.CPUStats.ThrottlingData.Periods,
        "throttling_throttled_periods": stat.CPUStats.ThrottlingData.ThrottledPeriods,
        "throttling_throttled_time":    stat.CPUStats.ThrottlingData.ThrottledTime,
    }
    cputags := copyTags(tags)
    cputags["cpu"] = "cpu-total"
    acc.AddFields("docker_cpu", cpufields, cputags, now)

    for i, percpu := range stat.CPUStats.CPUUsage.PercpuUsage {
        percputags := copyTags(tags)
        percputags["cpu"] = fmt.Sprintf("cpu%d", i)
        acc.AddFields("docker_cpu", map[string]interface{}{"usage_total": percpu}, percputags, now)
    }

    for network, netstats := range stat.Networks {
        netfields := map[string]interface{}{
            "rx_dropped": netstats.RxDropped,
            "rx_bytes":   netstats.RxBytes,
            "rx_errors":  netstats.RxErrors,
            "tx_packets": netstats.TxPackets,
            "tx_dropped": netstats.TxDropped,
            "rx_packets": netstats.RxPackets,
            "tx_errors":  netstats.TxErrors,
            "tx_bytes":   netstats.TxBytes,
        }
        // Create a new network tag dictionary for the "network" tag
        nettags := copyTags(tags)
        nettags["network"] = network
        acc.AddFields("docker_net", netfields, nettags, now)
    }

    gatherBlockIOMetrics(stat, acc, tags, now)
}

func gatherBlockIOMetrics(
    stat *docker.Stats,
    acc telegraf.Accumulator,
    tags map[string]string,
    now time.Time,
) {
    blkioStats := stat.BlkioStats
    // Make a map of devices to their block io stats
    deviceStatMap := make(map[string]map[string]interface{})

    for _, metric := range blkioStats.IOServiceBytesRecursive {
        device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
        _, ok := deviceStatMap[device]
        if !ok {
            deviceStatMap[device] = make(map[string]interface{})
        }

        field := fmt.Sprintf("io_service_bytes_recursive_%s", strings.ToLower(metric.Op))
        deviceStatMap[device][field] = metric.Value
    }

    for _, metric := range blkioStats.IOServicedRecursive {
        device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
        _, ok := deviceStatMap[device]
        if !ok {
            deviceStatMap[device] = make(map[string]interface{})
        }

        field := fmt.Sprintf("io_serviced_recursive_%s", strings.ToLower(metric.Op))
        deviceStatMap[device][field] = metric.Value
    }

    for _, metric := range blkioStats.IOQueueRecursive {
        device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
        field := fmt.Sprintf("io_queue_recursive_%s", strings.ToLower(metric.Op))
        deviceStatMap[device][field] = metric.Value
    }

    for _, metric := range blkioStats.IOServiceTimeRecursive {
        device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
        field := fmt.Sprintf("io_service_time_recursive_%s", strings.ToLower(metric.Op))
        deviceStatMap[device][field] = metric.Value
    }

    for _, metric := range blkioStats.IOWaitTimeRecursive {
        device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
        field := fmt.Sprintf("io_wait_time_%s", strings.ToLower(metric.Op))
        deviceStatMap[device][field] = metric.Value
    }

    for _, metric := range blkioStats.IOMergedRecursive {
        device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
        field := fmt.Sprintf("io_merged_recursive_%s", strings.ToLower(metric.Op))
        deviceStatMap[device][field] = metric.Value
    }

    for _, metric := range blkioStats.IOTimeRecursive {
        device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
        field := fmt.Sprintf("io_time_recursive_%s", strings.ToLower(metric.Op))
        deviceStatMap[device][field] = metric.Value
    }

    for _, metric := range blkioStats.SectorsRecursive {
        device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
        field := fmt.Sprintf("sectors_recursive_%s", strings.ToLower(metric.Op))
        deviceStatMap[device][field] = metric.Value
    }

    for device, fields := range deviceStatMap {
        iotags := copyTags(tags)
        iotags["device"] = device
        acc.AddFields("docker_blkio", fields, iotags, now)
    }
}

func copyTags(in map[string]string) map[string]string {
    out := make(map[string]string)
    for k, v := range in {
        out[k] = v
    }
    return out
}

func sliceContains(in string, sl []string) bool {
    for _, str := range sl {
        if str == in {
            return true
        }
    }
    return false
}

func init() {
    inputs.Add("docker", func() telegraf.Input {
        return &Docker{}
    })
}
plugins/inputs/docker/docker_test.go (new file, 190 lines)
@@ -0,0 +1,190 @@
package system

import (
    "testing"
    "time"

    "github.com/influxdata/telegraf/testutil"

    "github.com/fsouza/go-dockerclient"
)

func TestDockerGatherContainerStats(t *testing.T) {
    var acc testutil.Accumulator
    stats := testStats()

    tags := map[string]string{
        "cont_id":    "foobarbaz",
        "cont_name":  "redis",
        "cont_image": "redis/image",
    }
    gatherContainerStats(stats, &acc, tags)

    // test docker_net measurement
    netfields := map[string]interface{}{
        "rx_dropped": uint64(1),
        "rx_bytes":   uint64(2),
        "rx_errors":  uint64(3),
        "tx_packets": uint64(4),
        "tx_dropped": uint64(1),
        "rx_packets": uint64(2),
        "tx_errors":  uint64(3),
        "tx_bytes":   uint64(4),
    }
    nettags := copyTags(tags)
    nettags["network"] = "eth0"
    acc.AssertContainsTaggedFields(t, "docker_net", netfields, nettags)

    // test docker_blkio measurement
    blkiotags := copyTags(tags)
    blkiotags["device"] = "6:0"
    blkiofields := map[string]interface{}{
        "io_service_bytes_recursive_read": uint64(100),
        "io_serviced_recursive_write":     uint64(101),
    }
    acc.AssertContainsTaggedFields(t, "docker_blkio", blkiofields, blkiotags)

    // test docker_mem measurement
    memfields := map[string]interface{}{
        "max_usage":                 uint64(1001),
        "usage":                     uint64(1111),
        "fail_count":                uint64(1),
        "limit":                     uint64(20),
        "total_pgmafault":           uint64(0),
        "cache":                     uint64(0),
        "mapped_file":               uint64(0),
        "total_inactive_file":       uint64(0),
        "pgpgout":                   uint64(0),
        "rss":                       uint64(0),
        "total_mapped_file":         uint64(0),
        "writeback":                 uint64(0),
        "unevictable":               uint64(0),
        "pgpgin":                    uint64(0),
        "total_unevictable":         uint64(0),
        "pgmajfault":                uint64(0),
        "total_rss":                 uint64(44),
        "total_rss_huge":            uint64(444),
        "total_writeback":           uint64(55),
        "total_inactive_anon":       uint64(0),
        "rss_huge":                  uint64(0),
        "hierarchical_memory_limit": uint64(0),
        "total_pgfault":             uint64(0),
        "total_active_file":         uint64(0),
        "active_anon":               uint64(0),
        "total_active_anon":         uint64(0),
        "total_pgpgout":             uint64(0),
        "total_cache":               uint64(0),
        "inactive_anon":             uint64(0),
        "active_file":               uint64(1),
        "pgfault":                   uint64(2),
        "inactive_file":             uint64(3),
        "total_pgpgin":              uint64(4),
    }
    acc.AssertContainsTaggedFields(t, "docker_mem", memfields, tags)

    // test docker_cpu measurement
    cputags := copyTags(tags)
    cputags["cpu"] = "cpu-total"
    cpufields := map[string]interface{}{
        "usage_total":                  uint64(500),
        "usage_in_usermode":            uint64(100),
        "usage_in_kernelmode":          uint64(200),
        "usage_system":                 uint64(100),
        "throttling_periods":           uint64(1),
        "throttling_throttled_periods": uint64(0),
        "throttling_throttled_time":    uint64(0),
    }
    acc.AssertContainsTaggedFields(t, "docker_cpu", cpufields, cputags)

    cputags["cpu"] = "cpu0"
    cpu0fields := map[string]interface{}{
        "usage_total": uint64(1),
    }
    acc.AssertContainsTaggedFields(t, "docker_cpu", cpu0fields, cputags)

    cputags["cpu"] = "cpu1"
    cpu1fields := map[string]interface{}{
        "usage_total": uint64(1002),
    }
    acc.AssertContainsTaggedFields(t, "docker_cpu", cpu1fields, cputags)
}

func testStats() *docker.Stats {
    stats := &docker.Stats{
        Read:     time.Now(),
        Networks: make(map[string]docker.NetworkStats),
    }

    stats.CPUStats.CPUUsage.PercpuUsage = []uint64{1, 1002}
    stats.CPUStats.CPUUsage.UsageInUsermode = 100
    stats.CPUStats.CPUUsage.TotalUsage = 500
    stats.CPUStats.CPUUsage.UsageInKernelmode = 200
    stats.CPUStats.SystemCPUUsage = 100
    stats.CPUStats.ThrottlingData.Periods = 1

    stats.MemoryStats.Stats.TotalPgmafault = 0
    stats.MemoryStats.Stats.Cache = 0
    stats.MemoryStats.Stats.MappedFile = 0
    stats.MemoryStats.Stats.TotalInactiveFile = 0
    stats.MemoryStats.Stats.Pgpgout = 0
    stats.MemoryStats.Stats.Rss = 0
    stats.MemoryStats.Stats.TotalMappedFile = 0
    stats.MemoryStats.Stats.Writeback = 0
    stats.MemoryStats.Stats.Unevictable = 0
    stats.MemoryStats.Stats.Pgpgin = 0
    stats.MemoryStats.Stats.TotalUnevictable = 0
    stats.MemoryStats.Stats.Pgmajfault = 0
    stats.MemoryStats.Stats.TotalRss = 44
    stats.MemoryStats.Stats.TotalRssHuge = 444
    stats.MemoryStats.Stats.TotalWriteback = 55
    stats.MemoryStats.Stats.TotalInactiveAnon = 0
    stats.MemoryStats.Stats.RssHuge = 0
    stats.MemoryStats.Stats.HierarchicalMemoryLimit = 0
    stats.MemoryStats.Stats.TotalPgfault = 0
    stats.MemoryStats.Stats.TotalActiveFile = 0
    stats.MemoryStats.Stats.ActiveAnon = 0
    stats.MemoryStats.Stats.TotalActiveAnon = 0
    stats.MemoryStats.Stats.TotalPgpgout = 0
    stats.MemoryStats.Stats.TotalCache = 0
    stats.MemoryStats.Stats.InactiveAnon = 0
    stats.MemoryStats.Stats.ActiveFile = 1
    stats.MemoryStats.Stats.Pgfault = 2
    stats.MemoryStats.Stats.InactiveFile = 3
    stats.MemoryStats.Stats.TotalPgpgin = 4

    stats.MemoryStats.MaxUsage = 1001
    stats.MemoryStats.Usage = 1111
    stats.MemoryStats.Failcnt = 1
    stats.MemoryStats.Limit = 20

    stats.Networks["eth0"] = docker.NetworkStats{
        RxDropped: 1,
        RxBytes:   2,
        RxErrors:  3,
        TxPackets: 4,
        TxDropped: 1,
        RxPackets: 2,
        TxErrors:  3,
        TxBytes:   4,
    }

    sbr := docker.BlkioStatsEntry{
        Major: 6,
        Minor: 0,
        Op:    "read",
        Value: 100,
    }
    sr := docker.BlkioStatsEntry{
        Major: 6,
        Minor: 0,
        Op:    "write",
        Value: 101,
    }

    stats.BlkioStats.IOServiceBytesRecursive = append(
        stats.BlkioStats.IOServiceBytesRecursive, sbr)
    stats.BlkioStats.IOServicedRecursive = append(
        stats.BlkioStats.IOServicedRecursive, sr)

    return stats
}
plugins/inputs/elasticsearch/elasticsearch.go
@@ -2,12 +2,16 @@ package elasticsearch

 import (
     "encoding/json"
+    "errors"
     "fmt"
     "net/http"
+    "strings"
     "sync"
     "time"

-    "github.com/influxdb/telegraf/internal"
-    "github.com/influxdb/telegraf/plugins/inputs"
+    "github.com/influxdata/telegraf"
+    "github.com/influxdata/telegraf/internal"
+    "github.com/influxdata/telegraf/plugins/inputs"
 )

 const statsPath = "/_nodes/stats"
@@ -92,25 +96,45 @@ func (e *Elasticsearch) Description() string {

 // Gather reads the stats from Elasticsearch and writes it to the
 // Accumulator.
-func (e *Elasticsearch) Gather(acc inputs.Accumulator) error {
+func (e *Elasticsearch) Gather(acc telegraf.Accumulator) error {
+    errChan := make(chan error, len(e.Servers))
+    var wg sync.WaitGroup
+    wg.Add(len(e.Servers))
+
     for _, serv := range e.Servers {
-        var url string
-        if e.Local {
-            url = serv + statsPathLocal
-        } else {
-            url = serv + statsPath
-        }
-        if err := e.gatherNodeStats(url, acc); err != nil {
-            return err
-        }
-        if e.ClusterHealth {
-            e.gatherClusterStats(fmt.Sprintf("%s/_cluster/health?level=indices", serv), acc)
-        }
+        go func(s string, acc telegraf.Accumulator) {
+            defer wg.Done()
+            var url string
+            if e.Local {
+                url = s + statsPathLocal
+            } else {
+                url = s + statsPath
+            }
+            if err := e.gatherNodeStats(url, acc); err != nil {
+                errChan <- err
+                return
+            }
+            if e.ClusterHealth {
+                e.gatherClusterStats(fmt.Sprintf("%s/_cluster/health?level=indices", s), acc)
+            }
+        }(serv, acc)
     }
-    return nil
+
+    wg.Wait()
+    close(errChan)
+    // Get all errors and return them as one giant error
+    errStrings := []string{}
+    for err := range errChan {
+        errStrings = append(errStrings, err.Error())
+    }
+
+    if len(errStrings) == 0 {
+        return nil
+    }
+    return errors.New(strings.Join(errStrings, "\n"))
 }

-func (e *Elasticsearch) gatherNodeStats(url string, acc inputs.Accumulator) error {
+func (e *Elasticsearch) gatherNodeStats(url string, acc telegraf.Accumulator) error {
     nodeStats := &struct {
         ClusterName string           `json:"cluster_name"`
         Nodes       map[string]*node `json:"nodes"`
@@ -155,7 +179,7 @@ func (e *Elasticsearch) gatherNodeStats(url string, acc inputs.Accumulator) erro
     return nil
 }

-func (e *Elasticsearch) gatherClusterStats(url string, acc inputs.Accumulator) error {
+func (e *Elasticsearch) gatherClusterStats(url string, acc telegraf.Accumulator) error {
     clusterStats := &clusterHealth{}
     if err := e.gatherData(url, clusterStats); err != nil {
         return err
@@ -220,7 +244,7 @@ func (e *Elasticsearch) gatherData(url string, v interface{}) error {
 }

 func init() {
-    inputs.Add("elasticsearch", func() inputs.Input {
+    inputs.Add("elasticsearch", func() telegraf.Input {
         return NewElasticsearch()
     })
 }
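The rewritten Gather above fans out one goroutine per server and joins their failures afterwards. The same pattern in isolation, as a generic sketch (the gatherAll helper is illustrative, not part of this changeset):

package example

import (
    "errors"
    "strings"
    "sync"
)

// gatherAll runs gather once per server concurrently and returns all
// failures joined into a single error, mirroring the Gather rewrite.
func gatherAll(servers []string, gather func(string) error) error {
    errChan := make(chan error, len(servers)) // buffered: sends never block
    var wg sync.WaitGroup
    wg.Add(len(servers))
    for _, s := range servers {
        go func(s string) {
            defer wg.Done()
            if err := gather(s); err != nil {
                errChan <- err
            }
        }(s)
    }
    wg.Wait()
    close(errChan) // safe: all senders have finished

    var errStrings []string
    for err := range errChan {
        errStrings = append(errStrings, err.Error())
    }
    if len(errStrings) == 0 {
        return nil
    }
    return errors.New(strings.Join(errStrings, "\n"))
}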
plugins/inputs/elasticsearch/elasticsearch_test.go
@@ -6,7 +6,7 @@ import (
     "strings"
     "testing"

-    "github.com/influxdb/telegraf/testutil"
+    "github.com/influxdata/telegraf/testutil"

     "github.com/stretchr/testify/require"
 )

@@ -562,6 +562,9 @@ var indicesExpected = map[string]interface{}{
 }

 var osExpected = map[string]interface{}{
+    "load_average_0":     float64(0.01),
+    "load_average_1":     float64(0.04),
+    "load_average_2":     float64(0.05),
     "swap_used_in_bytes": float64(0),
     "swap_free_in_bytes": float64(487997440),
     "timestamp":          float64(1436460392944),
@@ -724,10 +727,13 @@ var threadPoolExpected = map[string]interface{}{
 }

 var fsExpected = map[string]interface{}{
-    "timestamp":                float64(1436460392946),
-    "total_free_in_bytes":      float64(16909316096),
-    "total_available_in_bytes": float64(15894814720),
-    "total_total_in_bytes":     float64(19507089408),
+    "data_0_total_in_bytes":     float64(19507089408),
+    "data_0_free_in_bytes":      float64(16909316096),
+    "data_0_available_in_bytes": float64(15894814720),
+    "timestamp":                 float64(1436460392946),
+    "total_free_in_bytes":       float64(16909316096),
+    "total_available_in_bytes":  float64(15894814720),
+    "total_total_in_bytes":      float64(19507089408),
 }

 var transportExpected = map[string]interface{}{
@@ -1,42 +1,82 @@
# Exec Plugin
# Exec Input Plugin

The exec plugin can execute arbitrary commands which output JSON. Then it flattens JSON and finds
all numeric values, treating them as floats.
The exec plugin can execute arbitrary commands which output JSON or
InfluxDB [line-protocol](https://docs.influxdata.com/influxdb/v0.9/write_protocols/line/).

For example, if you have a json-returning command called mycollector, you could
setup the exec plugin with:
If using JSON, only numeric values are parsed and turned into floats. Booleans
and strings will be ignored.

### Configuration

```
[[exec.commands]]
command = "/usr/bin/mycollector --output=json"
name = "mycollector"
interval = 10
# Read flattened metrics from one or more commands that output JSON to stdout
[[inputs.exec]]
  # the command to run
  command = "/usr/bin/mycollector --foo=bar"

  # Data format to consume. This can be "json" or "influx" (line-protocol)
  # NOTE json only reads numerical measurements, strings and booleans are ignored.
  data_format = "json"

  # measurement name suffix (for separating different commands)
  name_suffix = "_mycollector"
```

The name is used as a prefix for the measurements.
Other options for modifying the measurement names are:

The interval is used to determine how often a particular command should be run. Each
time the exec plugin runs, it will only run a particular command if it has been at least
`interval` seconds since the exec plugin last ran the command.
```
name_override = "measurement_name"
name_prefix = "prefix_"
```

### Example 1

# Sample
Let's say that we have the above configuration, and mycollector outputs the
following JSON:

Let's say that we have a command named "mycollector", which gives the following output:
```json
{
    "a": 0.5,
    "b": {
        "c": "some text",
        "d": 0.1,
        "e": 5
        "c": 0.1,
        "d": 5
    }
}
```

The collected metrics will be:
The collected metrics will be stored as fields under the measurement
"exec_mycollector":

```
exec_mycollector_a value=0.5
exec_mycollector_b_d value=0.1
exec_mycollector_b_e value=5
exec_mycollector a=0.5,b_c=0.1,b_d=5 1452815002357578567
```

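Any executable that writes JSON to stdout can serve as the command. As an illustrative sketch only (the real `mycollector` is hypothetical), a Go program producing the output above could look like:

```go
package main

import (
	"encoding/json"
	"os"
)

func main() {
	// Emit the JSON shown above; the exec plugin flattens it and keeps
	// only the numeric values ("a", "b.c", "b.d") as fields.
	out := map[string]interface{}{
		"a": 0.5,
		"b": map[string]interface{}{
			"c": 0.1,
			"d": 5,
		},
	}
	if err := json.NewEncoder(os.Stdout).Encode(out); err != nil {
		os.Exit(1)
	}
}
```
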
### Example 2

Now let's say we have the following configuration:

```
[[inputs.exec]]
  # the command to run
  command = "/usr/bin/line_protocol_collector"

  # Data format to consume. This can be "json" or "influx" (line-protocol)
  # NOTE json only reads numerical measurements, strings and booleans are ignored.
  data_format = "influx"
```

And line_protocol_collector outputs the following line protocol:

```
cpu,cpu=cpu0,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu1,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu2,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu3,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu4,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu5,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu6,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
```

You will get data in InfluxDB exactly as it is defined above: the tags are
cpu=cpuN, host=foo, and datacenter=us-east, with fields usage_idle and
usage_busy. Each metric receives a timestamp at collection time.

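The `line_protocol_collector` binary above is hypothetical; as a minimal sketch, a command emitting that output could be:

```go
package main

import "fmt"

func main() {
	// One line of InfluxDB line protocol per CPU; with no timestamp on
	// the line, Telegraf stamps each metric at collection time.
	for i := 0; i < 7; i++ {
		fmt.Printf("cpu,cpu=cpu%d,host=foo,datacenter=us-east usage_idle=99,usage_busy=1\n", i)
	}
}
```
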
@@ -5,23 +5,30 @@ import (
	"encoding/json"
	"fmt"
	"os/exec"
	"time"

	"github.com/gonuts/go-shellquote"

	"github.com/influxdb/telegraf/internal"
	"github.com/influxdb/telegraf/plugins/inputs"
	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/internal"
	"github.com/influxdata/telegraf/plugins/inputs"
)

const sampleConfig = `
  # the command to run
  command = "/usr/bin/mycollector --foo=bar"

  # Data format to consume. This can be "json" or "influx" (line-protocol)
  # NOTE json only reads numerical measurements, strings and booleans are ignored.
  data_format = "json"

  # measurement name suffix (for separating different commands)
  name_suffix = "_mycollector"
`

type Exec struct {
	Command string
	Command    string
	DataFormat string

	runner Runner
}
@@ -61,31 +68,43 @@ func (e *Exec) Description() string {
	return "Read flattened metrics from one or more commands that output JSON to stdout"
}

func (e *Exec) Gather(acc inputs.Accumulator) error {
func (e *Exec) Gather(acc telegraf.Accumulator) error {
	out, err := e.runner.Run(e)
	if err != nil {
		return err
	}

	var jsonOut interface{}
	err = json.Unmarshal(out, &jsonOut)
	if err != nil {
		return fmt.Errorf("exec: unable to parse output of '%s' as JSON, %s",
			e.Command, err)
	}
	switch e.DataFormat {
	case "", "json":
		var jsonOut interface{}
		err = json.Unmarshal(out, &jsonOut)
		if err != nil {
			return fmt.Errorf("exec: unable to parse output of '%s' as JSON, %s",
				e.Command, err)
		}

	f := internal.JSONFlattener{}
	err = f.FlattenJSON("", jsonOut)
	if err != nil {
		f := internal.JSONFlattener{}
		err = f.FlattenJSON("", jsonOut)
		if err != nil {
			return err
		}
		acc.AddFields("exec", f.Fields, nil)
	case "influx":
		now := time.Now()
		metrics, err := telegraf.ParseMetrics(out)
		for _, metric := range metrics {
			acc.AddFields(metric.Name(), metric.Fields(), metric.Tags(), now)
		}
		return err
	default:
		return fmt.Errorf("Unsupported data format: %s. Must be either json "+
			"or influx.", e.DataFormat)
	}

	acc.AddFields("exec", f.Fields, nil)
	return nil
}

func init() {
	inputs.Add("exec", func() inputs.Input {
	inputs.Add("exec", func() telegraf.Input {
		return NewExec()
	})
}

@@ -4,7 +4,7 @@ import (
	"fmt"
	"testing"

	"github.com/influxdb/telegraf/testutil"
	"github.com/influxdata/telegraf/testutil"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)
@@ -31,6 +31,18 @@ const malformedJson = `
  "status": "green",
`

const lineProtocol = "cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1"

const lineProtocolMulti = `
cpu,cpu=cpu0,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu1,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu2,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu3,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu4,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu5,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu6,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
`

type runnerMock struct {
	out []byte
	err error
@@ -59,13 +71,17 @@ func TestExec(t *testing.T) {
	var acc testutil.Accumulator
	err := e.Gather(&acc)
	require.NoError(t, err)
	assert.Equal(t, acc.NFields(), 4, "non-numeric measurements should be ignored")
	assert.Equal(t, acc.NFields(), 8, "non-numeric measurements should be ignored")

	fields := map[string]interface{}{
		"num_processes": float64(82),
		"cpu_used":      float64(8234),
		"cpu_free":      float64(32),
		"percent":       float64(0.81),
		"users_0":       float64(0),
		"users_1":       float64(1),
		"users_2":       float64(2),
		"users_3":       float64(3),
	}
	acc.AssertContainsFields(t, "exec", fields)
}
@@ -93,3 +109,64 @@ func TestCommandError(t *testing.T) {
	require.Error(t, err)
	assert.Equal(t, acc.NFields(), 0, "No new points should have been added")
}

func TestLineProtocolParse(t *testing.T) {
	e := &Exec{
		runner:     newRunnerMock([]byte(lineProtocol), nil),
		Command:    "line-protocol",
		DataFormat: "influx",
	}

	var acc testutil.Accumulator
	err := e.Gather(&acc)
	require.NoError(t, err)

	fields := map[string]interface{}{
		"usage_idle": float64(99),
		"usage_busy": float64(1),
	}
	tags := map[string]string{
		"host":       "foo",
		"datacenter": "us-east",
	}
	acc.AssertContainsTaggedFields(t, "cpu", fields, tags)
}

func TestLineProtocolParseMultiple(t *testing.T) {
	e := &Exec{
		runner:     newRunnerMock([]byte(lineProtocolMulti), nil),
		Command:    "line-protocol",
		DataFormat: "influx",
	}

	var acc testutil.Accumulator
	err := e.Gather(&acc)
	require.NoError(t, err)

	fields := map[string]interface{}{
		"usage_idle": float64(99),
		"usage_busy": float64(1),
	}
	tags := map[string]string{
		"host":       "foo",
		"datacenter": "us-east",
	}
	cpuTags := []string{"cpu0", "cpu1", "cpu2", "cpu3", "cpu4", "cpu5", "cpu6"}

	for _, cpu := range cpuTags {
		tags["cpu"] = cpu
		acc.AssertContainsTaggedFields(t, "cpu", fields, tags)
	}
}

func TestInvalidDataFormat(t *testing.T) {
	e := &Exec{
		runner:     newRunnerMock([]byte(lineProtocol), nil),
		Command:    "bad data format",
		DataFormat: "FooBar",
	}

	var acc testutil.Accumulator
	err := e.Gather(&acc)
	require.Error(t, err)
}

369	plugins/inputs/github_webhooks/README.md	Normal file
@@ -0,0 +1,369 @@
# github_webhooks

This is a Telegraf service plugin that listens for events kicked off by GitHub's Webhooks service and persists data from them into configured outputs. To set up the listener, first generate the proper configuration:
```sh
$ telegraf -sample-config -input-filter github_webhooks -output-filter influxdb > config.conf.new
```
Change the config file to point to the InfluxDB server you are using and adjust the settings to match your environment. Once that is complete:
```sh
$ cp config.conf.new /etc/telegraf/telegraf.conf
$ sudo service telegraf start
```
Once the server is running you should configure your organization's webhooks to point at the `github_webhooks` service. To do this, go to `github.com/{my_organization}` and click `Settings > Webhooks > Add webhook`. In the resulting menu, set `Payload URL` to `http://<my_ip>:1618`, set `Content type` to `application/json`, and under the section `Which events would you like to trigger this webhook?` select **Send me everything**. By default all of the events will write to the `github_webhooks` measurement; this is configurable by setting the `measurement_name` in the config file.
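The relevant section of the generated config will look roughly like this (a sketch based on the plugin's sample config; adjust `service_address` to your environment):

```
[[inputs.github_webhooks]]
  # Address and port to host Webhook listener on
  service_address = ":1618"
```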

## Events

The titles of the following sections are links to the full payloads and details for each event. The body of each section lists the information from the event that is persisted. The format is as follows:
```
# TAGS
* 'tagKey' = `tagValue` type
# FIELDS
* 'fieldKey' = `fieldValue` type
```
The tag values and field values show where on the incoming JSON object the data is sourced from.
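For example (with hypothetical values), a `commit_comment` event would be persisted as a point on the `github_webhooks` measurement roughly like:

```
github_webhooks,event=commit_comment,repository=foo/bar,private=false,user=octocat,admin=false stars=319,forks=4,issues=5,commit="abc123",comment="nice change" 1455136310000000000
```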

#### [`commit_comment` event](https://developer.github.com/v3/activity/events/types/#commitcommentevent)

**Tags:**
* 'event' = `headers[X-Github-Event]` string
* 'repository' = `event.repository.full_name` string
* 'private' = `event.repository.private` bool
* 'user' = `event.sender.login` string
* 'admin' = `event.sender.site_admin` bool

**Fields:**
* 'stars' = `event.repository.stargazers_count` int
* 'forks' = `event.repository.forks_count` int
* 'issues' = `event.repository.open_issues_count` int
* 'commit' = `event.comment.commit_id` string
* 'comment' = `event.comment.body` string

#### [`create` event](https://developer.github.com/v3/activity/events/types/#createevent)

**Tags:**
* 'event' = `headers[X-Github-Event]` string
* 'repository' = `event.repository.full_name` string
* 'private' = `event.repository.private` bool
* 'user' = `event.sender.login` string
* 'admin' = `event.sender.site_admin` bool

**Fields:**
* 'stars' = `event.repository.stargazers_count` int
* 'forks' = `event.repository.forks_count` int
* 'issues' = `event.repository.open_issues_count` int
* 'ref' = `event.ref` string
* 'refType' = `event.ref_type` string

#### [`delete` event](https://developer.github.com/v3/activity/events/types/#deleteevent)

**Tags:**
* 'event' = `headers[X-Github-Event]` string
* 'repository' = `event.repository.full_name` string
* 'private' = `event.repository.private` bool
* 'user' = `event.sender.login` string
* 'admin' = `event.sender.site_admin` bool

**Fields:**
* 'stars' = `event.repository.stargazers_count` int
* 'forks' = `event.repository.forks_count` int
* 'issues' = `event.repository.open_issues_count` int
* 'ref' = `event.ref` string
* 'refType' = `event.ref_type` string

#### [`deployment` event](https://developer.github.com/v3/activity/events/types/#deploymentevent)

**Tags:**
* 'event' = `headers[X-Github-Event]` string
* 'repository' = `event.repository.full_name` string
* 'private' = `event.repository.private` bool
* 'user' = `event.sender.login` string
* 'admin' = `event.sender.site_admin` bool

**Fields:**
* 'stars' = `event.repository.stargazers_count` int
* 'forks' = `event.repository.forks_count` int
* 'issues' = `event.repository.open_issues_count` int
* 'commit' = `event.deployment.sha` string
* 'task' = `event.deployment.task` string
* 'environment' = `event.deployment.environment` string
* 'description' = `event.deployment.description` string

#### [`deployment_status` event](https://developer.github.com/v3/activity/events/types/#deploymentstatusevent)

**Tags:**
* 'event' = `headers[X-Github-Event]` string
* 'repository' = `event.repository.full_name` string
* 'private' = `event.repository.private` bool
* 'user' = `event.sender.login` string
* 'admin' = `event.sender.site_admin` bool

**Fields:**
* 'stars' = `event.repository.stargazers_count` int
* 'forks' = `event.repository.forks_count` int
* 'issues' = `event.repository.open_issues_count` int
* 'commit' = `event.deployment.sha` string
* 'task' = `event.deployment.task` string
* 'environment' = `event.deployment.environment` string
* 'description' = `event.deployment.description` string
* 'depState' = `event.deployment_status.state` string
* 'depDescription' = `event.deployment_status.description` string

#### [`fork` event](https://developer.github.com/v3/activity/events/types/#forkevent)

**Tags:**
* 'event' = `headers[X-Github-Event]` string
* 'repository' = `event.repository.full_name` string
* 'private' = `event.repository.private` bool
* 'user' = `event.sender.login` string
* 'admin' = `event.sender.site_admin` bool

**Fields:**
* 'stars' = `event.repository.stargazers_count` int
* 'forks' = `event.repository.forks_count` int
* 'issues' = `event.repository.open_issues_count` int
* 'fork' = `event.forkee.full_name` string

#### [`gollum` event](https://developer.github.com/v3/activity/events/types/#gollumevent)

**Tags:**
* 'event' = `headers[X-Github-Event]` string
* 'repository' = `event.repository.full_name` string
* 'private' = `event.repository.private` bool
* 'user' = `event.sender.login` string
* 'admin' = `event.sender.site_admin` bool

**Fields:**
* 'stars' = `event.repository.stargazers_count` int
* 'forks' = `event.repository.forks_count` int
* 'issues' = `event.repository.open_issues_count` int

#### [`issue_comment` event](https://developer.github.com/v3/activity/events/types/#issuecommentevent)

**Tags:**
* 'event' = `headers[X-Github-Event]` string
* 'repository' = `event.repository.full_name` string
* 'private' = `event.repository.private` bool
* 'user' = `event.sender.login` string
* 'admin' = `event.sender.site_admin` bool
* 'issue' = `event.issue.number` int

**Fields:**
* 'stars' = `event.repository.stargazers_count` int
* 'forks' = `event.repository.forks_count` int
* 'issues' = `event.repository.open_issues_count` int
* 'title' = `event.issue.title` string
* 'comments' = `event.issue.comments` int
* 'body' = `event.comment.body` string

#### [`issues` event](https://developer.github.com/v3/activity/events/types/#issuesevent)

**Tags:**
* 'event' = `headers[X-Github-Event]` string
* 'repository' = `event.repository.full_name` string
* 'private' = `event.repository.private` bool
* 'user' = `event.sender.login` string
* 'admin' = `event.sender.site_admin` bool
* 'issue' = `event.issue.number` int
* 'action' = `event.action` string

**Fields:**
* 'stars' = `event.repository.stargazers_count` int
* 'forks' = `event.repository.forks_count` int
* 'issues' = `event.repository.open_issues_count` int
* 'title' = `event.issue.title` string
* 'comments' = `event.issue.comments` int

#### [`member` event](https://developer.github.com/v3/activity/events/types/#memberevent)

**Tags:**
* 'event' = `headers[X-Github-Event]` string
* 'repository' = `event.repository.full_name` string
* 'private' = `event.repository.private` bool
* 'user' = `event.sender.login` string
* 'admin' = `event.sender.site_admin` bool

**Fields:**
* 'stars' = `event.repository.stargazers_count` int
* 'forks' = `event.repository.forks_count` int
* 'issues' = `event.repository.open_issues_count` int
* 'newMember' = `event.member.login` string
* 'newMemberStatus' = `event.member.site_admin` bool

#### [`membership` event](https://developer.github.com/v3/activity/events/types/#membershipevent)

**Tags:**
* 'event' = `headers[X-Github-Event]` string
* 'user' = `event.sender.login` string
* 'admin' = `event.sender.site_admin` bool
* 'action' = `event.action` string

**Fields:**
* 'newMember' = `event.member.login` string
* 'newMemberStatus' = `event.member.site_admin` bool

#### [`page_build` event](https://developer.github.com/v3/activity/events/types/#pagebuildevent)

**Tags:**
* 'event' = `headers[X-Github-Event]` string
* 'repository' = `event.repository.full_name` string
* 'private' = `event.repository.private` bool
* 'user' = `event.sender.login` string
* 'admin' = `event.sender.site_admin` bool

**Fields:**
* 'stars' = `event.repository.stargazers_count` int
* 'forks' = `event.repository.forks_count` int
* 'issues' = `event.repository.open_issues_count` int

#### [`public` event](https://developer.github.com/v3/activity/events/types/#publicevent)

**Tags:**
* 'event' = `headers[X-Github-Event]` string
* 'repository' = `event.repository.full_name` string
* 'private' = `event.repository.private` bool
* 'user' = `event.sender.login` string
* 'admin' = `event.sender.site_admin` bool

**Fields:**
* 'stars' = `event.repository.stargazers_count` int
* 'forks' = `event.repository.forks_count` int
* 'issues' = `event.repository.open_issues_count` int

#### [`pull_request_review_comment` event](https://developer.github.com/v3/activity/events/types/#pullrequestreviewcommentevent)

**Tags:**
* 'event' = `headers[X-Github-Event]` string
* 'action' = `event.action` string
* 'repository' = `event.repository.full_name` string
* 'private' = `event.repository.private` bool
* 'user' = `event.sender.login` string
* 'admin' = `event.sender.site_admin` bool
* 'prNumber' = `event.pull_request.number` int

**Fields:**
* 'stars' = `event.repository.stargazers_count` int
* 'forks' = `event.repository.forks_count` int
* 'issues' = `event.repository.open_issues_count` int
* 'state' = `event.pull_request.state` string
* 'title' = `event.pull_request.title` string
* 'comments' = `event.pull_request.comments` int
* 'commits' = `event.pull_request.commits` int
* 'additions' = `event.pull_request.additions` int
* 'deletions' = `event.pull_request.deletions` int
* 'changedFiles' = `event.pull_request.changed_files` int
* 'commentFile' = `event.comment.path` string
* 'comment' = `event.comment.body` string

#### [`pull_request` event](https://developer.github.com/v3/activity/events/types/#pullrequestevent)

**Tags:**
* 'event' = `headers[X-Github-Event]` string
* 'action' = `event.action` string
* 'repository' = `event.repository.full_name` string
* 'private' = `event.repository.private` bool
* 'user' = `event.sender.login` string
* 'admin' = `event.sender.site_admin` bool
* 'prNumber' = `event.pull_request.number` int

**Fields:**
* 'stars' = `event.repository.stargazers_count` int
* 'forks' = `event.repository.forks_count` int
* 'issues' = `event.repository.open_issues_count` int
* 'state' = `event.pull_request.state` string
* 'title' = `event.pull_request.title` string
* 'comments' = `event.pull_request.comments` int
* 'commits' = `event.pull_request.commits` int
* 'additions' = `event.pull_request.additions` int
* 'deletions' = `event.pull_request.deletions` int
* 'changedFiles' = `event.pull_request.changed_files` int

#### [`push` event](https://developer.github.com/v3/activity/events/types/#pushevent)

**Tags:**
* 'event' = `headers[X-Github-Event]` string
* 'repository' = `event.repository.full_name` string
* 'private' = `event.repository.private` bool
* 'user' = `event.sender.login` string
* 'admin' = `event.sender.site_admin` bool

**Fields:**
* 'stars' = `event.repository.stargazers_count` int
* 'forks' = `event.repository.forks_count` int
* 'issues' = `event.repository.open_issues_count` int
* 'ref' = `event.ref` string
* 'before' = `event.before` string
* 'after' = `event.after` string

#### [`repository` event](https://developer.github.com/v3/activity/events/types/#repositoryevent)

**Tags:**
* 'event' = `headers[X-Github-Event]` string
* 'repository' = `event.repository.full_name` string
* 'private' = `event.repository.private` bool
* 'user' = `event.sender.login` string
* 'admin' = `event.sender.site_admin` bool

**Fields:**
* 'stars' = `event.repository.stargazers_count` int
* 'forks' = `event.repository.forks_count` int
* 'issues' = `event.repository.open_issues_count` int

#### [`release` event](https://developer.github.com/v3/activity/events/types/#releaseevent)

**Tags:**
* 'event' = `headers[X-Github-Event]` string
* 'repository' = `event.repository.full_name` string
* 'private' = `event.repository.private` bool
* 'user' = `event.sender.login` string
* 'admin' = `event.sender.site_admin` bool

**Fields:**
* 'stars' = `event.repository.stargazers_count` int
* 'forks' = `event.repository.forks_count` int
* 'issues' = `event.repository.open_issues_count` int
* 'tagName' = `event.release.tag_name` string

#### [`status` event](https://developer.github.com/v3/activity/events/types/#statusevent)

**Tags:**
* 'event' = `headers[X-Github-Event]` string
* 'repository' = `event.repository.full_name` string
* 'private' = `event.repository.private` bool
* 'user' = `event.sender.login` string
* 'admin' = `event.sender.site_admin` bool

**Fields:**
* 'stars' = `event.repository.stargazers_count` int
* 'forks' = `event.repository.forks_count` int
* 'issues' = `event.repository.open_issues_count` int
* 'commit' = `event.sha` string
* 'state' = `event.state` string

#### [`team_add` event](https://developer.github.com/v3/activity/events/types/#teamaddevent)

**Tags:**
* 'event' = `headers[X-Github-Event]` string
* 'repository' = `event.repository.full_name` string
* 'private' = `event.repository.private` bool
* 'user' = `event.sender.login` string
* 'admin' = `event.sender.site_admin` bool

**Fields:**
* 'stars' = `event.repository.stargazers_count` int
* 'forks' = `event.repository.forks_count` int
* 'issues' = `event.repository.open_issues_count` int
* 'teamName' = `event.team.name` string

#### [`watch` event](https://developer.github.com/v3/activity/events/types/#watchevent)

**Tags:**
* 'event' = `headers[X-Github-Event]` string
* 'repository' = `event.repository.full_name` string
* 'private' = `event.repository.private` bool
* 'user' = `event.sender.login` string
* 'admin' = `event.sender.site_admin` bool

**Fields:**
* 'stars' = `event.repository.stargazers_count` int
* 'forks' = `event.repository.forks_count` int
* 'issues' = `event.repository.open_issues_count` int
335	plugins/inputs/github_webhooks/github_webhooks.go	Normal file
@@ -0,0 +1,335 @@
package github_webhooks

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
	"sync"

	"github.com/gorilla/mux"
	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/inputs"
)

func init() {
	inputs.Add("github_webhooks", func() telegraf.Input { return &GithubWebhooks{} })
}

type GithubWebhooks struct {
	ServiceAddress string
	// Lock for the struct
	sync.Mutex
	// Events buffer to store events between Gather calls
	events []Event
}

func NewGithubWebhooks() *GithubWebhooks {
	return &GithubWebhooks{}
}

func (gh *GithubWebhooks) SampleConfig() string {
	return `
  # Address and port to host Webhook listener on
  service_address = ":1618"
`
}

func (gh *GithubWebhooks) Description() string {
	return "A Github Webhook Event collector"
}

// Gather writes the buffered events to the Accumulator and clears the buffer.
func (gh *GithubWebhooks) Gather(acc telegraf.Accumulator) error {
	gh.Lock()
	defer gh.Unlock()
	for _, event := range gh.events {
		p := event.NewMetric()
		acc.AddFields("github_webhooks", p.Fields(), p.Tags(), p.Time())
	}
	gh.events = make([]Event, 0)
	return nil
}

func (gh *GithubWebhooks) Listen() {
	r := mux.NewRouter()
	r.HandleFunc("/", gh.eventHandler).Methods("POST")
	err := http.ListenAndServe(gh.ServiceAddress, r)
	if err != nil {
		log.Printf("Error starting server: %v", err)
	}
}

func (gh *GithubWebhooks) Start() error {
	go gh.Listen()
	log.Printf("Started the github_webhooks service on %s\n", gh.ServiceAddress)
	return nil
}

func (gh *GithubWebhooks) Stop() {
	log.Println("Stopping the github_webhooks service")
}

// eventHandler handles the / route. It buffers the parsed event for the next
// Gather call, and reports a 400 if the payload cannot be read or parsed.
func (gh *GithubWebhooks) eventHandler(w http.ResponseWriter, r *http.Request) {
	eventType := r.Header["X-Github-Event"][0]
	data, err := ioutil.ReadAll(r.Body)
	if err != nil {
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	e, err := NewEvent(data, eventType)
	if err != nil {
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	gh.Lock()
	gh.events = append(gh.events, e)
	gh.Unlock()
	w.WriteHeader(http.StatusOK)
}

func newCommitComment(data []byte) (Event, error) {
	commitCommentStruct := CommitCommentEvent{}
	err := json.Unmarshal(data, &commitCommentStruct)
	if err != nil {
		return nil, err
	}
	return commitCommentStruct, nil
}

func newCreate(data []byte) (Event, error) {
	createStruct := CreateEvent{}
	err := json.Unmarshal(data, &createStruct)
	if err != nil {
		return nil, err
	}
	return createStruct, nil
}

func newDelete(data []byte) (Event, error) {
	deleteStruct := DeleteEvent{}
	err := json.Unmarshal(data, &deleteStruct)
	if err != nil {
		return nil, err
	}
	return deleteStruct, nil
}

func newDeployment(data []byte) (Event, error) {
	deploymentStruct := DeploymentEvent{}
	err := json.Unmarshal(data, &deploymentStruct)
	if err != nil {
		return nil, err
	}
	return deploymentStruct, nil
}

func newDeploymentStatus(data []byte) (Event, error) {
	deploymentStatusStruct := DeploymentStatusEvent{}
	err := json.Unmarshal(data, &deploymentStatusStruct)
	if err != nil {
		return nil, err
	}
	return deploymentStatusStruct, nil
}

func newFork(data []byte) (Event, error) {
	forkStruct := ForkEvent{}
	err := json.Unmarshal(data, &forkStruct)
	if err != nil {
		return nil, err
	}
	return forkStruct, nil
}

func newGollum(data []byte) (Event, error) {
	gollumStruct := GollumEvent{}
	err := json.Unmarshal(data, &gollumStruct)
	if err != nil {
		return nil, err
	}
	return gollumStruct, nil
}

func newIssueComment(data []byte) (Event, error) {
	issueCommentStruct := IssueCommentEvent{}
	err := json.Unmarshal(data, &issueCommentStruct)
	if err != nil {
		return nil, err
	}
	return issueCommentStruct, nil
}

func newIssues(data []byte) (Event, error) {
	issuesStruct := IssuesEvent{}
	err := json.Unmarshal(data, &issuesStruct)
	if err != nil {
		return nil, err
	}
	return issuesStruct, nil
}

func newMember(data []byte) (Event, error) {
	memberStruct := MemberEvent{}
	err := json.Unmarshal(data, &memberStruct)
	if err != nil {
		return nil, err
	}
	return memberStruct, nil
}

func newMembership(data []byte) (Event, error) {
	membershipStruct := MembershipEvent{}
	err := json.Unmarshal(data, &membershipStruct)
	if err != nil {
		return nil, err
	}
	return membershipStruct, nil
}

func newPageBuild(data []byte) (Event, error) {
	pageBuildEvent := PageBuildEvent{}
	err := json.Unmarshal(data, &pageBuildEvent)
	if err != nil {
		return nil, err
	}
	return pageBuildEvent, nil
}

func newPublic(data []byte) (Event, error) {
	publicEvent := PublicEvent{}
	err := json.Unmarshal(data, &publicEvent)
	if err != nil {
		return nil, err
	}
	return publicEvent, nil
}

func newPullRequest(data []byte) (Event, error) {
	pullRequestStruct := PullRequestEvent{}
	err := json.Unmarshal(data, &pullRequestStruct)
	if err != nil {
		return nil, err
	}
	return pullRequestStruct, nil
}

func newPullRequestReviewComment(data []byte) (Event, error) {
	pullRequestReviewCommentStruct := PullRequestReviewCommentEvent{}
	err := json.Unmarshal(data, &pullRequestReviewCommentStruct)
	if err != nil {
		return nil, err
	}
	return pullRequestReviewCommentStruct, nil
}

func newPush(data []byte) (Event, error) {
	pushStruct := PushEvent{}
	err := json.Unmarshal(data, &pushStruct)
	if err != nil {
		return nil, err
	}
	return pushStruct, nil
}

func newRelease(data []byte) (Event, error) {
	releaseStruct := ReleaseEvent{}
	err := json.Unmarshal(data, &releaseStruct)
	if err != nil {
		return nil, err
	}
	return releaseStruct, nil
}

func newRepository(data []byte) (Event, error) {
	repositoryStruct := RepositoryEvent{}
	err := json.Unmarshal(data, &repositoryStruct)
	if err != nil {
		return nil, err
	}
	return repositoryStruct, nil
}

func newStatus(data []byte) (Event, error) {
	statusStruct := StatusEvent{}
	err := json.Unmarshal(data, &statusStruct)
	if err != nil {
		return nil, err
	}
	return statusStruct, nil
}

func newTeamAdd(data []byte) (Event, error) {
	teamAddStruct := TeamAddEvent{}
	err := json.Unmarshal(data, &teamAddStruct)
	if err != nil {
		return nil, err
	}
	return teamAddStruct, nil
}

func newWatch(data []byte) (Event, error) {
	watchStruct := WatchEvent{}
	err := json.Unmarshal(data, &watchStruct)
	if err != nil {
		return nil, err
	}
	return watchStruct, nil
}

type newEventError struct {
	s string
}

func (e *newEventError) Error() string {
	return e.s
}

// NewEvent unmarshals the raw payload into the struct matching the
// X-Github-Event type t.
func NewEvent(r []byte, t string) (Event, error) {
	log.Printf("New %v event received", t)
	switch t {
	case "commit_comment":
		return newCommitComment(r)
	case "create":
		return newCreate(r)
	case "delete":
		return newDelete(r)
	case "deployment":
		return newDeployment(r)
	case "deployment_status":
		return newDeploymentStatus(r)
	case "fork":
		return newFork(r)
	case "gollum":
		return newGollum(r)
	case "issue_comment":
		return newIssueComment(r)
	case "issues":
		return newIssues(r)
	case "member":
		return newMember(r)
	case "membership":
		return newMembership(r)
	case "page_build":
		return newPageBuild(r)
	case "public":
		return newPublic(r)
	case "pull_request":
		return newPullRequest(r)
	case "pull_request_review_comment":
		return newPullRequestReviewComment(r)
	case "push":
		return newPush(r)
	case "release":
		return newRelease(r)
	case "repository":
		return newRepository(r)
	case "status":
		return newStatus(r)
	case "team_add":
		return newTeamAdd(r)
	case "watch":
		return newWatch(r)
	}
	return nil, &newEventError{"Not a recognized event type"}
}
3559	plugins/inputs/github_webhooks/github_webhooks_mock_json.go	Normal file
File diff suppressed because it is too large
711	plugins/inputs/github_webhooks/github_webhooks_models.go	Normal file
@@ -0,0 +1,711 @@
package github_webhooks

import (
	"fmt"
	"log"
	"time"

	"github.com/influxdata/telegraf"
)

const meas = "github_webhooks"

type Event interface {
	NewMetric() telegraf.Metric
}

type Repository struct {
	Repository string `json:"full_name"`
	Private    bool   `json:"private"`
	Stars      int    `json:"stargazers_count"`
	Forks      int    `json:"forks_count"`
	Issues     int    `json:"open_issues_count"`
}

type Sender struct {
	User  string `json:"login"`
	Admin bool   `json:"site_admin"`
}

type CommitComment struct {
	Commit string `json:"commit_id"`
	Body   string `json:"body"`
}

type Deployment struct {
	Commit      string `json:"sha"`
	Task        string `json:"task"`
	Environment string `json:"environment"`
	Description string `json:"description"`
}

type Page struct {
	Name   string `json:"page_name"`
	Title  string `json:"title"`
	Action string `json:"action"`
}

type Issue struct {
	Number   int    `json:"number"`
	Title    string `json:"title"`
	Comments int    `json:"comments"`
}

type IssueComment struct {
	Body string `json:"body"`
}

type Team struct {
	Name string `json:"name"`
}

type PullRequest struct {
	Number       int    `json:"number"`
	State        string `json:"state"`
	Title        string `json:"title"`
	Comments     int    `json:"comments"`
	Commits      int    `json:"commits"`
	Additions    int    `json:"additions"`
	Deletions    int    `json:"deletions"`
	ChangedFiles int    `json:"changed_files"`
}

type PullRequestReviewComment struct {
	File    string `json:"path"`
	Comment string `json:"body"`
}

type Release struct {
	TagName string `json:"tag_name"`
}

type DeploymentStatus struct {
	State       string `json:"state"`
	Description string `json:"description"`
}

type CommitCommentEvent struct {
	Comment    CommitComment `json:"comment"`
	Repository Repository    `json:"repository"`
	Sender     Sender        `json:"sender"`
}

func (s CommitCommentEvent) NewMetric() telegraf.Metric {
	event := "commit_comment"
	t := map[string]string{
		"event":      event,
		"repository": s.Repository.Repository,
		"private":    fmt.Sprintf("%v", s.Repository.Private),
		"user":       s.Sender.User,
		"admin":      fmt.Sprintf("%v", s.Sender.Admin),
	}
	f := map[string]interface{}{
		"stars":   s.Repository.Stars,
		"forks":   s.Repository.Forks,
		"issues":  s.Repository.Issues,
		"commit":  s.Comment.Commit,
		"comment": s.Comment.Body,
	}
	m, err := telegraf.NewMetric(meas, t, f, time.Now())
	if err != nil {
		log.Fatalf("Failed to create %v event", event)
	}
	return m
}

type CreateEvent struct {
	Ref        string     `json:"ref"`
	RefType    string     `json:"ref_type"`
	Repository Repository `json:"repository"`
	Sender     Sender     `json:"sender"`
}

func (s CreateEvent) NewMetric() telegraf.Metric {
	event := "create"
	t := map[string]string{
		"event":      event,
		"repository": s.Repository.Repository,
		"private":    fmt.Sprintf("%v", s.Repository.Private),
		"user":       s.Sender.User,
		"admin":      fmt.Sprintf("%v", s.Sender.Admin),
	}
	f := map[string]interface{}{
		"stars":   s.Repository.Stars,
		"forks":   s.Repository.Forks,
		"issues":  s.Repository.Issues,
		"ref":     s.Ref,
		"refType": s.RefType,
	}
	m, err := telegraf.NewMetric(meas, t, f, time.Now())
	if err != nil {
		log.Fatalf("Failed to create %v event", event)
	}
	return m
}

type DeleteEvent struct {
	Ref        string     `json:"ref"`
	RefType    string     `json:"ref_type"`
	Repository Repository `json:"repository"`
	Sender     Sender     `json:"sender"`
}

func (s DeleteEvent) NewMetric() telegraf.Metric {
	event := "delete"
	t := map[string]string{
		"event":      event,
		"repository": s.Repository.Repository,
		"private":    fmt.Sprintf("%v", s.Repository.Private),
		"user":       s.Sender.User,
		"admin":      fmt.Sprintf("%v", s.Sender.Admin),
	}
	f := map[string]interface{}{
		"stars":   s.Repository.Stars,
		"forks":   s.Repository.Forks,
		"issues":  s.Repository.Issues,
		"ref":     s.Ref,
		"refType": s.RefType,
	}
	m, err := telegraf.NewMetric(meas, t, f, time.Now())
	if err != nil {
		log.Fatalf("Failed to create %v event", event)
	}
	return m
}

type DeploymentEvent struct {
	Deployment Deployment `json:"deployment"`
	Repository Repository `json:"repository"`
	Sender     Sender     `json:"sender"`
}

func (s DeploymentEvent) NewMetric() telegraf.Metric {
	event := "deployment"
	t := map[string]string{
		"event":      event,
		"repository": s.Repository.Repository,
		"private":    fmt.Sprintf("%v", s.Repository.Private),
		"user":       s.Sender.User,
		"admin":      fmt.Sprintf("%v", s.Sender.Admin),
	}
	f := map[string]interface{}{
		"stars":       s.Repository.Stars,
		"forks":       s.Repository.Forks,
		"issues":      s.Repository.Issues,
		"commit":      s.Deployment.Commit,
		"task":        s.Deployment.Task,
		"environment": s.Deployment.Environment,
		"description": s.Deployment.Description,
	}
	m, err := telegraf.NewMetric(meas, t, f, time.Now())
	if err != nil {
		log.Fatalf("Failed to create %v event", event)
	}
	return m
}

type DeploymentStatusEvent struct {
	Deployment       Deployment       `json:"deployment"`
	DeploymentStatus DeploymentStatus `json:"deployment_status"`
	Repository       Repository       `json:"repository"`
	Sender           Sender           `json:"sender"`
}

func (s DeploymentStatusEvent) NewMetric() telegraf.Metric {
	event := "deployment_status"
	t := map[string]string{
		"event":      event,
		"repository": s.Repository.Repository,
		"private":    fmt.Sprintf("%v", s.Repository.Private),
		"user":       s.Sender.User,
		"admin":      fmt.Sprintf("%v", s.Sender.Admin),
	}
	f := map[string]interface{}{
		"stars":          s.Repository.Stars,
		"forks":          s.Repository.Forks,
		"issues":         s.Repository.Issues,
		"commit":         s.Deployment.Commit,
		"task":           s.Deployment.Task,
		"environment":    s.Deployment.Environment,
		"description":    s.Deployment.Description,
		"depState":       s.DeploymentStatus.State,
		"depDescription": s.DeploymentStatus.Description,
	}
	m, err := telegraf.NewMetric(meas, t, f, time.Now())
	if err != nil {
		log.Fatalf("Failed to create %v event", event)
	}
	return m
}

type ForkEvent struct {
	Forkee     Repository `json:"forkee"`
	Repository Repository `json:"repository"`
	Sender     Sender     `json:"sender"`
}

func (s ForkEvent) NewMetric() telegraf.Metric {
	event := "fork"
	t := map[string]string{
		"event":      event,
		"repository": s.Repository.Repository,
		"private":    fmt.Sprintf("%v", s.Repository.Private),
		"user":       s.Sender.User,
		"admin":      fmt.Sprintf("%v", s.Sender.Admin),
	}
	f := map[string]interface{}{
		"stars":  s.Repository.Stars,
		"forks":  s.Repository.Forks,
		"issues": s.Repository.Issues,
		"fork":   s.Forkee.Repository,
	}
	m, err := telegraf.NewMetric(meas, t, f, time.Now())
	if err != nil {
		log.Fatalf("Failed to create %v event", event)
	}
	return m
}

type GollumEvent struct {
	Pages      []Page     `json:"pages"`
	Repository Repository `json:"repository"`
	Sender     Sender     `json:"sender"`
}

// REVIEW: Going to be lazy and not deal with the pages.
func (s GollumEvent) NewMetric() telegraf.Metric {
	event := "gollum"
	t := map[string]string{
		"event":      event,
		"repository": s.Repository.Repository,
		"private":    fmt.Sprintf("%v", s.Repository.Private),
		"user":       s.Sender.User,
		"admin":      fmt.Sprintf("%v", s.Sender.Admin),
	}
	f := map[string]interface{}{
		"stars":  s.Repository.Stars,
		"forks":  s.Repository.Forks,
		"issues": s.Repository.Issues,
	}
	m, err := telegraf.NewMetric(meas, t, f, time.Now())
	if err != nil {
		log.Fatalf("Failed to create %v event", event)
	}
	return m
}

type IssueCommentEvent struct {
	Issue      Issue        `json:"issue"`
	Comment    IssueComment `json:"comment"`
	Repository Repository   `json:"repository"`
	Sender     Sender       `json:"sender"`
}

func (s IssueCommentEvent) NewMetric() telegraf.Metric {
	event := "issue_comment"
	t := map[string]string{
		"event":      event,
		"repository": s.Repository.Repository,
		"private":    fmt.Sprintf("%v", s.Repository.Private),
		"user":       s.Sender.User,
		"admin":      fmt.Sprintf("%v", s.Sender.Admin),
		"issue":      fmt.Sprintf("%v", s.Issue.Number),
	}
	f := map[string]interface{}{
		"stars":    s.Repository.Stars,
		"forks":    s.Repository.Forks,
		"issues":   s.Repository.Issues,
		"title":    s.Issue.Title,
		"comments": s.Issue.Comments,
		"body":     s.Comment.Body,
	}
	m, err := telegraf.NewMetric(meas, t, f, time.Now())
	if err != nil {
		log.Fatalf("Failed to create %v event", event)
	}
	return m
}

type IssuesEvent struct {
	Action     string     `json:"action"`
	Issue      Issue      `json:"issue"`
	Repository Repository `json:"repository"`
	Sender     Sender     `json:"sender"`
}

func (s IssuesEvent) NewMetric() telegraf.Metric {
	event := "issues"
	t := map[string]string{
		"event":      event,
		"repository": s.Repository.Repository,
		"private":    fmt.Sprintf("%v", s.Repository.Private),
		"user":       s.Sender.User,
		"admin":      fmt.Sprintf("%v", s.Sender.Admin),
		"issue":      fmt.Sprintf("%v", s.Issue.Number),
		"action":     s.Action,
	}
	f := map[string]interface{}{
		"stars":    s.Repository.Stars,
		"forks":    s.Repository.Forks,
		"issues":   s.Repository.Issues,
		"title":    s.Issue.Title,
		"comments": s.Issue.Comments,
	}
	m, err := telegraf.NewMetric(meas, t, f, time.Now())
	if err != nil {
		log.Fatalf("Failed to create %v event", event)
	}
	return m
}

type MemberEvent struct {
	Member     Sender     `json:"member"`
	Repository Repository `json:"repository"`
	Sender     Sender     `json:"sender"`
}

func (s MemberEvent) NewMetric() telegraf.Metric {
	event := "member"
	t := map[string]string{
		"event":      event,
		"repository": s.Repository.Repository,
		"private":    fmt.Sprintf("%v", s.Repository.Private),
		"user":       s.Sender.User,
		"admin":      fmt.Sprintf("%v", s.Sender.Admin),
	}
	f := map[string]interface{}{
		"stars":           s.Repository.Stars,
		"forks":           s.Repository.Forks,
		"issues":          s.Repository.Issues,
		"newMember":       s.Member.User,
		"newMemberStatus": s.Member.Admin,
	}
	m, err := telegraf.NewMetric(meas, t, f, time.Now())
	if err != nil {
		log.Fatalf("Failed to create %v event", event)
	}
	return m
}

type MembershipEvent struct {
	Action string `json:"action"`
	Member Sender `json:"member"`
	Sender Sender `json:"sender"`
	Team   Team   `json:"team"`
}

func (s MembershipEvent) NewMetric() telegraf.Metric {
	event := "membership"
	t := map[string]string{
		"event":  event,
		"user":   s.Sender.User,
		"admin":  fmt.Sprintf("%v", s.Sender.Admin),
		"action": s.Action,
	}
	f := map[string]interface{}{
		"newMember":       s.Member.User,
		"newMemberStatus": s.Member.Admin,
	}
	m, err := telegraf.NewMetric(meas, t, f, time.Now())
	if err != nil {
		log.Fatalf("Failed to create %v event", event)
	}
	return m
}

type PageBuildEvent struct {
	Repository Repository `json:"repository"`
	Sender     Sender     `json:"sender"`
}

func (s PageBuildEvent) NewMetric() telegraf.Metric {
	event := "page_build"
	t := map[string]string{
		"event":      event,
		"repository": s.Repository.Repository,
		"private":    fmt.Sprintf("%v", s.Repository.Private),
		"user":       s.Sender.User,
		"admin":      fmt.Sprintf("%v", s.Sender.Admin),
	}
	f := map[string]interface{}{
		"stars":  s.Repository.Stars,
		"forks":  s.Repository.Forks,
		"issues": s.Repository.Issues,
	}
	m, err := telegraf.NewMetric(meas, t, f, time.Now())
	if err != nil {
		log.Fatalf("Failed to create %v event", event)
	}
	return m
}

type PublicEvent struct {
	Repository Repository `json:"repository"`
	Sender     Sender     `json:"sender"`
}

func (s PublicEvent) NewMetric() telegraf.Metric {
	event := "public"
	t := map[string]string{
		"event":      event,
		"repository": s.Repository.Repository,
		"private":    fmt.Sprintf("%v", s.Repository.Private),
		"user":       s.Sender.User,
		"admin":      fmt.Sprintf("%v", s.Sender.Admin),
	}
	f := map[string]interface{}{
		"stars":  s.Repository.Stars,
		"forks":  s.Repository.Forks,
		"issues": s.Repository.Issues,
	}
	m, err := telegraf.NewMetric(meas, t, f, time.Now())
	if err != nil {
		log.Fatalf("Failed to create %v event", event)
	}
	return m
}

type PullRequestEvent struct {
	Action      string      `json:"action"`
	PullRequest PullRequest `json:"pull_request"`
	Repository  Repository  `json:"repository"`
	Sender      Sender      `json:"sender"`
}

func (s PullRequestEvent) NewMetric() telegraf.Metric {
	event := "pull_request"
	t := map[string]string{
		"event":      event,
		"action":     s.Action,
		"repository": s.Repository.Repository,
		"private":    fmt.Sprintf("%v", s.Repository.Private),
		"user":       s.Sender.User,
		"admin":      fmt.Sprintf("%v", s.Sender.Admin),
		"prNumber":   fmt.Sprintf("%v", s.PullRequest.Number),
	}
	f := map[string]interface{}{
		"stars":        s.Repository.Stars,
		"forks":        s.Repository.Forks,
		"issues":       s.Repository.Issues,
		"state":        s.PullRequest.State,
		"title":        s.PullRequest.Title,
		"comments":     s.PullRequest.Comments,
		"commits":      s.PullRequest.Commits,
		"additions":    s.PullRequest.Additions,
		"deletions":    s.PullRequest.Deletions,
		"changedFiles": s.PullRequest.ChangedFiles,
	}
	m, err := telegraf.NewMetric(meas, t, f, time.Now())
	if err != nil {
		log.Fatalf("Failed to create %v event", event)
	}
	return m
}

type PullRequestReviewCommentEvent struct {
	Comment     PullRequestReviewComment `json:"comment"`
	PullRequest PullRequest              `json:"pull_request"`
	Repository  Repository               `json:"repository"`
	Sender      Sender                   `json:"sender"`
}

func (s PullRequestReviewCommentEvent) NewMetric() telegraf.Metric {
	event := "pull_request_review_comment"
	t := map[string]string{
		"event":      event,
		"repository": s.Repository.Repository,
		"private":    fmt.Sprintf("%v", s.Repository.Private),
		"user":       s.Sender.User,
		"admin":      fmt.Sprintf("%v", s.Sender.Admin),
		"prNumber":   fmt.Sprintf("%v", s.PullRequest.Number),
	}
	f := map[string]interface{}{
		"stars":        s.Repository.Stars,
		"forks":        s.Repository.Forks,
		"issues":       s.Repository.Issues,
		"state":        s.PullRequest.State,
		"title":        s.PullRequest.Title,
		"comments":     s.PullRequest.Comments,
		"commits":      s.PullRequest.Commits,
		"additions":    s.PullRequest.Additions,
		"deletions":    s.PullRequest.Deletions,
		"changedFiles": s.PullRequest.ChangedFiles,
		"commentFile":  s.Comment.File,
		"comment":      s.Comment.Comment,
	}
	m, err := telegraf.NewMetric(meas, t, f, time.Now())
	if err != nil {
		log.Fatalf("Failed to create %v event", event)
	}
	return m
}

type PushEvent struct {
	Ref        string     `json:"ref"`
	Before     string     `json:"before"`
	After      string     `json:"after"`
	Repository Repository `json:"repository"`
	Sender     Sender     `json:"sender"`
}

func (s PushEvent) NewMetric() telegraf.Metric {
	event := "push"
	t := map[string]string{
		"event":      event,
		"repository": s.Repository.Repository,
		"private":    fmt.Sprintf("%v", s.Repository.Private),
		"user":       s.Sender.User,
		"admin":      fmt.Sprintf("%v", s.Sender.Admin),
	}
	f := map[string]interface{}{
		"stars":  s.Repository.Stars,
		"forks":  s.Repository.Forks,
		"issues": s.Repository.Issues,
		"ref":    s.Ref,
		"before": s.Before,
		"after":  s.After,
	}
	m, err := telegraf.NewMetric(meas, t, f, time.Now())
	if err != nil {
		log.Fatalf("Failed to create %v event", event)
	}
	return m
}

type ReleaseEvent struct {
	Release    Release    `json:"release"`
	Repository Repository `json:"repository"`
	Sender     Sender     `json:"sender"`
}

func (s ReleaseEvent) NewMetric() telegraf.Metric {
	event := "release"
	t := map[string]string{
		"event":      event,
		"repository": s.Repository.Repository,
		"private":    fmt.Sprintf("%v", s.Repository.Private),
		"user":       s.Sender.User,
		"admin":      fmt.Sprintf("%v", s.Sender.Admin),
	}
	f := map[string]interface{}{
		"stars":   s.Repository.Stars,
		"forks":   s.Repository.Forks,
		"issues":  s.Repository.Issues,
		"tagName": s.Release.TagName,
	}
	m, err := telegraf.NewMetric(meas, t, f, time.Now())
	if err != nil {
		log.Fatalf("Failed to create %v event", event)
	}
	return m
}

type RepositoryEvent struct {
	Repository Repository `json:"repository"`
	Sender     Sender     `json:"sender"`
}

func (s RepositoryEvent) NewMetric() telegraf.Metric {
	event := "repository"
	t := map[string]string{
		"event":      event,
		"repository": s.Repository.Repository,
		"private":    fmt.Sprintf("%v", s.Repository.Private),
		"user":       s.Sender.User,
		"admin":      fmt.Sprintf("%v", s.Sender.Admin),
	}
	f := map[string]interface{}{
		"stars":  s.Repository.Stars,
		"forks":  s.Repository.Forks,
		"issues": s.Repository.Issues,
	}
	m, err := telegraf.NewMetric(meas, t, f, time.Now())
	if err != nil {
		log.Fatalf("Failed to create %v event", event)
	}
	return m
}

type StatusEvent struct {
	Commit     string     `json:"sha"`
	State      string     `json:"state"`
	Repository Repository `json:"repository"`
	Sender     Sender     `json:"sender"`
}

func (s StatusEvent) NewMetric() telegraf.Metric {
	event := "status"
	t := map[string]string{
		"event":      event,
		"repository": s.Repository.Repository,
		"private":    fmt.Sprintf("%v", s.Repository.Private),
		"user":       s.Sender.User,
		"admin":      fmt.Sprintf("%v", s.Sender.Admin),
	}
	f := map[string]interface{}{
		"stars":  s.Repository.Stars,
		"forks":  s.Repository.Forks,
		"issues": s.Repository.Issues,
		"commit": s.Commit,
		"state":  s.State,
	}
	m, err := telegraf.NewMetric(meas, t, f, time.Now())
	if err != nil {
		log.Fatalf("Failed to create %v event", event)
	}
	return m
}

type TeamAddEvent struct {
	Team       Team       `json:"team"`
	Repository Repository `json:"repository"`
	Sender     Sender     `json:"sender"`
}

func (s TeamAddEvent) NewMetric() telegraf.Metric {
	event := "team_add"
	t := map[string]string{
		"event":      event,
		"repository": s.Repository.Repository,
		"private":    fmt.Sprintf("%v", s.Repository.Private),
		"user":       s.Sender.User,
		"admin":      fmt.Sprintf("%v", s.Sender.Admin),
	}
	f := map[string]interface{}{
		"stars":    s.Repository.Stars,
		"forks":    s.Repository.Forks,
		"issues":   s.Repository.Issues,
		"teamName": s.Team.Name,
	}
	m, err := telegraf.NewMetric(meas, t, f, time.Now())
	if err != nil {
		log.Fatalf("Failed to create %v event", event)
	}
	return m
}

type WatchEvent struct {
|
||||
Repository Repository `json:"repository"`
|
||||
Sender Sender `json:"sender"`
|
||||
}
|
||||
|
||||
func (s WatchEvent) NewMetric() telegraf.Metric {
    event := "watch"
    t := map[string]string{
        "event":      event,
        "repository": s.Repository.Repository,
        "private":    fmt.Sprintf("%v", s.Repository.Private),
        "user":       s.Sender.User,
        "admin":      fmt.Sprintf("%v", s.Sender.Admin),
    }
    f := map[string]interface{}{
        "stars":  s.Repository.Stars,
        "forks":  s.Repository.Forks,
        "issues": s.Repository.Issues,
    }
    m, err := telegraf.NewMetric(meas, t, f, time.Now())
    if err != nil {
        log.Fatalf("Failed to create %v event", event)
    }
    return m
}
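Each event type above follows the same decode-then-map pattern: unmarshal the webhook payload, copy a few fields into tags and fields, build a metric. A minimal self-contained sketch of that flow (the struct json tags below are assumptions for illustration; the plugin's actual Repository and Sender definitions live earlier in this file):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Hypothetical mirrors of the plugin's Repository/Sender types,
// reduced to the fields WatchEvent.NewMetric actually reads.
type repo struct {
	Repository string `json:"full_name"` // json tag is an assumption
	Private    bool   `json:"private"`
	Stars      int    `json:"stargazers_count"`
	Forks      int    `json:"forks_count"`
	Issues     int    `json:"open_issues_count"`
}

type sender struct {
	User  string `json:"login"`
	Admin bool   `json:"site_admin"`
}

type watchEvent struct {
	Repository repo   `json:"repository"`
	Sender     sender `json:"sender"`
}

func main() {
	payload := []byte(`{
		"repository": {"full_name": "o/r", "private": false,
			"stargazers_count": 1, "forks_count": 2, "open_issues_count": 3},
		"sender": {"login": "alice", "site_admin": false}
	}`)
	var e watchEvent
	if err := json.Unmarshal(payload, &e); err != nil {
		panic(err)
	}
	// Same shape as the tag/field maps built in NewMetric above.
	tags := map[string]string{
		"event":      "watch",
		"repository": e.Repository.Repository,
		"private":    fmt.Sprintf("%v", e.Repository.Private),
		"user":       e.Sender.User,
		"admin":      fmt.Sprintf("%v", e.Sender.Admin),
	}
	fields := map[string]interface{}{
		"stars":  e.Repository.Stars,
		"forks":  e.Repository.Forks,
		"issues": e.Repository.Issues,
	}
	fmt.Println(tags, fields)
}
```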
237 plugins/inputs/github_webhooks/github_webhooks_test.go (Normal file)
@@ -0,0 +1,237 @@
package github_webhooks

import (
    "net/http"
    "net/http/httptest"
    "strings"
    "testing"
)

func TestCommitCommentEvent(t *testing.T) {
    gh := NewGithubWebhooks()
    jsonString := CommitCommentEventJSON()
    req, _ := http.NewRequest("POST", "/", strings.NewReader(jsonString))
    req.Header.Add("X-Github-Event", "commit_comment")
    w := httptest.NewRecorder()
    gh.eventHandler(w, req)
    if w.Code != http.StatusOK {
        t.Errorf("POST commit_comment returned HTTP status code %v.\nExpected %v", w.Code, http.StatusOK)
    }
}

func TestDeleteEvent(t *testing.T) {
    gh := NewGithubWebhooks()
    jsonString := DeleteEventJSON()
    req, _ := http.NewRequest("POST", "/", strings.NewReader(jsonString))
    req.Header.Add("X-Github-Event", "delete")
    w := httptest.NewRecorder()
    gh.eventHandler(w, req)
    if w.Code != http.StatusOK {
        t.Errorf("POST delete returned HTTP status code %v.\nExpected %v", w.Code, http.StatusOK)
    }
}

func TestDeploymentEvent(t *testing.T) {
    gh := NewGithubWebhooks()
    jsonString := DeploymentEventJSON()
    req, _ := http.NewRequest("POST", "/", strings.NewReader(jsonString))
    req.Header.Add("X-Github-Event", "deployment")
    w := httptest.NewRecorder()
    gh.eventHandler(w, req)
    if w.Code != http.StatusOK {
        t.Errorf("POST deployment returned HTTP status code %v.\nExpected %v", w.Code, http.StatusOK)
    }
}

func TestDeploymentStatusEvent(t *testing.T) {
    gh := NewGithubWebhooks()
    jsonString := DeploymentStatusEventJSON()
    req, _ := http.NewRequest("POST", "/", strings.NewReader(jsonString))
    req.Header.Add("X-Github-Event", "deployment_status")
    w := httptest.NewRecorder()
    gh.eventHandler(w, req)
    if w.Code != http.StatusOK {
        t.Errorf("POST deployment_status returned HTTP status code %v.\nExpected %v", w.Code, http.StatusOK)
    }
}

func TestForkEvent(t *testing.T) {
    gh := NewGithubWebhooks()
    jsonString := ForkEventJSON()
    req, _ := http.NewRequest("POST", "/", strings.NewReader(jsonString))
    req.Header.Add("X-Github-Event", "fork")
    w := httptest.NewRecorder()
    gh.eventHandler(w, req)
    if w.Code != http.StatusOK {
        t.Errorf("POST fork returned HTTP status code %v.\nExpected %v", w.Code, http.StatusOK)
    }
}

func TestGollumEvent(t *testing.T) {
    gh := NewGithubWebhooks()
    jsonString := GollumEventJSON()
    req, _ := http.NewRequest("POST", "/", strings.NewReader(jsonString))
    req.Header.Add("X-Github-Event", "gollum")
    w := httptest.NewRecorder()
    gh.eventHandler(w, req)
    if w.Code != http.StatusOK {
        t.Errorf("POST gollum returned HTTP status code %v.\nExpected %v", w.Code, http.StatusOK)
    }
}

func TestIssueCommentEvent(t *testing.T) {
    gh := NewGithubWebhooks()
    jsonString := IssueCommentEventJSON()
    req, _ := http.NewRequest("POST", "/", strings.NewReader(jsonString))
    req.Header.Add("X-Github-Event", "issue_comment")
    w := httptest.NewRecorder()
    gh.eventHandler(w, req)
    if w.Code != http.StatusOK {
        t.Errorf("POST issue_comment returned HTTP status code %v.\nExpected %v", w.Code, http.StatusOK)
    }
}

func TestIssuesEvent(t *testing.T) {
    gh := NewGithubWebhooks()
    jsonString := IssuesEventJSON()
    req, _ := http.NewRequest("POST", "/", strings.NewReader(jsonString))
    req.Header.Add("X-Github-Event", "issues")
    w := httptest.NewRecorder()
    gh.eventHandler(w, req)
    if w.Code != http.StatusOK {
        t.Errorf("POST issues returned HTTP status code %v.\nExpected %v", w.Code, http.StatusOK)
    }
}

func TestMemberEvent(t *testing.T) {
    gh := NewGithubWebhooks()
    jsonString := MemberEventJSON()
    req, _ := http.NewRequest("POST", "/", strings.NewReader(jsonString))
    req.Header.Add("X-Github-Event", "member")
    w := httptest.NewRecorder()
    gh.eventHandler(w, req)
    if w.Code != http.StatusOK {
        t.Errorf("POST member returned HTTP status code %v.\nExpected %v", w.Code, http.StatusOK)
    }
}

func TestMembershipEvent(t *testing.T) {
    gh := NewGithubWebhooks()
    jsonString := MembershipEventJSON()
    req, _ := http.NewRequest("POST", "/", strings.NewReader(jsonString))
    req.Header.Add("X-Github-Event", "membership")
    w := httptest.NewRecorder()
    gh.eventHandler(w, req)
    if w.Code != http.StatusOK {
        t.Errorf("POST membership returned HTTP status code %v.\nExpected %v", w.Code, http.StatusOK)
    }
}

func TestPageBuildEvent(t *testing.T) {
    gh := NewGithubWebhooks()
    jsonString := PageBuildEventJSON()
    req, _ := http.NewRequest("POST", "/", strings.NewReader(jsonString))
    req.Header.Add("X-Github-Event", "page_build")
    w := httptest.NewRecorder()
    gh.eventHandler(w, req)
    if w.Code != http.StatusOK {
        t.Errorf("POST page_build returned HTTP status code %v.\nExpected %v", w.Code, http.StatusOK)
    }
}

func TestPublicEvent(t *testing.T) {
    gh := NewGithubWebhooks()
    jsonString := PublicEventJSON()
    req, _ := http.NewRequest("POST", "/", strings.NewReader(jsonString))
    req.Header.Add("X-Github-Event", "public")
    w := httptest.NewRecorder()
    gh.eventHandler(w, req)
    if w.Code != http.StatusOK {
        t.Errorf("POST public returned HTTP status code %v.\nExpected %v", w.Code, http.StatusOK)
    }
}

func TestPullRequestReviewCommentEvent(t *testing.T) {
    gh := NewGithubWebhooks()
    jsonString := PullRequestReviewCommentEventJSON()
    req, _ := http.NewRequest("POST", "/", strings.NewReader(jsonString))
    req.Header.Add("X-Github-Event", "pull_request_review_comment")
    w := httptest.NewRecorder()
    gh.eventHandler(w, req)
    if w.Code != http.StatusOK {
        t.Errorf("POST pull_request_review_comment returned HTTP status code %v.\nExpected %v", w.Code, http.StatusOK)
    }
}

func TestPushEvent(t *testing.T) {
    gh := NewGithubWebhooks()
    jsonString := PushEventJSON()
    req, _ := http.NewRequest("POST", "/", strings.NewReader(jsonString))
    req.Header.Add("X-Github-Event", "push")
    w := httptest.NewRecorder()
    gh.eventHandler(w, req)
    if w.Code != http.StatusOK {
        t.Errorf("POST push returned HTTP status code %v.\nExpected %v", w.Code, http.StatusOK)
    }
}

func TestReleaseEvent(t *testing.T) {
    gh := NewGithubWebhooks()
    jsonString := ReleaseEventJSON()
    req, _ := http.NewRequest("POST", "/", strings.NewReader(jsonString))
    req.Header.Add("X-Github-Event", "release")
    w := httptest.NewRecorder()
    gh.eventHandler(w, req)
    if w.Code != http.StatusOK {
        t.Errorf("POST release returned HTTP status code %v.\nExpected %v", w.Code, http.StatusOK)
    }
}

func TestRepositoryEvent(t *testing.T) {
    gh := NewGithubWebhooks()
    jsonString := RepositoryEventJSON()
    req, _ := http.NewRequest("POST", "/", strings.NewReader(jsonString))
    req.Header.Add("X-Github-Event", "repository")
    w := httptest.NewRecorder()
    gh.eventHandler(w, req)
    if w.Code != http.StatusOK {
        t.Errorf("POST repository returned HTTP status code %v.\nExpected %v", w.Code, http.StatusOK)
    }
}

func TestStatusEvent(t *testing.T) {
    gh := NewGithubWebhooks()
    jsonString := StatusEventJSON()
    req, _ := http.NewRequest("POST", "/", strings.NewReader(jsonString))
    req.Header.Add("X-Github-Event", "status")
    w := httptest.NewRecorder()
    gh.eventHandler(w, req)
    if w.Code != http.StatusOK {
        t.Errorf("POST status returned HTTP status code %v.\nExpected %v", w.Code, http.StatusOK)
    }
}

func TestTeamAddEvent(t *testing.T) {
    gh := NewGithubWebhooks()
    jsonString := TeamAddEventJSON()
    req, _ := http.NewRequest("POST", "/", strings.NewReader(jsonString))
    req.Header.Add("X-Github-Event", "team_add")
    w := httptest.NewRecorder()
    gh.eventHandler(w, req)
    if w.Code != http.StatusOK {
        t.Errorf("POST team_add returned HTTP status code %v.\nExpected %v", w.Code, http.StatusOK)
    }
}

func TestWatchEvent(t *testing.T) {
    gh := NewGithubWebhooks()
    jsonString := WatchEventJSON()
    req, _ := http.NewRequest("POST", "/", strings.NewReader(jsonString))
    req.Header.Add("X-Github-Event", "watch")
    w := httptest.NewRecorder()
    gh.eventHandler(w, req)
    if w.Code != http.StatusOK {
        t.Errorf("POST watch returned HTTP status code %v.\nExpected %v", w.Code, http.StatusOK)
    }
}
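The tests above drive eventHandler directly through httptest. The same request shape works against a live listener; a hedged sketch (the address, port, and payload here are hypothetical, since the plugin's real listen address comes from its config, which is not shown in this diff):

```go
// Minimal manual smoke test against a running webhook listener.
package main

import (
	"fmt"
	"net/http"
	"strings"
)

func main() {
	body := strings.NewReader(`{"repository": {}, "sender": {}}`)
	req, err := http.NewRequest("POST", "http://localhost:1618/", body) // hypothetical port
	if err != nil {
		panic(err)
	}
	// The handler routes on this header, exactly as in the tests above.
	req.Header.Add("X-Github-Event", "watch")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.StatusCode) // 200 expected on success
}
```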
@@ -3,7 +3,8 @@ package haproxy
import (
    "encoding/csv"
    "fmt"
    "github.com/influxdb/telegraf/plugins/inputs"
    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/plugins/inputs"
    "io"
    "net/http"
    "net/url"
@@ -104,7 +105,7 @@ func (r *haproxy) Description() string {

// Reads stats from all configured servers accumulates stats.
// Returns one of the errors encountered while gather stats (if any).
func (g *haproxy) Gather(acc inputs.Accumulator) error {
func (g *haproxy) Gather(acc telegraf.Accumulator) error {
    if len(g.Servers) == 0 {
        return g.gatherServer("http://127.0.0.1:1936", acc)
    }
@@ -126,7 +127,7 @@ func (g *haproxy) Gather(acc inputs.Accumulator) error {
    return outerr
}

func (g *haproxy) gatherServer(addr string, acc inputs.Accumulator) error {
func (g *haproxy) gatherServer(addr string, acc telegraf.Accumulator) error {
    if g.client == nil {

        client := &http.Client{}
@@ -156,7 +157,7 @@ func (g *haproxy) gatherServer(addr string, acc inputs.Accumulator) error {
    return importCsvResult(res.Body, acc, u.Host)
}

func importCsvResult(r io.Reader, acc inputs.Accumulator, host string) error {
func importCsvResult(r io.Reader, acc telegraf.Accumulator, host string) error {
    csv := csv.NewReader(r)
    result, err := csv.ReadAll()
    now := time.Now()
@@ -358,7 +359,7 @@ func importCsvResult(r io.Reader, acc inputs.Accumulator, host string) error {
}

func init() {
    inputs.Add("haproxy", func() inputs.Input {
    inputs.Add("haproxy", func() telegraf.Input {
        return &haproxy{}
    })
}

@@ -5,7 +5,7 @@ import (
    "strings"
    "testing"

    "github.com/influxdb/telegraf/testutil"
    "github.com/influxdata/telegraf/testutil"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
    "net/http"

@@ -45,6 +45,16 @@ You can also specify additional request parameters for the service:

```

You can also specify additional request header parameters for the service:

```
[[httpjson.services]]
  ...

  [httpjson.services.headers]
    X-Auth-Token = "my-xauth-token"
    apiVersion = "v1"
```

# Example:

@@ -9,9 +9,11 @@ import (
    "net/url"
    "strings"
    "sync"
    "time"

    "github.com/influxdb/telegraf/internal"
    "github.com/influxdb/telegraf/plugins/inputs"
    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/internal"
    "github.com/influxdata/telegraf/plugins/inputs"
)

type HttpJson struct {
@@ -20,6 +22,7 @@ type HttpJson struct {
    Method     string
    TagKeys    []string
    Parameters map[string]string
    Headers    map[string]string
    client     HTTPClient
}

@@ -44,6 +47,9 @@ func (c RealHTTPClient) MakeRequest(req *http.Request) (*http.Response, error) {
}

var sampleConfig = `
  # NOTE This plugin only reads numerical measurements, strings and booleans
  # will be ignored.

  # a name for the service being polled
  name = "webserver_stats"

@@ -66,6 +72,12 @@ var sampleConfig = `
  [inputs.httpjson.parameters]
    event_type = "cpu_spike"
    threshold = "0.75"

  # HTTP Header parameters (all values must be strings)
  # [inputs.httpjson.headers]
  #   X-Auth-Token = "my-xauth-token"
  #   apiVersion = "v1"
`

func (h *HttpJson) SampleConfig() string {
@@ -77,7 +89,7 @@ func (h *HttpJson) Description() string {
}

// Gathers data for all servers.
func (h *HttpJson) Gather(acc inputs.Accumulator) error {
func (h *HttpJson) Gather(acc telegraf.Accumulator) error {
    var wg sync.WaitGroup

    errorChannel := make(chan error, len(h.Servers))
@@ -116,10 +128,11 @@ func (h *HttpJson) Gather(acc inputs.Accumulator) error {
// Returns:
//     error: Any error that may have occurred
func (h *HttpJson) gatherServer(
    acc inputs.Accumulator,
    acc telegraf.Accumulator,
    serverURL string,
) error {
    resp, err := h.sendRequest(serverURL)
    resp, responseTime, err := h.sendRequest(serverURL)

    if err != nil {
        return err
    }
@@ -141,6 +154,9 @@ func (h *HttpJson) gatherServer(
        delete(jsonOut, tag)
    }

    if responseTime >= 0 {
        jsonOut["response_time"] = responseTime
    }
    f := internal.JSONFlattener{}
    err = f.FlattenJSON("", jsonOut)
    if err != nil {
@@ -164,11 +180,11 @@ func (h *HttpJson) gatherServer(
// Returns:
//     string: body of the response
//     error : Any error that may have occurred
func (h *HttpJson) sendRequest(serverURL string) (string, error) {
func (h *HttpJson) sendRequest(serverURL string) (string, float64, error) {
    // Prepare URL
    requestURL, err := url.Parse(serverURL)
    if err != nil {
        return "", fmt.Errorf("Invalid server URL \"%s\"", serverURL)
        return "", -1, fmt.Errorf("Invalid server URL \"%s\"", serverURL)
    }

    params := url.Values{}
@@ -180,19 +196,30 @@ func (h *HttpJson) sendRequest(serverURL string) (string, error) {
    // Create + send request
    req, err := http.NewRequest(h.Method, requestURL.String(), nil)
    if err != nil {
        return "", err
        return "", -1, err
    }

    // Add header parameters
    for k, v := range h.Headers {
        if strings.ToLower(k) == "host" {
            req.Host = v
        } else {
            req.Header.Add(k, v)
        }
    }

    start := time.Now()
    resp, err := h.client.MakeRequest(req)
    if err != nil {
        return "", err
        return "", -1, err
    }
    defer resp.Body.Close()

    defer resp.Body.Close()
    responseTime := time.Since(start).Seconds()

    body, err := ioutil.ReadAll(resp.Body)
    if err != nil {
        return string(body), err
        return string(body), responseTime, err
    }

    // Process response
@@ -203,14 +230,14 @@ func (h *HttpJson) sendRequest(serverURL string) (string, error) {
            http.StatusText(resp.StatusCode),
            http.StatusOK,
            http.StatusText(http.StatusOK))
        return string(body), err
        return string(body), responseTime, err
    }

    return string(body), err
    return string(body), responseTime, err
}

func init() {
    inputs.Add("httpjson", func() inputs.Input {
    inputs.Add("httpjson", func() telegraf.Input {
        return &HttpJson{client: RealHTTPClient{client: &http.Client{}}}
    })
}

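One subtlety in the header loop added above: Go's net/http takes the outgoing Host from req.Host (falling back to the URL), not from the header map, so a "Host" entry set via req.Header.Add would be silently ignored. That is why the loop special-cases it. A standalone sketch of the same pattern (the URL and header values here are illustrative only):

```go
package main

import (
	"fmt"
	"net/http"
	"strings"
)

// applyHeaders mirrors the plugin's loop: "Host" must go on req.Host,
// everything else goes into the header map.
func applyHeaders(req *http.Request, headers map[string]string) {
	for k, v := range headers {
		if strings.ToLower(k) == "host" {
			req.Host = v // overrides the Host sent on the wire
		} else {
			req.Header.Add(k, v)
		}
	}
}

func main() {
	req, _ := http.NewRequest("GET", "http://127.0.0.1:8080/stats", nil) // error ignored in this sketch
	applyHeaders(req, map[string]string{
		"Host":         "service.internal", // hypothetical virtual host
		"X-Auth-Token": "my-xauth-token",
	})
	fmt.Println(req.Host, req.Header.Get("X-Auth-Token"))
}
```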
@@ -6,7 +6,7 @@ import (
    "strings"
    "testing"

    "github.com/influxdb/telegraf/testutil"
    "github.com/influxdata/telegraf/testutil"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)
@@ -14,17 +14,17 @@ import (
const validJSON = `
    {
        "parent": {
            "child": 3,
            "child": 3.0,
            "ignored_child": "hi"
        },
        "ignored_null": null,
        "integer": 4,
        "ignored_list": [3, 4],
        "list": [3, 4],
        "ignored_parent": {
            "another_ignored_list": [4],
            "another_ignored_null": null,
            "ignored_string": "hello, world!"
        }
        },
        "another_list": [4]
    }`

const validJSONTags = `
@@ -35,8 +35,11 @@ const validJSONTags = `
    }`

var expectedFields = map[string]interface{}{
    "parent_child": float64(3),
    "integer":      float64(4),
    "parent_child":   float64(3),
    "list_0":         float64(3),
    "list_1":         float64(4),
    "another_list_0": float64(4),
    "integer":        float64(4),
}

const invalidJSON = "I don't think this is JSON"
@@ -94,6 +97,10 @@ func genMockHttpJson(response string, statusCode int) []*HttpJson {
                "httpParam1": "12",
                "httpParam2": "the second parameter",
            },
            Headers: map[string]string{
                "X-Auth-Token": "the-first-parameter",
                "apiVersion":   "v1",
            },
        },
        &HttpJson{
            client: mockHTTPClient{responseBody: response, statusCode: statusCode},
@@ -107,6 +114,10 @@ func genMockHttpJson(response string, statusCode int) []*HttpJson {
                "httpParam1": "12",
                "httpParam2": "the second parameter",
            },
            Headers: map[string]string{
                "X-Auth-Token": "the-first-parameter",
                "apiVersion":   "v1",
            },
            TagKeys: []string{
                "role",
                "build",
@@ -123,10 +134,16 @@ func TestHttpJson200(t *testing.T) {
        var acc testutil.Accumulator
        err := service.Gather(&acc)
        require.NoError(t, err)
        assert.Equal(t, 4, acc.NFields())
        assert.Equal(t, 12, acc.NFields())
        // Set responsetime
        for _, p := range acc.Metrics {
            p.Fields["response_time"] = 1.0
        }

        for _, srv := range service.Servers {
            tags := map[string]string{"server": srv}
            mname := "httpjson_" + service.Name
            expectedFields["response_time"] = 1.0
            acc.AssertContainsTaggedFields(t, mname, expectedFields, tags)
        }
}
@@ -185,11 +202,15 @@ func TestHttpJson200Tags(t *testing.T) {
    if service.Name == "other_webapp" {
        var acc testutil.Accumulator
        err := service.Gather(&acc)
        // Set responsetime
        for _, p := range acc.Metrics {
            p.Fields["response_time"] = 1.0
        }
        require.NoError(t, err)
        assert.Equal(t, 2, acc.NFields())
        assert.Equal(t, 4, acc.NFields())
        for _, srv := range service.Servers {
            tags := map[string]string{"server": srv, "role": "master", "build": "123"}
            fields := map[string]interface{}{"value": float64(15)}
            fields := map[string]interface{}{"value": float64(15), "response_time": float64(1)}
            mname := "httpjson_" + service.Name
            acc.AssertContainsTaggedFields(t, mname, fields, tags)
        }

@@ -8,7 +8,8 @@ import (
    "strings"
    "sync"

    "github.com/influxdb/telegraf/plugins/inputs"
    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/plugins/inputs"
)

type InfluxDB struct {
@@ -32,7 +33,7 @@ func (*InfluxDB) SampleConfig() string {
`
}

func (i *InfluxDB) Gather(acc inputs.Accumulator) error {
func (i *InfluxDB) Gather(acc telegraf.Accumulator) error {
    errorChannel := make(chan error, len(i.URLs))

    var wg sync.WaitGroup
@@ -77,7 +78,7 @@ type point struct {
// Returns:
//     error: Any error that may have occurred
func (i *InfluxDB) gatherURL(
    acc inputs.Accumulator,
    acc telegraf.Accumulator,
    url string,
) error {
    resp, err := http.Get(url)
@@ -130,7 +131,7 @@ func (i *InfluxDB) gatherURL(
        p.Tags["url"] = url

        acc.AddFields(
            p.Name,
            "influxdb_"+p.Name,
            p.Values,
            p.Tags,
        )
@@ -140,7 +141,7 @@ func (i *InfluxDB) gatherURL(
}

func init() {
    inputs.Add("influxdb", func() inputs.Input {
    inputs.Add("influxdb", func() telegraf.Input {
        return &InfluxDB{}
    })
}

@@ -5,8 +5,8 @@ import (
    "net/http/httptest"
    "testing"

    "github.com/influxdb/telegraf/plugins/inputs/influxdb"
    "github.com/influxdb/telegraf/testutil"
    "github.com/influxdata/telegraf/plugins/inputs/influxdb"
    "github.com/influxdata/telegraf/testutil"
    "github.com/stretchr/testify/require"
)

@@ -71,7 +71,7 @@ func TestBasic(t *testing.T) {
    var acc testutil.Accumulator
    require.NoError(t, plugin.Gather(&acc))

    require.Len(t, acc.Points, 2)
    require.Len(t, acc.Metrics, 2)
    fields := map[string]interface{}{
        // JSON will truncate floats to integer representations.
        // Since there's no distinction in JSON, we can't assume it's an int.
@@ -84,7 +84,7 @@ func TestBasic(t *testing.T) {
        "id":  "ex1",
        "url": fakeServer.URL + "/endpoint",
    }
    acc.AssertContainsTaggedFields(t, "foo", fields, tags)
    acc.AssertContainsTaggedFields(t, "influxdb_foo", fields, tags)

    fields = map[string]interface{}{
        "x": "x",
@@ -93,5 +93,5 @@ func TestBasic(t *testing.T) {
        "id":  "ex2",
        "url": fakeServer.URL + "/endpoint",
    }
    acc.AssertContainsTaggedFields(t, "bar", fields, tags)
    acc.AssertContainsTaggedFields(t, "influxdb_bar", fields, tags)
}

@@ -8,7 +8,8 @@ import (
    "net/http"
    "net/url"

    "github.com/influxdb/telegraf/plugins/inputs"
    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/plugins/inputs"
)

type Server struct {
@@ -108,7 +109,7 @@ func (j *Jolokia) getAttr(requestUrl *url.URL) (map[string]interface{}, error) {
    return jsonOut, nil
}

func (j *Jolokia) Gather(acc inputs.Accumulator) error {
func (j *Jolokia) Gather(acc telegraf.Accumulator) error {
    context := j.Context //"/jolokia/read"
    servers := j.Servers
    metrics := j.Metrics
@@ -157,7 +158,7 @@ func (j *Jolokia) Gather(acc inputs.Accumulator) error {
}

func init() {
    inputs.Add("jolokia", func() inputs.Input {
    inputs.Add("jolokia", func() telegraf.Input {
        return &Jolokia{jClient: &JolokiaClientImpl{client: &http.Client{}}}
    })
}

@@ -7,7 +7,7 @@ import (
    "strings"
    "testing"

    "github.com/influxdb/telegraf/testutil"
    "github.com/influxdata/telegraf/testutil"
    "github.com/stretchr/testify/assert"
    _ "github.com/stretchr/testify/require"
)
@@ -85,7 +85,7 @@ func TestHttpJsonMultiValue(t *testing.T) {
    err := jolokia.Gather(&acc)

    assert.Nil(t, err)
    assert.Equal(t, 1, len(acc.Points))
    assert.Equal(t, 1, len(acc.Metrics))

    fields := map[string]interface{}{
        "heap_memory_usage_init": 67108864.0,
@@ -112,5 +112,5 @@ func TestHttpJsonOn404(t *testing.T) {
    err := jolokia.Gather(&acc)

    assert.Nil(t, err)
    assert.Equal(t, 0, len(acc.Points))
    assert.Equal(t, 0, len(acc.Metrics))
}

@@ -5,8 +5,8 @@ import (
    "strings"
    "sync"

    "github.com/influxdb/influxdb/models"
    "github.com/influxdb/telegraf/plugins/inputs"
    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/plugins/inputs"

    "github.com/Shopify/sarama"
    "github.com/wvanbergen/kafka/consumergroup"
@@ -27,8 +27,8 @@ type Kafka struct {
    // channel for all kafka consumer errors
    errs <-chan *sarama.ConsumerError
    // channel for all incoming parsed kafka points
    pointChan chan models.Point
    done      chan struct{}
    metricC chan telegraf.Metric
    done    chan struct{}

    // doNotCommitMsgs tells the parser not to call CommitUpTo on the consumer
    // this is mostly for test purposes, but there may be a use-case for it later.
@@ -93,7 +93,7 @@ func (k *Kafka) Start() error {
    if k.PointBuffer == 0 {
        k.PointBuffer = 100000
    }
    k.pointChan = make(chan models.Point, k.PointBuffer)
    k.metricC = make(chan telegraf.Metric, k.PointBuffer)

    // Start the kafka message reader
    go k.parser()
@@ -112,18 +112,18 @@ func (k *Kafka) parser() {
        case err := <-k.errs:
            log.Printf("Kafka Consumer Error: %s\n", err.Error())
        case msg := <-k.in:
            points, err := models.ParsePoints(msg.Value)
            metrics, err := telegraf.ParseMetrics(msg.Value)
            if err != nil {
                log.Printf("Could not parse kafka message: %s, error: %s",
                    string(msg.Value), err.Error())
            }

            for _, point := range points {
            for _, metric := range metrics {
                select {
                case k.pointChan <- point:
                case k.metricC <- metric:
                    continue
                default:
                    log.Printf("Kafka Consumer buffer is full, dropping a point." +
                    log.Printf("Kafka Consumer buffer is full, dropping a metric." +
                        " You may want to increase the point_buffer setting")
                }
            }
@@ -148,19 +148,19 @@ func (k *Kafka) Stop() {
    }
}

func (k *Kafka) Gather(acc inputs.Accumulator) error {
func (k *Kafka) Gather(acc telegraf.Accumulator) error {
    k.Lock()
    defer k.Unlock()
    npoints := len(k.pointChan)
    npoints := len(k.metricC)
    for i := 0; i < npoints; i++ {
        point := <-k.pointChan
        point := <-k.metricC
        acc.AddFields(point.Name(), point.Fields(), point.Tags(), point.Time())
    }
    return nil
}

func init() {
    inputs.Add("kafka_consumer", func() inputs.Input {
    inputs.Add("kafka_consumer", func() telegraf.Input {
        return &Kafka{}
    })
}

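The parser's select with a default branch is what makes the channel send non-blocking: when metricC is full, the default case runs and the metric is dropped instead of stalling the Kafka read loop. A self-contained sketch of that pattern:

```go
package main

import "fmt"

func main() {
	metricC := make(chan string, 2) // tiny buffer to force a drop

	for _, m := range []string{"m1", "m2", "m3"} {
		select {
		case metricC <- m:
			// buffered successfully
		default:
			// buffer full: drop rather than block, as the parser does
			fmt.Printf("buffer full, dropping %s\n", m) // m3 is dropped
		}
	}
	fmt.Println("buffered:", len(metricC)) // prints 2
}
```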
@@ -6,7 +6,7 @@ import (
    "time"

    "github.com/Shopify/sarama"
    "github.com/influxdb/telegraf/testutil"
    "github.com/influxdata/telegraf/testutil"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)
@@ -51,13 +51,13 @@ func TestReadsMetricsFromKafka(t *testing.T) {
    // Verify that we can now gather the sent message
    var acc testutil.Accumulator
    // Sanity check
    assert.Equal(t, 0, len(acc.Points), "There should not be any points")
    assert.Equal(t, 0, len(acc.Metrics), "There should not be any points")

    // Gather points
    err = k.Gather(&acc)
    require.NoError(t, err)
    if len(acc.Points) == 1 {
        point := acc.Points[0]
    if len(acc.Metrics) == 1 {
        point := acc.Metrics[0]
        assert.Equal(t, "cpu_load_short", point.Measurement)
        assert.Equal(t, map[string]interface{}{"value": 23422.0}, point.Fields)
        assert.Equal(t, map[string]string{
@@ -83,7 +83,7 @@ func waitForPoint(k *Kafka, t *testing.T) {
        counter++
        if counter > 1000 {
            t.Fatal("Waited for 5s, point never arrived to consumer")
        } else if len(k.pointChan) == 1 {
        } else if len(k.metricC) == 1 {
            return
        }
    }

@@ -4,8 +4,8 @@ import (
    "testing"
    "time"

    "github.com/influxdb/influxdb/models"
    "github.com/influxdb/telegraf/testutil"
    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/testutil"

    "github.com/Shopify/sarama"
    "github.com/stretchr/testify/assert"
@@ -29,7 +29,7 @@ func NewTestKafka() (*Kafka, chan *sarama.ConsumerMessage) {
        doNotCommitMsgs: true,
        errs:            make(chan *sarama.ConsumerError, pointBuffer),
        done:            make(chan struct{}),
        pointChan:       make(chan models.Point, pointBuffer),
        metricC:         make(chan telegraf.Metric, pointBuffer),
    }
    return &k, in
}
@@ -43,7 +43,7 @@ func TestRunParser(t *testing.T) {
    in <- saramaMsg(testMsg)
    time.Sleep(time.Millisecond)

    assert.Equal(t, len(k.pointChan), 1)
    assert.Equal(t, len(k.metricC), 1)
}

// Test that the parser ignores invalid messages
@@ -55,7 +55,7 @@ func TestRunParserInvalidMsg(t *testing.T) {
    in <- saramaMsg(invalidMsg)
    time.Sleep(time.Millisecond)

    assert.Equal(t, len(k.pointChan), 0)
    assert.Equal(t, len(k.metricC), 0)
}

// Test that points are dropped when we hit the buffer limit
@@ -69,7 +69,7 @@ func TestRunParserRespectsBuffer(t *testing.T) {
    }
    time.Sleep(time.Millisecond)

    assert.Equal(t, len(k.pointChan), 5)
    assert.Equal(t, len(k.metricC), 5)
}

// Test that the parser parses kafka messages into points
@@ -84,7 +84,7 @@ func TestRunParserAndGather(t *testing.T) {
    acc := testutil.Accumulator{}
    k.Gather(&acc)

    assert.Equal(t, len(acc.Points), 1)
    assert.Equal(t, len(acc.Metrics), 1)
    acc.AssertContainsFields(t, "cpu_load_short",
        map[string]interface{}{"value": float64(23422)})
}

@@ -3,7 +3,8 @@ package leofs
import (
    "bufio"
    "fmt"
    "github.com/influxdb/telegraf/plugins/inputs"
    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/plugins/inputs"
    "net/url"
    "os/exec"
    "strconv"
@@ -146,7 +147,7 @@ func (l *LeoFS) Description() string {
    return "Read metrics from a LeoFS Server via SNMP"
}

func (l *LeoFS) Gather(acc inputs.Accumulator) error {
func (l *LeoFS) Gather(acc telegraf.Accumulator) error {
    if len(l.Servers) == 0 {
        l.gatherServer(defaultEndpoint, ServerTypeManagerMaster, acc)
        return nil
@@ -176,7 +177,7 @@ func (l *LeoFS) Gather(acc inputs.Accumulator) error {
    return outerr
}

func (l *LeoFS) gatherServer(endpoint string, serverType ServerType, acc inputs.Accumulator) error {
func (l *LeoFS) gatherServer(endpoint string, serverType ServerType, acc telegraf.Accumulator) error {
    cmd := exec.Command("snmpwalk", "-v2c", "-cpublic", endpoint, oid)
    stdout, err := cmd.StdoutPipe()
    if err != nil {
@@ -225,7 +226,7 @@ func retrieveTokenAfterColon(line string) (string, error) {
}

func init() {
    inputs.Add("leofs", func() inputs.Input {
    inputs.Add("leofs", func() telegraf.Input {
        return &LeoFS{}
    })
}

@@ -1,7 +1,7 @@
package leofs

import (
    "github.com/influxdb/telegraf/testutil"
    "github.com/influxdata/telegraf/testutil"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
    "io/ioutil"

@@ -13,8 +13,9 @@ import (
    "strconv"
    "strings"

    "github.com/influxdb/telegraf/internal"
    "github.com/influxdb/telegraf/plugins/inputs"
    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/internal"
    "github.com/influxdata/telegraf/plugins/inputs"
)

// Lustre proc files can change between versions, so we want to future-proof
@@ -129,7 +130,7 @@ var wanted_mds_fields = []*mapping{
    },
}

func (l *Lustre2) GetLustreProcStats(fileglob string, wanted_fields []*mapping, acc inputs.Accumulator) error {
func (l *Lustre2) GetLustreProcStats(fileglob string, wanted_fields []*mapping, acc telegraf.Accumulator) error {
    files, err := filepath.Glob(fileglob)
    if err != nil {
        return err
@@ -193,7 +194,7 @@ func (l *Lustre2) Description() string {
}

// Gather reads stats from all lustre targets
func (l *Lustre2) Gather(acc inputs.Accumulator) error {
func (l *Lustre2) Gather(acc telegraf.Accumulator) error {
    l.allFields = make(map[string]map[string]interface{})

    if len(l.Ost_procfiles) == 0 {
@@ -244,7 +245,7 @@ func (l *Lustre2) Gather(acc inputs.Accumulator) error {
}

func init() {
    inputs.Add("lustre2", func() inputs.Input {
    inputs.Add("lustre2", func() telegraf.Input {
        return &Lustre2{}
    })
}

@@ -5,7 +5,7 @@ import (
    "os"
    "testing"

    "github.com/influxdb/telegraf/testutil"
    "github.com/influxdata/telegraf/testutil"
    "github.com/stretchr/testify/require"
)

@@ -4,7 +4,8 @@ import (
    "fmt"
    "time"

    "github.com/influxdb/telegraf/plugins/inputs"
    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/plugins/inputs"
)

type MailChimp struct {
@@ -34,7 +35,7 @@ func (m *MailChimp) Description() string {
    return "Gathers metrics from the /3.0/reports MailChimp API"
}

func (m *MailChimp) Gather(acc inputs.Accumulator) error {
func (m *MailChimp) Gather(acc telegraf.Accumulator) error {
    if m.api == nil {
        m.api = NewChimpAPI(m.ApiKey)
    }
@@ -71,7 +72,7 @@ func (m *MailChimp) Gather(acc inputs.Accumulator) error {
    return nil
}

func gatherReport(acc inputs.Accumulator, report Report, now time.Time) {
func gatherReport(acc telegraf.Accumulator, report Report, now time.Time) {
    tags := make(map[string]string)
    tags["id"] = report.ID
    tags["campaign_title"] = report.CampaignTitle
@@ -110,7 +111,7 @@ func gatherReport(acc inputs.Accumulator, report Report, now time.Time) {
}

func init() {
    inputs.Add("mailchimp", func() inputs.Input {
    inputs.Add("mailchimp", func() telegraf.Input {
        return &MailChimp{}
    })
}

@@ -7,7 +7,7 @@ import (
    "net/url"
    "testing"

    "github.com/influxdb/telegraf/testutil"
    "github.com/influxdata/telegraf/testutil"

    "github.com/stretchr/testify/require"
)

@@ -8,7 +8,8 @@ import (
    "strconv"
    "time"

    "github.com/influxdb/telegraf/plugins/inputs"
    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/plugins/inputs"
)

// Memcached is a memcached plugin
@@ -69,7 +70,7 @@ func (m *Memcached) Description() string {
}

// Gather reads stats from all configured servers accumulates stats
func (m *Memcached) Gather(acc inputs.Accumulator) error {
func (m *Memcached) Gather(acc telegraf.Accumulator) error {
    if len(m.Servers) == 0 && len(m.UnixSockets) == 0 {
        return m.gatherServer(":11211", false, acc)
    }
@@ -92,7 +93,7 @@ func (m *Memcached) Gather(acc inputs.Accumulator) error {
func (m *Memcached) gatherServer(
    address string,
    unix bool,
    acc inputs.Accumulator,
    acc telegraf.Accumulator,
) error {
    var conn net.Conn
    if unix {
@@ -178,7 +179,7 @@ func parseResponse(r *bufio.Reader) (map[string]string, error) {
}

func init() {
    inputs.Add("memcached", func() inputs.Input {
    inputs.Add("memcached", func() telegraf.Input {
        return &Memcached{}
    })
}

@@ -5,7 +5,7 @@ import (
    "strings"
    "testing"

    "github.com/influxdb/telegraf/testutil"
    "github.com/influxdata/telegraf/testutil"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

@@ -1,12 +1,16 @@
package inputs

import "github.com/stretchr/testify/mock"
import (
    "github.com/influxdata/telegraf"

    "github.com/stretchr/testify/mock"
)

type MockPlugin struct {
    mock.Mock
}

func (m *MockPlugin) Gather(_a0 Accumulator) error {
func (m *MockPlugin) Gather(_a0 telegraf.Accumulator) error {
    ret := m.Called(_a0)

    r0 := ret.Error(0)

@@ -9,7 +9,8 @@ import (
    "sync"
    "time"

    "github.com/influxdb/telegraf/plugins/inputs"
    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/plugins/inputs"
    "gopkg.in/mgo.v2"
)

@@ -45,7 +46,7 @@ var localhost = &url.URL{Host: "127.0.0.1:27017"}

// Reads stats from all configured servers accumulates stats.
// Returns one of the errors encountered while gather stats (if any).
func (m *MongoDB) Gather(acc inputs.Accumulator) error {
func (m *MongoDB) Gather(acc telegraf.Accumulator) error {
    if len(m.Servers) == 0 {
        m.gatherServer(m.getMongoServer(localhost), acc)
        return nil
@@ -88,7 +89,7 @@ func (m *MongoDB) getMongoServer(url *url.URL) *Server {
    return m.mongos[url.Host]
}

func (m *MongoDB) gatherServer(server *Server, acc inputs.Accumulator) error {
func (m *MongoDB) gatherServer(server *Server, acc telegraf.Accumulator) error {
    if server.Session == nil {
        var dialAddrs []string
        if server.Url.User != nil {
@@ -138,7 +139,7 @@ func (m *MongoDB) gatherServer(server *Server, acc inputs.Accumulator) error {
}

func init() {
    inputs.Add("mongodb", func() inputs.Input {
    inputs.Add("mongodb", func() telegraf.Input {
        return &MongoDB{
            mongos: make(map[string]*Server),
        }

@@ -5,7 +5,7 @@ import (
    "reflect"
    "strconv"

    "github.com/influxdb/telegraf/plugins/inputs"
    "github.com/influxdata/telegraf"
)

type MongodbData struct {
@@ -97,7 +97,7 @@ func (d *MongodbData) add(key string, val interface{}) {
    d.Fields[key] = val
}

func (d *MongodbData) flush(acc inputs.Accumulator) {
func (d *MongodbData) flush(acc telegraf.Accumulator) {
    acc.AddFields(
        "mongodb",
        d.Fields,

@@ -4,7 +4,7 @@ import (
    "testing"
    "time"

    "github.com/influxdb/telegraf/testutil"
    "github.com/influxdata/telegraf/testutil"
    "github.com/stretchr/testify/assert"
)

@@ -4,7 +4,7 @@ import (
    "net/url"
    "time"

    "github.com/influxdb/telegraf/plugins/inputs"
    "github.com/influxdata/telegraf"
    "gopkg.in/mgo.v2"
    "gopkg.in/mgo.v2/bson"
)
@@ -21,7 +21,7 @@ func (s *Server) getDefaultTags() map[string]string {
    return tags
}

func (s *Server) gatherData(acc inputs.Accumulator) error {
func (s *Server) gatherData(acc telegraf.Accumulator) error {
    s.Session.SetMode(mgo.Eventual, true)
    s.Session.SetSocketTimeout(0)
    result := &ServerStatus{}

@@ -6,7 +6,7 @@ import (
    "testing"
    "time"

    "github.com/influxdb/telegraf/testutil"
    "github.com/influxdata/telegraf/testutil"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

@@ -6,7 +6,8 @@ import (
    "strings"

    _ "github.com/go-sql-driver/mysql"
    "github.com/influxdb/telegraf/plugins/inputs"
    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/plugins/inputs"
)

type Mysql struct {
@@ -35,7 +36,7 @@ func (m *Mysql) Description() string {

var localhost = ""

func (m *Mysql) Gather(acc inputs.Accumulator) error {
func (m *Mysql) Gather(acc telegraf.Accumulator) error {
    if len(m.Servers) == 0 {
        // if we can't get stats in this case, thats fine, don't report
        // an error.
@@ -113,7 +114,7 @@ var mappings = []*mapping{
    },
}

func (m *Mysql) gatherServer(serv string, acc inputs.Accumulator) error {
func (m *Mysql) gatherServer(serv string, acc telegraf.Accumulator) error {
    // If user forgot the '/', add it
    if strings.HasSuffix(serv, ")") {
        serv = serv + "/"
@@ -207,7 +208,7 @@ func (m *Mysql) gatherServer(serv string, acc inputs.Accumulator) error {
}

func init() {
    inputs.Add("mysql", func() inputs.Input {
    inputs.Add("mysql", func() telegraf.Input {
        return &Mysql{}
    })
}

@@ -4,7 +4,7 @@ import (
    "fmt"
    "testing"

    "github.com/influxdb/telegraf/testutil"
    "github.com/influxdata/telegraf/testutil"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

@@ -11,7 +11,8 @@ import (
    "sync"
    "time"

    "github.com/influxdb/telegraf/plugins/inputs"
    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/plugins/inputs"
)

type Nginx struct {
@@ -31,7 +32,7 @@ func (n *Nginx) Description() string {
    return "Read Nginx's basic status information (ngx_http_stub_status_module)"
}

func (n *Nginx) Gather(acc inputs.Accumulator) error {
func (n *Nginx) Gather(acc telegraf.Accumulator) error {
    var wg sync.WaitGroup
    var outerr error

@@ -59,7 +60,7 @@ var tr = &http.Transport{

var client = &http.Client{Transport: tr}

func (n *Nginx) gatherUrl(addr *url.URL, acc inputs.Accumulator) error {
func (n *Nginx) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error {
    resp, err := client.Get(addr.String())
    if err != nil {
        return fmt.Errorf("error making HTTP request to %s: %s", addr.String(), err)
@@ -159,7 +160,7 @@ func getTags(addr *url.URL) map[string]string {
}

func init() {
    inputs.Add("nginx", func() inputs.Input {
    inputs.Add("nginx", func() telegraf.Input {
        return &Nginx{}
    })
}

@@ -8,7 +8,7 @@ import (
    "net/url"
    "testing"

    "github.com/influxdb/telegraf/testutil"
    "github.com/influxdata/telegraf/testutil"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

272 plugins/inputs/nsq/nsq.go (Normal file)
@@ -0,0 +1,272 @@
// The MIT License (MIT)
//
// Copyright (c) 2015 Jeff Nickoloff (jeff@allingeek.com)
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.

package nsq

import (
    "encoding/json"
    "fmt"
    "net/http"
    "net/url"
    "strconv"
    "sync"
    "time"

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/plugins/inputs"
)

// Might add Lookupd endpoints for cluster discovery
type NSQ struct {
    Endpoints []string
}

var sampleConfig = `
  # An array of NSQD HTTP API endpoints
  endpoints = ["http://localhost:4151"]
`

const (
    requestPattern = `%s/stats?format=json`
)

func init() {
    inputs.Add("nsq", func() telegraf.Input {
        return &NSQ{}
    })
}

func (n *NSQ) SampleConfig() string {
    return sampleConfig
}

func (n *NSQ) Description() string {
    return "Read NSQ topic and channel statistics."
}

func (n *NSQ) Gather(acc telegraf.Accumulator) error {
    var wg sync.WaitGroup
    var outerr error

    for _, e := range n.Endpoints {
        wg.Add(1)
        go func(e string) {
            defer wg.Done()
            outerr = n.gatherEndpoint(e, acc)
        }(e)
    }

    wg.Wait()

    return outerr
}

var tr = &http.Transport{
    ResponseHeaderTimeout: time.Duration(3 * time.Second),
}

var client = &http.Client{Transport: tr}

func (n *NSQ) gatherEndpoint(e string, acc telegraf.Accumulator) error {
    u, err := buildURL(e)
    if err != nil {
        return err
    }
    r, err := client.Get(u.String())
    if err != nil {
        return fmt.Errorf("Error while polling %s: %s", u.String(), err)
    }
    defer r.Body.Close()

    if r.StatusCode != http.StatusOK {
        return fmt.Errorf("%s returned HTTP status %s", u.String(), r.Status)
    }

    s := &NSQStats{}
    err = json.NewDecoder(r.Body).Decode(s)
    if err != nil {
        return fmt.Errorf(`Error parsing response: %s`, err)
    }

    tags := map[string]string{
        `server_host`:    u.Host,
        `server_version`: s.Data.Version,
    }

    fields := make(map[string]interface{})
    if s.Data.Health == `OK` {
        fields["server_count"] = int64(1)
    } else {
        fields["server_count"] = int64(0)
    }
    fields["topic_count"] = int64(len(s.Data.Topics))

    acc.AddFields("nsq_server", fields, tags)
    for _, t := range s.Data.Topics {
        topicStats(t, acc, u.Host, s.Data.Version)
    }

    return nil
}

func buildURL(e string) (*url.URL, error) {
    u := fmt.Sprintf(requestPattern, e)
    addr, err := url.Parse(u)
    if err != nil {
        return nil, fmt.Errorf("Unable to parse address '%s': %s", u, err)
    }
    return addr, nil
}

func topicStats(t TopicStats, acc telegraf.Accumulator, host, version string) {
    // per topic overall (tag: name, paused, channel count)
    tags := map[string]string{
        "server_host":    host,
        "server_version": version,
        "topic":          t.Name,
    }

    fields := map[string]interface{}{
        "depth":         t.Depth,
        "backend_depth": t.BackendDepth,
        "message_count": t.MessageCount,
        "channel_count": int64(len(t.Channels)),
    }
    acc.AddFields("nsq_topic", fields, tags)

    for _, c := range t.Channels {
        channelStats(c, acc, host, version, t.Name)
    }
}

func channelStats(c ChannelStats, acc telegraf.Accumulator, host, version, topic string) {
    tags := map[string]string{
        "server_host":    host,
        "server_version": version,
        "topic":          topic,
        "channel":        c.Name,
    }

    fields := map[string]interface{}{
        "depth":          c.Depth,
        "backend_depth":  c.BackendDepth,
        "inflight_count": c.InFlightCount,
        "deferred_count": c.DeferredCount,
        "message_count":  c.MessageCount,
        "requeue_count":  c.RequeueCount,
        "timeout_count":  c.TimeoutCount,
        "client_count":   int64(len(c.Clients)),
    }

    acc.AddFields("nsq_channel", fields, tags)
    for _, cl := range c.Clients {
        clientStats(cl, acc, host, version, topic, c.Name)
    }
}

func clientStats(c ClientStats, acc telegraf.Accumulator, host, version, topic, channel string) {
    tags := map[string]string{
        "server_host":       host,
        "server_version":    version,
        "topic":             topic,
        "channel":           channel,
        "client_name":       c.Name,
        "client_id":         c.ID,
        "client_hostname":   c.Hostname,
        "client_version":    c.Version,
        "client_address":    c.RemoteAddress,
        "client_user_agent": c.UserAgent,
        "client_tls":        strconv.FormatBool(c.TLS),
        "client_snappy":     strconv.FormatBool(c.Snappy),
        "client_deflate":    strconv.FormatBool(c.Deflate),
    }

    fields := map[string]interface{}{
        "ready_count":    c.ReadyCount,
        "inflight_count": c.InFlightCount,
        "message_count":  c.MessageCount,
        "finish_count":   c.FinishCount,
        "requeue_count":  c.RequeueCount,
    }
    acc.AddFields("nsq_client", fields, tags)
}

type NSQStats struct {
    Code int64        `json:"status_code"`
    Txt  string       `json:"status_txt"`
    Data NSQStatsData `json:"data"`
}

type NSQStatsData struct {
    Version   string       `json:"version"`
    Health    string       `json:"health"`
    StartTime int64        `json:"start_time"`
    Topics    []TopicStats `json:"topics"`
}

// e2e_processing_latency is not modeled
type TopicStats struct {
    Name         string         `json:"topic_name"`
    Depth        int64          `json:"depth"`
    BackendDepth int64          `json:"backend_depth"`
    MessageCount int64          `json:"message_count"`
    Paused       bool           `json:"paused"`
    Channels     []ChannelStats `json:"channels"`
}

// e2e_processing_latency is not modeled
type ChannelStats struct {
    Name          string        `json:"channel_name"`
    Depth         int64         `json:"depth"`
    BackendDepth  int64         `json:"backend_depth"`
    InFlightCount int64         `json:"in_flight_count"`
    DeferredCount int64         `json:"deferred_count"`
    MessageCount  int64         `json:"message_count"`
    RequeueCount  int64         `json:"requeue_count"`
    TimeoutCount  int64         `json:"timeout_count"`
    Paused        bool          `json:"paused"`
    Clients       []ClientStats `json:"clients"`
}

type ClientStats struct {
    Name                          string `json:"name"`
    ID                            string `json:"client_id"`
    Hostname                      string `json:"hostname"`
    Version                       string `json:"version"`
    RemoteAddress                 string `json:"remote_address"`
    State                         int64  `json:"state"`
    ReadyCount                    int64  `json:"ready_count"`
    InFlightCount                 int64  `json:"in_flight_count"`
    MessageCount                  int64  `json:"message_count"`
    FinishCount                   int64  `json:"finish_count"`
    RequeueCount                  int64  `json:"requeue_count"`
    ConnectTime                   int64  `json:"connect_ts"`
    SampleRate                    int64  `json:"sample_rate"`
    Deflate                       bool   `json:"deflate"`
    Snappy                        bool   `json:"snappy"`
    UserAgent                     string `json:"user_agent"`
    TLS                           bool   `json:"tls"`
    TLSCipherSuite                string `json:"tls_cipher_suite"`
    TLSVersion                    string `json:"tls_version"`
    TLSNegotiatedProtocol         string `json:"tls_negotiated_protocol"`
    TLSNegotiatedProtocolIsMutual bool   `json:"tls_negotiated_protocol_is_mutual"`
}
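For reference, how requestPattern expands: buildURL simply formats the configured endpoint into the nsqd stats URL. A quick standalone check of that expansion:

```go
package main

import (
	"fmt"
	"net/url"
)

const requestPattern = `%s/stats?format=json`

func main() {
	// Same formatting step buildURL performs for each configured endpoint.
	u, err := url.Parse(fmt.Sprintf(requestPattern, "http://localhost:4151"))
	if err != nil {
		panic(err)
	}
	fmt.Println(u.String()) // http://localhost:4151/stats?format=json
}
```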
273 plugins/inputs/nsq/nsq_test.go (Normal file)
@@ -0,0 +1,273 @@
package nsq

import (
    "fmt"
    "net/http"
    "net/http/httptest"
    "net/url"
    "testing"

    "github.com/influxdata/telegraf/testutil"

    "github.com/stretchr/testify/require"
)

func TestNSQStats(t *testing.T) {
    ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        w.WriteHeader(http.StatusOK)
        fmt.Fprintln(w, response)
    }))
    defer ts.Close()

    n := &NSQ{
        Endpoints: []string{ts.URL},
    }

    var acc testutil.Accumulator
    err := n.Gather(&acc)
    require.NoError(t, err)

    u, err := url.Parse(ts.URL)
    require.NoError(t, err)
    host := u.Host

    // actually validate the tests
    tests := []struct {
        m string
        f map[string]interface{}
        g map[string]string
    }{
        {
            "nsq_server",
            map[string]interface{}{
                "server_count": int64(1),
                "topic_count":  int64(2),
            },
            map[string]string{
                "server_host":    host,
                "server_version": "0.3.6",
            },
        },
        {
            "nsq_topic",
            map[string]interface{}{
                "depth":         int64(12),
                "backend_depth": int64(13),
                "message_count": int64(14),
                "channel_count": int64(1),
            },
            map[string]string{
                "server_host":    host,
                "server_version": "0.3.6",
                "topic":          "t1"},
        },
        {
            "nsq_channel",
            map[string]interface{}{
                "depth":          int64(0),
                "backend_depth":  int64(1),
                "inflight_count": int64(2),
                "deferred_count": int64(3),
                "message_count":  int64(4),
                "requeue_count":  int64(5),
                "timeout_count":  int64(6),
                "client_count":   int64(1),
            },
            map[string]string{
                "server_host":    host,
                "server_version": "0.3.6",
                "topic":          "t1",
                "channel":        "c1",
            },
        },
        {
            "nsq_client",
            map[string]interface{}{
                "ready_count":    int64(200),
                "inflight_count": int64(7),
                "message_count":  int64(8),
                "finish_count":   int64(9),
                "requeue_count":  int64(10),
            },
            map[string]string{"server_host": host, "server_version": "0.3.6",
                "topic": "t1", "channel": "c1", "client_name": "373a715cd990",
                "client_id": "373a715cd990", "client_hostname": "373a715cd990",
                "client_version": "V2", "client_address": "172.17.0.11:35560",
                "client_tls": "false", "client_snappy": "false",
                "client_deflate":    "false",
                "client_user_agent": "nsq_to_nsq/0.3.6 go-nsq/1.0.5"},
        },
        {
            "nsq_topic",
            map[string]interface{}{
                "depth":         int64(28),
                "backend_depth": int64(29),
                "message_count": int64(30),
                "channel_count": int64(1),
            },
            map[string]string{
                "server_host":    host,
                "server_version": "0.3.6",
                "topic":          "t2"},
        },
        {
            "nsq_channel",
            map[string]interface{}{
                "depth":          int64(15),
                "backend_depth":  int64(16),
                "inflight_count": int64(17),
                "deferred_count": int64(18),
                "message_count":  int64(19),
                "requeue_count":  int64(20),
                "timeout_count":  int64(21),
                "client_count":   int64(1),
            },
            map[string]string{
                "server_host":    host,
                "server_version": "0.3.6",
                "topic":          "t2",
                "channel":        "c2",
            },
        },
        {
            "nsq_client",
            map[string]interface{}{
                "ready_count":    int64(22),
                "inflight_count": int64(23),
                "message_count":  int64(24),
                "finish_count":   int64(25),
                "requeue_count":  int64(26),
            },
            map[string]string{"server_host": host, "server_version": "0.3.6",
                "topic": "t2", "channel": "c2", "client_name": "377569bd462b",
                "client_id": "377569bd462b", "client_hostname": "377569bd462b",
                "client_version": "V2", "client_address": "172.17.0.8:48145",
                "client_user_agent": "go-nsq/1.0.5", "client_tls": "true",
                "client_snappy": "true", "client_deflate": "true"},
        },
    }

    for _, test := range tests {
        acc.AssertContainsTaggedFields(t, test.m, test.f, test.g)
    }
}

var response = `
{
  "status_code": 200,
  "status_txt": "OK",
  "data": {
    "version": "0.3.6",
    "health": "OK",
    "start_time": 1452021674,
    "topics": [
      {
        "topic_name": "t1",
        "channels": [
          {
            "channel_name": "c1",
            "depth": 0,
            "backend_depth": 1,
            "in_flight_count": 2,
            "deferred_count": 3,
            "message_count": 4,
            "requeue_count": 5,
            "timeout_count": 6,
            "clients": [
              {
"name": "373a715cd990",
|
||||
"client_id": "373a715cd990",
|
||||
"hostname": "373a715cd990",
|
||||
"version": "V2",
|
||||
"remote_address": "172.17.0.11:35560",
|
||||
"state": 3,
|
||||
"ready_count": 200,
|
||||
"in_flight_count": 7,
|
||||
"message_count": 8,
|
||||
"finish_count": 9,
|
||||
"requeue_count": 10,
|
||||
"connect_ts": 1452021675,
|
||||
"sample_rate": 11,
|
||||
"deflate": false,
|
||||
"snappy": false,
|
||||
"user_agent": "nsq_to_nsq\/0.3.6 go-nsq\/1.0.5",
|
||||
"tls": false,
|
||||
"tls_cipher_suite": "",
|
||||
"tls_version": "",
|
||||
"tls_negotiated_protocol": "",
|
||||
"tls_negotiated_protocol_is_mutual": false
|
||||
}
|
||||
],
|
||||
"paused": false,
|
||||
"e2e_processing_latency": {
|
||||
"count": 0,
|
||||
"percentiles": null
|
||||
}
|
||||
}
|
||||
],
|
||||
"depth": 12,
|
||||
"backend_depth": 13,
|
||||
"message_count": 14,
|
||||
"paused": false,
|
||||
"e2e_processing_latency": {
|
||||
"count": 0,
|
||||
"percentiles": null
|
||||
}
|
||||
},
|
||||
{
|
||||
"topic_name": "t2",
|
||||
"channels": [
|
||||
{
|
||||
"channel_name": "c2",
|
||||
"depth": 15,
|
||||
"backend_depth": 16,
|
||||
"in_flight_count": 17,
|
||||
"deferred_count": 18,
|
||||
"message_count": 19,
|
||||
"requeue_count": 20,
|
||||
"timeout_count": 21,
|
||||
"clients": [
|
||||
{
|
||||
"name": "377569bd462b",
|
||||
"client_id": "377569bd462b",
|
||||
"hostname": "377569bd462b",
|
||||
"version": "V2",
|
||||
"remote_address": "172.17.0.8:48145",
|
||||
"state": 3,
|
||||
"ready_count": 22,
|
||||
"in_flight_count": 23,
|
||||
"message_count": 24,
|
||||
"finish_count": 25,
|
||||
"requeue_count": 26,
|
||||
"connect_ts": 1452021678,
|
||||
"sample_rate": 27,
|
||||
"deflate": true,
|
||||
"snappy": true,
|
||||
"user_agent": "go-nsq\/1.0.5",
|
||||
"tls": true,
|
||||
"tls_cipher_suite": "",
|
||||
"tls_version": "",
|
||||
"tls_negotiated_protocol": "",
|
||||
"tls_negotiated_protocol_is_mutual": false
|
||||
}
|
||||
],
|
||||
"paused": false,
|
||||
"e2e_processing_latency": {
|
||||
"count": 0,
|
||||
"percentiles": null
|
||||
}
|
||||
}
|
||||
],
|
||||
"depth": 28,
|
||||
"backend_depth": 29,
|
||||
"message_count": 30,
|
||||
"paused": false,
|
||||
"e2e_processing_latency": {
|
||||
"count": 0,
|
||||
"percentiles": null
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
`
|
||||
138 plugins/inputs/passenger/README.md Normal file
@@ -0,0 +1,138 @@
# Telegraf plugin: passenger

Gathers Phusion Passenger stats using its command line utility
`passenger-status`.

# Measurements

Meta:

- tags:

  * name
  * passenger_version
  * pid
  * code_revision

Measurement names:

- passenger:

  * Tags: `passenger_version`
  * Fields:

    - process_count
    - max
    - capacity_used
    - get_wait_list_size

- passenger_supergroup:

  * Tags: `name`
  * Fields:

    - get_wait_list_size
    - capacity_used

- passenger_group:

  * Tags:

    - name
    - app_root
    - app_type

  * Fields:

    - get_wait_list_size
    - capacity_used
    - processes_being_spawned

- passenger_process:

  * Tags:

    - group_name
    - app_root
    - supergroup_name
    - pid
    - code_revision
    - life_status
    - process_group_id

  * Fields:

    - concurrency
    - sessions
    - busyness
    - processed
    - spawner_creation_time
    - spawn_start_time
    - spawn_end_time
    - last_used
    - uptime
    - cpu
    - rss
    - pss
    - private_dirty
    - swap
    - real_memory
    - vmsize

# Example output

Using this configuration:

```
[[inputs.passenger]]
  # Path of passenger-status.
  #
  # The plugin gathers metrics by parsing the XML output of passenger-status.
  # More information about the tool:
  #   https://www.phusionpassenger.com/library/admin/apache/overall_status_report.html
  #
  # If no path is specified, then the plugin simply executes passenger-status,
  # which must be found in your PATH.
  command = "passenger-status -v --show=xml"
```

When run with:

```
./telegraf -config telegraf.conf -test -input-filter passenger
```

It produces:

```
> passenger,passenger_version=5.0.17 capacity_used=23i,get_wait_list_size=0i,max=23i,process_count=23i 1452984112799414257
> passenger_supergroup,name=/var/app/current/public capacity_used=23i,get_wait_list_size=0i 1452984112799496977
> passenger_group,app_root=/var/app/current,app_type=rack,name=/var/app/current/public capacity_used=23i,get_wait_list_size=0i,processes_being_spawned=0i 1452984112799527021
> passenger_process,app_root=/var/app/current,code_revision=899ac7f,group_name=/var/app/current/public,life_status=ALIVE,pid=11553,process_group_id=13608,supergroup_name=/var/app/current/public busyness=0i,concurrency=1i,cpu=58i,last_used=1452747071764940i,private_dirty=314900i,processed=951i,pss=319391i,real_memory=314900i,rss=418548i,sessions=0i,spawn_end_time=1452746845013365i,spawn_start_time=1452746844946982i,spawner_creation_time=1452746835922747i,swap=0i,uptime=226i,vmsize=1563580i 1452984112799571490
> passenger_process,app_root=/var/app/current,code_revision=899ac7f,group_name=/var/app/current/public,life_status=ALIVE,pid=11563,process_group_id=13608,supergroup_name=/var/app/current/public busyness=2147483647i,concurrency=1i,cpu=47i,last_used=1452747071709179i,private_dirty=309240i,processed=756i,pss=314036i,real_memory=309240i,rss=418296i,sessions=1i,spawn_end_time=1452746845172460i,spawn_start_time=1452746845136882i,spawner_creation_time=1452746835922747i,swap=0i,uptime=226i,vmsize=1563608i 1452984112799638581
```

# Note

You have to ensure that the `passenger-status` command can be run by the
telegraf user. Depending on how you installed and configured Passenger, this
may be an issue. If you run Passenger standalone or compiled it yourself, it
is straightforward; if you installed it as a gem under `rvm`, it can be harder
to get right.

With `rvm`, for example, you can use this command:

```
~/.rvm/bin/rvm default do passenger-status -v --show=xml
```

You can also use `&&` and `;` in the shell command to chain more complicated
steps, such as loading the rvm shell and sourcing the path, before running
passenger-status:

```
command = "source .rvm/scripts/rvm && passenger-status -v --show=xml"
```

In any case, ensure that the command runs under the `telegraf` user and
produces XML output.
251 plugins/inputs/passenger/passenger.go Normal file
@@ -0,0 +1,251 @@
package passenger

import (
	"bytes"
	"encoding/xml"
	"fmt"
	"os/exec"
	"strconv"
	"strings"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/inputs"
	"golang.org/x/net/html/charset"
)

type passenger struct {
	Command string
}

func (p *passenger) parseCommand() (string, []string) {
	var arguments []string
	if !strings.Contains(p.Command, " ") {
		return p.Command, arguments
	}

	arguments = strings.Split(p.Command, " ")
	return arguments[0], arguments[1:]
}

type info struct {
	Passenger_version  string `xml:"passenger_version"`
	Process_count      int    `xml:"process_count"`
	Capacity_used      int    `xml:"capacity_used"`
	Get_wait_list_size int    `xml:"get_wait_list_size"`
	Max                int    `xml:"max"`
	Supergroups        struct {
		Supergroup []struct {
			Name               string `xml:"name"`
			Get_wait_list_size int    `xml:"get_wait_list_size"`
			Capacity_used      int    `xml:"capacity_used"`
			Group              []struct {
				Name                    string `xml:"name"`
				AppRoot                 string `xml:"app_root"`
				AppType                 string `xml:"app_type"`
				Enabled_process_count   int    `xml:"enabled_process_count"`
				Disabling_process_count int    `xml:"disabling_process_count"`
				Disabled_process_count  int    `xml:"disabled_process_count"`
				Capacity_used           int    `xml:"capacity_used"`
				Get_wait_list_size      int    `xml:"get_wait_list_size"`
				Processes_being_spawned int    `xml:"processes_being_spawned"`
				Processes               struct {
					Process []*process `xml:"process"`
				} `xml:"processes"`
			} `xml:"group"`
		} `xml:"supergroup"`
	} `xml:"supergroups"`
}

type process struct {
	Pid                   int    `xml:"pid"`
	Concurrency           int    `xml:"concurrency"`
	Sessions              int    `xml:"sessions"`
	Busyness              int    `xml:"busyness"`
	Processed             int    `xml:"processed"`
	Spawner_creation_time int64  `xml:"spawner_creation_time"`
	Spawn_start_time      int64  `xml:"spawn_start_time"`
	Spawn_end_time        int64  `xml:"spawn_end_time"`
	Last_used             int64  `xml:"last_used"`
	Uptime                string `xml:"uptime"`
	Code_revision         string `xml:"code_revision"`
	Life_status           string `xml:"life_status"`
	Enabled               string `xml:"enabled"`
	Has_metrics           bool   `xml:"has_metrics"`
	Cpu                   int64  `xml:"cpu"`
	Rss                   int64  `xml:"rss"`
	Pss                   int64  `xml:"pss"`
	Private_dirty         int64  `xml:"private_dirty"`
	Swap                  int64  `xml:"swap"`
	Real_memory           int64  `xml:"real_memory"`
	Vmsize                int64  `xml:"vmsize"`
	Process_group_id      string `xml:"process_group_id"`
}

// getUptime converts a passenger-status uptime string such as "3m 46s"
// into a number of seconds.
func (p *process) getUptime() int64 {
	if p.Uptime == "" {
		return 0
	}

	timeSlice := strings.Split(p.Uptime, " ")
	var uptime int64
	for _, v := range timeSlice {
		switch {
		case strings.HasSuffix(v, "d"):
			iValue := strings.TrimSuffix(v, "d")
			value, err := strconv.ParseInt(iValue, 10, 64)
			if err == nil {
				uptime += value * (24 * 60 * 60)
			}
		case strings.HasSuffix(v, "h"):
			// trim the "h" suffix; the original trimmed "y" here, which
			// silently dropped every hour component
			iValue := strings.TrimSuffix(v, "h")
			value, err := strconv.ParseInt(iValue, 10, 64)
			if err == nil {
				uptime += value * (60 * 60)
			}
		case strings.HasSuffix(v, "m"):
			iValue := strings.TrimSuffix(v, "m")
			value, err := strconv.ParseInt(iValue, 10, 64)
			if err == nil {
				uptime += value * 60
			}
		case strings.HasSuffix(v, "s"):
			iValue := strings.TrimSuffix(v, "s")
			value, err := strconv.ParseInt(iValue, 10, 64)
			if err == nil {
				uptime += value
			}
		}
	}

	return uptime
}

var sampleConfig = `
# Path of passenger-status.
#
# The plugin gathers metrics by parsing the XML output of passenger-status.
# More information about the tool:
#   https://www.phusionpassenger.com/library/admin/apache/overall_status_report.html
#
# If no path is specified, then the plugin simply executes passenger-status,
# which must be found in your PATH.
command = "passenger-status -v --show=xml"
`

func (r *passenger) SampleConfig() string {
	return sampleConfig
}

func (r *passenger) Description() string {
	return "Read metrics of passenger using passenger-status"
}

func (g *passenger) Gather(acc telegraf.Accumulator) error {
	if g.Command == "" {
		g.Command = "passenger-status -v --show=xml"
	}

	cmd, args := g.parseCommand()
	out, err := exec.Command(cmd, args...).Output()
	if err != nil {
		return err
	}

	if err = importMetric(out, acc); err != nil {
		return err
	}

	return nil
}

func importMetric(stat []byte, acc telegraf.Accumulator) error {
	var p info

	decoder := xml.NewDecoder(bytes.NewReader(stat))
	decoder.CharsetReader = charset.NewReaderLabel
	if err := decoder.Decode(&p); err != nil {
		return fmt.Errorf("Cannot parse input with error: %v\n", err)
	}

	tags := map[string]string{
		"passenger_version": p.Passenger_version,
	}
	fields := map[string]interface{}{
		"process_count":      p.Process_count,
		"max":                p.Max,
		"capacity_used":      p.Capacity_used,
		"get_wait_list_size": p.Get_wait_list_size,
	}
	acc.AddFields("passenger", fields, tags)

	for _, sg := range p.Supergroups.Supergroup {
		tags := map[string]string{
			"name": sg.Name,
		}
		fields := map[string]interface{}{
			"get_wait_list_size": sg.Get_wait_list_size,
			"capacity_used":      sg.Capacity_used,
		}
		acc.AddFields("passenger_supergroup", fields, tags)

		for _, group := range sg.Group {
			tags := map[string]string{
				"name":     group.Name,
				"app_root": group.AppRoot,
				"app_type": group.AppType,
			}
			fields := map[string]interface{}{
				"get_wait_list_size":      group.Get_wait_list_size,
				"capacity_used":           group.Capacity_used,
				"processes_being_spawned": group.Processes_being_spawned,
			}
			acc.AddFields("passenger_group", fields, tags)

			for _, process := range group.Processes.Process {
				tags := map[string]string{
					"group_name":       group.Name,
					"app_root":         group.AppRoot,
					"supergroup_name":  sg.Name,
					"pid":              fmt.Sprintf("%d", process.Pid),
					"code_revision":    process.Code_revision,
					"life_status":      process.Life_status,
					"process_group_id": process.Process_group_id,
				}
				fields := map[string]interface{}{
					"concurrency":           process.Concurrency,
					"sessions":              process.Sessions,
					"busyness":              process.Busyness,
					"processed":             process.Processed,
					"spawner_creation_time": process.Spawner_creation_time,
					"spawn_start_time":      process.Spawn_start_time,
					"spawn_end_time":        process.Spawn_end_time,
					"last_used":             process.Last_used,
					"uptime":                process.getUptime(),
					"cpu":                   process.Cpu,
					"rss":                   process.Rss,
					"pss":                   process.Pss,
					"private_dirty":         process.Private_dirty,
					"swap":                  process.Swap,
					"real_memory":           process.Real_memory,
					"vmsize":                process.Vmsize,
				}
				acc.AddFields("passenger_process", fields, tags)
			}
		}
	}

	return nil
}

func init() {
	inputs.Add("passenger", func() telegraf.Input {
		return &passenger{}
	})
}
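`getUptime` turns the human-readable uptime that passenger-status reports (e.g. `3m 46s`) into seconds. A quick sketch of that arithmetic, written as a test one could drop into the package's test file (the test name is ours, not from the commit):

```go
func TestProcessGetUptime(t *testing.T) {
	p := &process{Uptime: "1d 2h 3m 46s"}
	// 1*86400 + 2*3600 + 3*60 + 46 = 93826
	if got := p.getUptime(); got != int64(93826) {
		t.Fatalf("expected 93826 seconds, got %d", got)
	}
}
```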
301 plugins/inputs/passenger/passenger_test.go Normal file
@@ -0,0 +1,301 @@
package passenger

import (
	"fmt"
	"io/ioutil"
	"os"
	"testing"

	"github.com/influxdata/telegraf/testutil"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func fakePassengerStatus(stat string) {
	content := fmt.Sprintf("#!/bin/sh\ncat << EOF\n%s\nEOF", stat)
	ioutil.WriteFile("/tmp/passenger-status", []byte(content), 0700)
}

func teardown() {
	os.Remove("/tmp/passenger-status")
}

func Test_Invalid_Passenger_Status_Cli(t *testing.T) {
	r := &passenger{
		Command: "an-invalid-command passenger-status",
	}

	var acc testutil.Accumulator

	err := r.Gather(&acc)
	require.Error(t, err)
	assert.Equal(t, err.Error(), `exec: "an-invalid-command": executable file not found in $PATH`)
}

func Test_Invalid_Xml(t *testing.T) {
	fakePassengerStatus("invalid xml")
	defer teardown()

	r := &passenger{
		Command: "/tmp/passenger-status",
	}

	var acc testutil.Accumulator

	err := r.Gather(&acc)
	require.Error(t, err)
	assert.Equal(t, err.Error(), "Cannot parse input with error: EOF\n")
}

// We test this by ensuring that the error message matches the path of the
// default cli
func Test_Default_Config_Load_Default_Command(t *testing.T) {
	fakePassengerStatus("invalid xml")
	defer teardown()

	r := &passenger{}

	var acc testutil.Accumulator

	err := r.Gather(&acc)
	require.Error(t, err)
	assert.Equal(t, err.Error(), "exec: \"passenger-status\": executable file not found in $PATH")
}

func TestPassengerGenerateMetric(t *testing.T) {
	fakePassengerStatus(sampleStat)
	defer teardown()

	// Run the plugin against the faked passenger-status output above
	r := &passenger{
		Command: "/tmp/passenger-status",
	}

	var acc testutil.Accumulator

	err := r.Gather(&acc)
	require.NoError(t, err)

	tags := map[string]string{
		"passenger_version": "5.0.17",
	}
	fields := map[string]interface{}{
		"process_count":      23,
		"max":                23,
		"capacity_used":      23,
		"get_wait_list_size": 3,
	}
	acc.AssertContainsTaggedFields(t, "passenger", fields, tags)

	tags = map[string]string{
		"name":     "/var/app/current/public",
		"app_root": "/var/app/current",
		"app_type": "rack",
	}
	fields = map[string]interface{}{
		"processes_being_spawned": 2,
		"capacity_used":           23,
		"get_wait_list_size":      3,
	}
	acc.AssertContainsTaggedFields(t, "passenger_group", fields, tags)

	tags = map[string]string{
		"name": "/var/app/current/public",
	}

	fields = map[string]interface{}{
		"capacity_used":      23,
		"get_wait_list_size": 3,
	}
	acc.AssertContainsTaggedFields(t, "passenger_supergroup", fields, tags)

	tags = map[string]string{
		"app_root":         "/var/app/current",
		"group_name":       "/var/app/current/public",
		"supergroup_name":  "/var/app/current/public",
		"pid":              "11553",
		"code_revision":    "899ac7f",
		"life_status":      "ALIVE",
		"process_group_id": "13608",
	}
	fields = map[string]interface{}{
		"concurrency":           1,
		"sessions":              0,
		"busyness":              0,
		"processed":             951,
		"spawner_creation_time": int64(1452746835922747),
		"spawn_start_time":      int64(1452746844946982),
		"spawn_end_time":        int64(1452746845013365),
		"last_used":             int64(1452747071764940),
		"uptime":                int64(226), // in seconds of 3m 46s
		"cpu":                   int64(58),
		"rss":                   int64(418548),
		"pss":                   int64(319391),
		"private_dirty":         int64(314900),
		"swap":                  int64(0),
		"real_memory":           int64(314900),
		"vmsize":                int64(1563580),
	}
	acc.AssertContainsTaggedFields(t, "passenger_process", fields, tags)
}

var sampleStat = `
<?xml version="1.0" encoding="iso8859-1" ?>
<?xml version="1.0" encoding="UTF-8"?>
<info version="3">
  <passenger_version>5.0.17</passenger_version>
  <group_count>1</group_count>
  <process_count>23</process_count>
  <max>23</max>
  <capacity_used>23</capacity_used>
  <get_wait_list_size>3</get_wait_list_size>
  <get_wait_list />
  <supergroups>
    <supergroup>
      <name>/var/app/current/public</name>
      <state>READY</state>
      <get_wait_list_size>3</get_wait_list_size>
      <capacity_used>23</capacity_used>
      <secret>foo</secret>
      <group default="true">
        <name>/var/app/current/public</name>
        <component_name>/var/app/current/public</component_name>
        <app_root>/var/app/current</app_root>
        <app_type>rack</app_type>
        <environment>production</environment>
        <uuid>QQUrbCVYxbJYpfgyDOwJ</uuid>
        <enabled_process_count>23</enabled_process_count>
        <disabling_process_count>0</disabling_process_count>
        <disabled_process_count>0</disabled_process_count>
        <capacity_used>23</capacity_used>
        <get_wait_list_size>3</get_wait_list_size>
        <disable_wait_list_size>0</disable_wait_list_size>
        <processes_being_spawned>2</processes_being_spawned>
        <secret>foo</secret>
        <api_key>foo</api_key>
        <life_status>ALIVE</life_status>
        <user>axcoto</user>
        <uid>1001</uid>
        <group>axcoto</group>
        <gid>1001</gid>
        <options>
          <app_root>/var/app/current</app_root>
          <app_group_name>/var/app/current/public</app_group_name>
          <app_type>rack</app_type>
          <start_command>/var/app/.rvm/gems/ruby-2.2.0-p645/gems/passenger-5.0.17/helper-scripts/rack-loader.rb</start_command>
          <startup_file>config.ru</startup_file>
          <process_title>Passenger RubyApp</process_title>
          <log_level>3</log_level>
          <start_timeout>90000</start_timeout>
          <environment>production</environment>
          <base_uri>/</base_uri>
          <spawn_method>smart</spawn_method>
          <default_user>nobody</default_user>
          <default_group>nogroup</default_group>
          <ruby>/var/app/.rvm/gems/ruby-2.2.0-p645/wrappers/ruby</ruby>
          <python>python</python>
          <nodejs>node</nodejs>
          <ust_router_address>unix:/tmp/passenger.eKFdvdC/agents.s/ust_router</ust_router_address>
          <ust_router_username>logging</ust_router_username>
          <ust_router_password>foo</ust_router_password>
          <debugger>false</debugger>
          <analytics>false</analytics>
          <api_key>foo</api_key>
          <min_processes>22</min_processes>
          <max_processes>0</max_processes>
          <max_preloader_idle_time>300</max_preloader_idle_time>
          <max_out_of_band_work_instances>1</max_out_of_band_work_instances>
        </options>
        <processes>
          <process>
            <pid>11553</pid>
            <sticky_session_id>378579907</sticky_session_id>
            <gupid>17173df-PoNT3J9HCf</gupid>
            <concurrency>1</concurrency>
            <sessions>0</sessions>
            <busyness>0</busyness>
            <processed>951</processed>
            <spawner_creation_time>1452746835922747</spawner_creation_time>
            <spawn_start_time>1452746844946982</spawn_start_time>
            <spawn_end_time>1452746845013365</spawn_end_time>
            <last_used>1452747071764940</last_used>
            <last_used_desc>0s ago</last_used_desc>
            <uptime>3m 46s</uptime>
            <code_revision>899ac7f</code_revision>
            <life_status>ALIVE</life_status>
            <enabled>ENABLED</enabled>
            <has_metrics>true</has_metrics>
            <cpu>58</cpu>
            <rss>418548</rss>
            <pss>319391</pss>
            <private_dirty>314900</private_dirty>
            <swap>0</swap>
            <real_memory>314900</real_memory>
            <vmsize>1563580</vmsize>
            <process_group_id>13608</process_group_id>
            <command>Passenger RubyApp: /var/app/current/public</command>
            <sockets>
              <socket>
                <name>main</name>
                <address>unix:/tmp/passenger.eKFdvdC/apps.s/ruby.UWF6zkRJ71aoMXPxpknpWVfC1POFqgWZzbEsdz5v0G46cSSMxJ3GHLFhJaUrK2I</address>
                <protocol>session</protocol>
                <concurrency>1</concurrency>
                <sessions>0</sessions>
              </socket>
              <socket>
                <name>http</name>
                <address>tcp://127.0.0.1:49888</address>
                <protocol>http</protocol>
                <concurrency>1</concurrency>
                <sessions>0</sessions>
              </socket>
            </sockets>
          </process>
          <process>
            <pid>11563</pid>
            <sticky_session_id>1549681201</sticky_session_id>
            <gupid>17173df-pX5iJOipd8</gupid>
            <concurrency>1</concurrency>
            <sessions>1</sessions>
            <busyness>2147483647</busyness>
            <processed>756</processed>
            <spawner_creation_time>1452746835922747</spawner_creation_time>
            <spawn_start_time>1452746845136882</spawn_start_time>
            <spawn_end_time>1452746845172460</spawn_end_time>
            <last_used>1452747071709179</last_used>
            <last_used_desc>0s ago</last_used_desc>
            <uptime>3m 46s</uptime>
            <code_revision>899ac7f</code_revision>
            <life_status>ALIVE</life_status>
            <enabled>ENABLED</enabled>
            <has_metrics>true</has_metrics>
            <cpu>47</cpu>
            <rss>418296</rss>
            <pss>314036</pss>
            <private_dirty>309240</private_dirty>
            <swap>0</swap>
            <real_memory>309240</real_memory>
            <vmsize>1563608</vmsize>
            <process_group_id>13608</process_group_id>
            <command>Passenger RubyApp: /var/app/current/public</command>
            <sockets>
              <socket>
                <name>main</name>
                <address>unix:/tmp/passenger.eKFdvdC/apps.s/ruby.PVCh7TmvCi9knqhba2vG5qXrlHGEIwhGrxnUvRbIAD6SPz9m0G7YlJ8HEsREHY3</address>
                <protocol>session</protocol>
                <concurrency>1</concurrency>
                <sessions>1</sessions>
              </socket>
              <socket>
                <name>http</name>
                <address>tcp://127.0.0.1:52783</address>
                <protocol>http</protocol>
                <concurrency>1</concurrency>
                <sessions>0</sessions>
              </socket>
            </sockets>
          </process>
        </processes>
      </group>
    </supergroup>
  </supergroups>
</info>`
@@ -6,10 +6,14 @@ Get phpfpm stat using either HTTP status page or fpm socket.

Meta:

- tags: `url=<ip> pool=poolname`
- tags: `pool=poolname`

Measurement names:

- phpfpm

Measurement field:

- accepted_conn
- listen_queue
- max_listen_queue
@@ -50,36 +54,12 @@ It produces:

```
* Plugin: phpfpm, Collection 1
> [url="10.0.0.12" pool="www"] phpfpm_idle_processes value=1
> [url="10.0.0.12" pool="www"] phpfpm_total_processes value=2
> [url="10.0.0.12" pool="www"] phpfpm_max_children_reached value=0
> [url="10.0.0.12" pool="www"] phpfpm_max_listen_queue value=0
> [url="10.0.0.12" pool="www"] phpfpm_listen_queue value=0
> [url="10.0.0.12" pool="www"] phpfpm_listen_queue_len value=0
> [url="10.0.0.12" pool="www"] phpfpm_active_processes value=1
> [url="10.0.0.12" pool="www"] phpfpm_max_active_processes value=2
> [url="10.0.0.12" pool="www"] phpfpm_slow_requests value=0
> [url="10.0.0.12" pool="www"] phpfpm_accepted_conn value=305

> [url="localhost" pool="www2"] phpfpm_max_children_reached value=0
> [url="localhost" pool="www2"] phpfpm_slow_requests value=0
> [url="localhost" pool="www2"] phpfpm_max_listen_queue value=0
> [url="localhost" pool="www2"] phpfpm_active_processes value=1
> [url="localhost" pool="www2"] phpfpm_listen_queue_len value=0
> [url="localhost" pool="www2"] phpfpm_idle_processes value=1
> [url="localhost" pool="www2"] phpfpm_total_processes value=2
> [url="localhost" pool="www2"] phpfpm_max_active_processes value=2
> [url="localhost" pool="www2"] phpfpm_accepted_conn value=306
> [url="localhost" pool="www2"] phpfpm_listen_queue value=0

> [url="10.0.0.12:9000" pool="www3"] phpfpm_max_children_reached value=0
> [url="10.0.0.12:9000" pool="www3"] phpfpm_slow_requests value=1
> [url="10.0.0.12:9000" pool="www3"] phpfpm_max_listen_queue value=0
> [url="10.0.0.12:9000" pool="www3"] phpfpm_active_processes value=1
> [url="10.0.0.12:9000" pool="www3"] phpfpm_listen_queue_len value=0
> [url="10.0.0.12:9000" pool="www3"] phpfpm_idle_processes value=2
> [url="10.0.0.12:9000" pool="www3"] phpfpm_total_processes value=2
> [url="10.0.0.12:9000" pool="www3"] phpfpm_max_active_processes value=2
> [url="10.0.0.12:9000" pool="www3"] phpfpm_accepted_conn value=307
> [url="10.0.0.12:9000" pool="www3"] phpfpm_listen_queue value=0
> phpfpm,pool=www accepted_conn=13i,active_processes=2i,idle_processes=1i,listen_queue=0i,listen_queue_len=0i,max_active_processes=2i,max_children_reached=0i,max_listen_queue=0i,slow_requests=0i,total_processes=3i 1453011293083331187
> phpfpm,pool=www2 accepted_conn=12i,active_processes=1i,idle_processes=2i,listen_queue=0i,listen_queue_len=0i,max_active_processes=2i,max_children_reached=0i,max_listen_queue=0i,slow_requests=0i,total_processes=3i 1453011293083691422
> phpfpm,pool=www3 accepted_conn=11i,active_processes=1i,idle_processes=2i,listen_queue=0i,listen_queue_len=0i,max_active_processes=2i,max_children_reached=0i,max_listen_queue=0i,slow_requests=0i,total_processes=3i 1453011293083691658
```

## Note

When using `unixsocket`, you have to ensure that telegraf runs on the same
host and that the socket path is accessible to the telegraf user.
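A quick pre-flight check in the spirit of that note, sketched in Go (the socket path is just an example); the plugin itself performs an equivalent `os.Stat` before connecting:

```go
package main

import (
	"log"
	"os"
)

func main() {
	// Illustrative: verify the fpm socket exists and is visible to the
	// current user before pointing telegraf at it.
	if _, err := os.Stat("/var/run/php5-fpm.sock"); err != nil {
		log.Fatalf("fpm socket not accessible: %v", err)
	}
	log.Println("socket looks reachable")
}
```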
@@ -7,11 +7,13 @@ import (
	"io"
	"net/http"
	"net/url"
	"os"
	"strconv"
	"strings"
	"sync"

	"github.com/influxdb/telegraf/plugins/inputs"
	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/inputs"
)

const (
@@ -40,20 +42,25 @@ type phpfpm struct {

var sampleConfig = `
# An array of addresses to gather stats about. Specify an ip or hostname
# with optional port and path.
# with optional port and path
#
# Plugin can be configured in three modes (both can be used):
# - http: the URL must start with http:// or https://, ex:
# Plugin can be configured in three modes (either can be used):
# - http: the URL must start with http:// or https://, ie:
#   "http://localhost/status"
#   "http://192.168.130.1/status?full"
# - unixsocket: path to fpm socket, ex:
#
# - unixsocket: path to fpm socket, ie:
#   "/var/run/php5-fpm.sock"
#   "192.168.10.10:/var/run/php5-fpm-www2.sock"
# - fcgi: the URL mush start with fcgi:// or cgi://, and port must present, ex:
#   or using a custom fpm status path:
#   "/var/run/php5-fpm.sock:fpm-custom-status-path"
#
# - fcgi: the URL must start with fcgi:// or cgi://, and port must be present, ie:
#   "fcgi://10.0.0.12:9000/status"
#   "cgi://10.0.10.12:9001/status"
#
# If no servers are specified, then default to 127.0.0.1/server-status
# Example of multiple gathering from local socket and remove host
# urls = ["http://192.168.1.20/status", "/tmp/fpm.sock"]
# If no servers are specified, then default to http://127.0.0.1/status
urls = ["http://localhost/status"]
`

@@ -62,12 +69,12 @@ func (r *phpfpm) SampleConfig() string {
}

func (r *phpfpm) Description() string {
	return "Read metrics of phpfpm, via HTTP status page or socket(pending)"
	return "Read metrics of phpfpm, via HTTP status page or socket"
}

// Reads stats from all configured servers accumulates stats.
// Returns one of the errors encountered while gather stats (if any).
func (g *phpfpm) Gather(acc inputs.Accumulator) error {
func (g *phpfpm) Gather(acc telegraf.Accumulator) error {
	if len(g.Urls) == 0 {
		return g.gatherServer("http://127.0.0.1/status", acc)
	}
@@ -89,71 +96,96 @@ func (g *phpfpm) Gather(acc inputs.Accumulator) error {
	return outerr
}

// Request status page to get stat raw data
func (g *phpfpm) gatherServer(addr string, acc inputs.Accumulator) error {
// Request status page to get stat raw data and import it
func (g *phpfpm) gatherServer(addr string, acc telegraf.Accumulator) error {
	if g.client == nil {
		client := &http.Client{}
		g.client = client
	}

	if strings.HasPrefix(addr, "http://") || strings.HasPrefix(addr, "https://") {
		return g.gatherHttp(addr, acc)
	}

	var (
		fcgi       *conn
		socketPath string
		statusPath string
	)

	if strings.HasPrefix(addr, "fcgi://") || strings.HasPrefix(addr, "cgi://") {
		u, err := url.Parse(addr)
		if err != nil {
			return fmt.Errorf("Unable parse server address '%s': %s", addr, err)
		}

		req, err := http.NewRequest("GET", fmt.Sprintf("%s://%s%s", u.Scheme,
			u.Host, u.Path), nil)
		res, err := g.client.Do(req)
		if err != nil {
			return fmt.Errorf("Unable to connect to phpfpm status page '%s': %v",
				addr, err)
		}

		if res.StatusCode != 200 {
			return fmt.Errorf("Unable to get valid stat result from '%s': %v",
				addr, err)
		}

		importMetric(res.Body, acc, u.Host)
		socketAddr := strings.Split(u.Host, ":")
		fcgiIp := socketAddr[0]
		fcgiPort, _ := strconv.Atoi(socketAddr[1])
		fcgi, _ = NewClient(fcgiIp, fcgiPort)
	} else {
		var (
			fcgi     *FCGIClient
			fcgiAddr string
		)
		if strings.HasPrefix(addr, "fcgi://") || strings.HasPrefix(addr, "cgi://") {
			u, err := url.Parse(addr)
			if err != nil {
				return fmt.Errorf("Unable parse server address '%s': %s", addr, err)
			}
			socketAddr := strings.Split(u.Host, ":")
			fcgiIp := socketAddr[0]
			fcgiPort, _ := strconv.Atoi(socketAddr[1])
			fcgiAddr = u.Host
			fcgi, _ = NewClient(fcgiIp, fcgiPort)
		socketAddr := strings.Split(addr, ":")
		if len(socketAddr) >= 2 {
			socketPath = socketAddr[0]
			statusPath = socketAddr[1]
		} else {
			socketAddr := strings.Split(addr, ":")
			fcgiAddr = socketAddr[0]
			fcgi, _ = NewClient("unix", socketAddr[1])
		}
		resOut, resErr, err := fcgi.Request(map[string]string{
			"SCRIPT_NAME":     "/status",
			"SCRIPT_FILENAME": "status",
			"REQUEST_METHOD":  "GET",
		}, "")

		if len(resErr) == 0 && err == nil {
			importMetric(bytes.NewReader(resOut), acc, fcgiAddr)
			socketPath = socketAddr[0]
			statusPath = "status"
		}

		if _, err := os.Stat(socketPath); os.IsNotExist(err) {
			return fmt.Errorf("Socket doesn't exist '%s': %s", socketPath, err)
		}
		fcgi, _ = NewClient("unix", socketPath)
	}
	return g.gatherFcgi(fcgi, statusPath, acc)
}

// Gather stat using fcgi protocol
func (g *phpfpm) gatherFcgi(fcgi *conn, statusPath string, acc telegraf.Accumulator) error {
	fpmOutput, fpmErr, err := fcgi.Request(map[string]string{
		"SCRIPT_NAME":     "/" + statusPath,
		"SCRIPT_FILENAME": statusPath,
		"REQUEST_METHOD":  "GET",
		"CONTENT_LENGTH":  "0",
		"SERVER_PROTOCOL": "HTTP/1.0",
		"SERVER_SOFTWARE": "go / fcgiclient ",
		"REMOTE_ADDR":     "127.0.0.1",
	}, "/"+statusPath)

	if len(fpmErr) == 0 && err == nil {
		importMetric(bytes.NewReader(fpmOutput), acc)
		return nil
	} else {
		return fmt.Errorf("Unable parse phpfpm status. Error: %v %v", string(fpmErr), err)
	}
}

// Gather stat using http protocol
func (g *phpfpm) gatherHttp(addr string, acc telegraf.Accumulator) error {
	u, err := url.Parse(addr)
	if err != nil {
		return fmt.Errorf("Unable parse server address '%s': %s", addr, err)
	}

	req, err := http.NewRequest("GET", fmt.Sprintf("%s://%s%s", u.Scheme,
		u.Host, u.Path), nil)
	res, err := g.client.Do(req)
	if err != nil {
		return fmt.Errorf("Unable to connect to phpfpm status page '%s': %v",
			addr, err)
	}

	if res.StatusCode != 200 {
		return fmt.Errorf("Unable to get valid stat result from '%s': %v",
			addr, err)
	}

	importMetric(res.Body, acc)
	return nil
}

// Import HTTP stat data into Telegraf system
func importMetric(r io.Reader, acc inputs.Accumulator, host string) (poolStat, error) {
// Import stat data into Telegraf system
func importMetric(r io.Reader, acc telegraf.Accumulator) (poolStat, error) {
	stats := make(poolStat)
	var currentPool string

@@ -195,7 +227,6 @@ func importMetric(r io.Reader, acc inputs.Accumulator, host string) (poolStat, e
	// Finally, we push the pool metric
	for pool := range stats {
		tags := map[string]string{
			"url":  host,
			"pool": pool,
		}
		fields := make(map[string]interface{})
@@ -209,7 +240,7 @@ func importMetric(r io.Reader, acc inputs.Accumulator, host string) (poolStat, e
}

func init() {
	inputs.Add("phpfpm", func() inputs.Input {
	inputs.Add("phpfpm", func() telegraf.Input {
		return &phpfpm{}
	})
}
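`gatherServer` dispatches on the address form: `http(s)://` URLs go to `gatherHttp`, `fcgi://`/`cgi://` URLs open a TCP FastCGI connection, and anything else is treated as a unix socket path with an optional `:status-path` suffix. A standalone sketch of that dispatch (the function name is ours, illustrative only):

```go
package main

import (
	"fmt"
	"strings"
)

// classifyAddr mirrors the branching in gatherServer; illustrative only.
func classifyAddr(addr string) string {
	switch {
	case strings.HasPrefix(addr, "http://"), strings.HasPrefix(addr, "https://"):
		return "http"
	case strings.HasPrefix(addr, "fcgi://"), strings.HasPrefix(addr, "cgi://"):
		return "fcgi"
	default:
		return "unixsocket" // may carry a ":custom-status-path" suffix
	}
}

func main() {
	fmt.Println(classifyAddr("http://localhost/status"))           // http
	fmt.Println(classifyAddr("fcgi://10.0.0.12:9000/status"))      // fcgi
	fmt.Println(classifyAddr("/var/run/php5-fpm.sock:fpm-status")) // unixsocket
}
```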
@@ -1,13 +1,14 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package fcgi implements the FastCGI protocol.
// Currently only the responder role is supported.
// The protocol is defined at http://www.fastcgi.com/drupal/node/6?q=node/22
package phpfpm

// FastCGI client to request via socket

// Copyright 2012 Junqing Tan <ivan@mysqlab.net> and The Go Authors
// Use of this source code is governed by a BSD-style
// Part of source code is from Go fcgi package

// Fix bug: Can't recive more than 1 record untill FCGI_END_REQUEST 2012-09-15
// By: wofeiwo
// This file defines the raw protocol and some utilities used by the child and
// the host.

import (
	"bufio"
@@ -15,70 +16,84 @@ import (
	"encoding/binary"
	"errors"
	"io"
	"sync"
	"net"
	"strconv"
	"sync"
	"strings"
)

const FCGI_LISTENSOCK_FILENO uint8 = 0
const FCGI_HEADER_LEN uint8 = 8
const VERSION_1 uint8 = 1
const FCGI_NULL_REQUEST_ID uint8 = 0
const FCGI_KEEP_CONN uint8 = 1
// recType is a record type, as defined by
// http://www.fastcgi.com/devkit/doc/fcgi-spec.html#S8
type recType uint8

const (
	FCGI_BEGIN_REQUEST uint8 = iota + 1
	FCGI_ABORT_REQUEST
	FCGI_END_REQUEST
	FCGI_PARAMS
	FCGI_STDIN
	FCGI_STDOUT
	FCGI_STDERR
	FCGI_DATA
	FCGI_GET_VALUES
	FCGI_GET_VALUES_RESULT
	FCGI_UNKNOWN_TYPE
	FCGI_MAXTYPE = FCGI_UNKNOWN_TYPE
	typeBeginRequest    recType = 1
	typeAbortRequest    recType = 2
	typeEndRequest      recType = 3
	typeParams          recType = 4
	typeStdin           recType = 5
	typeStdout          recType = 6
	typeStderr          recType = 7
	typeData            recType = 8
	typeGetValues       recType = 9
	typeGetValuesResult recType = 10
	typeUnknownType     recType = 11
)

const (
	FCGI_RESPONDER uint8 = iota + 1
	FCGI_AUTHORIZER
	FCGI_FILTER
)
// keep the connection between web-server and responder open after request
const flagKeepConn = 1

const (
	FCGI_REQUEST_COMPLETE uint8 = iota
	FCGI_CANT_MPX_CONN
	FCGI_OVERLOADED
	FCGI_UNKNOWN_ROLE
)

const (
	FCGI_MAX_CONNS  string = "MAX_CONNS"
	FCGI_MAX_REQS   string = "MAX_REQS"
	FCGI_MPXS_CONNS string = "MPXS_CONNS"
)

const (
	maxWrite = 6553500 // maximum record body
	maxWrite = 65535 // maximum record body
	maxPad   = 255
)

const (
	roleResponder = iota + 1 // only Responders are implemented.
	roleAuthorizer
	roleFilter
)

const (
	statusRequestComplete = iota
	statusCantMultiplex
	statusOverloaded
	statusUnknownRole
)

const headerLen = 8

type header struct {
	Version       uint8
	Type          uint8
	Type          recType
	Id            uint16
	ContentLength uint16
	PaddingLength uint8
	Reserved      uint8
}

type beginRequest struct {
	role     uint16
	flags    uint8
	reserved [5]uint8
}

func (br *beginRequest) read(content []byte) error {
	if len(content) != 8 {
		return errors.New("fcgi: invalid begin request record")
	}
	br.role = binary.BigEndian.Uint16(content)
	br.flags = content[2]
	return nil
}

// for padding so we don't have to allocate all the time
// not synchronized because we don't care what the contents are
var pad [maxPad]byte

func (h *header) init(recType uint8, reqId uint16, contentLength int) {
func (h *header) init(recType recType, reqId uint16, contentLength int) {
	h.Version = 1
	h.Type = recType
	h.Id = reqId
@@ -86,6 +101,26 @@ func (h *header) init(recType uint8, reqId uint16, contentLength int) {
	h.PaddingLength = uint8(-contentLength & 7)
}

// conn sends records over rwc
type conn struct {
	mutex sync.Mutex
	rwc   io.ReadWriteCloser

	// to avoid allocations
	buf bytes.Buffer
	h   header
}

func newConn(rwc io.ReadWriteCloser) *conn {
	return &conn{rwc: rwc}
}

func (c *conn) Close() error {
	c.mutex.Lock()
	defer c.mutex.Unlock()
	return c.rwc.Close()
}

type record struct {
	h   header
	buf [maxWrite + maxPad]byte
@@ -109,69 +144,39 @@ func (r *record) content() []byte {
	return r.buf[:r.h.ContentLength]
}

type FCGIClient struct {
	mutex     sync.Mutex
	rwc       io.ReadWriteCloser
	h         header
	buf       bytes.Buffer
	keepAlive bool
}

func NewClient(h string, args ...interface{}) (fcgi *FCGIClient, err error) {
	var conn net.Conn
	if len(args) != 1 {
		err = errors.New("fcgi: not enough params")
		return
	}
	switch args[0].(type) {
	case int:
		addr := h + ":" + strconv.FormatInt(int64(args[0].(int)), 10)
		conn, err = net.Dial("tcp", addr)
	case string:
		laddr := net.UnixAddr{Name: args[0].(string), Net: h}
		conn, err = net.DialUnix(h, nil, &laddr)
	default:
		err = errors.New("fcgi: we only accept int (port) or string (socket) params.")
	}
	fcgi = &FCGIClient{
		rwc:       conn,
		keepAlive: false,
	}
	return
}

func (client *FCGIClient) writeRecord(recType uint8, reqId uint16, content []byte) (err error) {
	client.mutex.Lock()
	defer client.mutex.Unlock()
	client.buf.Reset()
	client.h.init(recType, reqId, len(content))
	if err := binary.Write(&client.buf, binary.BigEndian, client.h); err != nil {
// writeRecord writes and sends a single record.
func (c *conn) writeRecord(recType recType, reqId uint16, b []byte) error {
	c.mutex.Lock()
	defer c.mutex.Unlock()
	c.buf.Reset()
	c.h.init(recType, reqId, len(b))
	if err := binary.Write(&c.buf, binary.BigEndian, c.h); err != nil {
		return err
	}
	if _, err := client.buf.Write(content); err != nil {
	if _, err := c.buf.Write(b); err != nil {
		return err
	}
	if _, err := client.buf.Write(pad[:client.h.PaddingLength]); err != nil {
	if _, err := c.buf.Write(pad[:c.h.PaddingLength]); err != nil {
		return err
	}
	_, err = client.rwc.Write(client.buf.Bytes())
	_, err := c.rwc.Write(c.buf.Bytes())
	return err
}

func (client *FCGIClient) writeBeginRequest(reqId uint16, role uint16, flags uint8) error {
func (c *conn) writeBeginRequest(reqId uint16, role uint16, flags uint8) error {
	b := [8]byte{byte(role >> 8), byte(role), flags}
	return client.writeRecord(FCGI_BEGIN_REQUEST, reqId, b[:])
	return c.writeRecord(typeBeginRequest, reqId, b[:])
}

func (client *FCGIClient) writeEndRequest(reqId uint16, appStatus int, protocolStatus uint8) error {
func (c *conn) writeEndRequest(reqId uint16, appStatus int, protocolStatus uint8) error {
	b := make([]byte, 8)
	binary.BigEndian.PutUint32(b, uint32(appStatus))
	b[4] = protocolStatus
	return client.writeRecord(FCGI_END_REQUEST, reqId, b)
	return c.writeRecord(typeEndRequest, reqId, b)
}

func (client *FCGIClient) writePairs(recType uint8, reqId uint16, pairs map[string]string) error {
	w := newWriter(client, recType, reqId)
func (c *conn) writePairs(recType recType, reqId uint16, pairs map[string]string) error {
	w := newWriter(c, recType, reqId)
	b := make([]byte, 8)
	for k, v := range pairs {
		n := encodeSize(b, uint32(len(k)))
@@ -238,7 +243,7 @@ func (w *bufWriter) Close() error {
	return w.closer.Close()
}

func newWriter(c *FCGIClient, recType uint8, reqId uint16) *bufWriter {
func newWriter(c *conn, recType recType, reqId uint16) *bufWriter {
	s := &streamWriter{c: c, recType: recType, reqId: reqId}
	w := bufio.NewWriterSize(s, maxWrite)
	return &bufWriter{s, w}
@@ -247,8 +252,8 @@ func newWriter(c *FCGIClient, recType uint8, reqId uint16) *bufWriter {
// streamWriter abstracts out the separation of a stream into discrete records.
// It only writes maxWrite bytes at a time.
type streamWriter struct {
	c       *FCGIClient
	recType uint8
	c       *conn
	recType recType
	reqId   uint16
}

@@ -273,22 +278,44 @@ func (w *streamWriter) Close() error {
	return w.c.writeRecord(w.recType, w.reqId, nil)
}

func (client *FCGIClient) Request(env map[string]string, reqStr string) (retout []byte, reterr []byte, err error) {
func NewClient(h string, args ...interface{}) (fcgi *conn, err error) {
	var con net.Conn
	if len(args) != 1 {
		err = errors.New("fcgi: not enough params")
		return
	}
	switch args[0].(type) {
	case int:
		addr := h + ":" + strconv.FormatInt(int64(args[0].(int)), 10)
		con, err = net.Dial("tcp", addr)
	case string:
		laddr := net.UnixAddr{Name: args[0].(string), Net: h}
		con, err = net.DialUnix(h, nil, &laddr)
	default:
		err = errors.New("fcgi: we only accept int (port) or string (socket) params.")
	}
	fcgi = &conn{
		rwc: con,
	}
	return
}

var reqId uint16 = 1

func (client *conn) Request(env map[string]string, requestData string) (retout []byte, reterr []byte, err error) {
	defer client.rwc.Close()
	var reqId uint16 = 1

	err = client.writeBeginRequest(reqId, uint16(FCGI_RESPONDER), 0)
	err = client.writeBeginRequest(reqId, uint16(roleResponder), 0)
	if err != nil {
		return
	}
	err = client.writePairs(FCGI_PARAMS, reqId, env)

	err = client.writePairs(typeParams, reqId, env)
	if err != nil {
		return
	}
	if len(reqStr) > 0 {
		err = client.writeRecord(FCGI_STDIN, reqId, []byte(reqStr))
		if err != nil {

	if len(requestData) > 0 {
		if err = client.writeRecord(typeStdin, reqId, []byte(requestData)); err != nil {
			return
		}
	}
@@ -297,23 +324,25 @@ func (client *FCGIClient) Request(env map[string]string, reqStr string) (retout
	var err1 error

	// recive untill EOF or FCGI_END_REQUEST
READ_LOOP:
	for {
		err1 = rec.read(client.rwc)
		if err1 != nil {
		if err1 != nil && strings.Contains(err1.Error(), "use of closed network connection") {
			if err1 != io.EOF {
				err = err1
			}
			break
		}

		switch {
		case rec.h.Type == FCGI_STDOUT:
		case rec.h.Type == typeStdout:
			retout = append(retout, rec.content()...)
		case rec.h.Type == FCGI_STDERR:
		case rec.h.Type == typeStderr:
			reterr = append(reterr, rec.content()...)
		case rec.h.Type == FCGI_END_REQUEST:
		case rec.h.Type == typeEndRequest:
			fallthrough
		default:
			break
			break READ_LOOP
		}
	}
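Every FastCGI record is padded to an 8-byte boundary; `header.init` computes the pad as `uint8(-contentLength & 7)`. A self-contained check of that arithmetic (ours, not from the commit):

```go
package main

import "fmt"

// padLen reproduces the padding rule from header.init: records are padded to
// an 8-byte boundary, so a body of n bytes needs (-n & 7) pad bytes.
func padLen(n int) uint8 {
	return uint8(-n & 7)
}

func main() {
	for _, n := range []int{0, 5, 8, 13} {
		fmt.Printf("body %2d bytes -> %d pad bytes\n", n, padLen(n))
	}
	// pad bytes printed: 0, 3, 0, 3
}
```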
@@ -1,24 +1,34 @@
|
||||
package phpfpm
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/http/fcgi"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"github.com/influxdb/telegraf/testutil"
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
)
|
||||
|
||||
func TestPhpFpmGeneratesMetrics(t *testing.T) {
|
||||
//We create a fake server to return test data
|
||||
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
fmt.Fprint(w, outputSample)
|
||||
}))
|
||||
type statServer struct{}
|
||||
|
||||
// We create a fake server to return test data
|
||||
func (s statServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "text/plain")
|
||||
w.Header().Set("Content-Length", fmt.Sprint(len(outputSample)))
|
||||
fmt.Fprint(w, outputSample)
|
||||
}
|
||||
|
||||
func TestPhpFpmGeneratesMetrics_From_Http(t *testing.T) {
|
||||
sv := statServer{}
|
||||
ts := httptest.NewServer(sv)
|
||||
defer ts.Close()
|
||||
|
||||
//Now we tested again above server, with our authentication data
|
||||
r := &phpfpm{
|
||||
Urls: []string{ts.URL},
|
||||
}
|
||||
@@ -29,7 +39,134 @@ func TestPhpFpmGeneratesMetrics(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
tags := map[string]string{
|
||||
"url": ts.Listener.Addr().String(),
|
||||
"pool": "www",
|
||||
}
|
||||
|
||||
fields := map[string]interface{}{
|
||||
"accepted_conn": int64(3),
|
||||
"listen_queue": int64(1),
|
||||
"max_listen_queue": int64(0),
|
||||
"listen_queue_len": int64(0),
|
||||
"idle_processes": int64(1),
|
||||
"active_processes": int64(1),
|
||||
"total_processes": int64(2),
|
||||
"max_active_processes": int64(1),
|
||||
"max_children_reached": int64(2),
|
||||
"slow_requests": int64(1),
|
||||
}
|
||||
|
||||
acc.AssertContainsTaggedFields(t, "phpfpm", fields, tags)
|
||||
}
|
||||
|
||||
func TestPhpFpmGeneratesMetrics_From_Fcgi(t *testing.T) {
|
||||
// Let OS find an available port
|
||||
tcp, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
if err != nil {
|
||||
t.Fatal("Cannot initalize test server")
|
||||
}
|
||||
defer tcp.Close()
|
||||
|
||||
s := statServer{}
|
||||
go fcgi.Serve(tcp, s)
|
||||
|
||||
//Now we tested again above server
|
||||
r := &phpfpm{
|
||||
Urls: []string{"fcgi://" + tcp.Addr().String() + "/status"},
|
||||
}
|
||||
|
||||
var acc testutil.Accumulator
|
||||
err = r.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
|
||||
tags := map[string]string{
|
||||
"pool": "www",
|
||||
}
|
||||
|
||||
fields := map[string]interface{}{
|
||||
"accepted_conn": int64(3),
|
||||
"listen_queue": int64(1),
|
||||
"max_listen_queue": int64(0),
|
||||
"listen_queue_len": int64(0),
|
||||
"idle_processes": int64(1),
|
||||
"active_processes": int64(1),
|
||||
"total_processes": int64(2),
|
||||
"max_active_processes": int64(1),
|
||||
"max_children_reached": int64(2),
|
||||
"slow_requests": int64(1),
|
||||
}
|
||||
|
||||
acc.AssertContainsTaggedFields(t, "phpfpm", fields, tags)
|
||||
}
|
||||
|
||||
func TestPhpFpmGeneratesMetrics_From_Socket(t *testing.T) {
	// Create a socket in /tmp because we always have write permission there,
	// and if removing the socket fails, /tmp is cleared on restart so we
	// don't leave junk files around
	var randomNumber int64
	binary.Read(rand.Reader, binary.LittleEndian, &randomNumber)
	tcp, err := net.Listen("unix", fmt.Sprintf("/tmp/test-fpm%d.sock", randomNumber))
	if err != nil {
		t.Fatal("Cannot initialize server on socket")
	}

	defer tcp.Close()
	s := statServer{}
	go fcgi.Serve(tcp, s)

	r := &phpfpm{
		Urls: []string{tcp.Addr().String()},
	}

	var acc testutil.Accumulator

	err = r.Gather(&acc)
	require.NoError(t, err)

	tags := map[string]string{
		"pool": "www",
	}

	fields := map[string]interface{}{
		"accepted_conn":        int64(3),
		"listen_queue":         int64(1),
		"max_listen_queue":     int64(0),
		"listen_queue_len":     int64(0),
		"idle_processes":       int64(1),
		"active_processes":     int64(1),
		"total_processes":      int64(2),
		"max_active_processes": int64(1),
		"max_children_reached": int64(2),
		"slow_requests":        int64(1),
	}

	acc.AssertContainsTaggedFields(t, "phpfpm", fields, tags)
}
func TestPhpFpmGeneratesMetrics_From_Socket_Custom_Status_Path(t *testing.T) {
	// Create a socket in /tmp because we always have write permission there.
	// If removing the socket fails we won't leave junk files around, because
	// /tmp is cleared out when the system restarts
	var randomNumber int64
	binary.Read(rand.Reader, binary.LittleEndian, &randomNumber)
	tcp, err := net.Listen("unix", fmt.Sprintf("/tmp/test-fpm%d.sock", randomNumber))
	if err != nil {
		t.Fatal("Cannot initialize server on socket")
	}

	defer tcp.Close()
	s := statServer{}
	go fcgi.Serve(tcp, s)

	r := &phpfpm{
		Urls: []string{tcp.Addr().String() + ":custom-status-path"},
	}

	var acc testutil.Accumulator

	err = r.Gather(&acc)
	require.NoError(t, err)

	tags := map[string]string{
		"pool": "www",
	}
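Between them, the four gather tests above exercise every address form the plugin's tests cover: an HTTP status URL, an fcgi:// URL, a bare unix-socket path, and a socket path with a status path appended after a colon. A standalone sketch restating those forms (the hosts and paths here are illustrative, not from the source):

```
package main

import "fmt"

func main() {
	// Address forms exercised by the phpfpm tests above; the concrete
	// hosts and socket paths below are hypothetical examples.
	urls := []string{
		"http://127.0.0.1:8080/status",         // HTTP status page
		"fcgi://127.0.0.1:9000/status",         // FastCGI over TCP
		"/run/php-fpm.sock",                    // unix socket, default status path
		"/run/php-fpm.sock:custom-status-path", // unix socket, custom status path
	}
	for _, u := range urls {
		fmt.Println(u)
	}
}
```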
@@ -51,7 +188,7 @@ func TestPhpFpmGeneratesMetrics(t *testing.T) {

// When no server config is passed, we default to localhost.
// We just want to make sure we requested stats from localhost.
-func TestHaproxyDefaultGetFromLocalhost(t *testing.T) {
+func TestPhpFpmDefaultGetFromLocalhost(t *testing.T) {
	r := &phpfpm{}

	var acc testutil.Accumulator
@@ -61,6 +198,31 @@ func TestHaproxyDefaultGetFromLocalhost(t *testing.T) {
	assert.Contains(t, err.Error(), "127.0.0.1/status")
}
func TestPhpFpmGeneratesMetrics_Throw_Error_When_Fpm_Status_Is_Not_Responding(t *testing.T) {
	r := &phpfpm{
		Urls: []string{"http://aninvalidone"},
	}

	var acc testutil.Accumulator

	err := r.Gather(&acc)
	require.Error(t, err)
	assert.Contains(t, err.Error(), `Unable to connect to phpfpm status page 'http://aninvalidone': Get http://aninvalidone: dial tcp: lookup aninvalidone`)
}

func TestPhpFpmGeneratesMetrics_Throw_Error_When_Socket_Path_Is_Invalid(t *testing.T) {
	r := &phpfpm{
		Urls: []string{"/tmp/invalid.sock"},
	}

	var acc testutil.Accumulator

	err := r.Gather(&acc)
	require.Error(t, err)
	assert.Equal(t, `Socket doesn't exist '/tmp/invalid.sock': stat /tmp/invalid.sock: no such file or directory`, err.Error())
}

const outputSample = `
pool: www
process manager: dynamic
@@ -1,3 +1,5 @@
+// +build !windows
+
 package ping

 import (
@@ -7,7 +9,8 @@ import (
	"strings"
	"sync"

-	"github.com/influxdb/telegraf/plugins/inputs"
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/plugins/inputs"
)
// HostPinger is a function that runs the "ping" function using a list of
@@ -40,6 +43,9 @@ func (_ *Ping) Description() string {
}

 var sampleConfig = `
+# NOTE: this plugin forks the ping command. You may need to set capabilities
+# via setcap cap_net_raw+p /bin/ping
+
 # urls to ping
 urls = ["www.google.com"] # required
 # number of pings to send (ping -c <COUNT>)
@@ -56,7 +62,7 @@ func (_ *Ping) SampleConfig() string {
	return sampleConfig
}
-func (p *Ping) Gather(acc inputs.Accumulator) error {
+func (p *Ping) Gather(acc telegraf.Accumulator) error {

	var wg sync.WaitGroup
	errorChannel := make(chan error, len(p.Urls)*2)
@@ -64,7 +70,7 @@ func (p *Ping) Gather(acc inputs.Accumulator) error {
	// Spin off a go routine for each url to ping
	for _, url := range p.Urls {
		wg.Add(1)
-		go func(url string, acc inputs.Accumulator) {
+		go func(url string, acc telegraf.Accumulator) {
			defer wg.Done()
			args := p.args(url)
			out, err := p.pingHost(args...)
@@ -86,7 +92,9 @@ func (p *Ping) Gather(acc inputs.Accumulator) error {
				"packets_transmitted": trans,
				"packets_received":    rec,
				"percent_packet_loss": loss,
-				"average_response_ms": avg,
			}
+			if avg > 0 {
+				fields["average_response_ms"] = avg
+			}
			acc.AddFields("ping", fields, tags)
		}(url, acc)
@@ -108,7 +116,11 @@ func (p *Ping) Gather(acc inputs.Accumulator) error {
}
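The third hunk makes `average_response_ms` conditional: when every packet is lost, avg stays zero and the field is omitted entirely (the ping_test.go hunk further down drops it from the expected map for exactly that case). A minimal sketch of the guard, with hypothetical totals:

```
package main

import "fmt"

func main() {
	// Mirrors the guard added above: on 100% packet loss avg is 0,
	// so the field is simply left out of the map.
	trans, rec, avg := 2, 0, 0.0 // hypothetical totals from processPingOutput
	fields := map[string]interface{}{
		"packets_transmitted": trans,
		"packets_received":    rec,
		"percent_packet_loss": 100.0,
	}
	if avg > 0 {
		fields["average_response_ms"] = avg
	}
	fmt.Println(fields)
}
```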
func hostPinger(args ...string) (string, error) {
-	c := exec.Command("ping", args...)
+	bin, err := exec.LookPath("ping")
+	if err != nil {
+		return "", err
+	}
+	c := exec.Command(bin, args...)
	out, err := c.CombinedOutput()
	return string(out), err
}
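This hunk swaps the hard-coded command name for an `exec.LookPath` lookup, so a missing ping binary surfaces as a clear error before anything is executed. A standalone sketch of the same pattern (the `runPing` name is hypothetical):

```
package main

import (
	"fmt"
	"os/exec"
)

// runPing resolves the ping binary first so a missing executable is
// reported up front instead of failing inside exec.Command.
func runPing(args ...string) (string, error) {
	bin, err := exec.LookPath("ping")
	if err != nil {
		return "", err
	}
	out, err := exec.Command(bin, args...).CombinedOutput()
	return string(out), err
}

func main() {
	out, err := runPing("-c", "1", "127.0.0.1")
	fmt.Println(out, err)
}
```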
@@ -174,7 +186,7 @@ func processPingOutput(out string) (int, int, float64, error) {
}

func init() {
-	inputs.Add("ping", func() inputs.Input {
+	inputs.Add("ping", func() telegraf.Input {
		return &Ping{pingHost: hostPinger}
	})
}
@@ -1,3 +1,5 @@
+// +build !windows
+
 package ping

 import (
@@ -6,7 +8,7 @@ import (
	"sort"
	"testing"

-	"github.com/influxdb/telegraf/testutil"
+	"github.com/influxdata/telegraf/testutil"
	"github.com/stretchr/testify/assert"
)

@@ -193,7 +195,6 @@ func TestBadPingGather(t *testing.T) {
		"packets_transmitted": 2,
		"packets_received":    0,
		"percent_packet_loss": 100.0,
-		"average_response_ms": 0.0,
	}
	acc.AssertContainsTaggedFields(t, "ping", fields, tags)
}
3 plugins/inputs/ping/ping_windows.go Normal file
@@ -0,0 +1,3 @@
// +build windows

package ping
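The new `ping_windows.go` stub pairs with the `// +build !windows` tags added above: the package still compiles on Windows while contributing no collector there. A minimal two-file illustration of the build-tag split (package and file names hypothetical; each snippet is its own file):

```
// file: impl_unix.go (hypothetical)
// +build !windows

package example

const platform = "unix"
```

```
// file: impl_windows.go (hypothetical)
// +build windows

package example

const platform = "windows"
```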
@@ -6,7 +6,8 @@ import (
	"fmt"
	"strings"

-	"github.com/influxdb/telegraf/plugins/inputs"
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/plugins/inputs"

	_ "github.com/lib/pq"
)
@@ -53,7 +54,7 @@ func (p *Postgresql) IgnoredColumns() map[string]bool {

var localhost = "host=localhost sslmode=disable"

-func (p *Postgresql) Gather(acc inputs.Accumulator) error {
+func (p *Postgresql) Gather(acc telegraf.Accumulator) error {
	var query string

	if p.Address == "" || p.Address == "localhost" {
@@ -101,7 +102,7 @@ type scanner interface {
	Scan(dest ...interface{}) error
}

-func (p *Postgresql) accRow(row scanner, acc inputs.Accumulator) error {
+func (p *Postgresql) accRow(row scanner, acc telegraf.Accumulator) error {
	var columnVars []interface{}
	var dbname bytes.Buffer

@@ -145,7 +146,7 @@ func (p *Postgresql) accRow(row scanner, acc inputs.Accumulator) error {
}

func init() {
-	inputs.Add("postgresql", func() inputs.Input {
+	inputs.Add("postgresql", func() telegraf.Input {
		return &Postgresql{}
	})
}
@@ -4,7 +4,7 @@ import (
	"fmt"
	"testing"

-	"github.com/influxdb/telegraf/testutil"
+	"github.com/influxdata/telegraf/testutil"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)
@@ -113,7 +113,7 @@ func TestPostgresqlDefaultsToAllDatabases(t *testing.T) {

	var found bool

-	for _, pnt := range acc.Points {
+	for _, pnt := range acc.Metrics {
		if pnt.Measurement == "postgresql" {
			if pnt.Tags["db"] == "postgres" {
				found = true
68 plugins/inputs/powerdns/README.md Normal file
@@ -0,0 +1,68 @@
# PowerDNS Input Plugin

The powerdns plugin gathers metrics about PowerDNS over its unix control socket.

### Configuration:

```
# Description
[[inputs.powerdns]]
  # An array of sockets to gather stats about.
  # Specify a path to unix socket.
  #
  # If no servers are specified, then '/var/run/pdns.controlsocket' is used as the path.
  unix_sockets = ["/var/run/pdns.controlsocket"]
```
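As the implementation further down shows, the plugin speaks PowerDNS's control-socket protocol: it writes `show * \n` and reads back comma-separated `key=value` pairs. A minimal sketch for inspecting a socket by hand, assuming the default socket path from the sample config:

```
package main

import (
	"fmt"
	"io/ioutil"
	"net"
	"time"
)

func main() {
	// Default socket path from the sample config; adjust to your setup.
	conn, err := net.DialTimeout("unix", "/var/run/pdns.controlsocket", 5*time.Second)
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	conn.SetDeadline(time.Now().Add(5 * time.Second))
	fmt.Fprint(conn, "show * \n")  // the same command the plugin sends
	raw, _ := ioutil.ReadAll(conn) // comma-separated key=value pairs
	fmt.Println(string(raw))
}
```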
### Measurements & Fields:

- powerdns
    - corrupt-packets
    - deferred-cache-inserts
    - deferred-cache-lookup
    - dnsupdate-answers
    - dnsupdate-changes
    - dnsupdate-queries
    - dnsupdate-refused
    - packetcache-hit
    - packetcache-miss
    - packetcache-size
    - query-cache-hit
    - query-cache-miss
    - rd-queries
    - recursing-answers
    - recursing-questions
    - recursion-unanswered
    - security-status
    - servfail-packets
    - signatures
    - tcp-answers
    - tcp-queries
    - timedout-packets
    - udp-answers
    - udp-answers-bytes
    - udp-do-queries
    - udp-queries
    - udp4-answers
    - udp4-queries
    - udp6-answers
    - udp6-queries
    - key-cache-size
    - latency
    - meta-cache-size
    - qsize-q
    - signature-cache-size
    - sys-msec
    - uptime
    - user-msec
### Tags:

- tags: `server=socket`

### Example Output:

```
$ ./telegraf -config telegraf.conf -input-filter powerdns -test
> powerdns,server=/var/run/pdns.controlsocket corrupt-packets=0i,deferred-cache-inserts=0i,deferred-cache-lookup=0i,dnsupdate-answers=0i,dnsupdate-changes=0i,dnsupdate-queries=0i,dnsupdate-refused=0i,key-cache-size=0i,latency=26i,meta-cache-size=0i,packetcache-hit=0i,packetcache-miss=1i,packetcache-size=0i,qsize-q=0i,query-cache-hit=0i,query-cache-miss=6i,rd-queries=1i,recursing-answers=0i,recursing-questions=0i,recursion-unanswered=0i,security-status=3i,servfail-packets=0i,signature-cache-size=0i,signatures=0i,sys-msec=4349i,tcp-answers=0i,tcp-queries=0i,timedout-packets=0i,udp-answers=1i,udp-answers-bytes=50i,udp-do-queries=0i,udp-queries=0i,udp4-answers=1i,udp4-queries=1i,udp6-answers=0i,udp6-queries=0i,uptime=166738i,user-msec=3036i 1454078624932715706
```
126 plugins/inputs/powerdns/powerdns.go Normal file
@@ -0,0 +1,126 @@
package powerdns

import (
	"bufio"
	"fmt"
	"io"
	"net"
	"strconv"
	"strings"
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/inputs"
)

type Powerdns struct {
	UnixSockets []string
}

var sampleConfig = `
  # An array of sockets to gather stats about.
  # Specify a path to unix socket.
  #
  # If no servers are specified, then '/var/run/pdns.controlsocket' is used as the path.
  unix_sockets = ["/var/run/pdns.controlsocket"]
`

var defaultTimeout = 5 * time.Second

func (p *Powerdns) SampleConfig() string {
	return sampleConfig
}

func (p *Powerdns) Description() string {
	return "Read metrics from one or many PowerDNS servers"
}

func (p *Powerdns) Gather(acc telegraf.Accumulator) error {
	if len(p.UnixSockets) == 0 {
		return p.gatherServer("/var/run/pdns.controlsocket", acc)
	}

	for _, serverSocket := range p.UnixSockets {
		if err := p.gatherServer(serverSocket, acc); err != nil {
			return err
		}
	}

	return nil
}
func (p *Powerdns) gatherServer(address string, acc telegraf.Accumulator) error {
	conn, err := net.DialTimeout("unix", address, defaultTimeout)
	if err != nil {
		return err
	}

	defer conn.Close()

	conn.SetDeadline(time.Now().Add(defaultTimeout))

	// Read and write buffer
	rw := bufio.NewReadWriter(bufio.NewReader(conn), bufio.NewWriter(conn))

	// Send command
	if _, err := fmt.Fprint(conn, "show * \n"); err != nil {
		return nil
	}
	if err := rw.Flush(); err != nil {
		return err
	}

	// Read data
	buf := make([]byte, 0, 4096)
	tmp := make([]byte, 1024)
	for {
		n, err := rw.Read(tmp)
		if err != nil {
			if err != io.EOF {
				return err
			}

			break
		}
		buf = append(buf, tmp[:n]...)
	}

	metrics := string(buf)

	// Process data
	fields, err := parseResponse(metrics)
	if err != nil {
		return err
	}

	// Add server socket as a tag
	tags := map[string]string{"server": address}

	acc.AddFields("powerdns", fields, tags)

	return nil
}
func parseResponse(metrics string) (map[string]interface{}, error) {
	values := make(map[string]interface{})

	s := strings.Split(metrics, ",")

	for _, metric := range s[:len(s)-1] {
		m := strings.Split(metric, "=")

		i, err := strconv.ParseInt(m[1], 10, 64)
		if err != nil {
			return values, err
		}
		values[m[0]] = i
	}

	return values, nil
}
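Note that `parseResponse` relies on the response's trailing comma: `strings.Split` then yields an empty final element, which the `s[:len(s)-1]` slice drops. A standalone sketch of the same parsing over a hypothetical two-field response:

```
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// Trailing comma mirrors the real control-socket response.
	resp := "latency=26,uptime=86317,"
	values := make(map[string]int64)
	parts := strings.Split(resp, ",")
	for _, kv := range parts[:len(parts)-1] { // drop empty element after trailing comma
		m := strings.SplitN(kv, "=", 2)
		n, err := strconv.ParseInt(m[1], 10, 64)
		if err != nil {
			panic(err)
		}
		values[m[0]] = n
	}
	fmt.Println(values)
}
```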

func init() {
	inputs.Add("powerdns", func() telegraf.Input {
		return &Powerdns{}
	})
}
147 plugins/inputs/powerdns/powerdns_test.go Normal file
@@ -0,0 +1,147 @@
package powerdns

import (
	"crypto/rand"
	"encoding/binary"
	"fmt"
	"net"
	"testing"

	"github.com/influxdata/telegraf/testutil"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

type statServer struct{}

var metrics = "corrupt-packets=0,deferred-cache-inserts=0,deferred-cache-lookup=0," +
	"dnsupdate-answers=0,dnsupdate-changes=0,dnsupdate-queries=0," +
	"dnsupdate-refused=0,packetcache-hit=0,packetcache-miss=1,packetcache-size=0," +
	"query-cache-hit=0,query-cache-miss=6,rd-queries=1,recursing-answers=0," +
	"recursing-questions=0,recursion-unanswered=0,security-status=3," +
	"servfail-packets=0,signatures=0,tcp-answers=0,tcp-queries=0," +
	"timedout-packets=0,udp-answers=1,udp-answers-bytes=50,udp-do-queries=0," +
	"udp-queries=0,udp4-answers=1,udp4-queries=1,udp6-answers=0,udp6-queries=0," +
	"key-cache-size=0,latency=26,meta-cache-size=0,qsize-q=0," +
	"signature-cache-size=0,sys-msec=2889,uptime=86317,user-msec=2167,"

func (s statServer) serverSocket(l net.Listener) {

	for {
		conn, err := l.Accept()
		if err != nil {
			return
		}

		go func(c net.Conn) {
			buf := make([]byte, 1024)
			n, _ := c.Read(buf)

			data := buf[:n]
			if string(data) == "show * \n" {
				c.Write([]byte(metrics))
				c.Close()
			}
		}(conn)
	}
}
func TestPowerdnsGeneratesMetrics(t *testing.T) {
	// We create a fake server to return test data
	var randomNumber int64
	binary.Read(rand.Reader, binary.LittleEndian, &randomNumber)
	socket, err := net.Listen("unix", fmt.Sprintf("/tmp/pdns%d.controlsocket", randomNumber))
	if err != nil {
		t.Fatal("Cannot initialize server on socket")
	}

	defer socket.Close()

	s := statServer{}
	go s.serverSocket(socket)

	p := &Powerdns{
		UnixSockets: []string{fmt.Sprintf("/tmp/pdns%d.controlsocket", randomNumber)},
	}

	var acc testutil.Accumulator

	err = p.Gather(&acc)
	require.NoError(t, err)

	intMetrics := []string{"corrupt-packets", "deferred-cache-inserts",
		"deferred-cache-lookup", "dnsupdate-answers", "dnsupdate-changes",
		"dnsupdate-queries", "dnsupdate-refused", "packetcache-hit",
		"packetcache-miss", "packetcache-size", "query-cache-hit", "query-cache-miss",
		"rd-queries", "recursing-answers", "recursing-questions",
		"recursion-unanswered", "security-status", "servfail-packets", "signatures",
		"tcp-answers", "tcp-queries", "timedout-packets", "udp-answers",
		"udp-answers-bytes", "udp-do-queries", "udp-queries", "udp4-answers",
		"udp4-queries", "udp6-answers", "udp6-queries", "key-cache-size", "latency",
		"meta-cache-size", "qsize-q", "signature-cache-size", "sys-msec", "uptime", "user-msec"}

	for _, metric := range intMetrics {
		assert.True(t, acc.HasIntField("powerdns", metric), metric)
	}
}
func TestPowerdnsParseMetrics(t *testing.T) {
	values, err := parseResponse(metrics)
	require.NoError(t, err, "Error parsing powerdns response")

	tests := []struct {
		key   string
		value int64
	}{
		{"corrupt-packets", 0},
		{"deferred-cache-inserts", 0},
		{"deferred-cache-lookup", 0},
		{"dnsupdate-answers", 0},
		{"dnsupdate-changes", 0},
		{"dnsupdate-queries", 0},
		{"dnsupdate-refused", 0},
		{"packetcache-hit", 0},
		{"packetcache-miss", 1},
		{"packetcache-size", 0},
		{"query-cache-hit", 0},
		{"query-cache-miss", 6},
		{"rd-queries", 1},
		{"recursing-answers", 0},
		{"recursing-questions", 0},
		{"recursion-unanswered", 0},
		{"security-status", 3},
		{"servfail-packets", 0},
		{"signatures", 0},
		{"tcp-answers", 0},
		{"tcp-queries", 0},
		{"timedout-packets", 0},
		{"udp-answers", 1},
		{"udp-answers-bytes", 50},
		{"udp-do-queries", 0},
		{"udp-queries", 0},
		{"udp4-answers", 1},
		{"udp4-queries", 1},
		{"udp6-answers", 0},
		{"udp6-queries", 0},
		{"key-cache-size", 0},
		{"latency", 26},
		{"meta-cache-size", 0},
		{"qsize-q", 0},
		{"signature-cache-size", 0},
		{"sys-msec", 2889},
		{"uptime", 86317},
		{"user-msec", 2167},
	}

	for _, test := range tests {
		value, ok := values[test.key]
		if !ok {
			t.Errorf("Did not find key for metric %s in values", test.key)
			continue
		}
		if value != test.value {
			t.Errorf("Metric: %s, Expected: %d, actual: %d",
				test.key, test.value, value)
		}
	}
}
@@ -16,25 +16,19 @@ individual process specific measurements.
Example:

```
-[procstat]
+[[inputs.procstat]]
+  exe = "influxd"
+  prefix = "influxd"

-  [[procstat.specifications]]
-  exe = "influxd"
-  prefix = "influxd"
-
-  [[procstat.specifications]]
-  pid_file = "/var/run/lxc/dnsmasq.pid"
+[[inputs.procstat]]
+  pid_file = "/var/run/lxc/dnsmasq.pid"
```

The above configuration would result in output like:

```
-[...]
-> [name="dnsmasq" pid="44979"] procstat_cpu_user value=0.14
-> [name="dnsmasq" pid="44979"] procstat_cpu_system value=0.07
-[...]
-> [name="influxd" pid="34337"] procstat_influxd_cpu_user value=25.43
-> [name="influxd" pid="34337"] procstat_influxd_cpu_system value=21.82
+> procstat,name="dnsmasq",pid="44979" cpu_user=0.14,cpu_system=0.07
+> procstat,name="influxd",pid="34337" influxd_cpu_user=25.43,influxd_cpu_system=21.82
```

# Measurements
@@ -10,7 +10,8 @@ import (

	"github.com/shirou/gopsutil/process"

-	"github.com/influxdb/telegraf/plugins/inputs"
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/plugins/inputs"
)

type Procstat struct {
@@ -18,10 +19,14 @@ type Procstat struct {
	Exe     string
	Pattern string
	Prefix  string
+
+	pidmap map[int32]*process.Process
}

func NewProcstat() *Procstat {
-	return &Procstat{}
+	return &Procstat{
+		pidmap: make(map[int32]*process.Process),
+	}
}
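Caching the `*process.Process` handles in `pidmap` gives each pid a single handle that survives across `Gather` calls, which is presumably what lets per-handle state (such as gopsutil's CPU-percent bookkeeping) accumulate between intervals. A minimal sketch of the create-once-per-pid pattern, independent of gopsutil:

```
package main

import "fmt"

// procHandle stands in for *process.Process; the point is only the
// create-once-per-pid caching pattern used by Procstat.pidmap.
type procHandle struct{ pid int32 }

var pidmap = make(map[int32]*procHandle)

func handleFor(pid int32) *procHandle {
	if h, ok := pidmap[pid]; ok {
		return h // reuse the existing handle so per-handle state survives
	}
	h := &procHandle{pid: pid}
	pidmap[pid] = h
	return h
}

func main() {
	fmt.Println(handleFor(42) == handleFor(42)) // true: same cached handle
}
```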
var sampleConfig = `
@@ -45,13 +50,13 @@ func (_ *Procstat) Description() string {
	return "Monitor process cpu and memory usage"
}

-func (p *Procstat) Gather(acc inputs.Accumulator) error {
-	procs, err := p.createProcesses()
+func (p *Procstat) Gather(acc telegraf.Accumulator) error {
+	err := p.createProcesses()
	if err != nil {
		log.Printf("Error: procstat getting process, exe: [%s] pidfile: [%s] pattern: [%s] %s",
			p.Exe, p.PidFile, p.Pattern, err.Error())
	} else {
-		for _, proc := range procs {
+		for _, proc := range p.pidmap {
			p := NewSpecProcessor(p.Prefix, acc, proc)
			p.pushMetrics()
		}
@@ -60,8 +65,7 @@ func (p *Procstat) Gather(acc inputs.Accumulator) error {
	return nil
}
-func (p *Procstat) createProcesses() ([]*process.Process, error) {
-	var out []*process.Process
+func (p *Procstat) createProcesses() error {
	var errstring string
	var outerr error

@@ -71,11 +75,14 @@
	}

	for _, pid := range pids {
-		p, err := process.NewProcess(int32(pid))
-		if err == nil {
-			out = append(out, p)
-		} else {
-			errstring += err.Error() + " "
+		_, ok := p.pidmap[pid]
+		if !ok {
+			proc, err := process.NewProcess(pid)
+			if err == nil {
+				p.pidmap[pid] = proc
+			} else {
+				errstring += err.Error() + " "
+			}
		}
	}

@@ -83,7 +90,7 @@
		outerr = fmt.Errorf("%s", errstring)
	}

-	return out, outerr
+	return outerr
}
func (p *Procstat) getAllPids() ([]int32, error) {
@@ -123,9 +130,13 @@ func pidsFromFile(file string) ([]int32, error) {
func pidsFromExe(exe string) ([]int32, error) {
	var out []int32
	var outerr error
-	pgrep, err := exec.Command("pgrep", exe).Output()
+	bin, err := exec.LookPath("pgrep")
	if err != nil {
-		return out, fmt.Errorf("Failed to execute pgrep. Error: '%s'", err)
+		return out, fmt.Errorf("Couldn't find pgrep binary: %s", err)
+	}
+	pgrep, err := exec.Command(bin, exe).Output()
+	if err != nil {
+		return out, fmt.Errorf("Failed to execute %s. Error: '%s'", bin, err)
	} else {
		pids := strings.Fields(string(pgrep))
		for _, pid := range pids {
@@ -143,9 +154,13 @@ func pidsFromExe(exe string) ([]int32, error) {
func pidsFromPattern(pattern string) ([]int32, error) {
	var out []int32
	var outerr error
-	pgrep, err := exec.Command("pgrep", "-f", pattern).Output()
+	bin, err := exec.LookPath("pgrep")
	if err != nil {
-		return out, fmt.Errorf("Failed to execute pgrep. Error: '%s'", err)
+		return out, fmt.Errorf("Couldn't find pgrep binary: %s", err)
+	}
+	pgrep, err := exec.Command(bin, "-f", pattern).Output()
+	if err != nil {
+		return out, fmt.Errorf("Failed to execute %s. Error: '%s'", bin, err)
	} else {
		pids := strings.Fields(string(pgrep))
		for _, pid := range pids {
@@ -161,7 +176,7 @@ func pidsFromPattern(pattern string) ([]int32, error) {
}

func init() {
-	inputs.Add("procstat", func() inputs.Input {
+	inputs.Add("procstat", func() telegraf.Input {
		return NewProcstat()
	})
}
@@ -6,10 +6,11 @@ import (
	"strconv"
	"testing"

+	"github.com/shirou/gopsutil/process"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

-	"github.com/influxdb/telegraf/testutil"
+	"github.com/influxdata/telegraf/testutil"
)

func TestGather(t *testing.T) {
@@ -23,6 +24,7 @@ func TestGather(t *testing.T) {
	p := Procstat{
		PidFile: file.Name(),
		Prefix:  "foo",
+		pidmap:  make(map[int32]*process.Process),
	}
	p.Gather(&acc)
	assert.True(t, acc.HasFloatField("procstat", "foo_cpu_time_user"))
@@ -2,18 +2,18 @@ package procstat

import (
	"fmt"
	"log"
	"time"

	"github.com/shirou/gopsutil/process"

-	"github.com/influxdb/telegraf/plugins/inputs"
+	"github.com/influxdata/telegraf"
)

type SpecProcessor struct {
	Prefix string
	tags   map[string]string
	fields map[string]interface{}
-	acc    inputs.Accumulator
+	acc    telegraf.Accumulator
	proc   *process.Process
}
@@ -34,13 +34,13 @@ func (p *SpecProcessor) flush() {

func NewSpecProcessor(
	prefix string,
-	acc inputs.Accumulator,
+	acc telegraf.Accumulator,
	p *process.Process,
) *SpecProcessor {
	tags := make(map[string]string)
	tags["pid"] = fmt.Sprintf("%v", p.Pid)
	if name, err := p.Name(); err == nil {
-		tags["name"] = name
+		tags["process_name"] = name
	}
	return &SpecProcessor{
		Prefix: prefix,
@@ -52,21 +52,11 @@ func NewSpecProcessor(
}
func (p *SpecProcessor) pushMetrics() {
-	if err := p.pushFDStats(); err != nil {
-		log.Printf("procstat, fd stats not available: %s", err.Error())
-	}
-	if err := p.pushCtxStats(); err != nil {
-		log.Printf("procstat, ctx stats not available: %s", err.Error())
-	}
-	if err := p.pushIOStats(); err != nil {
-		log.Printf("procstat, io stats not available: %s", err.Error())
-	}
-	if err := p.pushCPUStats(); err != nil {
-		log.Printf("procstat, cpu stats not available: %s", err.Error())
-	}
-	if err := p.pushMemoryStats(); err != nil {
-		log.Printf("procstat, mem stats not available: %s", err.Error())
-	}
+	p.pushFDStats()
+	p.pushCtxStats()
+	p.pushIOStats()
+	p.pushCPUStats()
+	p.pushMemoryStats()
	p.flush()
}
@@ -113,10 +103,18 @@ func (p *SpecProcessor) pushCPUStats() error {
	p.add("cpu_time_iowait", cpu_time.Iowait)
	p.add("cpu_time_irq", cpu_time.Irq)
	p.add("cpu_time_soft_irq", cpu_time.Softirq)
-	p.add("cpu_time_soft_steal", cpu_time.Steal)
-	p.add("cpu_time_soft_stolen", cpu_time.Stolen)
-	p.add("cpu_time_soft_guest", cpu_time.Guest)
-	p.add("cpu_time_soft_guest_nice", cpu_time.GuestNice)
+	p.add("cpu_time_steal", cpu_time.Steal)
+	p.add("cpu_time_stolen", cpu_time.Stolen)
+	p.add("cpu_time_guest", cpu_time.Guest)
+	p.add("cpu_time_guest_nice", cpu_time.GuestNice)

	cpu_perc, err := p.proc.CPUPercent(time.Duration(0))
	if err != nil {
		return err
+	} else if cpu_perc == 0 {
+		return nil
	}
	p.add("cpu_usage", cpu_perc)

	return nil
}
Some files were not shown because too many files have changed in this diff.