Compare commits


118 Commits
1.5.2 ... 1.4.5

Author SHA1 Message Date
Daniel Nelson
8385206e68 Update changelog
(cherry picked from commit 2c5a5373f6)
2017-12-01 11:42:34 -08:00
Daniel Nelson
07e268f39e Update changelog
(cherry picked from commit cabe10b88a)
2017-12-01 11:25:18 -08:00
Daniel Nelson
c095876442 Fix HOST_MOUNT_PREFIX in docker with disk input (#3529)
(cherry picked from commit 7f66863b87)
2017-12-01 11:25:03 -08:00
Daniel Nelson
809ed511dd Update changelog
(cherry picked from commit 24d82aebe6)
2017-11-29 12:12:54 -08:00
Daniel Nelson
741022b656 Update gopsutil version to include netstat fix (#3513)
(cherry picked from commit 7dc256e845)
2017-11-29 12:11:12 -08:00
Daniel Nelson
34d3bf4fa0 Update changelog
(cherry picked from commit a9ada5f65b)
2017-11-27 12:33:17 -08:00
Laurent Gosselin
700b52dbd1 Fix global variable collection when using interval_slow option in mysql input (#3500)
(cherry picked from commit f758d0c6c3)
2017-11-27 12:33:17 -08:00
Daniel Nelson
ddcb93188f Set 1.4.4 release date
(cherry picked from commit 07297e80a8)
2017-11-08 15:22:31 -08:00
Daniel Nelson
cb193d0e8a Update changelog
(cherry picked from commit 2c2dc97702)
2017-11-07 11:43:33 -08:00
Daniel Nelson
600f9fa067 Use current time if container read time is zero value (#3437)
(cherry picked from commit cbbdf1043b)
2017-11-07 11:43:29 -08:00
Daniel Nelson
4cedae9d2c Update changelog
(cherry picked from commit c55f285de0)
2017-11-07 11:37:45 -08:00
Daniel Nelson
4c8e8fc2f1 Update gopsutil to v2.17.10 (#3441)
(cherry picked from commit e1295c41c8)
2017-11-07 11:37:24 -08:00
Daniel Nelson
7c5bcfe84e Update changelog
(cherry picked from commit e0df62c27b)
2017-11-06 17:43:02 -08:00
Bob Shannon
efa20d05fa Redact datadog API key in log output (#3420)
(cherry picked from commit fdf12ce6b4)
2017-11-06 17:42:57 -08:00
Daniel Nelson
187c7e12a8 Update changelog
(cherry picked from commit c116af35c7)
2017-10-30 15:36:17 -07:00
Daniel Nelson
f29a994743 Use explicit schemas in mqtt_consumer input (#3401)
(cherry picked from commit fcfcc803b1)
2017-10-30 15:35:48 -07:00
Daniel Nelson
f416f429d7 Fix circle-ci Go version 2017-10-30 15:06:34 -07:00
Daniel Nelson
ec6b1aae94 Fix unittest for golang 1.9
(cherry picked from commit cafb22d145)
2017-10-30 15:04:30 -07:00
Daniel Nelson
b473b6a659 Set release date for 1.4.3 2017-10-25 14:16:05 -07:00
Daniel Nelson
e5d08a4d86 Update changelog
(cherry picked from commit 13c1f1524a)
2017-10-24 16:26:06 -07:00
Daniel Nelson
3c894bb056 Use golang.org/x/sys/unix instead of syscall in diskio (#3384)
(cherry picked from commit 9a062498e7)
2017-10-24 16:26:06 -07:00
Daniel Nelson
d2d173b792 Update changelog
(cherry picked from commit f64cf89db1)
2017-10-24 15:47:19 -07:00
Daniel Nelson
145f7da42e If the connector name cannot be unquoted, use the raw value (#3371)
(cherry picked from commit 6d1777276c)
2017-10-24 15:47:15 -07:00
Daniel Nelson
f9f8d9ed7e Update changelog
(cherry picked from commit 65580759fc)
2017-10-23 12:37:30 -07:00
Sergei Smolianinov
0dd3b0507b Fix ACL token usage in consul input plugin (#3376)
(cherry picked from commit d2f9fc7d8c)
2017-10-23 12:37:30 -07:00
Daniel Nelson
c44b4fcc89 Update changelog
(cherry picked from commit 7088d98304)
2017-10-19 16:35:11 -07:00
Daniel Nelson
cb9c1653d3 Remove warning when JSON contains null value (#3359)
(cherry picked from commit 4243403432)
2017-10-19 16:35:10 -07:00
Daniel Nelson
cf7590b88e Update changelog
(cherry picked from commit 9b59cdd10e)
2017-10-18 13:58:25 -07:00
clheikes
5a7d889908 Fix TELEGRAF_OPTS expansion in systemd service unit (#3354)
(cherry picked from commit 02baa696c3)
2017-10-18 13:58:25 -07:00
Daniel Nelson
ef652678dd Update changelog
(cherry picked from commit a4fa19252f)
2017-10-18 12:58:41 -07:00
Dimitris Rozakis
c4cc57956b Respect path prefix in influx output uri (#3224)
(cherry picked from commit 9c8f4afa37)
2017-10-18 12:58:41 -07:00
Daniel Nelson
7b8a761c63 Update changelog
(cherry picked from commit 7ba376964c)
2017-10-18 12:26:07 -07:00
Ayrdrie
7d66319f59 Fix mongodb input panic when restarting mongodb (#3355)
(cherry picked from commit a75ab3e190)
2017-10-18 12:26:03 -07:00
Pierre Fersing
22f64f8417 Fix CPU system plugin gets stuck after system suspend (#3342)
(cherry picked from commit f5a9d1bc75)
2017-10-16 14:27:58 -07:00
Daniel Nelson
6b4deb01bb Update changelog
(cherry picked from commit 3ea41e885c)
2017-10-16 11:27:28 -07:00
Daniel Nelson
e4835cdc30 Fix case sensitivity issue in sqlserver query (#3336)
(cherry picked from commit 1f348037b7)
2017-10-16 11:27:28 -07:00
Daniel Nelson
e32ffdde06 Update changelog
(cherry picked from commit 0f9f757da7)
2017-10-12 17:27:24 -07:00
Windkit Li
0f905eaee7 Fix snmpwalk address format in leofs input (#3328)
(cherry picked from commit 2f8d0f4d47)
2017-10-12 17:27:24 -07:00
Daniel Nelson
4d48dcb84f Update changelog
(cherry picked from commit 024dea2ff9)
2017-10-12 15:56:09 -07:00
Daniel Nelson
17377b4942 Fix container name filters in docker input (#3331)
(cherry picked from commit fa25e123d8)
2017-10-12 15:55:50 -07:00
Daniel Nelson
0cc5fc0ce4 Set 1.4.2 release date
(cherry picked from commit 4e0c8e6026)
2017-10-10 13:31:06 -07:00
Daniel Nelson
8011109466 Remove InfluxDB path prefix test
This tests a feature that is not yet on this branch and the test was
mistakenly backported.
2017-10-05 16:37:58 -07:00
Daniel Nelson
588f0c77f8 Update changelog
(cherry picked from commit 13c7802b84)
2017-10-05 16:17:06 -07:00
Daniel Nelson
4301b8e32a Use chunked transfer encoding in InfluxDB output (#3307)
(cherry picked from commit cce40c515a)
2017-10-05 16:17:05 -07:00
Daniel Nelson
3c9d7db0a0 Update changelog
(cherry picked from commit 6e1fa559a3)
2017-10-05 16:06:11 -07:00
Daniel Nelson
f7b3eb1ebd Fix panic in cpu input if number of cpus changes (#3306)
(cherry picked from commit f56dda0ac8)
2017-10-05 16:06:11 -07:00
Daniel Nelson
b8ab827629 Update changelog
(cherry picked from commit 002ccf3295)
2017-10-03 15:27:49 -07:00
Daniel Nelson
d03e2fca32 Add support for proxy environment variables to http_response (#3302)
(cherry picked from commit a163effa6d)
2017-10-03 15:26:55 -07:00
Daniel Nelson
eca00c10e0 Add support for standard proxy env vars in outputs. (#3212)
(cherry picked from commit 7b08f9d099)
2017-10-03 15:26:44 -07:00
Daniel Nelson
9cf19df04e Update changelog
(cherry picked from commit f67350107d)
2017-10-02 17:17:10 -07:00
Daniel Nelson
e77c2b76e7 Fix case sensitivity error in sqlserver input (#3287)
(cherry picked from commit 8e3ed96d6f)
2017-10-02 17:17:10 -07:00
Daniel Nelson
c749c43dab Fix mqtt_consumer connection_timeout test
(cherry picked from commit cdca81c999)
2017-10-02 12:32:05 -07:00
Daniel Nelson
1be17ea5af Update example config 2017-09-29 16:04:02 -07:00
Daniel Nelson
e1155bec20 Update changelog
(cherry picked from commit 29b6f4168c)
2017-09-29 16:01:11 -07:00
Daniel Nelson
cfac750469 Fix format of connection_timeout in mqtt_consumer (#3286)
(cherry picked from commit 3d62e045af)
2017-09-29 16:01:11 -07:00
Daniel Nelson
f10d5b43c4 Update changelog
(cherry picked from commit cadafa6405)
2017-09-26 16:03:30 -07:00
Daniel Nelson
47b2d04d5b Allow JSON data format to contain zero metrics (#3268)
(cherry picked from commit 22a9ffbb9d)
2017-09-26 16:03:30 -07:00
Daniel Nelson
0e0da57b9a Update changelog
(cherry picked from commit 2e1457a496)
2017-09-26 15:38:41 -07:00
Daniel Nelson
8e7cf0109e Fix parsing of JSON with a UTF8 BOM in httpjson (#3267)
(cherry picked from commit 8614445235)
2017-09-26 15:38:41 -07:00
Daniel Nelson
5b791fd2e5 Update changelog
(cherry picked from commit f23d1eb078)
2017-09-26 15:29:19 -07:00
Daniel Nelson
293b1a0093 Fix dmcache tests with 32bit int
(cherry picked from commit ef5c12bd86)
2017-09-26 15:29:01 -07:00
Daniel Nelson
761ea06d6a Fix cgroup tests with 32bit int
(cherry picked from commit c013cc1497)
2017-09-26 15:29:01 -07:00
Daniel Nelson
8fafe9878b Fix ceph tests with 32bit int
(cherry picked from commit bb665cf013)
2017-09-26 15:29:01 -07:00
Daniel Nelson
5da3eef38b Allow 64bit integers in kernel_vmstat
(cherry picked from commit f823fc73f6)
2017-09-26 15:29:00 -07:00
Daniel Nelson
2de7aa23d7 Set 1.4.1 release date in changelog
(cherry picked from commit fd702e6bb8)
2017-09-26 14:19:51 -07:00
Daniel Nelson
52cd38150c Update changelog
(cherry picked from commit 0048bf2120)
2017-09-18 14:25:57 -07:00
Daniel Nelson
c08f492f78 Fix arm64 packages contain 32-bit executable (#3246)
(cherry picked from commit b8e134cd37)
2017-09-18 14:25:57 -07:00
Daniel Nelson
66cfe80e37 Update changelog
(cherry picked from commit b94cda6b46)
2017-09-14 15:30:51 -07:00
Trevor Pounds
ba5e5ec283 Fix panic in statsd p100 calculation (#3230)
(cherry picked from commit 73372872c2)
2017-09-14 15:30:51 -07:00
Daniel Nelson
259f8e4002 Update changelog
(cherry picked from commit 875ab3c4b7)
2017-09-14 15:05:38 -07:00
Mark Wilkinson - m82labs
558ab0c730 Fix duplicate keys in perf counters sqlserver query (#3175)
(cherry picked from commit 1c5ebd4be3)
2017-09-14 15:05:38 -07:00
Daniel Nelson
8d4fbe29e7 Update changelog
(cherry picked from commit 103d24bfba)
2017-09-14 15:01:28 -07:00
Daniel Nelson
72337a1c97 Fix skipped line with empty target in iptables (#3235)
(cherry picked from commit d5f48e3e96)
2017-09-14 15:01:21 -07:00
Daniel Nelson
86537899b2 Update changelog
(cherry picked from commit 7a41d2c586)
2017-09-14 13:07:30 -07:00
Trevor Pounds
a727d5d1f0 Fix counter and gauge metric types. (#3232)
(cherry picked from commit fa1982323a)
2017-09-14 13:07:30 -07:00
Daniel Nelson
7ec194a482 Update changelog
(cherry picked from commit cdf63c5776)
2017-09-13 17:32:03 -07:00
Daniel Nelson
5a77d28837 Whitelist allowed char classes for opentsdb output. (#3227)
(cherry picked from commit 0a8c2e0b3b)
2017-09-13 17:32:03 -07:00
Daniel Nelson
47927c353d Fix fluentd test
(cherry picked from commit eebee9759f)
2017-09-12 17:58:29 -07:00
Daniel Nelson
b9e7fa27aa Update changelog
(cherry picked from commit c5cfde667a)
2017-09-12 17:18:29 -07:00
Daniel Nelson
0d437140bd Fix optional field types in fluentd input
(cherry picked from commit 8a68e7424c)
2017-09-12 17:18:29 -07:00
Daniel Nelson
36969a63c2 Update changelog
(cherry picked from commit cc63b3b667)
2017-09-11 12:28:37 -07:00
DanKans
e9a12bb694 Fix MQTT input exits if Broker is not available on startup (#3202)
(cherry picked from commit 5488f4b3ac)
2017-09-11 12:28:12 -07:00
Daniel Nelson
34b7a4c361 Add 1.4.0 release date
(cherry picked from commit ab1c11b06d)
2017-09-05 17:15:06 -07:00
Daniel Nelson
f46370d982 Sort metrics before comparing in graphite test
(cherry picked from commit 98e784faf3)
2017-09-05 12:50:55 -07:00
Daniel Nelson
07b7e09749 Update changelog
(cherry picked from commit f43af72785)
2017-08-31 13:44:05 -07:00
Daniel Nelson
e54795795d Fix panic when handling string fields with escapes (#3188)
(cherry picked from commit 28d16188b3)
2017-08-30 21:17:10 -07:00
Daniel Nelson
b2b2bd8a27 Update changelog 2017-08-29 16:30:25 -07:00
Daniel Nelson
f96cbb48c7 Convert bool fields to int in graphite serializer 2017-08-29 16:30:25 -07:00
Seua Polyakov
9077cb83bc Skip non-numerical values in graphite format (#3179) 2017-08-29 16:30:25 -07:00
Daniel Nelson
0f188f280f Update changelog 2017-08-28 17:18:00 -07:00
Dylan Meissner
b9420e73bd HTTP headers can be added to InfluxDB output (#3182)
(cherry picked from commit a9a40cbf87)
2017-08-28 17:15:43 -07:00
Daniel Nelson
1e43e5e7ae Update changelog
(cherry picked from commit 5fd8ab36d3)
2017-08-28 17:09:08 -07:00
Jeff Nickoloff
5e104ad974 Added CloudWatch metric constraint validation (#3183)
(cherry picked from commit ac1fa05672)
2017-08-28 17:09:08 -07:00
Daniel Nelson
cc9d8c700c Update changelog
(cherry picked from commit a98496591a)
2017-08-25 18:08:55 -07:00
Ashton Kinslow
b15ec21ba7 Fix NSQ input plugin when used with version 1.0.0-compat
(cherry picked from commit 0a6541dfa8)
2017-08-25 18:08:55 -07:00
Daniel Nelson
a9abfe8f08 Update changelog
(cherry picked from commit 6abecd0ac7)
2017-08-25 12:59:51 -07:00
Rickard von Essen
307210242c Don't fail parsing of zpool stats if pool health is UNAVAIL on FreeBSD (#3149)
(cherry picked from commit 0502b65316)
2017-08-25 12:59:38 -07:00
Daniel Nelson
0a41db16f1 Update changelog
(cherry picked from commit e400fcf5da)
2017-08-25 11:56:30 -07:00
Jan Willem Janssen
7480267fd2 Fix parsing of SHM remotes in ntpq input (#3163)
(cherry picked from commit d449833de9)
2017-08-25 11:56:27 -07:00
Daniel Nelson
30949c4596 Update fail2ban documentation
(cherry picked from commit 58751fa4df)
2017-08-25 11:43:49 -07:00
Daniel Nelson
47264bc860 Fix amqp_consumer data_format documentation
closes #3164

(cherry picked from commit 656ce31d98)
2017-08-24 13:18:23 -07:00
Daniel Nelson
67e693e9a8 Update changelog
(cherry picked from commit f95c239a3f)
2017-08-23 15:22:29 -07:00
Daniel Nelson
851352bc8a Escape backslash within string fields (#3161)
(cherry picked from commit ae24a0754b)
2017-08-23 15:22:25 -07:00
Daniel Nelson
c807452c14 Update changelog
(cherry picked from commit f253623231)
2017-08-23 15:16:40 -07:00
Rickard von Essen
48e00f7ea0 Enable hddtemp on all platforms (#3153)
Also disables dmcache tests on non-linux.

(cherry picked from commit f0db4fd901)
2017-08-23 15:16:27 -07:00
Daniel Nelson
8ce901aaa4 Update changelog
(cherry picked from commit 8c68bd9ddb)
2017-08-22 17:03:28 -07:00
Daniel Nelson
78d1715601 Don't start Telegraf on install in Amazon Linux (#3156)
(cherry picked from commit 9fc7220c2e)
2017-08-22 17:03:17 -07:00
Daniel Nelson
1b0a18897d Update changelog
(cherry picked from commit 6597b55477)
2017-08-22 16:55:37 -07:00
Daniel Nelson
257b6a09d9 Don't retry points beyond retention policy (#3155)
(cherry picked from commit 1f4a997164)
2017-08-22 16:55:33 -07:00
Rickard von Essen
e6feac735c Enable fail2ban on all platforms (#3151)
(cherry picked from commit 371638ce56)
2017-08-22 12:59:54 -07:00
Rickard von Essen
6616065acf Enable chrony for all platforms (#3152)
(cherry picked from commit 53c5d3a290)
2017-08-22 11:50:16 -07:00
Daniel Nelson
98774d60e2 Cache intermediate objects during build
(cherry picked from commit ccf17a9f93)
2017-08-21 17:28:20 -07:00
Chris Goller
d4cd1b7eb4 Add JSON input support to zipkin plugin (#3150)
(cherry picked from commit 13a6b917c3)
2017-08-21 17:28:14 -07:00
Daniel Nelson
7254111d37 Add win_services to the readme
(cherry picked from commit 1f1e9cc49f)
2017-08-18 17:58:18 -07:00
Daniel Nelson
4551efb459 Update histogram aggregator documentation (#3133)
(cherry picked from commit 70c2b83f00)
2017-08-18 13:25:22 -07:00
Daniel Nelson
2610eba0e3 Remove version test
(cherry picked from commit 4de264ffc8)
2017-08-18 11:09:34 -07:00
Daniel Nelson
c277dc27a6 Update example config
(cherry picked from commit 36c2c88fd2)
2017-08-17 18:54:44 -07:00
Daniel Nelson
a4f5c6fbc3 Update sample config 2017-08-16 16:48:10 -07:00
241 changed files with 1145 additions and 17480 deletions


@@ -1,127 +1,3 @@
## v1.5.2 [2018-01-30]
### Bugfixes
- [#3684](https://github.com/influxdata/telegraf/pull/3684): Ignore empty lines in Graphite plaintext.
- [#3604](https://github.com/influxdata/telegraf/issues/3604): Fix index out of bounds error in solr input plugin.
- [#3680](https://github.com/influxdata/telegraf/pull/3680): Reconnect before sending graphite metrics if disconnected.
- [#3693](https://github.com/influxdata/telegraf/pull/3693): Align aggregator period with internal ticker to avoid skipping metrics.
- [#3629](https://github.com/influxdata/telegraf/issues/3629): Fix a potential deadlock when using aggregators.
- [#3697](https://github.com/influxdata/telegraf/issues/3697): Limit wait time for writes in mqtt output.
- [#3698](https://github.com/influxdata/telegraf/issues/3698): Revert change in graphite output where dot in field key was replaced by underscore.
- [#3710](https://github.com/influxdata/telegraf/issues/3710): Add timeout to wavefront output write.
- [#3725](https://github.com/influxdata/telegraf/issues/3725): Exclude master_replid fields from redis input.
## v1.5.1 [2018-01-10]
### Bugfixes
- [#3624](https://github.com/influxdata/telegraf/pull/3624): Fix name error in jolokia2_agent sample config.
- [#3625](https://github.com/influxdata/telegraf/pull/3625): Fix DC/OS login expiration time.
- [#3593](https://github.com/influxdata/telegraf/pull/3593): Set Content-Type charset in influxdb output and allow it be overridden.
- [#3594](https://github.com/influxdata/telegraf/pull/3594): Document permissions setup for postfix input.
- [#3633](https://github.com/influxdata/telegraf/pull/3633): Fix deliver_get field in rabbitmq input.
- [#3607](https://github.com/influxdata/telegraf/issues/3607): Escape environment variables during config toml parsing.
## v1.5 [2017-12-14]
### New Plugins
- [basicstats](./plugins/aggregators/basicstats/README.md) - Thanks to @toni-moreno
- [bond](./plugins/inputs/bond/README.md) - Thanks to @ildarsv
- [cratedb](./plugins/outputs/cratedb/README.md) - Thanks to @felixge
- [dcos](./plugins/inputs/dcos/README.md) - Thanks to @influxdata
- [jolokia2](./plugins/inputs/jolokia2/README.md) - Thanks to @dylanmei
- [nginx_plus](./plugins/inputs/nginx_plus/README.md) - Thanks to @mplonka & @poblahblahblah
- [opensmtpd](./plugins/inputs/opensmtpd/README.md) - Thanks to @aromeyer
- [particle](./plugins/inputs/webhooks/particle/README.md) - Thanks to @davidgs
- [pf](./plugins/inputs/pf/README.md) - Thanks to @nferch
- [postfix](./plugins/inputs/postfix/README.md) - Thanks to @phemmer
- [smart](./plugins/inputs/smart/README.md) - Thanks to @rickard-von-essen
- [solr](./plugins/inputs/solr/README.md) - Thanks to @ljagiello
- [teamspeak](./plugins/inputs/teamspeak/README.md) - Thanks to @p4ddy1
- [unbound](./plugins/inputs/unbound/README.md) - Thanks to @aromeyer
- [wavefront](./plugins/outputs/wavefront/README.md) - Thanks to @puckpuck
### Release Notes
- In the `kinesis` output, use of the `partition_key` and
`use_random_partitionkey` options has been deprecated in favor of the
`partition` subtable. This allows for more flexible methods to set the
partition key such as by metric name or by tag.
- With the release of the new improved `jolokia2` input, the legacy `jolokia`
plugin is deprecated and will be removed in a future release. Users of this
plugin are encouraged to update to the new `jolokia2` plugin.
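For reference, a minimal sketch of the `partition` subtable described above, using the tag-based method shown in the sample configuration later in this diff:
```toml
[[outputs.kinesis]]
  ## Kinesis StreamName must exist prior to starting telegraf.
  streamname = "StreamName"

  ## Use the value of a tag for all writes; if the tag is not set the
  ## empty string will be used.
  [outputs.kinesis.partition]
    method = "tag"
    key = "host"
```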
### Features
- [#3170](https://github.com/influxdata/telegraf/pull/3170): Add support for sharding based on metric name.
- [#3196](https://github.com/influxdata/telegraf/pull/3196): Add Kafka output plugin topic_suffix option.
- [#3027](https://github.com/influxdata/telegraf/pull/3027): Include mount mode option in disk metrics.
- [#3191](https://github.com/influxdata/telegraf/pull/3191): TLS and MTLS enhancements to HTTPListener input plugin.
- [#3213](https://github.com/influxdata/telegraf/pull/3213): Add polling method to logparser and tail inputs.
- [#3211](https://github.com/influxdata/telegraf/pull/3211): Add timeout option for kubernetes input.
- [#3234](https://github.com/influxdata/telegraf/pull/3234): Add support for timing sums in statsd input.
- [#2617](https://github.com/influxdata/telegraf/issues/2617): Add resource limit monitoring to procstat.
- [#3236](https://github.com/influxdata/telegraf/pull/3236): Add support for k8s service DNS discovery to prometheus input.
- [#3245](https://github.com/influxdata/telegraf/pull/3245): Add configurable metrics endpoint to prometheus output.
- [#3214](https://github.com/influxdata/telegraf/pull/3214): Add new nginx_plus input plugin.
- [#3215](https://github.com/influxdata/telegraf/pull/3215): Add support for NSQLookupd to nsq_consumer.
- [#2278](https://github.com/influxdata/telegraf/pull/2278): Add redesigned Jolokia input plugin.
- [#3106](https://github.com/influxdata/telegraf/pull/3106): Add configurable separator for metrics and fields in opentsdb output.
- [#1692](https://github.com/influxdata/telegraf/pull/1692): Add support for the rollbar occurrence webhook event.
- [#3160](https://github.com/influxdata/telegraf/pull/3160): Add Wavefront output plugin.
- [#3281](https://github.com/influxdata/telegraf/pull/3281): Add extra wired tiger cache metrics to mongodb input.
- [#3141](https://github.com/influxdata/telegraf/pull/3141): Collect Docker Swarm service metrics in docker input plugin.
- [#2449](https://github.com/influxdata/telegraf/pull/2449): Add smart input plugin for collecting S.M.A.R.T. data.
- [#3269](https://github.com/influxdata/telegraf/pull/3269): Add cluster health level configuration to elasticsearch input.
- [#3304](https://github.com/influxdata/telegraf/pull/3304): Add ability to limit node stats in elasticsearch input.
- [#2167](https://github.com/influxdata/telegraf/pull/2167): Add new basicstats aggregator.
- [#3344](https://github.com/influxdata/telegraf/pull/3344): Add UDP IPv6 support to statsd input.
- [#3350](https://github.com/influxdata/telegraf/pull/3350): Use labels in prometheus output for string fields.
- [#3358](https://github.com/influxdata/telegraf/pull/3358): Add support for decimal timestamps to ts-epoch modifier.
- [#3337](https://github.com/influxdata/telegraf/pull/3337): Add histogram and summary types and use in prometheus plugins.
- [#3365](https://github.com/influxdata/telegraf/pull/3365): Gather concurrently from snmp agents.
- [#3333](https://github.com/influxdata/telegraf/issues/3333): Perform DNS lookup before ping and report result.
- [#3398](https://github.com/influxdata/telegraf/issues/3398): Add instance name option to varnish plugin.
- [#3406](https://github.com/influxdata/telegraf/pull/3406): Add support for SSL settings to ElasticSearch output plugin.
- [#3315](https://github.com/influxdata/telegraf/pull/3315): Add Teamspeak 3 input plugin.
- [#3305](https://github.com/influxdata/telegraf/pull/3305): Add modification_time field to filestat input plugin.
- [#2019](https://github.com/influxdata/telegraf/pull/2019): Add Solr input plugin.
- [#3210](https://github.com/influxdata/telegraf/pull/3210): Add CrateDB output plugin.
- [#3459](https://github.com/influxdata/telegraf/pull/3459): Add systemd unit pid and cgroup matching to procstat.
- [#3477](https://github.com/influxdata/telegraf/pull/3477): Add Particle Webhook Plugin.
- [#3471](https://github.com/influxdata/telegraf/pull/3471): Use MAX() instead of SUM() for latency measurements in sqlserver.
- [#3490](https://github.com/influxdata/telegraf/pull/3490): Add index by week number to Elasticsearch output.
- [#3434](https://github.com/influxdata/telegraf/pull/3434): Add unbound input plugin.
- [#3449](https://github.com/influxdata/telegraf/pull/3449): Add opensmtpd input plugin.
- [#3470](https://github.com/influxdata/telegraf/pull/3470): Add support for tags in the index name in elasticsearch output.
- [#2553](https://github.com/influxdata/telegraf/pull/2553): Add postfix input plugin.
- [#3424](https://github.com/influxdata/telegraf/pull/3424): Add bond input plugin.
- [#3518](https://github.com/influxdata/telegraf/pull/3518): Add slab to mem plugin.
- [#3519](https://github.com/influxdata/telegraf/pull/3519): Add input plugin for DC/OS.
- [#3140](https://github.com/influxdata/telegraf/pull/3140): Add support for glob patterns in net input plugin.
- [#3405](https://github.com/influxdata/telegraf/pull/3405): Add input plugin for OpenBSD/FreeBSD pf.
- [#3528](https://github.com/influxdata/telegraf/pull/3528): Add option to amqp output to publish persistent messages.
- [#3530](https://github.com/influxdata/telegraf/pull/3530): Support I (idle) process state on procfs+Linux.
### Bugfixes
- [#3136](https://github.com/influxdata/telegraf/issues/3136): Fix webhooks input address in use during reload.
- [#3258](https://github.com/influxdata/telegraf/issues/3258): Unlock Statsd when stopping to prevent deadlock.
- [#3319](https://github.com/influxdata/telegraf/issues/3319): Fix cloudwatch output requires unneeded permissions.
- [#3351](https://github.com/influxdata/telegraf/issues/3351): Fix prometheus passthrough for existing value types.
- [#3430](https://github.com/influxdata/telegraf/issues/3430): Always ignore autofs filesystems in disk input.
- [#3326](https://github.com/influxdata/telegraf/issues/3326): Fail metrics parsing on unescaped quotes.
- [#3473](https://github.com/influxdata/telegraf/pull/3473): Whitelist allowed char classes for graphite output.
- [#3488](https://github.com/influxdata/telegraf/pull/3488): Use hexadecimal ids and lowercase names in zipkin input.
- [#3263](https://github.com/influxdata/telegraf/issues/3263): Fix snmp-tools output parsing with Windows EOLs.
- [#3447](https://github.com/influxdata/telegraf/issues/3447): Add shadow-utils dependency to rpm package.
- [#3448](https://github.com/influxdata/telegraf/issues/3448): Use deb-systemd-invoke to restart service.
- [#3553](https://github.com/influxdata/telegraf/issues/3553): Fix kafka_consumer outside range of offsets error.
- [#3568](https://github.com/influxdata/telegraf/issues/3568): Fix separation of multiple prometheus_client outputs.
- [#3577](https://github.com/influxdata/telegraf/issues/3577): Don't add system input uptime_format as a counter.
## v1.4.5 [2017-12-01]
### Bugfixes


@@ -12,7 +12,7 @@ but any information you can provide on how the data will look is appreciated.
See the [OpenTSDB output](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/opentsdb)
for a good example.
1. **Optional:** Help users of your plugin by including example queries for populating dashboards. Include these sample queries in the `README.md` for the plugin.
1. **Optional:** Write a [tickscript](https://docs.influxdata.com/kapacitor/v1.0/tick/syntax/) for your plugin and add it to [Kapacitor](https://github.com/influxdata/kapacitor/tree/master/examples/telegraf).
1. **Optional:** Write a [tickscript](https://docs.influxdata.com/kapacitor/v1.0/tick/syntax/) for your plugin and add it to [Kapacitor](https://github.com/influxdata/kapacitor/tree/master/examples/telegraf). Or mention @jackzampolin in a PR comment with some common queries that you would want to alert on and he will write one for you.
## GoDoc
@@ -52,7 +52,7 @@ See below for a quick example.
* Input Plugins must be added to the
`github.com/influxdata/telegraf/plugins/inputs/all/all.go` file.
* The `SampleConfig` function should return valid toml that describes how the
plugin can be configured. This is included in `telegraf config`.
plugin can be configured. This is included in `telegraf -sample-config`.
* The `Description` function should say in one line what this plugin does.
Let's say you've written a plugin that emits metrics about processes on the current host.
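A minimal sketch of such an input plugin, assuming the standard plugin interface (the `simple` name and `ok` option are illustrative):
```go
package simple

import (
	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/inputs"
)

// Simple is an illustrative input plugin.
type Simple struct {
	Ok bool
}

// Description says in one line what this plugin does.
func (s *Simple) Description() string {
	return "a demo plugin"
}

// SampleConfig returns valid toml describing how the plugin can be configured.
func (s *Simple) SampleConfig() string {
	return `
  ## Indicate if everything is fine
  ok = true
`
}

// Gather emits one metric with the current state.
func (s *Simple) Gather(acc telegraf.Accumulator) error {
	acc.AddFields("state", map[string]interface{}{"value": s.Ok}, nil)
	return nil
}

func init() {
	inputs.Add("simple", func() telegraf.Input { return &Simple{} })
}
```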
@@ -183,7 +183,7 @@ See below for a quick example.
* To be available within Telegraf itself, plugins must add themselves to the
`github.com/influxdata/telegraf/plugins/outputs/all/all.go` file.
* The `SampleConfig` function should return valid toml that describes how the
output can be configured. This is included in `telegraf config`.
output can be configured. This is included in `telegraf -sample-config`.
* The `Description` function should say in one line what this output does.
### Output Example
@@ -287,7 +287,7 @@ See below for a quick example.
* To be available within Telegraf itself, plugins must add themselves to the
`github.com/influxdata/telegraf/plugins/processors/all/all.go` file.
* The `SampleConfig` function should return valid toml that describes how the
processor can be configured. This is included in the output of `telegraf config`.
processor can be configured. This is included in `telegraf -sample-config`.
* The `Description` function should say in one line what this processor does.
### Processor Example
@@ -344,7 +344,7 @@ See below for a quick example.
* To be available within Telegraf itself, plugins must add themselves to the
`github.com/influxdata/telegraf/plugins/aggregators/all/all.go` file.
* The `SampleConfig` function should return valid toml that describes how the
aggregator can be configured. This is included in `telegraf config`.
aggregator can be configured. This is included in `telegraf -sample-config`.
* The `Description` function should say in one line what this aggregator does.
* The Aggregator plugin will need to keep caches of metrics that have passed
through it. This should be done using the builtin `HashID()` function of each metric.
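A hedged sketch of that caching pattern (type and field names are illustrative):
```go
package minmax

import "github.com/influxdata/telegraf"

// aggregate holds the running state for one metric series.
type aggregate struct {
	name     string
	min, max float64
}

// MinMax keys its cache by each metric's HashID, so metrics with the
// same measurement name and tag set update the same entry.
type MinMax struct {
	cache map[uint64]aggregate
}

// Add folds an incoming metric into the per-series cache.
func (m *MinMax) Add(in telegraf.Metric) {
	if m.cache == nil {
		m.cache = make(map[uint64]aggregate)
	}
	id := in.HashID()
	if _, ok := m.cache[id]; !ok {
		m.cache[id] = aggregate{name: in.Name()}
	}
	// ... compare fields against the cached entry and update min/max ...
}
```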
@@ -457,28 +457,29 @@ func init() {
## Unit Tests
Before opening a pull request you should run the linter checks and
the short tests.
### Execute linter
execute `make lint`
### Execute short tests
execute `make test`
execute `make test-short`
### Execute integration tests
### Execute long tests
Running the integration tests requires several docker containers to be
running. You can start the containers with:
```
make docker-run
```
As Telegraf collects metrics from several third-party services it becomes a
difficult task to mock each service as some of them have complicated protocols
which would take some time to replicate.
And run the full test suite with:
```
make test-all
```
To overcome this situation we've decided to use docker containers to provide a
fast and reproducible environment to test those services which require it.
For other situations
(i.e: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/redis/redis_test.go)
a simple mock will suffice.
Use `make docker-kill` to stop the containers.
To execute Telegraf tests follow these simple steps:
- Install docker following [these](https://docs.docker.com/installation/)
instructions
- execute `make test`
### Unit test troubleshooting
Try cleaning up your test environment by executing `make docker-kill` and
re-running

Godeps

@@ -4,13 +4,12 @@ github.com/amir/raidman c74861fe6a7bb8ede0a010ce4485bdbb4fc4c985
github.com/apache/thrift 4aaa92ece8503a6da9bc6701604f69acf2b99d07
github.com/aws/aws-sdk-go c861d27d0304a79f727e9a8a4e2ac1e74602fdc0
github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
github.com/bsm/sarama-cluster abf039439f66c1ce78017f560b490612552f6472
github.com/bsm/sarama-cluster ccdc0803695fbce22f1706d04ded46cd518fd832
github.com/cenkalti/backoff b02f2bbce11d7ea6b97f282ef1771b0fe2f65ef3
github.com/couchbase/go-couchbase bfe555a140d53dc1adf390f1a1d4b0fd4ceadb28
github.com/couchbase/gomemcached 4a25d2f4e1dea9ea7dd76dfd943407abf9b07d29
github.com/couchbase/goutils 5823a0cbaaa9008406021dc5daf80125ea30bba6
github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
github.com/dgrijalva/jwt-go dbeaa9332f19a944acb5736b4456cfcc02140e29
github.com/docker/docker f5ec1e2936dcbe7b5001c2b817188b095c700c27
github.com/docker/go-connections 990a1a1a70b0da4c4cb70e117971a4f0babfbf1a
github.com/eapache/go-resiliency b86b1ec0dd4209a588dc1285cdd471e73525c0b3
@@ -33,7 +32,7 @@ github.com/hashicorp/consul 63d2fc68239b996096a1c55a0d4b400ea4c2583f
github.com/influxdata/tail a395bf99fe07c233f41fba0735fa2b13b58588ea
github.com/influxdata/toml 5d1d907f22ead1cd47adde17ceec5bda9cacaf8f
github.com/influxdata/wlog 7c63b0a71ef8300adc255344d275e10e5c3a71ec
github.com/jackc/pgx 63f58fd32edb5684b9e9f4cfaac847c6b42b3917
github.com/jackc/pgx b84338d7d62598f75859b2b146d830b22f1b9ec8
github.com/jmespath/go-jmespath bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d
github.com/kardianos/osext c2c54e542fb797ad986b31721e1baedf214ca413
github.com/kardianos/service 6d3a0ee7d3425d9d835debc51a0ca1ffa28f4893
@@ -41,13 +40,11 @@ github.com/kballard/go-shellquote d8ec1a69a250a17bb0e419c386eac1f3711dc142
github.com/matttproud/golang_protobuf_extensions c12348ce28de40eed0136aa2b644d0ee0650e56c
github.com/Microsoft/go-winio ce2922f643c8fd76b46cadc7f404a06282678b34
github.com/miekg/dns 99f84ae56e75126dd77e5de4fae2ea034a468ca1
github.com/mitchellh/mapstructure d0303fe809921458f417bcf828397a65db30a7e4
github.com/multiplay/go-ts3 07477f49b8dfa3ada231afc7b7b17617d42afe8e
github.com/naoina/go-stringutil 6b638e95a32d0c1131db0e7fe83775cbea4a0d0b
github.com/nats-io/go-nats ea9585611a4ab58a205b9b125ebd74c389a6b898
github.com/nats-io/nats ea9585611a4ab58a205b9b125ebd74c389a6b898
github.com/nats-io/nuid 289cccf02c178dc782430d534e3c1f5b72af807f
github.com/nsqio/go-nsq eee57a3ac4174c55924125bb15eeeda8cffb6e6f
github.com/nsqio/go-nsq a53d495e81424aaf7a7665a9d32a97715c40e953
github.com/opencontainers/runc 89ab7f2ccc1e45ddf6485eaa802c35dcf321dfc8
github.com/opentracing-contrib/go-observer a52f2342449246d5bcc273e65cbdcfa5f7d6c63c
github.com/opentracing/opentracing-go 06f47b42c792fef2796e9681353e1d908c417827
@@ -65,7 +62,7 @@ github.com/samuel/go-zookeeper 1d7be4effb13d2d908342d349d71a284a7542693
github.com/satori/go.uuid 5bf94b69c6b68ee1b541973bb8e1144db23a194b
github.com/shirou/gopsutil 384a55110aa5ae052eb93ea94940548c1e305a99
github.com/shirou/w32 3c9377fc6748f222729a8270fe2775d149a249ad
github.com/Shopify/sarama 3b1b38866a79f06deddf0487d5c27ba0697ccd65
github.com/Shopify/sarama c01858abb625b73a3af51d0798e4ad42c8147093
github.com/Sirupsen/logrus 61e43dc76f7ee59a82bdf3d71033dc12bea4c77d
github.com/soniah/gosnmp 5ad50dc75ab389f8a1c9f8a67d3a1cd85f67ed15
github.com/StackExchange/wmi f3e2bae1e0cb5aef83e319133eabfee30013a4a5


@@ -15,6 +15,7 @@ ifdef VERSION
LDFLAGS += -X main.version=$(VERSION)
endif
all:
$(MAKE) deps
$(MAKE) telegraf
@@ -48,17 +49,12 @@ test-all: lint
go test ./...
package:
./scripts/build.py --package --platform=all --arch=all
./scripts/build.py --package --version="$(VERSION)" --platform=linux --arch=all --upload
clean:
-rm -f telegraf
-rm -f telegraf.exe
docker-image:
./scripts/build.py --package --platform=linux --arch=amd64
cp build/telegraf*$(COMMIT)*.deb .
docker build -f scripts/dev.docker --build-arg "package=telegraf*$(COMMIT)*.deb" -t "telegraf-dev:$(COMMIT)" .
# Run all docker containers necessary for integration tests
docker-run:
docker run --name aerospike -p "3000:3000" -d aerospike/aerospike-server:3.9.0
@@ -86,12 +82,6 @@ docker-run:
-e SLAPD_CONFIG_ROOTPW="secret" \
-p "389:389" -p "636:636" \
-d cobaugh/openldap-alpine
docker run --name cratedb \
-p "6543:5432" \
-d crate:2.2 \
-Cnetwork.host=0.0.0.0 \
-Ctransport.host=localhost \
-Clicense.enterprise=false
# Run docker containers necessary for integration tests; skipping services provided
# by CircleCI
@@ -119,9 +109,9 @@ docker-run-circle:
docker-kill:
-docker kill aerospike elasticsearch kafka memcached mqtt mysql nats nsq \
openldap postgres rabbitmq redis riemann zookeeper cratedb
openldap postgres rabbitmq redis riemann zookeeper
-docker rm aerospike elasticsearch kafka memcached mqtt mysql nats nsq \
openldap postgres rabbitmq redis riemann zookeeper cratedb
openldap postgres rabbitmq redis riemann zookeeper
.PHONY: deps telegraf telegraf.exe install test test-windows lint test-all \
package clean docker-run docker-run-circle docker-kill docker-image
package clean docker-run docker-run-circle docker-kill


@@ -5,7 +5,8 @@ and writing metrics.
Design goals are to have a minimal memory footprint with a plugin system so
that developers in the community can easily add support for collecting metrics
from local or remote services.
from well known services (like Hadoop, Postgres, or Redis) and third party
APIs (like Mailchimp, AWS CloudWatch, or Google Analytics).
Telegraf is plugin-driven and has the concept of 4 distinct plugins:
@@ -51,33 +52,6 @@ which is installed by the Makefile if you don't have it already.
4. Run `cd $GOPATH/src/github.com/influxdata/telegraf`
5. Run `make`
### Nightly Builds
These builds are generated from the master branch:
- [telegraf_nightly_amd64.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_amd64.deb)
- [telegraf_nightly_arm64.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_arm64.deb)
- [telegraf-nightly.arm64.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.arm64.rpm)
- [telegraf_nightly_armel.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_armel.deb)
- [telegraf-nightly.armel.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.armel.rpm)
- [telegraf_nightly_armhf.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_armhf.deb)
- [telegraf-nightly.armv6hl.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.armv6hl.rpm)
- [telegraf-nightly_freebsd_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_freebsd_amd64.tar.gz)
- [telegraf-nightly_freebsd_i386.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_freebsd_i386.tar.gz)
- [telegraf_nightly_i386.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_i386.deb)
- [telegraf-nightly.i386.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.i386.rpm)
- [telegraf-nightly_linux_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_amd64.tar.gz)
- [telegraf-nightly_linux_arm64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_arm64.tar.gz)
- [telegraf-nightly_linux_armel.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_armel.tar.gz)
- [telegraf-nightly_linux_armhf.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_armhf.tar.gz)
- [telegraf-nightly_linux_i386.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_i386.tar.gz)
- [telegraf-nightly_linux_s390x.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_s390x.tar.gz)
- [telegraf_nightly_s390x.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_s390x.deb)
- [telegraf-nightly.s390x.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.s390x.rpm)
- [telegraf-nightly_windows_amd64.zip](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_windows_amd64.zip)
- [telegraf-nightly_windows_i386.zip](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_windows_i386.zip)
- [telegraf-nightly.x86_64.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.x86_64.rpm)
- [telegraf-static-nightly_linux_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-static-nightly_linux_amd64.tar.gz)
## How to use it:
See usage with:
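A minimal invocation, assuming the `telegraf` binary is on your PATH:
```
telegraf --help
```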
@@ -129,7 +103,6 @@ configuration options.
* [apache](./plugins/inputs/apache)
* [aws cloudwatch](./plugins/inputs/cloudwatch)
* [bcache](./plugins/inputs/bcache)
* [bond](./plugins/inputs/bond)
* [cassandra](./plugins/inputs/cassandra)
* [ceph](./plugins/inputs/ceph)
* [cgroup](./plugins/inputs/cgroup)
@@ -138,7 +111,6 @@ configuration options.
* [conntrack](./plugins/inputs/conntrack)
* [couchbase](./plugins/inputs/couchbase)
* [couchdb](./plugins/inputs/couchdb)
* [DC/OS](./plugins/inputs/dcos)
* [disque](./plugins/inputs/disque)
* [dmcache](./plugins/inputs/dmcache)
* [dns query time](./plugins/inputs/dns_query)
@@ -159,8 +131,7 @@ configuration options.
* [interrupts](./plugins/inputs/interrupts)
* [ipmi_sensor](./plugins/inputs/ipmi_sensor)
* [iptables](./plugins/inputs/iptables)
* [jolokia](./plugins/inputs/jolokia) (deprecated, use [jolokia2](./plugins/inputs/jolokia2))
* [jolokia2](./plugins/inputs/jolokia2)
* [jolokia](./plugins/inputs/jolokia)
* [kapacitor](./plugins/inputs/kapacitor)
* [kubernetes](./plugins/inputs/kubernetes)
* [leofs](./plugins/inputs/leofs)
@@ -173,19 +144,15 @@ configuration options.
* [mysql](./plugins/inputs/mysql)
* [net_response](./plugins/inputs/net_response)
* [nginx](./plugins/inputs/nginx)
* [nginx_plus](./plugins/inputs/nginx_plus)
* [nsq](./plugins/inputs/nsq)
* [nstat](./plugins/inputs/nstat)
* [ntpq](./plugins/inputs/ntpq)
* [openldap](./plugins/inputs/openldap)
* [opensmtpd](./plugins/inputs/opensmtpd)
* [pf](./plugins/inputs/pf)
* [phpfpm](./plugins/inputs/phpfpm)
* [phusion passenger](./plugins/inputs/passenger)
* [ping](./plugins/inputs/ping)
* [postfix](./plugins/inputs/postfix)
* [postgresql_extensible](./plugins/inputs/postgresql_extensible)
* [postgresql](./plugins/inputs/postgresql)
* [postgresql_extensible](./plugins/inputs/postgresql_extensible)
* [powerdns](./plugins/inputs/powerdns)
* [procstat](./plugins/inputs/procstat)
* [prometheus](./plugins/inputs/prometheus) (can be used for [Caddy server](./plugins/inputs/prometheus/README.md#usage-for-caddy-http-server))
@@ -197,15 +164,11 @@ configuration options.
* [riak](./plugins/inputs/riak)
* [salesforce](./plugins/inputs/salesforce)
* [sensors](./plugins/inputs/sensors)
* [smart](./plugins/inputs/smart)
* [snmp](./plugins/inputs/snmp)
* [snmp_legacy](./plugins/inputs/snmp_legacy)
* [solr](./plugins/inputs/solr)
* [sql server](./plugins/inputs/sqlserver) (microsoft)
* [teamspeak](./plugins/inputs/teamspeak)
* [tomcat](./plugins/inputs/tomcat)
* [twemproxy](./plugins/inputs/twemproxy)
* [unbound](./plugins/inputs/unbound)
* [varnish](./plugins/inputs/varnish)
* [zfs](./plugins/inputs/zfs)
* [zookeeper](./plugins/inputs/zookeeper)
@@ -242,9 +205,8 @@ Telegraf can also collect metrics via the following service plugins:
* [filestack](./plugins/inputs/webhooks/filestack)
* [github](./plugins/inputs/webhooks/github)
* [mandrill](./plugins/inputs/webhooks/mandrill)
* [papertrail](./plugins/inputs/webhooks/papertrail)
* [particle](./plugins/inputs/webhooks/particle)
* [rollbar](./plugins/inputs/webhooks/rollbar)
* [papertrail](./plugins/inputs/webhooks/papertrail)
* [zipkin](./plugins/inputs/zipkin)
Telegraf is able to parse the following input data formats into metrics; these
@@ -263,7 +225,6 @@ formats may be used with input plugins supporting the `data_format` option:
## Aggregator Plugins
* [basicstats](./plugins/aggregators/basicstats)
* [minmax](./plugins/aggregators/minmax)
* [histogram](./plugins/aggregators/histogram)
@@ -274,7 +235,6 @@ formats may be used with input plugins supporting the `data_format` option:
* [amqp](./plugins/outputs/amqp) (rabbitmq)
* [aws kinesis](./plugins/outputs/kinesis)
* [aws cloudwatch](./plugins/outputs/cloudwatch)
* [cratedb](./plugins/outputs/cratedb)
* [datadog](./plugins/outputs/datadog)
* [discard](./plugins/outputs/discard)
* [elasticsearch](./plugins/outputs/elasticsearch)
@@ -294,4 +254,3 @@ formats may be used with input plugins supporting the `data_format` option:
* [socket_writer](./plugins/outputs/socket_writer)
* [tcp](./plugins/outputs/socket_writer)
* [udp](./plugins/outputs/socket_writer)
* [wavefront](./plugins/outputs/wavefront)


@@ -28,18 +28,6 @@ type Accumulator interface {
tags map[string]string,
t ...time.Time)
// AddSummary is the same as AddFields, but will add the metric as a "Summary" type
AddSummary(measurement string,
fields map[string]interface{},
tags map[string]string,
t ...time.Time)
// AddHistogram is the same as AddFields, but will add the metric as a "Histogram" type
AddHistogram(measurement string,
fields map[string]interface{},
tags map[string]string,
t ...time.Time)
SetPrecision(precision, interval time.Duration)
AddError(err error)
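For context, a hedged sketch of how a plugin's `Gather` might use these accumulator methods on the 1.5 side (measurement, field, and tag names are illustrative):
```go
package example

import "github.com/influxdata/telegraf"

type Example struct{}

// Gather records one untyped metric and one histogram-typed metric.
func (e *Example) Gather(acc telegraf.Accumulator) error {
	tags := map[string]string{"host": "example"}

	// Plain fields, no explicit value type.
	acc.AddFields("example", map[string]interface{}{"value": 42}, tags)

	// Same shape of call, but recorded with the Histogram value type.
	acc.AddHistogram("example", map[string]interface{}{"usage": 0.87}, tags)

	return nil
}
```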


@@ -76,28 +76,6 @@ func (ac *accumulator) AddCounter(
}
}
func (ac *accumulator) AddSummary(
measurement string,
fields map[string]interface{},
tags map[string]string,
t ...time.Time,
) {
if m := ac.maker.MakeMetric(measurement, fields, tags, telegraf.Summary, ac.getTime(t)); m != nil {
ac.metrics <- m
}
}
func (ac *accumulator) AddHistogram(
measurement string,
fields map[string]interface{},
tags map[string]string,
t ...time.Time,
) {
if m := ac.maker.MakeMetric(measurement, fields, tags, telegraf.Histogram, ac.getTime(t)); m != nil {
ac.metrics <- m
}
}
// AddError passes a runtime error to the accumulator.
// The error will be tagged with the plugin name and written to the log.
func (ac *accumulator) AddError(err error) {


@@ -252,7 +252,7 @@ func (a *Agent) flusher(shutdown chan struct{}, metricC chan telegraf.Metric, ag
// the flusher will flush after metrics are collected.
time.Sleep(time.Millisecond * 300)
// create an output metric channel and a goroutine that continuously passes
// create an output metric channel and a goroutine that continously passes
// each metric onto the output plugins & aggregators.
outMetricC := make(chan telegraf.Metric, 100)
var wg sync.WaitGroup
@@ -308,13 +308,7 @@ func (a *Agent) flusher(shutdown chan struct{}, metricC chan telegraf.Metric, ag
metrics = processor.Apply(metrics...)
}
for _, m := range metrics {
for i, o := range a.Config.Outputs {
if i == len(a.Config.Outputs)-1 {
o.AddMetric(m)
} else {
o.AddMetric(m.Copy())
}
}
outMetricC <- m
}
}
}
@@ -370,6 +364,8 @@ func (a *Agent) Run(shutdown chan struct{}) error {
metricC := make(chan telegraf.Metric, 100)
aggC := make(chan telegraf.Metric, 100)
now := time.Now()
// Start all ServicePlugins
for _, input := range a.Config.Inputs {
input.SetDefaultTags(a.Config.Tags)
@@ -410,7 +406,7 @@ func (a *Agent) Run(shutdown chan struct{}) error {
acc := NewAccumulator(agg, aggC)
acc.SetPrecision(a.Config.Agent.Precision.Duration,
a.Config.Agent.Interval.Duration)
agg.Run(acc, shutdown)
agg.Run(acc, now, shutdown)
}(aggregator)
}


@@ -12,11 +12,11 @@ platform: x64
install:
- IF NOT EXIST "C:\Cache" mkdir C:\Cache
- IF NOT EXIST "C:\Cache\go1.9.2.msi" curl -o "C:\Cache\go1.9.2.msi" https://storage.googleapis.com/golang/go1.9.2.windows-amd64.msi
- IF NOT EXIST "C:\Cache\go1.8.1.msi" curl -o "C:\Cache\go1.8.1.msi" https://storage.googleapis.com/golang/go1.8.1.windows-amd64.msi
- IF NOT EXIST "C:\Cache\gnuwin32-bin.zip" curl -o "C:\Cache\gnuwin32-bin.zip" https://dl.influxdata.com/telegraf/ci/make-3.81-bin.zip
- IF NOT EXIST "C:\Cache\gnuwin32-dep.zip" curl -o "C:\Cache\gnuwin32-dep.zip" https://dl.influxdata.com/telegraf/ci/make-3.81-dep.zip
- IF EXIST "C:\Go" rmdir /S /Q C:\Go
- msiexec.exe /i "C:\Cache\go1.9.2.msi" /quiet
- msiexec.exe /i "C:\Cache\go1.8.1.msi" /quiet
- 7z x "C:\Cache\gnuwin32-bin.zip" -oC:\GnuWin32 -y
- 7z x "C:\Cache\gnuwin32-dep.zip" -oC:\GnuWin32 -y
- go version


@@ -6,8 +6,8 @@ machine:
- rabbitmq-server
post:
- sudo rm -rf /usr/local/go
- wget https://storage.googleapis.com/golang/go1.9.2.linux-amd64.tar.gz
- sudo tar -C /usr/local -xzf go1.9.2.linux-amd64.tar.gz
- wget https://storage.googleapis.com/golang/go1.8.4.linux-amd64.tar.gz
- sudo tar -C /usr/local -xzf go1.8.4.linux-amd64.tar.gz
- go version
dependencies:


@@ -55,8 +55,11 @@ var fUsage = flag.String("usage", "",
var fService = flag.String("service", "",
"operate on the service")
// Telegraf version, populated by the linker.
// ie, -ldflags "-X main.version=`git describe --always --tags`"
var (
nextVersion = "1.5.0"
nextVersion = "1.4.0"
version string
commit string
branch string
@@ -265,7 +268,7 @@ func (p *program) Stop(s service.Service) error {
func displayVersion() string {
if version == "" {
return fmt.Sprintf("v%s~%s", nextVersion, commit)
return fmt.Sprintf("v%s~pre%s", nextVersion, commit)
}
return "v" + version
}
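A hedged example of the build invocation that comment describes, assuming a git checkout with tags available:
```
go build -ldflags "-X main.version=$(git describe --always --tags)" ./cmd/telegraf
```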


@@ -39,11 +39,6 @@ metrics as they pass through Telegraf:
Both Aggregators and Processors analyze metrics as they pass through Telegraf.
Use [measurement filtering](CONFIGURATION.md#measurement-filtering)
to control which metrics are passed through a processor or aggregator. If a
metric is filtered out, the metric bypasses the plugin and is passed downstream
to the next plugin.
**Processor** plugins process metrics as they pass through and immediately emit
results based on the values they process. For example, this could be printing
all metrics or adding a tag to all metrics that pass through.
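For example, a processor that only handles metrics named `cpu` (a sketch, assuming the array form of `namepass`):
```toml
[[processors.printer]]
  namepass = ["cpu"]
```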


@@ -24,17 +24,11 @@ Environment variables can be used anywhere in the config file, simply prepend
them with $. For strings the variable must be within quotes (ie, "$STR_VAR"),
for numbers and booleans they should be plain (ie, $INT_VAR, $BOOL_VAR)
When using the `.deb` or `.rpm` packages, you can define environment variables
in the `/etc/default/telegraf` file.
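A short sketch of that substitution (variable names are illustrative):
```toml
# In /etc/default/telegraf (for .deb/.rpm installs):
#   STR_VAR=production
#   INT_VAR=10

[global_tags]
  env = "$STR_VAR"               # strings: keep the quotes

[agent]
  metric_batch_size = $INT_VAR   # numbers and booleans: plain
```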
## Configuration file locations
The location of the configuration file can be set via the `--config` command
line flag.
When the `--config-directory` command line flag is used files ending with
`.conf` in the specified directory will also be included in the Telegraf
configuration.
line flag. Telegraf will also pick up all files matching the pattern `*.conf` if
the `-config-directory` command line flag is used.
On most systems, the default locations are `/etc/telegraf/telegraf.conf` for
the main configuration file and `/etc/telegraf/telegraf.d` for the directory of
@@ -98,13 +92,9 @@ you can configure that here.
* **name_suffix**: Specifies a suffix to attach to the measurement name.
* **tags**: A map of tags to apply to a specific input's measurements.
The [measurement filtering](#measurement-filtering) parameters can be used to
limit what metrics are emitted from the input plugin.
## Output Configuration
The [measurement filtering](#measurement-filtering) parameters can be used to
limit what metrics are emitted from the output plugin.
There are no generic configuration options available for all outputs.
## Aggregator Configuration
@@ -125,10 +115,6 @@ aggregator and will not get sent to the output plugins.
* **name_suffix**: Specifies a suffix to attach to the measurement name.
* **tags**: A map of tags to apply to a specific input's measurements.
The [measurement filtering](#measurement-filtering) parameters can be used to
limit what metrics are handled by the aggregator. Excluded metrics are passed
downstream to the next aggregator.
## Processor Configuration
The following config parameters are available for all processors:
@@ -136,10 +122,6 @@ The following config parameters are available for all processors:
* **order**: This is the order in which the processor(s) get executed. If this
is not specified then processor execution order will be random.
The [measurement filtering](#measurement-filtering) parameters may be used
to limit what metrics are handled by the processor. Excluded metrics are
passed downstream to the next processor.
#### Measurement Filtering
Filters can be configured per input, output, processor, or aggregator,
@@ -389,15 +371,3 @@ to the system load metrics due to the `namepass` parameter.
[[outputs.file]]
files = ["stdout"]
```
#### Processor Configuration Examples:
Print only the metrics with `cpu` as the measurement name, all metrics are
passed to the output:
```toml
[[processors.printer]]
namepass = "cpu"
[[outputs.file]]
files = ["/tmp/metrics.out"]
```


@@ -1,46 +0,0 @@
# Frequently Asked Questions
### Q: How can I monitor the Docker Engine Host from within a container?
You will need to set up several volume mounts as well as some environment
variables:
```
docker run --name telegraf \
  -v /:/hostfs:ro \
  -v /etc:/hostfs/etc:ro \
  -v /proc:/hostfs/proc:ro \
  -v /sys:/hostfs/sys:ro \
  -v /var/run/utmp:/var/run/utmp:ro \
  -e HOST_ETC=/hostfs/etc \
  -e HOST_PROC=/hostfs/proc \
  -e HOST_SYS=/hostfs/sys \
  -e HOST_MOUNT_PREFIX=/hostfs \
  telegraf
```
### Q: Why do I get a "no such host" error resolving hostnames that other programs can resolve?
Go uses a pure Go resolver by default for [name resolution](https://golang.org/pkg/net/#hdr-Name_Resolution).
This resolver behaves differently than the C library functions but is more
efficient when used with the Go runtime.
If you encounter problems or want to use more advanced name resolution methods
that are unsupported by the pure Go resolver, you can switch to the cgo
resolver.
If running manually, set:
```
export GODEBUG=netdns=cgo
```
If running as a service add the environment variable to `/etc/default/telegraf`:
```
GODEBUG=netdns=cgo
```
### Q: When will the next version be released?
The latest release date estimate can be viewed on the
[milestones](https://github.com/influxdata/telegraf/milestones) page.


@@ -82,8 +82,6 @@ following works:
- github.com/streadway/amqp [BSD](https://github.com/streadway/amqp/blob/master/LICENSE)
- github.com/stretchr/objx [MIT](https://github.com/stretchr/objx/blob/master/LICENSE.md)
- github.com/stretchr/testify [MIT](https://github.com/stretchr/testify/blob/master/LICENCE.txt)
- github.com/mitchellh/mapstructure [MIT](https://github.com/mitchellh/mapstructure/blob/master/LICENSE)
- github.com/multiplay/go-ts3 [BSD](https://github.com/multiplay/go-ts3/blob/master/LICENSE)
- github.com/vjeantet/grok [APACHE](https://github.com/vjeantet/grok/blob/master/LICENSE)
- github.com/wvanbergen/kafka [MIT](https://github.com/wvanbergen/kafka/blob/master/LICENSE)
- github.com/wvanbergen/kazoo-go [MIT](https://github.com/wvanbergen/kazoo-go/blob/master/MIT-LICENSE)


@@ -38,7 +38,7 @@ Telegraf can manage its own service through the --service flag:
| `telegraf.exe --service stop` | Stop the telegraf service |
Troubleshooting common error #1067
Trobleshooting common error #1067
When installing as a service on Windows, always double-check that you specify the full path of the config file; otherwise the Windows service will fail to start.
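For example, a hedged install invocation with an explicit absolute path:
```
telegraf.exe --service install --config "C:\Program Files\Telegraf\telegraf.conf"
```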


@@ -88,8 +88,8 @@
##
## Multiple urls can be specified as part of the same cluster,
## this means that only ONE of the urls will be written to each interval.
# urls = ["udp://127.0.0.1:8089"] # UDP endpoint example
urls = ["http://127.0.0.1:8086"] # required
# urls = ["udp://localhost:8089"] # UDP endpoint example
urls = ["http://localhost:8086"] # required
## The target database for metrics (telegraf will create it if not exists).
database = "telegraf" # required
@@ -151,9 +151,6 @@
# ## Telegraf tag to use as a routing key
# ## ie, if this tag exists, its value will be used as the routing key
# routing_tag = "host"
# ## Delivery Mode controls if a published message is persistent
# ## Valid options are "transient" and "persistent". default: "transient"
# delivery_mode = "transient"
#
# ## InfluxDB retention policy
# # retention_policy = "default"
@@ -202,19 +199,6 @@
# namespace = "InfluxData/Telegraf"
# # Configuration for CrateDB to send metrics to.
# [[outputs.cratedb]]
# # A github.com/jackc/pgx connection string.
# # See https://godoc.org/github.com/jackc/pgx#ParseDSN
# url = "postgres://user:password@localhost/schema?sslmode=disable"
# # Timeout for all CrateDB queries.
# timeout = "5s"
# # Name of the table to store metrics in.
# table = "metrics"
# # If true, and the metrics table does not exist, create it automatically.
# table_create = true
# # Configuration for DataDog API to send metrics to.
# [[outputs.datadog]]
# ## Datadog API key
@@ -256,21 +240,8 @@
# # %m - month (01..12)
# # %d - day of month (e.g., 01)
# # %H - hour (00..23)
# # %V - week of the year (ISO week) (01..53)
# ## Additionally, you can specify a tag name using the notation {{tag_name}}
# ## which will be used as part of the index name. If the tag does not exist,
# ## the default tag value will be used.
# # index_name = "telegraf-{{host}}-%Y.%m.%d"
# # default_tag_value = "none"
# index_name = "telegraf-%Y.%m.%d" # required.
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false
#
# ## Template Config
# ## Set to true if you want telegraf to manage its index template.
# ## If enabled it will create a recommended index template for telegraf indexes
@@ -342,34 +313,6 @@
# brokers = ["localhost:9092"]
# ## Kafka topic for producer messages
# topic = "telegraf"
#
# ## Optional topic suffix configuration.
# ## If the section is omitted, no suffix is used.
# ## Following topic suffix methods are supported:
# ## measurement - suffix equals to separator + measurement's name
# ## tags - suffix equals to separator + specified tags' values
# ## interleaved with separator
#
# ## Suffix equals to "_" + measurement name
# # [outputs.kafka.topic_suffix]
# # method = "measurement"
# # separator = "_"
#
# ## Suffix equals to "__" + measurement's "foo" tag value.
# ## If there's no such a tag, suffix equals to an empty string
# # [outputs.kafka.topic_suffix]
# # method = "tags"
# # keys = ["foo"]
# # separator = "__"
#
# ## Suffix equals to "_" + measurement's "foo" and "bar"
# ## tag values, separated by "_". If there is no such tags,
# ## their values treated as empty strings.
# # [outputs.kafka.topic_suffix]
# # method = "tags"
# # keys = ["foo", "bar"]
# # separator = "_"
#
# ## Telegraf tag to use as a routing key
# ## ie, if this tag exists, its value will be used as the routing key
# routing_tag = "host"
@@ -440,32 +383,12 @@
#
# ## Kinesis StreamName must exist prior to starting telegraf.
# streamname = "StreamName"
# ## DEPRECATED: PartitionKey as used for sharding data.
# ## PartitionKey as used for sharding data.
# partitionkey = "PartitionKey"
# ## DEPRECATED: If set the paritionKey will be a random UUID on every put.
# ## If set the paritionKey will be a random UUID on every put.
# ## This allows for scaling across multiple shards in a stream.
# ## This will cause issues with ordering.
# use_random_partitionkey = false
# ## The partition key can be calculated using one of several methods:
# ##
# ## Use a static value for all writes:
# # [outputs.kinesis.partition]
# # method = "static"
# # key = "howdy"
# #
# ## Use a random partition key on each write:
# # [outputs.kinesis.partition]
# # method = "random"
# #
# ## Use the measurement name as the partition key:
# # [outputs.kinesis.partition]
# # method = "measurement"
# #
# ## Use the value of a tag for all writes; if the tag is not set, the empty
# ## string will be used:
# # [outputs.kinesis.partition]
# # method = "tag"
# # key = "host"
#
#
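A minimal sketch of the four partition methods listed above; the `partition` and `metric` types and `choosePartitionKey` are illustrative stand-ins, not the plugin's implementation.

```go
package main

import (
	"crypto/rand"
	"encoding/hex"
	"fmt"
)

// partition mirrors the [outputs.kinesis.partition] table shown above.
type partition struct {
	Method string
	Key    string
}

// metric is a minimal stand-in for a Telegraf metric (name + tags).
type metric struct {
	name string
	tags map[string]string
}

// choosePartitionKey is a hypothetical helper implementing the four
// documented methods: static, random, measurement, and tag.
func choosePartitionKey(p partition, m metric) string {
	switch p.Method {
	case "static":
		return p.Key
	case "random":
		b := make([]byte, 16)
		rand.Read(b)
		return hex.EncodeToString(b) // a random key spreads writes across shards
	case "measurement":
		return m.name
	case "tag":
		// If the tag is not set, the empty string is used.
		return m.tags[p.Key]
	}
	return ""
}

func main() {
	m := metric{name: "cpu", tags: map[string]string{"host": "web01"}}
	fmt.Println(choosePartitionKey(partition{Method: "tag", Key: "host"}, m)) // web01
}
```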
# ## Data format to output.
@@ -584,9 +507,6 @@
#
# ## Debug true - Prints OpenTSDB communication
# debug = false
#
# ## Separator separates measurement name from field
# separator = "_"
# # Configuration for the Prometheus client to spawn
@@ -596,10 +516,6 @@
#
# ## Interval to expire metrics and not deliver to prometheus, 0 == no expiration
# # expiration_interval = "60s"
#
# ## Collectors to enable, valid entries are "gocollector" and "process".
# ## If unset, both are enabled.
# collectors_exclude = ["gocollector", "process"]
# # Configuration for the Riemann server to send metrics to
@@ -673,46 +589,6 @@
# # data_format = "influx"
# # Configuration for Wavefront server to send metrics to
# [[outputs.wavefront]]
# ## DNS name of the wavefront proxy server
# host = "wavefront.example.com"
#
# ## Port that the Wavefront proxy server listens on
# port = 2878
#
# ## prefix for metrics keys
# #prefix = "my.specific.prefix."
#
# ## whether to use "value" for name of simple fields
# #simple_fields = false
#
# ## character to use between metric and field name. defaults to . (dot)
# #metric_separator = "."
#
# ## Convert metric name paths to use the metricSeparator character
# ## When true (default) will convert all _ (underscore) characters in the final metric name
# #convert_paths = true
#
# ## Use Regex to sanitize metric and tag names from invalid characters
# ## Regex is more thorough, but significantly slower
# #use_regex = false
#
# ## point tags to use as the source name for Wavefront (if none found, host will be used)
# #source_override = ["hostname", "snmp_host", "node_host"]
#
# ## whether to convert boolean values to numeric values, with false -> 0.0 and true -> 1.0. default true
# #convert_bool = true
#
# ## Define a mapping, namespaced by metric prefix, from string values to numeric values
# ## The example below maps "green" -> 1.0, "yellow" -> 0.5, "red" -> 0.0 for
# ## any metrics beginning with "elasticsearch"
# #[[outputs.wavefront.string_to_number.elasticsearch]]
# # green = 1.0
# # yellow = 0.5
# # red = 0.0
###############################################################################
# PROCESSOR PLUGINS #
@@ -727,16 +603,6 @@
# AGGREGATOR PLUGINS #
###############################################################################
# # Keep the aggregate basicstats of each metric passing through.
# [[aggregators.basicstats]]
# ## General Aggregator Arguments:
# ## The period on which to flush & clear the aggregator.
# period = "30s"
# ## If true, the original metric will be dropped by the
# ## aggregator and will not get sent to the output plugins.
# drop_original = false
# # Create aggregate histograms.
# [[aggregators.histogram]]
# ## The period in which to flush the aggregator.
@@ -895,18 +761,6 @@
# bcacheDevs = ["bcache0"]
# # Collect bond interface status, slaves statuses and failures count
# [[inputs.bond]]
# ## Sets 'proc' directory path
# ## If not specified, then default is /proc
# # host_proc = "/proc"
#
# ## By default, telegraf gathers stats for all bond interfaces
# ## Setting interfaces will restrict the stats to the specified
# ## bond interfaces.
# # bond_interfaces = ["bond0"]
# # Read Cassandra metrics through Jolokia
# [[inputs.cassandra]]
# # This is the context root used to compose the jolokia url
@@ -1014,7 +868,7 @@
# ## Collection Delay (required - must account for metrics availability via CloudWatch API)
# delay = "5m"
#
# ## Recommended: use metric 'interval' that is a multiple of 'period' to avoid
# ## Recommended: use metric 'interval' that is a multiple of 'period' to avoid
# ## gaps or overlap in pulled data
# interval = "5m"
#
@@ -1085,7 +939,7 @@
# ## http://admin:secret@couchbase-0.example.com:8091/
# ##
# ## If no servers are specified, then localhost is used as the host.
# ## If no protocol is specified, HTTP is used.
# ## If no protocol is specified, HTTP is used.
# ## If no port is specified, 8091 is used.
# servers = ["http://localhost:8091"]
@@ -1097,50 +951,6 @@
# hosts = ["http://localhost:8086/_stats"]
# # Input plugin for DC/OS metrics
# [[inputs.dcos]]
# ## The DC/OS cluster URL.
# cluster_url = "https://dcos-ee-master-1"
#
# ## The ID of the service account.
# service_account_id = "telegraf"
# ## The private key file for the service account.
# service_account_private_key = "/etc/telegraf/telegraf-sa-key.pem"
#
# ## Path containing login token. If set, will read on every gather.
# # token_file = "/home/dcos/.dcos/token"
#
# ## In all filter options if both include and exclude are empty all items
# ## will be collected. Arrays may contain glob patterns.
# ##
# ## Node IDs to collect metrics from. If a node is excluded, no metrics will
# ## be collected for its containers or apps.
# # node_include = []
# # node_exclude = []
# ## Container IDs to collect container metrics from.
# # container_include = []
# # container_exclude = []
# ## Container IDs to collect app metrics from.
# # app_include = []
# # app_exclude = []
#
# ## Maximum concurrent connections to the cluster.
# # max_connections = 10
# ## Maximum time to receive a response from cluster.
# # response_timeout = "20s"
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## If false, skip chain & host verification
# # insecure_skip_verify = true
#
# ## Recommended filtering to reduce series cardinality.
# # [inputs.dcos.tagdrop]
# # path = ["/var/lib/mesos/slave/slaves/*"]
# # Read metrics from one or many disque servers
# [[inputs.disque]]
# ## An array of URI to gather stats about. Specify an ip or hostname
@@ -1185,9 +995,6 @@
# ## To use environment variables (ie, docker-machine), set endpoint = "ENV"
# endpoint = "unix:///var/run/docker.sock"
#
# ## Set to true to collect Swarm metrics (desired_replicas, running_replicas)
# gather_services = false
#
# ## Only collect metrics for these containers, collect all if empty
# container_names = []
#
@@ -1253,21 +1060,10 @@
# ## Set cluster_health to true when you want to also obtain cluster health stats
# cluster_health = false
#
# ## Adjust cluster_health_level when you want to also obtain detailed health stats
# ## The options are
# ## - indices (default)
# ## - cluster
# # cluster_health_level = "indices"
#
# ## Set cluster_stats to true when you want to also obtain cluster stats from the
# ## Master node.
# cluster_stats = false
#
# ## node_stats is a list of sub-stats that you want to have gathered. Valid options
# ## are "indices", "os", "process", "jvm", "thread_pool", "fs", "transport", "http",
# ## "breakers". Per default, all stats are gathered.
# # node_stats = ["jvm", "http"]
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
@@ -1328,7 +1124,7 @@
# ## - only one URI is allowed
# ## - https is not supported
# endpoint = "http://localhost:24220/api/plugins.json"
#
#
# ## Define which plugins have to be excluded (based on "type" field - e.g. monitor_agent)
# exclude = [
# "monitor_agent",
@@ -1548,7 +1344,7 @@
# ##
# # servers = ["USERID:PASSW0RD@lan(192.168.1.1)"]
#
# ## Recommended: use metric 'interval' that is a multiple of 'timeout' to avoid
# ## Recommended: use metric 'interval' that is a multiple of 'timeout' to avoid
# ## gaps or overlap in pulled data
# interval = "30s"
#
@@ -1576,10 +1372,6 @@
# # Read JMX metrics through Jolokia
# [[inputs.jolokia]]
# # DEPRECATED: the jolokia plugin has been deprecated in favor of the
# # jolokia2 plugin
# # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2
#
# ## This is the context root used to compose the jolokia url
# ## NOTE that Jolokia requires a trailing slash at the end of the context root
# ## NOTE that your jolokia security policy must allow for POST requests.
@@ -1642,64 +1434,6 @@
# attribute = "LoadedClassCount,UnloadedClassCount,TotalLoadedClassCount"
# # Read JMX metrics from a Jolokia REST agent endpoint
# [[inputs.jolokia2_agent]]
# # default_tag_prefix = ""
# # default_field_prefix = ""
# # default_field_separator = "."
#
# # Add agents URLs to query
# urls = ["http://localhost:8080/jolokia"]
# # username = ""
# # password = ""
# # response_timeout = "5s"
#
# ## Optional SSL config
# # ssl_ca = "/var/private/ca.pem"
# # ssl_cert = "/var/private/client.pem"
# # ssl_key = "/var/private/client-key.pem"
# # insecure_skip_verify = false
#
# ## Add metrics to read
# [[inputs.jolokia2_agent.metric]]
# name = "java_runtime"
# mbean = "java.lang:type=Runtime"
# paths = ["Uptime"]
# # Read JMX metrics from a Jolokia REST proxy endpoint
# [[inputs.jolokia2_proxy]]
# # default_tag_prefix = ""
# # default_field_prefix = ""
# # default_field_separator = "."
#
# ## Proxy agent
# url = "http://localhost:8080/jolokia"
# # username = ""
# # password = ""
# # response_timeout = "5s"
#
# ## Optional SSL config
# # ssl_ca = "/var/private/ca.pem"
# # ssl_cert = "/var/private/client.pem"
# # ssl_key = "/var/private/client-key.pem"
# # insecure_skip_verify = false
#
# ## Add proxy targets to query
# # default_target_username = ""
# # default_target_password = ""
# [[inputs.jolokia2_proxy.target]]
# url = "service:jmx:rmi:///jndi/rmi://targethost:9999/jmxrmi"
# # username = ""
# # password = ""
#
# ## Add metrics to read
# [[inputs.jolokia2_proxy.metric]]
# name = "java_runtime"
# mbean = "java.lang:type=Runtime"
# paths = ["Uptime"]
# # Read Kapacitor-formatted JSON metrics from one or more HTTP endpoints
# [[inputs.kapacitor]]
# ## Multiple URLs from which to read Kapacitor-formatted JSON
@@ -1725,9 +1459,6 @@
# ## Use bearer token for authorization
# # bearer_token = /path/to/bearer/token
#
# ## Set response_timeout (default 5 seconds)
# # response_timeout = "5s"
#
# ## Optional SSL Config
# # ssl_ca = /path/to/cafile
# # ssl_cert = /path/to/certfile
@@ -1962,15 +1693,6 @@
# response_timeout = "5s"
# # Read Nginx Plus' full status information (ngx_http_status_module)
# [[inputs.nginx_plus]]
# ## An array of ngx_http_status_module or status URI to gather stats.
# urls = ["http://localhost/status"]
#
# # HTTP response timeout (default: 5s)
# response_timeout = "5s"
# # Read NSQ topic and channel statistics.
# [[inputs.nsq]]
# ## An array of NSQD HTTP API endpoints
@@ -2016,18 +1738,6 @@
# bind_password = ""
# # A plugin to collect stats from OpenSMTPD, the OpenBSD SMTP server daemon
# [[inputs.opensmtpd]]
# ## If running as a restricted user you can prepend sudo for additional access:
# #use_sudo = false
#
# ## The default location of the smtpctl binary can be overridden with:
# binary = "/usr/sbin/smtpctl"
#
# ## The default timeout of 1000ms can be overridden with (in milliseconds):
# timeout = 1000
# # Read metrics of passenger using passenger-status
# [[inputs.passenger]]
# ## Path of passenger-status.
@@ -2041,15 +1751,6 @@
# command = "passenger-status -v --show=xml"
# # Gather counters from PF
# [[inputs.pf]]
# ## PF requires root access on most systems.
# ## Setting 'use_sudo' to true will make use of sudo to run pfctl.
# ## Users must configure sudo to allow telegraf user to run pfctl with no password.
# ## pfctl can be restricted to the list command "pfctl -s info" only.
# use_sudo = false
# # Read metrics of phpfpm, via HTTP status page or socket
# [[inputs.phpfpm]]
# ## An array of addresses to gather stats about. Specify an ip or hostname
@@ -2091,13 +1792,6 @@
# # interface = ""
# # Measure postfix queue statistics
# [[inputs.postfix]]
# ## Postfix queue directory. If not provided, telegraf will try to use
# ## 'postconf -h queue_directory' to determine it.
# # queue_directory = "/var/spool/postfix"
# # Read metrics from one or many postgresql servers
# [[inputs.postgresql]]
# ## specify address via a url matching:
@@ -2200,10 +1894,6 @@
# # pattern = "nginx"
# ## user as argument for pgrep (ie, pgrep -u <user>)
# # user = "nginx"
# ## Systemd unit name
# # systemd_unit = "nginx.service"
# ## CGroup name or path
# # cgroup = "systemd/system.slice/nginx.service"
#
# ## override for process_name
# ## This is optional; default is sourced from /proc/<pid>/status
@@ -2221,9 +1911,6 @@
# ## An array of urls to scrape metrics from.
# urls = ["http://localhost:9100/metrics"]
#
# ## An array of Kubernetes services to scrape metrics from.
# # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"]
#
# ## Use bearer token for authorization
# # bearer_token = /path/to/bearer/token
#
@@ -2271,13 +1958,9 @@
# ## Includes connection time, any redirects, and reading the response body.
# # client_timeout = "4s"
#
# ## A list of nodes to gather as the rabbitmq_node measurement. If not
# ## specified, metrics for all nodes are gathered.
# ## A list of nodes to pull metrics about. If not specified, metrics for
# ## all nodes are gathered.
# # nodes = ["rabbit@node1", "rabbit@node2"]
#
# ## A list of queues to gather as the rabbitmq_queue measurement. If not
# ## specified, metrics for all queues are gathered.
# # queues = ["telegraf"]
# # Read raindrops stats (raindrops - real-time stats for preforking Rack servers)
@@ -2351,40 +2034,6 @@
# # remove_numbers = true
# # Read metrics from storage devices supporting S.M.A.R.T.
# [[inputs.smart]]
# ## Optionally specify the path to the smartctl executable
# # path = "/usr/bin/smartctl"
# #
# ## On most platforms smartctl requires root access.
# ## Setting 'use_sudo' to true will make use of sudo to run smartctl.
# ## Sudo must be configured to allow the telegraf user to run smartctl
# ## without a password.
# # use_sudo = false
# #
# ## Skip checking disks in this power mode. Defaults to
# ## "standby" to not wake up disks that have stoped rotating.
# ## See --nocheck in the man pages for smartctl.
# ## smartctl version 5.41 and 5.42 have faulty detection of
# ## power mode and might require changing this value to
# ## "never" depending on your disks.
# # nocheck = "standby"
# #
# ## Gather detailed metrics for each SMART Attribute.
# ## Defaults to "false"
# ##
# # attributes = false
# #
# ## Optionally specify devices to exclude from reporting.
# # excludes = [ "/dev/pass6" ]
# #
# ## Optionally specify devices and device type; if unset,
# ## a scan (smartctl --scan) for S.M.A.R.T. devices will be
# ## done and all devices found will be included except those
# ## listed in excludes.
# # devices = [ "/dev/ada0 -d atacam" ]
# # Retrieves SNMP values from remote agents
# [[inputs.snmp]]
# agents = [ "127.0.0.1:161" ]
@@ -2548,15 +2197,6 @@
# sub_tables=[".1.3.6.1.2.1.2.2.1.13", "bytes_recv", "bytes_send"]
# # Read stats from one or more Solr servers or cores
# [[inputs.solr]]
# ## specify a list of one or more Solr servers
# servers = ["http://localhost:8983"]
#
# ## specify a list of one or more Solr cores (default - all)
# # cores = ["main"]
# # Read metrics from Microsoft SQL Server
# [[inputs.sqlserver]]
# ## Specify instances to monitor with a list of connection strings.
@@ -2601,7 +2241,7 @@
# #
# #
# ## Options for the sadf command. The values on the left represent the sadf
# ## options and the values on the right their description (which are used for
# ## options and the values on the right their description (which are used for
# ## grouping and prefixing metrics).
# ##
# ## Run 'sar -h' or 'man sar' to find out the supported options for your
@@ -2632,18 +2272,6 @@
# # vg = "rootvg"
# # Reads metrics from a Teamspeak 3 Server via ServerQuery
# [[inputs.teamspeak]]
# ## Server address for Teamspeak 3 ServerQuery
# # server = "127.0.0.1:10011"
# ## Username for ServerQuery
# username = "serverqueryuser"
# ## Password for ServerQuery
# password = "secret"
# ## Array of virtual servers
# # virtual_servers = [1]
# # Gather metrics from the Tomcat server status page.
# [[inputs.tomcat]]
# ## URL of the Tomcat server status
@@ -2678,21 +2306,6 @@
# pools = ["redis_pool", "mc_pool"]
# # A plugin to collect stats from Unbound - a validating, recursive, and caching DNS resolver
# [[inputs.unbound]]
# ## If running as a restricted user you can prepend sudo for additional access:
# #use_sudo = false
#
# ## The default location of the unbound-control binary can be overridden with:
# binary = "/usr/sbin/unbound-control"
#
# ## The default timeout of 1s can be overridden with:
# timeout = "1s"
#
# ## Use the builtin fielddrop/fieldpass telegraf filters in order to keep/remove specific fields
# fieldpass = ["total_*", "num_*","time_up", "mem_*"]
# # A plugin to collect stats from Varnish HTTP Cache
# [[inputs.varnish]]
# ## If running as a restricted user you can prepend sudo for additional access:
@@ -2706,10 +2319,6 @@
# ## Glob matching can be used, ie, stats = ["MAIN.*"]
# ## stats may also be set to ["*"], which will collect all stats
# stats = ["MAIN.cache_hit", "MAIN.cache_miss", "MAIN.uptime"]
#
# ## Optional name for the varnish instance (or working directory) to query
# ## Usually appended after -n in the varnish cli
# #name = instanceName
# # Read metrics of ZFS from arcstats, zfetchstats, vdev_cache_stats, and pools
@@ -2791,14 +2400,6 @@
# ## Maximum line size allowed to be sent in bytes.
# ## 0 means to use the default of 65536 bytes (64 kibibytes)
# max_line_size = 0
#
# ## Set one or more allowed client CA certificate file names to
# ## enable mutually authenticated TLS connections
# tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
#
# ## Add service certificate and key
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
# # Read metrics from Kafka topic(s)
@@ -2874,9 +2475,6 @@
# ## be read from the beginning.
# from_beginning = false
#
# ## Method used to watch for file updates. Can be either "inotify" or "poll".
# # watch_method = "inotify"
#
# ## Parse logstash-style "grok" patterns:
# ## Telegraf built-in parsing patterns: https://goo.gl/dkay10
# [inputs.logparser.grok]
@@ -2912,10 +2510,7 @@
# # Read metrics from MQTT topic(s)
# [[inputs.mqtt_consumer]]
# ## MQTT broker URLs to be used. The format should be scheme://host:port,
# ## scheme can be tcp, ssl, or ws.
# servers = ["tcp://localhost:1883"]
#
# servers = ["localhost:1883"]
# ## MQTT QoS, must be 0, 1, or 2
# qos = 0
# ## Connection timeout for initial connection in seconds
@@ -2978,12 +2573,8 @@
# # Read NSQ topic for metrics.
# [[inputs.nsq_consumer]]
# ## Server option still works but is deprecated; we just prepend it to the nsqd array.
# # server = "localhost:4150"
# ## An array representing the NSQD TCP HTTP Endpoints
# nsqd = ["localhost:4150"]
# ## An array representing the NSQLookupd HTTP Endpoints
# nsqlookupd = ["localhost:4161"]
# ## A string representing the NSQD TCP Endpoint
# server = "localhost:4150"
# topic = "telegraf"
# channel = "consumer"
# max_in_flight = 100
@@ -3040,7 +2631,7 @@
# # Statsd UDP/TCP Server
# [[inputs.statsd]]
# ## Protocol, must be "tcp", "udp", "udp4" or "udp6" (default=udp)
# ## Protocol, must be "tcp" or "udp" (default=udp)
# protocol = "udp"
#
# ## MaxTCPConnection - applicable when protocol is set to tcp (default=250)
@@ -3104,9 +2695,6 @@
# ## Whether file is a named pipe
# pipe = false
#
# ## Method used to watch for file updates. Can be either "inotify" or "poll".
# # watch_method = "inotify"
#
# ## Data format to consume.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
@@ -3148,9 +2736,6 @@
#
# [inputs.webhooks.papertrail]
# path = "/papertrail"
#
# [inputs.webhooks.particle]
# path = "/particle"
# # This plugin implements the Zipkin http server to gather trace and timing data needed to troubleshoot latency problems in microservice architectures.

View File

@@ -63,8 +63,8 @@
# The full HTTP or UDP endpoint URL for your InfluxDB instance.
# Multiple urls can be specified but it is assumed that they are part of the same
# cluster; this means that only ONE of the urls will be written to each interval.
# urls = ["udp://127.0.0.1:8089"] # UDP endpoint example
urls = ["http://127.0.0.1:8086"] # required
# urls = ["udp://localhost:8089"] # UDP endpoint example
urls = ["http://localhost:8086"] # required
# The target database for metrics (telegraf will create it if it does not exist)
database = "telegraf" # required
# Precision of writes, valid values are "ns", "us" (or "µs"), "ms", "s", "m", "h".

View File

@@ -40,11 +40,6 @@ var (
// envVarRe is a regex to find environment variables in the config file
envVarRe = regexp.MustCompile(`\$\w+`)
envVarEscaper = strings.NewReplacer(
`"`, `\"`,
`\`, `\\`,
)
)
// Config specifies the URL/user/password for the database that telegraf
@@ -131,7 +126,7 @@ type AgentConfig struct {
// TODO(cam): Remove UTC and parameter, they are no longer
// valid for the agent config. Leaving them here for now for backwards-
// compatibility
// compatibility
UTC bool `toml:"utc"`
// Debug is the option for running in debug mode
@@ -688,17 +683,12 @@ func (c *Config) LoadConfig(path string) error {
}
// trimBOM trims the Byte-Order-Marks from the beginning of the file.
// this is for Windows compatibility only.
// this is for Windows compatibility only.
// see https://github.com/influxdata/telegraf/issues/1378
func trimBOM(f []byte) []byte {
return bytes.TrimPrefix(f, []byte("\xef\xbb\xbf"))
}
// escapeEnv escapes a value for inserting into a TOML string.
func escapeEnv(value string) string {
return envVarEscaper.Replace(value)
}
// parseFile loads a TOML configuration from a provided path and
// returns the AST produced from the TOML parser. When loading the file, it
// will find environment variables and replace them.
@@ -712,9 +702,8 @@ func parseFile(fpath string) (*ast.Table, error) {
env_vars := envVarRe.FindAll(contents, -1)
for _, env_var := range env_vars {
env_val, ok := os.LookupEnv(strings.TrimPrefix(string(env_var), "$"))
if ok {
env_val = escapeEnv(env_val)
env_val := os.Getenv(strings.TrimPrefix(string(env_var), "$"))
if env_val != "" {
contents = bytes.Replace(contents, env_var, []byte(env_val), 1)
}
}
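For reference, a self-contained sketch of the `$VAR` substitution loop in this hunk, using the `os.LookupEnv` variant and omitting the TOML escaping step (`escapeEnv`) for brevity:

```go
package main

import (
	"bytes"
	"fmt"
	"os"
	"regexp"
	"strings"
)

// envVarRe finds environment variables in the config file, as in the hunk above.
var envVarRe = regexp.MustCompile(`\$\w+`)

// substituteEnvVars mirrors the parseFile logic: find every $VAR token and
// replace its first occurrence with the variable's value when it is set.
func substituteEnvVars(contents []byte) []byte {
	for _, envVar := range envVarRe.FindAll(contents, -1) {
		envVal, ok := os.LookupEnv(strings.TrimPrefix(string(envVar), "$"))
		if ok {
			contents = bytes.Replace(contents, envVar, []byte(envVal), 1)
		}
	}
	return contents
}

func main() {
	os.Setenv("INFLUX_URL", "http://localhost:8086")
	conf := []byte(`urls = ["$INFLUX_URL"]`)
	fmt.Println(string(substituteEnvVars(conf)))
	// urls = ["http://localhost:8086"]
}
```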

View File

@@ -40,7 +40,6 @@ func TestSnakeCase(t *testing.T) {
var (
sleepbin, _ = exec.LookPath("sleep")
echobin, _ = exec.LookPath("echo")
shell, _ = exec.LookPath("sh")
)
func TestRunTimeout(t *testing.T) {
@@ -85,13 +84,13 @@ func TestCombinedOutput(t *testing.T) {
// test that CombinedOutputTimeout and exec.Cmd.CombinedOutput return
// the same output from a failed command.
func TestCombinedOutputError(t *testing.T) {
if shell == "" {
t.Skip("'sh' binary not available on OS, skipping.")
if sleepbin == "" {
t.Skip("'sleep' binary not available on OS, skipping.")
}
cmd := exec.Command(shell, "-c", "false")
cmd := exec.Command(sleepbin, "foo")
expected, err := cmd.CombinedOutput()
cmd2 := exec.Command(shell, "-c", "false")
cmd2 := exec.Command(sleepbin, "foo")
actual, err := CombinedOutputTimeout(cmd2, time.Second)
assert.Error(t, err)
@@ -99,10 +98,10 @@ func TestCombinedOutputError(t *testing.T) {
}
func TestRunError(t *testing.T) {
if shell == "" {
t.Skip("'sh' binary not available on OS, skipping.")
if sleepbin == "" {
t.Skip("'sleep' binary not available on OS, skipping.")
}
cmd := exec.Command(shell, "-c", "false")
cmd := exec.Command(sleepbin, "foo")
err := RunTimeout(cmd, time.Second)
assert.Error(t, err)

View File

@@ -114,6 +114,7 @@ func (r *RunningAggregator) reset() {
// for period ticks to tell it when to push and reset the aggregator.
func (r *RunningAggregator) Run(
acc telegraf.Accumulator,
now time.Time,
shutdown chan struct{},
) {
// The start of the period is truncated to the nearest second.
@@ -132,7 +133,6 @@ func (r *RunningAggregator) Run(
// 2nd interval: 00:10 - 00:20.5
// etc.
//
now := time.Now()
r.periodStart = now.Truncate(time.Second)
truncation := now.Sub(r.periodStart)
r.periodEnd = r.periodStart.Add(r.Config.Period)
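The comments above describe truncating the period start to the nearest second; a minimal sketch of that arithmetic (`computeWindow` is an illustrative name, not the repo's API):

```go
package main

import (
	"fmt"
	"time"
)

// computeWindow sketches the truncation above: the period start is the
// current time truncated to the nearest second, and the sub-second
// remainder ("truncation") is what the aggregator adds onto its push timer.
func computeWindow(now time.Time, period time.Duration) (start, end time.Time, truncation time.Duration) {
	start = now.Truncate(time.Second)
	truncation = now.Sub(start)
	end = start.Add(period)
	return start, end, truncation
}

func main() {
	now := time.Date(2017, 12, 1, 0, 0, 0, 500e6, time.UTC) // 00:00:00.5
	start, end, trunc := computeWindow(now, 10*time.Second)
	fmt.Println(start.Format("15:04:05.0"), "->", end.Format("15:04:05.0"), "remainder:", trunc)
	// 00:00:00.0 -> 00:00:10.0 remainder: 500ms
}
```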

View File

@@ -24,7 +24,7 @@ func TestAdd(t *testing.T) {
})
assert.NoError(t, ra.Config.Filter.Compile())
acc := testutil.Accumulator{}
go ra.Run(&acc, make(chan struct{}))
go ra.Run(&acc, time.Now(), make(chan struct{}))
m := ra.MakeMetric(
"RITest",
@@ -55,7 +55,7 @@ func TestAddMetricsOutsideCurrentPeriod(t *testing.T) {
})
assert.NoError(t, ra.Config.Filter.Compile())
acc := testutil.Accumulator{}
go ra.Run(&acc, make(chan struct{}))
go ra.Run(&acc, time.Now(), make(chan struct{}))
// metric before current period
m := ra.MakeMetric(
@@ -113,7 +113,7 @@ func TestAddAndPushOnePeriod(t *testing.T) {
wg.Add(1)
go func() {
defer wg.Done()
ra.Run(&acc, shutdown)
ra.Run(&acc, time.Now(), shutdown)
}()
m := ra.MakeMetric(

View File

@@ -13,8 +13,6 @@ const (
Counter
Gauge
Untyped
Summary
Histogram
)
type Metric interface {

View File

@@ -326,9 +326,7 @@ func scanTagsValue(buf []byte, i int) (int, int, error) {
func scanFields(buf []byte, i int) (int, []byte, error) {
start := skipWhitespace(buf, i)
i = start
// track how many '"' we've seen since last '='
quotes := 0
quoted := false
// tracks how many '=' we've seen
equals := 0
@@ -352,17 +350,13 @@ func scanFields(buf []byte, i int) (int, []byte, error) {
// Only quote values in the field value since quotes are not significant
// in the field key
if buf[i] == '"' && equals > commas {
quoted = !quoted
i++
quotes++
if quotes > 2 {
break
}
continue
}
// If we see an =, ensure that there is at least one char before and after it
if buf[i] == '=' && quotes != 1 {
quotes = 0
if buf[i] == '=' && !quoted {
equals++
// check for "... =123" but allow "a\ =123"
@@ -404,18 +398,18 @@ func scanFields(buf []byte, i int) (int, []byte, error) {
}
}
if buf[i] == ',' && quotes != 1 {
if buf[i] == ',' && !quoted {
commas++
}
// reached end of block?
if buf[i] == ' ' && quotes != 1 {
if buf[i] == ' ' && !quoted {
break
}
i++
}
if quotes != 0 && quotes != 2 {
if quoted {
return i, buf[start:i], makeError("unbalanced quotes", buf, i)
}
@@ -653,7 +647,7 @@ func skipWhitespace(buf []byte, i int) int {
}
// makeError is a helper function for making a metric parsing error.
// reason is the reason why the error occurred.
// reason is the reason that the error occurred.
// buf should be the current buffer we are parsing.
// i is the current index, to give some context on where in the buffer we are.
func makeError(reason string, buf []byte, i int) error {
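A minimal, standalone sketch of the boolean quote tracking used in this hunk: delimiters only count when the scanner is outside quotes. `splitFields` is illustrative and skips escaped-quote handling, which the real parser deals with separately.

```go
package main

import "fmt"

// splitFields splits a field block on commas, toggling quoted on each '"'
// so commas inside quoted field values are not treated as separators.
func splitFields(buf string) []string {
	var fields []string
	quoted := false
	start := 0
	for i := 0; i < len(buf); i++ {
		switch buf[i] {
		case '"':
			quoted = !quoted
		case ',':
			if !quoted {
				fields = append(fields, buf[start:i])
				start = i + 1
			}
		}
	}
	return append(fields, buf[start:])
}

func main() {
	fmt.Println(splitFields(`a=1,b="x, y",c=2`))
	// [a=1 b="x, y" c=2]
}
```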

View File

@@ -181,7 +181,7 @@ func TestMetricReader_SplitWithExactLengthSplit(t *testing.T) {
}
}
// Regression test for when a metric requires to be split and one of the
// Regression test for when a metric requires to be split and one of the
// split metrics is larger than the buffer.
//
// Previously the metric index would be set incorrectly causing a panic.
@@ -218,7 +218,7 @@ func TestMetricReader_SplitOverflowOversized(t *testing.T) {
}
}
// Regression test for when a split metric exactly fits in the buffer.
// Regression test for when a split metric exactly fits in the buffer.
//
// Previously the metric would be overflow split when not required.
func TestMetricReader_SplitOverflowUneeded(t *testing.T) {

View File

@@ -1,7 +1,6 @@
package all
import (
_ "github.com/influxdata/telegraf/plugins/aggregators/basicstats"
_ "github.com/influxdata/telegraf/plugins/aggregators/histogram"
_ "github.com/influxdata/telegraf/plugins/aggregators/minmax"
)

View File

@@ -1,43 +0,0 @@
# BasicStats Aggregator Plugin
The BasicStats aggregator plugin gives count, max, min, mean, s2 (variance), and stdev for a set of values,
emitting the aggregate every `period` seconds.
### Configuration:
```toml
# Keep the aggregate basicstats of each metric passing through.
[[aggregators.basicstats]]
## General Aggregator Arguments:
## The period on which to flush & clear the aggregator.
period = "30s"
## If true, the original metric will be dropped by the
## aggregator and will not get sent to the output plugins.
drop_original = false
```
### Measurements & Fields:
- measurement1
- field1_count
- field1_max
- field1_min
- field1_mean
- field1_s2 (variance)
- field1_stdev (standard deviation)
### Tags:
No tags are applied by this aggregator.
### Example Output:
```
$ telegraf --config telegraf.conf --quiet
system,host=tars load1=1 1475583980000000000
system,host=tars load1=1 1475583990000000000
system,host=tars load1_count=2,load1_max=1,load1_min=1,load1_mean=1,load1_s2=0,load1_stdev=0 1475584010000000000
system,host=tars load1=1 1475584020000000000
system,host=tars load1=3 1475584030000000000
system,host=tars load1_count=2,load1_max=3,load1_min=1,load1_mean=2,load1_s2=2,load1_stdev=1.414162 1475584010000000000
```
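As a sanity check on the second aggregate above (load1 values 1 and 3), the reported fields follow directly from the sample-variance formula:

```
mean  = (1 + 3) / 2               = 2
s2    = ((1-2)² + (3-2)²) / (2-1) = 2
stdev = √2                        ≈ 1.4142
```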

View File

@@ -1,155 +0,0 @@
package basicstats
import (
"math"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/aggregators"
)
type BasicStats struct {
cache map[uint64]aggregate
}
func NewBasicStats() telegraf.Aggregator {
mm := &BasicStats{}
mm.Reset()
return mm
}
type aggregate struct {
fields map[string]basicstats
name string
tags map[string]string
}
type basicstats struct {
count float64
min float64
max float64
mean float64
M2 float64 //intermediate value for variance/stdev
}
var sampleConfig = `
## General Aggregator Arguments:
## The period on which to flush & clear the aggregator.
period = "30s"
## If true, the original metric will be dropped by the
## aggregator and will not get sent to the output plugins.
drop_original = false
`
func (m *BasicStats) SampleConfig() string {
return sampleConfig
}
func (m *BasicStats) Description() string {
return "Keep the aggregate basicstats of each metric passing through."
}
func (m *BasicStats) Add(in telegraf.Metric) {
id := in.HashID()
if _, ok := m.cache[id]; !ok {
// hit an uncached metric, create caches for first time:
a := aggregate{
name: in.Name(),
tags: in.Tags(),
fields: make(map[string]basicstats),
}
for k, v := range in.Fields() {
if fv, ok := convert(v); ok {
a.fields[k] = basicstats{
count: 1,
min: fv,
max: fv,
mean: fv,
M2: 0.0,
}
}
}
m.cache[id] = a
} else {
for k, v := range in.Fields() {
if fv, ok := convert(v); ok {
if _, ok := m.cache[id].fields[k]; !ok {
// hit an uncached field of a cached metric
m.cache[id].fields[k] = basicstats{
count: 1,
min: fv,
max: fv,
mean: fv,
M2: 0.0,
}
continue
}
tmp := m.cache[id].fields[k]
//https://en.m.wikipedia.org/wiki/Algorithms_for_calculating_variance
//variable initialization
x := fv
mean := tmp.mean
M2 := tmp.M2
//counter compute
n := tmp.count + 1
tmp.count = n
//mean compute
delta := x - mean
mean = mean + delta/n
tmp.mean = mean
//variance/stdev compute
M2 = M2 + delta*(x-mean)
tmp.M2 = M2
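//(Push later derives the sample variance as M2/(count-1), per Welford's online algorithm)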
//max/min compute
if fv < tmp.min {
tmp.min = fv
} else if fv > tmp.max {
tmp.max = fv
}
//store final data
m.cache[id].fields[k] = tmp
}
}
}
}
func (m *BasicStats) Push(acc telegraf.Accumulator) {
for _, aggregate := range m.cache {
fields := map[string]interface{}{}
for k, v := range aggregate.fields {
fields[k+"_count"] = v.count
fields[k+"_min"] = v.min
fields[k+"_max"] = v.max
fields[k+"_mean"] = v.mean
//v.count always >=1
if v.count > 1 {
variance := v.M2 / (v.count - 1)
fields[k+"_s2"] = variance
fields[k+"_stdev"] = math.Sqrt(variance)
}
//if count == 1 the variance is undefined (division by zero), so don't send these fields
}
acc.AddFields(aggregate.name, fields, aggregate.tags)
}
}
func (m *BasicStats) Reset() {
m.cache = make(map[uint64]aggregate)
}
func convert(in interface{}) (float64, bool) {
switch v := in.(type) {
case float64:
return v, true
case int64:
return float64(v), true
default:
return 0, false
}
}
func init() {
aggregators.Add("basicstats", func() telegraf.Aggregator {
return NewBasicStats()
})
}

View File

@@ -1,151 +0,0 @@
package basicstats
import (
"math"
"testing"
"time"
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/testutil"
)
var m1, _ = metric.New("m1",
map[string]string{"foo": "bar"},
map[string]interface{}{
"a": int64(1),
"b": int64(1),
"c": float64(2),
"d": float64(2),
},
time.Now(),
)
var m2, _ = metric.New("m1",
map[string]string{"foo": "bar"},
map[string]interface{}{
"a": int64(1),
"b": int64(3),
"c": float64(4),
"d": float64(6),
"e": float64(200),
"ignoreme": "string",
"andme": true,
},
time.Now(),
)
func BenchmarkApply(b *testing.B) {
minmax := NewBasicStats()
for n := 0; n < b.N; n++ {
minmax.Add(m1)
minmax.Add(m2)
}
}
// Test two metrics getting added.
func TestBasicStatsWithPeriod(t *testing.T) {
acc := testutil.Accumulator{}
minmax := NewBasicStats()
minmax.Add(m1)
minmax.Add(m2)
minmax.Push(&acc)
expectedFields := map[string]interface{}{
"a_count": float64(2), //a
"a_max": float64(1),
"a_min": float64(1),
"a_mean": float64(1),
"a_stdev": float64(0),
"a_s2": float64(0),
"b_count": float64(2), //b
"b_max": float64(3),
"b_min": float64(1),
"b_mean": float64(2),
"b_s2": float64(2),
"b_stdev": math.Sqrt(2),
"c_count": float64(2), //c
"c_max": float64(4),
"c_min": float64(2),
"c_mean": float64(3),
"c_s2": float64(2),
"c_stdev": math.Sqrt(2),
"d_count": float64(2), //d
"d_max": float64(6),
"d_min": float64(2),
"d_mean": float64(4),
"d_s2": float64(8),
"d_stdev": math.Sqrt(8),
"e_count": float64(1), //e
"e_max": float64(200),
"e_min": float64(200),
"e_mean": float64(200),
}
expectedTags := map[string]string{
"foo": "bar",
}
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}
// Test two metrics getting added with a push/reset in between (simulates
// getting added in different periods.)
func TestBasicStatsDifferentPeriods(t *testing.T) {
acc := testutil.Accumulator{}
minmax := NewBasicStats()
minmax.Add(m1)
minmax.Push(&acc)
expectedFields := map[string]interface{}{
"a_count": float64(1), //a
"a_max": float64(1),
"a_min": float64(1),
"a_mean": float64(1),
"b_count": float64(1), //b
"b_max": float64(1),
"b_min": float64(1),
"b_mean": float64(1),
"c_count": float64(1), //c
"c_max": float64(2),
"c_min": float64(2),
"c_mean": float64(2),
"d_count": float64(1), //d
"d_max": float64(2),
"d_min": float64(2),
"d_mean": float64(2),
}
expectedTags := map[string]string{
"foo": "bar",
}
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
acc.ClearMetrics()
minmax.Reset()
minmax.Add(m2)
minmax.Push(&acc)
expectedFields = map[string]interface{}{
"a_count": float64(1), //a
"a_max": float64(1),
"a_min": float64(1),
"a_mean": float64(1),
"b_count": float64(1), //b
"b_max": float64(3),
"b_min": float64(3),
"b_mean": float64(3),
"c_count": float64(1), //c
"c_max": float64(4),
"c_min": float64(4),
"c_mean": float64(4),
"d_count": float64(1), //d
"d_max": float64(6),
"d_min": float64(6),
"d_mean": float64(6),
"e_count": float64(1), //e
"e_max": float64(200),
"e_min": float64(200),
"e_mean": float64(200),
}
expectedTags = map[string]string{
"foo": "bar",
}
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}

View File

@@ -6,38 +6,31 @@ additional information can be found.
### Configuration:
This section contains the default TOML to configure the plugin. You can
generate it using `telegraf --usage <plugin-name>`.
```toml
# Description
[[inputs.example]]
example_option = "example_value"
# SampleConfig
```
### Metrics:
### Measurements & Fields:
Here you should add an optional description and links to where the user can
get more information about the measurements.
If the output is determined dynamically based on the input source, or there
are more metrics than can reasonably be listed, describe how the input is
mapped to the output.
- measurement1
- tags:
- tag1 (optional description)
- tag2
- fields:
- field1 (type, unit)
- field2 (float, percent)
- measurement2
- tags:
- tag3
- fields:
- field3 (integer, bytes)
### Tags:
- All measurements have the following tags:
- tag1 (optional description)
- tag2
- measurement2 has the following tags:
- tag3
### Sample Queries:
This section should contain some useful InfluxDB queries that can be used to
@@ -51,10 +44,6 @@ SELECT max(field1), mean(field1), min(field1) FROM measurement1 WHERE tag1=bar A
### Example Output:
This section shows example output in Line Protocol format. You can often use
`telegraf --input-filter <plugin-name> --test` or use the `file` output to get
this information.
```
measurement1,tag1=foo,tag2=bar field1=1i,field2=2.1 1453831884664956455
measurement2,tag1=foo,tag2=bar,tag3=baz field3=1i 1453831884664956455

View File

@@ -5,7 +5,6 @@ import (
_ "github.com/influxdata/telegraf/plugins/inputs/amqp_consumer"
_ "github.com/influxdata/telegraf/plugins/inputs/apache"
_ "github.com/influxdata/telegraf/plugins/inputs/bcache"
_ "github.com/influxdata/telegraf/plugins/inputs/bond"
_ "github.com/influxdata/telegraf/plugins/inputs/cassandra"
_ "github.com/influxdata/telegraf/plugins/inputs/ceph"
_ "github.com/influxdata/telegraf/plugins/inputs/cgroup"
@@ -15,7 +14,6 @@ import (
_ "github.com/influxdata/telegraf/plugins/inputs/consul"
_ "github.com/influxdata/telegraf/plugins/inputs/couchbase"
_ "github.com/influxdata/telegraf/plugins/inputs/couchdb"
_ "github.com/influxdata/telegraf/plugins/inputs/dcos"
_ "github.com/influxdata/telegraf/plugins/inputs/disque"
_ "github.com/influxdata/telegraf/plugins/inputs/dmcache"
_ "github.com/influxdata/telegraf/plugins/inputs/dns_query"
@@ -38,7 +36,6 @@ import (
_ "github.com/influxdata/telegraf/plugins/inputs/ipmi_sensor"
_ "github.com/influxdata/telegraf/plugins/inputs/iptables"
_ "github.com/influxdata/telegraf/plugins/inputs/jolokia"
_ "github.com/influxdata/telegraf/plugins/inputs/jolokia2"
_ "github.com/influxdata/telegraf/plugins/inputs/kafka_consumer"
_ "github.com/influxdata/telegraf/plugins/inputs/kafka_consumer_legacy"
_ "github.com/influxdata/telegraf/plugins/inputs/kapacitor"
@@ -56,18 +53,14 @@ import (
_ "github.com/influxdata/telegraf/plugins/inputs/nats_consumer"
_ "github.com/influxdata/telegraf/plugins/inputs/net_response"
_ "github.com/influxdata/telegraf/plugins/inputs/nginx"
_ "github.com/influxdata/telegraf/plugins/inputs/nginx_plus"
_ "github.com/influxdata/telegraf/plugins/inputs/nsq"
_ "github.com/influxdata/telegraf/plugins/inputs/nsq_consumer"
_ "github.com/influxdata/telegraf/plugins/inputs/nstat"
_ "github.com/influxdata/telegraf/plugins/inputs/ntpq"
_ "github.com/influxdata/telegraf/plugins/inputs/openldap"
_ "github.com/influxdata/telegraf/plugins/inputs/opensmtpd"
_ "github.com/influxdata/telegraf/plugins/inputs/passenger"
_ "github.com/influxdata/telegraf/plugins/inputs/pf"
_ "github.com/influxdata/telegraf/plugins/inputs/phpfpm"
_ "github.com/influxdata/telegraf/plugins/inputs/ping"
_ "github.com/influxdata/telegraf/plugins/inputs/postfix"
_ "github.com/influxdata/telegraf/plugins/inputs/postgresql"
_ "github.com/influxdata/telegraf/plugins/inputs/postgresql_extensible"
_ "github.com/influxdata/telegraf/plugins/inputs/powerdns"
@@ -81,23 +74,19 @@ import (
_ "github.com/influxdata/telegraf/plugins/inputs/riak"
_ "github.com/influxdata/telegraf/plugins/inputs/salesforce"
_ "github.com/influxdata/telegraf/plugins/inputs/sensors"
_ "github.com/influxdata/telegraf/plugins/inputs/smart"
_ "github.com/influxdata/telegraf/plugins/inputs/snmp"
_ "github.com/influxdata/telegraf/plugins/inputs/snmp_legacy"
_ "github.com/influxdata/telegraf/plugins/inputs/socket_listener"
_ "github.com/influxdata/telegraf/plugins/inputs/solr"
_ "github.com/influxdata/telegraf/plugins/inputs/sqlserver"
_ "github.com/influxdata/telegraf/plugins/inputs/statsd"
_ "github.com/influxdata/telegraf/plugins/inputs/sysstat"
_ "github.com/influxdata/telegraf/plugins/inputs/system"
_ "github.com/influxdata/telegraf/plugins/inputs/tail"
_ "github.com/influxdata/telegraf/plugins/inputs/tcp_listener"
_ "github.com/influxdata/telegraf/plugins/inputs/teamspeak"
_ "github.com/influxdata/telegraf/plugins/inputs/tomcat"
_ "github.com/influxdata/telegraf/plugins/inputs/trig"
_ "github.com/influxdata/telegraf/plugins/inputs/twemproxy"
_ "github.com/influxdata/telegraf/plugins/inputs/udp_listener"
_ "github.com/influxdata/telegraf/plugins/inputs/unbound"
_ "github.com/influxdata/telegraf/plugins/inputs/varnish"
_ "github.com/influxdata/telegraf/plugins/inputs/webhooks"
_ "github.com/influxdata/telegraf/plugins/inputs/win_perf_counters"

View File

@@ -1,85 +0,0 @@
# Bond Input Plugin
The Bond Input plugin collects network bond interface status, the status of the
bond's slave interfaces, and the failure counts of those slave interfaces.
The plugin collects these metrics from `/proc/net/bonding/*` files.
### Configuration:
```toml
[[inputs.bond]]
## Sets 'proc' directory path
## If not specified, then default is /proc
# host_proc = "/proc"
## By default, telegraf gathers stats for all bond interfaces
## Setting interfaces will restrict the stats to the specified
## bond interfaces.
# bond_interfaces = ["bond0"]
```
### Measurements & Fields:
- bond
- active_slave (for active-backup mode)
- status
- bond_slave
- failures
- status
### Description:
```
active_slave
Currently active slave interface for active-backup mode.
status
Status of the bond interface or the bond's slave interface (down = 0, up = 1).
failures
Number of failures for the bond's slave interface.
```
### Tags:
- bond
- bond
- bond_slave
- bond
- interface
### Example output:
Configuration:
```
[[inputs.bond]]
## Sets 'proc' directory path
## If not specified, then default is /proc
host_proc = "/proc"
## By default, telegraf gathers stats for all bond interfaces
## Setting interfaces will restrict the stats to the specified
## bond interfaces.
bond_interfaces = ["bond0", "bond1"]
```
Run:
```
telegraf --config telegraf.conf --input-filter bond --test
```
Output:
```
* Plugin: inputs.bond, Collection 1
> bond,bond=bond1,host=local active_slave="eth0",status=1i 1509704525000000000
> bond_slave,bond=bond1,interface=eth0,host=local status=1i,failures=0i 1509704525000000000
> bond_slave,host=local,bond=bond1,interface=eth1 status=1i,failures=0i 1509704525000000000
> bond,bond=bond0,host=isvetlov-mac.local status=1i 1509704525000000000
> bond_slave,bond=bond0,interface=eth1,host=local status=1i,failures=0i 1509704525000000000
> bond_slave,bond=bond0,interface=eth2,host=local status=1i,failures=0i 1509704525000000000
```

View File

@@ -1,204 +0,0 @@
package bond
import (
"bufio"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"strings"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
)
// default host proc path
const defaultHostProc = "/proc"
// env host proc variable name
const envProc = "HOST_PROC"
type Bond struct {
HostProc string `toml:"host_proc"`
BondInterfaces []string `toml:"bond_interfaces"`
}
var sampleConfig = `
## Sets 'proc' directory path
## If not specified, then default is /proc
# host_proc = "/proc"
## By default, telegraf gathers stats for all bond interfaces
## Setting interfaces will restrict the stats to the specified
## bond interfaces.
# bond_interfaces = ["bond0"]
`
func (bond *Bond) Description() string {
return "Collect bond interface status, slaves statuses and failures count"
}
func (bond *Bond) SampleConfig() string {
return sampleConfig
}
func (bond *Bond) Gather(acc telegraf.Accumulator) error {
// load proc path, get default value if config value and env variable are empty
bond.loadPath()
// list bond interfaces from bonding directory or gather all interfaces.
bondNames, err := bond.listInterfaces()
if err != nil {
return err
}
for _, bondName := range bondNames {
bondAbsPath := bond.HostProc + "/net/bonding/" + bondName
file, err := ioutil.ReadFile(bondAbsPath)
if err != nil {
acc.AddError(fmt.Errorf("error inspecting '%s' interface: %v", bondAbsPath, err))
continue
}
rawFile := strings.TrimSpace(string(file))
err = bond.gatherBondInterface(bondName, rawFile, acc)
if err != nil {
acc.AddError(fmt.Errorf("error inspecting '%s' interface: %v", bondName, err))
}
}
return nil
}
func (bond *Bond) gatherBondInterface(bondName string, rawFile string, acc telegraf.Accumulator) error {
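// A /proc/net/bonding/<bond> file starts with bond-level status, followed by
// one "Slave Interface:" section per slave; split at the first slave section
// so the two parts can be parsed separately.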
splitIndex := strings.Index(rawFile, "Slave Interface:")
if splitIndex == -1 {
splitIndex = len(rawFile)
}
bondPart := rawFile[:splitIndex]
slavePart := rawFile[splitIndex:]
err := bond.gatherBondPart(bondName, bondPart, acc)
if err != nil {
return err
}
err = bond.gatherSlavePart(bondName, slavePart, acc)
if err != nil {
return err
}
return nil
}
func (bond *Bond) gatherBondPart(bondName string, rawFile string, acc telegraf.Accumulator) error {
fields := make(map[string]interface{})
tags := map[string]string{
"bond": bondName,
}
scanner := bufio.NewScanner(strings.NewReader(rawFile))
for scanner.Scan() {
line := scanner.Text()
stats := strings.Split(line, ":")
if len(stats) < 2 {
continue
}
name := strings.TrimSpace(stats[0])
value := strings.TrimSpace(stats[1])
if strings.Contains(name, "Currently Active Slave") {
fields["active_slave"] = value
}
if strings.Contains(name, "MII Status") {
fields["status"] = 0
if value == "up" {
fields["status"] = 1
}
acc.AddFields("bond", fields, tags)
return nil
}
}
if err := scanner.Err(); err != nil {
return err
}
return fmt.Errorf("Couldn't find status info for '%s' ", bondName)
}
func (bond *Bond) gatherSlavePart(bondName string, rawFile string, acc telegraf.Accumulator) error {
var slave string
var status int
scanner := bufio.NewScanner(strings.NewReader(rawFile))
for scanner.Scan() {
line := scanner.Text()
stats := strings.Split(line, ":")
if len(stats) < 2 {
continue
}
name := strings.TrimSpace(stats[0])
value := strings.TrimSpace(stats[1])
if strings.Contains(name, "Slave Interface") {
slave = value
}
if strings.Contains(name, "MII Status") {
status = 0
if value == "up" {
status = 1
}
}
if strings.Contains(name, "Link Failure Count") {
count, err := strconv.Atoi(value)
if err != nil {
return err
}
fields := map[string]interface{}{
"status": status,
"failures": count,
}
tags := map[string]string{
"bond": bondName,
"interface": slave,
}
acc.AddFields("bond_slave", fields, tags)
}
}
if err := scanner.Err(); err != nil {
return err
}
return nil
}
// loadPath reads the proc path from the config first;
// if it is empty, it falls back to the env variable
func (bond *Bond) loadPath() {
if bond.HostProc == "" {
bond.HostProc = proc(envProc, defaultHostProc)
}
}
// proc can be used to read file paths from env
func proc(env, path string) string {
// try to read full file path
if p := os.Getenv(env); p != "" {
return p
}
// return default path
return path
}
func (bond *Bond) listInterfaces() ([]string, error) {
var interfaces []string
if len(bond.BondInterfaces) > 0 {
interfaces = bond.BondInterfaces
} else {
paths, err := filepath.Glob(bond.HostProc + "/net/bonding/*")
if err != nil {
return nil, err
}
for _, p := range paths {
interfaces = append(interfaces, filepath.Base(p))
}
}
return interfaces, nil
}
func init() {
inputs.Add("bond", func() telegraf.Input {
return &Bond{}
})
}

View File

@@ -1,77 +0,0 @@
package bond
import (
"testing"
"github.com/influxdata/telegraf/testutil"
)
var sampleTest802 = `
Ethernet Channel Bonding Driver: v3.5.0 (November 4, 2008)
Bonding Mode: IEEE 802.3ad Dynamic link aggregation
Transmit Hash Policy: layer2 (0)
MII Status: up
MII Polling Interval (ms): 100
Up Delay (ms): 0
Down Delay (ms): 0
802.3ad info
LACP rate: fast
Aggregator selection policy (ad_select): stable
bond bond0 has no active aggregator
Slave Interface: eth1
MII Status: up
Link Failure Count: 0
Permanent HW addr: 00:0c:29:f5:b7:11
Aggregator ID: N/A
Slave Interface: eth2
MII Status: up
Link Failure Count: 3
Permanent HW addr: 00:0c:29:f5:b7:1b
Aggregator ID: N/A
`
var sampleTestAB = `
Ethernet Channel Bonding Driver: v3.6.0 (September 26, 2009)
Bonding Mode: fault-tolerance (active-backup)
Primary Slave: eth2 (primary_reselect always)
Currently Active Slave: eth2
MII Status: up
MII Polling Interval (ms): 100
Up Delay (ms): 0
Down Delay (ms): 0
Slave Interface: eth3
MII Status: down
Speed: 1000 Mbps
Duplex: full
Link Failure Count: 2
Permanent HW addr:
Slave queue ID: 0
Slave Interface: eth2
MII Status: up
Speed: 100 Mbps
Duplex: full
Link Failure Count: 0
Permanent HW addr:
`
func TestGatherBondInterface(t *testing.T) {
var acc testutil.Accumulator
bond := &Bond{}
bond.gatherBondInterface("bond802", sampleTest802, &acc)
acc.AssertContainsTaggedFields(t, "bond", map[string]interface{}{"status": 1}, map[string]string{"bond": "bond802"})
acc.AssertContainsTaggedFields(t, "bond_slave", map[string]interface{}{"failures": 0, "status": 1}, map[string]string{"bond": "bond802", "interface": "eth1"})
acc.AssertContainsTaggedFields(t, "bond_slave", map[string]interface{}{"failures": 3, "status": 1}, map[string]string{"bond": "bond802", "interface": "eth2"})
bond.gatherBondInterface("bondAB", sampleTestAB, &acc)
acc.AssertContainsTaggedFields(t, "bond", map[string]interface{}{"active_slave": "eth2", "status": 1}, map[string]string{"bond": "bondAB"})
acc.AssertContainsTaggedFields(t, "bond_slave", map[string]interface{}{"failures": 2, "status": 0}, map[string]string{"bond": "bondAB", "interface": "eth3"})
acc.AssertContainsTaggedFields(t, "bond_slave", map[string]interface{}{"failures": 0, "status": 1}, map[string]string{"bond": "bondAB", "interface": "eth2"})
}

View File

@@ -92,7 +92,7 @@ func (c *CloudWatch) SampleConfig() string {
## Collection Delay (required - must account for metrics availability via CloudWatch API)
delay = "5m"
## Recommended: use metric 'interval' that is a multiple of 'period' to avoid
## Recommended: use metric 'interval' that is a multiple of 'period' to avoid
## gaps or overlap in pulled data
interval = "5m"

View File

@@ -21,7 +21,7 @@ var sampleConfig = `
## http://admin:secret@couchbase-0.example.com:8091/
##
## If no servers are specified, then localhost is used as the host.
## If no protocol is specified, HTTP is used.
## If no protocol is specified, HTTP is used.
## If no port is specified, 8091 is used.
servers = ["http://localhost:8091"]
`

View File

@@ -1,209 +0,0 @@
# DC/OS Input Plugin
This input plugin gathers metrics from a DC/OS cluster's [metrics component](https://docs.mesosphere.com/1.10/metrics/).
**Series Cardinality Warning**
Depending on the work load of your DC/OS cluster, this plugin can quickly
create a high number of series which, when unchecked, can cause high load on
your database.
- Use [measurement filtering](https://github.com/influxdata/telegraf/blob/master/docs/CONFIGURATION.md#measurement-filtering) liberally to exclude unneeded metrics, as well as the node, container, and app include/exclude options.
- Write to a database with an appropriate [retention policy](https://docs.influxdata.com/influxdb/v1.3/concepts/glossary/#retention-policy-rp).
- Limit the number of series allowed in your database using the `max-series-per-database` and `max-values-per-tag` settings.
- Consider enabling the [TSI](https://docs.influxdata.com/influxdb/v1.3/about_the_project/releasenotes-changelog/#release-notes-8) engine.
- Monitor your [series cardinality](https://docs.influxdata.com/influxdb/v1.3/troubleshooting/frequently-asked-questions/#how-can-i-query-for-series-cardinality).
### Configuration:
```toml
[[inputs.dcos]]
## The DC/OS cluster URL.
cluster_url = "https://dcos-master-1"
## The ID of the service account.
service_account_id = "telegraf"
## The private key file for the service account.
service_account_private_key = "/etc/telegraf/telegraf-sa-key.pem"
## Path containing login token. If set, will read on every gather.
# token_file = "/home/dcos/.dcos/token"
## In all filter options if both include and exclude are empty all items
## will be collected. Arrays may contain glob patterns.
##
## Node IDs to collect metrics from. If a node is excluded, no metrics will
## be collected for its containers or apps.
# node_include = []
# node_exclude = []
## Container IDs to collect container metrics from.
# container_include = []
# container_exclude = []
## Container IDs to collect app metrics from.
# app_include = []
# app_exclude = []
## Maximum concurrent connections to the cluster.
# max_connections = 10
## Maximum time to receive a response from cluster.
# response_timeout = "20s"
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
## If false, skip chain & host verification
# insecure_skip_verify = true
## Recommended filtering to reduce series cardinality.
# [inputs.dcos.tagdrop]
# path = ["/var/lib/mesos/slave/slaves/*"]
```
#### Enterprise Authentication
When using Enterprise DC/OS, it is recommended to use a service account to
authenticate with the cluster.
The plugin requires the following permissions:
```
dcos:adminrouter:ops:system-metrics full
dcos:adminrouter:ops:mesos full
```
Follow the directions to [create a service account and assign permissions](https://docs.mesosphere.com/1.10/security/service-auth/custom-service-auth/).
Quick configuration using the Enterprise CLI:
```
dcos security org service-accounts keypair telegraf-sa-key.pem telegraf-sa-cert.pem
dcos security org service-accounts create -p telegraf-sa-cert.pem -d "Telegraf DC/OS input plugin" telegraf
dcos security org users grant telegraf dcos:adminrouter:ops:system-metrics full
dcos security org users grant telegraf dcos:adminrouter:ops:mesos full
```
#### Open Source Authentication
The Open Source DC/OS does not provide service accounts. Instead you can use
one of the following options:
1. [Disable authentication](https://dcos.io/docs/1.10/security/managing-authentication/#authentication-opt-out)
2. Use the `token_file` parameter to read an authentication token from a file.
Then `token_file` can be set by using the [dcos cli] to login periodically.
The cli can login for at most XXX days; you will need to ensure the cli
performs a new login before this time expires.
```
dcos auth login --username foo --password bar
dcos config show core.dcos_acs_token > ~/.dcos/token
```
Another option to create a `token_file` is to generate a token using the
cluster secret. This will allow you to set the expiration date manually or
even create a never-expiring token. However, if the cluster secret or the
token is compromised, it cannot be revoked and may require a full reinstall of
the cluster. For more information on this technique, see
[this blog post](https://medium.com/@richardgirges/authenticating-open-source-dc-os-with-third-party-services-125fa33a5add).
### Metrics:
Please consult the [Metrics Reference](https://docs.mesosphere.com/1.10/metrics/reference/)
for details on field interpretation.
- dcos_node
- tags:
- cluster
- hostname
- path (filesystem fields only)
- interface (network fields only)
- fields:
- system_uptime (float)
- cpu_cores (float)
- cpu_total (float)
- cpu_user (float)
- cpu_system (float)
- cpu_idle (float)
- cpu_wait (float)
- load_1min (float)
- load_5min (float)
- load_15min (float)
- filesystem_capacity_total_bytes (int)
- filesystem_capacity_used_bytes (int)
- filesystem_capacity_free_bytes (int)
- filesystem_inode_total (float)
- filesystem_inode_used (float)
- filesystem_inode_free (float)
- memory_total_bytes (int)
- memory_free_bytes (int)
- memory_buffers_bytes (int)
- memory_cached_bytes (int)
- swap_total_bytes (int)
- swap_free_bytes (int)
- swap_used_bytes (int)
- network_in_bytes (int)
- network_out_bytes (int)
- network_in_packets (float)
- network_out_packets (float)
- network_in_dropped (float)
- network_out_dropped (float)
- network_in_errors (float)
- network_out_errors (float)
- process_count (float)
- dcos_container
- tags:
- cluster
- hostname
- container_id
- task_name
- fields:
- cpus_limit (float)
- cpus_system_time (float)
- cpus_throttled_time (float)
- cpus_user_time (float)
- disk_limit_bytes (int)
- disk_used_bytes (int)
- mem_limit_bytes (int)
- mem_total_bytes (int)
- net_rx_bytes (int)
- net_rx_dropped (float)
- net_rx_errors (float)
- net_rx_packets (float)
- net_tx_bytes (int)
- net_tx_dropped (float)
- net_tx_errors (float)
- net_tx_packets (float)
- dcos_app
- tags:
- cluster
- hostname
- container_id
- task_name
- fields:
- fields are application specific
### Example Output:
```
dcos_node,cluster=enterprise,hostname=192.168.122.18,path=/boot filesystem_capacity_free_bytes=918188032i,filesystem_capacity_total_bytes=1063256064i,filesystem_capacity_used_bytes=145068032i,filesystem_inode_free=523958,filesystem_inode_total=524288,filesystem_inode_used=330 1511859222000000000
dcos_node,cluster=enterprise,hostname=192.168.122.18,interface=dummy0 network_in_bytes=0i,network_in_dropped=0,network_in_errors=0,network_in_packets=0,network_out_bytes=0i,network_out_dropped=0,network_out_errors=0,network_out_packets=0 1511859222000000000
dcos_node,cluster=enterprise,hostname=192.168.122.18,interface=docker0 network_in_bytes=0i,network_in_dropped=0,network_in_errors=0,network_in_packets=0,network_out_bytes=0i,network_out_dropped=0,network_out_errors=0,network_out_packets=0 1511859222000000000
dcos_node,cluster=enterprise,hostname=192.168.122.18 cpu_cores=2,cpu_idle=81.62,cpu_system=4.19,cpu_total=13.670000000000002,cpu_user=9.48,cpu_wait=0,load_15min=0.7,load_1min=0.22,load_5min=0.6,memory_buffers_bytes=970752i,memory_cached_bytes=1830473728i,memory_free_bytes=1178636288i,memory_total_bytes=3975073792i,process_count=198,swap_free_bytes=859828224i,swap_total_bytes=859828224i,swap_used_bytes=0i,system_uptime=18874 1511859222000000000
dcos_node,cluster=enterprise,hostname=192.168.122.18,interface=lo network_in_bytes=1090992450i,network_in_dropped=0,network_in_errors=0,network_in_packets=1546938,network_out_bytes=1090992450i,network_out_dropped=0,network_out_errors=0,network_out_packets=1546938 1511859222000000000
dcos_node,cluster=enterprise,hostname=192.168.122.18,path=/ filesystem_capacity_free_bytes=1668378624i,filesystem_capacity_total_bytes=6641680384i,filesystem_capacity_used_bytes=4973301760i,filesystem_inode_free=3107856,filesystem_inode_total=3248128,filesystem_inode_used=140272 1511859222000000000
dcos_node,cluster=enterprise,hostname=192.168.122.18,interface=minuteman network_in_bytes=0i,network_in_dropped=0,network_in_errors=0,network_in_packets=0,network_out_bytes=210i,network_out_dropped=0,network_out_errors=0,network_out_packets=3 1511859222000000000
dcos_node,cluster=enterprise,hostname=192.168.122.18,interface=eth0 network_in_bytes=539886216i,network_in_dropped=1,network_in_errors=0,network_in_packets=979808,network_out_bytes=112395836i,network_out_dropped=0,network_out_errors=0,network_out_packets=891239 1511859222000000000
dcos_node,cluster=enterprise,hostname=192.168.122.18,interface=spartan network_in_bytes=0i,network_in_dropped=0,network_in_errors=0,network_in_packets=0,network_out_bytes=210i,network_out_dropped=0,network_out_errors=0,network_out_packets=3 1511859222000000000
dcos_node,cluster=enterprise,hostname=192.168.122.18,path=/var/lib/docker/overlay filesystem_capacity_free_bytes=1668378624i,filesystem_capacity_total_bytes=6641680384i,filesystem_capacity_used_bytes=4973301760i,filesystem_inode_free=3107856,filesystem_inode_total=3248128,filesystem_inode_used=140272 1511859222000000000
dcos_node,cluster=enterprise,hostname=192.168.122.18,interface=vtep1024 network_in_bytes=0i,network_in_dropped=0,network_in_errors=0,network_in_packets=0,network_out_bytes=0i,network_out_dropped=0,network_out_errors=0,network_out_packets=0 1511859222000000000
dcos_node,cluster=enterprise,hostname=192.168.122.18,path=/var/lib/docker/plugins filesystem_capacity_free_bytes=1668378624i,filesystem_capacity_total_bytes=6641680384i,filesystem_capacity_used_bytes=4973301760i,filesystem_inode_free=3107856,filesystem_inode_total=3248128,filesystem_inode_used=140272 1511859222000000000
dcos_node,cluster=enterprise,hostname=192.168.122.18,interface=d-dcos network_in_bytes=0i,network_in_dropped=0,network_in_errors=0,network_in_packets=0,network_out_bytes=0i,network_out_dropped=0,network_out_errors=0,network_out_packets=0 1511859222000000000
dcos_app,cluster=enterprise,container_id=9a78d34a-3bbf-467e-81cf-a57737f154ee,hostname=192.168.122.18 container_received_bytes_per_sec=0,container_throttled_bytes_per_sec=0 1511859222000000000
dcos_container,cluster=enterprise,container_id=cbf19b77-3b8d-4bcf-b81f-824b67279629,hostname=192.168.122.18 cpus_limit=0.3,cpus_system_time=307.31,cpus_throttled_time=102.029930607,cpus_user_time=268.57,disk_limit_bytes=268435456i,disk_used_bytes=30953472i,mem_limit_bytes=570425344i,mem_total_bytes=13316096i,net_rx_bytes=0i,net_rx_dropped=0,net_rx_errors=0,net_rx_packets=0,net_tx_bytes=0i,net_tx_dropped=0,net_tx_errors=0,net_tx_packets=0 1511859222000000000
dcos_app,cluster=enterprise,container_id=cbf19b77-3b8d-4bcf-b81f-824b67279629,hostname=192.168.122.18 container_received_bytes_per_sec=0,container_throttled_bytes_per_sec=0 1511859222000000000
dcos_container,cluster=enterprise,container_id=5725e219-f66e-40a8-b3ab-519d85f4c4dc,hostname=192.168.122.18,task_name=hello-world cpus_limit=0.6,cpus_system_time=25.6,cpus_throttled_time=327.977109217,cpus_user_time=566.54,disk_limit_bytes=0i,disk_used_bytes=0i,mem_limit_bytes=1107296256i,mem_total_bytes=335941632i,net_rx_bytes=0i,net_rx_dropped=0,net_rx_errors=0,net_rx_packets=0,net_tx_bytes=0i,net_tx_dropped=0,net_tx_errors=0,net_tx_packets=0 1511859222000000000
dcos_app,cluster=enterprise,container_id=5725e219-f66e-40a8-b3ab-519d85f4c4dc,hostname=192.168.122.18 container_received_bytes_per_sec=0,container_throttled_bytes_per_sec=0 1511859222000000000
dcos_app,cluster=enterprise,container_id=c76e1488-4fb7-4010-a4cf-25725f8173f9,hostname=192.168.122.18 container_received_bytes_per_sec=0,container_throttled_bytes_per_sec=0 1511859222000000000
dcos_container,cluster=enterprise,container_id=cbe0b2f9-061f-44ac-8f15-4844229e8231,hostname=192.168.122.18,task_name=telegraf cpus_limit=0.2,cpus_system_time=8.109999999,cpus_throttled_time=93.183916045,cpus_user_time=17.97,disk_limit_bytes=0i,disk_used_bytes=0i,mem_limit_bytes=167772160i,mem_total_bytes=0i,net_rx_bytes=0i,net_rx_dropped=0,net_rx_errors=0,net_rx_packets=0,net_tx_bytes=0i,net_tx_dropped=0,net_tx_errors=0,net_tx_packets=0 1511859222000000000
dcos_container,cluster=enterprise,container_id=b64115de-3d2a-431d-a805-76e7c46453f1,hostname=192.168.122.18 cpus_limit=0.2,cpus_system_time=2.69,cpus_throttled_time=20.064861214,cpus_user_time=6.56,disk_limit_bytes=268435456i,disk_used_bytes=29360128i,mem_limit_bytes=297795584i,mem_total_bytes=13733888i,net_rx_bytes=0i,net_rx_dropped=0,net_rx_errors=0,net_rx_packets=0,net_tx_bytes=0i,net_tx_dropped=0,net_tx_errors=0,net_tx_packets=0 1511859222000000000
dcos_app,cluster=enterprise,container_id=b64115de-3d2a-431d-a805-76e7c46453f1,hostname=192.168.122.18 container_received_bytes_per_sec=0,container_throttled_bytes_per_sec=0 1511859222000000000
```

View File

@@ -1,332 +0,0 @@
package dcos
import (
"bytes"
"context"
"crypto/tls"
"encoding/json"
"fmt"
"net/http"
"net/url"
"time"
jwt "github.com/dgrijalva/jwt-go"
)
const (
// How long to stay logged in for
loginDuration = 65 * time.Minute
)
// Client is an interface for communicating with the DC/OS API.
type Client interface {
SetToken(token string)
Login(ctx context.Context, sa *ServiceAccount) (*AuthToken, error)
GetSummary(ctx context.Context) (*Summary, error)
GetContainers(ctx context.Context, node string) ([]Container, error)
GetNodeMetrics(ctx context.Context, node string) (*Metrics, error)
GetContainerMetrics(ctx context.Context, node, container string) (*Metrics, error)
GetAppMetrics(ctx context.Context, node, container string) (*Metrics, error)
}
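// APIError is an error returned by the DC/OS API.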
type APIError struct {
StatusCode int
Title string
Description string
}
// Login is request data for logging in.
type Login struct {
UID string `json:"uid"`
Exp int64 `json:"exp"`
Token string `json:"token"`
}
// LoginError is the response when login fails.
type LoginError struct {
Title string `json:"title"`
Description string `json:"description"`
}
// LoginAuth is the response to a successful login.
type LoginAuth struct {
Token string `json:"token"`
}
// Slave is a node in the cluster.
type Slave struct {
ID string `json:"id"`
}
// Summary provides high level cluster wide information.
type Summary struct {
Cluster string
Slaves []Slave
}
// Container is a container on a node.
type Container struct {
ID string
}
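// DataPoint is a single measurement with its tags and unit.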
type DataPoint struct {
Name string `json:"name"`
Tags map[string]string `json:"tags"`
Unit string `json:"unit"`
Value float64 `json:"value"`
}
// Metrics holds the datapoints and dimensions returned by the DC/OS metrics API.
type Metrics struct {
Datapoints []DataPoint `json:"datapoints"`
Dimensions map[string]interface{} `json:"dimensions"`
}
// AuthToken is the authentication token.
type AuthToken struct {
Text string
Expire time.Time
}
// ClusterClient is a Client that uses the cluster URL.
type ClusterClient struct {
clusterURL *url.URL
httpClient *http.Client
credentials *Credentials
token string
semaphore chan struct{}
}
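// claims is the JWT payload used when logging in with a service account.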
type claims struct {
UID string `json:"uid"`
jwt.StandardClaims
}
func (e APIError) Error() string {
if e.Description != "" {
return fmt.Sprintf("%s: %s", e.Title, e.Description)
}
return e.Title
}
func NewClusterClient(
clusterURL *url.URL,
timeout time.Duration,
maxConns int,
tlsConfig *tls.Config,
) *ClusterClient {
httpClient := &http.Client{
Transport: &http.Transport{
MaxIdleConns: maxConns,
TLSClientConfig: tlsConfig,
},
Timeout: timeout,
}
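// The semaphore bounds the number of in-flight requests to maxConns.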
semaphore := make(chan struct{}, maxConns)
c := &ClusterClient{
clusterURL: clusterURL,
httpClient: httpClient,
semaphore: semaphore,
}
return c
}
func (c *ClusterClient) SetToken(token string) {
c.token = token
}
func (c *ClusterClient) Login(ctx context.Context, sa *ServiceAccount) (*AuthToken, error) {
token, err := c.createLoginToken(sa)
if err != nil {
return nil, err
}
exp := time.Now().Add(loginDuration)
body := &Login{
UID: sa.AccountID,
Exp: exp.Unix(),
Token: token,
}
octets, err := json.Marshal(body)
if err != nil {
return nil, err
}
req, err := http.NewRequest("POST", c.url("/acs/api/v1/auth/login"), bytes.NewBuffer(octets))
if err != nil {
return nil, err
}
req.Header.Add("Content-Type", "application/json")
req = req.WithContext(ctx)
resp, err := c.httpClient.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
if resp.StatusCode == http.StatusOK {
auth := &LoginAuth{}
dec := json.NewDecoder(resp.Body)
err = dec.Decode(auth)
if err != nil {
return nil, err
}
token := &AuthToken{
Text: auth.Token,
Expire: exp,
}
return token, nil
}
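// Login failed; try to decode the structured error body.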
loginError := &LoginError{}
dec := json.NewDecoder(resp.Body)
err = dec.Decode(loginError)
if err != nil {
err := &APIError{
StatusCode: resp.StatusCode,
Title: resp.Status,
}
return nil, err
}
err = &APIError{
StatusCode: resp.StatusCode,
Title: loginError.Title,
Description: loginError.Description,
}
return nil, err
}
func (c *ClusterClient) GetSummary(ctx context.Context) (*Summary, error) {
summary := &Summary{}
err := c.doGet(ctx, c.url("/mesos/master/state-summary"), summary)
if err != nil {
return nil, err
}
return summary, nil
}
func (c *ClusterClient) GetContainers(ctx context.Context, node string) ([]Container, error) {
list := []string{}
path := fmt.Sprintf("/system/v1/agent/%s/metrics/v0/containers", node)
err := c.doGet(ctx, c.url(path), &list)
if err != nil {
return nil, err
}
containers := make([]Container, 0, len(list))
for _, c := range list {
containers = append(containers, Container{ID: c})
}
return containers, nil
}
func (c *ClusterClient) getMetrics(ctx context.Context, url string) (*Metrics, error) {
metrics := &Metrics{}
err := c.doGet(ctx, url, metrics)
if err != nil {
return nil, err
}
return metrics, nil
}
func (c *ClusterClient) GetNodeMetrics(ctx context.Context, node string) (*Metrics, error) {
path := fmt.Sprintf("/system/v1/agent/%s/metrics/v0/node", node)
return c.getMetrics(ctx, c.url(path))
}
func (c *ClusterClient) GetContainerMetrics(ctx context.Context, node, container string) (*Metrics, error) {
path := fmt.Sprintf("/system/v1/agent/%s/metrics/v0/containers/%s", node, container)
return c.getMetrics(ctx, c.url(path))
}
func (c *ClusterClient) GetAppMetrics(ctx context.Context, node, container string) (*Metrics, error) {
path := fmt.Sprintf("/system/v1/agent/%s/metrics/v0/containers/%s/app", node, container)
return c.getMetrics(ctx, c.url(path))
}
func createGetRequest(url string, token string) (*http.Request, error) {
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return nil, err
}
if token != "" {
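// DC/OS uses the non-standard "token=<jwt>" authorization scheme.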
req.Header.Add("Authorization", "token="+token)
}
req.Header.Add("Accept", "application/json")
return req, nil
}
func (c *ClusterClient) doGet(ctx context.Context, url string, v interface{}) error {
req, err := createGetRequest(url, c.token)
if err != nil {
return err
}
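// Acquire a connection slot, or give up if the context is cancelled while waiting.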
select {
case c.semaphore <- struct{}{}:
break
case <-ctx.Done():
return ctx.Err()
}
resp, err := c.httpClient.Do(req.WithContext(ctx))
if err != nil {
<-c.semaphore
return err
}
defer func() {
resp.Body.Close()
<-c.semaphore
}()
// Clear invalid token if unauthorized
if resp.StatusCode == http.StatusUnauthorized {
c.token = ""
}
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
return &APIError{
StatusCode: resp.StatusCode,
Title: resp.Status,
}
}
if resp.StatusCode == http.StatusNoContent {
return nil
}
err = json.NewDecoder(resp.Body).Decode(v)
return err
}
func (c *ClusterClient) url(path string) string {
// Copy the URL so that setting the path does not mutate the shared clusterURL.
url := *c.clusterURL
url.Path = path
return url.String()
}
func (c *ClusterClient) createLoginToken(sa *ServiceAccount) (string, error) {
token := jwt.NewWithClaims(jwt.SigningMethodRS256, claims{
UID: sa.AccountID,
StandardClaims: jwt.StandardClaims{
// How long we have to login with this token
ExpiresAt: time.Now().Add(5 * time.Minute).Unix(),
},
})
return token.SignedString(sa.PrivateKey)
}

View File

@@ -1,232 +0,0 @@
package dcos
import (
"context"
"fmt"
"net/http"
"net/http/httptest"
"net/url"
"testing"
jwt "github.com/dgrijalva/jwt-go"
"github.com/stretchr/testify/require"
)
const (
privateKey = `-----BEGIN RSA PRIVATE KEY-----
MIICXQIBAAKBgQCwlGyzVp9cqtwiNCgCnaR0kilPZhr4xFBcnXxvQ8/uzOHaWKxj
XWR38cKR3gPh5+4iSmzMdo3HDJM5ks6imXGnp+LPOA5iNewnpLNs7UxA2arwKH/6
4qIaAXAtf5jE46wZIMgc2EW9wGL3dxC0JY8EXPpBFB/3J8gADkorFR8lwwIDAQAB
AoGBAJaFHxfMmjHK77U0UnrQWFSKFy64cftmlL4t/Nl3q7L68PdIKULWZIMeEWZ4
I0UZiFOwr4em83oejQ1ByGSwekEuiWaKUI85IaHfcbt+ogp9hY/XbOEo56OPQUAd
bEZv1JqJOqta9Ug1/E1P9LjEEyZ5F5ubx7813rxAE31qKtKJAkEA1zaMlCWIr+Rj
hGvzv5rlHH3wbOB4kQFXO4nqj3J/ttzR5QiJW24STMDcbNngFlVcDVju56LrNTiD
dPh9qvl7nwJBANILguR4u33OMksEZTYB7nQZSurqXsq6382zH7pTl29ANQTROHaM
PKC8dnDWq8RGTqKuvWblIzzGIKqIMovZo10CQC96T0UXirITFolOL3XjvAuvFO1Q
EAkdXJs77805m0dCK+P1IChVfiAEpBw3bKJArpAbQIlFfdI953JUp5SieU0CQEub
BSSEKMjh/cxu6peEHnb/262vayuCFKkQPu1sxWewLuVrAe36EKCy9dcsDmv5+rgo
Odjdxc9Madm4aKlaT6kCQQCpAgeblDrrxTrNQ+Typzo37PlnQrvI+0EceAUuJ72G
P0a+YZUeHNRqT2pPN9lMTAZGGi3CtcF2XScbLNEBeXge
-----END RSA PRIVATE KEY-----`
)
func TestLogin(t *testing.T) {
var tests = []struct {
name string
responseCode int
responseBody string
expectedError error
expectedToken string
}{
{
name: "Login successful",
responseCode: 200,
responseBody: `{"token": "XXX.YYY.ZZZ"}`,
expectedError: nil,
expectedToken: "XXX.YYY.ZZZ",
},
{
name: "Unauthorized Error",
responseCode: http.StatusUnauthorized,
responseBody: `{"title": "x", "description": "y"}`,
expectedError: &APIError{http.StatusUnauthorized, "x", "y"},
expectedToken: "",
},
}
key, err := jwt.ParseRSAPrivateKeyFromPEM([]byte(privateKey))
require.NoError(t, err)
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(tt.responseCode)
fmt.Fprintln(w, tt.responseBody)
})
ts := httptest.NewServer(handler)
u, err := url.Parse(ts.URL)
require.NoError(t, err)
ctx := context.Background()
sa := &ServiceAccount{
AccountID: "telegraf",
PrivateKey: key,
}
client := NewClusterClient(u, defaultResponseTimeout, 1, nil)
auth, err := client.Login(ctx, sa)
require.Equal(t, tt.expectedError, err)
if tt.expectedToken != "" {
require.Equal(t, tt.expectedToken, auth.Text)
} else {
require.Nil(t, auth)
}
ts.Close()
})
}
}
func TestGetSummary(t *testing.T) {
var tests = []struct {
name string
responseCode int
responseBody string
expectedValue *Summary
expectedError error
}{
{
name: "No nodes",
responseCode: 200,
responseBody: `{"cluster": "a", "slaves": []}`,
expectedValue: &Summary{Cluster: "a", Slaves: []Slave{}},
expectedError: nil,
},
{
name: "Unauthorized Error",
responseCode: http.StatusUnauthorized,
responseBody: `<html></html>`,
expectedValue: nil,
expectedError: &APIError{StatusCode: http.StatusUnauthorized, Title: "401 Unauthorized"},
},
{
name: "Has nodes",
responseCode: 200,
responseBody: `{"cluster": "a", "slaves": [{"id": "a"}, {"id": "b"}]}`,
expectedValue: &Summary{
Cluster: "a",
Slaves: []Slave{
Slave{ID: "a"},
Slave{ID: "b"},
},
},
expectedError: nil,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// check the path
w.WriteHeader(tt.responseCode)
fmt.Fprintln(w, tt.responseBody)
})
ts := httptest.NewServer(handler)
u, err := url.Parse(ts.URL)
require.NoError(t, err)
ctx := context.Background()
client := NewClusterClient(u, defaultResponseTimeout, 1, nil)
summary, err := client.GetSummary(ctx)
require.Equal(t, tt.expectedError, err)
require.Equal(t, tt.expectedValue, summary)
ts.Close()
})
}
}
func TestGetNodeMetrics(t *testing.T) {
var tests = []struct {
name string
responseCode int
responseBody string
expectedValue *Metrics
expectedError error
}{
{
name: "Empty Body",
responseCode: 200,
responseBody: `{}`,
expectedValue: &Metrics{},
expectedError: nil,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// check the path
w.WriteHeader(tt.responseCode)
fmt.Fprintln(w, tt.responseBody)
})
ts := httptest.NewServer(handler)
u, err := url.Parse(ts.URL)
require.NoError(t, err)
ctx := context.Background()
client := NewClusterClient(u, defaultResponseTimeout, 1, nil)
m, err := client.GetNodeMetrics(ctx, "foo")
require.Equal(t, tt.expectedError, err)
require.Equal(t, tt.expectedValue, m)
ts.Close()
})
}
}
func TestGetContainerMetrics(t *testing.T) {
var tests = []struct {
name string
responseCode int
responseBody string
expectedValue *Metrics
expectedError error
}{
{
name: "204 No Contents",
responseCode: 204,
responseBody: ``,
expectedValue: &Metrics{},
expectedError: nil,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// check the path
w.WriteHeader(tt.responseCode)
fmt.Fprintln(w, tt.responseBody)
})
ts := httptest.NewServer(handler)
u, err := url.Parse(ts.URL)
require.NoError(t, err)
ctx := context.Background()
client := NewClusterClient(u, defaultResponseTimeout, 1, nil)
m, err := client.GetContainerMetrics(ctx, "foo", "bar")
require.Equal(t, tt.expectedError, err)
require.Equal(t, tt.expectedValue, m)
ts.Close()
})
}
}

View File

@@ -1,72 +0,0 @@
package dcos
import (
"context"
"crypto/rsa"
"fmt"
"io/ioutil"
"strings"
"time"
"unicode/utf8"
)
const (
// How long before expiration to renew token
relogDuration = 5 * time.Minute
)
type Credentials interface {
Token(ctx context.Context, client Client) (string, error)
IsExpired() bool
}
type ServiceAccount struct {
AccountID string
PrivateKey *rsa.PrivateKey
auth *AuthToken
}
type TokenCreds struct {
Path string
}
type NullCreds struct {
}
func (c *ServiceAccount) Token(ctx context.Context, client Client) (string, error) {
auth, err := client.Login(ctx, c)
if err != nil {
return "", err
}
c.auth = auth
return auth.Text, nil
}
func (c *ServiceAccount) IsExpired() bool {
// The token is expired if it has not been acquired yet, or if we are
// within relogDuration of its expiration time.
return c.auth == nil || c.auth.Text == "" ||
time.Now().After(c.auth.Expire.Add(-relogDuration))
}
func (c *TokenCreds) Token(ctx context.Context, client Client) (string, error) {
octets, err := ioutil.ReadFile(c.Path)
if err != nil {
return "", fmt.Errorf("Error reading token file %q: %s", c.Path, err)
}
if !utf8.Valid(octets) {
return "", fmt.Errorf("Token file does not contain utf-8 encoded text: %s", c.Path)
}
token := strings.TrimSpace(string(octets))
return token, nil
}
func (c *TokenCreds) IsExpired() bool {
return true
}
func (c *NullCreds) Token(ctx context.Context, client Client) (string, error) {
return "", nil
}
func (c *NullCreds) IsExpired() bool {
return true
}

View File

@@ -1,435 +0,0 @@
package dcos
import (
"context"
"io/ioutil"
"net/url"
"sort"
"strings"
"sync"
"time"
jwt "github.com/dgrijalva/jwt-go"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/filter"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/inputs"
)
const (
defaultMaxConnections = 10
defaultResponseTimeout = 20 * time.Second
)
var (
nodeDimensions = []string{
"hostname",
"path",
"interface",
}
containerDimensions = []string{
"hostname",
"container_id",
"task_name",
}
appDimensions = []string{
"hostname",
"container_id",
"task_name",
}
)
type DCOS struct {
ClusterURL string `toml:"cluster_url"`
ServiceAccountID string `toml:"service_account_id"`
ServiceAccountPrivateKey string
TokenFile string
NodeInclude []string
NodeExclude []string
ContainerInclude []string
ContainerExclude []string
AppInclude []string
AppExclude []string
MaxConnections int
ResponseTimeout internal.Duration
SSLCA string `toml:"ssl_ca"`
SSLCert string `toml:"ssl_cert"`
SSLKey string `toml:"ssl_key"`
InsecureSkipVerify bool `toml:"insecure_skip_verify"`
client Client
creds Credentials
initialized bool
nodeFilter filter.Filter
containerFilter filter.Filter
appFilter filter.Filter
taskNameFilter filter.Filter
}
func (d *DCOS) Description() string {
return "Input plugin for DC/OS metrics"
}
var sampleConfig = `
## The DC/OS cluster URL.
cluster_url = "https://dcos-ee-master-1"
## The ID of the service account.
service_account_id = "telegraf"
## The private key file for the service account.
service_account_private_key = "/etc/telegraf/telegraf-sa-key.pem"
## Path containing login token. If set, will read on every gather.
# token_file = "/home/dcos/.dcos/token"
## In all filter options, if both include and exclude are empty, all items
## will be collected. Arrays may contain glob patterns.
##
## Node IDs to collect metrics from. If a node is excluded, no metrics will
## be collected for its containers or apps.
# node_include = []
# node_exclude = []
## Container IDs to collect container metrics from.
# container_include = []
# container_exclude = []
## Container IDs to collect app metrics from.
# app_include = []
# app_exclude = []
## Maximum concurrent connections to the cluster.
# max_connections = 10
## Maximum time to receive a response from cluster.
# response_timeout = "20s"
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
## If false, skip chain & host verification
# insecure_skip_verify = true
## Recommended filtering to reduce series cardinality.
# [inputs.dcos.tagdrop]
# path = ["/var/lib/mesos/slave/slaves/*"]
`
func (d *DCOS) SampleConfig() string {
return sampleConfig
}
func (d *DCOS) Gather(acc telegraf.Accumulator) error {
err := d.init()
if err != nil {
return err
}
ctx := context.Background()
token, err := d.creds.Token(ctx, d.client)
if err != nil {
return err
}
d.client.SetToken(token)
summary, err := d.client.GetSummary(ctx)
if err != nil {
return err
}
var wg sync.WaitGroup
for _, node := range summary.Slaves {
wg.Add(1)
go func(node string) {
defer wg.Done()
d.GatherNode(ctx, acc, summary.Cluster, node)
}(node.ID)
}
wg.Wait()
return nil
}
func (d *DCOS) GatherNode(ctx context.Context, acc telegraf.Accumulator, cluster, node string) {
if !d.nodeFilter.Match(node) {
return
}
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
m, err := d.client.GetNodeMetrics(ctx, node)
if err != nil {
acc.AddError(err)
return
}
d.addNodeMetrics(acc, cluster, m)
}()
d.GatherContainers(ctx, acc, cluster, node)
wg.Wait()
}
func (d *DCOS) GatherContainers(ctx context.Context, acc telegraf.Accumulator, cluster, node string) {
containers, err := d.client.GetContainers(ctx, node)
if err != nil {
acc.AddError(err)
return
}
var wg sync.WaitGroup
for _, container := range containers {
if d.containerFilter.Match(container.ID) {
wg.Add(1)
go func(container string) {
defer wg.Done()
m, err := d.client.GetContainerMetrics(ctx, node, container)
if err != nil {
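// The container may have stopped between listing and collection; a 404 here is expected and not an error.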
if err, ok := err.(APIError); ok && err.StatusCode == 404 {
return
}
acc.AddError(err)
return
}
d.addContainerMetrics(acc, cluster, m)
}(container.ID)
}
if d.appFilter.Match(container.ID) {
wg.Add(1)
go func(container string) {
defer wg.Done()
m, err := d.client.GetAppMetrics(ctx, node, container)
if err != nil {
if err, ok := err.(APIError); ok && err.StatusCode == 404 {
return
}
acc.AddError(err)
return
}
d.addAppMetrics(acc, cluster, m)
}(container.ID)
}
}
wg.Wait()
}
type point struct {
tags map[string]string
labels map[string]string
fields map[string]interface{}
}
func (d *DCOS) createPoints(acc telegraf.Accumulator, m *Metrics) []*point {
points := make(map[string]*point)
for _, dp := range m.Datapoints {
fieldKey := strings.Replace(dp.Name, ".", "_", -1)
tags := dp.Tags
if tags == nil {
tags = make(map[string]string)
}
if dp.Unit == "bytes" && !strings.HasSuffix(fieldKey, "_bytes") {
fieldKey = fieldKey + "_bytes"
}
if strings.HasPrefix(fieldKey, "dcos_metrics_module_") {
fieldKey = strings.TrimPrefix(fieldKey, "dcos_metrics_module_")
}
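// Datapoints that share a tag set are merged into a single point, keyed by the sorted tags.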
tagset := make([]string, 0, len(tags))
for k, v := range tags {
tagset = append(tagset, k+"="+v)
}
sort.Strings(tagset)
seriesParts := make([]string, 0, len(tagset))
seriesParts = append(seriesParts, tagset...)
seriesKey := strings.Join(seriesParts, ",")
p, ok := points[seriesKey]
if !ok {
p = &point{}
p.tags = tags
p.labels = make(map[string]string)
p.fields = make(map[string]interface{})
points[seriesKey] = p
}
if dp.Unit == "bytes" {
p.fields[fieldKey] = int64(dp.Value)
} else {
p.fields[fieldKey] = dp.Value
}
}
results := make([]*point, 0, len(points))
for _, p := range points {
for k, v := range m.Dimensions {
switch v := v.(type) {
case string:
p.tags[k] = v
case map[string]string:
if k == "labels" {
for k, v := range v {
p.labels[k] = v
}
}
}
}
results = append(results, p)
}
return results
}
func (d *DCOS) addMetrics(acc telegraf.Accumulator, cluster, mname string, m *Metrics, tagDimensions []string) {
tm := time.Now()
points := d.createPoints(acc, m)
for _, p := range points {
tags := make(map[string]string)
tags["cluster"] = cluster
for _, tagkey := range tagDimensions {
v, ok := p.tags[tagkey]
if ok {
tags[tagkey] = v
}
}
for k, v := range p.labels {
tags[k] = v
}
acc.AddFields(mname, p.fields, tags, tm)
}
}
func (d *DCOS) addNodeMetrics(acc telegraf.Accumulator, cluster string, m *Metrics) {
d.addMetrics(acc, cluster, "dcos_node", m, nodeDimensions)
}
func (d *DCOS) addContainerMetrics(acc telegraf.Accumulator, cluster string, m *Metrics) {
d.addMetrics(acc, cluster, "dcos_container", m, containerDimensions)
}
func (d *DCOS) addAppMetrics(acc telegraf.Accumulator, cluster string, m *Metrics) {
d.addMetrics(acc, cluster, "dcos_app", m, appDimensions)
}
func (d *DCOS) init() error {
if !d.initialized {
err := d.createFilters()
if err != nil {
return err
}
if d.client == nil {
client, err := d.createClient()
if err != nil {
return err
}
d.client = client
}
if d.creds == nil {
creds, err := d.createCredentials()
if err != nil {
return err
}
d.creds = creds
}
d.initialized = true
}
return nil
}
func (d *DCOS) createClient() (Client, error) {
tlsCfg, err := internal.GetTLSConfig(
d.SSLCert, d.SSLKey, d.SSLCA, d.InsecureSkipVerify)
if err != nil {
return nil, err
}
url, err := url.Parse(d.ClusterURL)
if err != nil {
return nil, err
}
client := NewClusterClient(
url,
d.ResponseTimeout.Duration,
d.MaxConnections,
tlsCfg,
)
return client, nil
}
func (d *DCOS) createCredentials() (Credentials, error) {
if d.ServiceAccountID != "" && d.ServiceAccountPrivateKey != "" {
bs, err := ioutil.ReadFile(d.ServiceAccountPrivateKey)
if err != nil {
return nil, err
}
privateKey, err := jwt.ParseRSAPrivateKeyFromPEM(bs)
if err != nil {
return nil, err
}
creds := &ServiceAccount{
AccountID: d.ServiceAccountID,
PrivateKey: privateKey,
}
return creds, nil
} else if d.TokenFile != "" {
creds := &TokenCreds{
Path: d.TokenFile,
}
return creds, nil
} else {
creds := &NullCreds{}
return creds, nil
}
}
func (d *DCOS) createFilters() error {
var err error
d.nodeFilter, err = filter.NewIncludeExcludeFilter(
d.NodeInclude, d.NodeExclude)
if err != nil {
return err
}
d.containerFilter, err = filter.NewIncludeExcludeFilter(
d.ContainerInclude, d.ContainerExclude)
if err != nil {
return err
}
d.appFilter, err = filter.NewIncludeExcludeFilter(
d.AppInclude, d.AppExclude)
if err != nil {
return err
}
return nil
}
func init() {
inputs.Add("dcos", func() telegraf.Input {
return &DCOS{
MaxConnections: defaultMaxConnections,
ResponseTimeout: internal.Duration{
Duration: defaultResponseTimeout,
},
}
})
}

View File

@@ -1,441 +0,0 @@
package dcos
import (
"context"
"fmt"
"testing"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/require"
)
type mockClient struct {
SetTokenF func(token string)
LoginF func(ctx context.Context, sa *ServiceAccount) (*AuthToken, error)
GetSummaryF func(ctx context.Context) (*Summary, error)
GetContainersF func(ctx context.Context, node string) ([]Container, error)
GetNodeMetricsF func(ctx context.Context, node string) (*Metrics, error)
GetContainerMetricsF func(ctx context.Context, node, container string) (*Metrics, error)
GetAppMetricsF func(ctx context.Context, node, container string) (*Metrics, error)
}
func (c *mockClient) SetToken(token string) {
c.SetTokenF(token)
}
func (c *mockClient) Login(ctx context.Context, sa *ServiceAccount) (*AuthToken, error) {
return c.LoginF(ctx, sa)
}
func (c *mockClient) GetSummary(ctx context.Context) (*Summary, error) {
return c.GetSummaryF(ctx)
}
func (c *mockClient) GetContainers(ctx context.Context, node string) ([]Container, error) {
return c.GetContainersF(ctx, node)
}
func (c *mockClient) GetNodeMetrics(ctx context.Context, node string) (*Metrics, error) {
return c.GetNodeMetricsF(ctx, node)
}
func (c *mockClient) GetContainerMetrics(ctx context.Context, node, container string) (*Metrics, error) {
return c.GetContainerMetricsF(ctx, node, container)
}
func (c *mockClient) GetAppMetrics(ctx context.Context, node, container string) (*Metrics, error) {
return c.GetAppMetricsF(ctx, node, container)
}
func TestAddNodeMetrics(t *testing.T) {
var tests = []struct {
name string
metrics *Metrics
check func(*testutil.Accumulator) []bool
}{
{
name: "basic datapoint conversion",
metrics: &Metrics{
Datapoints: []DataPoint{
{
Name: "process.count",
Unit: "count",
Value: 42.0,
},
},
},
check: func(acc *testutil.Accumulator) []bool {
return []bool{acc.HasPoint(
"dcos_node",
map[string]string{
"cluster": "a",
},
"process_count", 42.0,
)}
},
},
{
name: "path added as tag",
metrics: &Metrics{
Datapoints: []DataPoint{
{
Name: "filesystem.inode.free",
Tags: map[string]string{
"path": "/var/lib",
},
Unit: "count",
Value: 42.0,
},
},
},
check: func(acc *testutil.Accumulator) []bool {
return []bool{acc.HasPoint(
"dcos_node",
map[string]string{
"cluster": "a",
"path": "/var/lib",
},
"filesystem_inode_free", 42.0,
)}
},
},
{
name: "interface added as tag",
metrics: &Metrics{
Datapoints: []DataPoint{
{
Name: "network.out.dropped",
Tags: map[string]string{
"interface": "eth0",
},
Unit: "count",
Value: 42.0,
},
},
},
check: func(acc *testutil.Accumulator) []bool {
return []bool{acc.HasPoint(
"dcos_node",
map[string]string{
"cluster": "a",
"interface": "eth0",
},
"network_out_dropped", 42.0,
)}
},
},
{
name: "bytes unit appended to fieldkey",
metrics: &Metrics{
Datapoints: []DataPoint{
{
Name: "network.in",
Tags: map[string]string{
"interface": "eth0",
},
Unit: "bytes",
Value: 42.0,
},
},
},
check: func(acc *testutil.Accumulator) []bool {
return []bool{acc.HasPoint(
"dcos_node",
map[string]string{
"cluster": "a",
"interface": "eth0",
},
"network_in_bytes", int64(42),
)}
},
},
{
name: "dimensions added as tags",
metrics: &Metrics{
Datapoints: []DataPoint{
{
Name: "process.count",
Tags: map[string]string{},
Unit: "count",
Value: 42.0,
},
{
Name: "memory.total",
Tags: map[string]string{},
Unit: "bytes",
Value: 42,
},
},
Dimensions: map[string]interface{}{
"cluster_id": "c0760bbd-9e9d-434b-bd4a-39c7cdef8a63",
"hostname": "192.168.122.18",
"mesos_id": "2dfbbd28-29d2-411d-92c4-e2f84c38688e-S1",
},
},
check: func(acc *testutil.Accumulator) []bool {
return []bool{
acc.HasPoint(
"dcos_node",
map[string]string{
"cluster": "a",
"hostname": "192.168.122.18",
},
"process_count", 42.0),
acc.HasPoint(
"dcos_node",
map[string]string{
"cluster": "a",
"hostname": "192.168.122.18",
},
"memory_total_bytes", int64(42)),
}
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
var acc testutil.Accumulator
dcos := &DCOS{}
dcos.addNodeMetrics(&acc, "a", tt.metrics)
for i, ok := range tt.check(&acc) {
require.True(t, ok, fmt.Sprintf("Index was not true: %d", i))
}
})
}
}
func TestAddContainerMetrics(t *testing.T) {
var tests = []struct {
name string
metrics *Metrics
check func(*testutil.Accumulator) []bool
}{
{
name: "container",
metrics: &Metrics{
Datapoints: []DataPoint{
{
Name: "net.rx.errors",
Tags: map[string]string{
"container_id": "f25c457b-fceb-44f0-8f5b-38be34cbb6fb",
"executor_id": "telegraf.192fb45f-cc0c-11e7-af48-ea183c0b541a",
"executor_name": "Command Executor (Task: telegraf.192fb45f-cc0c-11e7-af48-ea183c0b541a) (Command: NO EXECUTABLE)",
"framework_id": "ab2f3a8b-06db-4e8c-95b6-fb1940874a30-0001",
"source": "telegraf.192fb45f-cc0c-11e7-af48-ea183c0b541a",
},
Unit: "count",
Value: 42.0,
},
},
Dimensions: map[string]interface{}{
"cluster_id": "c0760bbd-9e9d-434b-bd4a-39c7cdef8a63",
"container_id": "f25c457b-fceb-44f0-8f5b-38be34cbb6fb",
"executor_id": "telegraf.192fb45f-cc0c-11e7-af48-ea183c0b541a",
"framework_id": "ab2f3a8b-06db-4e8c-95b6-fb1940874a30-0001",
"framework_name": "marathon",
"framework_principal": "dcos_marathon",
"framework_role": "slave_public",
"hostname": "192.168.122.18",
"labels": map[string]string{
"DCOS_SPACE": "/telegraf",
},
"mesos_id": "2dfbbd28-29d2-411d-92c4-e2f84c38688e-S1",
"task_id": "telegraf.192fb45f-cc0c-11e7-af48-ea183c0b541a",
"task_name": "telegraf",
},
},
check: func(acc *testutil.Accumulator) []bool {
return []bool{
acc.HasPoint(
"dcos_container",
map[string]string{
"cluster": "a",
"container_id": "f25c457b-fceb-44f0-8f5b-38be34cbb6fb",
"hostname": "192.168.122.18",
"task_name": "telegraf",
"DCOS_SPACE": "/telegraf",
},
"net_rx_errors",
42.0,
),
}
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
var acc testutil.Accumulator
dcos := &DCOS{}
dcos.addContainerMetrics(&acc, "a", tt.metrics)
for i, ok := range tt.check(&acc) {
require.True(t, ok, fmt.Sprintf("Index was not true: %d", i))
}
})
}
}
func TestAddAppMetrics(t *testing.T) {
var tests = []struct {
name string
metrics *Metrics
check func(*testutil.Accumulator) []bool
}{
{
name: "tags are optional",
metrics: &Metrics{
Datapoints: []DataPoint{
{
Name: "dcos.metrics.module.container_throttled_bytes_per_sec",
Unit: "",
Value: 42.0,
},
},
},
check: func(acc *testutil.Accumulator) []bool {
return []bool{
acc.HasPoint(
"dcos_app",
map[string]string{
"cluster": "a",
},
"container_throttled_bytes_per_sec", 42.0,
),
}
},
},
{
name: "dimensions are tagged",
metrics: &Metrics{
Datapoints: []DataPoint{
{
Name: "dcos.metrics.module.container_throttled_bytes_per_sec",
Unit: "",
Value: 42.0,
},
},
Dimensions: map[string]interface{}{
"cluster_id": "c0760bbd-9e9d-434b-bd4a-39c7cdef8a63",
"container_id": "02d31175-1c01-4459-8520-ef8b1339bc52",
"hostname": "192.168.122.18",
"mesos_id": "2dfbbd28-29d2-411d-92c4-e2f84c38688e-S1",
},
},
check: func(acc *testutil.Accumulator) []bool {
return []bool{
acc.HasPoint(
"dcos_app",
map[string]string{
"cluster": "a",
"container_id": "02d31175-1c01-4459-8520-ef8b1339bc52",
"hostname": "192.168.122.18",
},
"container_throttled_bytes_per_sec", 42.0,
),
}
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
var acc testutil.Accumulator
dcos := &DCOS{}
dcos.addAppMetrics(&acc, "a", tt.metrics)
for i, ok := range tt.check(&acc) {
require.True(t, ok, fmt.Sprintf("Index was not true: %d", i))
}
})
}
}
func TestGatherFilterNode(t *testing.T) {
var tests = []struct {
name string
nodeInclude []string
nodeExclude []string
client Client
check func(*testutil.Accumulator) []bool
}{
{
name: "cluster without nodes has no metrics",
client: &mockClient{
SetTokenF: func(token string) {},
GetSummaryF: func(ctx context.Context) (*Summary, error) {
return &Summary{
Cluster: "a",
Slaves: []Slave{},
}, nil
},
},
check: func(acc *testutil.Accumulator) []bool {
return []bool{
acc.NMetrics() == 0,
}
},
},
{
name: "node include",
nodeInclude: []string{"x"},
client: &mockClient{
SetTokenF: func(token string) {},
GetSummaryF: func(ctx context.Context) (*Summary, error) {
return &Summary{
Cluster: "a",
Slaves: []Slave{
Slave{ID: "x"},
Slave{ID: "y"},
},
}, nil
},
GetContainersF: func(ctx context.Context, node string) ([]Container, error) {
return []Container{}, nil
},
GetNodeMetricsF: func(ctx context.Context, node string) (*Metrics, error) {
return &Metrics{
Datapoints: []DataPoint{
{
Name: "value",
Value: 42.0,
},
},
Dimensions: map[string]interface{}{
"hostname": "x",
},
}, nil
},
},
check: func(acc *testutil.Accumulator) []bool {
return []bool{
acc.HasPoint(
"dcos_node",
map[string]string{
"cluster": "a",
"hostname": "x",
},
"value", 42.0,
),
acc.NMetrics() == 1,
}
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
var acc testutil.Accumulator
dcos := &DCOS{
NodeInclude: tt.nodeInclude,
NodeExclude: tt.nodeExclude,
client: tt.client,
}
err := dcos.Gather(&acc)
require.NoError(t, err)
for i, ok := range tt.check(&acc) {
require.True(t, ok, fmt.Sprintf("Index was not true: %d", i))
}
})
}
}

View File

@@ -17,7 +17,7 @@ type DnsQuery struct {
// Domains or subdomains to query
Domains []string
// Network protocol name
// Network protocl name
Network string
// Server to query

View File

@@ -17,11 +17,6 @@ to gather stats from the [Engine API](https://docs.docker.com/engine/api/v1.20/)
## To use environment variables (ie, docker-machine), set endpoint = "ENV"
endpoint = "unix:///var/run/docker.sock"
## Set to true to collect Swarm metrics (desired_replicas, running_replicas)
## Note: configure this on one of the manager nodes in a Swarm cluster;
## configuring it on multiple Swarm managers results in duplicated metrics.
gather_services = false
## Only collect metrics for these containers. Values will be appended to
## container_name_include.
## Deprecated (1.4.0), use container_name_include
@@ -62,15 +57,6 @@ to gather stats from the [Engine API](https://docs.docker.com/engine/api/v1.20/)
When using the `"ENV"` endpoint, the connection is configured using the
[cli Docker environment variables](https://godoc.org/github.com/moby/moby/client#NewEnvClient).
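For example, a docker-machine style environment might look like the
following (the address and cert path are illustrative only):
```
export DOCKER_HOST=tcp://192.168.99.100:2376
export DOCKER_TLS_VERIFY=1
export DOCKER_CERT_PATH=/home/user/.docker/machine/machines/default
```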
#### Kubernetes Labels
Kubernetes may add many labels to your containers; if they are not needed,
you may prefer to exclude them:
```
docker_label_exclude = ["annotation.kubernetes*"]
```
### Measurements & Fields:
Every effort was made to preserve the names based on the JSON response from the
@@ -166,9 +152,6 @@ based on the availability of per-cpu stats on your system.
- available
- total
- used
- docker_swarm
  - tasks_desired
  - tasks_running
### Tags:
@@ -199,10 +182,6 @@ based on the availability of per-cpu stats on your system.
- network
- docker_container_blkio specific:
  - device
- docker_swarm specific:
  - service_id
  - service_name
  - service_mode
### Example Output:
@@ -254,7 +233,4 @@ io_service_bytes_recursive_sync=77824i,io_service_bytes_recursive_total=80293888
io_service_bytes_recursive_write=368640i,io_serviced_recursive_async=6562i,\
io_serviced_recursive_read=6492i,io_serviced_recursive_sync=37i,\
io_serviced_recursive_total=6599i,io_serviced_recursive_write=107i 1453409536840126713
>docker_swarm,
service_id=xaup2o9krw36j2dy1mjx1arjw,service_mode=replicated,service_name=test,\
tasks_desired=3,tasks_running=3 1508968160000000000
```

View File

@@ -6,7 +6,6 @@ import (
"net/http"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/swarm"
docker "github.com/docker/docker/client"
"github.com/docker/go-connections/sockets"
)
@@ -21,9 +20,6 @@ type Client interface {
ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error)
ContainerStats(ctx context.Context, containerID string, stream bool) (types.ContainerStats, error)
ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error)
ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error)
TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error)
NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error)
}
func NewEnvClient() (Client, error) {
@@ -69,12 +65,3 @@ func (c *SocketClient) ContainerStats(ctx context.Context, containerID string, s
func (c *SocketClient) ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error) {
return c.client.ContainerInspect(ctx, containerID)
}
func (c *SocketClient) ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) {
return c.client.ServiceList(ctx, options)
}
func (c *SocketClient) TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) {
return c.client.TaskList(ctx, options)
}
func (c *SocketClient) NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) {
return c.client.NodeList(ctx, options)
}

View File

@@ -6,7 +6,6 @@ import (
"encoding/json"
"fmt"
"io"
"log"
"net/http"
"regexp"
"strconv"
@@ -15,7 +14,6 @@ import (
"time"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/swarm"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/filter"
"github.com/influxdata/telegraf/internal"
@@ -27,8 +25,6 @@ type Docker struct {
Endpoint string
ContainerNames []string
GatherServices bool `toml:"gather_services"`
Timeout internal.Duration
PerDevice bool `toml:"perdevice"`
Total bool `toml:"total"`
@@ -76,9 +72,6 @@ var sampleConfig = `
## To use environment variables (ie, docker-machine), set endpoint = "ENV"
endpoint = "unix:///var/run/docker.sock"
## Set to true to collect Swarm metrics (desired_replicas, running_replicas)
gather_services = false
## Only collect metrics for these containers, collect all if empty
container_names = []
@@ -157,13 +150,6 @@ func (d *Docker) Gather(acc telegraf.Accumulator) error {
acc.AddError(err)
}
if d.GatherServices {
err := d.gatherSwarmInfo(acc)
if err != nil {
acc.AddError(err)
}
}
// List containers
opts := types.ContainerListOptions{}
ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration)
@@ -191,75 +177,6 @@ func (d *Docker) Gather(acc telegraf.Accumulator) error {
return nil
}
func (d *Docker) gatherSwarmInfo(acc telegraf.Accumulator) error {
ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration)
defer cancel()
services, err := d.client.ServiceList(ctx, types.ServiceListOptions{})
if err != nil {
return err
}
if len(services) > 0 {
tasks, err := d.client.TaskList(ctx, types.TaskListOptions{})
if err != nil {
return err
}
nodes, err := d.client.NodeList(ctx, types.NodeListOptions{})
if err != nil {
return err
}
running := map[string]int{}
tasksNoShutdown := map[string]int{}
activeNodes := make(map[string]struct{})
for _, n := range nodes {
if n.Status.State != swarm.NodeStateDown {
activeNodes[n.ID] = struct{}{}
}
}
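// Count running tasks and desired (non-shutdown) tasks per service.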
for _, task := range tasks {
if task.DesiredState != swarm.TaskStateShutdown {
tasksNoShutdown[task.ServiceID]++
}
if task.Status.State == swarm.TaskStateRunning {
running[task.ServiceID]++
}
}
for _, service := range services {
tags := map[string]string{}
fields := make(map[string]interface{})
now := time.Now()
tags["service_id"] = service.ID
tags["service_name"] = service.Spec.Name
if service.Spec.Mode.Replicated != nil && service.Spec.Mode.Replicated.Replicas != nil {
tags["service_mode"] = "replicated"
fields["tasks_running"] = running[service.ID]
fields["tasks_desired"] = *service.Spec.Mode.Replicated.Replicas
} else if service.Spec.Mode.Global != nil {
tags["service_mode"] = "global"
fields["tasks_running"] = running[service.ID]
fields["tasks_desired"] = tasksNoShutdown[service.ID]
} else {
log.Printf("E! Unknow Replicas Mode")
}
// Add metrics
acc.AddFields("docker_swarm",
fields,
tags,
now)
}
}
return nil
}
func (d *Docker) gatherInfo(acc telegraf.Accumulator) error {
// Init vars
dataFields := make(map[string]interface{})

View File

@@ -8,7 +8,6 @@ import (
"github.com/influxdata/telegraf/testutil"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/swarm"
"github.com/stretchr/testify/require"
)
@@ -17,9 +16,6 @@ type MockClient struct {
ContainerListF func(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error)
ContainerStatsF func(ctx context.Context, containerID string, stream bool) (types.ContainerStats, error)
ContainerInspectF func(ctx context.Context, containerID string) (types.ContainerJSON, error)
ServiceListF func(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error)
TaskListF func(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error)
NodeListF func(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error)
}
func (c *MockClient) Info(ctx context.Context) (types.Info, error) {
@@ -48,27 +44,6 @@ func (c *MockClient) ContainerInspect(
return c.ContainerInspectF(ctx, containerID)
}
func (c *MockClient) ServiceList(
ctx context.Context,
options types.ServiceListOptions,
) ([]swarm.Service, error) {
return c.ServiceListF(ctx, options)
}
func (c *MockClient) TaskList(
ctx context.Context,
options types.TaskListOptions,
) ([]swarm.Task, error) {
return c.TaskListF(ctx, options)
}
func (c *MockClient) NodeList(
ctx context.Context,
options types.NodeListOptions,
) ([]swarm.Node, error) {
return c.NodeListF(ctx, options)
}
var baseClient = MockClient{
InfoF: func(context.Context) (types.Info, error) {
return info, nil
@@ -82,15 +57,6 @@ var baseClient = MockClient{
ContainerInspectF: func(context.Context, string) (types.ContainerJSON, error) {
return containerInspect, nil
},
ServiceListF: func(context.Context, types.ServiceListOptions) ([]swarm.Service, error) {
return ServiceList, nil
},
TaskListF: func(context.Context, types.TaskListOptions) ([]swarm.Task, error) {
return TaskList, nil
},
NodeListF: func(context.Context, types.NodeListOptions) ([]swarm.Node, error) {
return NodeList, nil
},
}
func newClient(host string, tlsConfig *tls.Config) (Client, error) {
@@ -263,15 +229,6 @@ func TestDocker_WindowsMemoryContainerStats(t *testing.T) {
ContainerInspectF: func(ctx context.Context, containerID string) (types.ContainerJSON, error) {
return containerInspect, nil
},
ServiceListF: func(context.Context, types.ServiceListOptions) ([]swarm.Service, error) {
return ServiceList, nil
},
TaskListF: func(context.Context, types.TaskListOptions) ([]swarm.Task, error) {
return TaskList, nil
},
NodeListF: func(context.Context, types.NodeListOptions) ([]swarm.Node, error) {
return NodeList, nil
},
}, nil
},
}
@@ -672,42 +629,3 @@ func TestDockerGatherInfo(t *testing.T) {
},
)
}
func TestDockerGatherSwarmInfo(t *testing.T) {
var acc testutil.Accumulator
d := Docker{
newClient: newClient,
}
err := acc.GatherError(d.Gather)
require.NoError(t, err)
d.gatherSwarmInfo(&acc)
// test docker_container_net measurement
acc.AssertContainsTaggedFields(t,
"docker_swarm",
map[string]interface{}{
"tasks_running": int(2),
"tasks_desired": uint64(2),
},
map[string]string{
"service_id": "qolkls9g5iasdiuihcyz9rnx2",
"service_name": "test1",
"service_mode": "replicated",
},
)
acc.AssertContainsTaggedFields(t,
"docker_swarm",
map[string]interface{}{
"tasks_running": int(1),
"tasks_desired": int(1),
},
map[string]string{
"service_id": "qolkls9g5iasdiuihcyz9rn3",
"service_name": "test2",
"service_mode": "global",
},
)
}

View File

@@ -8,7 +8,6 @@ import (
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/registry"
"github.com/docker/docker/api/types/swarm"
)
var info = types.Info{
@@ -134,79 +133,6 @@ var containerList = []types.Container{
},
}
var two = uint64(2)
var ServiceList = []swarm.Service{
swarm.Service{
ID: "qolkls9g5iasdiuihcyz9rnx2",
Spec: swarm.ServiceSpec{
Annotations: swarm.Annotations{
Name: "test1",
},
Mode: swarm.ServiceMode{
Replicated: &swarm.ReplicatedService{
Replicas: &two,
},
},
},
},
swarm.Service{
ID: "qolkls9g5iasdiuihcyz9rn3",
Spec: swarm.ServiceSpec{
Annotations: swarm.Annotations{
Name: "test2",
},
Mode: swarm.ServiceMode{
Global: &swarm.GlobalService{},
},
},
},
}
var TaskList = []swarm.Task{
swarm.Task{
ID: "kwh0lv7hwwbh",
ServiceID: "qolkls9g5iasdiuihcyz9rnx2",
NodeID: "0cl4jturcyd1ks3fwpd010kor",
Status: swarm.TaskStatus{
State: "running",
},
DesiredState: "running",
},
swarm.Task{
ID: "u78m5ojbivc3",
ServiceID: "qolkls9g5iasdiuihcyz9rnx2",
NodeID: "0cl4jturcyd1ks3fwpd010kor",
Status: swarm.TaskStatus{
State: "running",
},
DesiredState: "running",
},
swarm.Task{
ID: "1n1uilkhr98l",
ServiceID: "qolkls9g5iasdiuihcyz9rn3",
NodeID: "0cl4jturcyd1ks3fwpd010kor",
Status: swarm.TaskStatus{
State: "running",
},
DesiredState: "running",
},
}
var NodeList = []swarm.Node{
swarm.Node{
ID: "0cl4jturcyd1ks3fwpd010kor",
Status: swarm.NodeStatus{
State: "ready",
},
},
swarm.Node{
ID: "0cl4jturcyd1ks3fwpd010kor",
Status: swarm.NodeStatus{
State: "ready",
},
},
}
func containerStats() types.ContainerStats {
var stat types.ContainerStats
jsonStat := `

View File

@@ -23,21 +23,10 @@ or [cluster-stats](https://www.elastic.co/guide/en/elasticsearch/reference/curre
## Set cluster_health to true when you want to also obtain cluster health stats
cluster_health = false
## Adjust cluster_health_level when you want to also obtain detailed health stats
## The options are
## - indices (default)
## - cluster
# cluster_health_level = "indices"
## Set cluster_stats to true when you want to also obtain cluster stats from the
## Master node.
## Set cluster_stats to true when you want to obtain cluster stats from the
## Master node.
cluster_stats = false
## node_stats is a list of sub-stats that you want to have gathered. Valid options
## are "indices", "os", "process", "jvm", "thread_pool", "fs", "transport", "http",
## "breakers". Per default, all stats are gathered.
# node_stats = ["jvm", "http"]
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"

View File

@@ -3,16 +3,17 @@ package elasticsearch
import (
"encoding/json"
"fmt"
"net/http"
"regexp"
"sync"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/inputs"
jsonparser "github.com/influxdata/telegraf/plugins/parsers/json"
"io/ioutil"
"net/http"
"regexp"
"strings"
"sync"
"time"
)
// mask for masking username/password from error messages
@@ -93,21 +94,10 @@ const sampleConfig = `
## Set cluster_health to true when you want to also obtain cluster health stats
cluster_health = false
## Adjust cluster_health_level when you want to also obtain detailed health stats
## The options are
## - indices (default)
## - cluster
# cluster_health_level = "indices"
## Set cluster_stats to true when you want to also obtain cluster stats from the
## Master node.
cluster_stats = false
## node_stats is a list of sub-stats that you want to have gathered. Valid options
## are "indices", "os", "process", "jvm", "thread_pool", "fs", "transport", "http",
## "breakers". Per default, all stats are gathered.
# node_stats = ["jvm", "http"]
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
@@ -123,9 +113,7 @@ type Elasticsearch struct {
Servers []string
HttpTimeout internal.Duration
ClusterHealth bool
ClusterHealthLevel string
ClusterStats bool
NodeStats []string
SSLCA string `toml:"ssl_ca"` // Path to CA file
SSLCert string `toml:"ssl_cert"` // Path to host cert file
SSLKey string `toml:"ssl_key"` // Path to cert key file
@@ -138,8 +126,7 @@ type Elasticsearch struct {
// NewElasticsearch return a new instance of Elasticsearch
func NewElasticsearch() *Elasticsearch {
return &Elasticsearch{
HttpTimeout: internal.Duration{Duration: time.Second * 5},
ClusterHealthLevel: "indices",
HttpTimeout: internal.Duration{Duration: time.Second * 5},
}
}
@@ -171,7 +158,12 @@ func (e *Elasticsearch) Gather(acc telegraf.Accumulator) error {
for _, serv := range e.Servers {
go func(s string, acc telegraf.Accumulator) {
defer wg.Done()
url := e.nodeStatsUrl(s)
var url string
if e.Local {
url = s + statsPathLocal
} else {
url = s + statsPath
}
e.isMaster = false
if e.ClusterStats {
@@ -190,10 +182,7 @@ func (e *Elasticsearch) Gather(acc telegraf.Accumulator) error {
}
if e.ClusterHealth {
url = s + "/_cluster/health"
if e.ClusterHealthLevel != "" {
url = url + "?level=" + e.ClusterHealthLevel
}
url = s + "/_cluster/health?level=indices"
if err := e.gatherClusterHealth(url, acc); err != nil {
acc.AddError(fmt.Errorf(mask.ReplaceAllString(err.Error(), "http(s)://XXX:XXX@")))
return
@@ -230,22 +219,6 @@ func (e *Elasticsearch) createHttpClient() (*http.Client, error) {
return client, nil
}
func (e *Elasticsearch) nodeStatsUrl(baseUrl string) string {
var url string
if e.Local {
url = baseUrl + statsPathLocal
} else {
url = baseUrl + statsPath
}
if len(e.NodeStats) == 0 {
return url
}
return fmt.Sprintf("%s/%s", url, strings.Join(e.NodeStats, ","))
}
func (e *Elasticsearch) gatherNodeStats(url string, acc telegraf.Accumulator) error {
nodeStats := &struct {
ClusterName string `json:"cluster_name"`
@@ -286,11 +259,6 @@ func (e *Elasticsearch) gatherNodeStats(url string, acc telegraf.Accumulator) er
now := time.Now()
for p, s := range stats {
// skip stat categories that are absent from the result, e.g. when
// node_stats restricts which categories are gathered
if s == nil {
continue
}
f := jsonparser.JSONFlattener{}
// parse Json, ignoring strings and bools
err := f.FlattenJSON("", s)

View File

@@ -13,16 +13,6 @@ import (
"github.com/stretchr/testify/require"
)
func defaultTags() map[string]string {
return map[string]string{
"cluster_name": "es-testcluster",
"node_attribute_master": "true",
"node_id": "SDFsfSDFsdfFSDSDfSFDSDF",
"node_name": "test.host.com",
"node_host": "test",
}
}
type transportMock struct {
statusCode int
body string
@@ -55,9 +45,15 @@ func checkIsMaster(es *Elasticsearch, expected bool, t *testing.T) {
assert.Fail(t, msg)
}
}
func checkNodeStatsResult(t *testing.T, acc *testutil.Accumulator) {
tags := defaultTags()
tags := map[string]string{
"cluster_name": "es-testcluster",
"node_attribute_master": "true",
"node_id": "SDFsfSDFsdfFSDSDfSFDSDF",
"node_name": "test.host.com",
"node_host": "test",
}
acc.AssertContainsTaggedFields(t, "elasticsearch_indices", nodestatsIndicesExpected, tags)
acc.AssertContainsTaggedFields(t, "elasticsearch_os", nodestatsOsExpected, tags)
acc.AssertContainsTaggedFields(t, "elasticsearch_process", nodestatsProcessExpected, tags)
@@ -83,31 +79,6 @@ func TestGather(t *testing.T) {
checkNodeStatsResult(t, &acc)
}
func TestGatherIndividualStats(t *testing.T) {
es := newElasticsearchWithClient()
es.Servers = []string{"http://example.com:9200"}
es.NodeStats = []string{"jvm", "process"}
es.client.Transport = newTransportMock(http.StatusOK, nodeStatsResponseJVMProcess)
var acc testutil.Accumulator
if err := acc.GatherError(es.Gather); err != nil {
t.Fatal(err)
}
checkIsMaster(es, false, t)
tags := defaultTags()
acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_indices", nodestatsIndicesExpected, tags)
acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_os", nodestatsOsExpected, tags)
acc.AssertContainsTaggedFields(t, "elasticsearch_process", nodestatsProcessExpected, tags)
acc.AssertContainsTaggedFields(t, "elasticsearch_jvm", nodestatsJvmExpected, tags)
acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_thread_pool", nodestatsThreadPoolExpected, tags)
acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_fs", nodestatsFsExpected, tags)
acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_transport", nodestatsTransportExpected, tags)
acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_http", nodestatsHttpExpected, tags)
acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_breakers", nodestatsBreakersExpected, tags)
}
func TestGatherNodeStats(t *testing.T) {
es := newElasticsearchWithClient()
es.Servers = []string{"http://example.com:9200"}
@@ -122,11 +93,10 @@ func TestGatherNodeStats(t *testing.T) {
checkNodeStatsResult(t, &acc)
}
func TestGatherClusterHealthEmptyClusterHealth(t *testing.T) {
func TestGatherClusterHealth(t *testing.T) {
es := newElasticsearchWithClient()
es.Servers = []string{"http://example.com:9200"}
es.ClusterHealth = true
es.ClusterHealthLevel = ""
es.client.Transport = newTransportMock(http.StatusOK, clusterHealthResponse)
var acc testutil.Accumulator
@@ -134,56 +104,6 @@ func TestGatherClusterHealthEmptyClusterHealth(t *testing.T) {
checkIsMaster(es, false, t)
acc.AssertContainsTaggedFields(t, "elasticsearch_cluster_health",
clusterHealthExpected,
map[string]string{"name": "elasticsearch_telegraf"})
acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_indices",
v1IndexExpected,
map[string]string{"index": "v1"})
acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_indices",
v2IndexExpected,
map[string]string{"index": "v2"})
}
func TestGatherClusterHealthSpecificClusterHealth(t *testing.T) {
es := newElasticsearchWithClient()
es.Servers = []string{"http://example.com:9200"}
es.ClusterHealth = true
es.ClusterHealthLevel = "cluster"
es.client.Transport = newTransportMock(http.StatusOK, clusterHealthResponse)
var acc testutil.Accumulator
require.NoError(t, es.gatherClusterHealth("junk", &acc))
checkIsMaster(es, false, t)
acc.AssertContainsTaggedFields(t, "elasticsearch_cluster_health",
clusterHealthExpected,
map[string]string{"name": "elasticsearch_telegraf"})
acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_indices",
v1IndexExpected,
map[string]string{"index": "v1"})
acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_indices",
v2IndexExpected,
map[string]string{"index": "v2"})
}
func TestGatherClusterHealthAlsoIndicesHealth(t *testing.T) {
es := newElasticsearchWithClient()
es.Servers = []string{"http://example.com:9200"}
es.ClusterHealth = true
es.ClusterHealthLevel = "indices"
es.client.Transport = newTransportMock(http.StatusOK, clusterHealthResponseWithIndices)
var acc testutil.Accumulator
require.NoError(t, es.gatherClusterHealth("junk", &acc))
checkIsMaster(es, false, t)
acc.AssertContainsTaggedFields(t, "elasticsearch_cluster_health",
clusterHealthExpected,
map[string]string{"name": "elasticsearch_telegraf"})
@@ -265,6 +185,7 @@ func TestGatherClusterStatsNonMaster(t *testing.T) {
// ensure flag is clear so Cluster Stats would not be done
checkIsMaster(es, false, t)
checkNodeStatsResult(t, &acc)
}
func newElasticsearchWithClient() *Elasticsearch {

View File

@@ -1,21 +1,6 @@
package elasticsearch
const clusterHealthResponse = `
{
"cluster_name": "elasticsearch_telegraf",
"status": "green",
"timed_out": false,
"number_of_nodes": 3,
"number_of_data_nodes": 3,
"active_primary_shards": 5,
"active_shards": 15,
"relocating_shards": 0,
"initializing_shards": 0,
"unassigned_shards": 0
}
`
const clusterHealthResponseWithIndices = `
{
"cluster_name": "elasticsearch_telegraf",
"status": "green",
@@ -504,100 +489,6 @@ const nodeStatsResponse = `
}
`
const nodeStatsResponseJVMProcess = `
{
"cluster_name": "es-testcluster",
"nodes": {
"SDFsfSDFsdfFSDSDfSFDSDF": {
"timestamp": 1436365550135,
"name": "test.host.com",
"transport_address": "inet[/127.0.0.1:9300]",
"host": "test",
"ip": [
"inet[/127.0.0.1:9300]",
"NONE"
],
"attributes": {
"master": "true"
},
"process": {
"timestamp": 1436460392945,
"open_file_descriptors": 160,
"cpu": {
"percent": 2,
"sys_in_millis": 1870,
"user_in_millis": 13610,
"total_in_millis": 15480
},
"mem": {
"total_virtual_in_bytes": 4747890688
}
},
"jvm": {
"timestamp": 1436460392945,
"uptime_in_millis": 202245,
"mem": {
"heap_used_in_bytes": 52709568,
"heap_used_percent": 5,
"heap_committed_in_bytes": 259522560,
"heap_max_in_bytes": 1038876672,
"non_heap_used_in_bytes": 39634576,
"non_heap_committed_in_bytes": 40841216,
"pools": {
"young": {
"used_in_bytes": 32685760,
"max_in_bytes": 279183360,
"peak_used_in_bytes": 71630848,
"peak_max_in_bytes": 279183360
},
"survivor": {
"used_in_bytes": 8912880,
"max_in_bytes": 34865152,
"peak_used_in_bytes": 8912888,
"peak_max_in_bytes": 34865152
},
"old": {
"used_in_bytes": 11110928,
"max_in_bytes": 724828160,
"peak_used_in_bytes": 14354608,
"peak_max_in_bytes": 724828160
}
}
},
"threads": {
"count": 44,
"peak_count": 45
},
"gc": {
"collectors": {
"young": {
"collection_count": 2,
"collection_time_in_millis": 98
},
"old": {
"collection_count": 1,
"collection_time_in_millis": 24
}
}
},
"buffer_pools": {
"direct": {
"count": 40,
"used_in_bytes": 6304239,
"total_capacity_in_bytes": 6304239
},
"mapped": {
"count": 0,
"used_in_bytes": 0,
"total_capacity_in_bytes": 0
}
}
}
}
}
}
`
var nodestatsIndicesExpected = map[string]interface{}{
"id_cache_memory_size_in_bytes": float64(0),
"completion_size_in_bytes": float64(0),

View File

@@ -20,7 +20,6 @@ The filestat plugin gathers metrics about file existence, size, and other stats.
- filestat
- exists (int, 0 | 1)
- size_bytes (int, bytes)
- modification_time (int, unixtime)
- md5 (optional, string)
### Tags:
@@ -33,6 +32,6 @@ The filestat plugin gathers metrics about file existence, size, and other stats.
```
$ telegraf --config /etc/telegraf/telegraf.conf --input-filter filestat --test
* Plugin: filestat, Collection 1
> filestat,file=/tmp/foo/bar,host=tyrion exists=0i 1507218518192154351
> filestat,file=/Users/sparrc/ws/telegraf.conf,host=tyrion exists=1i,size=47894i,modification_time=1507152973123456789i 1507218518192154351
> filestat,file=/tmp/foo/bar,host=tyrion exists=0i 1461203374493128216
> filestat,file=/Users/sparrc/ws/telegraf.conf,host=tyrion exists=1i,size=47894i 1461203374493199335
```
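For reference, a minimal configuration that would produce output like the above (the file paths are taken from the sample output and are only illustrative) is sketched below:

```toml
[[inputs.filestat]]
  ## Files to gather stats about; glob patterns are supported.
  files = ["/tmp/foo/bar", "/Users/sparrc/ws/telegraf.conf"]
  ## Read and report the md5 sum of each file (optional).
  md5 = false
```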

View File

@@ -86,7 +86,6 @@ func (f *FileStat) Gather(acc telegraf.Accumulator) error {
fileName)
} else {
fields["size_bytes"] = fileInfo.Size()
fields["modification_time"] = fileInfo.ModTime().UnixNano()
}
if f.Md5 {

View File

@@ -5,8 +5,6 @@ import (
"strings"
"testing"
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
)
@@ -26,19 +24,28 @@ func TestGatherNoMd5(t *testing.T) {
tags1 := map[string]string{
"file": dir + "log1.log",
}
require.True(t, acc.HasPoint("filestat", tags1, "size_bytes", int64(0)))
require.True(t, acc.HasPoint("filestat", tags1, "exists", int64(1)))
fields1 := map[string]interface{}{
"size_bytes": int64(0),
"exists": int64(1),
}
acc.AssertContainsTaggedFields(t, "filestat", fields1, tags1)
tags2 := map[string]string{
"file": dir + "log2.log",
}
require.True(t, acc.HasPoint("filestat", tags2, "size_bytes", int64(0)))
require.True(t, acc.HasPoint("filestat", tags2, "exists", int64(1)))
fields2 := map[string]interface{}{
"size_bytes": int64(0),
"exists": int64(1),
}
acc.AssertContainsTaggedFields(t, "filestat", fields2, tags2)
tags3 := map[string]string{
"file": "/non/existant/file",
}
require.True(t, acc.HasPoint("filestat", tags3, "exists", int64(0)))
fields3 := map[string]interface{}{
"exists": int64(0),
}
acc.AssertContainsTaggedFields(t, "filestat", fields3, tags3)
}
func TestGatherExplicitFiles(t *testing.T) {
@@ -57,21 +64,30 @@ func TestGatherExplicitFiles(t *testing.T) {
tags1 := map[string]string{
"file": dir + "log1.log",
}
require.True(t, acc.HasPoint("filestat", tags1, "size_bytes", int64(0)))
require.True(t, acc.HasPoint("filestat", tags1, "exists", int64(1)))
require.True(t, acc.HasPoint("filestat", tags1, "md5_sum", "d41d8cd98f00b204e9800998ecf8427e"))
fields1 := map[string]interface{}{
"size_bytes": int64(0),
"exists": int64(1),
"md5_sum": "d41d8cd98f00b204e9800998ecf8427e",
}
acc.AssertContainsTaggedFields(t, "filestat", fields1, tags1)
tags2 := map[string]string{
"file": dir + "log2.log",
}
require.True(t, acc.HasPoint("filestat", tags2, "size_bytes", int64(0)))
require.True(t, acc.HasPoint("filestat", tags2, "exists", int64(1)))
require.True(t, acc.HasPoint("filestat", tags2, "md5_sum", "d41d8cd98f00b204e9800998ecf8427e"))
fields2 := map[string]interface{}{
"size_bytes": int64(0),
"exists": int64(1),
"md5_sum": "d41d8cd98f00b204e9800998ecf8427e",
}
acc.AssertContainsTaggedFields(t, "filestat", fields2, tags2)
tags3 := map[string]string{
"file": "/non/existant/file",
}
require.True(t, acc.HasPoint("filestat", tags3, "exists", int64(0)))
fields3 := map[string]interface{}{
"exists": int64(0),
}
acc.AssertContainsTaggedFields(t, "filestat", fields3, tags3)
}
func TestGatherGlob(t *testing.T) {
@@ -88,16 +104,22 @@ func TestGatherGlob(t *testing.T) {
tags1 := map[string]string{
"file": dir + "log1.log",
}
require.True(t, acc.HasPoint("filestat", tags1, "size_bytes", int64(0)))
require.True(t, acc.HasPoint("filestat", tags1, "exists", int64(1)))
require.True(t, acc.HasPoint("filestat", tags1, "md5_sum", "d41d8cd98f00b204e9800998ecf8427e"))
fields1 := map[string]interface{}{
"size_bytes": int64(0),
"exists": int64(1),
"md5_sum": "d41d8cd98f00b204e9800998ecf8427e",
}
acc.AssertContainsTaggedFields(t, "filestat", fields1, tags1)
tags2 := map[string]string{
"file": dir + "log2.log",
}
require.True(t, acc.HasPoint("filestat", tags2, "size_bytes", int64(0)))
require.True(t, acc.HasPoint("filestat", tags2, "exists", int64(1)))
require.True(t, acc.HasPoint("filestat", tags2, "md5_sum", "d41d8cd98f00b204e9800998ecf8427e"))
fields2 := map[string]interface{}{
"size_bytes": int64(0),
"exists": int64(1),
"md5_sum": "d41d8cd98f00b204e9800998ecf8427e",
}
acc.AssertContainsTaggedFields(t, "filestat", fields2, tags2)
}
func TestGatherSuperAsterisk(t *testing.T) {
@@ -114,57 +136,32 @@ func TestGatherSuperAsterisk(t *testing.T) {
tags1 := map[string]string{
"file": dir + "log1.log",
}
require.True(t, acc.HasPoint("filestat", tags1, "size_bytes", int64(0)))
require.True(t, acc.HasPoint("filestat", tags1, "exists", int64(1)))
require.True(t, acc.HasPoint("filestat", tags1, "md5_sum", "d41d8cd98f00b204e9800998ecf8427e"))
fields1 := map[string]interface{}{
"size_bytes": int64(0),
"exists": int64(1),
"md5_sum": "d41d8cd98f00b204e9800998ecf8427e",
}
acc.AssertContainsTaggedFields(t, "filestat", fields1, tags1)
tags2 := map[string]string{
"file": dir + "log2.log",
}
require.True(t, acc.HasPoint("filestat", tags2, "size_bytes", int64(0)))
require.True(t, acc.HasPoint("filestat", tags2, "exists", int64(1)))
require.True(t, acc.HasPoint("filestat", tags2, "md5_sum", "d41d8cd98f00b204e9800998ecf8427e"))
fields2 := map[string]interface{}{
"size_bytes": int64(0),
"exists": int64(1),
"md5_sum": "d41d8cd98f00b204e9800998ecf8427e",
}
acc.AssertContainsTaggedFields(t, "filestat", fields2, tags2)
tags3 := map[string]string{
"file": dir + "test.conf",
}
require.True(t, acc.HasPoint("filestat", tags3, "size_bytes", int64(104)))
require.True(t, acc.HasPoint("filestat", tags3, "exists", int64(1)))
require.True(t, acc.HasPoint("filestat", tags3, "md5_sum", "5a7e9b77fa25e7bb411dbd17cf403c1f"))
}
func TestModificationTime(t *testing.T) {
dir := getTestdataDir()
fs := NewFileStat()
fs.Files = []string{
dir + "log1.log",
fields3 := map[string]interface{}{
"size_bytes": int64(104),
"exists": int64(1),
"md5_sum": "5a7e9b77fa25e7bb411dbd17cf403c1f",
}
acc := testutil.Accumulator{}
acc.GatherError(fs.Gather)
tags1 := map[string]string{
"file": dir + "log1.log",
}
require.True(t, acc.HasPoint("filestat", tags1, "size_bytes", int64(0)))
require.True(t, acc.HasPoint("filestat", tags1, "exists", int64(1)))
require.True(t, acc.HasInt64Field("filestat", "modification_time"))
}
func TestNoModificationTime(t *testing.T) {
fs := NewFileStat()
fs.Files = []string{
"/non/existant/file",
}
acc := testutil.Accumulator{}
acc.GatherError(fs.Gather)
tags1 := map[string]string{
"file": "/non/existant/file",
}
require.True(t, acc.HasPoint("filestat", tags1, "exists", int64(0)))
require.False(t, acc.HasInt64Field("filestat", "modification_time"))
acc.AssertContainsTaggedFields(t, "filestat", fields3, tags3)
}
func TestGetMd5(t *testing.T) {

View File

@@ -22,11 +22,11 @@ example configuration with `@id` parameter for http plugin:
[[inputs.fluentd]]
## This plugin reads information exposed by fluentd (using /api/plugins.json endpoint).
##
## Endpoint:
## Endpoint:
## - only one URI is allowed
## - https is not supported
endpoint = "http://localhost:24220/api/plugins.json"
## Define which plugins have to be excluded (based on "type" field - e.g. monitor_agent)
exclude = [
"monitor_agent",

View File

@@ -18,11 +18,11 @@ const (
sampleConfig = `
## This plugin reads information exposed by fluentd (using /api/plugins.json endpoint).
##
## Endpoint:
## Endpoint:
## - only one URI is allowed
## - https is not supported
endpoint = "http://localhost:24220/api/plugins.json"
## Define which plugins have to be excluded (based on "type" field - e.g. monitor_agent)
exclude = [
"monitor_agent",

View File

@@ -8,10 +8,6 @@ The `/write` endpoint supports the `precision` query parameter and can be set to
When chaining Telegraf instances using this plugin, CREATE DATABASE requests receive a 200 OK response with message body `{"results":[]}` but they are not relayed. The output configuration of the Telegraf instance which ultimately submits data to InfluxDB determines the destination database.
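As a sketch of such a chain, the upstream instance only needs an InfluxDB output pointed at this listener; the address and database below are assumptions for illustration, not values from this document:

```toml
# Upstream Telegraf: relay metrics to a downstream http_listener
[[outputs.influxdb]]
  urls = ["http://downstream-host:8186"]
  ## This value is not what ultimately matters; the final instance's
  ## output configuration determines the destination database.
  database = "telegraf"
```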
Enable TLS by specifying the file names of a service TLS certificate and key.
Enable mutually authenticated TLS and restrict client connections to those presenting certificates signed by a trusted certificate authority by listing the allowed CA certificate file names in `tls_allowed_cacerts`.
See: [Telegraf Input Data Formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#influx).
**Example:**
@@ -32,11 +28,4 @@ This is a sample configuration for the plugin.
## timeouts
read_timeout = "10s"
write_timeout = "10s"
## HTTPS
tls_cert= "/etc/telegraf/cert.pem"
tls_key = "/etc/telegraf/key.pem"
## MTLS
tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
```

View File

@@ -3,10 +3,7 @@ package http_listener
import (
"bytes"
"compress/gzip"
"crypto/tls"
"crypto/x509"
"io"
"io/ioutil"
"log"
"net"
"net/http"
@@ -40,10 +37,6 @@ type HTTPListener struct {
MaxLineSize int
Port int
TlsAllowedCacerts []string
TlsCert string
TlsKey string
mu sync.Mutex
wg sync.WaitGroup
@@ -82,14 +75,6 @@ const sampleConfig = `
## Maximum line size allowed to be sent in bytes.
## 0 means to use the default of 65536 bytes (64 kibibytes)
max_line_size = 0
## Set one or more allowed client CA certificate file names to
## enable mutually authenticated TLS connections
tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
## Add service certificate and key
tls_cert = "/etc/telegraf/cert.pem"
tls_key = "/etc/telegraf/key.pem"
`
func (h *HTTPListener) SampleConfig() string {
@@ -132,33 +117,10 @@ func (h *HTTPListener) Start(acc telegraf.Accumulator) error {
h.MaxLineSize = DEFAULT_MAX_LINE_SIZE
}
if h.ReadTimeout.Duration < time.Second {
h.ReadTimeout.Duration = time.Second * 10
}
if h.WriteTimeout.Duration < time.Second {
h.WriteTimeout.Duration = time.Second * 10
}
h.acc = acc
h.pool = NewPool(200, h.MaxLineSize)
tlsConf := h.getTLSConfig()
server := &http.Server{
Addr: h.ServiceAddress,
Handler: h,
ReadTimeout: h.ReadTimeout.Duration,
WriteTimeout: h.WriteTimeout.Duration,
TLSConfig: tlsConf,
}
var err error
var listener net.Listener
if tlsConf != nil {
listener, err = tls.Listen("tcp", h.ServiceAddress, tlsConf)
} else {
listener, err = net.Listen("tcp", h.ServiceAddress)
}
var listener, err = net.Listen("tcp", h.ServiceAddress)
if err != nil {
return err
}
@@ -168,7 +130,7 @@ func (h *HTTPListener) Start(acc telegraf.Accumulator) error {
h.wg.Add(1)
go func() {
defer h.wg.Done()
server.Serve(h.listener)
h.httpListen()
}()
log.Printf("I! Started HTTP listener service on %s\n", h.ServiceAddress)
@@ -187,6 +149,27 @@ func (h *HTTPListener) Stop() {
log.Println("I! Stopped HTTP listener service on ", h.ServiceAddress)
}
// httpListen sets up an http.Server and calls server.Serve.
// like server.Serve, httpListen will always return a non-nil error, for this
// reason, the error returned should probably be ignored.
// see https://golang.org/pkg/net/http/#Server.Serve
func (h *HTTPListener) httpListen() error {
if h.ReadTimeout.Duration < time.Second {
h.ReadTimeout.Duration = time.Second * 10
}
if h.WriteTimeout.Duration < time.Second {
h.WriteTimeout.Duration = time.Second * 10
}
var server = http.Server{
Handler: h,
ReadTimeout: h.ReadTimeout.Duration,
WriteTimeout: h.WriteTimeout.Duration,
}
return server.Serve(h.listener)
}
func (h *HTTPListener) ServeHTTP(res http.ResponseWriter, req *http.Request) {
h.RequestsRecv.Incr(1)
defer h.RequestsServed.Incr(1)
@@ -344,38 +327,6 @@ func badRequest(res http.ResponseWriter) {
res.Write([]byte(`{"error":"http: bad request"}`))
}
func (h *HTTPListener) getTLSConfig() *tls.Config {
tlsConf := &tls.Config{
InsecureSkipVerify: false,
Renegotiation: tls.RenegotiateNever,
}
if len(h.TlsCert) == 0 || len(h.TlsKey) == 0 {
return nil
}
cert, err := tls.LoadX509KeyPair(h.TlsCert, h.TlsKey)
if err != nil {
return nil
}
tlsConf.Certificates = []tls.Certificate{cert}
if h.TlsAllowedCacerts != nil {
tlsConf.ClientAuth = tls.RequireAndVerifyClientCert
clientPool := x509.NewCertPool()
for _, ca := range h.TlsAllowedCacerts {
c, err := ioutil.ReadFile(ca)
if err != nil {
continue
}
clientPool.AppendCertsFromPEM(c)
}
tlsConf.ClientCAs = clientPool
}
return tlsConf
}
func init() {
inputs.Add("http_listener", func() telegraf.Input {
return &HTTPListener{

View File

@@ -2,9 +2,6 @@ package http_listener
import (
"bytes"
"crypto/tls"
"crypto/x509"
"io"
"io/ioutil"
"net/http"
"net/url"
@@ -32,84 +29,6 @@ cpu_load_short,host=server06 value=12.0 1422568543702900257
badMsg = "blahblahblah: 42\n"
emptyMsg = ""
serviceRootPEM = `-----BEGIN CERTIFICATE-----
MIIBxzCCATCgAwIBAgIJAJb7HqN2BzWWMA0GCSqGSIb3DQEBCwUAMBYxFDASBgNV
BAMMC1RlbGVncmFmIENBMB4XDTE3MTEwNDA0MzEwN1oXDTI3MTEwMjA0MzEwN1ow
FjEUMBIGA1UEAwwLVGVsZWdyYWYgQ0EwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJ
AoGBANbkUkK6JQC3rbLcXhLJTS9SX6uXyFwl7bUfpAN5Hm5EqfvG3PnLrogfTGLr
Tq5CRAu/gbbdcMoL9TLv/aaDVnrpV0FslKhqYmkOgT28bdmA7Qtr539aQpMKCfcW
WCnoMcBD5u5h9MsRqpdq+0Mjlsf1H2hSf07jHk5R1T4l8RMXAgMBAAGjHTAbMAwG
A1UdEwQFMAMBAf8wCwYDVR0PBAQDAgEGMA0GCSqGSIb3DQEBCwUAA4GBANSrwvpU
t8ihIhpHqgJZ34DM92CZZ3ZHmH/KyqlnuGzjjpnVZiXVrLDTOzrA0ziVhmefY29w
roHjENbFm54HW97ogxeURuO8HRHIVh2U0rkyVxOfGZiUdINHqsZdSnDY07bzCtSr
Z/KsfWXM5llD1Ig1FyBHpKjyUvfzr73sjm/4
-----END CERTIFICATE-----`
serviceCertPEM = `-----BEGIN CERTIFICATE-----
MIIBzzCCATigAwIBAgIBATANBgkqhkiG9w0BAQsFADAWMRQwEgYDVQQDDAtUZWxl
Z3JhZiBDQTAeFw0xNzExMDQwNDMxMDdaFw0yNzExMDIwNDMxMDdaMBQxEjAQBgNV
BAMMCWxvY2FsaG9zdDCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAsJRss1af
XKrcIjQoAp2kdJIpT2Ya+MRQXJ18b0PP7szh2lisY11kd/HCkd4D4efuIkpszHaN
xwyTOZLOoplxp6fizzgOYjXsJ6SzbO1MQNmq8Ch/+uKiGgFwLX+YxOOsGSDIHNhF
vcBi93cQtCWPBFz6QRQf9yfIAA5KKxUfJcMCAwEAAaMvMC0wCQYDVR0TBAIwADAL
BgNVHQ8EBAMCBSAwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDQYJKoZIhvcNAQELBQAD
gYEAiC3WI4y9vfYz53gw7FKnNK7BBdwRc43x7Pd+5J/cclWyUZPdmcj1UNmv/3rj
2qcMmX06UdgPoHppzNAJePvMVk0vjMBUe9MmYlafMz0h4ma/it5iuldXwmejFcdL
6wWQp7gVTileCEmq9sNvfQN1FmT3EWf4IMdO2MNat/1If0g=
-----END CERTIFICATE-----`
serviceKeyPEM = `-----BEGIN RSA PRIVATE KEY-----
MIICXQIBAAKBgQCwlGyzVp9cqtwiNCgCnaR0kilPZhr4xFBcnXxvQ8/uzOHaWKxj
XWR38cKR3gPh5+4iSmzMdo3HDJM5ks6imXGnp+LPOA5iNewnpLNs7UxA2arwKH/6
4qIaAXAtf5jE46wZIMgc2EW9wGL3dxC0JY8EXPpBFB/3J8gADkorFR8lwwIDAQAB
AoGBAJaFHxfMmjHK77U0UnrQWFSKFy64cftmlL4t/Nl3q7L68PdIKULWZIMeEWZ4
I0UZiFOwr4em83oejQ1ByGSwekEuiWaKUI85IaHfcbt+ogp9hY/XbOEo56OPQUAd
bEZv1JqJOqta9Ug1/E1P9LjEEyZ5F5ubx7813rxAE31qKtKJAkEA1zaMlCWIr+Rj
hGvzv5rlHH3wbOB4kQFXO4nqj3J/ttzR5QiJW24STMDcbNngFlVcDVju56LrNTiD
dPh9qvl7nwJBANILguR4u33OMksEZTYB7nQZSurqXsq6382zH7pTl29ANQTROHaM
PKC8dnDWq8RGTqKuvWblIzzGIKqIMovZo10CQC96T0UXirITFolOL3XjvAuvFO1Q
EAkdXJs77805m0dCK+P1IChVfiAEpBw3bKJArpAbQIlFfdI953JUp5SieU0CQEub
BSSEKMjh/cxu6peEHnb/262vayuCFKkQPu1sxWewLuVrAe36EKCy9dcsDmv5+rgo
Odjdxc9Madm4aKlaT6kCQQCpAgeblDrrxTrNQ+Typzo37PlnQrvI+0EceAUuJ72G
P0a+YZUeHNRqT2pPN9lMTAZGGi3CtcF2XScbLNEBeXge
-----END RSA PRIVATE KEY-----`
clientRootPEM = serviceRootPEM
clientCertPEM = `-----BEGIN CERTIFICATE-----
MIIBzjCCATegAwIBAgIBAjANBgkqhkiG9w0BAQsFADAWMRQwEgYDVQQDDAtUZWxl
Z3JhZiBDQTAeFw0xNzExMDQwNDMxMDdaFw0yNzExMDIwNDMxMDdaMBMxETAPBgNV
BAMMCHRlbGVncmFmMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDP2IMqyOqI
sJjwBprrz8WPzmlrpyYikQ4XSCSJB3DSTIO+igqMpBUTj3vLlOzsHfVVot1WRqc6
3esM4JE92rc6S73xi4g8L/r8cPIHW4hvFJdMti4UkJBWim8ArSbFqnZjcR19G3tG
LUOiXAUG3nWzMzoEsPruvV1dkKRbJVE4MwIDAQABoy8wLTAJBgNVHRMEAjAAMAsG
A1UdDwQEAwIHgDATBgNVHSUEDDAKBggrBgEFBQcDAjANBgkqhkiG9w0BAQsFAAOB
gQCHxMk38XNxL9nPFBYo3JqITJCFswu6/NLHwDBXCuZKl53rUuFWduiO+1OuScKQ
sQ79W0jHsWRKGOUFrF5/Gdnh8AlkVaITVlcmhdAOFCEbeGpeEvLuuK6grckPitxy
bRF5oM4TCLKKAha60Ir41rk2bomZM9+NZu+Bm+csDqCoxQ==
-----END CERTIFICATE-----`
clientKeyPEM = `-----BEGIN RSA PRIVATE KEY-----
MIICXAIBAAKBgQDP2IMqyOqIsJjwBprrz8WPzmlrpyYikQ4XSCSJB3DSTIO+igqM
pBUTj3vLlOzsHfVVot1WRqc63esM4JE92rc6S73xi4g8L/r8cPIHW4hvFJdMti4U
kJBWim8ArSbFqnZjcR19G3tGLUOiXAUG3nWzMzoEsPruvV1dkKRbJVE4MwIDAQAB
AoGAFzb/r4+xYoMXEfgq5ZvXXTCY5cVNpR6+jCsqqYODPnn9XRLeCsdo8z5bfWms
7NKLzHzca/6IPzL6Rf3vOxFq1YyIZfYVHH+d63/9blAm3Iajjp1W2yW5aj9BJjTb
nm6F0RfuW/SjrZ9IXxTZhSpCklPmUzVZpzvwV3KGeVTVCEECQQDoavCeOwLuqDpt
0aM9GMFUpOU7kLPDuicSwCDaTae4kN2rS17Zki41YXe8A8+509IEN7mK09Vq9HxY
SX6EmV1FAkEA5O9QcCHEa8P12EmUC8oqD2bjq6o7JjUIRlKinwZTlooMJYZw98gA
FVSngTUvLVCVIvSdjldXPOGgfYiccTZrFwJAfHS3gKOtAEuJbkEyHodhD4h1UB4+
hPLr9Xh4ny2yQH0ilpV3px5GLEOTMFUCKUoqTiPg8VxaDjn5U/WXED5n2QJAR4J1
NsFlcGACj+/TvacFYlA6N2nyFeokzoqLX28Ddxdh2erXqJ4hYIhT1ik9tkLggs2z
1T1084BquCuO6lIcOwJBALX4xChoMUF9k0IxSQzlz//seQYDkQNsE7y9IgAOXkzp
RaR4pzgPbnKj7atG+2dBnffWfE+1Mcy0INDAO6WxPg0=
-----END RSA PRIVATE KEY-----`
)
var (
initClient sync.Once
client *http.Client
initServiceCertFiles sync.Once
allowedCAFiles []string
serviceCAFiles []string
serviceCertFile string
serviceKeyFile string
)
func newTestHTTPListener() *HTTPListener {
@@ -119,79 +38,9 @@ func newTestHTTPListener() *HTTPListener {
return listener
}
func newTestHTTPSListener() *HTTPListener {
initServiceCertFiles.Do(func() {
acaf, err := ioutil.TempFile("", "allowedCAFile.crt")
if err != nil {
panic(err)
}
defer acaf.Close()
_, err = io.Copy(acaf, bytes.NewReader([]byte(clientRootPEM)))
allowedCAFiles = []string{acaf.Name()}
scaf, err := ioutil.TempFile("", "serviceCAFile.crt")
if err != nil {
panic(err)
}
defer scaf.Close()
_, err = io.Copy(scaf, bytes.NewReader([]byte(serviceRootPEM)))
serviceCAFiles = []string{scaf.Name()}
scf, err := ioutil.TempFile("", "serviceCertFile.crt")
if err != nil {
panic(err)
}
defer scf.Close()
_, err = io.Copy(scf, bytes.NewReader([]byte(serviceCertPEM)))
serviceCertFile = scf.Name()
skf, err := ioutil.TempFile("", "serviceKeyFile.crt")
if err != nil {
panic(err)
}
defer skf.Close()
_, err = io.Copy(skf, bytes.NewReader([]byte(serviceKeyPEM)))
serviceKeyFile = skf.Name()
})
listener := &HTTPListener{
ServiceAddress: ":0",
TlsAllowedCacerts: allowedCAFiles,
TlsCert: serviceCertFile,
TlsKey: serviceKeyFile,
}
return listener
}
func getHTTPSClient() *http.Client {
initClient.Do(func() {
cas := x509.NewCertPool()
cas.AppendCertsFromPEM([]byte(serviceRootPEM))
clientCert, err := tls.X509KeyPair([]byte(clientCertPEM), []byte(clientKeyPEM))
if err != nil {
panic(err)
}
client = &http.Client{
Transport: &http.Transport{
TLSClientConfig: &tls.Config{
RootCAs: cas,
Certificates: []tls.Certificate{clientCert},
MinVersion: tls.VersionTLS12,
MaxVersion: tls.VersionTLS12,
CipherSuites: []uint16{tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256},
Renegotiation: tls.RenegotiateNever,
InsecureSkipVerify: false,
},
},
}
})
return client
}
func createURL(listener *HTTPListener, scheme string, path string, rawquery string) string {
func createURL(listener *HTTPListener, path string, rawquery string) string {
u := url.URL{
Scheme: scheme,
Scheme: "http",
Host: "localhost:" + strconv.Itoa(listener.Port),
Path: path,
RawQuery: rawquery,
@@ -199,45 +48,6 @@ func createURL(listener *HTTPListener, scheme string, path string, rawquery stri
return u.String()
}
func TestWriteHTTPSNoClientAuth(t *testing.T) {
listener := newTestHTTPSListener()
listener.TlsAllowedCacerts = nil
acc := &testutil.Accumulator{}
require.NoError(t, listener.Start(acc))
defer listener.Stop()
cas := x509.NewCertPool()
cas.AppendCertsFromPEM([]byte(serviceRootPEM))
noClientAuthClient := &http.Client{
Transport: &http.Transport{
TLSClientConfig: &tls.Config{
RootCAs: cas,
},
},
}
// post single message to listener
resp, err := noClientAuthClient.Post(createURL(listener, "https", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsg)))
require.NoError(t, err)
resp.Body.Close()
require.EqualValues(t, 204, resp.StatusCode)
}
func TestWriteHTTPSWithClientAuth(t *testing.T) {
listener := newTestHTTPSListener()
acc := &testutil.Accumulator{}
require.NoError(t, listener.Start(acc))
defer listener.Stop()
// post single message to listener
resp, err := getHTTPSClient().Post(createURL(listener, "https", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsg)))
require.NoError(t, err)
resp.Body.Close()
require.EqualValues(t, 204, resp.StatusCode)
}
func TestWriteHTTP(t *testing.T) {
listener := newTestHTTPListener()
@@ -246,9 +56,8 @@ func TestWriteHTTP(t *testing.T) {
defer listener.Stop()
// post single message to listener
resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsg)))
resp, err := http.Post(createURL(listener, "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsg)))
require.NoError(t, err)
resp.Body.Close()
require.EqualValues(t, 204, resp.StatusCode)
acc.Wait(1)
@@ -258,9 +67,8 @@ func TestWriteHTTP(t *testing.T) {
)
// post multiple message to listener
resp, err = http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgs)))
resp, err = http.Post(createURL(listener, "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgs)))
require.NoError(t, err)
resp.Body.Close()
require.EqualValues(t, 204, resp.StatusCode)
acc.Wait(2)
@@ -274,9 +82,8 @@ func TestWriteHTTP(t *testing.T) {
}
// Post a gigantic metric to the listener and verify that an error is returned:
resp, err = http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(hugeMetric)))
resp, err = http.Post(createURL(listener, "/write", "db=mydb"), "", bytes.NewBuffer([]byte(hugeMetric)))
require.NoError(t, err)
resp.Body.Close()
require.EqualValues(t, 400, resp.StatusCode)
acc.Wait(3)
@@ -295,9 +102,8 @@ func TestWriteHTTPNoNewline(t *testing.T) {
defer listener.Stop()
// post single message to listener
resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgNoNewline)))
resp, err := http.Post(createURL(listener, "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgNoNewline)))
require.NoError(t, err)
resp.Body.Close()
require.EqualValues(t, 204, resp.StatusCode)
acc.Wait(1)
@@ -318,9 +124,8 @@ func TestWriteHTTPMaxLineSizeIncrease(t *testing.T) {
defer listener.Stop()
// Post a gigantic metric to the listener and verify that it writes OK this time:
resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(hugeMetric)))
resp, err := http.Post(createURL(listener, "/write", "db=mydb"), "", bytes.NewBuffer([]byte(hugeMetric)))
require.NoError(t, err)
resp.Body.Close()
require.EqualValues(t, 204, resp.StatusCode)
}
@@ -334,9 +139,8 @@ func TestWriteHTTPVerySmallMaxBody(t *testing.T) {
require.NoError(t, listener.Start(acc))
defer listener.Stop()
resp, err := http.Post(createURL(listener, "http", "/write", ""), "", bytes.NewBuffer([]byte(hugeMetric)))
resp, err := http.Post(createURL(listener, "/write", ""), "", bytes.NewBuffer([]byte(hugeMetric)))
require.NoError(t, err)
resp.Body.Close()
require.EqualValues(t, 413, resp.StatusCode)
}
@@ -350,9 +154,8 @@ func TestWriteHTTPVerySmallMaxLineSize(t *testing.T) {
require.NoError(t, listener.Start(acc))
defer listener.Stop()
resp, err := http.Post(createURL(listener, "http", "/write", ""), "", bytes.NewBuffer([]byte(testMsgs)))
resp, err := http.Post(createURL(listener, "/write", ""), "", bytes.NewBuffer([]byte(testMsgs)))
require.NoError(t, err)
resp.Body.Close()
require.EqualValues(t, 204, resp.StatusCode)
hostTags := []string{"server02", "server03",
@@ -376,9 +179,8 @@ func TestWriteHTTPLargeLinesSkipped(t *testing.T) {
require.NoError(t, listener.Start(acc))
defer listener.Stop()
resp, err := http.Post(createURL(listener, "http", "/write", ""), "", bytes.NewBuffer([]byte(hugeMetric+testMsgs)))
resp, err := http.Post(createURL(listener, "/write", ""), "", bytes.NewBuffer([]byte(hugeMetric+testMsgs)))
require.NoError(t, err)
resp.Body.Close()
require.EqualValues(t, 400, resp.StatusCode)
hostTags := []string{"server02", "server03",
@@ -403,7 +205,7 @@ func TestWriteHTTPGzippedData(t *testing.T) {
data, err := ioutil.ReadFile("./testdata/testmsgs.gz")
require.NoError(t, err)
req, err := http.NewRequest("POST", createURL(listener, "http", "/write", ""), bytes.NewBuffer(data))
req, err := http.NewRequest("POST", createURL(listener, "/write", ""), bytes.NewBuffer(data))
require.NoError(t, err)
req.Header.Set("Content-Encoding", "gzip")
@@ -438,9 +240,8 @@ func TestWriteHTTPHighTraffic(t *testing.T) {
go func(innerwg *sync.WaitGroup) {
defer innerwg.Done()
for i := 0; i < 500; i++ {
resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgs)))
resp, err := http.Post(createURL(listener, "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgs)))
require.NoError(t, err)
resp.Body.Close()
require.EqualValues(t, 204, resp.StatusCode)
}
}(&wg)
@@ -461,9 +262,8 @@ func TestReceive404ForInvalidEndpoint(t *testing.T) {
defer listener.Stop()
// post single message to listener
resp, err := http.Post(createURL(listener, "http", "/foobar", ""), "", bytes.NewBuffer([]byte(testMsg)))
resp, err := http.Post(createURL(listener, "/foobar", ""), "", bytes.NewBuffer([]byte(testMsg)))
require.NoError(t, err)
resp.Body.Close()
require.EqualValues(t, 404, resp.StatusCode)
}
@@ -475,9 +275,8 @@ func TestWriteHTTPInvalid(t *testing.T) {
defer listener.Stop()
// post single message to listener
resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(badMsg)))
resp, err := http.Post(createURL(listener, "/write", "db=mydb"), "", bytes.NewBuffer([]byte(badMsg)))
require.NoError(t, err)
resp.Body.Close()
require.EqualValues(t, 400, resp.StatusCode)
}
@@ -489,9 +288,8 @@ func TestWriteHTTPEmpty(t *testing.T) {
defer listener.Stop()
// post single message to listener
resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(emptyMsg)))
resp, err := http.Post(createURL(listener, "/write", "db=mydb"), "", bytes.NewBuffer([]byte(emptyMsg)))
require.NoError(t, err)
resp.Body.Close()
require.EqualValues(t, 204, resp.StatusCode)
}
@@ -504,14 +302,13 @@ func TestQueryAndPingHTTP(t *testing.T) {
// post query to listener
resp, err := http.Post(
createURL(listener, "http", "/query", "db=&q=CREATE+DATABASE+IF+NOT+EXISTS+%22mydb%22"), "", nil)
createURL(listener, "/query", "db=&q=CREATE+DATABASE+IF+NOT+EXISTS+%22mydb%22"), "", nil)
require.NoError(t, err)
require.EqualValues(t, 200, resp.StatusCode)
// post ping to listener
resp, err = http.Post(createURL(listener, "http", "/ping", ""), "", nil)
resp, err = http.Post(createURL(listener, "/ping", ""), "", nil)
require.NoError(t, err)
resp.Body.Close()
require.EqualValues(t, 204, resp.StatusCode)
}
@@ -524,9 +321,8 @@ func TestWriteWithPrecision(t *testing.T) {
msg := "xyzzy value=42 1422568543\n"
resp, err := http.Post(
createURL(listener, "http", "/write", "precision=s"), "", bytes.NewBuffer([]byte(msg)))
createURL(listener, "/write", "precision=s"), "", bytes.NewBuffer([]byte(msg)))
require.NoError(t, err)
resp.Body.Close()
require.EqualValues(t, 204, resp.StatusCode)
acc.Wait(1)

View File

@@ -1,7 +1,8 @@
# IPMI Sensor Input Plugin
# Telegraf ipmi plugin
Get bare metal metrics using the command line utility
[`ipmitool`](https://sourceforge.net/projects/ipmitool/files/ipmitool/).
Get bare metal metrics using the command line utility `ipmitool`
see ipmitool(https://sourceforge.net/projects/ipmitool/files/ipmitool/)
If no servers are specified, the plugin will query the local machine sensor stats via the following command:
@@ -15,7 +16,18 @@ When one or more servers are specified, the plugin will use the following comman
ipmitool -I lan -H SERVER -U USERID -P PASSW0RD sdr
```
### Configuration
## Measurements
- ipmi_sensor:
* Tags: `name`, `unit`
* Fields:
- status
- value
The `server` tag will be made available when retrieving stats from remote server(s).
## Configuration
```toml
# Read metrics from the bare metal servers via IPMI
@@ -40,49 +52,26 @@ ipmitool -I lan -H SERVER -U USERID -P PASSW0RD sdr
timeout = "20s"
```
### Measurements
- ipmi_sensor:
- tags:
- name
- unit
- server (only when retrieving stats from remote servers)
- fields:
- status (int)
- value (float)
#### Permissions
When gathering from the local system, Telegraf will need permission to the
ipmi device node. When using udev you can create the device node giving
`rw` permissions to the `telegraf` user by adding the following rule to
`/etc/udev/rules.d/52-telegraf-ipmi.rules`:
```
KERNEL=="ipmi*", MODE="660", GROUP="telegraf"
```
### Example Output
## Output
When retrieving stats from a remote server:
```
ipmi_sensor,server=10.20.2.203,unit=degrees_c,name=ambient_temp status=1i,value=20 1458488465012559455
ipmi_sensor,server=10.20.2.203,unit=feet,name=altitude status=1i,value=80 1458488465012688613
ipmi_sensor,server=10.20.2.203,unit=watts,name=avg_power status=1i,value=220 1458488465012776511
ipmi_sensor,server=10.20.2.203,unit=volts,name=planar_3.3v status=1i,value=3.28 1458488465012861875
ipmi_sensor,server=10.20.2.203,unit=volts,name=planar_vbat status=1i,value=3.04 1458488465013072508
ipmi_sensor,server=10.20.2.203,unit=rpm,name=fan_1a_tach status=1i,value=2610 1458488465013137932
ipmi_sensor,server=10.20.2.203,unit=rpm,name=fan_1b_tach status=1i,value=1775 1458488465013279896
> ipmi_sensor,server=10.20.2.203,unit=degrees_c,name=ambient_temp status=1i,value=20 1458488465012559455
> ipmi_sensor,server=10.20.2.203,unit=feet,name=altitude status=1i,value=80 1458488465012688613
> ipmi_sensor,server=10.20.2.203,unit=watts,name=avg_power status=1i,value=220 1458488465012776511
> ipmi_sensor,server=10.20.2.203,unit=volts,name=planar_3.3v status=1i,value=3.28 1458488465012861875
> ipmi_sensor,server=10.20.2.203,unit=volts,name=planar_vbat status=1i,value=3.04 1458488465013072508
> ipmi_sensor,server=10.20.2.203,unit=rpm,name=fan_1a_tach status=1i,value=2610 1458488465013137932
> ipmi_sensor,server=10.20.2.203,unit=rpm,name=fan_1b_tach status=1i,value=1775 1458488465013279896
```
When retrieving stats from the local machine (no server specified):
```
ipmi_sensor,unit=degrees_c,name=ambient_temp status=1i,value=20 1458488465012559455
ipmi_sensor,unit=feet,name=altitude status=1i,value=80 1458488465012688613
ipmi_sensor,unit=watts,name=avg_power status=1i,value=220 1458488465012776511
ipmi_sensor,unit=volts,name=planar_3.3v status=1i,value=3.28 1458488465012861875
ipmi_sensor,unit=volts,name=planar_vbat status=1i,value=3.04 1458488465013072508
ipmi_sensor,unit=rpm,name=fan_1a_tach status=1i,value=2610 1458488465013137932
ipmi_sensor,unit=rpm,name=fan_1b_tach status=1i,value=1775 1458488465013279896
> ipmi_sensor,unit=degrees_c,name=ambient_temp status=1i,value=20 1458488465012559455
> ipmi_sensor,unit=feet,name=altitude status=1i,value=80 1458488465012688613
> ipmi_sensor,unit=watts,name=avg_power status=1i,value=220 1458488465012776511
> ipmi_sensor,unit=volts,name=planar_3.3v status=1i,value=3.28 1458488465012861875
> ipmi_sensor,unit=volts,name=planar_vbat status=1i,value=3.04 1458488465013072508
> ipmi_sensor,unit=rpm,name=fan_1a_tach status=1i,value=2610 1458488465013137932
> ipmi_sensor,unit=rpm,name=fan_1b_tach status=1i,value=1775 1458488465013279896
```

View File

@@ -35,7 +35,7 @@ var sampleConfig = `
##
# servers = ["USERID:PASSW0RD@lan(192.168.1.1)"]
## Recommended: use metric 'interval' that is a multiple of 'timeout' to avoid
## Recomended: use metric 'interval' that is a multiple of 'timeout' to avoid
## gaps or overlap in pulled data
interval = "30s"

View File

@@ -81,7 +81,7 @@ func TestIptables_Gather(t *testing.T) {
K 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0
`},
},
{ // 8 - Multiple rows, multiple chains => no error
{ // 8 - Multiple rows, multipe chains => no error
table: "filter",
chains: []string{"INPUT", "FORWARD"},
values: []string{

View File

@@ -1,7 +1,5 @@
# Telegraf plugin: Jolokia
**Deprecated in version 1.5:** Please use the [jolokia2](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2) plugin.
#### Configuration
```toml

View File

@@ -5,7 +5,6 @@ import (
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"net/url"
"time"
@@ -60,10 +59,6 @@ type Jolokia struct {
}
const sampleConfig = `
# DEPRECATED: the jolokia plugin has been deprecated in favor of the
# jolokia2 plugin
# see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2
## This is the context root used to compose the jolokia url
## NOTE that Jolokia requires a trailing slash at the end of the context root
## NOTE that your jolokia security policy must allow for POST requests.
@@ -259,10 +254,6 @@ func (j *Jolokia) extractValues(measurement string, value interface{}, fields ma
func (j *Jolokia) Gather(acc telegraf.Accumulator) error {
if j.jClient == nil {
log.Println("W! DEPRECATED: the jolokia plugin has been deprecated " +
"in favor of the jolokia2 plugin " +
"(https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2)")
tr := &http.Transport{ResponseHeaderTimeout: j.ResponseHeaderTimeout.Duration}
j.jClient = &JolokiaClientImpl{&http.Client{
Transport: tr,

View File

@@ -1,169 +0,0 @@
# Jolokia2 Input Plugins
The [Jolokia](http://jolokia.org) _agent_ and _proxy_ input plugins collect JMX metrics from an HTTP endpoint using Jolokia's [JSON-over-HTTP protocol](https://jolokia.org/reference/html/protocol.html).
## Jolokia Agent Configuration
The `jolokia2_agent` input plugin reads JMX metrics from one or more [Jolokia agent](https://jolokia.org/agent/jvm.html) REST endpoints.
```toml
[[inputs.jolokia2_agent]]
urls = ["http://agent:8080/jolokia"]
[[inputs.jolokia2_agent.metric]]
name = "jvm_runtime"
mbean = "java.lang:type=Runtime"
paths = ["Uptime"]
```
Optionally, specify SSL options for communicating with agents:
```toml
[[inputs.jolokia2_agent]]
urls = ["https://agent:8080/jolokia"]
ssl_ca = "/var/private/ca.pem"
ssl_cert = "/var/private/client.pem"
ssl_key = "/var/private/client-key.pem"
#insecure_skip_verify = false
[[inputs.jolokia2_agent.metric]]
name = "jvm_runtime"
mbean = "java.lang:type=Runtime"
paths = ["Uptime"]
```
## Jolokia Proxy Configuration
The `jolokia2_proxy` input plugin reads JMX metrics from one or more _targets_ by interacting with a [Jolokia proxy](https://jolokia.org/features/proxy.html) REST endpoint.
```toml
[[inputs.jolokia2_proxy]]
url = "http://proxy:8080/jolokia"
#default_target_username = ""
#default_target_password = ""
[[inputs.jolokia2_proxy.target]]
url = "service:jmx:rmi:///jndi/rmi://targethost:9999/jmxrmi"
# username = ""
# password = ""
[[inputs.jolokia2_proxy.metric]]
name = "jvm_runtime"
mbean = "java.lang:type=Runtime"
paths = ["Uptime"]
```
Optionally, specify SSL options for communicating with proxies:
```toml
[[inputs.jolokia2_proxy]]
url = "https://proxy:8080/jolokia"
ssl_ca = "/var/private/ca.pem"
ssl_cert = "/var/private/client.pem"
ssl_key = "/var/private/client-key.pem"
#insecure_skip_verify = false
#default_target_username = ""
#default_target_password = ""
[[inputs.jolokia2_proxy.target]]
url = "service:jmx:rmi:///jndi/rmi://targethost:9999/jmxrmi"
# username = ""
# password = ""
[[inputs.jolokia2_proxy.metric]]
name = "jvm_runtime"
mbean = "java.lang:type=Runtime"
paths = ["Uptime"]
```
## Jolokia Metric Configuration
Each `metric` declaration generates a Jolokia request to fetch telemetry from a JMX MBean.
| Key | Required | Description |
|----------------|----------|-------------|
| `mbean` | yes | The object name of a JMX MBean. MBean property-key values can contain a wildcard `*`, allowing you to fetch multiple MBeans with one declaration. |
| `paths` | no | A list of MBean attributes to read. |
| `tag_keys` | no | A list of MBean property-key names to convert into tags. The property-key name becomes the tag name, while the property-key value becomes the tag value. |
| `tag_prefix` | no | A string to prepend to the tag names produced by this `metric` declaration. |
| `field_name` | no | A string to set as the name of the field produced by this metric; can contain substitutions. |
| `field_prefix` | no | A string to prepend to the field names produced by this `metric` declaration; can contain substitutions. |
Use `paths` to refine which fields to collect.
```toml
[[inputs.jolokia2_agent.metric]]
name = "jvm_memory"
mbean = "java.lang:type=Memory"
paths = ["HeapMemoryUsage", "NonHeapMemoryUsage", "ObjectPendingFinalizationCount"]
```
The preceding `jvm_memory` `metric` declaration produces the following output:
```
jvm_memory HeapMemoryUsage.committed=4294967296,HeapMemoryUsage.init=4294967296,HeapMemoryUsage.max=4294967296,HeapMemoryUsage.used=1750658992,NonHeapMemoryUsage.committed=67350528,NonHeapMemoryUsage.init=2555904,NonHeapMemoryUsage.max=-1,NonHeapMemoryUsage.used=65821352,ObjectPendingFinalizationCount=0 1503762436000000000
```
Use `*` wildcards against `mbean` property-key values to create distinct series by capturing values into `tag_keys`.
```toml
[[inputs.jolokia2_agent.metric]]
name = "jvm_garbage_collector"
mbean = "java.lang:name=*,type=GarbageCollector"
paths = ["CollectionTime", "CollectionCount"]
tag_keys = ["name"]
```
Since `name=*` matches both `G1 Old Generation` and `G1 Young Generation`, and `name` is used as a tag, the preceding `jvm_garbage_collector` `metric` declaration produces two metrics.
```
jvm_garbage_collector,name=G1\ Old\ Generation CollectionCount=0,CollectionTime=0 1503762520000000000
jvm_garbage_collector,name=G1\ Young\ Generation CollectionTime=32,CollectionCount=2 1503762520000000000
```
Use `tag_prefix` along with `tag_keys` to add detail to tag names.
```toml
[[inputs.jolokia2_agent.metric]]
name = "jvm_memory_pool"
mbean = "java.lang:name=*,type=MemoryPool"
paths = ["Usage", "PeakUsage", "CollectionUsage"]
tag_keys = ["name"]
tag_prefix = "pool_"
```
The preceding `jvm_memory_pool` `metric` declaration produces six metrics, each with a distinct `pool_name` tag.
```
jvm_memory_pool,pool_name=Compressed\ Class\ Space PeakUsage.max=1073741824,PeakUsage.committed=3145728,PeakUsage.init=0,Usage.committed=3145728,Usage.init=0,PeakUsage.used=3017976,Usage.max=1073741824,Usage.used=3017976 1503764025000000000
jvm_memory_pool,pool_name=Code\ Cache PeakUsage.init=2555904,PeakUsage.committed=6291456,Usage.committed=6291456,PeakUsage.used=6202752,PeakUsage.max=251658240,Usage.used=6210368,Usage.max=251658240,Usage.init=2555904 1503764025000000000
jvm_memory_pool,pool_name=G1\ Eden\ Space CollectionUsage.max=-1,PeakUsage.committed=56623104,PeakUsage.init=56623104,PeakUsage.used=53477376,Usage.max=-1,Usage.committed=49283072,Usage.used=19922944,CollectionUsage.committed=49283072,CollectionUsage.init=56623104,CollectionUsage.used=0,PeakUsage.max=-1,Usage.init=56623104 1503764025000000000
jvm_memory_pool,pool_name=G1\ Old\ Gen CollectionUsage.max=1073741824,CollectionUsage.committed=0,PeakUsage.max=1073741824,PeakUsage.committed=1017118720,PeakUsage.init=1017118720,PeakUsage.used=137032208,Usage.max=1073741824,CollectionUsage.init=1017118720,Usage.committed=1017118720,Usage.init=1017118720,Usage.used=134708752,CollectionUsage.used=0 1503764025000000000
jvm_memory_pool,pool_name=G1\ Survivor\ Space Usage.max=-1,Usage.init=0,CollectionUsage.max=-1,CollectionUsage.committed=7340032,CollectionUsage.used=7340032,PeakUsage.committed=7340032,Usage.committed=7340032,Usage.used=7340032,CollectionUsage.init=0,PeakUsage.max=-1,PeakUsage.init=0,PeakUsage.used=7340032 1503764025000000000
jvm_memory_pool,pool_name=Metaspace PeakUsage.init=0,PeakUsage.used=21852224,PeakUsage.max=-1,Usage.max=-1,Usage.committed=22282240,Usage.init=0,Usage.used=21852224,PeakUsage.committed=22282240 1503764025000000000
```
Use substitutions to create fields and field prefixes with MBean property-keys captured by wildcards. In the following example, `$1` represents the value of the property-key `name`, and `$2` represents the value of the property-key `topic`.
```toml
[[inputs.jolokia2_agent.metric]]
name = "kafka_topic"
mbean = "kafka.server:name=*,topic=*,type=BrokerTopicMetrics"
field_prefix = "$1"
tag_keys = ["topic"]
```
The preceding `kafka_topic` `metric` declaration produces a metric per Kafka topic. The `name` MBean property-key is used as a field prefix, gathering the fields together into a single metric.
```
kafka_topic,topic=my-topic BytesOutPerSec.MeanRate=0,FailedProduceRequestsPerSec.MeanRate=0,BytesOutPerSec.EventType="bytes",BytesRejectedPerSec.Count=0,FailedProduceRequestsPerSec.RateUnit="SECONDS",FailedProduceRequestsPerSec.EventType="requests",MessagesInPerSec.RateUnit="SECONDS",BytesInPerSec.EventType="bytes",BytesOutPerSec.RateUnit="SECONDS",BytesInPerSec.OneMinuteRate=0,FailedFetchRequestsPerSec.EventType="requests",TotalFetchRequestsPerSec.MeanRate=146.301533938701,BytesOutPerSec.FifteenMinuteRate=0,TotalProduceRequestsPerSec.MeanRate=0,BytesRejectedPerSec.FifteenMinuteRate=0,MessagesInPerSec.FiveMinuteRate=0,BytesInPerSec.Count=0,BytesRejectedPerSec.MeanRate=0,FailedFetchRequestsPerSec.MeanRate=0,FailedFetchRequestsPerSec.FiveMinuteRate=0,FailedFetchRequestsPerSec.FifteenMinuteRate=0,FailedProduceRequestsPerSec.Count=0,TotalFetchRequestsPerSec.FifteenMinuteRate=128.59314292334466,TotalFetchRequestsPerSec.OneMinuteRate=126.71551273850747,TotalFetchRequestsPerSec.Count=1353483,TotalProduceRequestsPerSec.FifteenMinuteRate=0,FailedFetchRequestsPerSec.OneMinuteRate=0,FailedFetchRequestsPerSec.Count=0,FailedProduceRequestsPerSec.FifteenMinuteRate=0,TotalFetchRequestsPerSec.FiveMinuteRate=130.8516148751592,TotalFetchRequestsPerSec.RateUnit="SECONDS",BytesRejectedPerSec.RateUnit="SECONDS",BytesInPerSec.MeanRate=0,FailedFetchRequestsPerSec.RateUnit="SECONDS",BytesRejectedPerSec.OneMinuteRate=0,BytesOutPerSec.Count=0,BytesOutPerSec.OneMinuteRate=0,MessagesInPerSec.FifteenMinuteRate=0,MessagesInPerSec.MeanRate=0,BytesInPerSec.FiveMinuteRate=0,TotalProduceRequestsPerSec.RateUnit="SECONDS",FailedProduceRequestsPerSec.OneMinuteRate=0,TotalProduceRequestsPerSec.EventType="requests",BytesRejectedPerSec.FiveMinuteRate=0,BytesRejectedPerSec.EventType="bytes",BytesOutPerSec.FiveMinuteRate=0,FailedProduceRequestsPerSec.FiveMinuteRate=0,MessagesInPerSec.Count=0,TotalProduceRequestsPerSec.FiveMinuteRate=0,TotalProduceRequestsPerSec.OneMinuteRate=0,MessagesInPerSec.EventType="messages",MessagesInPerSec.OneMinuteRate=0,TotalFetchRequestsPerSec.EventType="requests",BytesInPerSec.RateUnit="SECONDS",BytesInPerSec.FifteenMinuteRate=0,TotalProduceRequestsPerSec.Count=0 1503767532000000000
```
Both `jolokia2_agent` and `jolokia2_proxy` plugins support default configurations that apply to every `metric` declaration.
| Key | Default Value | Description |
|---------------------------|---------------|-------------|
| `default_field_separator` | `.` | A character to use to join MBean attributes when creating fields. |
| `default_field_prefix` | _None_ | A string to prepend to the field names produced by all `metric` declarations. |
| `default_tag_prefix` | _None_ | A string to prepend to the tag names produced by all `metric` declarations. |
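As a sketch, these defaults compose with a `metric` declaration like so (the names and values here are illustrative):

```toml
[[inputs.jolokia2_agent]]
  urls = ["http://localhost:8080/jolokia"]
  default_field_separator = "_"
  default_field_prefix    = "java_"

  [[inputs.jolokia2_agent.metric]]
    name  = "memory"
    mbean = "java.lang:type=Memory"
    paths = ["HeapMemoryUsage"]
```

With these settings, nested attribute values would be emitted as fields such as `java_HeapMemoryUsage_used` rather than the default `HeapMemoryUsage.used`.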

View File

@@ -1,271 +0,0 @@
package jolokia2
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"path"
"time"
"github.com/influxdata/telegraf/internal"
)
type Client struct {
URL string
client *http.Client
config *ClientConfig
}
type ClientConfig struct {
ResponseTimeout time.Duration
Username string
Password string
SSLCA string
SSLCert string
SSLKey string
InsecureSkipVerify bool
ProxyConfig *ProxyConfig
}
type ProxyConfig struct {
DefaultTargetUsername string
DefaultTargetPassword string
Targets []ProxyTargetConfig
}
type ProxyTargetConfig struct {
Username string
Password string
URL string
}
type ReadRequest struct {
Mbean string
Attributes []string
Path string
}
type ReadResponse struct {
Status int
Value interface{}
RequestMbean string
RequestAttributes []string
RequestPath string
RequestTarget string
}
// Jolokia JSON request object. Example: {
// "type": "read",
// "mbean: "java.lang:type="Runtime",
// "attribute": "Uptime",
// "target": {
// "url: "service:jmx:rmi:///jndi/rmi://target:9010/jmxrmi"
// }
// }
type jolokiaRequest struct {
Type string `json:"type"`
Mbean string `json:"mbean"`
Attribute interface{} `json:"attribute,omitempty"`
Path string `json:"path,omitempty"`
Target *jolokiaTarget `json:"target,omitempty"`
}
type jolokiaTarget struct {
URL string `json:"url"`
User string `json:"user,omitempty"`
Password string `json:"password,omitempty"`
}
// Jolokia JSON response object. Example: {
// "request": {
// "type": "read"
// "mbean": "java.lang:type=Runtime",
// "attribute": "Uptime",
// "target": {
// "url": "service:jmx:rmi:///jndi/rmi://target:9010/jmxrmi"
// }
// },
// "value": 1214083,
// "timestamp": 1488059309,
// "status": 200
// }
type jolokiaResponse struct {
Request jolokiaRequest `json:"request"`
Value interface{} `json:"value"`
Status int `json:"status"`
}
func NewClient(url string, config *ClientConfig) (*Client, error) {
tlsConfig, err := internal.GetTLSConfig(
config.SSLCert, config.SSLKey, config.SSLCA, config.InsecureSkipVerify)
if err != nil {
return nil, err
}
transport := &http.Transport{
ResponseHeaderTimeout: config.ResponseTimeout,
TLSClientConfig: tlsConfig,
}
client := &http.Client{
Transport: transport,
Timeout: config.ResponseTimeout,
}
return &Client{
URL: url,
config: config,
client: client,
}, nil
}
func (c *Client) read(requests []ReadRequest) ([]ReadResponse, error) {
jrequests := makeJolokiaRequests(requests, c.config.ProxyConfig)
requestBody, err := json.Marshal(jrequests)
if err != nil {
return nil, err
}
requestUrl, err := formatReadUrl(c.URL, c.config.Username, c.config.Password)
if err != nil {
return nil, err
}
req, err := http.NewRequest("POST", requestUrl, bytes.NewBuffer(requestBody))
if err != nil {
return nil, err
}
req.Header.Add("Content-type", "application/json")
resp, err := c.client.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("Response from url \"%s\" has status code %d (%s), expected %d (%s)",
c.URL, resp.StatusCode, http.StatusText(resp.StatusCode), http.StatusOK, http.StatusText(http.StatusOK))
}
responseBody, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
var jresponses []jolokiaResponse
if err = json.Unmarshal([]byte(responseBody), &jresponses); err != nil {
return nil, fmt.Errorf("Error decoding JSON response: %s: %s", err, responseBody)
}
return makeReadResponses(jresponses), nil
}
func makeJolokiaRequests(rrequests []ReadRequest, proxyConfig *ProxyConfig) []jolokiaRequest {
jrequests := make([]jolokiaRequest, 0)
if proxyConfig == nil {
for _, rr := range rrequests {
jrequests = append(jrequests, makeJolokiaRequest(rr, nil))
}
} else {
for _, t := range proxyConfig.Targets {
if t.Username == "" {
t.Username = proxyConfig.DefaultTargetUsername
}
if t.Password == "" {
t.Password = proxyConfig.DefaultTargetPassword
}
for _, rr := range rrequests {
jtarget := &jolokiaTarget{
URL: t.URL,
User: t.Username,
Password: t.Password,
}
jrequests = append(jrequests, makeJolokiaRequest(rr, jtarget))
}
}
}
return jrequests
}
func makeJolokiaRequest(rrequest ReadRequest, jtarget *jolokiaTarget) jolokiaRequest {
jrequest := jolokiaRequest{
Type: "read",
Mbean: rrequest.Mbean,
Path: rrequest.Path,
Target: jtarget,
}
if len(rrequest.Attributes) == 1 {
jrequest.Attribute = rrequest.Attributes[0]
}
if len(rrequest.Attributes) > 1 {
jrequest.Attribute = rrequest.Attributes
}
return jrequest
}
func makeReadResponses(jresponses []jolokiaResponse) []ReadResponse {
rresponses := make([]ReadResponse, 0)
for _, jr := range jresponses {
rrequest := ReadRequest{
Mbean: jr.Request.Mbean,
Path: jr.Request.Path,
Attributes: []string{},
}
attrValue := jr.Request.Attribute
if attrValue != nil {
attribute, ok := attrValue.(string)
if ok {
rrequest.Attributes = []string{attribute}
} else {
attributes, _ := attrValue.([]interface{})
rrequest.Attributes = make([]string, len(attributes))
for i, attr := range attributes {
rrequest.Attributes[i] = attr.(string)
}
}
}
rresponse := ReadResponse{
Value: jr.Value,
Status: jr.Status,
RequestMbean: rrequest.Mbean,
RequestAttributes: rrequest.Attributes,
RequestPath: rrequest.Path,
}
if jtarget := jr.Request.Target; jtarget != nil {
rresponse.RequestTarget = jtarget.URL
}
rresponses = append(rresponses, rresponse)
}
return rresponses
}
func formatReadUrl(configUrl, username, password string) (string, error) {
parsedUrl, err := url.Parse(configUrl)
if err != nil {
return "", err
}
readUrl := url.URL{
Host: parsedUrl.Host,
Scheme: parsedUrl.Scheme,
}
if username != "" || password != "" {
readUrl.User = url.UserPassword(username, password)
}
readUrl.Path = path.Join(parsedUrl.Path, "read")
// Query() returns a copy of the values, so mutate it and write it back.
query := readUrl.Query()
query.Add("ignoreErrors", "true")
readUrl.RawQuery = query.Encode()
return readUrl.String(), nil
}

View File

@@ -1,129 +0,0 @@
package jolokia2
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
"testing"
"github.com/influxdata/telegraf/testutil"
)
func TestJolokia2_ClientAuthRequest(t *testing.T) {
var username string
var password string
var requests []map[string]interface{}
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
username, password, _ = r.BasicAuth()
body, _ := ioutil.ReadAll(r.Body)
err := json.Unmarshal(body, &requests)
if err != nil {
t.Error(err)
}
w.WriteHeader(http.StatusOK)
}))
defer server.Close()
plugin := setupPlugin(t, fmt.Sprintf(`
[jolokia2_agent]
urls = ["%s/jolokia"]
username = "sally"
password = "seashore"
[[jolokia2_agent.metric]]
name = "hello"
mbean = "hello:foo=bar"
`, server.URL))
var acc testutil.Accumulator
plugin.Gather(&acc)
if username != "sally" {
t.Errorf("Expected to post with username %s, but was %s", "sally", username)
}
if password != "seashore" {
t.Errorf("Expected to post with password %s, but was %s", "seashore", password)
}
if len(requests) == 0 {
t.Fatal("Expected to post a request body, but was empty.")
}
request := requests[0]
if expect := "hello:foo=bar"; request["mbean"] != expect {
t.Errorf("Expected to query mbean %s, but was %s", expect, request["mbean"])
}
}
func TestJolokia2_ClientProxyAuthRequest(t *testing.T) {
var requests []map[string]interface{}
var username string
var password string
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
username, password, _ = r.BasicAuth()
body, _ := ioutil.ReadAll(r.Body)
err := json.Unmarshal(body, &requests)
if err != nil {
t.Error(err)
}
w.WriteHeader(http.StatusOK)
}))
defer server.Close()
plugin := setupPlugin(t, fmt.Sprintf(`
[jolokia2_proxy]
url = "%s/jolokia"
username = "sally"
password = "seashore"
[[jolokia2_proxy.target]]
url = "service:jmx:rmi:///jndi/rmi://target:9010/jmxrmi"
username = "jack"
password = "benimble"
[[jolokia2_proxy.metric]]
name = "hello"
mbean = "hello:foo=bar"
`, server.URL))
var acc testutil.Accumulator
plugin.Gather(&acc)
if username != "sally" {
t.Errorf("Expected to post with username %s, but was %s", "sally", username)
}
if password != "seashore" {
t.Errorf("Expected to post with password %s, but was %s", "seashore", password)
}
if len(requests) == 0 {
t.Fatal("Expected to post a request body, but was empty.")
}
request := requests[0]
if expect := "hello:foo=bar"; request["mbean"] != expect {
t.Errorf("Expected to query mbean %s, but was %s", expect, request["mbean"])
}
target, ok := request["target"].(map[string]interface{})
if !ok {
t.Fatal("Expected a proxy target, but was empty.")
}
if expect := "service:jmx:rmi:///jndi/rmi://target:9010/jmxrmi"; target["url"] != expect {
t.Errorf("Expected proxy target url %s, but was %s", expect, target["url"])
}
if expect := "jack"; target["user"] != expect {
t.Errorf("Expected proxy target username %s, but was %s", expect, target["user"])
}
if expect := "benimble"; target["password"] != expect {
t.Errorf("Expected proxy target password %s, but was %s", expect, target["password"])
}
}

View File

@@ -1,40 +0,0 @@
[[inputs.jolokia2_agent]]
urls = ["http://localhost:8080/jolokia"]
[[inputs.jolokia2_agent.metric]]
name = "java_runtime"
mbean = "java.lang:type=Runtime"
paths = ["Uptime"]
[[inputs.jolokia2_agent.metric]]
name = "java_memory"
mbean = "java.lang:type=Memory"
paths = ["HeapMemoryUsage", "NonHeapMemoryUsage", "ObjectPendingFinalizationCount"]
[[inputs.jolokia2_agent.metric]]
name = "java_garbage_collector"
mbean = "java.lang:name=G1*,type=GarbageCollector"
paths = ["CollectionTime", "CollectionCount"]
tag_keys = ["name"]
[[inputs.jolokia2_agent.metric]]
name = "java_last_garbage_collection"
mbean = "java.lang:name=G1 Young Generation,type=GarbageCollector"
paths = ["LastGcInfo/duration", "LastGcInfo/GcThreadCount", "LastGcInfo/memoryUsageAfterGc"]
[[inputs.jolokia2_agent.metric]]
name = "java_threading"
mbean = "java.lang:type=Threading"
paths = ["TotalStartedThreadCount", "ThreadCount", "DaemonThreadCount", "PeakThreadCount"]
[[inputs.jolokia2_agent.metric]]
name = "java_class_loading"
mbean = "java.lang:type=ClassLoading"
paths = ["LoadedClassCount", "UnloadedClassCount", "TotalLoadedClassCount"]
[[inputs.jolokia2_agent.metric]]
name = "java_memory_pool"
mbean = "java.lang:name=*,type=MemoryPool"
paths = ["Usage", "PeakUsage", "CollectionUsage"]
tag_keys = ["name"]

View File

@@ -1,55 +0,0 @@
[[inputs.jolokia2_agent]]
name_prefix = "kafka_"
urls = ["http://localhost:8080/jolokia"]
[[inputs.jolokia2_agent.metric]]
name = "controller"
mbean = "kafka.controller:name=*,type=*"
field_prefix = "$1."
[[inputs.jolokia2_agent.metric]]
name = "replica_manager"
mbean = "kafka.server:name=*,type=ReplicaManager"
field_prefix = "$1."
[[inputs.jolokia2_agent.metric]]
name = "purgatory"
mbean = "kafka.server:delayedOperation=*,name=*,type=DelayedOperationPurgatory"
field_prefix = "$1."
field_name = "$2"
[[inputs.jolokia2_agent.metric]]
name = "client"
mbean = "kafka.server:client-id=*,type=*"
tag_keys = ["client-id", "type"]
[[inputs.jolokia2_agent.metric]]
name = "request"
mbean = "kafka.network:name=*,request=*,type=RequestMetrics"
field_prefix = "$1."
tag_keys = ["request"]
[[inputs.jolokia2_agent.metric]]
name = "topics"
mbean = "kafka.server:name=*,type=BrokerTopicMetrics"
field_prefix = "$1."
[[inputs.jolokia2_agent.metric]]
name = "topic"
mbean = "kafka.server:name=*,topic=*,type=BrokerTopicMetrics"
field_prefix = "$1."
tag_keys = ["topic"]
[[inputs.jolokia2_agent.metric]]
name = "partition"
mbean = "kafka.log:name=*,partition=*,topic=*,type=Log"
field_name = "$1"
tag_keys = ["topic", "partition"]
[[inputs.jolokia2_agent.metric]]
name = "partition"
mbean = "kafka.cluster:name=UnderReplicated,partition=*,topic=*,type=Partition"
field_name = "UnderReplicatedPartitions"
tag_keys = ["topic", "partition"]

View File

@@ -1,266 +0,0 @@
package jolokia2
import (
"fmt"
"sort"
"strings"
"github.com/influxdata/telegraf"
)
const defaultFieldName = "value"
type Gatherer struct {
metrics []Metric
requests []ReadRequest
}
func NewGatherer(metrics []Metric) *Gatherer {
return &Gatherer{
metrics: metrics,
requests: makeReadRequests(metrics),
}
}
// Gather adds points to an accumulator from responses returned
// by a Jolokia agent.
func (g *Gatherer) Gather(client *Client, acc telegraf.Accumulator) error {
var tags map[string]string
if client.config.ProxyConfig != nil {
tags = map[string]string{"jolokia_proxy_url": client.URL}
} else {
tags = map[string]string{"jolokia_agent_url": client.URL}
}
requests := makeReadRequests(g.metrics)
responses, err := client.read(requests)
if err != nil {
return err
}
g.gatherResponses(responses, tags, acc)
return nil
}
// gatherResponses adds points to an accumulator from the ReadResponse objects
// returned by a Jolokia agent.
func (g *Gatherer) gatherResponses(responses []ReadResponse, tags map[string]string, acc telegraf.Accumulator) {
series := make(map[string][]point, 0)
for _, metric := range g.metrics {
points, ok := series[metric.Name]
if !ok {
points = make([]point, 0)
}
responsePoints, responseErrors := g.generatePoints(metric, responses)
for _, responsePoint := range responsePoints {
points = append(points, responsePoint)
}
for _, err := range responseErrors {
acc.AddError(err)
}
series[metric.Name] = points
}
for measurement, points := range series {
for _, point := range compactPoints(points) {
acc.AddFields(measurement,
point.Fields, mergeTags(point.Tags, tags))
}
}
}
// generatePoints creates points for the supplied metric from the ReadResponse
// objects returned by the Jolokia client.
func (g *Gatherer) generatePoints(metric Metric, responses []ReadResponse) ([]point, []error) {
points := make([]point, 0)
errors := make([]error, 0)
for _, response := range responses {
switch response.Status {
case 200:
break
case 404:
continue
default:
errors = append(errors, fmt.Errorf("Unexpected status in response from target %s: %d",
response.RequestTarget, response.Status))
continue
}
if !metricMatchesResponse(metric, response) {
continue
}
pb := newPointBuilder(metric, response.RequestAttributes, response.RequestPath)
for _, point := range pb.Build(metric.Mbean, response.Value) {
if response.RequestTarget != "" {
point.Tags["jolokia_agent_url"] = response.RequestTarget
}
points = append(points, point)
}
}
return points, errors
}
// mergeTags combines two tag sets into a single tag set.
func mergeTags(metricTags, outerTags map[string]string) map[string]string {
tags := make(map[string]string)
for k, v := range outerTags {
tags[k] = v
}
for k, v := range metricTags {
tags[k] = v
}
return tags
}
// metricMatchesResponse returns true when the name, attributes, and path
// of a Metric match the corresponding elements in a ReadResponse object
// returned by a Jolokia agent.
func metricMatchesResponse(metric Metric, response ReadResponse) bool {
if !metric.MatchObjectName(response.RequestMbean) {
return false
}
if len(metric.Paths) == 0 {
return len(response.RequestAttributes) == 0
}
for _, attribute := range response.RequestAttributes {
if metric.MatchAttributeAndPath(attribute, response.RequestPath) {
return true
}
}
return false
}
// compactPoints reduces the number of points by merging points
// with matching tag sets. When a match is found, the fields from
// one point are moved to another, and the emptied point is dropped.
func compactPoints(points []point) []point {
compactedPoints := make([]point, 0)
for _, sourcePoint := range points {
keepPoint := true
for _, compactPoint := range compactedPoints {
if !tagSetsMatch(sourcePoint.Tags, compactPoint.Tags) {
continue
}
keepPoint = false
for key, val := range sourcePoint.Fields {
compactPoint.Fields[key] = val
}
}
if keepPoint {
compactedPoints = append(compactedPoints, sourcePoint)
}
}
return compactedPoints
}
// tagSetsMatch returns true if two maps are equivalent.
func tagSetsMatch(a, b map[string]string) bool {
if len(a) != len(b) {
return false
}
for ak, av := range a {
bv, ok := b[ak]
if !ok {
return false
}
if av != bv {
return false
}
}
return true
}
// makeReadRequests creates ReadRequest objects from metrics definitions.
func makeReadRequests(metrics []Metric) []ReadRequest {
var requests []ReadRequest
for _, metric := range metrics {
if len(metric.Paths) == 0 {
requests = append(requests, ReadRequest{
Mbean: metric.Mbean,
Attributes: []string{},
})
} else {
attributes := make(map[string][]string)
for _, path := range metric.Paths {
segments := strings.Split(path, "/")
attribute := segments[0]
if _, ok := attributes[attribute]; !ok {
attributes[attribute] = make([]string, 0)
}
if len(segments) > 1 {
paths := attributes[attribute]
attributes[attribute] = append(paths, strings.Join(segments[1:], "/"))
}
}
rootAttributes := findRequestAttributesWithoutPaths(attributes)
if len(rootAttributes) > 0 {
requests = append(requests, ReadRequest{
Mbean: metric.Mbean,
Attributes: rootAttributes,
})
}
for _, deepAttribute := range findRequestAttributesWithPaths(attributes) {
for _, path := range attributes[deepAttribute] {
requests = append(requests, ReadRequest{
Mbean: metric.Mbean,
Attributes: []string{deepAttribute},
Path: path,
})
}
}
}
}
return requests
}
func findRequestAttributesWithoutPaths(attributes map[string][]string) []string {
results := make([]string, 0)
for attr, paths := range attributes {
if len(paths) == 0 {
results = append(results, attr)
}
}
sort.Strings(results)
return results
}
func findRequestAttributesWithPaths(attributes map[string][]string) []string {
results := make([]string, 0)
for attr, paths := range attributes {
if len(paths) != 0 {
results = append(results, attr)
}
}
sort.Strings(results)
return results
}

View File

@@ -1,104 +0,0 @@
package jolokia2
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestJolokia2_makeReadRequests(t *testing.T) {
cases := []struct {
metric Metric
expected []ReadRequest
}{
{
metric: Metric{
Name: "object",
Mbean: "test:foo=bar",
},
expected: []ReadRequest{
ReadRequest{
Mbean: "test:foo=bar",
Attributes: []string{},
},
},
}, {
metric: Metric{
Name: "object_with_an_attribute",
Mbean: "test:foo=bar",
Paths: []string{"biz"},
},
expected: []ReadRequest{
ReadRequest{
Mbean: "test:foo=bar",
Attributes: []string{"biz"},
},
},
}, {
metric: Metric{
Name: "object_with_attributes",
Mbean: "test:foo=bar",
Paths: []string{"baz", "biz"},
},
expected: []ReadRequest{
ReadRequest{
Mbean: "test:foo=bar",
Attributes: []string{"baz", "biz"},
},
},
}, {
metric: Metric{
Name: "object_with_an_attribute_and_path",
Mbean: "test:foo=bar",
Paths: []string{"biz/baz"},
},
expected: []ReadRequest{
ReadRequest{
Mbean: "test:foo=bar",
Attributes: []string{"biz"},
Path: "baz",
},
},
}, {
metric: Metric{
Name: "object_with_an_attribute_and_a_deep_path",
Mbean: "test:foo=bar",
Paths: []string{"biz/baz/fiz/faz"},
},
expected: []ReadRequest{
ReadRequest{
Mbean: "test:foo=bar",
Attributes: []string{"biz"},
Path: "baz/fiz/faz",
},
},
}, {
metric: Metric{
Name: "object_with_attributes_and_paths",
Mbean: "test:foo=bar",
Paths: []string{"baz/biz", "faz/fiz"},
},
expected: []ReadRequest{
ReadRequest{
Mbean: "test:foo=bar",
Attributes: []string{"baz"},
Path: "biz",
},
ReadRequest{
Mbean: "test:foo=bar",
Attributes: []string{"faz"},
Path: "fiz",
},
},
},
}
for _, c := range cases {
payload := makeReadRequests([]Metric{c.metric})
assert.Equal(t, len(c.expected), len(payload), "Failing case: "+c.metric.Name)
for _, actual := range payload {
assert.Contains(t, c.expected, actual, "Failing case: "+c.metric.Name)
}
}
}

View File

@@ -1,21 +0,0 @@
package jolokia2
import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
)
func init() {
inputs.Add("jolokia2_agent", func() telegraf.Input {
return &JolokiaAgent{
Metrics: []MetricConfig{},
DefaultFieldSeparator: ".",
}
})
inputs.Add("jolokia2_proxy", func() telegraf.Input {
return &JolokiaProxy{
Metrics: []MetricConfig{},
DefaultFieldSeparator: ".",
}
})
}
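
The init function above follows telegraf's constructor-registry pattern. A self-contained sketch of the same idea, with hypothetical local types standing in for telegraf.Input and inputs.Add:

```go
package main

import "fmt"

// Input is a stand-in for the telegraf.Input interface.
type Input interface{ Description() string }

// registry maps a plugin name to a constructor, as inputs.Add does.
var registry = map[string]func() Input{}

func Add(name string, creator func() Input) { registry[name] = creator }

type demoAgent struct{}

func (demoAgent) Description() string { return "demo input" }

func main() {
	Add("demo", func() Input { return demoAgent{} })
	fmt.Println(registry["demo"]().Description())
}
```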

View File

@@ -1,112 +0,0 @@
package jolokia2
import (
"fmt"
"sync"
"time"
"github.com/influxdata/telegraf"
)
type JolokiaAgent struct {
DefaultFieldPrefix string
DefaultFieldSeparator string
DefaultTagPrefix string
URLs []string `toml:"urls"`
Username string
Password string
ResponseTimeout time.Duration `toml:"response_timeout"`
SSLCA string `toml:"ssl_ca"`
SSLCert string `toml:"ssl_cert"`
SSLKey string `toml:"ssl_key"`
InsecureSkipVerify bool
Metrics []MetricConfig `toml:"metric"`
gatherer *Gatherer
}
func (ja *JolokiaAgent) SampleConfig() string {
return `
# default_tag_prefix = ""
# default_field_prefix = ""
# default_field_separator = "."
## Add agent URLs to query
urls = ["http://localhost:8080/jolokia"]
# username = ""
# password = ""
# response_timeout = "5s"
## Optional SSL config
# ssl_ca = "/var/private/ca.pem"
# ssl_cert = "/var/private/client.pem"
# ssl_key = "/var/private/client-key.pem"
# insecure_skip_verify = false
## Add metrics to read
[[inputs.jolokia2_agent.metric]]
name = "java_runtime"
mbean = "java.lang:type=Runtime"
paths = ["Uptime"]
`
}
func (ja *JolokiaAgent) Description() string {
return "Read JMX metrics from a Jolokia REST agent endpoint"
}
func (ja *JolokiaAgent) Gather(acc telegraf.Accumulator) error {
if ja.gatherer == nil {
ja.gatherer = NewGatherer(ja.createMetrics())
}
var wg sync.WaitGroup
for _, url := range ja.URLs {
client, err := ja.createClient(url)
if err != nil {
acc.AddError(fmt.Errorf("Unable to create client for %s: %v", url, err))
continue
}
wg.Add(1)
go func(client *Client) {
defer wg.Done()
err = ja.gatherer.Gather(client, acc)
if err != nil {
acc.AddError(fmt.Errorf("Unable to gather metrics for %s: %v", client.URL, err))
}
}(client)
}
wg.Wait()
return nil
}
func (ja *JolokiaAgent) createMetrics() []Metric {
var metrics []Metric
for _, config := range ja.Metrics {
metrics = append(metrics, NewMetric(config,
ja.DefaultFieldPrefix, ja.DefaultFieldSeparator, ja.DefaultTagPrefix))
}
return metrics
}
func (ja *JolokiaAgent) createClient(url string) (*Client, error) {
return NewClient(url, &ClientConfig{
Username: ja.Username,
Password: ja.Password,
ResponseTimeout: ja.ResponseTimeout,
SSLCA: ja.SSLCA,
SSLCert: ja.SSLCert,
SSLKey: ja.SSLKey,
InsecureSkipVerify: ja.InsecureSkipVerify,
})
}
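
Gather above fans out one goroutine per configured URL and reports failures through the accumulator without aborting the whole collection. A minimal sketch of the same fan-out pattern, using an error channel in place of telegraf's accumulator; fetch is a hypothetical stand-in for the per-URL gather:

```go
package main

import (
	"fmt"
	"sync"
)

func fetch(url string) error {
	if url == "" {
		return fmt.Errorf("empty url")
	}
	return nil // pretend we gathered metrics here
}

func main() {
	urls := []string{"http://localhost:8080/jolokia", ""}
	errs := make(chan error, len(urls)) // buffered: one slot per URL
	var wg sync.WaitGroup
	for _, u := range urls {
		wg.Add(1)
		go func(u string) {
			defer wg.Done()
			if err := fetch(u); err != nil {
				errs <- fmt.Errorf("unable to gather metrics for %s: %v", u, err)
			}
		}(u)
	}
	wg.Wait()
	close(errs)
	for err := range errs {
		fmt.Println(err)
	}
}
```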

View File

@@ -1,129 +0,0 @@
package jolokia2
import (
"time"
"github.com/influxdata/telegraf"
)
type JolokiaProxy struct {
DefaultFieldPrefix string
DefaultFieldSeparator string
DefaultTagPrefix string
URL string `toml:"url"`
DefaultTargetPassword string
DefaultTargetUsername string
Targets []JolokiaProxyTargetConfig `toml:"target"`
Username string
Password string
SSLCA string `toml:"ssl_ca"`
SSLCert string `toml:"ssl_cert"`
SSLKey string `toml:"ssl_key"`
InsecureSkipVerify bool
ResponseTimeout time.Duration `toml:"response_timeout"`
Metrics []MetricConfig `toml:"metric"`
client *Client
gatherer *Gatherer
}
type JolokiaProxyTargetConfig struct {
URL string `toml:"url"`
Username string
Password string
}
func (jp *JolokiaProxy) SampleConfig() string {
return `
# default_tag_prefix = ""
# default_field_prefix = ""
# default_field_separator = "."
## Proxy agent
url = "http://localhost:8080/jolokia"
# username = ""
# password = ""
# response_timeout = "5s"
## Optional SSL config
# ssl_ca = "/var/private/ca.pem"
# ssl_cert = "/var/private/client.pem"
# ssl_key = "/var/private/client-key.pem"
# insecure_skip_verify = false
## Add proxy targets to query
# default_target_username = ""
# default_target_password = ""
[[inputs.jolokia2_proxy.target]]
url = "service:jmx:rmi:///jndi/rmi://targethost:9999/jmxrmi"
# username = ""
# password = ""
## Add metrics to read
[[inputs.jolokia2_proxy.metric]]
name = "java_runtime"
mbean = "java.lang:type=Runtime"
paths = ["Uptime"]
`
}
func (jp *JolokiaProxy) Description() string {
return "Read JMX metrics from a Jolokia REST proxy endpoint"
}
func (jp *JolokiaProxy) Gather(acc telegraf.Accumulator) error {
if jp.gatherer == nil {
jp.gatherer = NewGatherer(jp.createMetrics())
}
if jp.client == nil {
client, err := jp.createClient()
if err != nil {
return err
}
jp.client = client
}
return jp.gatherer.Gather(jp.client, acc)
}
func (jp *JolokiaProxy) createMetrics() []Metric {
var metrics []Metric
for _, config := range jp.Metrics {
metrics = append(metrics, NewMetric(config,
jp.DefaultFieldPrefix, jp.DefaultFieldSeparator, jp.DefaultTagPrefix))
}
return metrics
}
func (jp *JolokiaProxy) createClient() (*Client, error) {
proxyConfig := &ProxyConfig{
DefaultTargetUsername: jp.DefaultTargetUsername,
DefaultTargetPassword: jp.DefaultTargetPassword,
}
for _, target := range jp.Targets {
proxyConfig.Targets = append(proxyConfig.Targets, ProxyTargetConfig{
URL: target.URL,
Username: target.Username,
Password: target.Password,
})
}
return NewClient(jp.URL, &ClientConfig{
Username: jp.Username,
Password: jp.Password,
ResponseTimeout: jp.ResponseTimeout,
SSLCA: jp.SSLCA,
SSLCert: jp.SSLCert,
SSLKey: jp.SSLKey,
InsecureSkipVerify: jp.InsecureSkipVerify,
ProxyConfig: proxyConfig,
})
}

View File

@@ -1,781 +0,0 @@
package jolokia2
import (
"fmt"
"net/http"
"net/http/httptest"
"testing"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/testutil"
"github.com/influxdata/toml"
"github.com/influxdata/toml/ast"
"github.com/stretchr/testify/assert"
)
func TestJolokia2_ScalarValues(t *testing.T) {
config := `
[jolokia2_agent]
urls = ["%s"]
[[jolokia2_agent.metric]]
name = "scalar_without_attribute"
mbean = "scalar_without_attribute"
[[jolokia2_agent.metric]]
name = "scalar_with_attribute"
mbean = "scalar_with_attribute"
paths = ["biz"]
[[jolokia2_agent.metric]]
name = "scalar_with_attribute_and_path"
mbean = "scalar_with_attribute_and_path"
paths = ["biz/baz"]
# This should return multiple series with different test tags.
[[jolokia2_agent.metric]]
name = "scalar_with_key_pattern"
mbean = "scalar_with_key_pattern:test=*"
tag_keys = ["test"]`
response := `[{
"request": {
"mbean": "scalar_without_attribute",
"type": "read"
},
"value": 123,
"status": 200
}, {
"request": {
"mbean": "scalar_with_attribute",
"attribute": "biz",
"type": "read"
},
"value": 456,
"status": 200
}, {
"request": {
"mbean": "scalar_with_attribute_and_path",
"attribute": "biz",
"path": "baz",
"type": "read"
},
"value": 789,
"status": 200
}, {
"request": {
"mbean": "scalar_with_key_pattern:test=*",
"type": "read"
},
"value": {
"scalar_with_key_pattern:test=foo": 123,
"scalar_with_key_pattern:test=bar": 456
},
"status": 200
}]`
server := setupServer(http.StatusOK, response)
defer server.Close()
plugin := setupPlugin(t, fmt.Sprintf(config, server.URL))
var acc testutil.Accumulator
assert.NoError(t, plugin.Gather(&acc))
acc.AssertContainsTaggedFields(t, "scalar_without_attribute", map[string]interface{}{
"value": 123.0,
}, map[string]string{
"jolokia_agent_url": server.URL,
})
acc.AssertContainsTaggedFields(t, "scalar_with_attribute", map[string]interface{}{
"biz": 456.0,
}, map[string]string{
"jolokia_agent_url": server.URL,
})
acc.AssertContainsTaggedFields(t, "scalar_with_attribute_and_path", map[string]interface{}{
"biz.baz": 789.0,
}, map[string]string{
"jolokia_agent_url": server.URL,
})
acc.AssertContainsTaggedFields(t, "scalar_with_key_pattern", map[string]interface{}{
"value": 123.0,
}, map[string]string{
"jolokia_agent_url": server.URL,
"test": "foo",
})
acc.AssertContainsTaggedFields(t, "scalar_with_key_pattern", map[string]interface{}{
"value": 456.0,
}, map[string]string{
"jolokia_agent_url": server.URL,
"test": "bar",
})
}
func TestJolokia2_ObjectValues(t *testing.T) {
config := `
[jolokia2_agent]
urls = ["%s"]
[[jolokia2_agent.metric]]
name = "object_without_attribute"
mbean = "object_without_attribute"
tag_keys = ["foo"]
[[jolokia2_agent.metric]]
name = "object_with_attribute"
mbean = "object_with_attribute"
paths = ["biz"]
[[jolokia2_agent.metric]]
name = "object_with_attribute_and_path"
mbean = "object_with_attribute_and_path"
paths = ["biz/baz"]
# This will generate two separate request objects.
[[jolokia2_agent.metric]]
name = "object_with_branching_paths"
mbean = "object_with_branching_paths"
paths = ["foo/fiz", "foo/faz"]
# This should return multiple series with different test tags.
[[jolokia2_agent.metric]]
name = "object_with_key_pattern"
mbean = "object_with_key_pattern:test=*"
tag_keys = ["test"]`
response := `[{
"request": {
"mbean": "object_without_attribute",
"type": "read"
},
"value": {
"biz": 123,
"baz": 456
},
"status": 200
}, {
"request": {
"mbean": "object_with_attribute",
"attribute": "biz",
"type": "read"
},
"value": {
"fiz": 123,
"faz": 456
},
"status": 200
}, {
"request": {
"mbean": "object_with_branching_paths",
"attribute": "foo",
"path": "fiz",
"type": "read"
},
"value": {
"bing": 123
},
"status": 200
}, {
"request": {
"mbean": "object_with_branching_paths",
"attribute": "foo",
"path": "faz",
"type": "read"
},
"value": {
"bang": 456
},
"status": 200
}, {
"request": {
"mbean": "object_with_attribute_and_path",
"attribute": "biz",
"path": "baz",
"type": "read"
},
"value": {
"bing": 123,
"bang": 456
},
"status": 200
}, {
"request": {
"mbean": "object_with_key_pattern:test=*",
"type": "read"
},
"value": {
"object_with_key_pattern:test=foo": {
"fiz": 123
},
"object_with_key_pattern:test=bar": {
"biz": 456
}
},
"status": 200
}]`
server := setupServer(http.StatusOK, response)
defer server.Close()
plugin := setupPlugin(t, fmt.Sprintf(config, server.URL))
var acc testutil.Accumulator
assert.NoError(t, plugin.Gather(&acc))
acc.AssertContainsTaggedFields(t, "object_without_attribute", map[string]interface{}{
"biz": 123.0,
"baz": 456.0,
}, map[string]string{
"jolokia_agent_url": server.URL,
})
acc.AssertContainsTaggedFields(t, "object_with_attribute", map[string]interface{}{
"biz.fiz": 123.0,
"biz.faz": 456.0,
}, map[string]string{
"jolokia_agent_url": server.URL,
})
acc.AssertContainsTaggedFields(t, "object_with_attribute_and_path", map[string]interface{}{
"biz.baz.bing": 123.0,
"biz.baz.bang": 456.0,
}, map[string]string{
"jolokia_agent_url": server.URL,
})
acc.AssertContainsTaggedFields(t, "object_with_branching_paths", map[string]interface{}{
"foo.fiz.bing": 123.0,
"foo.faz.bang": 456.0,
}, map[string]string{
"jolokia_agent_url": server.URL,
})
acc.AssertContainsTaggedFields(t, "object_with_key_pattern", map[string]interface{}{
"fiz": 123.0,
}, map[string]string{
"test": "foo",
"jolokia_agent_url": server.URL,
})
acc.AssertContainsTaggedFields(t, "object_with_key_pattern", map[string]interface{}{
"biz": 456.0,
}, map[string]string{
"test": "bar",
"jolokia_agent_url": server.URL,
})
}
func TestJolokia2_StatusCodes(t *testing.T) {
config := `
[jolokia2_agent]
urls = ["%s"]
[[jolokia2_agent.metric]]
name = "ok"
mbean = "ok"
[[jolokia2_agent.metric]]
name = "not_found"
mbean = "not_found"
[[jolokia2_agent.metric]]
name = "unknown"
mbean = "unknown"`
response := `[{
"request": {
"mbean": "ok",
"type": "read"
},
"value": 1,
"status": 200
}, {
"request": {
"mbean": "not_found",
"type": "read"
},
"status": 404
}, {
"request": {
"mbean": "unknown",
"type": "read"
},
"status": 500
}]`
server := setupServer(http.StatusOK, response)
defer server.Close()
plugin := setupPlugin(t, fmt.Sprintf(config, server.URL))
var acc testutil.Accumulator
assert.NoError(t, plugin.Gather(&acc))
acc.AssertContainsTaggedFields(t, "ok", map[string]interface{}{
"value": 1.0,
}, map[string]string{
"jolokia_agent_url": server.URL,
})
acc.AssertDoesNotContainMeasurement(t, "not_found")
acc.AssertDoesNotContainMeasurement(t, "unknown")
}
func TestJolokia2_TagRenaming(t *testing.T) {
config := `
[jolokia2_agent]
default_tag_prefix = "DEFAULT_PREFIX_"
urls = ["%s"]
[[jolokia2_agent.metric]]
name = "default_tag_prefix"
mbean = "default_tag_prefix:biz=baz,fiz=faz"
tag_keys = ["biz", "fiz"]
[[jolokia2_agent.metric]]
name = "custom_tag_prefix"
mbean = "custom_tag_prefix:biz=baz,fiz=faz"
tag_keys = ["biz", "fiz"]
tag_prefix = "CUSTOM_PREFIX_"`
response := `[{
"request": {
"mbean": "default_tag_prefix:biz=baz,fiz=faz",
"type": "read"
},
"value": 123,
"status": 200
}, {
"request": {
"mbean": "custom_tag_prefix:biz=baz,fiz=faz",
"type": "read"
},
"value": 123,
"status": 200
}]`
server := setupServer(http.StatusOK, response)
defer server.Close()
plugin := setupPlugin(t, fmt.Sprintf(config, server.URL))
var acc testutil.Accumulator
assert.NoError(t, plugin.Gather(&acc))
acc.AssertContainsTaggedFields(t, "default_tag_prefix", map[string]interface{}{
"value": 123.0,
}, map[string]string{
"DEFAULT_PREFIX_biz": "baz",
"DEFAULT_PREFIX_fiz": "faz",
"jolokia_agent_url": server.URL,
})
acc.AssertContainsTaggedFields(t, "custom_tag_prefix", map[string]interface{}{
"value": 123.0,
}, map[string]string{
"CUSTOM_PREFIX_biz": "baz",
"CUSTOM_PREFIX_fiz": "faz",
"jolokia_agent_url": server.URL,
})
}
func TestJolokia2_FieldRenaming(t *testing.T) {
config := `
[jolokia2_agent]
default_field_prefix = "DEFAULT_PREFIX_"
default_field_separator = "_DEFAULT_SEPARATOR_"
urls = ["%s"]
[[jolokia2_agent.metric]]
name = "default_field_modifiers"
mbean = "default_field_modifiers"
[[jolokia2_agent.metric]]
name = "custom_field_modifiers"
mbean = "custom_field_modifiers"
field_prefix = "CUSTOM_PREFIX_"
field_separator = "_CUSTOM_SEPARATOR_"
[[jolokia2_agent.metric]]
name = "field_prefix_substitution"
mbean = "field_prefix_substitution:foo=*"
field_prefix = "$1_"
[[jolokia2_agent.metric]]
name = "field_name_substitution"
mbean = "field_name_substitution:foo=*"
field_prefix = ""
field_name = "$1"`
response := `[{
"request": {
"mbean": "default_field_modifiers",
"type": "read"
},
"value": {
"hello": { "world": 123 }
},
"status": 200
}, {
"request": {
"mbean": "custom_field_modifiers",
"type": "read"
},
"value": {
"hello": { "world": 123 }
},
"status": 200
}, {
"request": {
"mbean": "field_prefix_substitution:foo=*",
"type": "read"
},
"value": {
"field_prefix_substitution:foo=biz": 123,
"field_prefix_substitution:foo=baz": 456
},
"status": 200
}, {
"request": {
"mbean": "field_name_substitution:foo=*",
"type": "read"
},
"value": {
"field_name_substitution:foo=biz": 123,
"field_name_substitution:foo=baz": 456
},
"status": 200
}]`
server := setupServer(http.StatusOK, response)
defer server.Close()
plugin := setupPlugin(t, fmt.Sprintf(config, server.URL))
var acc testutil.Accumulator
assert.NoError(t, plugin.Gather(&acc))
acc.AssertContainsTaggedFields(t, "default_field_modifiers", map[string]interface{}{
"DEFAULT_PREFIX_hello_DEFAULT_SEPARATOR_world": 123.0,
}, map[string]string{
"jolokia_agent_url": server.URL,
})
acc.AssertContainsTaggedFields(t, "custom_field_modifiers", map[string]interface{}{
"CUSTOM_PREFIX_hello_CUSTOM_SEPARATOR_world": 123.0,
}, map[string]string{
"jolokia_agent_url": server.URL,
})
acc.AssertContainsTaggedFields(t, "field_prefix_substitution", map[string]interface{}{
"biz_value": 123.0,
"baz_value": 456.0,
}, map[string]string{
"jolokia_agent_url": server.URL,
})
acc.AssertContainsTaggedFields(t, "field_name_substitution", map[string]interface{}{
"biz": 123.0,
"baz": 456.0,
}, map[string]string{
"jolokia_agent_url": server.URL,
})
}
func TestJolokia2_MetricMbeanMatching(t *testing.T) {
config := `
[jolokia2_agent]
urls = ["%s"]
[[jolokia2_agent.metric]]
name = "mbean_name_and_object_keys"
mbean = "test1:foo=bar,fizz=buzz"
[[jolokia2_agent.metric]]
name = "mbean_name_and_unordered_object_keys"
mbean = "test2:fizz=buzz,foo=bar"
[[jolokia2_agent.metric]]
name = "mbean_name_and_attributes"
mbean = "test3"
paths = ["foo", "bar"]
[[jolokia2_agent.metric]]
name = "mbean_name_and_attribute_with_paths"
mbean = "test4"
paths = ["flavor/chocolate", "flavor/strawberry"]
`
response := `[{
"request": {
"mbean": "test1:foo=bar,fizz=buzz",
"type": "read"
},
"value": 123,
"status": 200
}, {
"request": {
"mbean": "test2:foo=bar,fizz=buzz",
"type": "read"
},
"value": 123,
"status": 200
}, {
"request": {
"mbean": "test3",
"attribute": "foo",
"type": "read"
},
"value": 123,
"status": 200
}, {
"request": {
"mbean": "test3",
"attribute": "bar",
"type": "read"
},
"value": 456,
"status": 200
}, {
"request": {
"mbean": "test4",
"attribute": "flavor",
"path": "chocolate",
"type": "read"
},
"value": 123,
"status": 200
}, {
"request": {
"mbean": "test4",
"attribute": "flavor",
"path": "strawberry",
"type": "read"
},
"value": 456,
"status": 200
}]`
server := setupServer(http.StatusOK, response)
defer server.Close()
plugin := setupPlugin(t, fmt.Sprintf(config, server.URL))
var acc testutil.Accumulator
assert.NoError(t, plugin.Gather(&acc))
acc.AssertContainsTaggedFields(t, "mbean_name_and_object_keys", map[string]interface{}{
"value": 123.0,
}, map[string]string{
"jolokia_agent_url": server.URL,
})
acc.AssertContainsTaggedFields(t, "mbean_name_and_unordered_object_keys", map[string]interface{}{
"value": 123.0,
}, map[string]string{
"jolokia_agent_url": server.URL,
})
acc.AssertContainsTaggedFields(t, "mbean_name_and_attributes", map[string]interface{}{
"foo": 123.0,
"bar": 456.0,
}, map[string]string{
"jolokia_agent_url": server.URL,
})
acc.AssertContainsTaggedFields(t, "mbean_name_and_attribute_with_paths", map[string]interface{}{
"flavor.chocolate": 123.0,
"flavor.strawberry": 456.0,
}, map[string]string{
"jolokia_agent_url": server.URL,
})
}
func TestJolokia2_MetricCompaction(t *testing.T) {
config := `
[jolokia2_agent]
urls = ["%s"]
[[jolokia2_agent.metric]]
name = "compact_metric"
mbean = "scalar_value:flavor=chocolate"
tag_keys = ["flavor"]
[[jolokia2_agent.metric]]
name = "compact_metric"
mbean = "scalar_value:flavor=vanilla"
tag_keys = ["flavor"]
[[jolokia2_agent.metric]]
name = "compact_metric"
mbean = "object_value1:flavor=chocolate"
tag_keys = ["flavor"]
[[jolokia2_agent.metric]]
name = "compact_metric"
mbean = "object_value2:flavor=chocolate"
tag_keys = ["flavor"]`
response := `[{
"request": {
"mbean": "scalar_value:flavor=chocolate",
"type": "read"
},
"value": 123,
"status": 200
}, {
"request": {
"mbean": "scalar_value:flavor=vanilla",
"type": "read"
},
"value": 999,
"status": 200
}, {
"request": {
"mbean": "object_value1:flavor=chocolate",
"type": "read"
},
"value": {
"foo": 456
},
"status": 200
}, {
"request": {
"mbean": "object_value2:flavor=chocolate",
"type": "read"
},
"value": {
"bar": 789
},
"status": 200
}]`
server := setupServer(http.StatusOK, response)
defer server.Close()
plugin := setupPlugin(t, fmt.Sprintf(config, server.URL))
var acc testutil.Accumulator
assert.NoError(t, plugin.Gather(&acc))
acc.AssertContainsTaggedFields(t, "compact_metric", map[string]interface{}{
"value": 123.0,
"foo": 456.0,
"bar": 789.0,
}, map[string]string{
"flavor": "chocolate",
"jolokia_agent_url": server.URL,
})
acc.AssertContainsTaggedFields(t, "compact_metric", map[string]interface{}{
"value": 999.0,
}, map[string]string{
"flavor": "vanilla",
"jolokia_agent_url": server.URL,
})
}
func TestJolokia2_ProxyTargets(t *testing.T) {
config := `
[jolokia2_proxy]
url = "%s"
[[jolokia2_proxy.target]]
url = "service:jmx:rmi:///jndi/rmi://target1:9010/jmxrmi"
[[jolokia2_proxy.target]]
url = "service:jmx:rmi:///jndi/rmi://target2:9010/jmxrmi"
[[jolokia2_proxy.metric]]
name = "hello"
mbean = "hello:foo=bar"`
response := `[{
"request": {
"type": "read",
"mbean": "hello:foo=bar",
"target": {
"url": "service:jmx:rmi:///jndi/rmi://target1:9010/jmxrmi"
}
},
"value": 123,
"status": 200
}, {
"request": {
"type": "read",
"mbean": "hello:foo=bar",
"target": {
"url": "service:jmx:rmi:///jndi/rmi://target2:9010/jmxrmi"
}
},
"value": 456,
"status": 200
}]`
server := setupServer(http.StatusOK, response)
defer server.Close()
plugin := setupPlugin(t, fmt.Sprintf(config, server.URL))
var acc testutil.Accumulator
assert.NoError(t, plugin.Gather(&acc))
acc.AssertContainsTaggedFields(t, "hello", map[string]interface{}{
"value": 123.0,
}, map[string]string{
"jolokia_proxy_url": server.URL,
"jolokia_agent_url": "service:jmx:rmi:///jndi/rmi://target1:9010/jmxrmi",
})
acc.AssertContainsTaggedFields(t, "hello", map[string]interface{}{
"value": 456.0,
}, map[string]string{
"jolokia_proxy_url": server.URL,
"jolokia_agent_url": "service:jmx:rmi:///jndi/rmi://target2:9010/jmxrmi",
})
}
func setupServer(status int, resp string) *httptest.Server {
return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
fmt.Fprintln(w, resp)
}))
}
func setupPlugin(t *testing.T, conf string) telegraf.Input {
table, err := toml.Parse([]byte(conf))
if err != nil {
t.Fatalf("Unable to parse config! %v", err)
}
for name, object := range table.Fields {
switch name {
case "jolokia2_agent":
plugin := JolokiaAgent{
Metrics: []MetricConfig{},
DefaultFieldSeparator: ".",
}
if err := toml.UnmarshalTable(object.(*ast.Table), &plugin); err != nil {
t.Fatalf("Unable to parse jolokia_agent plugin config! %v", err)
}
return &plugin
case "jolokia2_proxy":
plugin := JolokiaProxy{
Metrics: []MetricConfig{},
DefaultFieldSeparator: ".",
}
if err := toml.UnmarshalTable(object.(*ast.Table), &plugin); err != nil {
t.Fatalf("Unable to parse jolokia_proxy plugin config! %v", err)
}
return &plugin
}
}
return nil
}

View File

@@ -1,128 +0,0 @@
package jolokia2
import "strings"
// A MetricConfig represents a TOML form of
// a Metric with some optional fields.
type MetricConfig struct {
Name string
Mbean string
Paths []string
FieldName *string
FieldPrefix *string
FieldSeparator *string
TagPrefix *string
TagKeys []string
}
// A Metric represents a specification for a
// Jolokia read request, and the transformations
// to apply to points generated from the responses.
type Metric struct {
Name string
Mbean string
Paths []string
FieldName string
FieldPrefix string
FieldSeparator string
TagPrefix string
TagKeys []string
mbeanDomain string
mbeanProperties []string
}
func NewMetric(config MetricConfig, defaultFieldPrefix, defaultFieldSeparator, defaultTagPrefix string) Metric {
metric := Metric{
Name: config.Name,
Mbean: config.Mbean,
Paths: config.Paths,
TagKeys: config.TagKeys,
}
if config.FieldName != nil {
metric.FieldName = *config.FieldName
}
if config.FieldPrefix == nil {
metric.FieldPrefix = defaultFieldPrefix
} else {
metric.FieldPrefix = *config.FieldPrefix
}
if config.FieldSeparator == nil {
metric.FieldSeparator = defaultFieldSeparator
} else {
metric.FieldSeparator = *config.FieldSeparator
}
if config.TagPrefix == nil {
metric.TagPrefix = defaultTagPrefix
} else {
metric.TagPrefix = *config.TagPrefix
}
mbeanDomain, mbeanProperties := parseMbeanObjectName(config.Mbean)
metric.mbeanDomain = mbeanDomain
metric.mbeanProperties = mbeanProperties
return metric
}
func (m Metric) MatchObjectName(name string) bool {
if name == m.Mbean {
return true
}
mbeanDomain, mbeanProperties := parseMbeanObjectName(name)
if mbeanDomain != m.mbeanDomain {
return false
}
if len(mbeanProperties) != len(m.mbeanProperties) {
return false
}
NEXT_PROPERTY:
for _, mbeanProperty := range m.mbeanProperties {
for i := range mbeanProperties {
if mbeanProperties[i] == mbeanProperty {
continue NEXT_PROPERTY
}
}
return false
}
return true
}
func (m Metric) MatchAttributeAndPath(attribute, innerPath string) bool {
path := attribute
if innerPath != "" {
path = path + "/" + innerPath
}
for i := range m.Paths {
if path == m.Paths[i] {
return true
}
}
return false
}
func parseMbeanObjectName(name string) (string, []string) {
index := strings.Index(name, ":")
if index == -1 {
return name, []string{}
}
domain := name[:index]
if index+1 >= len(name) {
return domain, []string{}
}
return domain, strings.Split(name[index+1:], ",")
}
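
MatchObjectName above treats an mbean name as a domain plus an unordered property set. A compact sketch of that matching rule, using hypothetical helpers rather than the plugin's API (the sketch canonicalizes by sorting, where the plugin uses a nested scan):

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

// parse splits "domain:k=v,k=v" into a domain and a sorted property list.
func parse(name string) (string, []string) {
	parts := strings.SplitN(name, ":", 2)
	if len(parts) == 1 {
		return parts[0], nil
	}
	props := strings.Split(parts[1], ",")
	sort.Strings(props)
	return parts[0], props
}

// match requires the same domain and the same property set, order-independent.
func match(a, b string) bool {
	da, pa := parse(a)
	db, pb := parse(b)
	if da != db || len(pa) != len(pb) {
		return false
	}
	for i := range pa {
		if pa[i] != pb[i] {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(match("test2:fizz=buzz,foo=bar", "test2:foo=bar,fizz=buzz")) // true
	fmt.Println(match("test2:fizz=buzz", "test3:fizz=buzz"))                 // false
}
```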

View File

@@ -1,271 +0,0 @@
package jolokia2
import (
"fmt"
"strings"
)
type point struct {
Tags map[string]string
Fields map[string]interface{}
}
type pointBuilder struct {
metric Metric
objectAttributes []string
objectPath string
substitutions []string
}
func newPointBuilder(metric Metric, attributes []string, path string) *pointBuilder {
return &pointBuilder{
metric: metric,
objectAttributes: attributes,
objectPath: path,
substitutions: makeSubstitutionList(metric.Mbean),
}
}
// Build generates a point for a given mbean name/pattern and value object.
func (pb *pointBuilder) Build(mbean string, value interface{}) []point {
hasPattern := strings.Contains(mbean, "*")
if !hasPattern {
value = map[string]interface{}{mbean: value}
}
valueMap, ok := value.(map[string]interface{})
if !ok {
// Non-pattern values are wrapped in a map above, so a non-map here is
// unexpected; skip the value rather than panic.
return []point{}
}
points := make([]point, 0)
for mbean, value := range valueMap {
points = append(points, point{
Tags: pb.extractTags(mbean),
Fields: pb.extractFields(mbean, value),
})
}
return compactPoints(points)
}
// extractTags generates the map of tags for a given mbean name/pattern.
func (pb *pointBuilder) extractTags(mbean string) map[string]string {
propertyMap := makePropertyMap(mbean)
tagMap := make(map[string]string)
for key, value := range propertyMap {
if pb.includeTag(key) {
tagName := pb.formatTagName(key)
tagMap[tagName] = value
}
}
return tagMap
}
func (pb *pointBuilder) includeTag(tagName string) bool {
for _, t := range pb.metric.TagKeys {
if tagName == t {
return true
}
}
return false
}
func (pb *pointBuilder) formatTagName(tagName string) string {
if tagName == "" {
return ""
}
if tagPrefix := pb.metric.TagPrefix; tagPrefix != "" {
return tagPrefix + tagName
}
return tagName
}
// extractFields generates the map of fields for a given mbean name
// and value object.
func (pb *pointBuilder) extractFields(mbean string, value interface{}) map[string]interface{} {
fieldMap := make(map[string]interface{})
valueMap, ok := value.(map[string]interface{})
if ok {
// complex value
if len(pb.objectAttributes) == 0 {
// if there were no attributes requested,
// then the keys are attributes
pb.fillFields("", valueMap, fieldMap)
} else if len(pb.objectAttributes) == 1 {
// if there was a single attribute requested,
// then the keys are the attribute's properties
fieldName := pb.formatFieldName(pb.objectAttributes[0], pb.objectPath)
pb.fillFields(fieldName, valueMap, fieldMap)
} else {
// if there were multiple attributes requested,
// then the keys are the attribute names
for _, attribute := range pb.objectAttributes {
fieldName := pb.formatFieldName(attribute, pb.objectPath)
pb.fillFields(fieldName, valueMap[attribute], fieldMap)
}
}
} else {
// scalar value
var fieldName string
if len(pb.objectAttributes) == 0 {
fieldName = pb.formatFieldName(defaultFieldName, pb.objectPath)
} else {
fieldName = pb.formatFieldName(pb.objectAttributes[0], pb.objectPath)
}
pb.fillFields(fieldName, value, fieldMap)
}
if len(pb.substitutions) > 1 {
pb.applySubstitutions(mbean, fieldMap)
}
return fieldMap
}
// formatFieldName generates a field name from the supplied attribute and
// path. The return value has the configured FieldPrefix and FieldSeparator
// instructions applied.
func (pb *pointBuilder) formatFieldName(attribute, path string) string {
fieldName := attribute
fieldPrefix := pb.metric.FieldPrefix
fieldSeparator := pb.metric.FieldSeparator
if fieldPrefix != "" {
fieldName = fieldPrefix + fieldName
}
if path != "" {
fieldName = fieldName + fieldSeparator + strings.Replace(path, "/", fieldSeparator, -1)
}
return fieldName
}
// fillFields recurses into the supplied value object, generating a named field
// for every value it discovers.
func (pb *pointBuilder) fillFields(name string, value interface{}, fieldMap map[string]interface{}) {
if valueMap, ok := value.(map[string]interface{}); ok {
// keep going until we get to something that is not a map
for key, innerValue := range valueMap {
var innerName string
if name == "" {
innerName = pb.metric.FieldPrefix + key
} else {
innerName = name + pb.metric.FieldSeparator + key
}
pb.fillFields(innerName, innerValue, fieldMap)
}
return
}
if pb.metric.FieldName != "" {
name = pb.metric.FieldName
if prefix := pb.metric.FieldPrefix; prefix != "" {
name = prefix + name
}
}
if name == "" {
name = defaultFieldName
}
fieldMap[name] = value
}
// applySubstitutions updates all the keys in the supplied map
// of fields to account for $1-style substitution instructions.
func (pb *pointBuilder) applySubstitutions(mbean string, fieldMap map[string]interface{}) {
properties := makePropertyMap(mbean)
for i, subKey := range pb.substitutions[1:] {
symbol := fmt.Sprintf("$%d", i+1)
substitution := properties[subKey]
for fieldName, fieldValue := range fieldMap {
newFieldName := strings.Replace(fieldName, symbol, substitution, -1)
if fieldName != newFieldName {
fieldMap[newFieldName] = fieldValue
delete(fieldMap, fieldName)
}
}
}
}
// makePropertyMap returns the mbean property-key list as
// a map: foo:x=y becomes map[string]string{"x": "y"}
func makePropertyMap(mbean string) map[string]string {
props := make(map[string]string)
object := strings.SplitN(mbean, ":", 2)
domain := object[0]
if domain != "" && len(object) == 2 {
list := object[1]
for _, keyProperty := range strings.Split(list, ",") {
pair := strings.SplitN(keyProperty, "=", 2)
if len(pair) != 2 {
continue
}
if key := pair[0]; key != "" {
props[key] = pair[1]
}
}
}
return props
}
// makeSubstitutionList returns an array of values to
// use as substitutions when renaming fields
// with the $1..$N syntax. The first item in the list
// is always the mbean domain.
func makeSubstitutionList(mbean string) []string {
subs := make([]string, 0)
object := strings.SplitN(mbean, ":", 2)
domain := object[0]
if domain != "" && len(object) == 2 {
subs = append(subs, domain)
list := object[1]
for _, keyProperty := range strings.Split(list, ",") {
pair := strings.SplitN(keyProperty, "=", 2)
if len(pair) != 2 {
continue
}
key := pair[0]
if key == "" {
continue
}
property := pair[1]
if !strings.Contains(property, "*") {
continue
}
subs = append(subs, key)
}
}
return subs
}
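
To make the $1..$N renaming concrete: for a configured pattern such as kafka.controller:name=*,type=* (as in the kafka.conf example earlier), the wildcard property values of the matched mbean become the substitution values, in property-list order. A minimal worked sketch under those assumptions:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	pattern := "kafka.controller:name=*,type=*" // configured mbean
	matched := "kafka.controller:name=ActiveControllerCount,type=KafkaController"

	// Collect property keys whose configured value contains a wildcard.
	keys := []string{}
	for _, kv := range strings.Split(strings.SplitN(pattern, ":", 2)[1], ",") {
		pair := strings.SplitN(kv, "=", 2)
		if len(pair) == 2 && strings.Contains(pair[1], "*") {
			keys = append(keys, pair[0])
		}
	}

	// Look up the matched values and substitute into a "$1." field prefix.
	props := map[string]string{}
	for _, kv := range strings.Split(strings.SplitN(matched, ":", 2)[1], ",") {
		pair := strings.SplitN(kv, "=", 2)
		props[pair[0]] = pair[1]
	}
	field := "$1.Value"
	for i, key := range keys {
		field = strings.Replace(field, fmt.Sprintf("$%d", i+1), props[key], -1)
	}
	fmt.Println(field) // ActiveControllerCount.Value
}
```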

View File

@@ -30,9 +30,6 @@ type Kubernetes struct {
// Use SSL but skip chain & host verification
InsecureSkipVerify bool
// HTTP Timeout specified as a string - 3s, 1m, 1h
ResponseTimeout internal.Duration
RoundTripper http.RoundTripper
}
@@ -43,9 +40,6 @@ var sampleConfig = `
## Use bearer token for authorization
# bearer_token = /path/to/bearer/token
## Set response_timeout (default 5 seconds)
# response_timeout = "5s"
## Optional SSL Config
# ssl_ca = /path/to/cafile
# ssl_cert = /path/to/certfile
@@ -107,14 +101,10 @@ func (k *Kubernetes) gatherSummary(baseURL string, acc telegraf.Accumulator) err
}
if k.RoundTripper == nil {
// Set default values
if k.ResponseTimeout.Duration < time.Second {
k.ResponseTimeout.Duration = time.Second * 5
}
k.RoundTripper = &http.Transport{
TLSHandshakeTimeout: 5 * time.Second,
TLSClientConfig: tlsCfg,
ResponseHeaderTimeout: k.ResponseTimeout.Duration,
ResponseHeaderTimeout: 3 * time.Second,
}
}

View File

@@ -15,15 +15,12 @@ regex patterns.
## /var/log/*/*.log -> find all .log files with a parent dir in /var/log
## /var/log/apache.log -> only tail the apache log file
files = ["/var/log/apache/access.log"]
## Read files that currently exist from the beginning. Files that are created
## while telegraf is running (and that match the "files" globs) will always
## be read from the beginning.
from_beginning = false
## Method used to watch for file updates. Can be either "inotify" or "poll".
# watch_method = "inotify"
## Parse logstash-style "grok" patterns:
## Telegraf built-in parsing patterns: https://goo.gl/dkay10
[inputs.logparser.grok]
@@ -37,15 +34,15 @@ regex patterns.
## Name of the outputted measurement name.
measurement = "apache_access_log"
## Full path(s) to custom pattern files.
custom_pattern_files = []
## Custom patterns can also be defined here. Put one pattern per line.
custom_patterns = '''
'''
## Timezone allows you to provide an override for timestamps that
## don't already include an offset
## e.g. 04/06/2016 12:41:45 data one two 5.43µs
##
@@ -100,7 +97,7 @@ current time.
- ts-rfc3339 ("2006-01-02T15:04:05Z07:00")
- ts-rfc3339nano ("2006-01-02T15:04:05.999999999Z07:00")
- ts-httpd ("02/Jan/2006:15:04:05 -0700")
- ts-epoch (seconds since unix epoch, may contain decimal)
- ts-epoch (seconds since unix epoch)
- ts-epochnano (nanoseconds since unix epoch)
- ts-"CUSTOM"
@@ -130,19 +127,6 @@ This example input and config parses a file using a custom timestamp conversion:
patterns = ['%{TIMESTAMP_ISO8601:timestamp:ts-"2006-01-02 15:04:05"} value=%{NUMBER:value:int}']
```
This example input and config parses a file using a timestamp in unix time:
```
1466004605 value=42
1466004605.123456789 value=42
```
```toml
[[inputs.logparser]]
[inputs.logparser.grok]
patterns = ['%{NUMBER:timestamp:ts-epoch} value=%{NUMBER:value:int}']
```
This example parses a file using a built-in conversion and a custom pattern:
```
@@ -161,7 +145,7 @@ Wed Apr 12 13:10:34 PST 2017 value=42
For cases where the timestamp itself is without offset, the `timezone` config var is available
to denote an offset. By default (with `timezone` omitted, blank, or set to `"UTC"`), the times
are processed as if in the UTC timezone. If specified as `timezone = "Local"`, the timestamp
will be processed based on the current machine timezone configuration. Lastly, if using a
timezone from the list of Unix [timezones](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones), the logparser grok will attempt to offset
the timestamp accordingly. See test cases for more detailed examples.
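
A minimal sketch of that override behavior using Go's standard time package: the same timestamp string resolves to different instants depending on the location it is parsed in (America/New_York here is only an example zone):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	const layout = "2006-01-02 15:04:05"
	const stamp = "2017-04-12 13:10:34"

	// Parsed as UTC: the default when no timezone override is configured.
	utc, _ := time.ParseInLocation(layout, stamp, time.UTC)

	// Parsed in a tz-database zone: shifted by that zone's offset.
	loc, err := time.LoadLocation("America/New_York")
	if err != nil {
		fmt.Println(err)
		return
	}
	local, _ := time.ParseInLocation(layout, stamp, loc)

	fmt.Println(utc.Unix(), local.Unix()) // differ by the zone offset
}
```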

View File

@@ -253,30 +253,12 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) {
case STRING:
fields[k] = strings.Trim(v, `"`)
case EPOCH:
parts := strings.SplitN(v, ".", 2)
if len(parts) == 0 {
log.Printf("E! Error parsing %s to timestamp: %s", v, err)
break
}
sec, err := strconv.ParseInt(parts[0], 10, 64)
iv, err := strconv.ParseInt(v, 10, 64)
if err != nil {
log.Printf("E! Error parsing %s to timestamp: %s", v, err)
break
log.Printf("E! Error parsing %s to int: %s", v, err)
} else {
timestamp = time.Unix(iv, 0)
}
ts := time.Unix(sec, 0)
if len(parts) == 2 {
padded := fmt.Sprintf("%-9s", parts[1])
nsString := strings.Replace(padded[:9], " ", "0", -1)
nanosec, err := strconv.ParseInt(nsString, 10, 64)
if err != nil {
log.Printf("E! Error parsing %s to timestamp: %s", v, err)
break
}
ts = ts.Add(time.Duration(nanosec) * time.Nanosecond)
}
timestamp = ts
case EPOCH_NANO:
iv, err := strconv.ParseInt(v, 10, 64)
if err != nil {

View File

@@ -385,77 +385,6 @@ func TestParseEpoch(t *testing.T) {
assert.Equal(t, time.Unix(1466004605, 0), metricA.Time())
}
func TestParseEpochDecimal(t *testing.T) {
var tests = []struct {
name string
line string
noMatch bool
err error
tags map[string]string
fields map[string]interface{}
time time.Time
}{
{
name: "ns precision",
line: "1466004605.359052000 value=42",
tags: map[string]string{},
fields: map[string]interface{}{
"value": int64(42),
},
time: time.Unix(0, 1466004605359052000),
},
{
name: "ms precision",
line: "1466004605.359 value=42",
tags: map[string]string{},
fields: map[string]interface{}{
"value": int64(42),
},
time: time.Unix(0, 1466004605359000000),
},
{
name: "second precision",
line: "1466004605 value=42",
tags: map[string]string{},
fields: map[string]interface{}{
"value": int64(42),
},
time: time.Unix(0, 1466004605000000000),
},
{
name: "sub ns precision",
line: "1466004605.123456789123 value=42",
tags: map[string]string{},
fields: map[string]interface{}{
"value": int64(42),
},
time: time.Unix(0, 1466004605123456789),
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
parser := &Parser{
Patterns: []string{"%{NUMBER:ts:ts-epoch} value=%{NUMBER:value:int}"},
}
assert.NoError(t, parser.Compile())
m, err := parser.ParseLine(tt.line)
if tt.noMatch {
require.Nil(t, m)
require.Nil(t, err)
return
}
require.Equal(t, tt.err, err)
require.NotNil(t, m)
require.Equal(t, tt.tags, m.Tags())
require.Equal(t, tt.fields, m.Fields())
require.Equal(t, tt.time, m.Time())
})
}
}
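
The decimal-epoch support removed in the parser hunk above pads the fractional digits out to nanosecond precision before adding them to the seconds value, which is exactly what the ns/ms/sub-ns cases in this test exercise. A self-contained sketch of that padding step:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
	"time"
)

func parseEpoch(v string) time.Time {
	parts := strings.SplitN(v, ".", 2)
	sec, _ := strconv.ParseInt(parts[0], 10, 64)
	ts := time.Unix(sec, 0)
	if len(parts) == 2 {
		// Right-pad to 9 digits, then truncate: "359" -> "359000000",
		// and sub-ns digits beyond the ninth are dropped.
		padded := fmt.Sprintf("%-9s", parts[1])
		ns, _ := strconv.ParseInt(strings.Replace(padded[:9], " ", "0", -1), 10, 64)
		ts = ts.Add(time.Duration(ns) * time.Nanosecond)
	}
	return ts
}

func main() {
	fmt.Println(parseEpoch("1466004605.359").UnixNano()) // 1466004605359000000
}
```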
func TestParseEpochErrors(t *testing.T) {
p := &Parser{
Patterns: []string{"%{MYAPP}"},

View File

@@ -19,10 +19,6 @@ import (
"github.com/influxdata/telegraf/plugins/inputs/logparser/grok"
)
const (
defaultWatchMethod = "inotify"
)
// LogParser is the primary interface for the plugin
type LogParser interface {
ParseLine(line string) (telegraf.Metric, error)
@@ -38,7 +34,6 @@ type logEntry struct {
type LogParserPlugin struct {
Files []string
FromBeginning bool
WatchMethod string
tailers map[string]*tail.Tail
lines chan logEntry
@@ -66,9 +61,6 @@ const sampleConfig = `
## be read from the beginning.
from_beginning = false
## Method used to watch for file updates. Can be either "inotify" or "poll".
# watch_method = "inotify"
## Parse logstash-style "grok" patterns:
## Telegraf built-in parsing patterns: https://goo.gl/dkay10
[inputs.logparser.grok]
@@ -175,11 +167,6 @@ func (l *LogParserPlugin) tailNewfiles(fromBeginning bool) error {
seek.Offset = 0
}
var poll bool
if l.WatchMethod == "poll" {
poll = true
}
// Create a "tailer" for each file
for _, filepath := range l.Files {
g, err := globpath.Compile(filepath)
@@ -201,7 +188,6 @@ func (l *LogParserPlugin) tailNewfiles(fromBeginning bool) error {
Follow: true,
Location: &seek,
MustExist: true,
Poll: poll,
Logger: tail.DiscardingLogger,
})
if err != nil {
@@ -299,8 +285,6 @@ func (l *LogParserPlugin) Stop() {
func init() {
inputs.Add("logparser", func() telegraf.Input {
return &LogParserPlugin{
WatchMethod: defaultWatchMethod,
}
return &LogParserPlugin{}
})
}

View File

@@ -367,7 +367,7 @@ func getMetrics(role Role, group string) []string {
ret, ok := m[group]
if !ok {
log.Printf("I! [mesos] Unknown %s metrics group: %s\n", role, group)
log.Printf("I! [mesos] Unkown %s metrics group: %s\n", role, group)
return []string{}
}

View File

@@ -197,7 +197,7 @@ func (c *Client) Send(typ int32, command string) (response *Packet, err error) {
}
// NewClient creates a new Client type, creating the connection
// to the server specified by the host and port arguments. If
// the connection fails, an error is returned.
func NewClient(host string, port int) (client *Client, err error) {
client = new(Client)

View File

@@ -47,7 +47,7 @@ func (s *Minecraft) SampleConfig() string {
return sampleConfig
}
// Gather uses the RCON protocol to collect player and
// Gather uses the RCON protocal to collect player and
// scoreboard stats from a minecraft server.
//var hasClient bool = false
func (s *Minecraft) Gather(acc telegraf.Accumulator) error {

View File

@@ -76,7 +76,7 @@ func newClient(server, port string) (*rcon.Client, error) {
return client, nil
}
// Gather receives all player scoreboard information and returns it per player.
func (r *RCON) Gather(producer RCONClientProducer) ([]string, error) {
if r.client == nil {
var err error

View File

@@ -77,21 +77,6 @@ var WiredTigerStats = map[string]string{
"percent_cache_used": "CacheUsedPercent",
}
var WiredTigerExtStats = map[string]string{
"wtcache_tracked_dirty_bytes": "TrackedDirtyBytes",
"wtcache_current_bytes": "CurrentCachedBytes",
"wtcache_max_bytes_configured": "MaxBytesConfigured",
"wtcache_app_threads_page_read_count": "AppThreadsPageReadCount",
"wtcache_app_threads_page_read_time": "AppThreadsPageReadTime",
"wtcache_app_threads_page_write_count": "AppThreadsPageWriteCount",
"wtcache_bytes_written_from": "BytesWrittenFrom",
"wtcache_bytes_read_into": "BytesReadInto",
"wtcache_pages_evicted_by_app_thread": "PagesEvictedByAppThread",
"wtcache_pages_queued_for_eviction": "PagesQueuedForEviction",
"wtcache_server_evicting_pages": "ServerEvictingPages",
"wtcache_worker_thread_evictingpages": "WorkerThreadEvictingPages",
}
var DbDataStats = map[string]string{
"collections": "Collections",
"objects": "Objects",
@@ -136,11 +121,13 @@ func (d *MongodbData) AddDefaultStats() {
floatVal, _ := strconv.ParseFloat(percentVal, 64)
d.add(key, floatVal)
}
d.addStat(statLine, WiredTigerExtStats)
}
}
func (d *MongodbData) addStat(statLine reflect.Value, stats map[string]string) {
func (d *MongodbData) addStat(
statLine reflect.Value,
stats map[string]string,
) {
for key, value := range stats {
val := statLine.FieldByName(value).Interface()
d.add(key, val)

View File

@@ -70,21 +70,9 @@ func TestAddReplStats(t *testing.T) {
func TestAddWiredTigerStats(t *testing.T) {
d := NewMongodbData(
&StatLine{
StorageEngine: "wiredTiger",
CacheDirtyPercent: 0,
CacheUsedPercent: 0,
TrackedDirtyBytes: 0,
CurrentCachedBytes: 0,
MaxBytesConfigured: 0,
AppThreadsPageReadCount: 0,
AppThreadsPageReadTime: 0,
AppThreadsPageWriteCount: 0,
BytesWrittenFrom: 0,
BytesReadInto: 0,
PagesEvictedByAppThread: 0,
PagesQueuedForEviction: 0,
ServerEvictingPages: 0,
WorkerThreadEvictingPages: 0,
StorageEngine: "wiredTiger",
CacheDirtyPercent: 0,
CacheUsedPercent: 0,
},
tags,
)

View File

@@ -127,19 +127,9 @@ type ConcurrentTransStats struct {
// CacheStats stores cache statistics for WiredTiger.
type CacheStats struct {
TrackedDirtyBytes int64 `bson:"tracked dirty bytes in the cache"`
CurrentCachedBytes int64 `bson:"bytes currently in the cache"`
MaxBytesConfigured int64 `bson:"maximum bytes configured"`
AppThreadsPageReadCount int64 `bson:"application threads page read from disk to cache count"`
AppThreadsPageReadTime int64 `bson:"application threads page read from disk to cache time (usecs)"`
AppThreadsPageWriteCount int64 `bson:"application threads page write from cache to disk count"`
AppThreadsPageWriteTime int64 `bson:"application threads page write from cache to disk time (usecs)"`
BytesWrittenFrom int64 `bson:"bytes written from cache"`
BytesReadInto int64 `bson:"bytes read into cache"`
PagesEvictedByAppThread int64 `bson:"pages evicted by application threads"`
PagesQueuedForEviction int64 `bson:"pages queued for eviction"`
ServerEvictingPages int64 `bson:"eviction server evicting pages"`
WorkerThreadEvictingPages int64 `bson:"eviction worker thread evicting pages"`
TrackedDirtyBytes int64 `bson:"tracked dirty bytes in the cache"`
CurrentCachedBytes int64 `bson:"bytes currently in the cache"`
MaxBytesConfigured int64 `bson:"maximum bytes configured"`
}
// TransactionStats stores transaction checkpoints in WiredTiger.
@@ -416,20 +406,6 @@ type StatLine struct {
CacheDirtyPercent float64
CacheUsedPercent float64
// Cache utilization extended (wiredtiger only)
TrackedDirtyBytes int64
CurrentCachedBytes int64
MaxBytesConfigured int64
AppThreadsPageReadCount int64
AppThreadsPageReadTime int64
AppThreadsPageWriteCount int64
BytesWrittenFrom int64
BytesReadInto int64
PagesEvictedByAppThread int64
PagesQueuedForEviction int64
ServerEvictingPages int64
WorkerThreadEvictingPages int64
// Replicated Opcounter fields
InsertR, QueryR, UpdateR, DeleteR, GetMoreR, CommandR int64
ReplLag int64
@@ -558,19 +534,6 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec
returnVal.Flushes = newStat.WiredTiger.Transaction.TransCheckpoints - oldStat.WiredTiger.Transaction.TransCheckpoints
returnVal.CacheDirtyPercent = float64(newStat.WiredTiger.Cache.TrackedDirtyBytes) / float64(newStat.WiredTiger.Cache.MaxBytesConfigured)
returnVal.CacheUsedPercent = float64(newStat.WiredTiger.Cache.CurrentCachedBytes) / float64(newStat.WiredTiger.Cache.MaxBytesConfigured)
returnVal.TrackedDirtyBytes = newStat.WiredTiger.Cache.TrackedDirtyBytes
returnVal.CurrentCachedBytes = newStat.WiredTiger.Cache.CurrentCachedBytes
returnVal.MaxBytesConfigured = newStat.WiredTiger.Cache.MaxBytesConfigured
returnVal.AppThreadsPageReadCount = newStat.WiredTiger.Cache.AppThreadsPageReadCount
returnVal.AppThreadsPageReadTime = newStat.WiredTiger.Cache.AppThreadsPageReadTime
returnVal.AppThreadsPageWriteCount = newStat.WiredTiger.Cache.AppThreadsPageWriteCount
returnVal.BytesWrittenFrom = newStat.WiredTiger.Cache.BytesWrittenFrom
returnVal.BytesReadInto = newStat.WiredTiger.Cache.BytesReadInto
returnVal.PagesEvictedByAppThread = newStat.WiredTiger.Cache.PagesEvictedByAppThread
returnVal.PagesQueuedForEviction = newStat.WiredTiger.Cache.PagesQueuedForEviction
returnVal.ServerEvictingPages = newStat.WiredTiger.Cache.ServerEvictingPages
returnVal.WorkerThreadEvictingPages = newStat.WiredTiger.Cache.WorkerThreadEvictingPages
} else if newStat.BackgroundFlushing != nil && oldStat.BackgroundFlushing != nil {
returnVal.Flushes = newStat.BackgroundFlushing.Flushes - oldStat.BackgroundFlushing.Flushes
}

View File

@@ -72,7 +72,6 @@ func (n *Nginx) Gather(acc telegraf.Accumulator) error {
addr, err := url.Parse(u)
if err != nil {
acc.AddError(fmt.Errorf("Unable to parse address '%s': %s", u, err))
continue
}
wg.Add(1)

View File

@@ -1,124 +0,0 @@
# Telegraf Plugin: nginx_plus
Nginx Plus is a commercial version of the open source web server Nginx. To use this plugin you will need a license. For more information about the differences between Nginx (F/OSS) and Nginx Plus, [click here](https://www.nginx.com/blog/whats-difference-nginx-foss-nginx-plus/).
Structures for Nginx Plus have been built based on the history of the
[status module documentation](http://nginx.org/en/docs/http/ngx_http_status_module.html)
### Configuration:
```
# Read Nginx Plus' advanced status information
[[inputs.nginx_plus]]
## An array of Nginx status URIs to gather stats.
urls = ["http://localhost/status"]
```
### Measurements & Fields:
- nginx_plus_processes
- respawned
- nginx_plus_connections
- accepted
- dropped
- active
- idle
- nginx_plus_ssl
- handshakes
- handshakes_failed
- session_reuses
- nginx_plus_requests
- total
- current
- nginx_plus_upstream, nginx_plus_stream_upstream
- keepalive
- zombies
- nginx_plus_upstream_peer, nginx_plus_stream_upstream_peer
- requests
- unavail
- healthchecks_checks
- header_time
- response_time
- state
- active
- downstart
- healthchecks_last_passed
- weight
- responses_1xx
- responses_2xx
- responses_3xx
- responses_4xx
- responses_5xx
- received
- selected
- healthchecks_fails
- healthchecks_unhealthy
- backup
- responses_total
- sent
- fails
- downtime
### Tags:
- nginx_plus_processes, nginx_plus_connections, nginx_plus_ssl, nginx_plus_requests
- server
- port
- nginx_plus_upstream, nginx_plus_stream_upstream
- upstream
- server
- port
- nginx_plus_upstream_peer, nginx_plus_stream_upstream_peer
- id
- upstream
- server
- port
- upstream_address
### Example Output:
Using this configuration:
```
[[inputs.nginx_plus]]
## An array of Nginx Plus status URIs to gather stats.
urls = ["http://localhost/status"]
```
When run with:
```
./telegraf -config telegraf.conf -input-filter nginx_plus -test
```
It produces:
```
* Plugin: inputs.nginx_plus, Collection 1
> nginx_plus_processes,server=localhost,port=12021,host=word.local respawned=0i 1505782513000000000
> nginx_plus_connections,server=localhost,port=12021,host=word.local accepted=5535735212i,dropped=10140186i,active=9541i,idle=67540i 1505782513000000000
> nginx_plus_ssl,server=localhost,port=12021,host=word.local handshakes=0i,handshakes_failed=0i,session_reuses=0i 1505782513000000000
> nginx_plus_requests,server=localhost,port=12021,host=word.local total=186780541173i,current=9037i 1505782513000000000
> nginx_plus_upstream,port=12021,host=word.local,upstream=dataserver80,server=localhost keepalive=0i,zombies=0i 1505782513000000000
> nginx_plus_upstream_peer,upstream=dataserver80,upstream_address=10.10.102.181:80,id=0,server=localhost,port=12021,host=word.local sent=53806910399i,received=7516943964i,fails=207i,downtime=2325979i,selected=1505782512000i,backup=false,active=6i,responses_4xx=6935i,header_time=80i,response_time=80i,healthchecks_last_passed=true,responses_1xx=0i,responses_2xx=36299890i,responses_5xx=360450i,responses_total=36667275i,unavail=154i,downstart=0i,state="up",requests=36673741i,responses_3xx=0i,healthchecks_unhealthy=5i,weight=1i,healthchecks_checks=177209i,healthchecks_fails=29i 1505782513000000000
> nginx_plus_stream_upstream,server=localhost,port=12021,host=word.local,upstream=dataserver443 zombies=0i 1505782513000000000
> nginx_plus_stream_upstream_peer,server=localhost,upstream_address=10.10.102.181:443,id=0,port=12021,host=word.local,upstream=dataserver443 active=1i,healthchecks_unhealthy=1i,weight=1i,unavail=0i,connect_time=24i,first_byte_time=78i,healthchecks_last_passed=true,state="up",sent=4457713140i,received=698065272i,fails=0i,healthchecks_checks=178421i,downstart=0i,selected=1505782512000i,response_time=5156i,backup=false,connections=56251i,healthchecks_fails=20i,downtime=391017i 1505782513000000000
```
### Reference material
The status response structure for successive API versions is documented here:
- [version 1](http://web.archive.org/web/20130805111222/http://nginx.org/en/docs/http/ngx_http_status_module.html)
- [version 2](http://web.archive.org/web/20131218101504/http://nginx.org/en/docs/http/ngx_http_status_module.html)
- version 3 - not available
- [version 4](http://web.archive.org/web/20141218170938/http://nginx.org/en/docs/http/ngx_http_status_module.html)
- [version 5](http://web.archive.org/web/20150414043916/http://nginx.org/en/docs/http/ngx_http_status_module.html)
- [version 6](http://web.archive.org/web/20150918163811/http://nginx.org/en/docs/http/ngx_http_status_module.html)
- [version 7](http://web.archive.org/web/20161107221028/http://nginx.org/en/docs/http/ngx_http_status_module.html)


@@ -1,569 +0,0 @@
package nginx_plus
import (
"bufio"
"encoding/json"
"fmt"
"net"
"net/http"
"net/url"
"strconv"
"strings"
"sync"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/inputs"
)
type NginxPlus struct {
Urls []string
client *http.Client
ResponseTimeout internal.Duration
}
var sampleConfig = `
## An array of ngx_http_status_module status URIs to gather stats.
urls = ["http://localhost/status"]
# HTTP response timeout (default: 5s)
response_timeout = "5s"
`
func (n *NginxPlus) SampleConfig() string {
return sampleConfig
}
func (n *NginxPlus) Description() string {
return "Read Nginx Plus' full status information (ngx_http_status_module)"
}
func (n *NginxPlus) Gather(acc telegraf.Accumulator) error {
var wg sync.WaitGroup
// Create an HTTP client that is re-used for each
// collection interval
if n.client == nil {
client, err := n.createHttpClient()
if err != nil {
return err
}
n.client = client
}
for _, u := range n.Urls {
addr, err := url.Parse(u)
if err != nil {
acc.AddError(fmt.Errorf("Unable to parse address '%s': %s", u, err))
continue
}
wg.Add(1)
go func(addr *url.URL) {
defer wg.Done()
acc.AddError(n.gatherUrl(addr, acc))
}(addr)
}
wg.Wait()
return nil
}
func (n *NginxPlus) createHttpClient() (*http.Client, error) {
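// Enforce a minimum timeout: anything under one second falls back to the 5s default.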
if n.ResponseTimeout.Duration < time.Second {
n.ResponseTimeout.Duration = time.Second * 5
}
client := &http.Client{
Transport: &http.Transport{},
Timeout: n.ResponseTimeout.Duration,
}
return client, nil
}
func (n *NginxPlus) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error {
resp, err := n.client.Get(addr.String())
if err != nil {
return fmt.Errorf("error making HTTP request to %s: %s", addr.String(), err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("%s returned HTTP status %s", addr.String(), resp.Status)
}
contentType := strings.Split(resp.Header.Get("Content-Type"), ";")[0]
switch contentType {
case "application/json":
return gatherStatusUrl(bufio.NewReader(resp.Body), getTags(addr), acc)
default:
return fmt.Errorf("%s returned unexpected content type %s", addr.String(), contentType)
}
}
func getTags(addr *url.URL) map[string]string {
h := addr.Host
host, port, err := net.SplitHostPort(h)
if err != nil {
host = addr.Host
if addr.Scheme == "http" {
port = "80"
} else if addr.Scheme == "https" {
port = "443"
} else {
port = ""
}
}
return map[string]string{"server": host, "port": port}
}
type ResponseStats struct {
Responses1xx int64 `json:"1xx"`
Responses2xx int64 `json:"2xx"`
Responses3xx int64 `json:"3xx"`
Responses4xx int64 `json:"4xx"`
Responses5xx int64 `json:"5xx"`
Total int64 `json:"total"`
}
type BasicHitStats struct {
Responses int64 `json:"responses"`
Bytes int64 `json:"bytes"`
}
type ExtendedHitStats struct {
BasicHitStats
ResponsesWritten int64 `json:"responses_written"`
BytesWritten int64 `json:"bytes_written"`
}
type HealthCheckStats struct {
Checks int64 `json:"checks"`
Fails int64 `json:"fails"`
Unhealthy int64 `json:"unhealthy"`
LastPassed *bool `json:"last_passed"`
}
type Status struct {
Version int `json:"version"`
NginxVersion string `json:"nginx_version"`
Address string `json:"address"`
Generation *int `json:"generation"` // added in version 5
LoadTimestamp *int64 `json:"load_timestamp"` // added in version 2
Timestamp int64 `json:"timestamp"`
Pid *int `json:"pid"` // added in version 6
Processes *struct { // added in version 5
Respawned *int `json:"respawned"`
} `json:"processes"`
Connections struct {
Accepted int64 `json:"accepted"`
Dropped int64 `json:"dropped"`
Active int64 `json:"active"`
Idle int64 `json:"idle"`
} `json:"connections"`
Ssl *struct { // added in version 6
Handshakes int64 `json:"handshakes"`
HandshakesFailed int64 `json:"handshakes_failed"`
SessionReuses int64 `json:"session_reuses"`
} `json:"ssl"`
Requests struct {
Total int64 `json:"total"`
Current int `json:"current"`
} `json:"requests"`
ServerZones map[string]struct { // added in version 2
Processing int `json:"processing"`
Requests int64 `json:"requests"`
Responses ResponseStats `json:"responses"`
Discarded *int64 `json:"discarded"` // added in version 6
Received int64 `json:"received"`
Sent int64 `json:"sent"`
} `json:"server_zones"`
Upstreams map[string]struct {
Peers []struct {
ID *int `json:"id"` // added in version 3
Server string `json:"server"`
Backup bool `json:"backup"`
Weight int `json:"weight"`
State string `json:"state"`
Active int `json:"active"`
Keepalive *int `json:"keepalive"` // removed in version 5
MaxConns *int `json:"max_conns"` // added in version 3
Requests int64 `json:"requests"`
Responses ResponseStats `json:"responses"`
Sent int64 `json:"sent"`
Received int64 `json:"received"`
Fails int64 `json:"fails"`
Unavail int64 `json:"unavail"`
HealthChecks HealthCheckStats `json:"health_checks"`
Downtime int64 `json:"downtime"`
Downstart int64 `json:"downstart"`
Selected *int64 `json:"selected"` // added in version 4
HeaderTime *int64 `json:"header_time"` // added in version 5
ResponseTime *int64 `json:"response_time"` // added in version 5
} `json:"peers"`
Keepalive int `json:"keepalive"`
Zombies int `json:"zombies"` // added in version 6
Queue *struct { // added in version 6
Size int `json:"size"`
MaxSize int `json:"max_size"`
Overflows int64 `json:"overflows"`
} `json:"queue"`
} `json:"upstreams"`
Caches map[string]struct { // added in version 2
Size int64 `json:"size"`
MaxSize int64 `json:"max_size"`
Cold bool `json:"cold"`
Hit BasicHitStats `json:"hit"`
Stale BasicHitStats `json:"stale"`
Updating BasicHitStats `json:"updating"`
Revalidated *BasicHitStats `json:"revalidated"` // added in version 3
Miss ExtendedHitStats `json:"miss"`
Expired ExtendedHitStats `json:"expired"`
Bypass ExtendedHitStats `json:"bypass"`
} `json:"caches"`
Stream struct {
ServerZones map[string]struct {
Processing int `json:"processing"`
Connections int `json:"connections"`
Sessions *ResponseStats `json:"sessions"`
Discarded *int64 `json:"discarded"` // added in version 7
Received int64 `json:"received"`
Sent int64 `json:"sent"`
} `json:"server_zones"`
Upstreams map[string]struct {
Peers []struct {
ID int `json:"id"`
Server string `json:"server"`
Backup bool `json:"backup"`
Weight int `json:"weight"`
State string `json:"state"`
Active int `json:"active"`
Connections int64 `json:"connections"`
ConnectTime *int `json:"connect_time"`
FirstByteTime *int `json:"first_byte_time"`
ResponseTime *int `json:"response_time"`
Sent int64 `json:"sent"`
Received int64 `json:"received"`
Fails int64 `json:"fails"`
Unavail int64 `json:"unavail"`
HealthChecks HealthCheckStats `json:"health_checks"`
Downtime int64 `json:"downtime"`
Downstart int64 `json:"downstart"`
Selected int64 `json:"selected"`
} `json:"peers"`
Zombies int `json:"zombies"`
} `json:"upstreams"`
} `json:"stream"`
}
func gatherStatusUrl(r *bufio.Reader, tags map[string]string, acc telegraf.Accumulator) error {
dec := json.NewDecoder(r)
status := &Status{}
if err := dec.Decode(status); err != nil {
return fmt.Errorf("Error while decoding JSON response")
}
status.Gather(tags, acc)
return nil
}
func (s *Status) Gather(tags map[string]string, acc telegraf.Accumulator) {
s.gatherProcessesMetrics(tags, acc)
s.gatherConnectionsMetrics(tags, acc)
s.gatherSslMetrics(tags, acc)
s.gatherRequestMetrics(tags, acc)
s.gatherZoneMetrics(tags, acc)
s.gatherUpstreamMetrics(tags, acc)
s.gatherCacheMetrics(tags, acc)
s.gatherStreamMetrics(tags, acc)
}
func (s *Status) gatherProcessesMetrics(tags map[string]string, acc telegraf.Accumulator) {
var respawned int
if s.Processes != nil && s.Processes.Respawned != nil { // processes object added in version 5; may be absent
respawned = *s.Processes.Respawned
}
acc.AddFields(
"nginx_plus_processes",
map[string]interface{}{
"respawned": respawned,
},
tags,
)
}
func (s *Status) gatherConnectionsMetrics(tags map[string]string, acc telegraf.Accumulator) {
acc.AddFields(
"nginx_plus_connections",
map[string]interface{}{
"accepted": s.Connections.Accepted,
"dropped": s.Connections.Dropped,
"active": s.Connections.Active,
"idle": s.Connections.Idle,
},
tags,
)
}
func (s *Status) gatherSslMetrics(tags map[string]string, acc telegraf.Accumulator) {
if s.Ssl == nil { // ssl object added in version 6; absent in older responses
return
}
acc.AddFields(
"nginx_plus_ssl",
map[string]interface{}{
"handshakes": s.Ssl.Handshakes,
"handshakes_failed": s.Ssl.HandshakesFailed,
"session_reuses": s.Ssl.SessionReuses,
},
tags,
)
}
func (s *Status) gatherRequestMetrics(tags map[string]string, acc telegraf.Accumulator) {
acc.AddFields(
"nginx_plus_requests",
map[string]interface{}{
"total": s.Requests.Total,
"current": s.Requests.Current,
},
tags,
)
}
func (s *Status) gatherZoneMetrics(tags map[string]string, acc telegraf.Accumulator) {
for zoneName, zone := range s.ServerZones {
zoneTags := map[string]string{}
for k, v := range tags {
zoneTags[k] = v
}
zoneTags["zone"] = zoneName
acc.AddFields(
"nginx_plus_zone",
func() map[string]interface{} {
result := map[string]interface{}{
"processing": zone.Processing,
"requests": zone.Requests,
"responses_1xx": zone.Responses.Responses1xx,
"responses_2xx": zone.Responses.Responses2xx,
"responses_3xx": zone.Responses.Responses3xx,
"responses_4xx": zone.Responses.Responses4xx,
"responses_5xx": zone.Responses.Responses5xx,
"responses_total": zone.Responses.Total,
"received": zone.Received,
"sent": zone.Sent,
}
if zone.Discarded != nil {
result["discarded"] = *zone.Discarded
}
return result
}(),
zoneTags,
)
}
}
func (s *Status) gatherUpstreamMetrics(tags map[string]string, acc telegraf.Accumulator) {
for upstreamName, upstream := range s.Upstreams {
upstreamTags := map[string]string{}
for k, v := range tags {
upstreamTags[k] = v
}
upstreamTags["upstream"] = upstreamName
upstreamFields := map[string]interface{}{
"keepalive": upstream.Keepalive,
"zombies": upstream.Zombies,
}
if upstream.Queue != nil {
upstreamFields["queue_size"] = upstream.Queue.Size
upstreamFields["queue_max_size"] = upstream.Queue.MaxSize
upstreamFields["queue_overflows"] = upstream.Queue.Overflows
}
acc.AddFields(
"nginx_plus_upstream",
upstreamFields,
upstreamTags,
)
for _, peer := range upstream.Peers {
var selected int64
if peer.Selected != nil {
selected = *peer.Selected
}
peerFields := map[string]interface{}{
"backup": peer.Backup,
"weight": peer.Weight,
"state": peer.State,
"active": peer.Active,
"requests": peer.Requests,
"responses_1xx": peer.Responses.Responses1xx,
"responses_2xx": peer.Responses.Responses2xx,
"responses_3xx": peer.Responses.Responses3xx,
"responses_4xx": peer.Responses.Responses4xx,
"responses_5xx": peer.Responses.Responses5xx,
"responses_total": peer.Responses.Total,
"sent": peer.Sent,
"received": peer.Received,
"fails": peer.Fails,
"unavail": peer.Unavail,
"healthchecks_checks": peer.HealthChecks.Checks,
"healthchecks_fails": peer.HealthChecks.Fails,
"healthchecks_unhealthy": peer.HealthChecks.Unhealthy,
"downtime": peer.Downtime,
"downstart": peer.Downstart,
"selected": selected,
}
if peer.HealthChecks.LastPassed != nil {
peerFields["healthchecks_last_passed"] = *peer.HealthChecks.LastPassed
}
if peer.HeaderTime != nil {
peerFields["header_time"] = *peer.HeaderTime
}
if peer.ResponseTime != nil {
peerFields["response_time"] = *peer.ResponseTime
}
if peer.MaxConns != nil {
peerFields["max_conns"] = *peer.MaxConns
}
peerTags := map[string]string{}
for k, v := range upstreamTags {
peerTags[k] = v
}
peerTags["upstream_address"] = peer.Server
if peer.ID != nil {
peerTags["id"] = strconv.Itoa(*peer.ID)
}
acc.AddFields("nginx_plus_upstream_peer", peerFields, peerTags)
}
}
}
func (s *Status) gatherCacheMetrics(tags map[string]string, acc telegraf.Accumulator) {
for cacheName, cache := range s.Caches {
cacheTags := map[string]string{}
for k, v := range tags {
cacheTags[k] = v
}
cacheTags["cache"] = cacheName
cacheFields := map[string]interface{}{
"size": cache.Size,
"max_size": cache.MaxSize,
"cold": cache.Cold,
"hit_responses": cache.Hit.Responses,
"hit_bytes": cache.Hit.Bytes,
"stale_responses": cache.Stale.Responses,
"stale_bytes": cache.Stale.Bytes,
"updating_responses": cache.Updating.Responses,
"updating_bytes": cache.Updating.Bytes,
"miss_responses": cache.Miss.Responses,
"miss_bytes": cache.Miss.Bytes,
"miss_responses_written": cache.Miss.ResponsesWritten,
"miss_bytes_written": cache.Miss.BytesWritten,
"expired_responses": cache.Expired.Responses,
"expired_bytes": cache.Expired.Bytes,
"expired_responses_written": cache.Expired.ResponsesWritten,
"expired_bytes_written": cache.Expired.BytesWritten,
"bypass_responses": cache.Bypass.Responses,
"bypass_bytes": cache.Bypass.Bytes,
"bypass_responses_written": cache.Bypass.ResponsesWritten,
"bypass_bytes_written": cache.Bypass.BytesWritten,
}
if cache.Revalidated != nil { // revalidated stats added in version 3; nil on older responses
cacheFields["revalidated_responses"] = cache.Revalidated.Responses
cacheFields["revalidated_bytes"] = cache.Revalidated.Bytes
}
acc.AddFields("nginx_plus_cache", cacheFields, cacheTags)
}
}
func (s *Status) gatherStreamMetrics(tags map[string]string, acc telegraf.Accumulator) {
for zoneName, zone := range s.Stream.ServerZones {
zoneTags := map[string]string{}
for k, v := range tags {
zoneTags[k] = v
}
zoneTags["zone"] = zoneName
acc.AddFields(
"nginx.stream.zone",
map[string]interface{}{
"processing": zone.Processing,
"connections": zone.Connections,
"received": zone.Received,
"sent": zone.Sent,
},
zoneTags,
)
}
for upstreamName, upstream := range s.Stream.Upstreams {
upstreamTags := map[string]string{}
for k, v := range tags {
upstreamTags[k] = v
}
upstreamTags["upstream"] = upstreamName
acc.AddFields(
"nginx_plus_stream_upstream",
map[string]interface{}{
"zombies": upstream.Zombies,
},
upstreamTags,
)
for _, peer := range upstream.Peers {
peerFields := map[string]interface{}{
"backup": peer.Backup,
"weight": peer.Weight,
"state": peer.State,
"active": peer.Active,
"connections": peer.Connections,
"sent": peer.Sent,
"received": peer.Received,
"fails": peer.Fails,
"unavail": peer.Unavail,
"healthchecks_checks": peer.HealthChecks.Checks,
"healthchecks_fails": peer.HealthChecks.Fails,
"healthchecks_unhealthy": peer.HealthChecks.Unhealthy,
"downtime": peer.Downtime,
"downstart": peer.Downstart,
"selected": peer.Selected,
}
if peer.HealthChecks.LastPassed != nil {
peerFields["healthchecks_last_passed"] = *peer.HealthChecks.LastPassed
}
if peer.ConnectTime != nil {
peerFields["connect_time"] = *peer.ConnectTime
}
if peer.FirstByteTime != nil {
peerFields["first_byte_time"] = *peer.FirstByteTime
}
if peer.ResponseTime != nil {
peerFields["response_time"] = *peer.ResponseTime
}
peerTags := map[string]string{}
for k, v := range upstreamTags {
peerTags[k] = v
}
peerTags["upstream_address"] = peer.Server
peerTags["id"] = strconv.Itoa(peer.ID)
acc.AddFields("nginx_plus_stream_upstream_peer", peerFields, peerTags)
}
}
}
func init() {
inputs.Add("nginx_plus", func() telegraf.Input {
return &NginxPlus{}
})
}


@@ -1,413 +0,0 @@
package nginx_plus
import (
"fmt"
"net"
"net/http"
"net/http/httptest"
"net/url"
"testing"
"github.com/influxdata/telegraf/testutil"
//"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
const sampleStatusResponse = `
{
"version": 6,
"nginx_version": "1.22.333",
"address": "1.2.3.4",
"generation": 88,
"load_timestamp": 1451606400000,
"timestamp": 1451606400000,
"pid": 9999,
"processes": {
"respawned": 9999
},
"connections": {
"accepted": 1234567890000,
"dropped": 2345678900000,
"active": 345,
"idle": 567
},
"ssl": {
"handshakes": 1234567800000,
"handshakes_failed": 5432100000000,
"session_reuses": 6543210000000
},
"requests": {
"total": 9876543210000,
"current": 98
},
"server_zones": {
"zone.a_80": {
"processing": 12,
"requests": 34,
"responses": {
"1xx": 111,
"2xx": 222,
"3xx": 333,
"4xx": 444,
"5xx": 555,
"total": 999
},
"discarded": 11,
"received": 22,
"sent": 33
},
"zone.a_443": {
"processing": 45,
"requests": 67,
"responses": {
"1xx": 1111,
"2xx": 2222,
"3xx": 3333,
"4xx": 4444,
"5xx": 5555,
"total": 999
},
"discarded": 44,
"received": 55,
"sent": 66
}
},
"upstreams": {
"first_upstream": {
"peers": [
{
"id": 0,
"server": "1.2.3.123:80",
"backup": false,
"weight": 1,
"state": "up",
"active": 0,
"requests": 9876,
"responses": {
"1xx": 1111,
"2xx": 2222,
"3xx": 3333,
"4xx": 4444,
"5xx": 5555,
"total": 987654
},
"sent": 987654321,
"received": 87654321,
"fails": 98,
"unavail": 65,
"health_checks": {
"checks": 54,
"fails": 32,
"unhealthy": 21
},
"downtime": 5432,
"downstart": 4321,
"selected": 1451606400000
},
{
"id": 1,
"server": "1.2.3.123:80",
"backup": true,
"weight": 1,
"state": "up",
"active": 0,
"requests": 8765,
"responses": {
"1xx": 1112,
"2xx": 2223,
"3xx": 3334,
"4xx": 4445,
"5xx": 5556,
"total": 987655
},
"sent": 987654322,
"received": 87654322,
"fails": 99,
"unavail": 88,
"health_checks": {
"checks": 77,
"fails": 66,
"unhealthy": 55
},
"downtime": 5433,
"downstart": 4322,
"selected": 1451606400000
}
],
"keepalive": 1,
"zombies": 2
}
},
"caches": {
"cache_01": {
"size": 12,
"max_size": 23,
"cold": false,
"hit": {
"responses": 34,
"bytes": 45
},
"stale": {
"responses": 56,
"bytes": 67
},
"updating": {
"responses": 78,
"bytes": 89
},
"revalidated": {
"responses": 90,
"bytes": 98
},
"miss": {
"responses": 87,
"bytes": 76,
"responses_written": 65,
"bytes_written": 54
},
"expired": {
"responses": 43,
"bytes": 32,
"responses_written": 21,
"bytes_written": 10
},
"bypass": {
"responses": 13,
"bytes": 35,
"responses_written": 57,
"bytes_written": 79
}
}
},
"stream": {
"server_zones": {
"stream.zone.01": {
"processing": 24,
"connections": 46,
"received": 68,
"sent": 80
},
"stream.zone.02": {
"processing": 96,
"connections": 63,
"received": 31,
"sent": 25
}
},
"upstreams": {
"upstream.01": {
"peers": [
{
"id": 0,
"server": "4.3.2.1:2345",
"backup": false,
"weight": 1,
"state": "up",
"active": 0,
"connections": 0,
"sent": 0,
"received": 0,
"fails": 0,
"unavail": 0,
"health_checks": {
"checks": 40848,
"fails": 0,
"unhealthy": 0,
"last_passed": true
},
"downtime": 0,
"downstart": 0,
"selected": 0
},
{
"id": 1,
"server": "5.4.3.2:2345",
"backup": false,
"weight": 1,
"state": "up",
"active": 0,
"connections": 0,
"sent": 0,
"received": 0,
"fails": 0,
"unavail": 0,
"health_checks": {
"checks": 40851,
"fails": 0,
"unhealthy": 0,
"last_passed": true
},
"downtime": 0,
"downstart": 0,
"selected": 0
}
],
"zombies": 0
}
}
}
}
`
func TestNginxPlusGeneratesMetrics(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
var rsp string
if r.URL.Path == "/status" {
rsp = sampleStatusResponse
w.Header()["Content-Type"] = []string{"application/json"}
} else {
panic("Cannot handle request")
}
fmt.Fprintln(w, rsp)
}))
defer ts.Close()
n := &NginxPlus{
Urls: []string{fmt.Sprintf("%s/status", ts.URL)},
}
var acc testutil.Accumulator
require.NoError(t, n.Gather(&acc))
addr, err := url.Parse(ts.URL)
if err != nil {
panic(err)
}
host, port, err := net.SplitHostPort(addr.Host)
if err != nil {
host = addr.Host
if addr.Scheme == "http" {
port = "80"
} else if addr.Scheme == "https" {
port = "443"
} else {
port = ""
}
}
acc.AssertContainsTaggedFields(
t,
"nginx_plus_processes",
map[string]interface{}{
"respawned": int(9999),
},
map[string]string{
"server": host,
"port": port,
})
acc.AssertContainsTaggedFields(
t,
"nginx_plus_connections",
map[string]interface{}{
"accepted": int64(1234567890000),
"dropped": int64(2345678900000),
"active": int64(345),
"idle": int64(567),
},
map[string]string{
"server": host,
"port": port,
})
acc.AssertContainsTaggedFields(
t,
"nginx_plus_ssl",
map[string]interface{}{
"handshakes": int64(1234567800000),
"handshakes_failed": int64(5432100000000),
"session_reuses": int64(6543210000000),
},
map[string]string{
"server": host,
"port": port,
})
acc.AssertContainsTaggedFields(
t,
"nginx_plus_requests",
map[string]interface{}{
"total": int64(9876543210000),
"current": int(98),
},
map[string]string{
"server": host,
"port": port,
})
acc.AssertContainsTaggedFields(
t,
"nginx_plus_zone",
map[string]interface{}{
"processing": int(12),
"requests": int64(34),
"responses_1xx": int64(111),
"responses_2xx": int64(222),
"responses_3xx": int64(333),
"responses_4xx": int64(444),
"responses_5xx": int64(555),
"responses_total": int64(999),
"discarded": int64(11),
"received": int64(22),
"sent": int64(33),
},
map[string]string{
"server": host,
"port": port,
"zone": "zone.a_80",
})
acc.AssertContainsTaggedFields(
t,
"nginx_plus_upstream",
map[string]interface{}{
"keepalive": int(1),
"zombies": int(2),
},
map[string]string{
"server": host,
"port": port,
"upstream": "first_upstream",
})
acc.AssertContainsTaggedFields(
t,
"nginx_plus_upstream_peer",
map[string]interface{}{
"backup": false,
"weight": int(1),
"state": "up",
"active": int(0),
"requests": int64(9876),
"responses_1xx": int64(1111),
"responses_2xx": int64(2222),
"responses_3xx": int64(3333),
"responses_4xx": int64(4444),
"responses_5xx": int64(5555),
"responses_total": int64(987654),
"sent": int64(987654321),
"received": int64(87654321),
"fails": int64(98),
"unavail": int64(65),
"healthchecks_checks": int64(54),
"healthchecks_fails": int64(32),
"healthchecks_unhealthy": int64(21),
"downtime": int64(5432),
"downstart": int64(4321),
"selected": int64(1451606400000),
},
map[string]string{
"server": host,
"port": port,
"upstream": "first_upstream",
"upstream_address": "1.2.3.123:80",
"id": "0",
})
}


@@ -1,19 +1,15 @@
# NSQ Consumer Input Plugin
The [NSQ](http://nsq.io/) consumer plugin polls a specified NSQD
topic and adds messages to InfluxDB. This plugin allows a message to be in any of the supported `data_format` types.
## Configuration
```toml
# Read metrics from NSQD topic(s)
[[inputs.nsq_consumer]]
## The server option still works but is deprecated; it is prepended to the nsqd array.
# server = "localhost:4150"
## An array representing the NSQD TCP HTTP Endpoints
nsqd = ["localhost:4150"]
## An array representing the NSQLookupd HTTP Endpoints
nsqlookupd = ["localhost:4161"]
## A string representing the NSQD TCP endpoint
server = "localhost:4150"
topic = "telegraf"
channel = "consumer"
max_in_flight = 100
```
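For reference, a minimal sketch (not part of the plugin) of consuming a topic with the go-nsq client library; the endpoint, topic, and channel values are illustrative and mirror the sample configuration above:
```go
package main

import (
	"log"

	nsq "github.com/nsqio/go-nsq"
)

func main() {
	config := nsq.NewConfig()
	config.MaxInFlight = 100 // corresponds to the plugin's max_in_flight option

	consumer, err := nsq.NewConsumer("telegraf", "consumer", config)
	if err != nil {
		log.Fatal(err)
	}

	// The real plugin hands each message body to the configured
	// data_format parser; here we just log it.
	consumer.AddHandler(nsq.HandlerFunc(func(message *nsq.Message) error {
		log.Printf("received: %s", message.Body)
		message.Finish() // acknowledge the message
		return nil
	}))

	if err := consumer.ConnectToNSQD("localhost:4150"); err != nil {
		log.Fatal(err)
	}
	select {} // block; a real consumer would wait for a shutdown signal
}
```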


@@ -6,14 +6,12 @@ import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/plugins/parsers"
nsq "github.com/nsqio/go-nsq"
"github.com/nsqio/go-nsq"
)
// NSQConsumer represents the configuration of the plugin
type NSQConsumer struct {
Server string
Nsqd []string
Nsqlookupd []string
Topic string
Channel string
MaxInFlight int
@@ -23,12 +21,8 @@ type NSQConsumer struct {
}
var sampleConfig = `
## The server option still works but is deprecated; it is prepended to the nsqd array.
# server = "localhost:4150"
## An array representing the NSQD TCP HTTP Endpoints
nsqd = ["localhost:4150"]
## An array representing the NSQLookupd HTTP Endpoints
nsqlookupd = ["localhost:4161"]
## A string representing the NSQD TCP Endpoint
server = "localhost:4150"
topic = "telegraf"
channel = "consumer"
max_in_flight = 100
@@ -77,11 +71,7 @@ func (n *NSQConsumer) Start(acc telegraf.Accumulator) error {
message.Finish()
return nil
}), n.MaxInFlight)
if len(n.Nsqlookupd) > 0 {
n.consumer.ConnectToNSQLookupds(n.Nsqlookupd)
}
n.consumer.ConnectToNSQDs(append(n.Nsqd, n.Server))
n.consumer.ConnectToNSQD(n.Server)
return nil
}


@@ -40,7 +40,6 @@ func TestReadsMetricsFromNSQ(t *testing.T) {
Topic: "telegraf",
Channel: "consume",
MaxInFlight: 1,
Nsqd: []string{"127.0.0.1:4155"},
}
p, _ := parsers.NewInfluxParser()


@@ -4,8 +4,6 @@ This plugin gathers metrics from OpenLDAP's cn=Monitor backend.
### Configuration:
To use this plugin you must enable the [monitoring](https://www.openldap.org/devel/admin/monitoringslapd.html) backend.
```toml
[[inputs.openldap]]
host = "localhost"
```
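Before pointing telegraf at the server, it can be worth confirming that cn=Monitor is actually exposed. A minimal sketch assuming the gopkg.in/ldap.v2 client; host, port, and the anonymous bind are illustrative:
```go
package main

import (
	"fmt"
	"log"

	ldap "gopkg.in/ldap.v2"
)

func main() {
	// Connect to a local slapd; adjust host/port for your deployment.
	conn, err := ldap.Dial("tcp", "localhost:389")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Search the cn=Monitor subtree for counter attributes.
	req := ldap.NewSearchRequest(
		"cn=Monitor",
		ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false,
		"(objectClass=*)",
		[]string{"monitorCounter"},
		nil,
	)
	result, err := conn.Search(req)
	if err != nil {
		// A "no such object" error here usually means the monitor backend is not enabled.
		log.Fatal(err)
	}
	for _, entry := range result.Entries {
		fmt.Println(entry.DN, entry.GetAttributeValue("monitorCounter"))
	}
}
```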


@@ -1,102 +0,0 @@
# OpenSMTPD Input Plugin
This plugin gathers stats from [OpenSMTPD - a FREE implementation of the server-side SMTP protocol](https://www.opensmtpd.org/).
### Configuration:
```toml
# A plugin to collect stats from OpenSMTPD - a FREE implementation of the server-side SMTP protocol
[[inputs.opensmtpd]]
## If running as a restricted user you can prepend sudo for additional access:
#use_sudo = false
## The default location of the smtpctl binary can be overridden with:
binary = "/usr/sbin/smtpctl"
# The default timeout of 1s can be overridden with:
#timeout = "1s"
```
### Measurements & Fields:
This is the full list of stats provided by smtpctl and potentially collected by telegraf,
depending on your smtpctl configuration (see the parsing sketch after the list below).
- smtpctl
  - bounce_envelope
  - bounce_message
  - bounce_session
  - control_session
  - mda_envelope
  - mda_pending
  - mda_running
  - mda_user
  - mta_connector
  - mta_domain
  - mta_envelope
  - mta_host
  - mta_relay
  - mta_route
  - mta_session
  - mta_source
  - mta_task
  - mta_task_running
  - queue_bounce
  - queue_evpcache_load_hit
  - queue_evpcache_size
  - queue_evpcache_update_hit
  - scheduler_delivery_ok
  - scheduler_delivery_permfail
  - scheduler_delivery_tempfail
  - scheduler_envelope
  - scheduler_envelope_expired
  - scheduler_envelope_incoming
  - scheduler_envelope_inflight
  - scheduler_ramqueue_envelope
  - scheduler_ramqueue_message
  - scheduler_ramqueue_update
  - smtp_session
  - smtp_session_inet4
  - smtp_session_local
  - uptime
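Each stat is read from `smtpctl show stats` as a `name=value` pair, and dots in the name become underscores in the resulting field name (so `mta.task.running=4` is reported as `mta_task_running`). A minimal sketch of that transformation, assuming the same line format the plugin parses:
```go
package main

import (
	"bufio"
	"fmt"
	"strconv"
	"strings"
)

// parseStats converts "name=value" lines, as printed by
// `smtpctl show stats`, into numeric fields with dots in
// the name replaced by underscores.
func parseStats(out string) map[string]interface{} {
	fields := make(map[string]interface{})
	scanner := bufio.NewScanner(strings.NewReader(out))
	for scanner.Scan() {
		cols := strings.Split(scanner.Text(), "=")
		if len(cols) != 2 {
			continue
		}
		value, err := strconv.ParseFloat(cols[1], 64)
		if err != nil {
			continue // the real plugin reports non-numeric values as errors
		}
		fields[strings.Replace(cols[0], ".", "_", -1)] = value
	}
	return fields
}

func main() {
	fmt.Println(parseStats("mta.task.running=4\nuptime=10346728"))
}
```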
### Permissions:
It's important to note that this plugin references smtpctl, which may require additional permissions to execute successfully.
Depending on the user/group permissions of the telegraf user executing this plugin, you may need to alter the group membership, set facls, or use sudo.
**Group membership (Recommended)**:
```bash
$ groups telegraf
telegraf : telegraf
$ usermod -a -G opensmtpd telegraf
$ groups telegraf
telegraf : telegraf opensmtpd
```
**Sudo privileges**:
If you use this method, you will need the following in your telegraf config:
```toml
[[inputs.opensmtpd]]
use_sudo = true
```
You will also need to update your sudoers file:
```bash
$ visudo
# Add the following line:
telegraf ALL=(ALL) NOPASSWD: /usr/sbin/smtpctl
```
Please use the solution you see as most appropriate.
### Example Output:
```
telegraf --config etc/telegraf.conf --input-filter opensmtpd --test
* Plugin: inputs.opensmtpd, Collection 1
> opensmtpd,host=localhost scheduler_delivery_tempfail=822,mta_host=10,mta_task_running=4,queue_bounce=13017,scheduler_delivery_permfail=51022,mta_relay=7,queue_evpcache_size=2,scheduler_envelope_expired=26,bounce_message=0,mta_domain=7,queue_evpcache_update_hit=848,smtp_session_local=12294,bounce_envelope=0,queue_evpcache_load_hit=4389703,scheduler_ramqueue_update=0,mta_route=3,scheduler_delivery_ok=2149489,smtp_session_inet4=2131997,control_session=1,scheduler_envelope_incoming=0,uptime=10346728,scheduler_ramqueue_envelope=2,smtp_session=0,bounce_session=0,mta_envelope=2,mta_session=6,mta_task=2,scheduler_ramqueue_message=2,mta_connector=7,mta_source=1,scheduler_envelope=2,scheduler_envelope_inflight=2 1510220300000000000
```


@@ -1,134 +0,0 @@
package opensmtpd
import (
"bufio"
"bytes"
"fmt"
"os/exec"
"strconv"
"strings"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/filter"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/inputs"
)
type runner func(cmdName string, Timeout internal.Duration, UseSudo bool) (*bytes.Buffer, error)
// Opensmtpd is used to store configuration values
type Opensmtpd struct {
Binary string
Timeout internal.Duration
UseSudo bool
filter filter.Filter
run runner
}
var defaultBinary = "/usr/sbin/smtpctl"
var defaultTimeout = internal.Duration{Duration: time.Second}
var sampleConfig = `
## If running as a restricted user you can prepend sudo for additional access:
#use_sudo = false
## The default location of the smtpctl binary can be overridden with:
binary = "/usr/sbin/smtpctl"
## The default timeout of 1s can be overridden with:
# timeout = "1s"
`
func (s *Opensmtpd) Description() string {
return "A plugin to collect stats from Opensmtpd - a validating, recursive, and caching DNS resolver "
}
// SampleConfig displays configuration instructions
func (s *Opensmtpd) SampleConfig() string {
return sampleConfig
}
// Shell out to smtpctl and return the output
func opensmtpdRunner(cmdName string, Timeout internal.Duration, UseSudo bool) (*bytes.Buffer, error) {
cmdArgs := []string{"show", "stats"}
cmd := exec.Command(cmdName, cmdArgs...)
if UseSudo {
cmdArgs = append([]string{cmdName}, cmdArgs...)
cmd = exec.Command("sudo", cmdArgs...)
}
var out bytes.Buffer
cmd.Stdout = &out
err := internal.RunTimeout(cmd, Timeout.Duration)
if err != nil {
return &out, fmt.Errorf("error running smtpctl: %s", err)
}
return &out, nil
}
// Gather collects the configured stats from smtpctl and adds them to the
// Accumulator
//
// All the dots in the stat name will be replaced by underscores.
func (s *Opensmtpd) Gather(acc telegraf.Accumulator) error {
// Always exclude uptime.human statistics
statExcluded := []string{"uptime.human"}
filterExcluded, err := filter.Compile(statExcluded)
if err != nil {
return err
}
out, err := s.run(s.Binary, s.Timeout, s.UseSudo)
if err != nil {
return fmt.Errorf("error gathering metrics: %s", err)
}
// Process values
fields := make(map[string]interface{})
scanner := bufio.NewScanner(out)
for scanner.Scan() {
cols := strings.Split(scanner.Text(), "=")
// Check split correctness
if len(cols) != 2 {
continue
}
stat := cols[0]
value := cols[1]
// Filter value
if filterExcluded.Match(stat) {
continue
}
field := strings.Replace(stat, ".", "_", -1)
v, err := strconv.ParseFloat(value, 64)
if err != nil {
acc.AddError(fmt.Errorf("expected a numerical value for %s = %v", stat, value))
continue
}
fields[field] = v
}
acc.AddFields("opensmtpd", fields, nil)
return nil
}
func init() {
inputs.Add("opensmtpd", func() telegraf.Input {
return &Opensmtpd{
run: opensmtpdRunner,
Binary: defaultBinary,
Timeout: defaultTimeout,
UseSudo: false,
}
})
}

Some files were not shown because too many files have changed in this diff.