Compare commits


118 Commits

Author SHA1 Message Date
Daniel Nelson 8385206e68
Update changelog
(cherry picked from commit 2c5a5373f6)
2017-12-01 11:42:34 -08:00
Daniel Nelson 07e268f39e
Update changelog
(cherry picked from commit cabe10b88a)
2017-12-01 11:25:18 -08:00
Daniel Nelson c095876442
Fix HOST_MOUNT_PREFIX in docker with disk input (#3529)
(cherry picked from commit 7f66863b87)
2017-12-01 11:25:03 -08:00
Daniel Nelson 809ed511dd
Update changelog
(cherry picked from commit 24d82aebe6)
2017-11-29 12:12:54 -08:00
Daniel Nelson 741022b656
Update gopsutil version to include netstat fix (#3513)
(cherry picked from commit 7dc256e845)
2017-11-29 12:11:12 -08:00
Daniel Nelson 34d3bf4fa0
Update changelog
(cherry picked from commit a9ada5f65b)
2017-11-27 12:33:17 -08:00
Laurent Gosselin 700b52dbd1
Fix global variable collection when using interval_slow option in mysql input (#3500)
(cherry picked from commit f758d0c6c3)
2017-11-27 12:33:17 -08:00
Daniel Nelson ddcb93188f
Set 1.4.4 release date
(cherry picked from commit 07297e80a8)
2017-11-08 15:22:31 -08:00
Daniel Nelson cb193d0e8a
Update changelog
(cherry picked from commit 2c2dc97702)
2017-11-07 11:43:33 -08:00
Daniel Nelson 600f9fa067
Use current time if container read time is zero value (#3437)
(cherry picked from commit cbbdf1043b)
2017-11-07 11:43:29 -08:00
Daniel Nelson 4cedae9d2c
Update changelog
(cherry picked from commit c55f285de0)
2017-11-07 11:37:45 -08:00
Daniel Nelson 4c8e8fc2f1
Update gopsutil to v2.17.10 (#3441)
(cherry picked from commit e1295c41c8)
2017-11-07 11:37:24 -08:00
Daniel Nelson 7c5bcfe84e
Update changelog
(cherry picked from commit e0df62c27b)
2017-11-06 17:43:02 -08:00
Bob Shannon efa20d05fa
Redact datadog API key in log output (#3420)
(cherry picked from commit fdf12ce6b4)
2017-11-06 17:42:57 -08:00
Daniel Nelson 187c7e12a8
Update changelog
(cherry picked from commit c116af35c7)
2017-10-30 15:36:17 -07:00
Daniel Nelson f29a994743
Use explicit schemas in mqtt_consumer input (#3401)
(cherry picked from commit fcfcc803b1)
2017-10-30 15:35:48 -07:00
Daniel Nelson f416f429d7
Fix circle-ci Go version
2017-10-30 15:06:34 -07:00
Daniel Nelson ec6b1aae94
Fix unittest for golang 1.9
(cherry picked from commit cafb22d145)
2017-10-30 15:04:30 -07:00
Daniel Nelson b473b6a659
Set release date for 1.4.3
2017-10-25 14:16:05 -07:00
Daniel Nelson e5d08a4d86
Update changelog
(cherry picked from commit 13c1f1524a)
2017-10-24 16:26:06 -07:00
Daniel Nelson 3c894bb056
Use golang.org/x/sys/unix instead of syscall in diskio (#3384)
(cherry picked from commit 9a062498e7)
2017-10-24 16:26:06 -07:00
Daniel Nelson d2d173b792
Update changelog
(cherry picked from commit f64cf89db1)
2017-10-24 15:47:19 -07:00
Daniel Nelson 145f7da42e
If the connector name cannot be unquoted, use the raw value (#3371)
(cherry picked from commit 6d1777276c)
2017-10-24 15:47:15 -07:00
Daniel Nelson f9f8d9ed7e
Update changelog
(cherry picked from commit 65580759fc)
2017-10-23 12:37:30 -07:00
Sergei Smolianinov 0dd3b0507b
Fix ACL token usage in consul input plugin (#3376)
(cherry picked from commit d2f9fc7d8c)
2017-10-23 12:37:30 -07:00
Daniel Nelson c44b4fcc89
Update changelog
(cherry picked from commit 7088d98304)
2017-10-19 16:35:11 -07:00
Daniel Nelson cb9c1653d3
Remove warning when JSON contains null value (#3359)
(cherry picked from commit 4243403432)
2017-10-19 16:35:10 -07:00
Daniel Nelson cf7590b88e
Update changelog
(cherry picked from commit 9b59cdd10e)
2017-10-18 13:58:25 -07:00
clheikes 5a7d889908
Fix TELEGRAF_OPTS expansion in systemd service unit (#3354)
(cherry picked from commit 02baa696c3)
2017-10-18 13:58:25 -07:00
Daniel Nelson ef652678dd
Update changelog
(cherry picked from commit a4fa19252f)
2017-10-18 12:58:41 -07:00
Dimitris Rozakis c4cc57956b
Respect path prefix in influx output uri (#3224)
(cherry picked from commit 9c8f4afa37)
2017-10-18 12:58:41 -07:00
Daniel Nelson 7b8a761c63
Update changelog
(cherry picked from commit 7ba376964c)
2017-10-18 12:26:07 -07:00
Ayrdrie 7d66319f59
Fix mongodb input panic when restarting mongodb (#3355)
(cherry picked from commit a75ab3e190)
2017-10-18 12:26:03 -07:00
Pierre Fersing 22f64f8417
Fix CPU system plugin gets stuck after system suspend (#3342)
(cherry picked from commit f5a9d1bc75)
2017-10-16 14:27:58 -07:00
Daniel Nelson 6b4deb01bb
Update changelog
(cherry picked from commit 3ea41e885c)
2017-10-16 11:27:28 -07:00
Daniel Nelson e4835cdc30
Fix case sensitivity issue in sqlserver query (#3336)
(cherry picked from commit 1f348037b7)
2017-10-16 11:27:28 -07:00
Daniel Nelson e32ffdde06
Update changelog
(cherry picked from commit 0f9f757da7)
2017-10-12 17:27:24 -07:00
Windkit Li 0f905eaee7
Fix snmpwalk address format in leofs input (#3328)
(cherry picked from commit 2f8d0f4d47)
2017-10-12 17:27:24 -07:00
Daniel Nelson 4d48dcb84f
Update changelog
(cherry picked from commit 024dea2ff9)
2017-10-12 15:56:09 -07:00
Daniel Nelson 17377b4942
Fix container name filters in docker input (#3331)
(cherry picked from commit fa25e123d8)
2017-10-12 15:55:50 -07:00
Daniel Nelson 0cc5fc0ce4
Set 1.4.2 release date
(cherry picked from commit 4e0c8e6026)
2017-10-10 13:31:06 -07:00
Daniel Nelson 8011109466
Remove InfluxDB path prefix test
This tests a feature that is not yet on this branch and the test was
mistakenly backported.
2017-10-05 16:37:58 -07:00
Daniel Nelson 588f0c77f8
Update changelog
(cherry picked from commit 13c7802b84)
2017-10-05 16:17:06 -07:00
Daniel Nelson 4301b8e32a
Use chunked transfer encoding in InfluxDB output (#3307)
(cherry picked from commit cce40c515a)
2017-10-05 16:17:05 -07:00
Daniel Nelson 3c9d7db0a0
Update changelog
(cherry picked from commit 6e1fa559a3)
2017-10-05 16:06:11 -07:00
Daniel Nelson f7b3eb1ebd
Fix panic in cpu input if number of cpus changes (#3306)
(cherry picked from commit f56dda0ac8)
2017-10-05 16:06:11 -07:00
Daniel Nelson b8ab827629
Update changelog
(cherry picked from commit 002ccf3295)
2017-10-03 15:27:49 -07:00
Daniel Nelson d03e2fca32
Add support for proxy environment variables to http_response (#3302)
(cherry picked from commit a163effa6d)
2017-10-03 15:26:55 -07:00
Daniel Nelson eca00c10e0
Add support for standard proxy env vars in outputs. (#3212)
(cherry picked from commit 7b08f9d099)
2017-10-03 15:26:44 -07:00
Daniel Nelson 9cf19df04e
Update changelog
(cherry picked from commit f67350107d)
2017-10-02 17:17:10 -07:00
Daniel Nelson e77c2b76e7
Fix case sensitivity error in sqlserver input (#3287)
(cherry picked from commit 8e3ed96d6f)
2017-10-02 17:17:10 -07:00
Daniel Nelson c749c43dab
Fix mqtt_consumer connection_timeout test
(cherry picked from commit cdca81c999)
2017-10-02 12:32:05 -07:00
Daniel Nelson 1be17ea5af
Update example config
2017-09-29 16:04:02 -07:00
Daniel Nelson e1155bec20
Update changelog
(cherry picked from commit 29b6f4168c)
2017-09-29 16:01:11 -07:00
Daniel Nelson cfac750469
Fix format of connection_timeout in mqtt_consumer (#3286)
(cherry picked from commit 3d62e045af)
2017-09-29 16:01:11 -07:00
Daniel Nelson f10d5b43c4
Update changelog
(cherry picked from commit cadafa6405)
2017-09-26 16:03:30 -07:00
Daniel Nelson 47b2d04d5b
Allow JSON data format to contain zero metrics (#3268)
(cherry picked from commit 22a9ffbb9d)
2017-09-26 16:03:30 -07:00
Daniel Nelson 0e0da57b9a
Update changelog
(cherry picked from commit 2e1457a496)
2017-09-26 15:38:41 -07:00
Daniel Nelson 8e7cf0109e
Fix parsing of JSON with a UTF8 BOM in httpjson (#3267)
(cherry picked from commit 8614445235)
2017-09-26 15:38:41 -07:00
Daniel Nelson 5b791fd2e5
Update changelog
(cherry picked from commit f23d1eb078)
2017-09-26 15:29:19 -07:00
Daniel Nelson 293b1a0093
Fix dmcache tests with 32bit int
(cherry picked from commit ef5c12bd86)
2017-09-26 15:29:01 -07:00
Daniel Nelson 761ea06d6a
Fix cgroup tests with 32bit int
(cherry picked from commit c013cc1497)
2017-09-26 15:29:01 -07:00
Daniel Nelson 8fafe9878b
Fix ceph tests with 32bit int
(cherry picked from commit bb665cf013)
2017-09-26 15:29:01 -07:00
Daniel Nelson 5da3eef38b
Allow 64bit integers in kernel_vmstat
(cherry picked from commit f823fc73f6)
2017-09-26 15:29:00 -07:00
Daniel Nelson 2de7aa23d7
Set 1.4.1 release date in changelog
(cherry picked from commit fd702e6bb8)
2017-09-26 14:19:51 -07:00
Daniel Nelson 52cd38150c
Update changelog
(cherry picked from commit 0048bf2120)
2017-09-18 14:25:57 -07:00
Daniel Nelson c08f492f78
Fix arm64 packages contain 32-bit executable (#3246)
(cherry picked from commit b8e134cd37)
2017-09-18 14:25:57 -07:00
Daniel Nelson 66cfe80e37
Update changelog
(cherry picked from commit b94cda6b46)
2017-09-14 15:30:51 -07:00
Trevor Pounds ba5e5ec283
Fix panic in statsd p100 calculation (#3230)
(cherry picked from commit 73372872c2)
2017-09-14 15:30:51 -07:00
Daniel Nelson 259f8e4002
Update changelog
(cherry picked from commit 875ab3c4b7)
2017-09-14 15:05:38 -07:00
Mark Wilkinson - m82labs 558ab0c730
Fix duplicate keys in perf counters sqlserver query (#3175)
(cherry picked from commit 1c5ebd4be3)
2017-09-14 15:05:38 -07:00
Daniel Nelson 8d4fbe29e7
Update changelog
(cherry picked from commit 103d24bfba)
2017-09-14 15:01:28 -07:00
Daniel Nelson 72337a1c97
Fix skipped line with empty target in iptables (#3235)
(cherry picked from commit d5f48e3e96)
2017-09-14 15:01:21 -07:00
Daniel Nelson 86537899b2
Update changelog
(cherry picked from commit 7a41d2c586)
2017-09-14 13:07:30 -07:00
Trevor Pounds a727d5d1f0
Fix counter and gauge metric types. (#3232)
(cherry picked from commit fa1982323a)
2017-09-14 13:07:30 -07:00
Daniel Nelson 7ec194a482
Update changelog
(cherry picked from commit cdf63c5776)
2017-09-13 17:32:03 -07:00
Daniel Nelson 5a77d28837
Whitelist allowed char classes for opentsdb output. (#3227)
(cherry picked from commit 0a8c2e0b3b)
2017-09-13 17:32:03 -07:00
Daniel Nelson 47927c353d
Fix fluentd test
(cherry picked from commit eebee9759f)
2017-09-12 17:58:29 -07:00
Daniel Nelson b9e7fa27aa
Update changelog
(cherry picked from commit c5cfde667a)
2017-09-12 17:18:29 -07:00
Daniel Nelson 0d437140bd
Fix optional field types in fluentd input
(cherry picked from commit 8a68e7424c)
2017-09-12 17:18:29 -07:00
Daniel Nelson 36969a63c2
Update changelog
(cherry picked from commit cc63b3b667)
2017-09-11 12:28:37 -07:00
DanKans e9a12bb694
Fix MQTT input exits if Broker is not available on startup (#3202)
(cherry picked from commit 5488f4b3ac)
2017-09-11 12:28:12 -07:00
Daniel Nelson 34b7a4c361
Add 1.4.0 release date
(cherry picked from commit ab1c11b06d)
2017-09-05 17:15:06 -07:00
Daniel Nelson f46370d982
Sort metrics before comparing in graphite test
(cherry picked from commit 98e784faf3)
2017-09-05 12:50:55 -07:00
Daniel Nelson 07b7e09749
Update changelog
(cherry picked from commit f43af72785)
2017-08-31 13:44:05 -07:00
Daniel Nelson e54795795d
Fix panic when handling string fields with escapes (#3188)
(cherry picked from commit 28d16188b3)
2017-08-30 21:17:10 -07:00
Daniel Nelson b2b2bd8a27
Update changelog
2017-08-29 16:30:25 -07:00
Daniel Nelson f96cbb48c7
Convert bool fields to int in graphite serializer
2017-08-29 16:30:25 -07:00
Seua Polyakov 9077cb83bc
Skip non-numerical values in graphite format (#3179)
2017-08-29 16:30:25 -07:00
Daniel Nelson 0f188f280f
Update changelog
2017-08-28 17:18:00 -07:00
Dylan Meissner b9420e73bd
HTTP headers can be added to InfluxDB output (#3182)
(cherry picked from commit a9a40cbf87)
2017-08-28 17:15:43 -07:00
Daniel Nelson 1e43e5e7ae
Update changelog
(cherry picked from commit 5fd8ab36d3)
2017-08-28 17:09:08 -07:00
Jeff Nickoloff 5e104ad974
Added CloudWatch metric constraint validation (#3183)
(cherry picked from commit ac1fa05672)
2017-08-28 17:09:08 -07:00
Daniel Nelson cc9d8c700c
Update changelog
(cherry picked from commit a98496591a)
2017-08-25 18:08:55 -07:00
Ashton Kinslow b15ec21ba7
Fix NSQ input plugin when used with version 1.0.0-compat
(cherry picked from commit 0a6541dfa8)
2017-08-25 18:08:55 -07:00
Daniel Nelson a9abfe8f08
Update changelog
(cherry picked from commit 6abecd0ac7)
2017-08-25 12:59:51 -07:00
Rickard von Essen 307210242c
Don't fail parsing of zpool stats if pool health is UNAVAIL on FreeBSD (#3149)
(cherry picked from commit 0502b65316)
2017-08-25 12:59:38 -07:00
Daniel Nelson 0a41db16f1
Update changelog
(cherry picked from commit e400fcf5da)
2017-08-25 11:56:30 -07:00
Jan Willem Janssen 7480267fd2
Fix parsing of SHM remotes in ntpq input (#3163)
(cherry picked from commit d449833de9)
2017-08-25 11:56:27 -07:00
Daniel Nelson 30949c4596
Update fail2ban documentation
(cherry picked from commit 58751fa4df)
2017-08-25 11:43:49 -07:00
Daniel Nelson 47264bc860
Fix amqp_consumer data_format documentation
closes #3164

(cherry picked from commit 656ce31d98)
2017-08-24 13:18:23 -07:00
Daniel Nelson 67e693e9a8
Update changelog
(cherry picked from commit f95c239a3f)
2017-08-23 15:22:29 -07:00
Daniel Nelson 851352bc8a
Escape backslash within string fields (#3161)
(cherry picked from commit ae24a0754b)
2017-08-23 15:22:25 -07:00
Daniel Nelson c807452c14
Update changelog
(cherry picked from commit f253623231)
2017-08-23 15:16:40 -07:00
Rickard von Essen 48e00f7ea0
Enable hddtemp on all platforms (#3153)
Also disables dmcache tests on non-linux.

(cherry picked from commit f0db4fd901)
2017-08-23 15:16:27 -07:00
Daniel Nelson 8ce901aaa4
Update changelog
(cherry picked from commit 8c68bd9ddb)
2017-08-22 17:03:28 -07:00
Daniel Nelson 78d1715601
Don't start Telegraf on install in Amazon Linux (#3156)
(cherry picked from commit 9fc7220c2e)
2017-08-22 17:03:17 -07:00
Daniel Nelson 1b0a18897d
Update changelog
(cherry picked from commit 6597b55477)
2017-08-22 16:55:37 -07:00
Daniel Nelson 257b6a09d9
Don't retry points beyond retention policy (#3155)
(cherry picked from commit 1f4a997164)
2017-08-22 16:55:33 -07:00
Rickard von Essen e6feac735c
Enable fail2ban on all platforms (#3151)
(cherry picked from commit 371638ce56)
2017-08-22 12:59:54 -07:00
Rickard von Essen 6616065acf
Enable chrony for all platforms (#3152)
(cherry picked from commit 53c5d3a290)
2017-08-22 11:50:16 -07:00
Daniel Nelson 98774d60e2
Cache intermediate objects during build
(cherry picked from commit ccf17a9f93)
2017-08-21 17:28:20 -07:00
Chris Goller d4cd1b7eb4
Add JSON input support to zipkin plugin (#3150)
(cherry picked from commit 13a6b917c3)
2017-08-21 17:28:14 -07:00
Daniel Nelson 7254111d37
Add win_services to the readme
(cherry picked from commit 1f1e9cc49f)
2017-08-18 17:58:18 -07:00
Daniel Nelson 4551efb459
Update histogram aggregator documentation (#3133)
(cherry picked from commit 70c2b83f00)
2017-08-18 13:25:22 -07:00
Daniel Nelson 2610eba0e3
Remove version test
(cherry picked from commit 4de264ffc8)
2017-08-18 11:09:34 -07:00
Daniel Nelson c277dc27a6
Update example config
(cherry picked from commit 36c2c88fd2)
2017-08-17 18:54:44 -07:00
Daniel Nelson a4f5c6fbc3
Update sample config
2017-08-16 16:48:10 -07:00
109 changed files with 5585 additions and 1658 deletions


@ -1,4 +1,65 @@
## v1.4 [unreleased]
## v1.4.5 [2017-12-01]
### Bugfixes
- [#3500](https://github.com/influxdata/telegraf/issues/3500): Fix global variable collection when using interval_slow option in mysql input.
- [#3486](https://github.com/influxdata/telegraf/issues/3486): Fix error getting net connections info in netstat input.
- [#3529](https://github.com/influxdata/telegraf/issues/3529): Fix HOST_MOUNT_PREFIX in docker with disk input.
## v1.4.4 [2017-11-08]
### Bugfixes
- [#3401](https://github.com/influxdata/telegraf/pull/3401): Use schema specified in mqtt_consumer input.
- [#3419](https://github.com/influxdata/telegraf/issues/3419): Redact datadog API key in log output.
- [#3311](https://github.com/influxdata/telegraf/issues/3311): Fix error getting pids in netstat input.
- [#3339](https://github.com/influxdata/telegraf/issues/3339): Support HOST_VAR envvar to locate /var in system input.
- [#3383](https://github.com/influxdata/telegraf/issues/3383): Use current time if docker container read time is zero value.
## v1.4.3 [2017-10-25]
### Bugfixes
- [#3327](https://github.com/influxdata/telegraf/issues/3327): Fix container name filters in docker input.
- [#3321](https://github.com/influxdata/telegraf/issues/3321): Fix snmpwalk address format in leofs input.
- [#3329](https://github.com/influxdata/telegraf/issues/3329): Fix case sensitivity issue in sqlserver query.
- [#3342](https://github.com/influxdata/telegraf/pull/3342): Fix CPU input plugin stuck after suspend on Linux.
- [#3013](https://github.com/influxdata/telegraf/issues/3013): Fix mongodb input panic when restarting mongodb.
- [#3224](https://github.com/influxdata/telegraf/pull/3224): Preserve url path prefix in influx output.
- [#3354](https://github.com/influxdata/telegraf/pull/3354): Fix TELEGRAF_OPTS expansion in systemd service unit.
- [#3357](https://github.com/influxdata/telegraf/issues/3357): Remove warning when JSON contains null value.
- [#3375](https://github.com/influxdata/telegraf/issues/3375): Fix ACL token usage in consul input plugin.
- [#3369](https://github.com/influxdata/telegraf/issues/3369): Fix unquoting error with Tomcat 6.
- [#3373](https://github.com/influxdata/telegraf/issues/3373): Fix syscall panic in diskio on some Linux systems.
## v1.4.2 [2017-10-10]
### Bugfixes
- [#3259](https://github.com/influxdata/telegraf/issues/3259): Fix error if int larger than 32-bit in /proc/vmstat.
- [#3265](https://github.com/influxdata/telegraf/issues/3265): Fix parsing of JSON with a UTF8 BOM in httpjson.
- [#2887](https://github.com/influxdata/telegraf/issues/2887): Allow JSON data format to contain zero metrics.
- [#3284](https://github.com/influxdata/telegraf/issues/3284): Fix format of connection_timeout in mqtt_consumer.
- [#3081](https://github.com/influxdata/telegraf/issues/3081): Fix case sensitivity error in sqlserver input.
- [#3297](https://github.com/influxdata/telegraf/issues/3297): Add support for proxy environment variables to http_response.
- [#1588](https://github.com/influxdata/telegraf/issues/1588): Add support for standard proxy env vars in outputs.
- [#3282](https://github.com/influxdata/telegraf/issues/3282): Fix panic in cpu input if number of cpus changes.
- [#2854](https://github.com/influxdata/telegraf/issues/2854): Use chunked transfer encoding in InfluxDB output.
## v1.4.1 [2017-09-26]
### Bugfixes
- [#3167](https://github.com/influxdata/telegraf/issues/3167): Fix MQTT input exits if Broker is not available on startup.
- [#3217](https://github.com/influxdata/telegraf/issues/3217): Fix optional field value conversions in fluentd input.
- [#3227](https://github.com/influxdata/telegraf/issues/3227): Whitelist allowed char classes for opentsdb output.
- [#3232](https://github.com/influxdata/telegraf/issues/3232): Fix counter and gauge metric types.
- [#3235](https://github.com/influxdata/telegraf/issues/3235): Fix skipped line with empty target in iptables.
- [#3175](https://github.com/influxdata/telegraf/issues/3175): Fix duplicate keys in perf counters sqlserver query.
- [#3230](https://github.com/influxdata/telegraf/issues/3230): Fix panic in statsd p100 calculation.
- [#3242](https://github.com/influxdata/telegraf/issues/3242): Fix arm64 packages contain 32-bit executable.
## v1.4 [2017-09-05]
### Release Notes
@ -62,6 +123,7 @@
- [#2978](https://github.com/influxdata/telegraf/pull/2978): Add gzip content-encoding support to influxdb output.
- [#3127](https://github.com/influxdata/telegraf/pull/3127): Allow using system plugin in Windows.
- [#3112](https://github.com/influxdata/telegraf/pull/3112): Add tomcat input plugin.
- [#3182](https://github.com/influxdata/telegraf/pull/3182): HTTP headers can be added to InfluxDB output.
### Bugfixes
@ -93,6 +155,16 @@
- [#2899](https://github.com/influxdata/telegraf/issues/2899): Skip compilcation of logparser and tail on solaris.
- [#2951](https://github.com/influxdata/telegraf/issues/2951): Discard logging from tail library.
- [#3126](https://github.com/influxdata/telegraf/pull/3126): Remove log message on ping timeout.
- [#3144](https://github.com/influxdata/telegraf/issues/3144): Don't retry points beyond retention policy.
- [#3015](https://github.com/influxdata/telegraf/issues/3015): Don't start Telegraf on install in Amazon Linux.
- [#3153](https://github.com/influxdata/telegraf/issues/3153): Enable hddtemp input on all platforms.
- [#3142](https://github.com/influxdata/telegraf/issues/3142): Escape backslash within string fields.
- [#3162](https://github.com/influxdata/telegraf/issues/3162): Fix parsing of SHM remotes in ntpq input.
- [#3149](https://github.com/influxdata/telegraf/issues/3149): Don't fail parsing zpool stats if pool health is UNAVAIL on FreeBSD.
- [#2672](https://github.com/influxdata/telegraf/issues/2672): Fix NSQ input plugin when used with version 1.0.0-compat.
- [#2523](https://github.com/influxdata/telegraf/issues/2523): Added CloudWatch metric constraint validation.
- [#3179](https://github.com/influxdata/telegraf/issues/3179): Skip non-numerical values in graphite format.
- [#3187](https://github.com/influxdata/telegraf/issues/3187): Fix panic when handling string fields with escapes.
## v1.3.5 [2017-07-26]

Godeps

@ -60,7 +60,7 @@ github.com/prometheus/procfs 1878d9fbb537119d24b21ca07effd591627cd160
github.com/rcrowley/go-metrics 1f30fe9094a513ce4c700b9a54458bbb0c96996c
github.com/samuel/go-zookeeper 1d7be4effb13d2d908342d349d71a284a7542693
github.com/satori/go.uuid 5bf94b69c6b68ee1b541973bb8e1144db23a194b
github.com/shirou/gopsutil 9a4a9167ad3b4355dbf1c2c7a0f5f0d3fb1e9ab9
github.com/shirou/gopsutil 384a55110aa5ae052eb93ea94940548c1e305a99
github.com/shirou/w32 3c9377fc6748f222729a8270fe2775d149a249ad
github.com/Shopify/sarama c01858abb625b73a3af51d0798e4ad42c8147093
github.com/Sirupsen/logrus 61e43dc76f7ee59a82bdf3d71033dc12bea4c77d


@ -25,7 +25,7 @@ deps:
gdm restore
telegraf:
go build -o $(TELEGRAF) -ldflags "$(LDFLAGS)" ./cmd/telegraf/telegraf.go
go build -i -o $(TELEGRAF) -ldflags "$(LDFLAGS)" ./cmd/telegraf/telegraf.go
go-install:
go install -ldflags "-w -s $(LDFLAGS)" ./cmd/telegraf


@ -172,7 +172,8 @@ configuration options.
* [varnish](./plugins/inputs/varnish)
* [zfs](./plugins/inputs/zfs)
* [zookeeper](./plugins/inputs/zookeeper)
* [win_perf_counters ](./plugins/inputs/win_perf_counters) (windows performance counters)
* [win_perf_counters](./plugins/inputs/win_perf_counters) (windows performance counters)
* [win_services](./plugins/inputs/win_services)
* [sysstat](./plugins/inputs/sysstat)
* [system](./plugins/inputs/system)
* cpu


@ -1,11 +1,14 @@
machine:
go:
version: 1.8.1
services:
- docker
- memcached
- redis
- rabbitmq-server
post:
- sudo rm -rf /usr/local/go
- wget https://storage.googleapis.com/golang/go1.8.4.linux-amd64.tar.gz
- sudo tar -C /usr/local -xzf go1.8.4.linux-amd64.tar.gz
- go version
dependencies:
override:


@ -96,6 +96,9 @@ tars.cpu-total.us-east-1.cpu.usage_user 0.89 1455320690
tars.cpu-total.us-east-1.cpu.usage_idle 98.09 1455320690
```
Fields with string values will be skipped. Boolean fields will be converted
to 1 (true) or 0 (false).
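For instance, a hedged illustration (hypothetical metric, assuming the default `host.tags.measurement.field` template):
```
# input metric with a boolean field
disk,host=server01 active=true 1455320690000000000

# serialized graphite line
server01.disk.active 1 1455320690
```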
### Graphite Configuration:
```toml


@ -84,9 +84,7 @@
# Configuration for influxdb server to send metrics to
[[outputs.influxdb]]
## The HTTP or UDP URL for your InfluxDB instance. Each item should be
## of the form:
## scheme "://" host [ ":" port]
## The full HTTP or UDP URL for your InfluxDB instance.
##
## Multiple urls can be specified as part of the same cluster,
## this means that only ONE of the urls will be written to each interval.
@ -118,6 +116,15 @@
## Use SSL but skip chain & host verification
# insecure_skip_verify = false
## HTTP Proxy Config
# http_proxy = "http://corporate.proxy:3128"
## Optional HTTP headers
# http_headers = {"X-Special-Header" = "Special-Value"}
## Compress each HTTP request payload using GZIP.
# content_encoding = "gzip"
# # Configuration for Amon Server to send metrics to.
# [[outputs.amon]]
@ -272,11 +279,11 @@
# timeout = 2
#
# ## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# insecure_skip_verify = false
# # insecure_skip_verify = false
# # Send telegraf metrics to graylog(s)
@ -596,6 +603,32 @@
# AGGREGATOR PLUGINS #
###############################################################################
# # Create aggregate histograms.
# [[aggregators.histogram]]
# ## The period in which to flush the aggregator.
# period = "30s"
#
# ## If true, the original metric will be dropped by the
# ## aggregator and will not get sent to the output plugins.
# drop_original = false
#
# ## Example config that aggregates all fields of the metric.
# # [[aggregators.histogram.config]]
# # ## The set of buckets.
# # buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0]
# # ## The name of metric.
# # measurement_name = "cpu"
#
# ## Example config that aggregates only specific fields of the metric.
# # [[aggregators.histogram.config]]
# # ## The set of buckets.
# # buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
# # ## The name of metric.
# # measurement_name = "diskio"
# # ## The concrete fields of metric
# # fields = ["io_time", "read_time", "write_time"]
# # Keep the aggregate min/max of each metric passing through.
# [[aggregators.minmax]]
# ## General Aggregator Arguments:
@ -606,32 +639,6 @@
# drop_original = false
# # Configuration for aggregate histogram metrics
# [[aggregators.histogram]]
# ## General Aggregator Arguments:
# ## The period on which to flush & clear the aggregator.
# period = "30s"
# ## If true, the original metric will be dropped by the
# ## aggregator and will not get sent to the output plugins.
# drop_original = false
#
# ## The example of config to aggregate histogram for all fields of specified metric.
# [[aggregators.histogram.config]]
# ## The set of buckets.
# buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0]
# ## The name of metric.
# metric_name = "cpu"
#
# ## The example of config to aggregate for specified fields of metric.
# [[aggregators.histogram.config]]
# ## The set of buckets.
# buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
# ## The name of metric.
# metric_name = "diskio"
# ## The concrete fields of metric
# metric_fields = ["io_time", "read_time", "write_time"]
###############################################################################
# INPUT PLUGINS #
@ -645,6 +652,8 @@
totalcpu = true
## If true, collect raw CPU time metrics.
collect_cpu_time = false
## If true, compute and report the sum of all non-idle CPU states.
report_active = false
# Read metrics about disk usage by mount point
@ -720,15 +729,17 @@
# # Read Apache status information (mod_status)
# [[inputs.apache]]
# ## An array of Apache status URI to gather stats.
# ## An array of URLs to gather from, must be directed at the machine
# ## readable version of the mod_status page including the auto query string.
# ## Default is "http://localhost/server-status?auto".
# urls = ["http://localhost/server-status?auto"]
# ## user credentials for basic HTTP authentication
# username = "myuser"
# password = "mypassword"
#
# ## Timeout to the complete conection and reponse time in seconds
# response_timeout = "25s" ## default to 5 seconds
# ## Credentials for basic HTTP authentication.
# # username = "myuser"
# # password = "mypassword"
#
# ## Maximum time to receive response.
# # response_timeout = "5s"
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
@ -846,7 +857,7 @@
#
# # The minimum period for Cloudwatch metrics is 1 minute (60s). However not all
# # metrics are made available to the 1 minute period. Some are collected at
# # 3 minute and 5 minutes intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring.
# # 3 minute, 5 minute, or larger intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring.
# # Note that if a period is configured that is smaller than the minimum for a
# # particular metric, that metric will not be returned by the Cloudwatch API
# # and will not be collected by Telegraf.
@ -958,20 +969,23 @@
# # Query given DNS server and gives statistics
# [[inputs.dns_query]]
# ## servers to query
# servers = ["8.8.8.8"] # required
# servers = ["8.8.8.8"]
#
# ## Domains or subdomains to query. "."(root) is default
# domains = ["."] # optional
# ## Network is the network protocol name.
# # network = "udp"
#
# ## Query record type. Default is "A"
# ## Domains or subdomains to query.
# # domains = ["."]
#
# ## Query record type.
# ## Possible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV.
# record_type = "A" # optional
# # record_type = "A"
#
# ## Dns server port. 53 is default
# port = 53 # optional
# ## Dns server port.
# # port = 53
#
# ## Query timeout in seconds. Default is 2 seconds
# timeout = 2 # optional
# ## Query timeout in seconds.
# # timeout = 2
# # Read metrics about docker containers
@ -980,8 +994,15 @@
# ## To use TCP, set endpoint = "tcp://[ip]:[port]"
# ## To use environment variables (ie, docker-machine), set endpoint = "ENV"
# endpoint = "unix:///var/run/docker.sock"
#
# ## Only collect metrics for these containers, collect all if empty
# container_names = []
#
# ## Containers to include and exclude. Globs accepted.
# ## Note that an empty array for both will include all containers
# container_name_include = []
# container_name_exclude = []
#
# ## Timeout for docker list, info, and stats commands
# timeout = "5s"
#
@ -990,11 +1011,20 @@
# perdevice = true
# ## Whether to report for each container total blkio and network stats or not
# total = false
# ## Which environment variables should we use as a tag
# ##tag_env = ["JAVA_HOME", "HEAP_SIZE"]
#
# ## docker labels to include and exclude as tags. Globs accepted.
# ## Note that an empty array for both will include all labels as tags
# docker_label_include = []
# docker_label_exclude = []
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false
# # Read statistics from one or many dovecot servers
@ -1064,6 +1094,12 @@
# data_format = "influx"
# # Read metrics from fail2ban.
# [[inputs.fail2ban]]
# ## Use sudo to run fail2ban-client
# use_sudo = false
# # Read stats about given file(s)
# [[inputs.filestat]]
# ## Files to gather stats about.
@ -1080,6 +1116,22 @@
# md5 = false
# # Read metrics exposed by fluentd in_monitor plugin
# [[inputs.fluentd]]
# ## This plugin reads information exposed by fluentd (using /api/plugins.json endpoint).
# ##
# ## Endpoint:
# ## - only one URI is allowed
# ## - https is not supported
# endpoint = "http://localhost:24220/api/plugins.json"
#
# ## Define which plugins have to be excluded (based on "type" field - e.g. monitor_agent)
# exclude = [
# "monitor_agent",
# "dummy",
# ]
# # Read flattened metrics from one or more GrayLog HTTP endpoints
# [[inputs.graylog]]
# ## API endpoint, currently supported API:
@ -1161,25 +1213,26 @@
# # HTTP/HTTPS request given an address a method and a timeout
# [[inputs.http_response]]
# ## Server address (default http://localhost)
# address = "http://github.com"
# # address = "http://localhost"
#
# ## Set response_timeout (default 5 seconds)
# response_timeout = "5s"
# # response_timeout = "5s"
#
# ## HTTP Request Method
# method = "GET"
# # method = "GET"
#
# ## Whether to follow redirects from the server (defaults to false)
# follow_redirects = true
# ## HTTP Request Headers (all values must be strings)
# # [inputs.http_response.headers]
# # Host = "github.com"
# # follow_redirects = false
#
# ## Optional HTTP Request Body
# # body = '''
# # {'fake':'data'}
# # '''
#
# ## Optional substring or regex match in body of the response
# ## response_string_match = "\"service_status\": \"up\""
# ## response_string_match = "ok"
# ## response_string_match = "\".*_status\".?:.?\"up\""
# # response_string_match = "\"service_status\": \"up\""
# # response_string_match = "ok"
# # response_string_match = "\".*_status\".?:.?\"up\""
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
@ -1187,6 +1240,10 @@
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false
#
# ## HTTP Request Headers (all values must be strings)
# # [inputs.http_response.headers]
# # Host = "github.com"
# # Read flattened metrics from one or more JSON HTTP endpoints
@ -1249,6 +1306,13 @@
# "http://localhost:8086/debug/vars"
# ]
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false
#
# ## http request & header timeout
# timeout = "5s"
@ -1279,6 +1343,13 @@
# ## if no servers are specified, local machine sensor stats will be queried
# ##
# # servers = ["USERID:PASSW0RD@lan(192.168.1.1)"]
#
# ## Recommended: use metric 'interval' that is a multiple of 'timeout' to avoid
# ## gaps or overlap in pulled data
# interval = "30s"
#
# ## Timeout for the ipmitool command to complete
# timeout = "20s"
# # Gather packets and bytes throughput from iptables
@ -1398,9 +1469,9 @@
# # Read metrics from a LeoFS Server via SNMP
# [[inputs.leofs]]
# ## An array of URI to gather stats about LeoFS.
# ## Specify an ip or hostname with port. ie 127.0.0.1:4020
# servers = ["127.0.0.1:4021"]
# ## An array of URLs of the form:
# ## host [ ":" port]
# servers = ["127.0.0.1:4020"]
# # Provides Linux sysctl fs metrics
@ -1475,14 +1546,24 @@
# # ]
# # Collects scores from a minecraft server's scoreboard using the RCON protocol
# [[inputs.minecraft]]
# ## server address for minecraft
# # server = "localhost"
# ## port for RCON
# # port = "25575"
# ## RCON password for the minecraft server
# # password = ""
# # Read metrics from one or many MongoDB servers
# [[inputs.mongodb]]
# ## An array of URI to gather stats about. Specify an ip or hostname
# ## with optional port add password. ie,
# ## An array of URLs of the form:
# ## "mongodb://" [user ":" pass "@"] host [ ":" port]
# ## For example:
# ## mongodb://user:auth_key@10.10.3.30:27017,
# ## mongodb://10.10.3.33:18832,
# ## 10.0.0.1:10000, etc.
# servers = ["127.0.0.1:27017"]
# servers = ["mongodb://127.0.0.1:27017"]
# gather_perdb_stats = false
#
# ## Optional SSL Config
@ -1496,7 +1577,7 @@
# # Read metrics from one or many mysql servers
# [[inputs.mysql]]
# ## specify servers via a url matching:
# ## [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify]]
# ## [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify|custom]]
# ## see https://github.com/go-sql-driver/mysql#dsn-data-source-name
# ## e.g.
# ## servers = ["user:passwd@tcp(127.0.0.1:3306)/?tls=false"]
@ -1553,7 +1634,7 @@
# #
# ## Some queries we may want to run less often (such as SHOW GLOBAL VARIABLES)
# interval_slow = "30m"
#
# ## Optional SSL Config (will be used if tls=custom parameter specified in server uri)
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
@ -1599,8 +1680,17 @@
# # Read Nginx's basic status information (ngx_http_stub_status_module)
# [[inputs.nginx]]
# ## An array of Nginx stub_status URI to gather stats.
# urls = ["http://localhost/status"]
# # An array of Nginx stub_status URI to gather stats.
# urls = ["http://localhost/server_status"]
#
# # TLS/SSL configuration
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.cer"
# ssl_key = "/etc/telegraf/key.key"
# insecure_skip_verify = false
#
# # HTTP response timeout (default: 5s)
# response_timeout = "5s"
# # Read NSQ topic and channel statistics.
@ -1627,6 +1717,27 @@
# dns_lookup = true
# # OpenLDAP cn=Monitor plugin
# [[inputs.openldap]]
# host = "localhost"
# port = 389
#
# # ldaps, starttls, or no encryption. default is an empty string, disabling all encryption.
# # note that port will likely need to be changed to 636 for ldaps
# # valid options: "" | "starttls" | "ldaps"
# ssl = ""
#
# # skip peer certificate verification. Default is false.
# insecure_skip_verify = false
#
# # Path to PEM-encoded Root certificate to use to verify server certificate
# ssl_ca = "/etc/ssl/certs.pem"
#
# # dn/password to bind with. If bind_dn is empty, an anonymous bind is performed.
# bind_dn = ""
# bind_password = ""
# # Read metrics of passenger using passenger-status
# [[inputs.passenger]]
# ## Path of passenger-status.
@ -1820,10 +1931,13 @@
# location = "/var/lib/puppet/state/last_run_summary.yaml"
# # Read metrics from one or many RabbitMQ servers via the management API
# # Reads metrics from RabbitMQ servers via the Management Plugin
# [[inputs.rabbitmq]]
# ## Management Plugin url. (default: http://localhost:15672)
# # url = "http://localhost:15672"
# # name = "rmq-server-1" # optional tag
# ## Tag added to rabbitmq_overview series; deprecated: use tags
# # name = "rmq-server-1"
# ## Credentials
# # username = "guest"
# # password = "guest"
#
@ -1880,14 +1994,11 @@
# ##
# ## If you use actual rethinkdb of > 2.3.0 with username/password authorization,
# ## protocol have to be named "rethinkdb2" - it will use 1_0 H.
# servers = ["rethinkdb2://username:password@127.0.0.1:28015"]
# # servers = ["rethinkdb2://username:password@127.0.0.1:28015"]
# ##
# ## If you use older versions of rethinkdb (<2.2) with auth_key, protocol
# ## have to be named "rethinkdb".
# servers = ["rethinkdb://username:auth_key@127.0.0.1:28015"]
# # servers = ["rethinkdb://username:auth_key@127.0.0.1:28015"]
# # Read metrics one or many Riak servers
@ -1896,6 +2007,26 @@
# servers = ["http://localhost:8098"]
# # Read API usage and limits for a Salesforce organisation
# [[inputs.salesforce]]
# ## specify your credentials
# ##
# username = "your_username"
# password = "your_password"
# ##
# ## (optional) security token
# # security_token = "your_security_token"
# ##
# ## (optional) environment type (sandbox or production)
# ## default is: production
# ##
# # environment = "production"
# ##
# ## (optional) API version (default: "39.0")
# ##
# # version = "39.0"
# # Monitor sensors, requires lm-sensors package
# [[inputs.sensors]]
# ## Remove numbers from field names.
@ -2141,6 +2272,26 @@
# # vg = "rootvg"
# # Gather metrics from the Tomcat server status page.
# [[inputs.tomcat]]
# ## URL of the Tomcat server status
# # url = "http://127.0.0.1:8080/manager/status/all?XML=true"
#
# ## HTTP Basic Auth Credentials
# # username = "tomcat"
# # password = "s3cret"
#
# ## Request timeout
# # timeout = "5s"
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false
# # Inserts sine and cosine waves for demonstration purposes
# [[inputs.trig]]
# ## Set the amplitude
@ -2157,6 +2308,9 @@
# # A plugin to collect stats from Varnish HTTP Cache
# [[inputs.varnish]]
# ## If running as a restricted user you can prepend sudo for additional access:
# #use_sudo = false
#
# ## The default location of the varnishstat binary can be overridden with:
# binary = "/usr/bin/varnishstat"
#
@ -2222,10 +2376,10 @@
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false
#
# ## Data format to output.
# ## Data format to consume.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
# data_format = "influx"
@ -2247,16 +2401,13 @@
# ## 0 means to use the default of 65536 bytes (64 kibibytes)
# max_line_size = 0
# # Read metrics from Kafka 0.9+ topic(s)
# # Read metrics from Kafka topic(s)
# [[inputs.kafka_consumer]]
# ## topic(s) to consume
# topics = ["telegraf"]
# ## kafka servers
# brokers = ["localhost:9092"]
# ## the name of the consumer group
# consumer_group = "telegraf_metrics_consumers"
# ## Offset (must be either "oldest" or "newest")
# offset = "oldest"
# ## topic(s) to consume
# topics = ["telegraf"]
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
@ -2269,6 +2420,11 @@
# # sasl_username = "kafka"
# # sasl_password = "secret"
#
# ## the name of the consumer group
# consumer_group = "telegraf_metrics_consumers"
# ## Offset (must be either "oldest" or "newest")
# offset = "oldest"
#
# ## Data format to consume.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
@ -2279,7 +2435,8 @@
# ## larger messages are dropped
# max_message_len = 65536
# # Read metrics from Kafka (0.8 or less) topic(s)
# # Read metrics from Kafka topic(s)
# [[inputs.kafka_consumer_legacy]]
# ## topic(s) to consume
# topics = ["telegraf"]
@ -2312,6 +2469,7 @@
# ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log
# ## /var/log/apache.log -> only tail the apache log file
# files = ["/var/log/apache/access.log"]
#
# ## Read files that currently exist from the beginning. Files that are created
# ## while telegraf is running (and that match the "files" globs) will always
# ## be read from the beginning.
@ -2327,12 +2485,26 @@
# ## %{COMMON_LOG_FORMAT} (plain apache & nginx access logs)
# ## %{COMBINED_LOG_FORMAT} (access logs + referrer & agent)
# patterns = ["%{COMBINED_LOG_FORMAT}"]
#
# ## Name of the outputted measurement name.
# measurement = "apache_access_log"
#
# ## Full path(s) to custom pattern files.
# custom_pattern_files = []
#
# ## Custom patterns can also be defined here. Put one pattern per line.
# custom_patterns = '''
#
# ## Timezone allows you to provide an override for timestamps that
# ## don't already include an offset
# ## e.g. 04/06/2016 12:41:45 data one two 5.43µs
# ##
# ## Default: "" which renders UTC
# ## Options are as follows:
# ## 1. Local -- interpret based on machine localtime
# ## 2. "Canada/Eastern" -- Unix TZ values like those found in https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
# ## 3. UTC -- or blank/unspecified, will return timestamp in UTC
# timezone = "Canada/Eastern"
# '''
@ -2341,6 +2513,8 @@
# servers = ["localhost:1883"]
# ## MQTT QoS, must be 0, 1, or 2
# qos = 0
# ## Connection timeout for initial connection in seconds
# connection_timeout = "30s"
#
# ## Topics to subscribe to
# topics = [
@ -2431,6 +2605,11 @@
# ## 0 (default) is unlimited.
# # max_connections = 1024
#
# ## Read timeout.
# ## Only applies to stream sockets (e.g. TCP).
# ## 0 (default) is unlimited.
# # read_timeout = "30s"
#
# ## Maximum socket buffer size in bytes.
# ## For stream sockets, once the buffer fills up, the sender will start backing up.
# ## For datagram sockets, once the buffer fills up, metrics will start dropping.
@ -2450,12 +2629,14 @@
# # data_format = "influx"
# # Statsd Server
# # Statsd UDP/TCP Server
# [[inputs.statsd]]
# ## Protocol, must be "tcp" or "udp"
# ## Protocol, must be "tcp" or "udp" (default=udp)
# protocol = "udp"
# ## Maximum number of concurrent TCP connections to allow
#
# ## MaxTCPConnection - applicable when protocol is set to tcp (default=250)
# max_tcp_connections = 250
#
# ## Address and port to host UDP listener on
# service_address = ":8125"
#
@ -2556,3 +2737,9 @@
# [inputs.webhooks.papertrail]
# path = "/papertrail"
# # This plugin implements the Zipkin http server to gather trace and timing data needed to troubleshoot latency problems in microservice architectures.
# [[inputs.zipkin]]
# # path = "/api/v1/spans" # URL path for span data
# # port = 9411 # Port on which Telegraf listens


@ -77,3 +77,40 @@ func compileFilterNoGlob(filters []string) Filter {
}
return &out
}
type IncludeExcludeFilter struct {
include Filter
exclude Filter
}
func NewIncludeExcludeFilter(
include []string,
exclude []string,
) (Filter, error) {
in, err := Compile(include)
if err != nil {
return nil, err
}
ex, err := Compile(exclude)
if err != nil {
return nil, err
}
return &IncludeExcludeFilter{in, ex}, nil
}
func (f *IncludeExcludeFilter) Match(s string) bool {
if f.include != nil {
if !f.include.Match(s) {
return false
}
}
if f.exclude != nil {
if f.exclude.Match(s) {
return false
}
}
return true
}
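For context, a usage sketch of the new include/exclude filter. This is illustrative only: the patterns are hypothetical, the import path is assumed, and it relies on `Compile` returning a nil filter for an empty pattern list, which `Match` above treats as match-all.

```go
package main

import (
	"fmt"
	"log"

	"github.com/influxdata/telegraf/filter"
)

func main() {
	// Keep names matching "web*", except anything matching "web-test*".
	f, err := filter.NewIncludeExcludeFilter([]string{"web*"}, []string{"web-test*"})
	if err != nil {
		log.Fatal(err) // a glob pattern failed to compile
	}
	fmt.Println(f.Match("web-01"))   // true: included and not excluded
	fmt.Println(f.Match("web-test")) // false: rejected by the exclude filter
	fmt.Println(f.Match("db-01"))    // false: not matched by the include filter
}
```

This is the shape behind options such as the docker input's new `container_name_include` / `container_name_exclude` settings in the sample config above.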


@ -150,12 +150,6 @@ func makemetric(
continue
}
case string:
if strings.HasSuffix(val, `\`) {
log.Printf("D! Measurement [%s] field [%s] has a value "+
"ending with a backslash, skipping", measurement, k)
delete(fields, k)
continue
}
fields[k] = v
default:
fields[k] = v


@ -370,16 +370,17 @@ func TestMakeMetric_TrailingSlash(t *testing.T) {
expectedTags: map[string]string{},
},
{
name: "Field value with trailing slash dropped",
name: "Field value with trailing slash okay",
measurement: `cpu`,
fields: map[string]interface{}{
"value": int64(42),
"bad": `xyzzy\`,
"ok": `xyzzy\`,
},
tags: map[string]string{},
expectedMeasurement: `cpu`,
expectedFields: map[string]interface{}{
"value": int64(42),
"ok": `xyzzy\`,
},
expectedTags: map[string]string{},
},
@ -387,7 +388,7 @@ func TestMakeMetric_TrailingSlash(t *testing.T) {
name: "Must have one field after dropped",
measurement: `cpu`,
fields: map[string]interface{}{
"bad": `xyzzy\`,
"bad": math.NaN(),
},
tags: map[string]string{},
expectedNil: true,


@ -20,8 +20,14 @@ var (
// stringFieldEscaper is for escaping string field values only.
// see https://docs.influxdata.com/influxdb/v1.0/write_protocols/line_protocol_tutorial/#special-characters-and-keywords
stringFieldEscaper = strings.NewReplacer(`"`, `\"`)
stringFieldUnEscaper = strings.NewReplacer(`\"`, `"`)
stringFieldEscaper = strings.NewReplacer(
`"`, `\"`,
`\`, `\\`,
)
stringFieldUnEscaper = strings.NewReplacer(
`\"`, `"`,
`\\`, `\`,
)
)
func escape(s string, t string) string {
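An illustrative round trip through the updated replacers (hypothetical value, same package):

```go
s := stringFieldEscaper.Replace(`x\"y`) // yields `x\\\"y`
stringFieldUnEscaper.Replace(s)         // restores `x\"y`
```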


@ -21,14 +21,14 @@ func New(
t time.Time,
mType ...telegraf.ValueType,
) (telegraf.Metric, error) {
if len(fields) == 0 {
return nil, fmt.Errorf("Metric cannot be made without any fields")
}
if len(name) == 0 {
return nil, fmt.Errorf("Metric cannot be made with an empty name")
return nil, fmt.Errorf("missing measurement name")
}
if len(fields) == 0 {
return nil, fmt.Errorf("%s: must have one or more fields", name)
}
if strings.HasSuffix(name, `\`) {
return nil, fmt.Errorf("Metric cannot have measurement name ending with a backslash")
return nil, fmt.Errorf("%s: measurement name cannot end with a backslash", name)
}
var thisType telegraf.ValueType
@ -49,10 +49,10 @@ func New(
taglen := 0
for k, v := range tags {
if strings.HasSuffix(k, `\`) {
return nil, fmt.Errorf("Metric cannot have tag key ending with a backslash")
return nil, fmt.Errorf("%s: tag key cannot end with a backslash: %s", name, k)
}
if strings.HasSuffix(v, `\`) {
return nil, fmt.Errorf("Metric cannot have tag value ending with a backslash")
return nil, fmt.Errorf("%s: tag value cannot end with a backslash: %s", name, v)
}
if len(k) == 0 || len(v) == 0 {
@ -77,15 +77,9 @@ func New(
// pre-allocate capacity of the fields slice
fieldlen := 0
for k, v := range fields {
for k, _ := range fields {
if strings.HasSuffix(k, `\`) {
return nil, fmt.Errorf("Metric cannot have field key ending with a backslash")
}
switch val := v.(type) {
case string:
if strings.HasSuffix(val, `\`) {
return nil, fmt.Errorf("Metric cannot have field value ending with a backslash")
}
return nil, fmt.Errorf("%s: field key cannot end with a backslash: %s", name, k)
}
// 10 bytes is completely arbitrary, but will at least prevent some
@ -108,7 +102,8 @@ func New(
}
// indexUnescapedByte finds the index of the first byte equal to b in buf that
// is not escaped. Returns -1 if not found.
// is not escaped. Does not allow the escape char to be escaped. Returns -1 if
// not found.
func indexUnescapedByte(buf []byte, b byte) int {
var keyi int
for {
@ -128,6 +123,46 @@ func indexUnescapedByte(buf []byte, b byte) int {
return keyi
}
// indexUnescapedByteBackslashEscaping finds the index of the first byte equal
// to b in buf that is not escaped. Allows for the escape char `\` to be
// escaped. Returns -1 if not found.
func indexUnescapedByteBackslashEscaping(buf []byte, b byte) int {
var keyi int
for {
i := bytes.IndexByte(buf[keyi:], b)
if i == -1 {
return -1
} else if i == 0 {
break
}
keyi += i
if countBackslashes(buf, keyi-1)%2 == 0 {
break
} else {
keyi++
}
}
return keyi
}
// countBackslashes counts the number of preceding backslashes starting at
// the 'start' index.
func countBackslashes(buf []byte, index int) int {
var count int
for {
if index < 0 {
return count
}
if buf[index] == '\\' {
count++
index--
} else {
break
}
}
return count
}
type metric struct {
name []byte
tags []byte
@ -289,7 +324,7 @@ func (m *metric) Fields() map[string]interface{} {
// end index of field value
var i3 int
if m.fields[i:][i2] == '"' {
i3 = indexUnescapedByte(m.fields[i:][i2+1:], '"')
i3 = indexUnescapedByteBackslashEscaping(m.fields[i:][i2+1:], '"')
if i3 == -1 {
i3 = len(m.fields[i:])
}
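A quick sanity check of the backslash-aware scan (hypothetical snippet, same package): in the buffer below the two backslashes escape each other, so the closing quote is itself unescaped.

```go
buf := []byte(`test\\" 0`)
i := indexUnescapedByteBackslashEscaping(buf, '"')
// i == 6: countBackslashes finds an even run (2) of backslashes before
// the '"', so the quote is not treated as escaped.
```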


@ -31,7 +31,7 @@ func TestNewMetric(t *testing.T) {
assert.Equal(t, tags, m.Tags())
assert.Equal(t, fields, m.Fields())
assert.Equal(t, "cpu", m.Name())
assert.Equal(t, now, m.Time())
assert.Equal(t, now.UnixNano(), m.Time().UnixNano())
assert.Equal(t, now.UnixNano(), m.UnixNano())
}
@ -257,6 +257,8 @@ func TestNewMetric_Fields(t *testing.T) {
"string": "test",
"quote_string": `x"y`,
"backslash_quote_string": `x\"y`,
"backslash": `x\y`,
"ends_with_backslash": `x\`,
}
m, err := New("cpu", tags, fields, now)
assert.NoError(t, err)
@ -412,7 +414,7 @@ func TestNewGaugeMetric(t *testing.T) {
assert.Equal(t, tags, m.Tags())
assert.Equal(t, fields, m.Fields())
assert.Equal(t, "cpu", m.Name())
assert.Equal(t, now, m.Time())
assert.Equal(t, now.UnixNano(), m.Time().UnixNano())
assert.Equal(t, now.UnixNano(), m.UnixNano())
}
@ -434,7 +436,7 @@ func TestNewCounterMetric(t *testing.T) {
assert.Equal(t, tags, m.Tags())
assert.Equal(t, fields, m.Fields())
assert.Equal(t, "cpu", m.Name())
assert.Equal(t, now, m.Time())
assert.Equal(t, now.UnixNano(), m.Time().UnixNano())
assert.Equal(t, now.UnixNano(), m.UnixNano())
}
@ -708,12 +710,6 @@ func TestNewMetric_TrailingSlash(t *testing.T) {
`value\`: "x",
},
},
{
name: "cpu",
fields: map[string]interface{}{
"value": `x\`,
},
},
{
name: "cpu",
tags: map[string]string{


@ -4,6 +4,7 @@ import (
"io"
"io/ioutil"
"regexp"
"strings"
"testing"
"time"
@ -620,6 +621,83 @@ func TestMetricReader_SplitMetricChangingBuffer2(t *testing.T) {
}
}
func TestReader_Read(t *testing.T) {
epoch := time.Unix(0, 0)
type args struct {
name string
tags map[string]string
fields map[string]interface{}
t time.Time
mType []telegraf.ValueType
}
tests := []struct {
name string
args args
expected []byte
}{
{
name: "escape backslashes in string field",
args: args{
name: "cpu",
tags: map[string]string{},
fields: map[string]interface{}{"value": `test\`},
t: epoch,
},
expected: []byte(`cpu value="test\\" 0`),
},
{
name: "escape quote in string field",
args: args{
name: "cpu",
tags: map[string]string{},
fields: map[string]interface{}{"value": `test"`},
t: epoch,
},
expected: []byte(`cpu value="test\"" 0`),
},
{
name: "escape quote and backslash in string field",
args: args{
name: "cpu",
tags: map[string]string{},
fields: map[string]interface{}{"value": `test\"`},
t: epoch,
},
expected: []byte(`cpu value="test\\\"" 0`),
},
{
name: "escape multiple backslash in string field",
args: args{
name: "cpu",
tags: map[string]string{},
fields: map[string]interface{}{"value": `test\\`},
t: epoch,
},
expected: []byte(`cpu value="test\\\\" 0`),
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
buf := make([]byte, 512)
m, err := New(tt.args.name, tt.args.tags, tt.args.fields, tt.args.t, tt.args.mType...)
require.NoError(t, err)
r := NewReader([]telegraf.Metric{m})
num, err := r.Read(buf)
if err != io.EOF {
require.NoError(t, err)
}
line := string(buf[:num])
// This is done so that we can use raw strings in the test spec
noeol := strings.TrimRight(line, "\n")
require.Equal(t, string(tt.expected), noeol)
require.Equal(t, len(tt.expected)+1, num)
})
}
}
func TestMetricRoundtrip(t *testing.T) {
const lp = `nstat,bu=linux,cls=server,dc=cer,env=production,host=hostname,name=netstat,sr=database IpExtInBcastOctets=12570626154i,IpExtInBcastPkts=95541226i,IpExtInCEPkts=0i,IpExtInCsumErrors=0i,IpExtInECT0Pkts=55674i,IpExtInECT1Pkts=0i,IpExtInMcastOctets=5928296i,IpExtInMcastPkts=174365i,IpExtInNoECTPkts=17965863529i,IpExtInNoRoutes=20i,IpExtInOctets=3334866321815i,IpExtInTruncatedPkts=0i,IpExtOutBcastOctets=0i,IpExtOutBcastPkts=0i,IpExtOutMcastOctets=0i,IpExtOutMcastPkts=0i,IpExtOutOctets=31397892391399i,TcpExtArpFilter=0i,TcpExtBusyPollRxPackets=0i,TcpExtDelayedACKLocked=14094i,TcpExtDelayedACKLost=302083i,TcpExtDelayedACKs=55486507i,TcpExtEmbryonicRsts=11879i,TcpExtIPReversePathFilter=0i,TcpExtListenDrops=1736i,TcpExtListenOverflows=0i,TcpExtLockDroppedIcmps=0i,TcpExtOfoPruned=0i,TcpExtOutOfWindowIcmps=8i,TcpExtPAWSActive=0i,TcpExtPAWSEstab=974i,TcpExtPAWSPassive=0i,TcpExtPruneCalled=0i,TcpExtRcvPruned=0i,TcpExtSyncookiesFailed=12593i,TcpExtSyncookiesRecv=0i,TcpExtSyncookiesSent=0i,TcpExtTCPACKSkippedChallenge=0i,TcpExtTCPACKSkippedFinWait2=0i,TcpExtTCPACKSkippedPAWS=806i,TcpExtTCPACKSkippedSeq=519i,TcpExtTCPACKSkippedSynRecv=0i,TcpExtTCPACKSkippedTimeWait=0i,TcpExtTCPAbortFailed=0i,TcpExtTCPAbortOnClose=22i,TcpExtTCPAbortOnData=36593i,TcpExtTCPAbortOnLinger=0i,TcpExtTCPAbortOnMemory=0i,TcpExtTCPAbortOnTimeout=674i,TcpExtTCPAutoCorking=494253233i,TcpExtTCPBacklogDrop=0i,TcpExtTCPChallengeACK=281i,TcpExtTCPDSACKIgnoredNoUndo=93354i,TcpExtTCPDSACKIgnoredOld=336i,TcpExtTCPDSACKOfoRecv=0i,TcpExtTCPDSACKOfoSent=7i,TcpExtTCPDSACKOldSent=302073i,TcpExtTCPDSACKRecv=215884i,TcpExtTCPDSACKUndo=7633i,TcpExtTCPDeferAcceptDrop=0i,TcpExtTCPDirectCopyFromBacklog=0i,TcpExtTCPDirectCopyFromPrequeue=0i,TcpExtTCPFACKReorder=1320i,TcpExtTCPFastOpenActive=0i,TcpExtTCPFastOpenActiveFail=0i,TcpExtTCPFastOpenCookieReqd=0i,TcpExtTCPFastOpenListenOverflow=0i,TcpExtTCPFastOpenPassive=0i,TcpExtTCPFastOpenPassiveFail=0i,TcpExtTCPFastRetrans=350681i,TcpExtTCPForwardRetrans=142168i,TcpExtTCPFromZeroWindowAdv=4317i,TcpExtTCPFullUndo=29502i,TcpExtTCPHPAcks=10267073000i,TcpExtTCPHPHits=5629837098i,TcpExtTCPHPHitsToUser=0i,TcpExtTCPHystartDelayCwnd=285127i,TcpExtTCPHystartDelayDetect=12318i,TcpExtTCPHystartTrainCwnd=69160570i,TcpExtTCPHystartTrainDetect=3315799i,TcpExtTCPLossFailures=109i,TcpExtTCPLossProbeRecovery=110819i,TcpExtTCPLossProbes=233995i,TcpExtTCPLossUndo=5276i,TcpExtTCPLostRetransmit=397i,TcpExtTCPMD5NotFound=0i,TcpExtTCPMD5Unexpected=0i,TcpExtTCPMemoryPressures=0i,TcpExtTCPMinTTLDrop=0i,TcpExtTCPOFODrop=0i,TcpExtTCPOFOMerge=7i,TcpExtTCPOFOQueue=15196i,TcpExtTCPOrigDataSent=29055119435i,TcpExtTCPPartialUndo=21320i,TcpExtTCPPrequeueDropped=0i,TcpExtTCPPrequeued=0i,TcpExtTCPPureAcks=1236441827i,TcpExtTCPRcvCoalesce=225590473i,TcpExtTCPRcvCollapsed=0i,TcpExtTCPRenoFailures=0i,TcpExtTCPRenoRecovery=0i,TcpExtTCPRenoRecoveryFail=0i,TcpExtTCPRenoReorder=0i,TcpExtTCPReqQFullDoCookies=0i,TcpExtTCPReqQFullDrop=0i,TcpExtTCPRetransFail=41i,TcpExtTCPSACKDiscard=0i,TcpExtTCPSACKReneging=0i,TcpExtTCPSACKReorder=4307i,TcpExtTCPSYNChallenge=244i,TcpExtTCPSackFailures=1698i,TcpExtTCPSackMerged=184668i,TcpExtTCPSackRecovery=97369i,TcpExtTCPSackRecoveryFail=381i,TcpExtTCPSackShiftFallback=2697079i,TcpExtTCPSackShifted=760299i,TcpExtTCPSchedulerFailed=0i,TcpExtTCPSlowStartRetrans=9276i,TcpExtTCPSpuriousRTOs=959i,TcpExtTCPSpuriousRtxHostQueues=2973i,TcpExtTCPSynRetrans=200970i,TcpExtTCPTSReorder=15221i,TcpExtTCPTimeWaitOverflow=0i,TcpExtTCPTimeouts=70127i,TcpExtTCPToZeroWindowAdv=4317i,TcpExtTCPWantZeroWindowAdv=2133i,TcpExtTW=24809813i,TcpExtTWKilled=0i,TcpExtTWRecycled=0i 1496460785000000000
nstat,bu=linux,cls=server,dc=cer,env=production,host=hostname,name=snmp,sr=database IcmpInAddrMaskReps=0i,IcmpInAddrMasks=90i,IcmpInCsumErrors=0i,IcmpInDestUnreachs=284401i,IcmpInEchoReps=9i,IcmpInEchos=1761912i,IcmpInErrors=407i,IcmpInMsgs=2047767i,IcmpInParmProbs=0i,IcmpInRedirects=0i,IcmpInSrcQuenchs=0i,IcmpInTimeExcds=46i,IcmpInTimestampReps=0i,IcmpInTimestamps=1309i,IcmpMsgInType0=9i,IcmpMsgInType11=46i,IcmpMsgInType13=1309i,IcmpMsgInType17=90i,IcmpMsgInType3=284401i,IcmpMsgInType8=1761912i,IcmpMsgOutType0=1761912i,IcmpMsgOutType14=1248i,IcmpMsgOutType3=108709i,IcmpMsgOutType8=9i,IcmpOutAddrMaskReps=0i,IcmpOutAddrMasks=0i,IcmpOutDestUnreachs=108709i,IcmpOutEchoReps=1761912i,IcmpOutEchos=9i,IcmpOutErrors=0i,IcmpOutMsgs=1871878i,IcmpOutParmProbs=0i,IcmpOutRedirects=0i,IcmpOutSrcQuenchs=0i,IcmpOutTimeExcds=0i,IcmpOutTimestampReps=1248i,IcmpOutTimestamps=0i,IpDefaultTTL=64i,IpForwDatagrams=0i,IpForwarding=2i,IpFragCreates=0i,IpFragFails=0i,IpFragOKs=0i,IpInAddrErrors=0i,IpInDelivers=17658795773i,IpInDiscards=0i,IpInHdrErrors=0i,IpInReceives=17659269339i,IpInUnknownProtos=0i,IpOutDiscards=236976i,IpOutNoRoutes=1009i,IpOutRequests=23466783734i,IpReasmFails=0i,IpReasmOKs=0i,IpReasmReqds=0i,IpReasmTimeout=0i,TcpActiveOpens=23308977i,TcpAttemptFails=3757543i,TcpCurrEstab=280i,TcpEstabResets=184792i,TcpInCsumErrors=0i,TcpInErrs=232i,TcpInSegs=17536573089i,TcpMaxConn=-1i,TcpOutRsts=4051451i,TcpOutSegs=29836254873i,TcpPassiveOpens=176546974i,TcpRetransSegs=878085i,TcpRtoAlgorithm=1i,TcpRtoMax=120000i,TcpRtoMin=200i,UdpInCsumErrors=0i,UdpInDatagrams=24441661i,UdpInErrors=0i,UdpLiteInCsumErrors=0i,UdpLiteInDatagrams=0i,UdpLiteInErrors=0i,UdpLiteNoPorts=0i,UdpLiteOutDatagrams=0i,UdpLiteRcvbufErrors=0i,UdpLiteSndbufErrors=0i,UdpNoPorts=17660i,UdpOutDatagrams=51807896i,UdpRcvbufErrors=0i,UdpSndbufErrors=236922i 1496460785000000000

View File

@ -1,38 +1,25 @@
# Histogram Aggregator Plugin
#### Goal
The histogram aggregator plugin creates histograms containing the counts of
field values within a range.
This plugin was added for ability to build histograms.
Values added to a bucket are also added to the larger buckets in the
distribution. This creates a [cumulative histogram](https://en.wikipedia.org/wiki/Histogram#/media/File:Cumulative_vs_normal_histogram.svg).
#### Description
Like other Telegraf aggregators, the metric is emitted every `period` seconds.
Bucket counts however are not reset between periods and will be non-strictly
increasing while Telegraf is running.
The histogram aggregator plugin aggregates values of specified metric's
fields. The metric is emitted every `period` seconds. All you need to do
is to specify borders of histogram buckets and fields, for which you want
to aggregate histogram.
#### Design
#### How it works
The each metric is passed to the aggregator and this aggregator searches
Each metric is passed to the aggregator and this aggregator searches
histogram buckets for those fields, which have been specified in the
config. If buckets are found, the aggregator will put +1 to appropriate
bucket. Otherwise, nothing will happen. Every `period` seconds these data
will be pushed to output.
config. If buckets are found, the aggregator will increment the appropriate
bucket by one; otherwise the value is counted in the `+Inf` bucket. Every
`period` seconds this data will be forwarded to the outputs.
Note, that the all hits of current bucket will be also added to all next
buckets in final result of distribution. Why does it work this way? In
configuration you define right borders for each bucket in a ascending
sequence. Internally buckets are presented as ranges with borders
(0..bucketBorder]: 0..1, 0..10, 0..50, …, 0..+Inf. So the value "+1" will be
put into those buckets, in which the metric value fell with such ranges of
buckets.
This plugin creates cumulative histograms. It means, that the hits in the
buckets will always increase from the moment of telegraf start. But if you
restart telegraf, all hits in the buckets will be reset to 0.
Also, the algorithm of hit counting to buckets was implemented on the base
of the algorithm, which is implemented in the Prometheus
The bucket hit-counting algorithm is based on the one implemented in the Prometheus
[client](https://github.com/prometheus/client_golang/blob/master/prometheus/histogram.go).
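For illustration, a minimal Go sketch of this counting scheme (an approximation of the approach, not the plugin's actual code): each observed value increments the first bucket whose right border it does not exceed, and cumulative counts per `le` border are produced at emit time.

```go
package main

import (
	"fmt"
	"sort"
)

// Right borders in ascending order; the +Inf bucket is implicit.
var buckets = []float64{5, 10, 30, 70, 100}

// counts[i] holds the raw hits of bucket i; counts[len(buckets)] is +Inf.
var counts = make([]int64, len(buckets)+1)

// observe increments the first bucket whose border is >= v.
func observe(v float64) {
	counts[sort.SearchFloat64s(buckets, v)]++
}

func main() {
	observe(10) // lands in the le=10 bucket

	// Emit cumulatively: each border also counts all smaller buckets.
	var sum int64
	for i, border := range buckets {
		sum += counts[i]
		fmt.Printf("le=%v count=%d\n", border, sum)
	}
	fmt.Printf("le=+Inf count=%d\n", sum+counts[len(buckets)])
}
```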
### Configuration
@ -40,61 +27,44 @@ of the algorithm, which is implemented in the Prometheus
```toml
# Configuration for aggregate histogram metrics
[[aggregators.histogram]]
## General Aggregator Arguments:
## The period on which to flush & clear the aggregator.
## The period in which to flush the aggregator.
period = "30s"
## If true, the original metric will be dropped by the
## aggregator and will not get sent to the output plugins.
drop_original = false
## The example of config to aggregate histogram for all fields of specified metric.
[[aggregators.histogram.config]]
## The set of buckets.
buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0]
## The name of metric.
metric_name = "cpu"
## Example config that aggregates all fields of the metric.
# [[aggregators.histogram.config]]
# ## The set of buckets.
# buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0]
# ## The name of metric.
# measurement_name = "cpu"
## The example of config to aggregate histogram for concrete fields of specified metric.
[[aggregators.histogram.config]]
## The set of buckets.
buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
## The name of metric.
metric_name = "diskio"
## The concrete fields of metric.
metric_fields = ["io_time", "read_time", "write_time"]
## Example config that aggregates only specific fields of the metric.
# [[aggregators.histogram.config]]
# ## The set of buckets.
# buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
# ## The name of metric.
# measurement_name = "diskio"
# ## The concrete fields of metric
# fields = ["io_time", "read_time", "write_time"]
```
#### Explanation
The user is responsible for defining the bounds of the histogram bucket as
well as the measurement name and fields to aggregate.
The field `metric_fields` is the list of metric fields. For example, the
metric `cpu` has the following fields: usage_user, usage_system,
usage_idle, usage_nice, usage_iowait, usage_irq, usage_softirq, usage_steal,
usage_guest, usage_guest_nice.
Each histogram config section must contain a `buckets` and `measurement_name`
option. Optionally, if `fields` is set only the fields listed will be
aggregated. If `fields` is not set all fields are aggregated.
Note that histogram metrics will be pushed every `period` seconds.
Telegraf calls the aggregator's `Reset()` func every `period` seconds, but the
histogram aggregator ignores `Reset()` and continues to count hits.
The `buckets` option contains a list of floats which specify the bucket
boundaries. Each float value defines the inclusive upper bound of the bucket.
The `+Inf` bucket is added automatically and does not need to be defined.
#### Use cases
You can specify fields using two cases:
1. The specifying only metric name. In this case all fields of metric
will be aggregated.
2. The specifying metric name and concrete field.
#### Some rules
- The setting of each histogram must be in separate section with title
`aggregators.histogram.config`.
- The each value of bucket must be float value.
- Don't include the border bucket `+Inf`. It will be done automatically.
### Measurements & Fields:
The postfix `bucket` will be added to each field.
The postfix `bucket` will be added to each field key.
- measurement1
- field1_bucket
@ -102,16 +72,15 @@ The postfix `bucket` will be added to each field.
### Tags:
All measurements have tag `le`. This tag has the border value of bucket. It
means that the metric value is less or equal to the value of this tag. For
example, let assume that we have the metric value 10 and the following
buckets: [5, 10, 30, 70, 100]. Then the tag `le` will have the value 10,
because the metrics value is passed into bucket with right border value `10`.
All measurements are given the tag `le`. This tag holds the right border value
of the bucket: the metric value is less than or equal to the value of this tag.
For example, assume a metric value of 10 and the buckets [5, 10, 30, 70, 100].
The value falls into the bucket with the right border `10`, so that bucket's
`le` tag has the value 10.
### Example Output:
The following output is returned to the Prometheus client.
```
cpu,cpu=cpu1,host=localhost,le=0.0 usage_idle_bucket=0i 1486998330000000000
cpu,cpu=cpu1,host=localhost,le=10.0 usage_idle_bucket=0i 1486998330000000000

View File

@ -24,8 +24,8 @@ type HistogramAggregator struct {
// config is the config, which contains name, field of metric and histogram buckets.
type config struct {
Metric string `toml:"metric_name"`
Fields []string `toml:"metric_fields"`
Metric string `toml:"measurement_name"`
Fields []string `toml:"fields"`
Buckets buckets `toml:"buckets"`
}
@ -65,28 +65,28 @@ func NewHistogramAggregator() telegraf.Aggregator {
}
var sampleConfig = `
## General Aggregator Arguments:
## The period on which to flush & clear the aggregator.
## The period in which to flush the aggregator.
period = "30s"
## If true, the original metric will be dropped by the
## aggregator and will not get sent to the output plugins.
drop_original = false
## The example of config to aggregate histogram for all fields of specified metric.
[[aggregators.histogram.config]]
## The set of buckets.
buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0]
## The name of metric.
metric_name = "cpu"
## Example config that aggregates all fields of the metric.
# [[aggregators.histogram.config]]
# ## The set of buckets.
# buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0]
# ## The name of metric.
# measurement_name = "cpu"
## The example of config to aggregate for specified fields of metric.
[[aggregators.histogram.config]]
## The set of buckets.
buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
## The name of metric.
metric_name = "diskio"
## The concrete fields of metric
metric_fields = ["io_time", "read_time", "write_time"]
## Example config that aggregates only specific fields of the metric.
# [[aggregators.histogram.config]]
# ## The set of buckets.
# buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
# ## The name of metric.
# measurement_name = "diskio"
# ## The concrete fields of metric
# fields = ["io_time", "read_time", "write_time"]
`
// SampleConfig returns sample of config
@ -96,7 +96,7 @@ func (h *HistogramAggregator) SampleConfig() string {
// Description returns description of aggregator plugin
func (h *HistogramAggregator) Description() string {
return "Keep the aggregate histogram of each metric passing through."
return "Create aggregate histograms."
}
// Add adds new hit to the buckets

View File

@ -39,9 +39,9 @@ The following defaults are known to work with RabbitMQ:
## Use SSL but skip chain & host verification
# insecure_skip_verify = false
## Data format to output.
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "influx"
```

View File

@ -85,10 +85,10 @@ func (a *AMQPConsumer) SampleConfig() string {
## Use SSL but skip chain & host verification
# insecure_skip_verify = false
## Data format to output.
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "influx"
`
}

View File

@ -26,7 +26,7 @@ func TestParseSockId(t *testing.T) {
func TestParseMonDump(t *testing.T) {
dump, err := parseDump(monPerfDump)
assert.NoError(t, err)
assert.InEpsilon(t, 5678670180, dump["cluster"]["osd_kb_used"], epsilon)
assert.InEpsilon(t, int64(5678670180), dump["cluster"]["osd_kb_used"], epsilon)
assert.InEpsilon(t, 6866.540527000, dump["paxos"]["store_state_latency.sum"], epsilon)
}

View File

@ -225,7 +225,7 @@ var fileFormats = [...]fileFormat{
}
func numberOrString(s string) interface{} {
i, err := strconv.Atoi(s)
i, err := strconv.ParseInt(s, 10, 64)
if err == nil {
return i
}
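The switch from `strconv.Atoi` to `strconv.ParseInt(s, 10, 64)` guarantees a 64-bit result: `Atoi` returns a platform-sized `int`, which is only 32 bits wide on 32-bit builds and would overflow on counters like the `5678670180` asserted in the ceph test above. A small sketch of the difference:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	s := "5678670180" // larger than a 32-bit int can hold

	// Atoi returns int: on a 32-bit platform this fails with a range
	// error; on a 64-bit platform it succeeds but yields an int.
	i, err := strconv.Atoi(s)
	fmt.Printf("%T %v %v\n", i, i, err)

	// ParseInt with bitSize 64 parses into int64 on every platform.
	i64, err := strconv.ParseInt(s, 10, 64)
	fmt.Printf("%T %v %v\n", i64, i64, err)
}
```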

View File

@ -31,17 +31,17 @@ func TestCgroupStatistics_1(t *testing.T) {
"path": "testdata/memory",
}
fields := map[string]interface{}{
"memory.stat.cache": 1739362304123123123,
"memory.stat.rss": 1775325184,
"memory.stat.rss_huge": 778043392,
"memory.stat.mapped_file": 421036032,
"memory.stat.dirty": -307200,
"memory.max_usage_in_bytes.0": 0,
"memory.max_usage_in_bytes.1": -1,
"memory.max_usage_in_bytes.2": 2,
"memory.limit_in_bytes": 223372036854771712,
"memory.stat.cache": int64(1739362304123123123),
"memory.stat.rss": int64(1775325184),
"memory.stat.rss_huge": int64(778043392),
"memory.stat.mapped_file": int64(421036032),
"memory.stat.dirty": int64(-307200),
"memory.max_usage_in_bytes.0": int64(0),
"memory.max_usage_in_bytes.1": int64(-1),
"memory.max_usage_in_bytes.2": int64(2),
"memory.limit_in_bytes": int64(223372036854771712),
"memory.use_hierarchy": "12-781",
"notify_on_release": 0,
"notify_on_release": int64(0),
}
acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
}
@ -63,10 +63,10 @@ func TestCgroupStatistics_2(t *testing.T) {
"path": "testdata/cpu",
}
fields := map[string]interface{}{
"cpuacct.usage_percpu.0": -1452543795404,
"cpuacct.usage_percpu.1": 1376681271659,
"cpuacct.usage_percpu.2": 1450950799997,
"cpuacct.usage_percpu.3": -1473113374257,
"cpuacct.usage_percpu.0": int64(-1452543795404),
"cpuacct.usage_percpu.1": int64(1376681271659),
"cpuacct.usage_percpu.2": int64(1450950799997),
"cpuacct.usage_percpu.3": int64(-1473113374257),
}
acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
}
@ -88,7 +88,7 @@ func TestCgroupStatistics_3(t *testing.T) {
"path": "testdata/memory/group_1",
}
fields := map[string]interface{}{
"memory.limit_in_bytes": 223372036854771712,
"memory.limit_in_bytes": int64(223372036854771712),
}
acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
@ -115,7 +115,7 @@ func TestCgroupStatistics_4(t *testing.T) {
"path": "testdata/memory/group_1/group_1_1",
}
fields := map[string]interface{}{
"memory.limit_in_bytes": 223372036854771712,
"memory.limit_in_bytes": int64(223372036854771712),
}
acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
@ -147,7 +147,7 @@ func TestCgroupStatistics_5(t *testing.T) {
"path": "testdata/memory/group_1/group_1_1",
}
fields := map[string]interface{}{
"memory.limit_in_bytes": 223372036854771712,
"memory.limit_in_bytes": int64(223372036854771712),
}
acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
@ -174,9 +174,9 @@ func TestCgroupStatistics_6(t *testing.T) {
"path": "testdata/memory",
}
fields := map[string]interface{}{
"memory.usage_in_bytes": 3513667584,
"memory.usage_in_bytes": int64(3513667584),
"memory.use_hierarchy": "12-781",
"memory.kmem.limit_in_bytes": 9223372036854771712,
"memory.kmem.limit_in_bytes": int64(9223372036854771712),
}
acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
}
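The `int64(...)` wrappers are not cosmetic: an untyped integer constant stored into a `map[string]interface{}` defaults to `int`, and the reflection-based equality used by the accumulator assertions treats `int` and the plugin's actual `int64` field values as unequal. A minimal demonstration:

```go
package main

import (
	"fmt"
	"reflect"
)

func main() {
	m := map[string]interface{}{
		"plain":   0,        // stored as int
		"wrapped": int64(0), // stored as int64
	}
	fmt.Println(reflect.TypeOf(m["plain"]), reflect.TypeOf(m["wrapped"])) // int int64
	fmt.Println(reflect.DeepEqual(m["plain"], m["wrapped"]))              // false
}
```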

View File

@ -1,5 +1,3 @@
// +build linux
package chrony
import (

View File

@ -1,3 +0,0 @@
// +build !linux
package chrony

View File

@ -1,5 +1,3 @@
// +build linux
package chrony
import (

View File

@ -69,6 +69,10 @@ func (c *Consul) createAPIClient() (*api.Client, error) {
config.Datacenter = c.Datacentre
}
if c.Token != "" {
config.Token = c.Token
}
if c.Username != "" {
config.HttpAuth = &api.HttpBasicAuth{
Username: c.Username,

View File

@ -20,7 +20,7 @@ var sampleChecks = []*api.HealthCheck{
},
}
func TestGatherHealtCheck(t *testing.T) {
func TestGatherHealthCheck(t *testing.T) {
expectedFields := map[string]interface{}{
"check_name": "foo.health",
"status": "passing",

View File

@ -16,21 +16,21 @@ const metricName = "dmcache"
type cacheStatus struct {
device string
length int
length int64
target string
metadataBlocksize int
metadataUsed int
metadataTotal int
cacheBlocksize int
cacheUsed int
cacheTotal int
readHits int
readMisses int
writeHits int
writeMisses int
demotions int
promotions int
dirty int
metadataBlocksize int64
metadataUsed int64
metadataTotal int64
cacheBlocksize int64
cacheUsed int64
cacheTotal int64
readHits int64
readMisses int64
writeHits int64
writeMisses int64
demotions int64
promotions int64
dirty int64
}
func (c *DMCache) Gather(acc telegraf.Accumulator) error {
@ -69,12 +69,12 @@ func parseDMSetupStatus(line string) (cacheStatus, error) {
}
status.device = strings.TrimRight(values[0], ":")
status.length, err = strconv.Atoi(values[2])
status.length, err = strconv.ParseInt(values[2], 10, 64)
if err != nil {
return cacheStatus{}, err
}
status.target = values[3]
status.metadataBlocksize, err = strconv.Atoi(values[4])
status.metadataBlocksize, err = strconv.ParseInt(values[4], 10, 64)
if err != nil {
return cacheStatus{}, err
}
@ -82,15 +82,15 @@ func parseDMSetupStatus(line string) (cacheStatus, error) {
if len(metadata) != 2 {
return cacheStatus{}, parseError
}
status.metadataUsed, err = strconv.Atoi(metadata[0])
status.metadataUsed, err = strconv.ParseInt(metadata[0], 10, 64)
if err != nil {
return cacheStatus{}, err
}
status.metadataTotal, err = strconv.Atoi(metadata[1])
status.metadataTotal, err = strconv.ParseInt(metadata[1], 10, 64)
if err != nil {
return cacheStatus{}, err
}
status.cacheBlocksize, err = strconv.Atoi(values[6])
status.cacheBlocksize, err = strconv.ParseInt(values[6], 10, 64)
if err != nil {
return cacheStatus{}, err
}
@ -98,39 +98,39 @@ func parseDMSetupStatus(line string) (cacheStatus, error) {
if len(cache) != 2 {
return cacheStatus{}, parseError
}
status.cacheUsed, err = strconv.Atoi(cache[0])
status.cacheUsed, err = strconv.ParseInt(cache[0], 10, 64)
if err != nil {
return cacheStatus{}, err
}
status.cacheTotal, err = strconv.Atoi(cache[1])
status.cacheTotal, err = strconv.ParseInt(cache[1], 10, 64)
if err != nil {
return cacheStatus{}, err
}
status.readHits, err = strconv.Atoi(values[8])
status.readHits, err = strconv.ParseInt(values[8], 10, 64)
if err != nil {
return cacheStatus{}, err
}
status.readMisses, err = strconv.Atoi(values[9])
status.readMisses, err = strconv.ParseInt(values[9], 10, 64)
if err != nil {
return cacheStatus{}, err
}
status.writeHits, err = strconv.Atoi(values[10])
status.writeHits, err = strconv.ParseInt(values[10], 10, 64)
if err != nil {
return cacheStatus{}, err
}
status.writeMisses, err = strconv.Atoi(values[11])
status.writeMisses, err = strconv.ParseInt(values[11], 10, 64)
if err != nil {
return cacheStatus{}, err
}
status.demotions, err = strconv.Atoi(values[12])
status.demotions, err = strconv.ParseInt(values[12], 10, 64)
if err != nil {
return cacheStatus{}, err
}
status.promotions, err = strconv.Atoi(values[13])
status.promotions, err = strconv.ParseInt(values[13], 10, 64)
if err != nil {
return cacheStatus{}, err
}
status.dirty, err = strconv.Atoi(values[14])
status.dirty, err = strconv.ParseInt(values[14], 10, 64)
if err != nil {
return cacheStatus{}, err
}
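For reference, a `dmsetup status` line for the cache target has the shape below; the sample is reconstructed from the expected values in the test file that follows, so treat it as illustrative rather than authoritative:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Field positions match the indices used by parseDMSetupStatus above.
	line := "cs-1: 0 4883791872 cache 8 1018/1501122 512 7/464962 139 352643 15 46 0 7 0"

	v := strings.Fields(line)
	fmt.Println("device:", strings.TrimRight(v[0], ":")) // cs-1
	fmt.Println("length:", v[2], "target:", v[3])        // 4883791872 cache
	fmt.Println("metadata used/total:", v[5])            // 1018/1501122
	fmt.Println("cache used/total:", v[7])               // 7/464962
	fmt.Println("read hits/misses:", v[8], v[9])         // 139 352643
}
```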

View File

@ -1,3 +1,5 @@
// +build linux
package dmcache
import (
@ -33,20 +35,20 @@ func TestPerDeviceGoodOutput(t *testing.T) {
"device": "cs-1",
}
fields1 := map[string]interface{}{
"length": 4883791872,
"metadata_blocksize": 8,
"metadata_used": 1018,
"metadata_total": 1501122,
"cache_blocksize": 512,
"cache_used": 7,
"cache_total": 464962,
"read_hits": 139,
"read_misses": 352643,
"write_hits": 15,
"write_misses": 46,
"demotions": 0,
"promotions": 7,
"dirty": 0,
"length": int64(4883791872),
"metadata_blocksize": int64(8),
"metadata_used": int64(1018),
"metadata_total": int64(1501122),
"cache_blocksize": int64(512),
"cache_used": int64(7),
"cache_total": int64(464962),
"read_hits": int64(139),
"read_misses": int64(352643),
"write_hits": int64(15),
"write_misses": int64(46),
"demotions": int64(0),
"promotions": int64(7),
"dirty": int64(0),
}
acc.AssertContainsTaggedFields(t, measurement, fields1, tags1)
@ -54,20 +56,20 @@ func TestPerDeviceGoodOutput(t *testing.T) {
"device": "cs-2",
}
fields2 := map[string]interface{}{
"length": 4294967296,
"metadata_blocksize": 8,
"metadata_used": 72352,
"metadata_total": 1310720,
"cache_blocksize": 128,
"cache_used": 26,
"cache_total": 24327168,
"read_hits": 2409,
"read_misses": 286,
"write_hits": 265,
"write_misses": 524682,
"demotions": 0,
"promotions": 0,
"dirty": 0,
"length": int64(4294967296),
"metadata_blocksize": int64(8),
"metadata_used": int64(72352),
"metadata_total": int64(1310720),
"cache_blocksize": int64(128),
"cache_used": int64(26),
"cache_total": int64(24327168),
"read_hits": int64(2409),
"read_misses": int64(286),
"write_hits": int64(265),
"write_misses": int64(524682),
"demotions": int64(0),
"promotions": int64(0),
"dirty": int64(0),
}
acc.AssertContainsTaggedFields(t, measurement, fields2, tags2)
@ -76,20 +78,20 @@ func TestPerDeviceGoodOutput(t *testing.T) {
}
fields3 := map[string]interface{}{
"length": 9178759168,
"metadata_blocksize": 16,
"metadata_used": 73370,
"metadata_total": 2811842,
"cache_blocksize": 640,
"cache_used": 33,
"cache_total": 24792130,
"read_hits": 2548,
"read_misses": 352929,
"write_hits": 280,
"write_misses": 524728,
"demotions": 0,
"promotions": 7,
"dirty": 0,
"length": int64(9178759168),
"metadata_blocksize": int64(16),
"metadata_used": int64(73370),
"metadata_total": int64(2811842),
"cache_blocksize": int64(640),
"cache_used": int64(33),
"cache_total": int64(24792130),
"read_hits": int64(2548),
"read_misses": int64(352929),
"write_hits": int64(280),
"write_misses": int64(524728),
"demotions": int64(0),
"promotions": int64(7),
"dirty": int64(0),
}
acc.AssertContainsTaggedFields(t, measurement, fields3, tags3)
}
@ -111,20 +113,20 @@ func TestNotPerDeviceGoodOutput(t *testing.T) {
}
fields := map[string]interface{}{
"length": 9178759168,
"metadata_blocksize": 16,
"metadata_used": 73370,
"metadata_total": 2811842,
"cache_blocksize": 640,
"cache_used": 33,
"cache_total": 24792130,
"read_hits": 2548,
"read_misses": 352929,
"write_hits": 280,
"write_misses": 524728,
"demotions": 0,
"promotions": 7,
"dirty": 0,
"length": int64(9178759168),
"metadata_blocksize": int64(16),
"metadata_used": int64(73370),
"metadata_total": int64(2811842),
"cache_blocksize": int64(640),
"cache_used": int64(33),
"cache_total": int64(24792130),
"read_hits": int64(2548),
"read_misses": int64(352929),
"write_hits": int64(280),
"write_misses": int64(524728),
"demotions": int64(0),
"promotions": int64(7),
"dirty": int64(0),
}
acc.AssertContainsTaggedFields(t, measurement, fields, tags)
}

View File

@ -20,16 +20,6 @@ import (
"github.com/influxdata/telegraf/plugins/inputs"
)
type DockerLabelFilter struct {
labelInclude filter.Filter
labelExclude filter.Filter
}
type DockerContainerFilter struct {
containerInclude filter.Filter
containerExclude filter.Filter
}
// Docker object
type Docker struct {
Endpoint string
@ -41,11 +31,9 @@ type Docker struct {
TagEnvironment []string `toml:"tag_env"`
LabelInclude []string `toml:"docker_label_include"`
LabelExclude []string `toml:"docker_label_exclude"`
LabelFilter DockerLabelFilter
ContainerInclude []string `toml:"container_name_include"`
ContainerExclude []string `toml:"container_name_exclude"`
ContainerFilter DockerContainerFilter
SSLCA string `toml:"ssl_ca"`
SSLCert string `toml:"ssl_cert"`
@ -55,10 +43,12 @@ type Docker struct {
newEnvClient func() (Client, error)
newClient func(string, *tls.Config) (Client, error)
client Client
httpClient *http.Client
engine_host string
filtersCreated bool
client Client
httpClient *http.Client
engine_host string
filtersCreated bool
labelFilter filter.Filter
containerFilter filter.Filter
}
// KB, MB, GB, TB, PB...human friendly
@ -291,12 +281,8 @@ func (d *Docker) gatherContainer(
"container_version": imageVersion,
}
if len(d.ContainerInclude) > 0 || len(d.ContainerExclude) > 0 {
if len(d.ContainerInclude) == 0 || !d.ContainerFilter.containerInclude.Match(cname) {
if len(d.ContainerExclude) == 0 || d.ContainerFilter.containerExclude.Match(cname) {
return nil
}
}
if !d.containerFilter.Match(cname) {
return nil
}
ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration)
@ -317,10 +303,8 @@ func (d *Docker) gatherContainer(
// Add labels to tags
for k, label := range container.Labels {
if len(d.LabelInclude) == 0 || d.LabelFilter.labelInclude.Match(k) {
if len(d.LabelExclude) == 0 || !d.LabelFilter.labelExclude.Match(k) {
tags[k] = label
}
if d.labelFilter.Match(k) {
tags[k] = label
}
}
@ -355,7 +339,11 @@ func gatherContainerStats(
total bool,
daemonOSType string,
) {
now := stat.Read
tm := stat.Read
if tm.Before(time.Unix(0, 0)) {
tm = time.Now()
}
memfields := map[string]interface{}{
"container_id": id,
@ -415,7 +403,7 @@ func gatherContainerStats(
memfields["private_working_set"] = stat.MemoryStats.PrivateWorkingSet
}
acc.AddFields("docker_container_mem", memfields, tags, now)
acc.AddFields("docker_container_mem", memfields, tags, tm)
cpufields := map[string]interface{}{
"usage_total": stat.CPUStats.CPUUsage.TotalUsage,
@ -440,7 +428,7 @@ func gatherContainerStats(
cputags := copyTags(tags)
cputags["cpu"] = "cpu-total"
acc.AddFields("docker_container_cpu", cpufields, cputags, now)
acc.AddFields("docker_container_cpu", cpufields, cputags, tm)
// If we have OnlineCPUs field, then use it to restrict stats gathering to only Online CPUs
// (https://github.com/moby/moby/commit/115f91d7575d6de6c7781a96a082f144fd17e400)
@ -458,7 +446,7 @@ func gatherContainerStats(
"usage_total": percpu,
"container_id": id,
}
acc.AddFields("docker_container_cpu", fields, percputags, now)
acc.AddFields("docker_container_cpu", fields, percputags, tm)
}
totalNetworkStatMap := make(map[string]interface{})
@ -478,7 +466,7 @@ func gatherContainerStats(
if perDevice {
nettags := copyTags(tags)
nettags["network"] = network
acc.AddFields("docker_container_net", netfields, nettags, now)
acc.AddFields("docker_container_net", netfields, nettags, tm)
}
if total {
for field, value := range netfields {
@ -511,17 +499,17 @@ func gatherContainerStats(
nettags := copyTags(tags)
nettags["network"] = "total"
totalNetworkStatMap["container_id"] = id
acc.AddFields("docker_container_net", totalNetworkStatMap, nettags, now)
acc.AddFields("docker_container_net", totalNetworkStatMap, nettags, tm)
}
gatherBlockIOMetrics(stat, acc, tags, now, id, perDevice, total)
gatherBlockIOMetrics(stat, acc, tags, tm, id, perDevice, total)
}
func gatherBlockIOMetrics(
stat *types.StatsJSON,
acc telegraf.Accumulator,
tags map[string]string,
now time.Time,
tm time.Time,
id string,
perDevice bool,
total bool,
@ -592,7 +580,7 @@ func gatherBlockIOMetrics(
if perDevice {
iotags := copyTags(tags)
iotags["device"] = device
acc.AddFields("docker_container_blkio", fields, iotags, now)
acc.AddFields("docker_container_blkio", fields, iotags, tm)
}
if total {
for field, value := range fields {
@ -623,7 +611,7 @@ func gatherBlockIOMetrics(
totalStatMap["container_id"] = id
iotags := copyTags(tags)
iotags["device"] = "total"
acc.AddFields("docker_container_blkio", totalStatMap, iotags, now)
acc.AddFields("docker_container_blkio", totalStatMap, iotags, tm)
}
}
@ -666,46 +654,25 @@ func parseSize(sizeStr string) (int64, error) {
}
func (d *Docker) createContainerFilters() error {
// Backwards compatibility for deprecated `container_names` parameter.
if len(d.ContainerNames) > 0 {
d.ContainerInclude = append(d.ContainerInclude, d.ContainerNames...)
}
if len(d.ContainerInclude) != 0 {
var err error
d.ContainerFilter.containerInclude, err = filter.Compile(d.ContainerInclude)
if err != nil {
return err
}
filter, err := filter.NewIncludeExcludeFilter(d.ContainerInclude, d.ContainerExclude)
if err != nil {
return err
}
if len(d.ContainerExclude) != 0 {
var err error
d.ContainerFilter.containerExclude, err = filter.Compile(d.ContainerExclude)
if err != nil {
return err
}
}
d.containerFilter = filter
return nil
}
func (d *Docker) createLabelFilters() error {
if len(d.LabelInclude) != 0 {
var err error
d.LabelFilter.labelInclude, err = filter.Compile(d.LabelInclude)
if err != nil {
return err
}
filter, err := filter.NewIncludeExcludeFilter(d.LabelInclude, d.LabelExclude)
if err != nil {
return err
}
if len(d.LabelExclude) != 0 {
var err error
d.LabelFilter.labelExclude, err = filter.Compile(d.LabelExclude)
if err != nil {
return err
}
}
d.labelFilter = filter
return nil
}
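The consolidated `filter.NewIncludeExcludeFilter` helper encodes the precedence rules the old code spelled out by hand: nil or empty lists match everything, include patterns are checked first, and exclude patterns veto anything they match. A usage sketch:

```go
package main

import (
	"fmt"

	"github.com/influxdata/telegraf/filter"
)

func main() {
	// Keep names starting with "acme", but drop test containers.
	f, err := filter.NewIncludeExcludeFilter([]string{"acme*"}, []string{"*test*"})
	if err != nil {
		panic(err)
	}
	fmt.Println(f.Match("acme"))      // true: included, not excluded
	fmt.Println(f.Match("acme-test")) // false: exclude wins over include
	fmt.Println(f.Match("foo"))       // false: not included

	// Nil lists match everything.
	all, _ := filter.NewIncludeExcludeFilter(nil, nil)
	fmt.Println(all.Match("anything")) // true
}
```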

View File

@ -44,21 +44,23 @@ func (c *MockClient) ContainerInspect(
return c.ContainerInspectF(ctx, containerID)
}
var baseClient = MockClient{
InfoF: func(context.Context) (types.Info, error) {
return info, nil
},
ContainerListF: func(context.Context, types.ContainerListOptions) ([]types.Container, error) {
return containerList, nil
},
ContainerStatsF: func(context.Context, string, bool) (types.ContainerStats, error) {
return containerStats(), nil
},
ContainerInspectF: func(context.Context, string) (types.ContainerJSON, error) {
return containerInspect, nil
},
}
func newClient(host string, tlsConfig *tls.Config) (Client, error) {
return &MockClient{
InfoF: func(context.Context) (types.Info, error) {
return info, nil
},
ContainerListF: func(context.Context, types.ContainerListOptions) ([]types.Container, error) {
return containerList, nil
},
ContainerStatsF: func(context.Context, string, bool) (types.ContainerStats, error) {
return containerStats(), nil
},
ContainerInspectF: func(context.Context, string) (types.ContainerJSON, error) {
return containerInspect, nil
},
}, nil
return &baseClient, nil
}
func TestDockerGatherContainerStats(t *testing.T) {
@ -234,82 +236,291 @@ func TestDocker_WindowsMemoryContainerStats(t *testing.T) {
require.NoError(t, err)
}
func TestDockerGatherLabels(t *testing.T) {
var gatherLabelsTests = []struct {
include []string
exclude []string
expected []string
notexpected []string
func TestContainerLabels(t *testing.T) {
var tests = []struct {
name string
container types.Container
include []string
exclude []string
expected map[string]string
}{
{[]string{}, []string{}, []string{"label1", "label2"}, []string{}},
{[]string{"*"}, []string{}, []string{"label1", "label2"}, []string{}},
{[]string{"lab*"}, []string{}, []string{"label1", "label2"}, []string{}},
{[]string{"label1"}, []string{}, []string{"label1"}, []string{"label2"}},
{[]string{"label1*"}, []string{}, []string{"label1"}, []string{"label2"}},
{[]string{}, []string{"*"}, []string{}, []string{"label1", "label2"}},
{[]string{}, []string{"lab*"}, []string{}, []string{"label1", "label2"}},
{[]string{}, []string{"label1"}, []string{"label2"}, []string{"label1"}},
{[]string{"*"}, []string{"*"}, []string{}, []string{"label1", "label2"}},
{
name: "Nil filters matches all",
container: types.Container{
Labels: map[string]string{
"a": "x",
},
},
include: nil,
exclude: nil,
expected: map[string]string{
"a": "x",
},
},
{
name: "Empty filters matches all",
container: types.Container{
Labels: map[string]string{
"a": "x",
},
},
include: []string{},
exclude: []string{},
expected: map[string]string{
"a": "x",
},
},
{
name: "Must match include",
container: types.Container{
Labels: map[string]string{
"a": "x",
"b": "y",
},
},
include: []string{"a"},
exclude: []string{},
expected: map[string]string{
"a": "x",
},
},
{
name: "Must not match exclude",
container: types.Container{
Labels: map[string]string{
"a": "x",
"b": "y",
},
},
include: []string{},
exclude: []string{"b"},
expected: map[string]string{
"a": "x",
},
},
{
name: "Include Glob",
container: types.Container{
Labels: map[string]string{
"aa": "x",
"ab": "y",
"bb": "z",
},
},
include: []string{"a*"},
exclude: []string{},
expected: map[string]string{
"aa": "x",
"ab": "y",
},
},
{
name: "Exclude Glob",
container: types.Container{
Labels: map[string]string{
"aa": "x",
"ab": "y",
"bb": "z",
},
},
include: []string{},
exclude: []string{"a*"},
expected: map[string]string{
"bb": "z",
},
},
{
name: "Excluded Includes",
container: types.Container{
Labels: map[string]string{
"aa": "x",
"ab": "y",
"bb": "z",
},
},
include: []string{"a*"},
exclude: []string{"*b"},
expected: map[string]string{
"aa": "x",
},
},
}
for _, tt := range gatherLabelsTests {
t.Run("", func(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
var acc testutil.Accumulator
d := Docker{
newClient: newClient,
newClientFunc := func(host string, tlsConfig *tls.Config) (Client, error) {
client := baseClient
client.ContainerListF = func(context.Context, types.ContainerListOptions) ([]types.Container, error) {
return []types.Container{tt.container}, nil
}
return &client, nil
}
for _, label := range tt.include {
d.LabelInclude = append(d.LabelInclude, label)
}
for _, label := range tt.exclude {
d.LabelExclude = append(d.LabelExclude, label)
d := Docker{
newClient: newClientFunc,
LabelInclude: tt.include,
LabelExclude: tt.exclude,
}
err := d.Gather(&acc)
require.NoError(t, err)
for _, label := range tt.expected {
if !acc.HasTag("docker_container_cpu", label) {
t.Errorf("Didn't get expected label of %s. Test was: Include: %s Exclude %s",
label, tt.include, tt.exclude)
// Grab tags from a container metric
var actual map[string]string
for _, metric := range acc.Metrics {
if metric.Measurement == "docker_container_cpu" {
actual = metric.Tags
}
}
for _, label := range tt.notexpected {
if acc.HasTag("docker_container_cpu", label) {
t.Errorf("Got unexpected label of %s. Test was: Include: %s Exclude %s",
label, tt.include, tt.exclude)
}
for k, v := range tt.expected {
require.Equal(t, v, actual[k])
}
})
}
}
func TestContainerNames(t *testing.T) {
var gatherContainerNames = []struct {
include []string
exclude []string
expected []string
notexpected []string
var tests = []struct {
name string
containers [][]string
include []string
exclude []string
expected []string
}{
{[]string{}, []string{}, []string{"etcd", "etcd2"}, []string{}},
{[]string{"*"}, []string{}, []string{"etcd", "etcd2"}, []string{}},
{[]string{"etc*"}, []string{}, []string{"etcd", "etcd2"}, []string{}},
{[]string{"etcd"}, []string{}, []string{"etcd"}, []string{"etcd2"}},
{[]string{"etcd2*"}, []string{}, []string{"etcd2"}, []string{"etcd"}},
{[]string{}, []string{"etc*"}, []string{}, []string{"etcd", "etcd2"}},
{[]string{}, []string{"etcd"}, []string{"etcd2"}, []string{"etcd"}},
{[]string{"*"}, []string{"*"}, []string{"etcd", "etcd2"}, []string{}},
{[]string{}, []string{"*"}, []string{""}, []string{"etcd", "etcd2"}},
{
name: "Nil filters matches all",
containers: [][]string{
{"/etcd"},
{"/etcd2"},
},
include: nil,
exclude: nil,
expected: []string{"etcd", "etcd2"},
},
{
name: "Empty filters matches all",
containers: [][]string{
{"/etcd"},
{"/etcd2"},
},
include: []string{},
exclude: []string{},
expected: []string{"etcd", "etcd2"},
},
{
name: "Match all containers",
containers: [][]string{
{"/etcd"},
{"/etcd2"},
},
include: []string{"*"},
exclude: []string{},
expected: []string{"etcd", "etcd2"},
},
{
name: "Include prefix match",
containers: [][]string{
{"/etcd"},
{"/etcd2"},
},
include: []string{"etc*"},
exclude: []string{},
expected: []string{"etcd", "etcd2"},
},
{
name: "Exact match",
containers: [][]string{
{"/etcd"},
{"/etcd2"},
},
include: []string{"etcd"},
exclude: []string{},
expected: []string{"etcd"},
},
{
name: "Star matches zero length",
containers: [][]string{
{"/etcd"},
{"/etcd2"},
},
include: []string{"etcd2*"},
exclude: []string{},
expected: []string{"etcd2"},
},
{
name: "Exclude matches all",
containers: [][]string{
{"/etcd"},
{"/etcd2"},
},
include: []string{},
exclude: []string{"etc*"},
expected: []string{},
},
{
name: "Exclude single",
containers: [][]string{
{"/etcd"},
{"/etcd2"},
},
include: []string{},
exclude: []string{"etcd"},
expected: []string{"etcd2"},
},
{
name: "Exclude all",
containers: [][]string{
{"/etcd"},
{"/etcd2"},
},
include: []string{"*"},
exclude: []string{"*"},
expected: []string{},
},
{
name: "Exclude item matching include",
containers: [][]string{
{"acme"},
{"foo"},
{"acme-test"},
},
include: []string{"acme*"},
exclude: []string{"*test*"},
expected: []string{"acme"},
},
{
name: "Exclude item no wildcards",
containers: [][]string{
{"acme"},
{"acme-test"},
},
include: []string{"acme*"},
exclude: []string{"test"},
expected: []string{"acme", "acme-test"},
},
}
for _, tt := range gatherContainerNames {
t.Run("", func(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
var acc testutil.Accumulator
newClientFunc := func(host string, tlsConfig *tls.Config) (Client, error) {
client := baseClient
client.ContainerListF = func(context.Context, types.ContainerListOptions) ([]types.Container, error) {
var containers []types.Container
for _, names := range tt.containers {
containers = append(containers, types.Container{
Names: names,
})
}
return containers, nil
}
return &client, nil
}
d := Docker{
newClient: newClient,
newClient: newClientFunc,
ContainerInclude: tt.include,
ContainerExclude: tt.exclude,
}
@ -317,39 +528,21 @@ func TestContainerNames(t *testing.T) {
err := d.Gather(&acc)
require.NoError(t, err)
// Set of expected names
var expected = make(map[string]bool)
for _, v := range tt.expected {
expected[v] = true
}
// Set of actual names
var actual = make(map[string]bool)
for _, metric := range acc.Metrics {
if metric.Measurement == "docker_container_cpu" {
if val, ok := metric.Tags["container_name"]; ok {
var found bool = false
for _, cname := range tt.expected {
if val == cname {
found = true
break
}
}
if !found {
t.Errorf("Got unexpected container of %s. Test was -> Include: %s, Exclude: %s", val, tt.include, tt.exclude)
}
}
if name, ok := metric.Tags["container_name"]; ok {
actual[name] = true
}
}
for _, metric := range acc.Metrics {
if metric.Measurement == "docker_container_cpu" {
if val, ok := metric.Tags["container_name"]; ok {
var found bool = false
for _, cname := range tt.notexpected {
if val == cname {
found = true
break
}
}
if found {
t.Errorf("Got unexpected container of %s. Test was -> Include: %s, Exclude: %s", val, tt.include, tt.exclude)
}
}
}
}
require.Equal(t, expected, actual)
})
}
}

View File

@ -1,19 +1,19 @@
# Fail2ban Plugin
# Fail2ban Input Plugin
The fail2ban plugin gathers counts of failed and banned ip addresses from fail2ban.
The fail2ban plugin gathers the count of failed and banned ip addresses using [fail2ban](https://www.fail2ban.org).
This plugin run fail2ban-client command, and fail2ban-client require root access.
You have to grant telegraf to run fail2ban-client:
This plugin runs the `fail2ban-client` command which generally requires root access.
Acquiring the required permissions can be done using several methods:
- Run telegraf as root. (deprecate)
- Configure sudo to grant telegraf to fail2ban-client.
- Use sudo to run fail2ban-client.
- Run telegraf as root. (not recommended)
### Using sudo
You may edit your sudo configuration with the following:
``` sudo
telegraf ALL=(root) NOPASSWD: /usr/bin/fail2ban-client status *
telegraf ALL=(root) NOEXEC: NOPASSWD: /usr/bin/fail2ban-client status, /usr/bin/fail2ban-client status *
```
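With `use_sudo = true` the plugin is expected to prepend `sudo` to the invocation; a sketch of that general pattern (the plugin's exact arguments may differ):

```go
package main

import (
	"fmt"
	"os/exec"
)

// buildCommand wraps fail2ban-client in sudo when requested; sudo must
// then be configured passwordless for the telegraf user, as shown above.
func buildCommand(useSudo bool) *exec.Cmd {
	name := "fail2ban-client"
	args := []string{"status"}
	if useSudo {
		return exec.Command("sudo", append([]string{name}, args...)...)
	}
	return exec.Command(name, args...)
}

func main() {
	fmt.Println(buildCommand(true).Args) // [sudo fail2ban-client status]
}
```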
### Configuration:
@ -21,10 +21,7 @@ telegraf ALL=(root) NOPASSWD: /usr/bin/fail2ban-client status *
``` toml
# Read metrics from fail2ban.
[[inputs.fail2ban]]
## fail2ban-client require root access.
## Setting 'use_sudo' to true will make use of sudo to run fail2ban-client.
## Users must configure sudo to allow telegraf user to run fail2ban-client with no password.
## This plugin run only "fail2ban-client status".
## Use sudo to run fail2ban-client
use_sudo = false
```
@ -38,7 +35,7 @@ telegraf ALL=(root) NOPASSWD: /usr/bin/fail2ban-client status *
- All measurements have the following tags:
- jail
### Example Output:
```
@ -55,6 +52,5 @@ Status for the jail: sshd
```
```
$ ./telegraf --config telegraf.conf --input-filter fail2ban --test
fail2ban,jail=sshd failed=5i,banned=2i 1495868667000000000
```

View File

@ -1,5 +1,3 @@
// +build linux
package fail2ban
import (
@ -8,9 +6,10 @@ import (
"os/exec"
"strings"
"strconv"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
"strconv"
)
var (
@ -23,10 +22,7 @@ type Fail2ban struct {
}
var sampleConfig = `
## fail2ban-client require root access.
## Setting 'use_sudo' to true will make use of sudo to run fail2ban-client.
## Users must configure sudo to allow telegraf user to run fail2ban-client with no password.
## This plugin run only "fail2ban-client status".
## Use sudo to run fail2ban-client
use_sudo = false
`

View File

@ -1,3 +0,0 @@
// +build !linux
package fail2ban

View File

@ -148,15 +148,15 @@ func (h *Fluentd) Gather(acc telegraf.Accumulator) error {
}
if p.BufferQueueLength != nil {
tmpFields["buffer_queue_length"] = p.BufferQueueLength
tmpFields["buffer_queue_length"] = *p.BufferQueueLength
}
if p.RetryCount != nil {
tmpFields["retry_count"] = p.RetryCount
tmpFields["retry_count"] = *p.RetryCount
}
if p.BufferTotalQueuedSize != nil {
tmpFields["buffer_total_queued_size"] = p.BufferTotalQueuedSize
tmpFields["buffer_total_queued_size"] = *p.BufferTotalQueuedSize
}
if !((p.BufferQueueLength == nil) && (p.RetryCount == nil) && (p.BufferTotalQueuedSize == nil)) {
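The added dereferences matter because these fields are pointers, so that absent JSON keys can be detected via `nil`; storing the pointer itself in the field map would record an address rather than a number. Illustrated with `*float64` (the plugin's exact types may differ):

```go
package main

import "fmt"

func main() {
	v := float64(42)
	p := &v

	fields := map[string]interface{}{
		"bad":  p,  // stores *float64: a pointer, not a metric value
		"good": *p, // stores float64(42)
	}
	fmt.Printf("%T %T\n", fields["bad"], fields["good"]) // *float64 float64
}
```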

View File

@ -122,12 +122,6 @@ func Test_parse(t *testing.T) {
}
func Test_Gather(t *testing.T) {
if testing.Short() {
t.Skip("Skipping Gather function test")
}
t.Log("Testing Gather function")
t.Logf("Start HTTP mock (%s) with sampleJSON", fluentdTest.Endpoint)
ts := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
@ -157,13 +151,13 @@ func Test_Gather(t *testing.T) {
assert.Equal(t, expectedOutput[0].PluginID, acc.Metrics[0].Tags["plugin_id"])
assert.Equal(t, expectedOutput[0].PluginType, acc.Metrics[0].Tags["plugin_type"])
assert.Equal(t, expectedOutput[0].PluginCategory, acc.Metrics[0].Tags["plugin_category"])
assert.Equal(t, expectedOutput[0].RetryCount, acc.Metrics[0].Fields["retry_count"])
assert.Equal(t, *expectedOutput[0].RetryCount, acc.Metrics[0].Fields["retry_count"])
assert.Equal(t, expectedOutput[1].PluginID, acc.Metrics[1].Tags["plugin_id"])
assert.Equal(t, expectedOutput[1].PluginType, acc.Metrics[1].Tags["plugin_type"])
assert.Equal(t, expectedOutput[1].PluginCategory, acc.Metrics[1].Tags["plugin_category"])
assert.Equal(t, expectedOutput[1].RetryCount, acc.Metrics[1].Fields["retry_count"])
assert.Equal(t, expectedOutput[1].BufferQueueLength, acc.Metrics[1].Fields["buffer_queue_length"])
assert.Equal(t, expectedOutput[1].BufferTotalQueuedSize, acc.Metrics[1].Fields["buffer_total_queued_size"])
assert.Equal(t, *expectedOutput[1].RetryCount, acc.Metrics[1].Fields["retry_count"])
assert.Equal(t, *expectedOutput[1].BufferQueueLength, acc.Metrics[1].Fields["buffer_queue_length"])
assert.Equal(t, *expectedOutput[1].BufferTotalQueuedSize, acc.Metrics[1].Fields["buffer_total_queued_size"])
}

View File

@ -1,5 +1,3 @@
// +build linux
package hddtemp
import (

View File

@ -1,3 +0,0 @@
// +build !linux
package hddtemp

View File

@ -98,6 +98,7 @@ func (h *HTTPResponse) createHttpClient() (*http.Client, error) {
}
client := &http.Client{
Transport: &http.Transport{
Proxy: http.ProxyFromEnvironment,
DisableKeepAlives: true,
TLSClientConfig: tlsCfg,
},
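`http.ProxyFromEnvironment` makes the client honor the standard `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` environment variables, resolved per request URL. A standalone sketch:

```go
package main

import (
	"fmt"
	"net/http"
	"os"
)

func main() {
	os.Setenv("HTTPS_PROXY", "http://proxy.example.com:3128")

	client := &http.Client{
		Transport: &http.Transport{
			Proxy: http.ProxyFromEnvironment,
		},
	}
	_ = client // https requests made with this client now use the proxy

	req, _ := http.NewRequest("GET", "https://example.com", nil)
	proxyURL, _ := http.ProxyFromEnvironment(req)
	fmt.Println(proxyURL) // http://proxy.example.com:3128
}
```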

View File

@ -1,6 +1,7 @@
package httpjson
import (
"bytes"
"fmt"
"io/ioutil"
"net/http"
@ -15,6 +16,10 @@ import (
"github.com/influxdata/telegraf/plugins/parsers"
)
var (
utf8BOM = []byte("\xef\xbb\xbf")
)
// HttpJson struct
type HttpJson struct {
Name string
@ -170,7 +175,6 @@ func (h *HttpJson) gatherServer(
serverURL string,
) error {
resp, responseTime, err := h.sendRequest(serverURL)
if err != nil {
return err
}
@ -266,6 +270,7 @@ func (h *HttpJson) sendRequest(serverURL string) (string, float64, error) {
if err != nil {
return string(body), responseTime, err
}
body = bytes.TrimPrefix(body, utf8BOM)
// Process response
if resp.StatusCode != http.StatusOK {

View File

@ -477,15 +477,13 @@ func TestHttpJsonBadJson(t *testing.T) {
assert.Equal(t, 0, acc.NFields())
}
// Test response to empty string as response objectgT
// Test response to empty string as response object
func TestHttpJsonEmptyResponse(t *testing.T) {
httpjson := genMockHttpJson(empty, 200)
var acc testutil.Accumulator
err := acc.GatherError(httpjson[0].Gather)
assert.Error(t, err)
assert.Equal(t, 0, acc.NFields())
assert.NoError(t, err)
}
// Test that the proper values are ignored or collected
@ -560,3 +558,18 @@ func TestHttpJsonArray200Tags(t *testing.T) {
}
}
}
var jsonBOM = []byte("\xef\xbb\xbf[{\"value\":17}]")
// TestHttpJsonBOM tests that UTF-8 JSON with a BOM can be parsed
func TestHttpJsonBOM(t *testing.T) {
httpjson := genMockHttpJson(string(jsonBOM), 200)
for _, service := range httpjson {
if service.Name == "other_webapp" {
var acc testutil.Accumulator
err := acc.GatherError(service.Gather)
require.NoError(t, err)
}
}
}

View File

@ -95,7 +95,7 @@ const measurement = "iptables"
var errParse = errors.New("Cannot parse iptables list information")
var chainNameRe = regexp.MustCompile(`^Chain\s+(\S+)`)
var fieldsHeaderRe = regexp.MustCompile(`^\s*pkts\s+bytes\s+`)
var commentRe = regexp.MustCompile(`\s*/\*\s*(.+?)\s*\*/\s*`)
var valuesRe = regexp.MustCompile(`^\s*(\d+)\s+(\d+)\s+.*?/\*\s*(.+?)\s*\*/\s*`)
func (ipt *Iptables) parseAndGather(data string, acc telegraf.Accumulator) error {
lines := strings.Split(data, "\n")
@ -110,21 +110,14 @@ func (ipt *Iptables) parseAndGather(data string, acc telegraf.Accumulator) error
return errParse
}
for _, line := range lines[2:] {
tokens := strings.Fields(line)
if len(tokens) < 10 {
matches := valuesRe.FindStringSubmatch(line)
if len(matches) != 4 {
continue
}
pkts := tokens[0]
bytes := tokens[1]
end := strings.Join(tokens[9:], " ")
matches := commentRe.FindStringSubmatch(end)
if matches == nil {
continue
}
comment := matches[1]
pkts := matches[1]
bytes := matches[2]
comment := matches[3]
tags := map[string]string{"table": ipt.Table, "chain": mchain[1], "ruleid": comment}
fields := make(map[string]interface{})
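The single `valuesRe` expression replaces the old tokenize-then-match approach: one pass captures the packet counter, the byte counter, and the `/* ... */` comment naming the rule. Applied to the sample line from the test below:

```go
package main

import (
	"fmt"
	"regexp"
)

var valuesRe = regexp.MustCompile(`^\s*(\d+)\s+(\d+)\s+.*?/\*\s*(.+?)\s*\*/\s*`)

func main() {
	line := "  123   456 all  --  eth0 * 0.0.0.0/0 0.0.0.0/0 /* all_recv */"

	if m := valuesRe.FindStringSubmatch(line); len(m) == 4 {
		fmt.Println("pkts:", m[1], "bytes:", m[2], "ruleid:", m[3])
		// pkts: 123 bytes: 456 ruleid: all_recv
	}
}
```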

View File

@ -154,68 +154,85 @@ func TestIptables_Gather(t *testing.T) {
tags: []map[string]string{},
fields: [][]map[string]interface{}{},
},
{ // 11 - all target and ports
table: "all_recv",
chains: []string{"accountfwd"},
values: []string{
`Chain accountfwd (1 references)
pkts bytes target prot opt in out source destination
123 456 all -- eth0 * 0.0.0.0/0 0.0.0.0/0 /* all_recv */
`},
tags: []map[string]string{
map[string]string{"table": "all_recv", "chain": "accountfwd", "ruleid": "all_recv"},
},
fields: [][]map[string]interface{}{
{map[string]interface{}{"pkts": uint64(123), "bytes": uint64(456)}},
},
},
}
for i, tt := range tests {
i++
ipt := &Iptables{
Table: tt.table,
Chains: tt.chains,
lister: func(table, chain string) (string, error) {
if len(tt.values) > 0 {
v := tt.values[0]
tt.values = tt.values[1:]
return v, nil
}
return "", nil
},
}
acc := new(testutil.Accumulator)
err := acc.GatherError(ipt.Gather)
if !reflect.DeepEqual(tt.err, err) {
t.Errorf("%d: expected error '%#v' got '%#v'", i, tt.err, err)
}
if tt.table == "" {
n := acc.NFields()
if n != 0 {
t.Errorf("%d: expected 0 fields if empty table got %d", i, n)
t.Run(tt.table, func(t *testing.T) {
i++
ipt := &Iptables{
Table: tt.table,
Chains: tt.chains,
lister: func(table, chain string) (string, error) {
if len(tt.values) > 0 {
v := tt.values[0]
tt.values = tt.values[1:]
return v, nil
}
return "", nil
},
}
continue
}
if len(tt.chains) == 0 {
n := acc.NFields()
if n != 0 {
t.Errorf("%d: expected 0 fields if empty chains got %d", i, n)
acc := new(testutil.Accumulator)
err := acc.GatherError(ipt.Gather)
if !reflect.DeepEqual(tt.err, err) {
t.Errorf("%d: expected error '%#v' got '%#v'", i, tt.err, err)
}
continue
}
if len(tt.tags) == 0 {
n := acc.NFields()
if n != 0 {
t.Errorf("%d: expected 0 values got %d", i, n)
if tt.table == "" {
n := acc.NFields()
if n != 0 {
t.Errorf("%d: expected 0 fields if empty table got %d", i, n)
}
return
}
continue
}
n := 0
for j, tags := range tt.tags {
for k, fields := range tt.fields[j] {
if len(acc.Metrics) < n+1 {
t.Errorf("%d: expected at least %d values got %d", i, n+1, len(acc.Metrics))
break
if len(tt.chains) == 0 {
n := acc.NFields()
if n != 0 {
t.Errorf("%d: expected 0 fields if empty chains got %d", i, n)
}
m := acc.Metrics[n]
if !reflect.DeepEqual(m.Measurement, measurement) {
t.Errorf("%d %d %d: expected measurement '%#v' got '%#v'\n", i, j, k, measurement, m.Measurement)
}
if !reflect.DeepEqual(m.Tags, tags) {
t.Errorf("%d %d %d: expected tags\n%#v got\n%#v\n", i, j, k, tags, m.Tags)
}
if !reflect.DeepEqual(m.Fields, fields) {
t.Errorf("%d %d %d: expected fields\n%#v got\n%#v\n", i, j, k, fields, m.Fields)
}
n++
return
}
}
if len(tt.tags) == 0 {
n := acc.NFields()
if n != 0 {
t.Errorf("%d: expected 0 values got %d", i, n)
}
return
}
n := 0
for j, tags := range tt.tags {
for k, fields := range tt.fields[j] {
if len(acc.Metrics) < n+1 {
t.Errorf("%d: expected at least %d values got %d", i, n+1, len(acc.Metrics))
break
}
m := acc.Metrics[n]
if !reflect.DeepEqual(m.Measurement, measurement) {
t.Errorf("%d %d %d: expected measurement '%#v' got '%#v'\n", i, j, k, measurement, m.Measurement)
}
if !reflect.DeepEqual(m.Tags, tags) {
t.Errorf("%d %d %d: expected tags\n%#v got\n%#v\n", i, j, k, tags, m.Tags)
}
if !reflect.DeepEqual(m.Fields, fields) {
t.Errorf("%d %d %d: expected fields\n%#v got\n%#v\n", i, j, k, fields, m.Fields)
}
n++
}
}
})
}
}

View File

@ -3,8 +3,6 @@ package leofs
import (
"bufio"
"fmt"
"log"
"net/url"
"os/exec"
"strconv"
"strings"
@ -19,7 +17,7 @@ import (
const oid = ".1.3.6.1.4.1.35450"
// For Manager Master
const defaultEndpoint = "udp://127.0.0.1:4020"
const defaultEndpoint = "127.0.0.1:4020"
type ServerType int
@ -137,8 +135,8 @@ var serverTypeMapping = map[string]ServerType{
var sampleConfig = `
## An array of URLs of the form:
## "udp://" host [ ":" port]
servers = ["udp://127.0.0.1:4020"]
## host [ ":" port]
servers = ["127.0.0.1:4020"]
`
func (l *LeoFS) SampleConfig() string {
@ -155,28 +153,22 @@ func (l *LeoFS) Gather(acc telegraf.Accumulator) error {
return nil
}
var wg sync.WaitGroup
for i, endpoint := range l.Servers {
if !strings.HasPrefix(endpoint, "udp://") {
// Preserve backwards compatibility for hostnames without a
// scheme, broken in go 1.8. Remove in Telegraf 2.0
endpoint = "udp://" + endpoint
log.Printf("W! [inputs.mongodb] Using %q as connection URL; please update your configuration to use an URL", endpoint)
l.Servers[i] = endpoint
}
u, err := url.Parse(endpoint)
if err != nil {
acc.AddError(fmt.Errorf("Unable to parse address %q: %s", endpoint, err))
continue
}
if u.Host == "" {
for _, endpoint := range l.Servers {
results := strings.Split(endpoint, ":")
port := "4020"
if len(results) > 2 {
acc.AddError(fmt.Errorf("Unable to parse address %q", endpoint))
continue
} else if len(results) == 2 {
if _, err := strconv.Atoi(results[1]); err == nil {
port = results[1]
} else {
acc.AddError(fmt.Errorf("Unable to parse port from %q", endpoint))
continue
}
}
port := u.Port()
if port == "" {
port = "4020"
}
st, ok := serverTypeMapping[port]
if !ok {
st = ServerTypeStorage
@ -196,7 +188,7 @@ func (l *LeoFS) gatherServer(
serverType ServerType,
acc telegraf.Accumulator,
) error {
cmd := exec.Command("snmpwalk", "-v2c", "-cpublic", endpoint, oid)
cmd := exec.Command("snmpwalk", "-v2c", "-cpublic", "-On", endpoint, oid)
stdout, err := cmd.StdoutPipe()
if err != nil {
return err
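The added `-On` flag makes `snmpwalk` print OIDs numerically (`.1.3.6.1...`) rather than in the symbolic `iso.3.6.1...` form, which is why the test fixtures below change. A sketch (not the plugin's parser) of splitting one line of that output:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	line := ".1.3.6.1.4.1.35450.15.2.0 = Gauge32: 186"

	// Shape: "<oid> = <type>: <value>"
	parts := strings.SplitN(line, " = ", 2)
	typeAndValue := strings.SplitN(parts[1], ": ", 2)
	value, _ := strconv.ParseInt(typeAndValue[1], 10, 64)

	fmt.Println(parts[0], typeAndValue[0], value) // .1.3.6.1.4.1.35450.15.2.0 Gauge32 186
}
```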

View File

@ -16,21 +16,21 @@ package main
import "fmt"
const output = ` + "`" + `iso.3.6.1.4.1.35450.15.1.0 = STRING: "manager_888@127.0.0.1"
iso.3.6.1.4.1.35450.15.2.0 = Gauge32: 186
iso.3.6.1.4.1.35450.15.3.0 = Gauge32: 46235519
iso.3.6.1.4.1.35450.15.4.0 = Gauge32: 32168525
iso.3.6.1.4.1.35450.15.5.0 = Gauge32: 14066068
iso.3.6.1.4.1.35450.15.6.0 = Gauge32: 5512968
iso.3.6.1.4.1.35450.15.7.0 = Gauge32: 186
iso.3.6.1.4.1.35450.15.8.0 = Gauge32: 46269006
iso.3.6.1.4.1.35450.15.9.0 = Gauge32: 32202867
iso.3.6.1.4.1.35450.15.10.0 = Gauge32: 14064995
iso.3.6.1.4.1.35450.15.11.0 = Gauge32: 5492634
iso.3.6.1.4.1.35450.15.12.0 = Gauge32: 60
iso.3.6.1.4.1.35450.15.13.0 = Gauge32: 43515904
iso.3.6.1.4.1.35450.15.14.0 = Gauge32: 60
iso.3.6.1.4.1.35450.15.15.0 = Gauge32: 43533983` + "`" +
const output = ` + "`" + `.1.3.6.1.4.1.35450.15.1.0 = STRING: "manager_888@127.0.0.1"
.1.3.6.1.4.1.35450.15.2.0 = Gauge32: 186
.1.3.6.1.4.1.35450.15.3.0 = Gauge32: 46235519
.1.3.6.1.4.1.35450.15.4.0 = Gauge32: 32168525
.1.3.6.1.4.1.35450.15.5.0 = Gauge32: 14066068
.1.3.6.1.4.1.35450.15.6.0 = Gauge32: 5512968
.1.3.6.1.4.1.35450.15.7.0 = Gauge32: 186
.1.3.6.1.4.1.35450.15.8.0 = Gauge32: 46269006
.1.3.6.1.4.1.35450.15.9.0 = Gauge32: 32202867
.1.3.6.1.4.1.35450.15.10.0 = Gauge32: 14064995
.1.3.6.1.4.1.35450.15.11.0 = Gauge32: 5492634
.1.3.6.1.4.1.35450.15.12.0 = Gauge32: 60
.1.3.6.1.4.1.35450.15.13.0 = Gauge32: 43515904
.1.3.6.1.4.1.35450.15.14.0 = Gauge32: 60
.1.3.6.1.4.1.35450.15.15.0 = Gauge32: 43533983` + "`" +
`
func main() {
fmt.Println(output)
@ -42,34 +42,34 @@ package main
import "fmt"
const output = ` + "`" + `iso.3.6.1.4.1.35450.34.1.0 = STRING: "storage_0@127.0.0.1"
iso.3.6.1.4.1.35450.34.2.0 = Gauge32: 512
iso.3.6.1.4.1.35450.34.3.0 = Gauge32: 38126307
iso.3.6.1.4.1.35450.34.4.0 = Gauge32: 22308716
iso.3.6.1.4.1.35450.34.5.0 = Gauge32: 15816448
iso.3.6.1.4.1.35450.34.6.0 = Gauge32: 5232008
iso.3.6.1.4.1.35450.34.7.0 = Gauge32: 512
iso.3.6.1.4.1.35450.34.8.0 = Gauge32: 38113176
iso.3.6.1.4.1.35450.34.9.0 = Gauge32: 22313398
iso.3.6.1.4.1.35450.34.10.0 = Gauge32: 15798779
iso.3.6.1.4.1.35450.34.11.0 = Gauge32: 5237315
iso.3.6.1.4.1.35450.34.12.0 = Gauge32: 191
iso.3.6.1.4.1.35450.34.13.0 = Gauge32: 824
iso.3.6.1.4.1.35450.34.14.0 = Gauge32: 0
iso.3.6.1.4.1.35450.34.15.0 = Gauge32: 50105
iso.3.6.1.4.1.35450.34.16.0 = Gauge32: 196654
iso.3.6.1.4.1.35450.34.17.0 = Gauge32: 0
iso.3.6.1.4.1.35450.34.18.0 = Gauge32: 2052
iso.3.6.1.4.1.35450.34.19.0 = Gauge32: 50296
iso.3.6.1.4.1.35450.34.20.0 = Gauge32: 35
iso.3.6.1.4.1.35450.34.21.0 = Gauge32: 898
iso.3.6.1.4.1.35450.34.22.0 = Gauge32: 0
iso.3.6.1.4.1.35450.34.23.0 = Gauge32: 0
iso.3.6.1.4.1.35450.34.24.0 = Gauge32: 0
iso.3.6.1.4.1.35450.34.31.0 = Gauge32: 51
iso.3.6.1.4.1.35450.34.32.0 = Gauge32: 53219328
iso.3.6.1.4.1.35450.34.33.0 = Gauge32: 51
iso.3.6.1.4.1.35450.34.34.0 = Gauge32: 53351083` + "`" +
const output = ` + "`" + `.1.3.6.1.4.1.35450.34.1.0 = STRING: "storage_0@127.0.0.1"
.1.3.6.1.4.1.35450.34.2.0 = Gauge32: 512
.1.3.6.1.4.1.35450.34.3.0 = Gauge32: 38126307
.1.3.6.1.4.1.35450.34.4.0 = Gauge32: 22308716
.1.3.6.1.4.1.35450.34.5.0 = Gauge32: 15816448
.1.3.6.1.4.1.35450.34.6.0 = Gauge32: 5232008
.1.3.6.1.4.1.35450.34.7.0 = Gauge32: 512
.1.3.6.1.4.1.35450.34.8.0 = Gauge32: 38113176
.1.3.6.1.4.1.35450.34.9.0 = Gauge32: 22313398
.1.3.6.1.4.1.35450.34.10.0 = Gauge32: 15798779
.1.3.6.1.4.1.35450.34.11.0 = Gauge32: 5237315
.1.3.6.1.4.1.35450.34.12.0 = Gauge32: 191
.1.3.6.1.4.1.35450.34.13.0 = Gauge32: 824
.1.3.6.1.4.1.35450.34.14.0 = Gauge32: 0
.1.3.6.1.4.1.35450.34.15.0 = Gauge32: 50105
.1.3.6.1.4.1.35450.34.16.0 = Gauge32: 196654
.1.3.6.1.4.1.35450.34.17.0 = Gauge32: 0
.1.3.6.1.4.1.35450.34.18.0 = Gauge32: 2052
.1.3.6.1.4.1.35450.34.19.0 = Gauge32: 50296
.1.3.6.1.4.1.35450.34.20.0 = Gauge32: 35
.1.3.6.1.4.1.35450.34.21.0 = Gauge32: 898
.1.3.6.1.4.1.35450.34.22.0 = Gauge32: 0
.1.3.6.1.4.1.35450.34.23.0 = Gauge32: 0
.1.3.6.1.4.1.35450.34.24.0 = Gauge32: 0
.1.3.6.1.4.1.35450.34.31.0 = Gauge32: 51
.1.3.6.1.4.1.35450.34.32.0 = Gauge32: 53219328
.1.3.6.1.4.1.35450.34.33.0 = Gauge32: 51
.1.3.6.1.4.1.35450.34.34.0 = Gauge32: 53351083` + "`" +
`
func main() {
fmt.Println(output)
@ -81,31 +81,31 @@ package main
import "fmt"
const output = ` + "`" + `iso.3.6.1.4.1.35450.34.1.0 = STRING: "gateway_0@127.0.0.1"
iso.3.6.1.4.1.35450.34.2.0 = Gauge32: 465
iso.3.6.1.4.1.35450.34.3.0 = Gauge32: 61676335
iso.3.6.1.4.1.35450.34.4.0 = Gauge32: 46890415
iso.3.6.1.4.1.35450.34.5.0 = Gauge32: 14785011
iso.3.6.1.4.1.35450.34.6.0 = Gauge32: 5578855
iso.3.6.1.4.1.35450.34.7.0 = Gauge32: 465
iso.3.6.1.4.1.35450.34.8.0 = Gauge32: 61644426
iso.3.6.1.4.1.35450.34.9.0 = Gauge32: 46880358
iso.3.6.1.4.1.35450.34.10.0 = Gauge32: 14763002
iso.3.6.1.4.1.35450.34.11.0 = Gauge32: 5582125
iso.3.6.1.4.1.35450.34.12.0 = Gauge32: 191
iso.3.6.1.4.1.35450.34.13.0 = Gauge32: 827
iso.3.6.1.4.1.35450.34.14.0 = Gauge32: 0
iso.3.6.1.4.1.35450.34.15.0 = Gauge32: 50105
iso.3.6.1.4.1.35450.34.16.0 = Gauge32: 196650
iso.3.6.1.4.1.35450.34.17.0 = Gauge32: 0
iso.3.6.1.4.1.35450.34.18.0 = Gauge32: 30256
iso.3.6.1.4.1.35450.34.19.0 = Gauge32: 532158
iso.3.6.1.4.1.35450.34.20.0 = Gauge32: 34
iso.3.6.1.4.1.35450.34.21.0 = Gauge32: 1
iso.3.6.1.4.1.35450.34.31.0 = Gauge32: 53
iso.3.6.1.4.1.35450.34.32.0 = Gauge32: 55050240
iso.3.6.1.4.1.35450.34.33.0 = Gauge32: 53
iso.3.6.1.4.1.35450.34.34.0 = Gauge32: 55186538` + "`" +
const output = ` + "`" + `.1.3.6.1.4.1.35450.34.1.0 = STRING: "gateway_0@127.0.0.1"
.1.3.6.1.4.1.35450.34.2.0 = Gauge32: 465
.1.3.6.1.4.1.35450.34.3.0 = Gauge32: 61676335
.1.3.6.1.4.1.35450.34.4.0 = Gauge32: 46890415
.1.3.6.1.4.1.35450.34.5.0 = Gauge32: 14785011
.1.3.6.1.4.1.35450.34.6.0 = Gauge32: 5578855
.1.3.6.1.4.1.35450.34.7.0 = Gauge32: 465
.1.3.6.1.4.1.35450.34.8.0 = Gauge32: 61644426
.1.3.6.1.4.1.35450.34.9.0 = Gauge32: 46880358
.1.3.6.1.4.1.35450.34.10.0 = Gauge32: 14763002
.1.3.6.1.4.1.35450.34.11.0 = Gauge32: 5582125
.1.3.6.1.4.1.35450.34.12.0 = Gauge32: 191
.1.3.6.1.4.1.35450.34.13.0 = Gauge32: 827
.1.3.6.1.4.1.35450.34.14.0 = Gauge32: 0
.1.3.6.1.4.1.35450.34.15.0 = Gauge32: 50105
.1.3.6.1.4.1.35450.34.16.0 = Gauge32: 196650
.1.3.6.1.4.1.35450.34.17.0 = Gauge32: 0
.1.3.6.1.4.1.35450.34.18.0 = Gauge32: 30256
.1.3.6.1.4.1.35450.34.19.0 = Gauge32: 532158
.1.3.6.1.4.1.35450.34.20.0 = Gauge32: 34
.1.3.6.1.4.1.35450.34.21.0 = Gauge32: 1
.1.3.6.1.4.1.35450.34.31.0 = Gauge32: 53
.1.3.6.1.4.1.35450.34.32.0 = Gauge32: 55050240
.1.3.6.1.4.1.35450.34.33.0 = Gauge32: 53
.1.3.6.1.4.1.35450.34.34.0 = Gauge32: 55186538` + "`" +
`
func main() {
fmt.Println(output)

View File

@ -514,7 +514,7 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec
returnVal.Command = diff(newStat.Opcounters.Command, oldStat.Opcounters.Command, sampleSecs)
}
if newStat.Metrics != nil && newStat.Metrics.TTL != nil && oldStat.Metrics.TTL != nil {
if newStat.Metrics != nil && newStat.Metrics.TTL != nil && oldStat.Metrics != nil && oldStat.Metrics.TTL != nil {
returnVal.Passes = diff(newStat.Metrics.TTL.Passes, oldStat.Metrics.TTL.Passes, sampleSecs)
returnVal.DeletedDocuments = diff(newStat.Metrics.TTL.DeletedDocuments, oldStat.Metrics.TTL.DeletedDocuments, sampleSecs)
}

View File

@ -10,9 +10,13 @@ The plugin expects messages in the
```toml
# Read metrics from MQTT topic(s)
[[inputs.mqtt_consumer]]
servers = ["localhost:1883"]
## MQTT broker URLs to be used. The format should be scheme://host:port,
## where the scheme can be tcp, ssl, or ws.
servers = ["tcp://localhost:1883"]
## MQTT QoS, must be 0, 1, or 2
qos = 0
## Connection timeout for initial connection in seconds
connection_timeout = "30s"
## Topics to subscribe to
topics = [

View File

@ -15,12 +15,16 @@ import (
"github.com/eclipse/paho.mqtt.golang"
)
// 30 seconds is the default used by paho.mqtt.golang
var defaultConnectionTimeout = internal.Duration{Duration: 30 * time.Second}
type MQTTConsumer struct {
Servers []string
Topics []string
Username string
Password string
QoS int `toml:"qos"`
Servers []string
Topics []string
Username string
Password string
QoS int `toml:"qos"`
ConnectionTimeout internal.Duration `toml:"connection_timeout"`
parser parsers.Parser
@ -48,13 +52,18 @@ type MQTTConsumer struct {
// keep the accumulator internally:
acc telegraf.Accumulator
started bool
connected bool
}
var sampleConfig = `
servers = ["localhost:1883"]
## MQTT broker URLs to be used. The format should be scheme://host:port,
## where the scheme can be tcp, ssl, or ws.
servers = ["tcp://localhost:1883"]
## MQTT QoS, must be 0, 1, or 2
qos = 0
## Connection timeout for initial connection in seconds
connection_timeout = "30s"
## Topics to subscribe to
topics = [
@ -103,7 +112,7 @@ func (m *MQTTConsumer) SetParser(parser parsers.Parser) {
func (m *MQTTConsumer) Start(acc telegraf.Accumulator) error {
m.Lock()
defer m.Unlock()
m.started = false
m.connected = false
if m.PersistentSession && m.ClientID == "" {
return fmt.Errorf("ERROR MQTT Consumer: When using persistent_session" +
@ -115,26 +124,40 @@ func (m *MQTTConsumer) Start(acc telegraf.Accumulator) error {
return fmt.Errorf("MQTT Consumer, invalid QoS value: %d", m.QoS)
}
if m.ConnectionTimeout.Duration < 1*time.Second {
return fmt.Errorf("MQTT Consumer, invalid connection_timeout value: %s", m.ConnectionTimeout.Duration)
}
opts, err := m.createOpts()
if err != nil {
return err
}
m.client = mqtt.NewClient(opts)
if token := m.client.Connect(); token.Wait() && token.Error() != nil {
return token.Error()
}
m.in = make(chan mqtt.Message, 1000)
m.done = make(chan struct{})
m.connect()
return nil
}
func (m *MQTTConsumer) connect() error {
if token := m.client.Connect(); token.Wait() && token.Error() != nil {
err := token.Error()
log.Printf("D! MQTT Consumer, connection error - %v", err)
return err
}
go m.receiver()
return nil
}
func (m *MQTTConsumer) onConnect(c mqtt.Client) {
log.Printf("I! MQTT Client Connected")
if !m.PersistentSession || !m.started {
if !m.PersistentSession || !m.connected {
topics := make(map[string]byte)
for _, topic := range m.Topics {
topics[topic] = byte(m.QoS)
@ -145,7 +168,7 @@ func (m *MQTTConsumer) onConnect(c mqtt.Client) {
m.acc.AddError(fmt.Errorf("E! MQTT Subscribe Error\ntopics: %s\nerror: %s",
strings.Join(m.Topics[:], ","), subscribeToken.Error()))
}
m.started = true
m.connected = true
}
return
}
@ -186,18 +209,27 @@ func (m *MQTTConsumer) recvMessage(_ mqtt.Client, msg mqtt.Message) {
func (m *MQTTConsumer) Stop() {
m.Lock()
defer m.Unlock()
close(m.done)
m.client.Disconnect(200)
m.started = false
if m.connected {
close(m.done)
m.client.Disconnect(200)
m.connected = false
}
}
func (m *MQTTConsumer) Gather(acc telegraf.Accumulator) error {
if !m.connected {
m.connect()
}
return nil
}
func (m *MQTTConsumer) createOpts() (*mqtt.ClientOptions, error) {
opts := mqtt.NewClientOptions()
opts.ConnectTimeout = m.ConnectionTimeout.Duration
if m.ClientID == "" {
opts.SetClientID("Telegraf-Consumer-" + internal.RandomString(5))
} else {
@ -210,9 +242,7 @@ func (m *MQTTConsumer) createOpts() (*mqtt.ClientOptions, error) {
return nil, err
}
scheme := "tcp"
if tlsCfg != nil {
scheme = "ssl"
opts.SetTLSConfig(tlsCfg)
}
@ -228,8 +258,17 @@ func (m *MQTTConsumer) createOpts() (*mqtt.ClientOptions, error) {
if len(m.Servers) == 0 {
return opts, fmt.Errorf("could not get host information")
}
for _, host := range m.Servers {
server := fmt.Sprintf("%s://%s", scheme, host)
for _, server := range m.Servers {
// Preserve support for host:port style servers; deprecated in Telegraf 1.4.4
if !strings.Contains(server, "://") {
log.Printf("W! mqtt_consumer server %q should be updated to use `scheme://host:port` format", server)
if tlsCfg == nil {
server = "tcp://" + server
} else {
server = "ssl://" + server
}
}
opts.AddBroker(server)
}
@ -238,11 +277,14 @@ func (m *MQTTConsumer) createOpts() (*mqtt.ClientOptions, error) {
opts.SetCleanSession(!m.PersistentSession)
opts.SetOnConnectHandler(m.onConnect)
opts.SetConnectionLostHandler(m.onConnectionLost)
return opts, nil
}
func init() {
inputs.Add("mqtt_consumer", func() telegraf.Input {
return &MQTTConsumer{}
return &MQTTConsumer{
ConnectionTimeout: defaultConnectionTimeout,
}
})
}
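For reference, a minimal standalone sketch (not part of the plugin) of the compatibility rule applied in `createOpts` above: bare `host:port` entries keep working, but they are upgraded to a scheme-qualified URL, and `ssl://` is chosen only when a TLS config is present.
```go
package main

import (
	"fmt"
	"strings"
)

// normalizeServer mirrors the deprecation shim above (illustrative only).
func normalizeServer(server string, tlsConfigured bool) string {
	if strings.Contains(server, "://") {
		return server // already scheme-qualified, e.g. ws://host:port
	}
	if tlsConfigured {
		return "ssl://" + server
	}
	return "tcp://" + server
}

func main() {
	fmt.Println(normalizeServer("localhost:1883", false)) // tcp://localhost:1883
	fmt.Println(normalizeServer("localhost:8883", true))  // ssl://localhost:8883
}
```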

View File

@ -22,11 +22,13 @@ const (
func newTestMQTTConsumer() (*MQTTConsumer, chan mqtt.Message) {
in := make(chan mqtt.Message, 100)
n := &MQTTConsumer{
Topics: []string{"telegraf"},
Servers: []string{"localhost:1883"},
in: in,
done: make(chan struct{}),
Topics: []string{"telegraf"},
Servers: []string{"localhost:1883"},
in: in,
done: make(chan struct{}),
connected: true,
}
return n, in
}
@ -131,6 +133,7 @@ func TestRunParserAndGather(t *testing.T) {
n, in := newTestMQTTConsumer()
acc := testutil.Accumulator{}
n.acc = &acc
defer close(n.done)
n.parser, _ = parsers.NewInfluxParser()

View File

@ -588,17 +588,12 @@ func (m *Mysql) gatherServer(serv string, acc telegraf.Accumulator) error {
// Global Variables may be gathered less often
if len(m.IntervalSlow) > 0 {
if uint32(time.Since(lastT).Seconds()) > scanIntervalSlow {
if uint32(time.Since(lastT).Seconds()) >= scanIntervalSlow {
err = m.gatherGlobalVariables(db, serv, acc)
if err != nil {
return err
}
lastT = time.Now()
} else {
err = m.gatherGlobalVariables(db, serv, acc)
if err != nil {
return err
}
}
}

View File

@ -25,6 +25,7 @@ package nsq
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"strconv"
@ -101,28 +102,42 @@ func (n *NSQ) gatherEndpoint(e string, acc telegraf.Accumulator) error {
return fmt.Errorf("%s returned HTTP status %s", u.String(), r.Status)
}
s := &NSQStats{}
err = json.NewDecoder(r.Body).Decode(s)
body, err := ioutil.ReadAll(r.Body)
if err != nil {
return fmt.Errorf(`Error reading body: %s`, err)
}
data := &NSQStatsData{}
err = json.Unmarshal(body, data)
if err != nil {
return fmt.Errorf(`Error parsing response: %s`, err)
}
// Data was not parsed correctly; attempt to use the old (pre-1.0) format.
if len(data.Version) < 1 {
wrapper := &NSQStats{}
err = json.Unmarshal(body, wrapper)
if err != nil {
return fmt.Errorf(`Error parsing response: %s`, err)
}
data = &wrapper.Data
}
tags := map[string]string{
`server_host`: u.Host,
`server_version`: s.Data.Version,
`server_version`: data.Version,
}
fields := make(map[string]interface{})
if s.Data.Health == `OK` {
if data.Health == `OK` {
fields["server_count"] = int64(1)
} else {
fields["server_count"] = int64(0)
}
fields["topic_count"] = int64(len(s.Data.Topics))
fields["topic_count"] = int64(len(data.Topics))
acc.AddFields("nsq_server", fields, tags)
for _, t := range s.Data.Topics {
topicStats(t, acc, u.Host, s.Data.Version)
for _, t := range data.Topics {
topicStats(t, acc, u.Host, data.Version)
}
return nil
@ -189,7 +204,6 @@ func clientStats(c ClientStats, acc telegraf.Accumulator, host, version, topic,
"server_version": version,
"topic": topic,
"channel": channel,
"client_name": c.Name,
"client_id": c.ID,
"client_hostname": c.Hostname,
"client_version": c.Version,
@ -199,6 +213,9 @@ func clientStats(c ClientStats, acc telegraf.Accumulator, host, version, topic,
"client_snappy": strconv.FormatBool(c.Snappy),
"client_deflate": strconv.FormatBool(c.Deflate),
}
if len(c.Name) > 0 {
tags["client_name"] = c.Name
}
fields := map[string]interface{}{
"ready_count": c.ReadyCount,
@ -248,7 +265,7 @@ type ChannelStats struct {
}
type ClientStats struct {
Name string `json:"name"`
Name string `json:"name"` // DEPRECATED 1.x+, still here as the structs are currently being shared for parsing v3.x and 1.x
ID string `json:"client_id"`
Hostname string `json:"hostname"`
Version string `json:"version"`

View File

@ -12,10 +12,267 @@ import (
"github.com/stretchr/testify/require"
)
func TestNSQStats(t *testing.T) {
func TestNSQStatsV1(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
fmt.Fprintln(w, response)
fmt.Fprintln(w, responseV1)
}))
defer ts.Close()
n := &NSQ{
Endpoints: []string{ts.URL},
}
var acc testutil.Accumulator
err := acc.GatherError(n.Gather)
require.NoError(t, err)
u, err := url.Parse(ts.URL)
require.NoError(t, err)
host := u.Host
// validate the gathered metrics against the expected values
tests := []struct {
m string
f map[string]interface{}
g map[string]string
}{
{
"nsq_server",
map[string]interface{}{
"server_count": int64(1),
"topic_count": int64(2),
},
map[string]string{
"server_host": host,
"server_version": "1.0.0-compat",
},
},
{
"nsq_topic",
map[string]interface{}{
"depth": int64(12),
"backend_depth": int64(13),
"message_count": int64(14),
"channel_count": int64(1),
},
map[string]string{
"server_host": host,
"server_version": "1.0.0-compat",
"topic": "t1"},
},
{
"nsq_channel",
map[string]interface{}{
"depth": int64(0),
"backend_depth": int64(1),
"inflight_count": int64(2),
"deferred_count": int64(3),
"message_count": int64(4),
"requeue_count": int64(5),
"timeout_count": int64(6),
"client_count": int64(1),
},
map[string]string{
"server_host": host,
"server_version": "1.0.0-compat",
"topic": "t1",
"channel": "c1",
},
},
{
"nsq_client",
map[string]interface{}{
"ready_count": int64(200),
"inflight_count": int64(7),
"message_count": int64(8),
"finish_count": int64(9),
"requeue_count": int64(10),
},
map[string]string{"server_host": host, "server_version": "1.0.0-compat",
"topic": "t1", "channel": "c1",
"client_id": "373a715cd990", "client_hostname": "373a715cd990",
"client_version": "V2", "client_address": "172.17.0.11:35560",
"client_tls": "false", "client_snappy": "false",
"client_deflate": "false",
"client_user_agent": "nsq_to_nsq/0.3.6 go-nsq/1.0.5"},
},
{
"nsq_topic",
map[string]interface{}{
"depth": int64(28),
"backend_depth": int64(29),
"message_count": int64(30),
"channel_count": int64(1),
},
map[string]string{
"server_host": host,
"server_version": "1.0.0-compat",
"topic": "t2"},
},
{
"nsq_channel",
map[string]interface{}{
"depth": int64(15),
"backend_depth": int64(16),
"inflight_count": int64(17),
"deferred_count": int64(18),
"message_count": int64(19),
"requeue_count": int64(20),
"timeout_count": int64(21),
"client_count": int64(1),
},
map[string]string{
"server_host": host,
"server_version": "1.0.0-compat",
"topic": "t2",
"channel": "c2",
},
},
{
"nsq_client",
map[string]interface{}{
"ready_count": int64(22),
"inflight_count": int64(23),
"message_count": int64(24),
"finish_count": int64(25),
"requeue_count": int64(26),
},
map[string]string{"server_host": host, "server_version": "1.0.0-compat",
"topic": "t2", "channel": "c2",
"client_id": "377569bd462b", "client_hostname": "377569bd462b",
"client_version": "V2", "client_address": "172.17.0.8:48145",
"client_user_agent": "go-nsq/1.0.5", "client_tls": "true",
"client_snappy": "true", "client_deflate": "true"},
},
}
for _, test := range tests {
acc.AssertContainsTaggedFields(t, test.m, test.f, test.g)
}
}
// v1 version of localhost/stats?format=json response body
var responseV1 = `
{
"version": "1.0.0-compat",
"health": "OK",
"start_time": 1452021674,
"topics": [
{
"topic_name": "t1",
"channels": [
{
"channel_name": "c1",
"depth": 0,
"backend_depth": 1,
"in_flight_count": 2,
"deferred_count": 3,
"message_count": 4,
"requeue_count": 5,
"timeout_count": 6,
"clients": [
{
"client_id": "373a715cd990",
"hostname": "373a715cd990",
"version": "V2",
"remote_address": "172.17.0.11:35560",
"state": 3,
"ready_count": 200,
"in_flight_count": 7,
"message_count": 8,
"finish_count": 9,
"requeue_count": 10,
"connect_ts": 1452021675,
"sample_rate": 11,
"deflate": false,
"snappy": false,
"user_agent": "nsq_to_nsq\/0.3.6 go-nsq\/1.0.5",
"tls": false,
"tls_cipher_suite": "",
"tls_version": "",
"tls_negotiated_protocol": "",
"tls_negotiated_protocol_is_mutual": false
}
],
"paused": false,
"e2e_processing_latency": {
"count": 0,
"percentiles": null
}
}
],
"depth": 12,
"backend_depth": 13,
"message_count": 14,
"paused": false,
"e2e_processing_latency": {
"count": 0,
"percentiles": null
}
},
{
"topic_name": "t2",
"channels": [
{
"channel_name": "c2",
"depth": 15,
"backend_depth": 16,
"in_flight_count": 17,
"deferred_count": 18,
"message_count": 19,
"requeue_count": 20,
"timeout_count": 21,
"clients": [
{
"client_id": "377569bd462b",
"hostname": "377569bd462b",
"version": "V2",
"remote_address": "172.17.0.8:48145",
"state": 3,
"ready_count": 22,
"in_flight_count": 23,
"message_count": 24,
"finish_count": 25,
"requeue_count": 26,
"connect_ts": 1452021678,
"sample_rate": 27,
"deflate": true,
"snappy": true,
"user_agent": "go-nsq\/1.0.5",
"tls": true,
"tls_cipher_suite": "",
"tls_version": "",
"tls_negotiated_protocol": "",
"tls_negotiated_protocol_is_mutual": false
}
],
"paused": false,
"e2e_processing_latency": {
"count": 0,
"percentiles": null
}
}
],
"depth": 28,
"backend_depth": 29,
"message_count": 30,
"paused": false,
"e2e_processing_latency": {
"count": 0,
"percentiles": null
}
}
]
}
`
// TestNSQStatsPreV1 is for backwards compatibility with nsq versions < 1.0
func TestNSQStatsPreV1(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
fmt.Fprintln(w, responsePreV1)
}))
defer ts.Close()
@ -152,7 +409,7 @@ func TestNSQStats(t *testing.T) {
}
}
var response = `
var responsePreV1 = `
{
"status_code": 200,
"status_txt": "OK",

View File

@ -69,7 +69,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error {
// Due to problems with the parsing, we have to use a regular expression
// to remove strings that start with '(' and end with a space
// see: https://github.com/influxdata/telegraf/issues/2386
reg, err := regexp.Compile("\\([\\S]*")
reg, err := regexp.Compile("\\s+\\([\\S]*")
if err != nil {
return err
}
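The added `\s+` is what keeps refclock names such as `SHM(0)` intact while still stripping truncated DNS names like ` (domain)`. A standalone sketch of the behavior (sample lines abridged from the test data below):
```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Whitespace run followed by a '('-prefixed token, as in the plugin.
	reg := regexp.MustCompile(`\s+\([\S]*`)

	line := "+37.58.57.238 (domain) 192.53.103.103 2 u 10 1024 377 1.748 0.373 0.101"
	fmt.Println(reg.ReplaceAllString(line, ""))
	// +37.58.57.238 192.53.103.103 2 u 10 1024 377 1.748 0.373 0.101

	// A leading "SHM(0)" is untouched because no whitespace precedes '('.
	fmt.Println(reg.ReplaceAllString("*SHM(0) .PPS. 1 u 60 64 377", ""))
	// *SHM(0) .PPS. 1 u 60 64 377
}
```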

View File

@ -260,6 +260,57 @@ func TestParserNTPQ(t *testing.T) {
}
acc := testutil.Accumulator{}
assert.NoError(t, acc.GatherError(n.Gather))
fields := map[string]interface{}{
"poll": int64(64),
"when": int64(60),
"reach": int64(377),
"delay": float64(0.0),
"offset": float64(0.045),
"jitter": float64(1.012),
}
tags := map[string]string{
"remote": "SHM(0)",
"state_prefix": "*",
"refid": ".PPS.",
"stratum": "1",
"type": "u",
}
acc.AssertContainsTaggedFields(t, "ntpq", fields, tags)
fields = map[string]interface{}{
"poll": int64(128),
"when": int64(121),
"reach": int64(377),
"delay": float64(0.0),
"offset": float64(10.105),
"jitter": float64(2.012),
}
tags = map[string]string{
"remote": "SHM(1)",
"state_prefix": "-",
"refid": ".GPS.",
"stratum": "1",
"type": "u",
}
acc.AssertContainsTaggedFields(t, "ntpq", fields, tags)
fields = map[string]interface{}{
"poll": int64(1024),
"when": int64(10),
"reach": int64(377),
"delay": float64(1.748),
"offset": float64(0.373),
"jitter": float64(0.101),
}
tags = map[string]string{
"remote": "37.58.57.238",
"state_prefix": "+",
"refid": "192.53.103.103",
"stratum": "2",
"type": "u",
}
acc.AssertContainsTaggedFields(t, "ntpq", fields, tags)
}
func TestMultiNTPQ(t *testing.T) {
@ -480,7 +531,9 @@ var multiNTPQ = ` remote refid st t when poll reach delay
`
var multiParserNTPQ = ` remote refid st t when poll reach delay offset jitter
==============================================================================
*SHM(0) .PPS. 1 u 60 64 377 0.000 0.045 1.012
+37.58.57.238 (d 192.53.103.103 2 u 10 1024 377 1.748 0.373 0.101
+37.58.57.238 (domain) 192.53.103.103 2 u 10 1024 377 1.748 0.373 0.101
+37.58.57.238 ( 192.53.103.103 2 u 10 1024 377 1.748 0.373 0.101
-SHM(1) .GPS. 1 u 121 128 377 0.000 10.105 2.012
`

View File

@ -2,11 +2,12 @@ package sqlserver
import (
"database/sql"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
"sync"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
// go-mssqldb initialization
_ "github.com/zensqlmonitor/go-mssqldb"
)
@ -244,10 +245,10 @@ UNION ALL
SELECT 'Average pending disk IO', AveragePendingDiskIOCount = (SELECT AVG(pending_disk_io_count) FROM sys.dm_os_schedulers WITH (NOLOCK) WHERE scheduler_id < 255 )
UNION ALL
SELECT 'Buffer pool rate (bytes/sec)', BufferPoolRate = (1.0*cntr_value * 8 * 1024) /
(SELECT 1.0*cntr_value FROM sys.dm_os_performance_counters WHERE object_name like '%Buffer Manager%' AND lower(counter_name) = 'Page life expectancy')
(SELECT 1.0*cntr_value FROM sys.dm_os_performance_counters WHERE object_name like '%Buffer Manager%' AND counter_name = 'Page life expectancy')
FROM sys.dm_os_performance_counters
WHERE object_name like '%Buffer Manager%'
AND counter_name = 'database pages'
AND counter_name = 'Database pages'
UNION ALL
SELECT 'Memory grant pending', MemoryGrantPending = cntr_value
FROM sys.dm_os_performance_counters
@ -1022,7 +1023,7 @@ CREATE TABLE #PCounters
Primary Key(object_name, counter_name, instance_name)
);
INSERT #PCounters
SELECT RTrim(spi.object_name) object_name
SELECT DISTINCT RTrim(spi.object_name) object_name
, RTrim(spi.counter_name) counter_name
, RTrim(spi.instance_name) instance_name
, spi.cntr_value
@ -1044,7 +1045,7 @@ CREATE TABLE #CCounters
Primary Key(object_name, counter_name, instance_name)
);
INSERT #CCounters
SELECT RTrim(spi.object_name) object_name
SELECT DISTINCT RTrim(spi.object_name) object_name
, RTrim(spi.counter_name) counter_name
, RTrim(spi.instance_name) instance_name
, spi.cntr_value
@ -1436,16 +1437,16 @@ SELECT
, type = 'Wait stats'
---- values
, [I/O] = SUM([I/O])
, [Latch] = SUM([Latch])
, [Lock] = SUM([Lock])
, [Network] = SUM([Network])
, [Service broker] = SUM([Service broker])
, [Memory] = SUM([Memory])
, [Buffer] = SUM([Buffer])
, [Latch] = SUM([LATCH])
, [Lock] = SUM([LOCK])
, [Network] = SUM([NETWORK])
, [Service broker] = SUM([SERVICE BROKER])
, [Memory] = SUM([MEMORY])
, [Buffer] = SUM([BUFFER])
, [CLR] = SUM([CLR])
, [SQLOS] = SUM([SQLOS])
, [XEvent] = SUM([XEvent])
, [Other] = SUM([Other])
, [XEvent] = SUM([XEVENT])
, [Other] = SUM([OTHER])
, [Total] = SUM([I/O]+[LATCH]+[LOCK]+[NETWORK]+[SERVICE BROKER]+[MEMORY]+[BUFFER]+[CLR]+[XEVENT]+[SQLOS]+[OTHER])
FROM
(
@ -1479,16 +1480,16 @@ SELECT
, type = 'Wait stats'
---- values
, [I/O] = SUM([I/O])
, [Latch] = SUM([Latch])
, [Lock] = SUM([Lock])
, [Network] = SUM([Network])
, [Service broker] = SUM([Service broker])
, [Memory] = SUM([Memory])
, [Buffer] = SUM([Buffer])
, [Latch] = SUM([LATCH])
, [Lock] = SUM([LOCK])
, [Network] = SUM([NETWORK])
, [Service broker] = SUM([SERVICE BROKER])
, [Memory] = SUM([MEMORY])
, [Buffer] = SUM([BUFFER])
, [CLR] = SUM([CLR])
, [SQLOS] = SUM([SQLOS])
, [XEvent] = SUM([XEvent])
, [Other] = SUM([Other])
, [XEvent] = SUM([XEVENT])
, [Other] = SUM([OTHER])
, [Total] = SUM([I/O]+[LATCH]+[LOCK]+[NETWORK]+[SERVICE BROKER]+[MEMORY]+[BUFFER]+[CLR]+[XEVENT]+[SQLOS]+[OTHER])
FROM
(

View File

@ -101,8 +101,15 @@ func (rs *RunningStats) Percentile(n int) float64 {
}
i := int(float64(len(rs.perc)) * float64(n) / float64(100))
if i < 0 {
i = 0
}
return rs.perc[i]
return rs.perc[clamp(i, 0, len(rs.perc)-1)]
}
func clamp(i int, min int, max int) int {
if i < min {
return min
}
if i > max {
return max
}
return i
}
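A worked example of the clamp above (standalone, values illustrative): Percentile(100) computes an index equal to the sample count, one past the end of the sorted slice, and the clamp folds it back to the last element instead of panicking.
```go
package main

import "fmt"

func clamp(i, min, max int) int {
	if i < min {
		return min
	}
	if i > max {
		return max
	}
	return i
}

func main() {
	perc := []float64{5, 11, 32, 45} // sorted samples
	n := 100
	i := int(float64(len(perc)) * float64(n) / float64(100)) // 4, out of range
	fmt.Println(perc[clamp(i, 0, len(perc)-1)])              // 45
}
```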

View File

@ -23,12 +23,18 @@ func TestRunningStats_Single(t *testing.T) {
if rs.Lower() != 10.1 {
t.Errorf("Expected %v, got %v", 10.1, rs.Lower())
}
if rs.Percentile(100) != 10.1 {
t.Errorf("Expected %v, got %v", 10.1, rs.Percentile(100))
}
if rs.Percentile(90) != 10.1 {
t.Errorf("Expected %v, got %v", 10.1, rs.Percentile(90))
}
if rs.Percentile(50) != 10.1 {
t.Errorf("Expected %v, got %v", 10.1, rs.Percentile(50))
}
if rs.Percentile(0) != 10.1 {
t.Errorf("Expected %v, got %v", 10.1, rs.Percentile(0))
}
if rs.Count() != 1 {
t.Errorf("Expected %v, got %v", 1, rs.Count())
}
@ -58,12 +64,18 @@ func TestRunningStats_Duplicate(t *testing.T) {
if rs.Lower() != 10.1 {
t.Errorf("Expected %v, got %v", 10.1, rs.Lower())
}
if rs.Percentile(100) != 10.1 {
t.Errorf("Expected %v, got %v", 10.1, rs.Percentile(100))
}
if rs.Percentile(90) != 10.1 {
t.Errorf("Expected %v, got %v", 10.1, rs.Percentile(90))
}
if rs.Percentile(50) != 10.1 {
t.Errorf("Expected %v, got %v", 10.1, rs.Percentile(50))
}
if rs.Percentile(0) != 10.1 {
t.Errorf("Expected %v, got %v", 10.1, rs.Percentile(0))
}
if rs.Count() != 4 {
t.Errorf("Expected %v, got %v", 4, rs.Count())
}
@ -93,12 +105,18 @@ func TestRunningStats(t *testing.T) {
if rs.Lower() != 5 {
t.Errorf("Expected %v, got %v", 5, rs.Lower())
}
if rs.Percentile(100) != 45 {
t.Errorf("Expected %v, got %v", 45, rs.Percentile(100))
}
if rs.Percentile(90) != 32 {
t.Errorf("Expected %v, got %v", 32, rs.Percentile(90))
}
if rs.Percentile(50) != 11 {
t.Errorf("Expected %v, got %v", 11, rs.Percentile(50))
}
if rs.Percentile(0) != 5 {
t.Errorf("Expected %v, got %v", 5, rs.Percentile(0))
}
if rs.Count() != 16 {
t.Errorf("Expected %v, got %v", 4, rs.Count())
}

View File

@ -251,14 +251,14 @@ func (s *Statsd) Gather(acc telegraf.Accumulator) error {
}
for _, metric := range s.gauges {
acc.AddFields(metric.name, metric.fields, metric.tags, now)
acc.AddGauge(metric.name, metric.fields, metric.tags, now)
}
if s.DeleteGauges {
s.gauges = make(map[string]cachedgauge)
}
for _, metric := range s.counters {
acc.AddFields(metric.name, metric.fields, metric.tags, now)
acc.AddCounter(metric.name, metric.fields, metric.tags, now)
}
if s.DeleteCounters {
s.counters = make(map[string]cachedcounter)

View File

@ -20,9 +20,11 @@ Additionally, the behavior of resolving the `mount_points` can be configured by
When present, this variable is prepended to the mountpoints discovered by the plugin before retrieving stats.
The prefix is stripped from the reported `path` in the measurement.
This setting is useful when running `telegraf` inside a docker container to report host machine metrics.
In this case, the host's root volume should be mounted into the container and the `HOST_MOUNT_PREFIX` and `HOST_ETC` environment variables set.
In this case, the host's root volume should be mounted into the container and the `HOST_MOUNT_PREFIX` and `HOST_PROC` environment variables set.
`docker run -v /:/hostfs:ro -e HOST_MOUNT_PREFIX=/hostfs -e HOST_ETC=/hostfs/etc telegraf-docker`
```
docker run -v /:/hostfs:ro -e HOST_MOUNT_PREFIX=/hostfs -e HOST_PROC=/hostfs/proc telegraf
```
### Measurements & Fields:

View File

@ -11,7 +11,7 @@ import (
type CPUStats struct {
ps PS
lastStats []cpu.TimesStat
lastStats map[string]cpu.TimesStat
PerCPU bool `toml:"percpu"`
TotalCPU bool `toml:"totalcpu"`
@ -53,7 +53,7 @@ func (s *CPUStats) Gather(acc telegraf.Accumulator) error {
}
now := time.Now()
for i, cts := range times {
for _, cts := range times {
tags := map[string]string{
"cpu": cts.CPU,
}
@ -86,14 +86,18 @@ func (s *CPUStats) Gather(acc telegraf.Accumulator) error {
// If it's the 1st gather, can't get CPU Usage stats yet
continue
}
lastCts := s.lastStats[i]
lastCts, ok := s.lastStats[cts.CPU]
if !ok {
continue
}
lastTotal := totalCpuTime(lastCts)
lastActive := activeCpuTime(lastCts)
totalDelta := total - lastTotal
if totalDelta < 0 {
s.lastStats = times
return fmt.Errorf("Error: current total CPU time is less than previous total CPU time")
err = fmt.Errorf("Error: current total CPU time is less than previous total CPU time")
break
}
if totalDelta == 0 {
@ -118,9 +122,12 @@ func (s *CPUStats) Gather(acc telegraf.Accumulator) error {
acc.AddGauge("cpu", fieldsG, tags, now)
}
s.lastStats = times
s.lastStats = make(map[string]cpu.TimesStat)
for _, cts := range times {
s.lastStats[cts.CPU] = cts
}
return nil
return err
}
func totalCpuTime(t cpu.TimesStat) float64 {
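A standalone sketch (values illustrative) of why the switch from a slice index to a name-keyed map matters: when the visible CPU set grows between gathers, the lookup simply misses for the new CPU on its first interval instead of reading a stale or out-of-range slice entry.
```go
package main

import "fmt"

func main() {
	lastStats := map[string]float64{"cpu0": 100} // previous totals, keyed by name
	for _, name := range []string{"cpu0", "cpu1"} {
		last, ok := lastStats[name]
		if !ok {
			fmt.Println(name, "is new; usage is skipped until the next gather")
			continue
		}
		fmt.Println(name, "previous total:", last)
	}
}
```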

View File

@ -149,3 +149,107 @@ func assertContainsTaggedFloat(
measurement, delta, expectedValue, actualValue)
assert.Fail(t, msg)
}
// TestCPUCountIncrease tests that no errors are encountered if the number of
// CPUs increases, as reported with LXC.
func TestCPUCountIncrease(t *testing.T) {
var mps MockPS
var mps2 MockPS
var acc testutil.Accumulator
var err error
cs := NewCPUStats(&mps)
mps.On("CPUTimes").Return(
[]cpu.TimesStat{
cpu.TimesStat{
CPU: "cpu0",
},
}, nil)
err = cs.Gather(&acc)
require.NoError(t, err)
mps2.On("CPUTimes").Return(
[]cpu.TimesStat{
cpu.TimesStat{
CPU: "cpu0",
},
cpu.TimesStat{
CPU: "cpu1",
},
}, nil)
cs.ps = &mps2
err = cs.Gather(&acc)
require.NoError(t, err)
}
// TestCPUTimesDecrease tests that telegraf continues to work after
// CPU times decrease, which can occur when a Linux system is suspended.
func TestCPUTimesDecrease(t *testing.T) {
var mps MockPS
defer mps.AssertExpectations(t)
var acc testutil.Accumulator
cts := cpu.TimesStat{
CPU: "cpu0",
User: 18,
Idle: 80,
Iowait: 2,
}
cts2 := cpu.TimesStat{
CPU: "cpu0",
User: 38, // increased by 20
Idle: 40, // decreased by 40
Iowait: 1, // decreased by 1
}
cts3 := cpu.TimesStat{
CPU: "cpu0",
User: 56, // increased by 18
Idle: 120, // increased by 80
Iowait: 3, // increased by 2
}
mps.On("CPUTimes").Return([]cpu.TimesStat{cts}, nil)
cs := NewCPUStats(&mps)
cputags := map[string]string{
"cpu": "cpu0",
}
err := cs.Gather(&acc)
require.NoError(t, err)
// Computed values are checked with delta > 0 because of floating point
// arithmetic imprecision
assertContainsTaggedFloat(t, &acc, "cpu", "time_user", 18, 0, cputags)
assertContainsTaggedFloat(t, &acc, "cpu", "time_idle", 80, 0, cputags)
assertContainsTaggedFloat(t, &acc, "cpu", "time_iowait", 2, 0, cputags)
mps2 := MockPS{}
mps2.On("CPUTimes").Return([]cpu.TimesStat{cts2}, nil)
cs.ps = &mps2
// CPU times decreased. An error should be raised
err = cs.Gather(&acc)
require.Error(t, err)
mps3 := MockPS{}
mps3.On("CPUTimes").Return([]cpu.TimesStat{cts3}, nil)
cs.ps = &mps3
err = cs.Gather(&acc)
require.NoError(t, err)
assertContainsTaggedFloat(t, &acc, "cpu", "time_user", 56, 0, cputags)
assertContainsTaggedFloat(t, &acc, "cpu", "time_idle", 120, 0, cputags)
assertContainsTaggedFloat(t, &acc, "cpu", "time_iowait", 3, 0, cputags)
assertContainsTaggedFloat(t, &acc, "cpu", "usage_user", 18, 0.0005, cputags)
assertContainsTaggedFloat(t, &acc, "cpu", "usage_idle", 80, 0.0005, cputags)
assertContainsTaggedFloat(t, &acc, "cpu", "usage_iowait", 2, 0.0005, cputags)
}

View File

@ -2,6 +2,7 @@ package system
import (
"fmt"
"log"
"regexp"
"strings"
@ -164,14 +165,13 @@ func (s *DiskIOStats) Gather(acc telegraf.Accumulator) error {
var varRegex = regexp.MustCompile(`\$(?:\w+|\{\w+\})`)
func (s *DiskIOStats) diskName(devName string) string {
di, err := s.diskInfo(devName)
if err != nil {
// discard error :-(
// We can't return error because it's non-fatal to the Gather().
// And we have no logger, so we can't log it.
if len(s.NameTemplates) == 0 {
return devName
}
if di == nil {
di, err := s.diskInfo(devName)
if err != nil {
log.Printf("W! Error gathering disk info: %s", err)
return devName
}
@ -198,14 +198,13 @@ func (s *DiskIOStats) diskName(devName string) string {
}
func (s *DiskIOStats) diskTags(devName string) map[string]string {
di, err := s.diskInfo(devName)
if err != nil {
// discard error :-(
// We can't return error because it's non-fatal to the Gather().
// And we have no logger, so we can't log it.
if len(s.DeviceTags) == 0 {
return nil
}
if di == nil {
di, err := s.diskInfo(devName)
if err != nil {
log.Printf("W! Error gathering disk info: %s", err)
return nil
}

View File

@ -5,25 +5,26 @@ import (
"fmt"
"os"
"strings"
"syscall"
"golang.org/x/sys/unix"
)
type diskInfoCache struct {
stat syscall.Stat_t
values map[string]string
udevDataPath string
values map[string]string
}
var udevPath = "/run/udev/data"
func (s *DiskIOStats) diskInfo(devName string) (map[string]string, error) {
fi, err := os.Stat("/dev/" + devName)
var err error
var stat unix.Stat_t
path := "/dev/" + devName
err = unix.Stat(path, &stat)
if err != nil {
return nil, err
}
stat, ok := fi.Sys().(*syscall.Stat_t)
if !ok {
return nil, nil
}
if s.infoCache == nil {
s.infoCache = map[string]diskInfoCache{}
@ -31,25 +32,26 @@ func (s *DiskIOStats) diskInfo(devName string) (map[string]string, error) {
ic, ok := s.infoCache[devName]
if ok {
return ic.values, nil
} else {
ic = diskInfoCache{
stat: *stat,
values: map[string]string{},
}
s.infoCache[devName] = ic
}
di := ic.values
major := stat.Rdev >> 8 & 0xff
minor := stat.Rdev & 0xff
udevDataPath := fmt.Sprintf("%s/b%d:%d", udevPath, major, minor)
f, err := os.Open(fmt.Sprintf("%s/b%d:%d", udevPath, major, minor))
di := map[string]string{}
s.infoCache[devName] = diskInfoCache{
udevDataPath: udevDataPath,
values: di,
}
f, err := os.Open(udevDataPath)
if err != nil {
return nil, err
}
defer f.Close()
scnr := bufio.NewScanner(f)
scnr := bufio.NewScanner(f)
for scnr.Scan() {
l := scnr.Text()
if len(l) < 4 || l[:2] != "E:" {

View File

@ -62,8 +62,6 @@ func TestDiskUsage(t *testing.T) {
mps.On("Partitions", true).Return(psAll, nil)
mps.On("OSGetenv", "HOST_MOUNT_PREFIX").Return("")
mps.On("OSStat", "/").Return(MockFileInfo{}, nil)
mps.On("OSStat", "/home").Return(MockFileInfo{}, nil)
mps.On("PSDiskUsage", "/").Return(&duAll[0], nil)
mps.On("PSDiskUsage", "/home").Return(&duAll[1], nil)

View File

@ -41,7 +41,7 @@ func (k *KernelVmstat) Gather(acc telegraf.Accumulator) error {
// We only want the even number index as that contain the stat name.
if i%2 == 0 {
// Convert the stat value into an integer.
m, err := strconv.Atoi(string(dataFields[i+1]))
m, err := strconv.ParseInt(string(dataFields[i+1]), 10, 64)
if err != nil {
return err
}
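The switch away from strconv.Atoi matters on 32-bit builds (standalone illustration): Atoi yields a platform-sized int and rejects counters beyond 2^31-1, such as the numa_hit fixture value below, while ParseInt with a bit size of 64 parses them on every platform.
```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	// strconv.Atoi("6690743595") fails with "value out of range" where
	// int is 32 bits; ParseInt always targets int64.
	m, err := strconv.ParseInt("6690743595", 10, 64)
	if err != nil {
		panic(err)
	}
	fmt.Println(m) // 6690743595
}
```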

View File

@ -48,7 +48,7 @@ func TestFullVmStatProcFile(t *testing.T) {
"nr_isolated_anon": int64(0),
"nr_isolated_file": int64(0),
"nr_shmem": int64(541689),
"numa_hit": int64(5113399878),
"numa_hit": int64(6690743595),
"numa_miss": int64(0),
"numa_foreign": int64(0),
"numa_interleave": int64(35793),
@ -200,7 +200,7 @@ nr_writeback_temp 0
nr_isolated_anon 0
nr_isolated_file 0
nr_shmem 541689
numa_hit 5113399878
numa_hit 6690743595
numa_miss 0
numa_foreign 0
numa_interleave 35793

View File

@ -2,6 +2,7 @@ package system
import (
"os"
"strings"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
@ -84,9 +85,14 @@ func (s *systemPS) DiskUsage(
for _, filter := range fstypeExclude {
fstypeExcludeSet[filter] = true
}
paths := make(map[string]bool)
for _, part := range parts {
paths[part.Mountpoint] = true
}
var usage []*disk.UsageStat
var partitions []*disk.PartitionStat
hostMountPrefix := s.OSGetenv("HOST_MOUNT_PREFIX")
for i := range parts {
p := parts[i]
@ -105,15 +111,20 @@ func (s *systemPS) DiskUsage(
continue
}
mountpoint := s.OSGetenv("HOST_MOUNT_PREFIX") + p.Mountpoint
if _, err := s.OSStat(mountpoint); err != nil {
// If there's a host mount prefix, exclude any paths which conflict
// with the prefix.
if len(hostMountPrefix) > 0 &&
!strings.HasPrefix(p.Mountpoint, hostMountPrefix) &&
paths[hostMountPrefix+p.Mountpoint] {
continue
}
du, err := s.PSDiskUsage(mountpoint)
du, err := s.PSDiskUsage(p.Mountpoint)
if err != nil {
continue
}
du.Path = p.Mountpoint
du.Path = strings.TrimPrefix(p.Mountpoint, hostMountPrefix)
du.Fstype = p.Fstype
usage = append(usage, du)
partitions = append(partitions, &p)
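A worked example of the prefix rule above (standalone; mountpoints illustrative): with HOST_MOUNT_PREFIX=/hostfs, the container's own /etc is skipped because /hostfs/etc is also a discovered mountpoint, and /hostfs/etc is then reported with the prefix trimmed from its path.
```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	hostMountPrefix := "/hostfs"
	paths := map[string]bool{"/": true, "/etc": true, "/hostfs/etc": true}
	for _, mp := range []string{"/", "/etc", "/hostfs/etc"} {
		if len(hostMountPrefix) > 0 &&
			!strings.HasPrefix(mp, hostMountPrefix) &&
			paths[hostMountPrefix+mp] {
			fmt.Printf("skip %s (shadowed by %s)\n", mp, hostMountPrefix+mp)
			continue
		}
		fmt.Printf("report %s as %q\n", mp, strings.TrimPrefix(mp, hostMountPrefix))
	}
}
```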

View File

@ -165,7 +165,7 @@ func (s *Tomcat) Gather(acc telegraf.Accumulator) error {
for _, c := range status.TomcatConnectors {
name, err := strconv.Unquote(c.Name)
if err != nil {
return fmt.Errorf("Unable to unquote name '%s': %s", c.Name, err)
name = c.Name
}
tccTags := map[string]string{

View File

@ -11,7 +11,7 @@ import (
"github.com/stretchr/testify/require"
)
var tomcatStatus = `<?xml version="1.0" encoding="UTF-8"?>
var tomcatStatus8 = `<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="/manager/xform.xsl" ?>
<status>
<jvm>
@ -37,10 +37,10 @@ var tomcatStatus = `<?xml version="1.0" encoding="UTF-8"?>
</connector>
</status>`
func TestHTTPTomcat(t *testing.T) {
func TestHTTPTomcat8(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
fmt.Fprintln(w, tomcatStatus)
fmt.Fprintln(w, tomcatStatus8)
}))
defer ts.Close()
@ -91,5 +91,63 @@ func TestHTTPTomcat(t *testing.T) {
"name": "http-apr-8080",
}
acc.AssertContainsTaggedFields(t, "tomcat_connector", connectorFields, connectorTags)
}
var tomcatStatus6 = `<?xml version="1.0" encoding="utf-8"?>
<?xml-stylesheet type="text/xsl" href="xform.xsl" ?>
<status>
<jvm>
<memory free="1942681600" total="2040070144" max="2040070144"/>
</jvm>
<connector name="http-8080">
<threadInfo maxThreads="150" currentThreadCount="2" currentThreadsBusy="2"/>
<requestInfo maxTime="1005" processingTime="2465" requestCount="436" errorCount="16" bytesReceived="0" bytesSent="550196"/>
<workers>
<worker stage="K" requestProcessingTime="526" requestBytesSent="0" requestBytesReceived="0" remoteAddr="127.0.0.1" virtualHost="?" method="?" currentUri="?" currentQueryString="?" protocol="?"/>
<worker stage="S" requestProcessingTime="1" requestBytesSent="0" requestBytesReceived="0" remoteAddr="127.0.0.1" virtualHost="127.0.0.1" method="GET" currentUri="/manager/status/all" currentQueryString="XML=true" protocol="HTTP/1.1"/>
</workers>
</connector>
</status>`
func TestHTTPTomcat6(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
fmt.Fprintln(w, tomcatStatus6)
}))
defer ts.Close()
tc := Tomcat{
URL: ts.URL,
Username: "tomcat",
Password: "s3cret",
}
var acc testutil.Accumulator
err := tc.Gather(&acc)
require.NoError(t, err)
// tomcat_jvm_memory
jvmMemoryFields := map[string]interface{}{
"free": int64(1942681600),
"total": int64(2040070144),
"max": int64(2040070144),
}
acc.AssertContainsFields(t, "tomcat_jvm_memory", jvmMemoryFields)
// tomcat_connector
connectorFields := map[string]interface{}{
"bytes_received": int64(0),
"bytes_sent": int64(550196),
"current_thread_count": int64(2),
"current_threads_busy": int64(2),
"error_count": int(16),
"max_threads": int64(150),
"max_time": int(1005),
"processing_time": int(2465),
"request_count": int(436),
}
connectorTags := map[string]string{
"name": "http-8080",
}
acc.AssertContainsTaggedFields(t, "tomcat_connector", connectorFields, connectorTags)
}

View File

@ -33,41 +33,48 @@ func (z *Zfs) gatherPoolStats(acc telegraf.Accumulator) (string, error) {
tags := map[string]string{"pool": col[0], "health": col[8]}
fields := map[string]interface{}{}
size, err := strconv.ParseInt(col[1], 10, 64)
if err != nil {
return "", fmt.Errorf("Error parsing size: %s", err)
}
fields["size"] = size
if tags["health"] == "UNAVAIL" {
alloc, err := strconv.ParseInt(col[2], 10, 64)
if err != nil {
return "", fmt.Errorf("Error parsing allocation: %s", err)
}
fields["allocated"] = alloc
fields["size"] = int64(0)
free, err := strconv.ParseInt(col[3], 10, 64)
if err != nil {
return "", fmt.Errorf("Error parsing free: %s", err)
}
fields["free"] = free
} else {
frag, err := strconv.ParseInt(strings.TrimSuffix(col[5], "%"), 10, 0)
if err != nil { // This might be - for RO devs
frag = 0
}
fields["fragmentation"] = frag
size, err := strconv.ParseInt(col[1], 10, 64)
if err != nil {
return "", fmt.Errorf("Error parsing size: %s", err)
}
fields["size"] = size
capval, err := strconv.ParseInt(col[6], 10, 0)
if err != nil {
return "", fmt.Errorf("Error parsing capacity: %s", err)
}
fields["capacity"] = capval
alloc, err := strconv.ParseInt(col[2], 10, 64)
if err != nil {
return "", fmt.Errorf("Error parsing allocation: %s", err)
}
fields["allocated"] = alloc
dedup, err := strconv.ParseFloat(strings.TrimSuffix(col[7], "x"), 32)
if err != nil {
return "", fmt.Errorf("Error parsing dedupratio: %s", err)
free, err := strconv.ParseInt(col[3], 10, 64)
if err != nil {
return "", fmt.Errorf("Error parsing free: %s", err)
}
fields["free"] = free
frag, err := strconv.ParseInt(strings.TrimSuffix(col[5], "%"), 10, 0)
if err != nil { // This might be - for RO devs
frag = 0
}
fields["fragmentation"] = frag
capval, err := strconv.ParseInt(col[6], 10, 0)
if err != nil {
return "", fmt.Errorf("Error parsing capacity: %s", err)
}
fields["capacity"] = capval
dedup, err := strconv.ParseFloat(strings.TrimSuffix(col[7], "x"), 32)
if err != nil {
return "", fmt.Errorf("Error parsing dedupratio: %s", err)
}
fields["dedupratio"] = dedup
}
fields["dedupratio"] = dedup
acc.AddFields("zfs_pool", fields, tags)
}

View File

@ -22,6 +22,15 @@ func mock_zpool() ([]string, error) {
return zpool_output, nil
}
// $ zpool list -Hp
var zpool_output_unavail = []string{
"temp2 - - - - - - - UNAVAIL -",
}
func mock_zpool_unavail() ([]string, error) {
return zpool_output_unavail, nil
}
// sysctl -q kstat.zfs.misc.arcstats
// sysctl -q kstat.zfs.misc.vdev_cache_stats
@ -82,6 +91,41 @@ func TestZfsPoolMetrics(t *testing.T) {
acc.AssertContainsTaggedFields(t, "zfs_pool", poolMetrics, tags)
}
func TestZfsPoolMetrics_unavail(t *testing.T) {
var acc testutil.Accumulator
z := &Zfs{
KstatMetrics: []string{"vdev_cache_stats"},
sysctl: mock_sysctl,
zpool: mock_zpool_unavail,
}
err := z.Gather(&acc)
require.NoError(t, err)
require.False(t, acc.HasMeasurement("zfs_pool"))
acc.Metrics = nil
z = &Zfs{
KstatMetrics: []string{"vdev_cache_stats"},
PoolMetrics: true,
sysctl: mock_sysctl,
zpool: mock_zpool_unavail,
}
err = z.Gather(&acc)
require.NoError(t, err)
// one pool, UNAVAIL
tags := map[string]string{
"pool": "temp2",
"health": "UNAVAIL",
}
poolMetrics := getTemp2PoolMetrics()
acc.AssertContainsTaggedFields(t, "zfs_pool", poolMetrics, tags)
}
func TestZfsGeneratesMetrics(t *testing.T) {
var acc testutil.Accumulator
@ -128,6 +172,12 @@ func getFreeNasBootPoolMetrics() map[string]interface{} {
}
}
func getTemp2PoolMetrics() map[string]interface{} {
return map[string]interface{}{
"size": int64(0),
}
}
func getKstatMetricsVdevOnly() map[string]interface{} {
return map[string]interface{}{
"vdev_cache_stats_misses": int64(87789),

View File

@ -12,6 +12,9 @@ based on its main usage cases and the evolution of the OpenTracing standard.*
port = 9411 # Port on which Telegraf listens
```
The plugin accepts spans in `JSON` or `thrift` if the `Content-Type` is `application/json` or `application/x-thrift`, respectively.
If `Content-Type` is not set, then the plugin assumes it is `JSON` format.
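For example, a span can be submitted with any HTTP client; a minimal Go sketch (the `/api/v1/spans` path and the payload are illustrative):
```go
package main

import (
	"bytes"
	"log"
	"net/http"
)

func main() {
	span := []byte(`[{"traceId":"6b221d5bc9e6496c","name":"get-traces","id":"6b221d5bc9e6496c"}]`)
	resp, err := http.Post("http://localhost:9411/api/v1/spans", "application/json", bytes.NewReader(span))
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	log.Println(resp.Status)
}
```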
## Tracing:
This plugin uses Annotations tags and fields to track data from spans

View File

@ -62,13 +62,17 @@ func main() {
if err != nil {
log.Fatalf("%v\n", err)
}
ioutil.WriteFile(outFileName, raw, 0644)
if err := ioutil.WriteFile(outFileName, raw, 0644); err != nil {
log.Fatalf("%v", err)
}
case "thrift":
raw, err := thriftToJSONSpans(contents)
if err != nil {
log.Fatalf("%v\n", err)
}
ioutil.WriteFile(outFileName, raw, 0644)
if err := ioutil.WriteFile(outFileName, raw, 0644); err != nil {
log.Fatalf("%v", err)
}
default:
log.Fatalf("Unsupported input type")
}

View File

@ -0,0 +1,210 @@
package codec
import (
"time"
"github.com/influxdata/telegraf/plugins/inputs/zipkin/trace"
"github.com/openzipkin/zipkin-go-opentracing/_thrift/gen-go/zipkincore"
)
// now is the time source; tests override it to mock time.Now
var now = time.Now
// DefaultServiceName when the span does not have any serviceName
const DefaultServiceName = "unknown"
// Decoder decodes the bytes and returns a trace
type Decoder interface {
Decode(octets []byte) ([]Span, error)
}
// Span are created by instrumentation in RPC clients or servers
type Span interface {
Trace() (string, error)
SpanID() (string, error)
Parent() (string, error)
Name() string
Annotations() []Annotation
BinaryAnnotations() ([]BinaryAnnotation, error)
Timestamp() time.Time
Duration() time.Duration
}
// Annotation represents an event that explains latency with a timestamp.
type Annotation interface {
Timestamp() time.Time
Value() string
Host() Endpoint
}
// BinaryAnnotation represent tags applied to a Span to give it context
type BinaryAnnotation interface {
Key() string
Value() string
Host() Endpoint
}
// Endpoint represents the network context of a service recording an annotation
type Endpoint interface {
Host() string
Name() string
}
// DefaultEndpoint is used if the annotations have no endpoints
type DefaultEndpoint struct{}
// Host returns 0.0.0.0; used when the host is unknown
func (d *DefaultEndpoint) Host() string { return "0.0.0.0" }
// Name returns "unknown" when an endpoint doesn't exist
func (d *DefaultEndpoint) Name() string { return DefaultServiceName }
// MicroToTime converts zipkin's native time of microseconds into time.Time
func MicroToTime(micro int64) time.Time {
return time.Unix(0, micro*int64(time.Microsecond)).UTC()
}
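A quick check of the arithmetic (standalone sketch; microToTime mirrors the helper above): one microsecond is 1,000 nanoseconds, so the value is scaled by time.Microsecond.
```go
package main

import (
	"fmt"
	"time"
)

func microToTime(micro int64) time.Time {
	return time.Unix(0, micro*int64(time.Microsecond)).UTC()
}

func main() {
	fmt.Println(microToTime(1503031538791000).UnixNano()) // 1503031538791000000
}
```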
// NewTrace converts a slice of []Span into a new Trace
func NewTrace(spans []Span) (trace.Trace, error) {
tr := make(trace.Trace, len(spans))
for i, span := range spans {
bin, err := span.BinaryAnnotations()
if err != nil {
return nil, err
}
endpoint := serviceEndpoint(span.Annotations(), bin)
id, err := span.SpanID()
if err != nil {
return nil, err
}
tid, err := span.Trace()
if err != nil {
return nil, err
}
pid, err := parentID(span)
if err != nil {
return nil, err
}
tr[i] = trace.Span{
ID: id,
TraceID: tid,
Name: span.Name(),
Timestamp: guessTimestamp(span),
Duration: convertDuration(span),
ParentID: pid,
ServiceName: endpoint.Name(),
Annotations: NewAnnotations(span.Annotations(), endpoint),
BinaryAnnotations: NewBinaryAnnotations(bin, endpoint),
}
}
return tr, nil
}
// NewAnnotations converts a slice of Annotation into a slice of new Annotations
func NewAnnotations(annotations []Annotation, endpoint Endpoint) []trace.Annotation {
formatted := make([]trace.Annotation, len(annotations))
for i, annotation := range annotations {
formatted[i] = trace.Annotation{
Host: endpoint.Host(),
ServiceName: endpoint.Name(),
Timestamp: annotation.Timestamp(),
Value: annotation.Value(),
}
}
return formatted
}
// NewBinaryAnnotations is very similar to NewAnnotations, but it
// converts BinaryAnnotations instead of the normal Annotation
func NewBinaryAnnotations(annotations []BinaryAnnotation, endpoint Endpoint) []trace.BinaryAnnotation {
formatted := make([]trace.BinaryAnnotation, len(annotations))
for i, annotation := range annotations {
formatted[i] = trace.BinaryAnnotation{
Host: endpoint.Host(),
ServiceName: endpoint.Name(),
Key: annotation.Key(),
Value: annotation.Value(),
}
}
return formatted
}
func minMax(span Span) (time.Time, time.Time) {
min := now().UTC()
max := time.Time{}.UTC()
for _, annotation := range span.Annotations() {
ts := annotation.Timestamp()
if !ts.IsZero() && ts.Before(min) {
min = ts
}
if !ts.IsZero() && ts.After(max) {
max = ts
}
}
if max.IsZero() {
max = min
}
return min, max
}
func guessTimestamp(span Span) time.Time {
ts := span.Timestamp()
if !ts.IsZero() {
return ts
}
min, _ := minMax(span)
return min
}
func convertDuration(span Span) time.Duration {
duration := span.Duration()
if duration != 0 {
return duration
}
min, max := minMax(span)
return max.Sub(min)
}
func parentID(span Span) (string, error) {
// A parent ID of "" means that this is a parent span. In this case,
// we set the parent ID of the span to be its own id, so it points to
// itself.
id, err := span.Parent()
if err != nil {
return "", err
}
if id != "" {
return id, nil
}
return span.SpanID()
}
func serviceEndpoint(ann []Annotation, bann []BinaryAnnotation) Endpoint {
for _, a := range ann {
switch a.Value() {
case zipkincore.SERVER_RECV, zipkincore.SERVER_SEND, zipkincore.CLIENT_RECV, zipkincore.CLIENT_SEND:
if a.Host() != nil && a.Host().Name() != "" {
return a.Host()
}
}
}
for _, a := range bann {
if a.Key() == zipkincore.LOCAL_COMPONENT && a.Host() != nil && a.Host().Name() != "" {
return a.Host()
}
}
// Unable to find any "standard" endpoint host, so use any that exists in the regular annotations
for _, a := range ann {
if a.Host() != nil && a.Host().Name() != "" {
return a.Host()
}
}
return &DefaultEndpoint{}
}

View File

@ -0,0 +1,636 @@
package codec
import (
"fmt"
"reflect"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/influxdata/telegraf/plugins/inputs/zipkin/trace"
)
func Test_MicroToTime(t *testing.T) {
type args struct {
micro int64
}
tests := []struct {
name string
micro int64
want time.Time
}{
{
name: "given zero micro seconds expected unix time zero",
micro: 0,
want: time.Unix(0, 0).UTC(),
},
{
name: "given a million micro seconds expected unix time one",
micro: 1000000,
want: time.Unix(1, 0).UTC(),
},
{
name: "given a million micro seconds expected unix time one",
micro: 1503031538791000,
want: time.Unix(0, 1503031538791000000).UTC(),
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := MicroToTime(tt.micro); !reflect.DeepEqual(got, tt.want) {
t.Errorf("microToTime() = %v, want %v", got, tt.want)
}
})
}
}
func Test_minMax(t *testing.T) {
tests := []struct {
name string
span *MockSpan
now func() time.Time
wantMin time.Time
wantMax time.Time
}{
{
name: "Single annotation",
span: &MockSpan{
Anno: []Annotation{
&MockAnnotation{
Time: time.Unix(0, 0).UTC().Add(time.Second),
},
},
},
wantMin: time.Unix(1, 0).UTC(),
wantMax: time.Unix(1, 0).UTC(),
},
{
name: "Three annotations",
span: &MockSpan{
Anno: []Annotation{
&MockAnnotation{
Time: time.Unix(0, 0).UTC().Add(1 * time.Second),
},
&MockAnnotation{
Time: time.Unix(0, 0).UTC().Add(2 * time.Second),
},
&MockAnnotation{
Time: time.Unix(0, 0).UTC().Add(3 * time.Second),
},
},
},
wantMin: time.Unix(1, 0).UTC(),
wantMax: time.Unix(3, 0).UTC(),
},
{
name: "Annotations are in the future",
span: &MockSpan{
Anno: []Annotation{
&MockAnnotation{
Time: time.Unix(0, 0).UTC().Add(3 * time.Second),
},
},
},
wantMin: time.Unix(2, 0).UTC(),
wantMax: time.Unix(3, 0).UTC(),
now: func() time.Time {
return time.Unix(2, 0).UTC()
},
},
{
name: "No Annotations",
span: &MockSpan{
Anno: []Annotation{},
},
wantMin: time.Unix(2, 0).UTC(),
wantMax: time.Unix(2, 0).UTC(),
now: func() time.Time {
return time.Unix(2, 0).UTC()
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if tt.now != nil {
now = tt.now
}
got, got1 := minMax(tt.span)
if !reflect.DeepEqual(got, tt.wantMin) {
t.Errorf("minMax() got = %v, want %v", got, tt.wantMin)
}
if !reflect.DeepEqual(got1, tt.wantMax) {
t.Errorf("minMax() got1 = %v, want %v", got1, tt.wantMax)
}
now = time.Now
})
}
}
func Test_guessTimestamp(t *testing.T) {
tests := []struct {
name string
span Span
now func() time.Time
want time.Time
}{
{
name: "simple timestamp",
span: &MockSpan{
Time: time.Unix(2, 0).UTC(),
},
want: time.Unix(2, 0).UTC(),
},
{
name: "zero timestamp",
span: &MockSpan{
Time: time.Time{},
},
now: func() time.Time {
return time.Unix(2, 0).UTC()
},
want: time.Unix(2, 0).UTC(),
},
{
name: "zero timestamp with single annotation",
span: &MockSpan{
Time: time.Time{},
Anno: []Annotation{
&MockAnnotation{
Time: time.Unix(0, 0).UTC(),
},
},
},
want: time.Unix(0, 0).UTC(),
},
{
name: "zero timestamp with two annotations",
span: &MockSpan{
Time: time.Time{},
Anno: []Annotation{
&MockAnnotation{
Time: time.Unix(0, 0).UTC(),
},
&MockAnnotation{
Time: time.Unix(2, 0).UTC(),
},
},
},
want: time.Unix(0, 0).UTC(),
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if tt.now != nil {
now = tt.now
}
if got := guessTimestamp(tt.span); !reflect.DeepEqual(got, tt.want) {
t.Errorf("guessTimestamp() = %v, want %v", got, tt.want)
}
now = time.Now
})
}
}
func Test_convertDuration(t *testing.T) {
tests := []struct {
name string
span Span
want time.Duration
}{
{
name: "simple duration",
span: &MockSpan{
Dur: time.Hour,
},
want: time.Hour,
},
{
name: "no timestamp, but, 2 seconds between annotations",
span: &MockSpan{
Anno: []Annotation{
&MockAnnotation{
Time: time.Unix(0, 0).UTC().Add(1 * time.Second),
},
&MockAnnotation{
Time: time.Unix(0, 0).UTC().Add(2 * time.Second),
},
&MockAnnotation{
Time: time.Unix(0, 0).UTC().Add(3 * time.Second),
},
},
},
want: 2 * time.Second,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := convertDuration(tt.span); got != tt.want {
t.Errorf("convertDuration() = %v, want %v", got, tt.want)
}
})
}
}
func Test_parentID(t *testing.T) {
tests := []struct {
name string
span Span
want string
wantErr bool
}{
{
name: "has parent id",
span: &MockSpan{
ParentID: "6b221d5bc9e6496c",
},
want: "6b221d5bc9e6496c",
},
{
name: "no parent, so use id",
span: &MockSpan{
ID: "abceasyas123",
},
want: "abceasyas123",
},
{
name: "bad parent value",
span: &MockSpan{
Error: fmt.Errorf("Mommie Dearest"),
},
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := parentID(tt.span)
if (err != nil) != tt.wantErr {
t.Errorf("parentID() error = %v, wantErr %v", err, tt.wantErr)
return
}
if got != tt.want {
t.Errorf("parentID() = %v, want %v", got, tt.want)
}
})
}
}
func Test_serviceEndpoint(t *testing.T) {
tests := []struct {
name string
ann []Annotation
bann []BinaryAnnotation
want Endpoint
}{
{
name: "Annotation with server receive",
ann: []Annotation{
&MockAnnotation{
Val: "battery",
H: &MockEndpoint{
name: "aa",
},
},
&MockAnnotation{
Val: "sr",
H: &MockEndpoint{
name: "me",
},
},
},
want: &MockEndpoint{
name: "me",
},
},
{
name: "Annotation with no standard values",
ann: []Annotation{
&MockAnnotation{
Val: "noop",
},
&MockAnnotation{
Val: "aa",
H: &MockEndpoint{
name: "battery",
},
},
},
want: &MockEndpoint{
name: "battery",
},
},
{
name: "Annotation with no endpoints",
ann: []Annotation{
&MockAnnotation{
Val: "noop",
},
},
want: &DefaultEndpoint{},
},
{
name: "Binary annotation with local component",
bann: []BinaryAnnotation{
&MockBinaryAnnotation{
K: "noop",
H: &MockEndpoint{
name: "aa",
},
},
&MockBinaryAnnotation{
K: "lc",
H: &MockEndpoint{
name: "me",
},
},
},
want: &MockEndpoint{
name: "me",
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := serviceEndpoint(tt.ann, tt.bann); !reflect.DeepEqual(got, tt.want) {
t.Errorf("serviceEndpoint() = %v, want %v", got, tt.want)
}
})
}
}
func TestNewBinaryAnnotations(t *testing.T) {
tests := []struct {
name string
annotations []BinaryAnnotation
endpoint Endpoint
want []trace.BinaryAnnotation
}{
{
name: "Should override annotation with endpoint",
annotations: []BinaryAnnotation{
&MockBinaryAnnotation{
K: "mykey",
V: "myvalue",
H: &MockEndpoint{
host: "noop",
name: "noop",
},
},
},
endpoint: &MockEndpoint{
host: "myhost",
name: "myservice",
},
want: []trace.BinaryAnnotation{
trace.BinaryAnnotation{
Host: "myhost",
ServiceName: "myservice",
Key: "mykey",
Value: "myvalue",
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := NewBinaryAnnotations(tt.annotations, tt.endpoint); !reflect.DeepEqual(got, tt.want) {
t.Errorf("NewBinaryAnnotations() = %v, want %v", got, tt.want)
}
})
}
}
func TestNewAnnotations(t *testing.T) {
tests := []struct {
name string
annotations []Annotation
endpoint Endpoint
want []trace.Annotation
}{
{
name: "Should override annotation with endpoint",
annotations: []Annotation{
&MockAnnotation{
Time: time.Unix(0, 0).UTC(),
Val: "myvalue",
H: &MockEndpoint{
host: "noop",
name: "noop",
},
},
},
endpoint: &MockEndpoint{
host: "myhost",
name: "myservice",
},
want: []trace.Annotation{
trace.Annotation{
Host: "myhost",
ServiceName: "myservice",
Timestamp: time.Unix(0, 0).UTC(),
Value: "myvalue",
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := NewAnnotations(tt.annotations, tt.endpoint); !reflect.DeepEqual(got, tt.want) {
t.Errorf("NewAnnotations() = %v, want %v", got, tt.want)
}
})
}
}
func TestNewTrace(t *testing.T) {
tests := []struct {
name string
spans []Span
now func() time.Time
want trace.Trace
wantErr bool
}{
{
name: "empty span",
spans: []Span{
&MockSpan{},
},
now: func() time.Time {
return time.Unix(0, 0).UTC()
},
want: trace.Trace{
trace.Span{
ServiceName: "unknown",
Timestamp: time.Unix(0, 0).UTC(),
Annotations: []trace.Annotation{},
BinaryAnnotations: []trace.BinaryAnnotation{},
},
},
},
{
name: "span has no id",
spans: []Span{
&MockSpan{
Error: fmt.Errorf("Span has no id"),
},
},
wantErr: true,
},
{
name: "complete span",
spans: []Span{
&MockSpan{
TraceID: "tid",
ID: "id",
ParentID: "",
ServiceName: "me",
Anno: []Annotation{
&MockAnnotation{
Time: time.Unix(1, 0).UTC(),
Val: "myval",
H: &MockEndpoint{
host: "myhost",
name: "myname",
},
},
},
Time: time.Unix(0, 0).UTC(),
Dur: 2 * time.Second,
},
},
now: func() time.Time {
return time.Unix(0, 0).UTC()
},
want: trace.Trace{
trace.Span{
ID: "id",
ParentID: "id",
TraceID: "tid",
Name: "me",
ServiceName: "myname",
Timestamp: time.Unix(0, 0).UTC(),
Duration: 2 * time.Second,
Annotations: []trace.Annotation{
{
Timestamp: time.Unix(1, 0).UTC(),
Value: "myval",
Host: "myhost",
ServiceName: "myname",
},
},
BinaryAnnotations: []trace.BinaryAnnotation{},
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if tt.now != nil {
now = tt.now
}
got, err := NewTrace(tt.spans)
if (err != nil) != tt.wantErr {
t.Errorf("NewTrace() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !cmp.Equal(tt.want, got) {
t.Errorf("NewTrace() = %s", cmp.Diff(tt.want, got))
}
now = time.Now
})
}
}
type MockSpan struct {
TraceID string
ID string
ParentID string
ServiceName string
Anno []Annotation
BinAnno []BinaryAnnotation
Time time.Time
Dur time.Duration
Error error
}
func (m *MockSpan) Trace() (string, error) {
return m.TraceID, m.Error
}
func (m *MockSpan) SpanID() (string, error) {
return m.ID, m.Error
}
func (m *MockSpan) Parent() (string, error) {
return m.ParentID, m.Error
}
func (m *MockSpan) Name() string {
return m.ServiceName
}
func (m *MockSpan) Annotations() []Annotation {
return m.Anno
}
func (m *MockSpan) BinaryAnnotations() ([]BinaryAnnotation, error) {
return m.BinAnno, m.Error
}
func (m *MockSpan) Timestamp() time.Time {
return m.Time
}
func (m *MockSpan) Duration() time.Duration {
return m.Dur
}
type MockAnnotation struct {
Time time.Time
Val string
H Endpoint
}
func (m *MockAnnotation) Timestamp() time.Time {
return m.Time
}
func (m *MockAnnotation) Value() string {
return m.Val
}
func (m *MockAnnotation) Host() Endpoint {
return m.H
}
type MockEndpoint struct {
host string
name string
}
func (e *MockEndpoint) Host() string {
return e.host
}
func (e *MockEndpoint) Name() string {
return e.name
}
type MockBinaryAnnotation struct {
Time time.Time
K string
V string
H Endpoint
}
func (b *MockBinaryAnnotation) Key() string {
return b.K
}
func (b *MockBinaryAnnotation) Value() string {
return b.V
}
func (b *MockBinaryAnnotation) Host() Endpoint {
return b.H
}

View File

@ -0,0 +1,252 @@
package jsonV1
import (
"encoding/json"
"fmt"
"strconv"
"time"
"github.com/influxdata/telegraf/plugins/inputs/zipkin/codec"
"github.com/openzipkin/zipkin-go-opentracing/_thrift/gen-go/zipkincore"
)
// JSON decodes spans from bodies `POST`ed to the spans endpoint
type JSON struct{}
// Decode unmarshals and validates the JSON body
func (j *JSON) Decode(octets []byte) ([]codec.Span, error) {
var spans []span
err := json.Unmarshal(octets, &spans)
if err != nil {
return nil, err
}
res := make([]codec.Span, len(spans))
for i := range spans {
if err := spans[i].Validate(); err != nil {
return nil, err
}
res[i] = &spans[i]
}
return res, nil
}
type span struct {
TraceID string `json:"traceId"`
SpanName string `json:"name"`
ParentID string `json:"parentId,omitempty"`
ID string `json:"id"`
Time *int64 `json:"timestamp,omitempty"`
Dur *int64 `json:"duration,omitempty"`
Debug bool `json:"debug,omitempty"`
Anno []annotation `json:"annotations"`
BAnno []binaryAnnotation `json:"binaryAnnotations"`
}
func (s *span) Validate() error {
var err error
check := func(f func() (string, error)) {
if err != nil {
return
}
_, err = f()
}
check(s.Trace)
check(s.SpanID)
check(s.Parent)
if err != nil {
return err
}
_, err = s.BinaryAnnotations()
return err
}
func (s *span) Trace() (string, error) {
if s.TraceID == "" {
return "", fmt.Errorf("Trace ID cannot be null")
}
return TraceIDFromString(s.TraceID)
}
func (s *span) SpanID() (string, error) {
if s.ID == "" {
return "", fmt.Errorf("Span ID cannot be null")
}
return IDFromString(s.ID)
}
func (s *span) Parent() (string, error) {
if s.ParentID == "" {
return "", nil
}
return IDFromString(s.ParentID)
}
func (s *span) Name() string {
return s.SpanName
}
func (s *span) Annotations() []codec.Annotation {
res := make([]codec.Annotation, len(s.Anno))
for i := range s.Anno {
res[i] = &s.Anno[i]
}
return res
}
func (s *span) BinaryAnnotations() ([]codec.BinaryAnnotation, error) {
res := make([]codec.BinaryAnnotation, len(s.BAnno))
for i, a := range s.BAnno {
if a.Key() != "" && a.Value() == "" {
return nil, fmt.Errorf("No value for key %s at binaryAnnotations[%d]", a.K, i)
}
if a.Value() != "" && a.Key() == "" {
return nil, fmt.Errorf("No key at binaryAnnotations[%d]", i)
}
res[i] = &s.BAnno[i]
}
return res, nil
}
func (s *span) Timestamp() time.Time {
if s.Time == nil {
return time.Time{}
}
return codec.MicroToTime(*s.Time)
}
func (s *span) Duration() time.Duration {
if s.Dur == nil {
return 0
}
return time.Duration(*s.Dur) * time.Microsecond
}
type annotation struct {
Endpoint *endpoint `json:"endpoint,omitempty"`
Time int64 `json:"timestamp"`
Val string `json:"value,omitempty"`
}
func (a *annotation) Timestamp() time.Time {
return codec.MicroToTime(a.Time)
}
func (a *annotation) Value() string {
return a.Val
}
func (a *annotation) Host() codec.Endpoint {
return a.Endpoint
}
type binaryAnnotation struct {
K string `json:"key"`
V json.RawMessage `json:"value"`
Type string `json:"type"`
Endpoint *endpoint `json:"endpoint,omitempty"`
}
func (b *binaryAnnotation) Key() string {
return b.K
}
func (b *binaryAnnotation) Value() string {
t, err := zipkincore.AnnotationTypeFromString(b.Type)
// Assume this is a string if we cannot tell the type
if err != nil {
t = zipkincore.AnnotationType_STRING
}
switch t {
case zipkincore.AnnotationType_BOOL:
var v bool
err := json.Unmarshal(b.V, &v)
if err == nil {
return strconv.FormatBool(v)
}
case zipkincore.AnnotationType_BYTES:
return string(b.V)
case zipkincore.AnnotationType_I16, zipkincore.AnnotationType_I32, zipkincore.AnnotationType_I64:
var v int64
err := json.Unmarshal(b.V, &v)
if err == nil {
return strconv.FormatInt(v, 10)
}
case zipkincore.AnnotationType_DOUBLE:
var v float64
err := json.Unmarshal(b.V, &v)
if err == nil {
return strconv.FormatFloat(v, 'f', -1, 64)
}
case zipkincore.AnnotationType_STRING:
var v string
err := json.Unmarshal(b.V, &v)
if err == nil {
return v
}
}
return ""
}
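// A few worked examples of the type switch above, with illustrative inputs
// rather than values taken from the change:
//
//   (&binaryAnnotation{V: json.RawMessage(`true`), Type: "BOOL"}).Value()    // "true"
//   (&binaryAnnotation{V: json.RawMessage(`1.5`), Type: "DOUBLE"}).Value()   // "1.5"
//   (&binaryAnnotation{V: json.RawMessage(`"abc"`), Type: "STRING"}).Value() // "abc"
//
// An unrecognized Type falls back to STRING, and a value that fails to
// unmarshal yields the empty string.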
func (b *binaryAnnotation) Host() codec.Endpoint {
return b.Endpoint
}
type endpoint struct {
ServiceName string `json:"serviceName"`
Ipv4 string `json:"ipv4"`
Ipv6 string `json:"ipv6,omitempty"`
Port int `json:"port"`
}
func (e *endpoint) Host() string {
if e.Port != 0 {
return fmt.Sprintf("%s:%d", e.Ipv4, e.Port)
}
return e.Ipv4
}
func (e *endpoint) Name() string {
return e.ServiceName
}
// TraceIDFromString creates a TraceID from a hexadecimal string
func TraceIDFromString(s string) (string, error) {
var hi, lo uint64
var err error
if len(s) > 32 {
return "", fmt.Errorf("TraceID cannot be longer than 32 hex characters: %s", s)
} else if len(s) > 16 {
hiLen := len(s) - 16
if hi, err = strconv.ParseUint(s[0:hiLen], 16, 64); err != nil {
return "", err
}
if lo, err = strconv.ParseUint(s[hiLen:], 16, 64); err != nil {
return "", err
}
} else {
if lo, err = strconv.ParseUint(s, 16, 64); err != nil {
return "", err
}
}
if hi == 0 {
return fmt.Sprintf("%x", lo), nil
}
return fmt.Sprintf("%x%016x", hi, lo), nil
}
// IDFromString creates a decimal id from a hexadecimal string
func IDFromString(s string) (string, error) {
if len(s) > 16 {
return "", fmt.Errorf("ID cannot be longer than 16 hex characters: %s", s)
}
id, err := strconv.ParseUint(s, 16, 64)
if err != nil {
return "", err
}
return strconv.FormatUint(id, 10), nil
}
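// A worked example of the two helpers above, with values drawn from the
// tests below:
//
//   TraceIDFromString("48485a3953bb61246b221d5bc9e6496c")
//   // splits into hi "48485a3953bb6124" and lo "6b221d5bc9e6496c",
//   // then re-renders as "48485a3953bb61246b221d5bc9e6496c"
//
//   IDFromString("6b221d5bc9e6496c")
//   // parses the 64-bit hex value and returns "7719764991332993388"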


@ -0,0 +1,920 @@
package jsonV1
import (
"encoding/json"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/influxdata/telegraf/plugins/inputs/zipkin/codec"
)
func TestJSON_Decode(t *testing.T) {
addr := func(i int64) *int64 { return &i }
tests := []struct {
name string
octets []byte
want []codec.Span
wantErr bool
}{
{
name: "bad json is error",
octets: []byte(`
[
{
]`),
wantErr: true,
},
{
name: "Decodes simple trace",
octets: []byte(`
[
{
"traceId": "6b221d5bc9e6496c",
"name": "get-traces",
"id": "6b221d5bc9e6496c"
}
]`),
want: []codec.Span{
&span{
TraceID: "6b221d5bc9e6496c",
SpanName: "get-traces",
ID: "6b221d5bc9e6496c",
},
},
},
{
name: "Decodes two spans",
octets: []byte(`
[
{
"traceId": "6b221d5bc9e6496c",
"name": "get-traces",
"id": "6b221d5bc9e6496c"
},
{
"traceId": "6b221d5bc9e6496c",
"name": "get-traces",
"id": "c6946e9cb5d122b6",
"parentId": "6b221d5bc9e6496c",
"duration": 10000
}
]`),
want: []codec.Span{
&span{
TraceID: "6b221d5bc9e6496c",
SpanName: "get-traces",
ID: "6b221d5bc9e6496c",
},
&span{
TraceID: "6b221d5bc9e6496c",
SpanName: "get-traces",
ID: "c6946e9cb5d122b6",
ParentID: "6b221d5bc9e6496c",
Dur: addr(10000),
},
},
},
{
name: "Decodes trace with timestamp",
octets: []byte(`
[
{
"traceId": "6b221d5bc9e6496c",
"name": "get-traces",
"id": "6b221d5bc9e6496c",
"timestamp": 1503031538791000
}
]`),
want: []codec.Span{
&span{
TraceID: "6b221d5bc9e6496c",
SpanName: "get-traces",
ID: "6b221d5bc9e6496c",
Time: addr(1503031538791000),
},
},
},
{
name: "Decodes simple trace with high and low trace id",
octets: []byte(`
[
{
"traceId": "48485a3953bb61246b221d5bc9e6496c",
"name": "get-traces",
"id": "6b221d5bc9e6496c"
}
]`),
want: []codec.Span{
&span{
TraceID: "48485a3953bb61246b221d5bc9e6496c",
SpanName: "get-traces",
ID: "6b221d5bc9e6496c",
},
},
},
{
name: "Error when trace id is null",
octets: []byte(`
[
{
"traceId": null,
"name": "get-traces",
"id": "6b221d5bc9e6496c"
}
]`),
wantErr: true,
},
{
name: "ignore null parentId",
octets: []byte(`
[
{
"traceId": "48485a3953bb61246b221d5bc9e6496c",
"name": "get-traces",
"id": "6b221d5bc9e6496c",
"parentId": null
}
]`),
want: []codec.Span{
&span{
TraceID: "48485a3953bb61246b221d5bc9e6496c",
SpanName: "get-traces",
ID: "6b221d5bc9e6496c",
},
},
},
{
name: "ignore null timestamp",
octets: []byte(`
[
{
"traceId": "48485a3953bb61246b221d5bc9e6496c",
"name": "get-traces",
"id": "6b221d5bc9e6496c",
"timestamp": null
}
]`),
want: []codec.Span{
&span{
TraceID: "48485a3953bb61246b221d5bc9e6496c",
SpanName: "get-traces",
ID: "6b221d5bc9e6496c",
},
},
},
{
name: "ignore null duration",
octets: []byte(`
[
{
"traceId": "48485a3953bb61246b221d5bc9e6496c",
"name": "get-traces",
"id": "6b221d5bc9e6496c",
"duration": null
}
]`),
want: []codec.Span{
&span{
TraceID: "48485a3953bb61246b221d5bc9e6496c",
SpanName: "get-traces",
ID: "6b221d5bc9e6496c",
},
},
},
{
name: "ignore null annotation endpoint",
octets: []byte(`
[
{
"traceId": "48485a3953bb61246b221d5bc9e6496c",
"name": "get-traces",
"id": "6b221d5bc9e6496c",
"annotations": [
{
"timestamp": 1461750491274000,
"value": "cs",
"endpoint": null
}
]
}
]`),
want: []codec.Span{
&span{
TraceID: "48485a3953bb61246b221d5bc9e6496c",
SpanName: "get-traces",
ID: "6b221d5bc9e6496c",
Anno: []annotation{
{
Time: 1461750491274000,
Val: "cs",
},
},
},
},
},
{
name: "ignore null binary annotation endpoint",
octets: []byte(`
[
{
"traceId": "48485a3953bb61246b221d5bc9e6496c",
"name": "get-traces",
"id": "6b221d5bc9e6496c",
"binaryAnnotations": [
{
"key": "lc",
"value": "JDBCSpanStore",
"endpoint": null
}
]
}
]`),
want: []codec.Span{
&span{
TraceID: "48485a3953bb61246b221d5bc9e6496c",
SpanName: "get-traces",
ID: "6b221d5bc9e6496c",
BAnno: []binaryAnnotation{
{
K: "lc",
V: json.RawMessage(`"JDBCSpanStore"`),
},
},
},
},
},
{
name: "Error when binary annotation has no key",
octets: []byte(`
[
{
"traceId": "48485a3953bb61246b221d5bc9e6496c",
"name": "get-traces",
"id": "6b221d5bc9e6496c",
"binaryAnnotations": [
{
"value": "JDBCSpanStore",
"endpoint": null
}
]
}
]`),
wantErr: true,
},
{
name: "Error when binary annotation has no value",
octets: []byte(`
[
{
"traceId": "48485a3953bb61246b221d5bc9e6496c",
"name": "get-traces",
"id": "6b221d5bc9e6496c",
"binaryAnnotations": [
{
"key": "lc",
"endpoint": null
}
]
}
]`),
wantErr: true,
},
{
name: "binary annotation with endpoint",
octets: []byte(`
[
{
"traceId": "48485a3953bb61246b221d5bc9e6496c",
"name": "get-traces",
"id": "6b221d5bc9e6496c",
"binaryAnnotations": [
{
"key": "lc",
"value": "JDBCSpanStore",
"endpoint": {
"serviceName": "service",
"port": 65535
}
}
]
}
]`),
want: []codec.Span{
&span{
TraceID: "48485a3953bb61246b221d5bc9e6496c",
SpanName: "get-traces",
ID: "6b221d5bc9e6496c",
BAnno: []binaryAnnotation{
{
K: "lc",
V: json.RawMessage(`"JDBCSpanStore"`),
Endpoint: &endpoint{
ServiceName: "service",
Port: 65535,
},
},
},
},
},
},
{
name: "binary annotation with double value",
octets: []byte(`
[
{
"traceId": "48485a3953bb61246b221d5bc9e6496c",
"name": "get-traces",
"id": "6b221d5bc9e6496c",
"binaryAnnotations": [
{
"key": "num",
"value": 1.23456789,
"type": "DOUBLE"
}
]
}
]`),
want: []codec.Span{
&span{
TraceID: "48485a3953bb61246b221d5bc9e6496c",
SpanName: "get-traces",
ID: "6b221d5bc9e6496c",
BAnno: []binaryAnnotation{
{
K: "num",
V: json.RawMessage{0x31, 0x2e, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39},
Type: "DOUBLE",
},
},
},
},
},
{
name: "binary annotation with integer value",
octets: []byte(`
[
{
"traceId": "48485a3953bb61246b221d5bc9e6496c",
"name": "get-traces",
"id": "6b221d5bc9e6496c",
"binaryAnnotations": [
{
"key": "num",
"value": 1,
"type": "I16"
}
]
}
]`),
want: []codec.Span{
&span{
TraceID: "48485a3953bb61246b221d5bc9e6496c",
SpanName: "get-traces",
ID: "6b221d5bc9e6496c",
BAnno: []binaryAnnotation{
{
K: "num",
V: json.RawMessage{0x31},
Type: "I16",
},
},
},
},
},
{
name: "binary annotation with bool value",
octets: []byte(`
[
{
"traceId": "48485a3953bb61246b221d5bc9e6496c",
"name": "get-traces",
"id": "6b221d5bc9e6496c",
"binaryAnnotations": [
{
"key": "num",
"value": true,
"type": "BOOL"
}
]
}
]`),
want: []codec.Span{
&span{
TraceID: "48485a3953bb61246b221d5bc9e6496c",
SpanName: "get-traces",
ID: "6b221d5bc9e6496c",
BAnno: []binaryAnnotation{
{
K: "num",
V: json.RawMessage(`true`),
Type: "BOOL",
},
},
},
},
},
{
name: "binary annotation with bytes value",
octets: []byte(`
[
{
"traceId": "48485a3953bb61246b221d5bc9e6496c",
"name": "get-traces",
"id": "6b221d5bc9e6496c",
"binaryAnnotations": [
{
"key": "num",
"value": "1",
"type": "BYTES"
}
]
}
]`),
want: []codec.Span{
&span{
TraceID: "48485a3953bb61246b221d5bc9e6496c",
SpanName: "get-traces",
ID: "6b221d5bc9e6496c",
BAnno: []binaryAnnotation{
{
K: "num",
V: json.RawMessage(`"1"`),
Type: "BYTES",
},
},
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
j := &JSON{}
got, err := j.Decode(tt.octets)
if (err != nil) != tt.wantErr {
t.Errorf("JSON.Decode() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !cmp.Equal(tt.want, got) {
t.Errorf("JSON.Decode() = got(-)/want(+) %s", cmp.Diff(tt.want, got))
}
})
}
}
func Test_span_Trace(t *testing.T) {
tests := []struct {
name string
TraceID string
want string
wantErr bool
}{
{
name: "Trace IDs cannot be null",
TraceID: "",
wantErr: true,
},
{
name: "converts hex string correctly",
TraceID: "deadbeef",
want: "deadbeef",
},
{
name: "converts high and low trace id correctly",
TraceID: "48485a3953bb61246b221d5bc9e6496c",
want: "48485a3953bb61246b221d5bc9e6496c",
},
{
name: "errors when string isn't hex",
TraceID: "oxdeadbeef",
wantErr: true,
},
{
name: "errors when id is too long",
TraceID: "1234567890abcdef1234567890abcdef1",
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
s := &span{
TraceID: tt.TraceID,
}
got, err := s.Trace()
if (err != nil) != tt.wantErr {
t.Errorf("span.Trace() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !cmp.Equal(tt.want, got) {
t.Errorf("span.Trace() = got(-)/want(+) %s", cmp.Diff(tt.want, got))
}
})
}
}
func Test_span_SpanID(t *testing.T) {
tests := []struct {
name string
ID string
want string
wantErr bool
}{
{
name: "Span IDs cannot be null",
ID: "",
wantErr: true,
},
{
name: "converts known id correctly",
ID: "b26412d1ac16767d",
want: "12854419928166856317",
},
{
name: "converts hex string correctly",
ID: "deadbeef",
want: "3735928559",
},
{
name: "errors when string isn't hex",
ID: "oxdeadbeef",
wantErr: true,
},
{
name: "errors when id is too long",
ID: "1234567890abcdef1",
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
s := &span{
ID: tt.ID,
}
got, err := s.SpanID()
if (err != nil) != tt.wantErr {
t.Errorf("span.SpanID() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !cmp.Equal(tt.want, got) {
t.Errorf("span.SpanID() = got(-)/want(+) %s", cmp.Diff(tt.want, got))
}
})
}
}
func Test_span_Parent(t *testing.T) {
tests := []struct {
name string
ParentID string
want string
wantErr bool
}{
{
name: "when there is no parent return empty string",
ParentID: "",
want: "",
},
{
name: "converts hex string correctly",
ParentID: "deadbeef",
want: "3735928559",
},
{
name: "errors when string isn't hex",
ParentID: "oxdeadbeef",
wantErr: true,
},
{
name: "errors when parent id is too long",
ParentID: "1234567890abcdef1",
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
s := &span{
ParentID: tt.ParentID,
}
got, err := s.Parent()
if (err != nil) != tt.wantErr {
t.Errorf("span.Parent() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !cmp.Equal(tt.want, got) {
t.Errorf("span.Parent() = got(-)/want(+) %s", cmp.Diff(tt.want, got))
}
})
}
}
func Test_span_Timestamp(t *testing.T) {
tests := []struct {
name string
Time *int64
want time.Time
}{
{
name: "converts to microseconds",
Time: func(i int64) *int64 { return &i }(3000000),
want: time.Unix(3, 0).UTC(),
},
{
name: "nil time should be zero time",
Time: nil,
want: time.Time{},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
s := &span{
Time: tt.Time,
}
if got := s.Timestamp(); !cmp.Equal(tt.want, got) {
t.Errorf("span.Timestamp() = got(-)/want(+) %s", cmp.Diff(tt.want, got))
}
})
}
}
func Test_span_Duration(t *testing.T) {
tests := []struct {
name string
dur *int64
want time.Duration
}{
{
name: "converts from 3 microseconds",
dur: func(i int64) *int64 { return &i }(3000000),
want: 3 * time.Second,
},
{
name: "nil time should be zero duration",
dur: nil,
want: 0,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
s := &span{
Dur: tt.dur,
}
if got := s.Duration(); got != tt.want {
t.Errorf("span.Duration() = %v, want %v", got, tt.want)
}
})
}
}
func Test_annotation(t *testing.T) {
type fields struct {
Endpoint *endpoint
Time int64
Val string
}
tests := []struct {
name string
fields fields
tm time.Time
val string
endpoint *endpoint
}{
{
name: "returns all fields",
fields: fields{
Time: 3000000,
Val: "myvalue",
Endpoint: &endpoint{
ServiceName: "myservice",
Ipv4: "127.0.0.1",
Port: 443,
},
},
tm: time.Unix(3, 0).UTC(),
val: "myvalue",
endpoint: &endpoint{
ServiceName: "myservice",
Ipv4: "127.0.0.1",
Port: 443,
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
an := annotation(tt.fields)
a := &an
if got := a.Timestamp(); got != tt.tm {
t.Errorf("annotation.Timestamp() = %v, want %v", got, tt.tm)
}
if got := a.Value(); got != tt.val {
t.Errorf("annotation.Value() = %v, want %v", got, tt.val)
}
if got := a.Host(); !cmp.Equal(tt.endpoint, got) {
t.Errorf("annotation.Endpoint() = %v, want %v", got, tt.endpoint)
}
})
}
}
func Test_binaryAnnotation(t *testing.T) {
type fields struct {
K string
V json.RawMessage
Type string
Endpoint *endpoint
}
tests := []struct {
name string
fields fields
key string
value string
endpoint *endpoint
}{
{
name: "returns all fields",
fields: fields{
K: "key",
V: json.RawMessage(`"value"`),
Endpoint: &endpoint{
ServiceName: "myservice",
Ipv4: "127.0.0.1",
Port: 443,
},
},
key: "key",
value: "value",
endpoint: &endpoint{
ServiceName: "myservice",
Ipv4: "127.0.0.1",
Port: 443,
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
bin := binaryAnnotation(tt.fields)
b := &bin
if got := b.Key(); got != tt.key {
t.Errorf("binaryAnnotation.Key() = %v, want %v", got, tt.key)
}
if got := b.Value(); got != tt.value {
t.Errorf("binaryAnnotation.Value() = %v, want %v", got, tt.value)
}
if got := b.Host(); !cmp.Equal(tt.endpoint, got) {
t.Errorf("binaryAnnotation.Endpoint() = %v, want %v", got, tt.endpoint)
}
})
}
}
func Test_endpoint_Host(t *testing.T) {
type fields struct {
Ipv4 string
Port int
}
tests := []struct {
name string
fields fields
want string
}{
{
name: "with port",
fields: fields{
Ipv4: "127.0.0.1",
Port: 443,
},
want: "127.0.0.1:443",
},
{
name: "no port",
fields: fields{
Ipv4: "127.0.0.1",
},
want: "127.0.0.1",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
e := &endpoint{
Ipv4: tt.fields.Ipv4,
Port: tt.fields.Port,
}
if got := e.Host(); got != tt.want {
t.Errorf("endpoint.Host() = %v, want %v", got, tt.want)
}
})
}
}
func Test_endpoint_Name(t *testing.T) {
tests := []struct {
name string
ServiceName string
want string
}{
{
name: "has service name",
ServiceName: "myservicename",
want: "myservicename",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
e := &endpoint{
ServiceName: tt.ServiceName,
}
if got := e.Name(); got != tt.want {
t.Errorf("endpoint.Name() = %v, want %v", got, tt.want)
}
})
}
}
func TestTraceIDFromString(t *testing.T) {
tests := []struct {
name string
s string
want string
wantErr bool
}{
{
name: "Convert hex string id",
s: "6b221d5bc9e6496c",
want: "6b221d5bc9e6496c",
},
{
name: "error : id too long",
s: "1234567890abcdef1234567890abcdef1",
wantErr: true,
},
{
name: "error : not parsable",
s: "howdyhowdyhowdy",
wantErr: true,
},
{
name: "Convert hex string with high/low",
s: "48485a3953bb61246b221d5bc9e6496c",
want: "48485a3953bb61246b221d5bc9e6496c",
},
{
name: "errors in high",
s: "ERR85a3953bb61246b221d5bc9e6496c",
wantErr: true,
},
{
name: "errors in low",
s: "48485a3953bb61246b221d5bc9e64ERR",
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := TraceIDFromString(tt.s)
if (err != nil) != tt.wantErr {
t.Errorf("TraceIDFromString() error = %v, wantErr %v", err, tt.wantErr)
return
}
if got != tt.want {
t.Errorf("TraceIDFromString() = %v, want %v", got, tt.want)
}
})
}
}
func TestIDFromString(t *testing.T) {
tests := []struct {
name string
s string
want string
wantErr bool
}{
{
name: "Convert hex string id",
s: "6b221d5bc9e6496c",
want: "7719764991332993388",
},
{
name: "error : id too long",
s: "1234567890abcdef1",
wantErr: true,
},
{
name: "error : not parsable",
s: "howdyhowdyhowdy",
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := IDFromString(tt.s)
if (err != nil) != tt.wantErr {
t.Errorf("IDFromString() error = %v, wantErr %v", err, tt.wantErr)
return
}
if got != tt.want {
t.Errorf("IDFromString() = %v, want %v", got, tt.want)
}
})
}
}


@ -0,0 +1,203 @@
package thrift
import (
"encoding/binary"
"fmt"
"net"
"strconv"
"time"
"github.com/influxdata/telegraf/plugins/inputs/zipkin/codec"
"github.com/apache/thrift/lib/go/thrift"
"github.com/openzipkin/zipkin-go-opentracing/_thrift/gen-go/zipkincore"
)
// UnmarshalThrift converts raw bytes in thrift format to a slice of spans
func UnmarshalThrift(body []byte) ([]*zipkincore.Span, error) {
buffer := thrift.NewTMemoryBuffer()
if _, err := buffer.Write(body); err != nil {
return nil, err
}
transport := thrift.NewTBinaryProtocolTransport(buffer)
_, size, err := transport.ReadListBegin()
if err != nil {
return nil, err
}
spans := make([]*zipkincore.Span, size)
for i := 0; i < size; i++ {
zs := &zipkincore.Span{}
if err = zs.Read(transport); err != nil {
return nil, err
}
spans[i] = zs
}
if err = transport.ReadListEnd(); err != nil {
return nil, err
}
return spans, nil
}
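// A minimal usage sketch, mirroring TestUnmarshalThrift below (the file
// path is illustrative):
//
//   body, err := ioutil.ReadFile("testdata/threespans.dat")
//   if err != nil {
//       // handle the read error
//   }
//   spans, err := UnmarshalThrift(body)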
// Thrift decodes binary data to create a Trace
type Thrift struct{}
// Decode unmarshals and validates bytes in thrift format
func (t *Thrift) Decode(octets []byte) ([]codec.Span, error) {
spans, err := UnmarshalThrift(octets)
if err != nil {
return nil, err
}
res := make([]codec.Span, len(spans))
for i, s := range spans {
res[i] = &span{s}
}
return res, nil
}
var _ codec.Endpoint = &endpoint{}
type endpoint struct {
*zipkincore.Endpoint
}
func (e *endpoint) Host() string {
ipv4 := func(addr int32) string {
buf := make([]byte, 4)
binary.BigEndian.PutUint32(buf, uint32(addr))
return net.IP(buf).String()
}
if e.Endpoint == nil {
return ipv4(int32(0))
}
if e.Endpoint.GetPort() == 0 {
return ipv4(e.Endpoint.GetIpv4())
}
// Zipkin uses a signed int16 for the port, but warns that it actually
// treats it as an unsigned int16. So we widen the int16 to int and mask
// with 0xffff to recover the unsigned value.
// https://github.com/openzipkin/zipkin/blob/57dc2ec9c65fe6144e401c0c933b4400463a69df/zipkin/src/main/java/zipkin/Endpoint.java#L44
return ipv4(e.Endpoint.GetIpv4()) + ":" + strconv.FormatInt(int64(int(e.Endpoint.GetPort())&0xffff), 10)
}
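// A worked example of the signed-to-unsigned conversion above: a thrift
// port of -1 widens to int and is masked, so int(-1) & 0xffff == 65535 and
// an Endpoint{Ipv4: 1234, Port: -1} renders as "0.0.4.210:65535" (see
// Test_endpointHost below).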
func (e *endpoint) Name() string {
if e.Endpoint == nil {
return codec.DefaultServiceName
}
return e.Endpoint.GetServiceName()
}
var _ codec.BinaryAnnotation = &binaryAnnotation{}
type binaryAnnotation struct {
*zipkincore.BinaryAnnotation
}
func (b *binaryAnnotation) Key() string {
return b.BinaryAnnotation.GetKey()
}
func (b *binaryAnnotation) Value() string {
return string(b.BinaryAnnotation.GetValue())
}
func (b *binaryAnnotation) Host() codec.Endpoint {
if b.BinaryAnnotation.Host == nil {
return nil
}
return &endpoint{b.BinaryAnnotation.Host}
}
var _ codec.Annotation = &annotation{}
type annotation struct {
*zipkincore.Annotation
}
func (a *annotation) Timestamp() time.Time {
ts := a.Annotation.GetTimestamp()
if ts == 0 {
return time.Time{}
}
return codec.MicroToTime(ts)
}
func (a *annotation) Value() string {
return a.Annotation.GetValue()
}
func (a *annotation) Host() codec.Endpoint {
if a.Annotation.Host == nil {
return nil
}
return &endpoint{a.Annotation.Host}
}
var _ codec.Span = &span{}
type span struct {
*zipkincore.Span
}
func (s *span) Trace() (string, error) {
if s.Span.GetTraceIDHigh() == 0 && s.Span.GetTraceID() == 0 {
return "", fmt.Errorf("Span does not have a trace ID")
}
if s.Span.GetTraceIDHigh() == 0 {
return fmt.Sprintf("%x", s.Span.GetTraceID()), nil
}
return fmt.Sprintf("%x%016x", s.Span.GetTraceIDHigh(), s.Span.GetTraceID()), nil
}
func (s *span) SpanID() (string, error) {
return formatID(s.Span.GetID()), nil
}
func (s *span) Parent() (string, error) {
id := s.Span.GetParentID()
if id != 0 {
return formatID(id), nil
}
return "", nil
}
func (s *span) Name() string {
return s.Span.GetName()
}
func (s *span) Annotations() []codec.Annotation {
res := make([]codec.Annotation, len(s.Span.Annotations))
for i := range s.Span.Annotations {
res[i] = &annotation{s.Span.Annotations[i]}
}
return res
}
func (s *span) BinaryAnnotations() ([]codec.BinaryAnnotation, error) {
res := make([]codec.BinaryAnnotation, len(s.Span.BinaryAnnotations))
for i := range s.Span.BinaryAnnotations {
res[i] = &binaryAnnotation{s.Span.BinaryAnnotations[i]}
}
return res, nil
}
func (s *span) Timestamp() time.Time {
ts := s.Span.GetTimestamp()
if ts == 0 {
return time.Time{}
}
return codec.MicroToTime(ts)
}
func (s *span) Duration() time.Duration {
return time.Duration(s.Span.GetDuration()) * time.Microsecond
}
func formatID(id int64) string {
return strconv.FormatInt(id, 10)
}


@ -0,0 +1,211 @@
package thrift
import (
"io/ioutil"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/openzipkin/zipkin-go-opentracing/_thrift/gen-go/zipkincore"
)
func Test_endpointHost(t *testing.T) {
type args struct {
h *zipkincore.Endpoint
}
tests := []struct {
name string
args args
want string
}{
{
name: "Host Found",
args: args{
h: &zipkincore.Endpoint{
Ipv4: 1234,
Port: 8888,
},
},
want: "0.0.4.210:8888",
},
{
name: "No Host",
args: args{
h: nil,
},
want: "0.0.0.0",
},
{
name: "int overflow zipkin uses an int16 type as an unsigned int 16.",
args: args{
h: &zipkincore.Endpoint{
Ipv4: 1234,
Port: -1,
},
},
want: "0.0.4.210:65535",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
e := endpoint{tt.args.h}
if got := e.Host(); got != tt.want {
t.Errorf("host() = %v, want %v", got, tt.want)
}
})
}
}
func Test_endpointName(t *testing.T) {
type args struct {
h *zipkincore.Endpoint
}
tests := []struct {
name string
args args
want string
}{
{
name: "Found ServiceName",
args: args{
h: &zipkincore.Endpoint{
ServiceName: "zipkin",
},
},
want: "zipkin",
},
{
name: "No ServiceName",
args: args{
h: nil,
},
want: "unknown",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
e := endpoint{tt.args.h}
if got := e.Name(); got != tt.want {
t.Errorf("serviceName() = %v, want %v", got, tt.want)
}
})
}
}
func TestUnmarshalThrift(t *testing.T) {
addr := func(i int64) *int64 { return &i }
tests := []struct {
name string
filename string
want []*zipkincore.Span
wantErr bool
}{
{
name: "threespans",
filename: "../../testdata/threespans.dat",
want: []*zipkincore.Span{
{
TraceID: 2505404965370368069,
Name: "Child",
ID: 8090652509916334619,
ParentID: addr(22964302721410078),
Timestamp: addr(1498688360851331),
Duration: addr(53106),
Annotations: []*zipkincore.Annotation{},
BinaryAnnotations: []*zipkincore.BinaryAnnotation{
&zipkincore.BinaryAnnotation{
Key: "lc",
AnnotationType: zipkincore.AnnotationType_STRING,
Value: []byte("trivial"),
Host: &zipkincore.Endpoint{
Ipv4: 2130706433,
ServiceName: "trivial",
},
},
},
},
{
TraceID: 2505404965370368069,
Name: "Child",
ID: 103618986556047333,
ParentID: addr(22964302721410078),
Timestamp: addr(1498688360904552),
Duration: addr(50410),
Annotations: []*zipkincore.Annotation{},
BinaryAnnotations: []*zipkincore.BinaryAnnotation{
&zipkincore.BinaryAnnotation{
Key: "lc",
AnnotationType: zipkincore.AnnotationType_STRING,
Value: []byte("trivial"),
Host: &zipkincore.Endpoint{
Ipv4: 2130706433,
ServiceName: "trivial",
},
},
},
},
{
TraceID: 2505404965370368069,
Name: "Parent",
ID: 22964302721410078,
Timestamp: addr(1498688360851318),
Duration: addr(103680),
Annotations: []*zipkincore.Annotation{
&zipkincore.Annotation{
Timestamp: 1498688360851325,
Value: "Starting child #0",
Host: &zipkincore.Endpoint{
Ipv4: 2130706433,
ServiceName: "trivial",
},
},
&zipkincore.Annotation{
Timestamp: 1498688360904545,
Value: "Starting child #1",
Host: &zipkincore.Endpoint{
Ipv4: 2130706433,
ServiceName: "trivial",
},
},
&zipkincore.Annotation{
Timestamp: 1498688360954992,
Value: "A Log",
Host: &zipkincore.Endpoint{
Ipv4: 2130706433,
ServiceName: "trivial",
},
},
},
BinaryAnnotations: []*zipkincore.BinaryAnnotation{
&zipkincore.BinaryAnnotation{
Key: "lc",
AnnotationType: zipkincore.AnnotationType_STRING,
Value: []byte("trivial"),
Host: &zipkincore.Endpoint{
Ipv4: 2130706433,
ServiceName: "trivial",
},
},
},
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
dat, err := ioutil.ReadFile(tt.filename)
if err != nil {
t.Fatalf("Could not find file %s\n", tt.filename)
}
got, err := UnmarshalThrift(dat)
if (err != nil) != tt.wantErr {
t.Errorf("UnmarshalThrift() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !cmp.Equal(tt.want, got) {
t.Errorf("UnmarshalThrift() got(-)/want(+): %s", cmp.Diff(tt.want, got))
}
})
}
}


@ -1,22 +1,10 @@
package zipkin
import (
"encoding/binary"
"fmt"
"net"
"strconv"
"time"
"github.com/influxdata/telegraf"
"github.com/openzipkin/zipkin-go-opentracing/_thrift/gen-go/zipkincore"
"github.com/influxdata/telegraf/plugins/inputs/zipkin/trace"
)
// DefaultServiceName when the span does not have any serviceName
const DefaultServiceName = "unknown"
// now is a mockable time.Now, so tests can substitute a fixed clock
var now = time.Now
// LineProtocolConverter implements the Recorder interface; it is a
// type meant to encapsulate the storage of zipkin tracing data in
// telegraf as line protocol.
@ -35,7 +23,7 @@ func NewLineProtocolConverter(acc telegraf.Accumulator) *LineProtocolConverter {
// Record is LineProtocolConverter's implementation of the Record method of
// the Recorder interface; it takes a trace as input, and adds it to an internal
// telegraf.Accumulator.
func (l *LineProtocolConverter) Record(t Trace) error {
func (l *LineProtocolConverter) Record(t trace.Trace) error {
for _, s := range t {
fields := map[string]interface{}{
"duration_ns": s.Duration.Nanoseconds(),
@ -83,167 +71,3 @@ func (l *LineProtocolConverter) Record(t Trace) error {
func (l *LineProtocolConverter) Error(err error) {
l.acc.AddError(err)
}
// NewTrace converts a slice of []*zipkincore.Spans into a new Trace
func NewTrace(spans []*zipkincore.Span) Trace {
trace := make(Trace, len(spans))
for i, span := range spans {
endpoint := serviceEndpoint(span.GetAnnotations(), span.GetBinaryAnnotations())
trace[i] = Span{
ID: formatID(span.GetID()),
TraceID: formatTraceID(span.GetTraceIDHigh(), span.GetTraceID()),
Name: span.GetName(),
Timestamp: guessTimestamp(span),
Duration: convertDuration(span),
ParentID: parentID(span),
ServiceName: serviceName(endpoint),
Annotations: NewAnnotations(span.GetAnnotations(), endpoint),
BinaryAnnotations: NewBinaryAnnotations(span.GetBinaryAnnotations(), endpoint),
}
}
return trace
}
// NewAnnotations converts a slice of *zipkincore.Annotation into a slice
// of new Annotations
func NewAnnotations(annotations []*zipkincore.Annotation, endpoint *zipkincore.Endpoint) []Annotation {
formatted := make([]Annotation, len(annotations))
for i, annotation := range annotations {
formatted[i] = Annotation{
Host: host(endpoint),
ServiceName: serviceName(endpoint),
Timestamp: microToTime(annotation.GetTimestamp()),
Value: annotation.GetValue(),
}
}
return formatted
}
// NewBinaryAnnotations is very similar to NewAnnotations, but it
// converts zipkincore.BinaryAnnotations instead of the normal zipkincore.Annotation
func NewBinaryAnnotations(annotations []*zipkincore.BinaryAnnotation, endpoint *zipkincore.Endpoint) []BinaryAnnotation {
formatted := make([]BinaryAnnotation, len(annotations))
for i, annotation := range annotations {
formatted[i] = BinaryAnnotation{
Host: host(endpoint),
ServiceName: serviceName(endpoint),
Key: annotation.GetKey(),
Value: string(annotation.GetValue()),
Type: annotation.GetAnnotationType().String(),
}
}
return formatted
}
func microToTime(micro int64) time.Time {
return time.Unix(0, micro*int64(time.Microsecond)).UTC()
}
func formatID(id int64) string {
return strconv.FormatInt(id, 10)
}
func formatTraceID(high, low int64) string {
if high == 0 {
return fmt.Sprintf("%x", low)
}
return fmt.Sprintf("%x%016x", high, low)
}
func minMax(span *zipkincore.Span) (time.Time, time.Time) {
min := now().UTC()
max := time.Time{}.UTC()
for _, annotation := range span.Annotations {
ts := microToTime(annotation.GetTimestamp())
if !ts.IsZero() && ts.Before(min) {
min = ts
}
if !ts.IsZero() && ts.After(max) {
max = ts
}
}
if max.IsZero() {
max = min
}
return min, max
}
func guessTimestamp(span *zipkincore.Span) time.Time {
if span.GetTimestamp() != 0 {
return microToTime(span.GetTimestamp())
}
min, _ := minMax(span)
return min
}
func convertDuration(span *zipkincore.Span) time.Duration {
duration := time.Duration(span.GetDuration()) * time.Microsecond
if duration != 0 {
return duration
}
min, max := minMax(span)
return max.Sub(min)
}
func parentID(span *zipkincore.Span) string {
// A parent ID of 0 means that this is a parent span. In this case,
// we set the parent ID of the span to be its own id, so it points to
// itself.
id := span.GetParentID()
if id != 0 {
return formatID(id)
}
return formatID(span.ID)
}
func ipv4(addr int32) string {
buf := make([]byte, 4)
binary.BigEndian.PutUint32(buf, uint32(addr))
return net.IP(buf).String()
}
func host(h *zipkincore.Endpoint) string {
if h == nil {
return ipv4(int32(0))
}
if h.GetPort() == 0 {
return ipv4(h.GetIpv4())
}
// Zipkin uses a signed int16 for the port, but warns that it actually
// treats it as an unsigned int16. So we widen the int16 to int and mask
// with 0xffff to recover the unsigned value.
// https://github.com/openzipkin/zipkin/blob/57dc2ec9c65fe6144e401c0c933b4400463a69df/zipkin/src/main/java/zipkin/Endpoint.java#L44
return ipv4(h.GetIpv4()) + ":" + strconv.FormatInt(int64(int(h.GetPort())&0xffff), 10)
}
func serviceName(h *zipkincore.Endpoint) string {
if h == nil {
return DefaultServiceName
}
return h.GetServiceName()
}
func serviceEndpoint(ann []*zipkincore.Annotation, bann []*zipkincore.BinaryAnnotation) *zipkincore.Endpoint {
for _, a := range ann {
switch a.Value {
case zipkincore.SERVER_RECV, zipkincore.SERVER_SEND, zipkincore.CLIENT_RECV, zipkincore.CLIENT_SEND:
if a.Host != nil && a.Host.ServiceName != "" {
return a.Host
}
}
}
for _, a := range bann {
if a.Key == zipkincore.LOCAL_COMPONENT && a.Host != nil && a.Host.ServiceName != "" {
return a.Host
}
}
// Unable to find any "standard" endpoint host, so use any that exists in the regular annotations
for _, a := range ann {
if a.Host != nil && a.Host.ServiceName != "" {
return a.Host
}
}
return nil
}


@ -1,14 +1,13 @@
package zipkin
import (
"reflect"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs/zipkin/trace"
"github.com/influxdata/telegraf/testutil"
"github.com/openzipkin/zipkin-go-opentracing/_thrift/gen-go/zipkincore"
)
func TestLineProtocolConverter_Record(t *testing.T) {
@ -17,7 +16,7 @@ func TestLineProtocolConverter_Record(t *testing.T) {
acc telegraf.Accumulator
}
type args struct {
t Trace
t trace.Trace
}
tests := []struct {
name string
@ -32,8 +31,8 @@ func TestLineProtocolConverter_Record(t *testing.T) {
acc: &mockAcc,
},
args: args{
t: Trace{
Span{
t: trace.Trace{
{
ID: "8090652509916334619",
TraceID: "2505404965370368069",
Name: "Child",
@ -41,18 +40,17 @@ func TestLineProtocolConverter_Record(t *testing.T) {
Timestamp: time.Unix(0, 1498688360851331000).UTC(),
Duration: time.Duration(53106) * time.Microsecond,
ServiceName: "trivial",
Annotations: []Annotation{},
BinaryAnnotations: []BinaryAnnotation{
BinaryAnnotation{
Annotations: []trace.Annotation{},
BinaryAnnotations: []trace.BinaryAnnotation{
{
Key: "lc",
Value: "dHJpdmlhbA==",
Host: "2130706433:0",
ServiceName: "trivial",
Type: "STRING",
},
},
},
Span{
{
ID: "103618986556047333",
TraceID: "2505404965370368069",
Name: "Child",
@ -60,18 +58,17 @@ func TestLineProtocolConverter_Record(t *testing.T) {
Timestamp: time.Unix(0, 1498688360904552000).UTC(),
Duration: time.Duration(50410) * time.Microsecond,
ServiceName: "trivial",
Annotations: []Annotation{},
BinaryAnnotations: []BinaryAnnotation{
BinaryAnnotation{
Annotations: []trace.Annotation{},
BinaryAnnotations: []trace.BinaryAnnotation{
{
Key: "lc",
Value: "dHJpdmlhbA==",
Host: "2130706433:0",
ServiceName: "trivial",
Type: "STRING",
},
},
},
Span{
{
ID: "22964302721410078",
TraceID: "2505404965370368069",
Name: "Parent",
@ -79,33 +76,32 @@ func TestLineProtocolConverter_Record(t *testing.T) {
Timestamp: time.Unix(0, 1498688360851318000).UTC(),
Duration: time.Duration(103680) * time.Microsecond,
ServiceName: "trivial",
Annotations: []Annotation{
Annotation{
Annotations: []trace.Annotation{
{
Timestamp: time.Unix(0, 1498688360851325000).UTC(),
Value: "Starting child #0",
Host: "2130706433:0",
ServiceName: "trivial",
},
Annotation{
{
Timestamp: time.Unix(0, 1498688360904545000).UTC(),
Value: "Starting child #1",
Host: "2130706433:0",
ServiceName: "trivial",
},
Annotation{
{
Timestamp: time.Unix(0, 1498688360954992000).UTC(),
Value: "A Log",
Host: "2130706433:0",
ServiceName: "trivial",
},
},
BinaryAnnotations: []BinaryAnnotation{
BinaryAnnotation{
BinaryAnnotations: []trace.BinaryAnnotation{
{
Key: "lc",
Value: "dHJpdmlhbA==",
Host: "2130706433:0",
ServiceName: "trivial",
Type: "STRING",
},
},
},
@ -265,8 +261,8 @@ func TestLineProtocolConverter_Record(t *testing.T) {
acc: &mockAcc,
},
args: args{
t: Trace{
Span{
t: trace.Trace{
{
ID: "6802735349851856000",
TraceID: "0:6802735349851856000",
Name: "main.dud",
@ -274,15 +270,15 @@ func TestLineProtocolConverter_Record(t *testing.T) {
Timestamp: time.Unix(1, 0).UTC(),
Duration: 1,
ServiceName: "trivial",
Annotations: []Annotation{
Annotation{
Annotations: []trace.Annotation{
{
Timestamp: time.Unix(0, 1433330263415871000).UTC(),
Value: "cs",
Host: "0:9410",
ServiceName: "go-zipkin-testclient",
},
},
BinaryAnnotations: []BinaryAnnotation{},
BinaryAnnotations: []trace.BinaryAnnotation{},
},
},
},
@ -339,206 +335,3 @@ func TestLineProtocolConverter_Record(t *testing.T) {
})
}
}
func Test_microToTime(t *testing.T) {
type args struct {
micro int64
}
tests := []struct {
name string
args args
want time.Time
}{
{
name: "given zero micro seconds expected unix time zero",
args: args{
micro: 0,
},
want: time.Unix(0, 0).UTC(),
},
{
name: "given a million micro seconds expected unix time one",
args: args{
micro: 1000000,
},
want: time.Unix(1, 0).UTC(),
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := microToTime(tt.args.micro); !reflect.DeepEqual(got, tt.want) {
t.Errorf("microToTime() = %v, want %v", got, tt.want)
}
})
}
}
func newAnnotation(micro int64) *zipkincore.Annotation {
return &zipkincore.Annotation{
Timestamp: micro,
}
}
func Test_minMax(t *testing.T) {
type args struct {
span *zipkincore.Span
}
tests := []struct {
name string
args args
now func() time.Time
wantMin time.Time
wantMax time.Time
}{
{
name: "Single annotation",
args: args{
span: &zipkincore.Span{
Annotations: []*zipkincore.Annotation{
newAnnotation(1000000),
},
},
},
wantMin: time.Unix(1, 0).UTC(),
wantMax: time.Unix(1, 0).UTC(),
},
{
name: "Three annotations",
args: args{
span: &zipkincore.Span{
Annotations: []*zipkincore.Annotation{
newAnnotation(1000000),
newAnnotation(2000000),
newAnnotation(3000000),
},
},
},
wantMin: time.Unix(1, 0).UTC(),
wantMax: time.Unix(3, 0).UTC(),
},
{
name: "Annotations are in the future",
args: args{
span: &zipkincore.Span{
Annotations: []*zipkincore.Annotation{
newAnnotation(3000000),
},
},
},
wantMin: time.Unix(2, 0).UTC(),
wantMax: time.Unix(3, 0).UTC(),
now: func() time.Time {
return time.Unix(2, 0).UTC()
},
},
{
name: "No Annotations",
args: args{
span: &zipkincore.Span{
Annotations: []*zipkincore.Annotation{},
},
},
wantMin: time.Unix(2, 0).UTC(),
wantMax: time.Unix(2, 0).UTC(),
now: func() time.Time {
return time.Unix(2, 0).UTC()
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if tt.now != nil {
now = tt.now
}
got, got1 := minMax(tt.args.span)
if !reflect.DeepEqual(got, tt.wantMin) {
t.Errorf("minMax() got = %v, want %v", got, tt.wantMin)
}
if !reflect.DeepEqual(got1, tt.wantMax) {
t.Errorf("minMax() got1 = %v, want %v", got1, tt.wantMax)
}
now = time.Now
})
}
}
func Test_host(t *testing.T) {
type args struct {
h *zipkincore.Endpoint
}
tests := []struct {
name string
args args
want string
}{
{
name: "Host Found",
args: args{
h: &zipkincore.Endpoint{
Ipv4: 1234,
Port: 8888,
},
},
want: "0.0.4.210:8888",
},
{
name: "No Host",
args: args{
h: nil,
},
want: "0.0.0.0",
},
{
name: "int overflow zipkin uses an int16 type as an unsigned int 16.",
args: args{
h: &zipkincore.Endpoint{
Ipv4: 1234,
Port: -1,
},
},
want: "0.0.4.210:65535",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := host(tt.args.h); got != tt.want {
t.Errorf("host() = %v, want %v", got, tt.want)
}
})
}
}
func Test_serviceName(t *testing.T) {
type args struct {
h *zipkincore.Endpoint
}
tests := []struct {
name string
args args
want string
}{
{
name: "Found ServiceName",
args: args{
h: &zipkincore.Endpoint{
ServiceName: "zipkin",
},
},
want: "zipkin",
},
{
name: "No ServiceName",
args: args{
h: nil,
},
want: "unknown",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := serviceName(tt.args.h); got != tt.want {
t.Errorf("serviceName() = %v, want %v", got, tt.want)
}
})
}
}


@ -2,22 +2,23 @@ package zipkin
import (
"compress/gzip"
"fmt"
"io/ioutil"
"mime"
"net/http"
"strings"
"sync"
"github.com/apache/thrift/lib/go/thrift"
"github.com/gorilla/mux"
"github.com/openzipkin/zipkin-go-opentracing/_thrift/gen-go/zipkincore"
"github.com/influxdata/telegraf/plugins/inputs/zipkin/codec"
"github.com/influxdata/telegraf/plugins/inputs/zipkin/codec/jsonV1"
"github.com/influxdata/telegraf/plugins/inputs/zipkin/codec/thrift"
)
// SpanHandler is an implementation of a Handler which accepts zipkin span
// data (JSON or thrift) and sends it to the recorder
type SpanHandler struct {
Path string
recorder Recorder
waitGroup *sync.WaitGroup
Path string
recorder Recorder
}
// NewSpanHandler returns a new server instance given path to handle
@ -81,6 +82,12 @@ func (s *SpanHandler) Spans(w http.ResponseWriter, r *http.Request) {
defer body.Close()
}
decoder, err := ContentDecoder(r)
if err != nil {
s.recorder.Error(err)
w.WriteHeader(http.StatusUnsupportedMediaType)
return
}
octets, err := ioutil.ReadAll(body)
if err != nil {
s.recorder.Error(err)
@ -88,14 +95,19 @@ func (s *SpanHandler) Spans(w http.ResponseWriter, r *http.Request) {
return
}
spans, err := unmarshalThrift(octets)
spans, err := decoder.Decode(octets)
if err != nil {
s.recorder.Error(err)
w.WriteHeader(http.StatusInternalServerError)
w.WriteHeader(http.StatusBadRequest)
return
}
trace := NewTrace(spans)
trace, err := codec.NewTrace(spans)
if err != nil {
s.recorder.Error(err)
w.WriteHeader(http.StatusBadRequest)
return
}
if err = s.recorder.Record(trace); err != nil {
s.recorder.Error(err)
@ -106,30 +118,25 @@ func (s *SpanHandler) Spans(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusNoContent)
}
func unmarshalThrift(body []byte) ([]*zipkincore.Span, error) {
buffer := thrift.NewTMemoryBuffer()
if _, err := buffer.Write(body); err != nil {
return nil, err
// ContentDecoder returns a Decoder that is able to produce Traces from bytes.
// Failure should yield an HTTP 415 (`http.StatusUnsupportedMediaType`).
// If a Content-Type is not set, zipkin assumes application/json.
func ContentDecoder(r *http.Request) (codec.Decoder, error) {
contentType := r.Header.Get("Content-Type")
if contentType == "" {
return &jsonV1.JSON{}, nil
}
transport := thrift.NewTBinaryProtocolTransport(buffer)
_, size, err := transport.ReadListBegin()
if err != nil {
return nil, err
}
spans := make([]*zipkincore.Span, size)
for i := 0; i < size; i++ {
zs := &zipkincore.Span{}
if err = zs.Read(transport); err != nil {
return nil, err
for _, v := range strings.Split(contentType, ",") {
t, _, err := mime.ParseMediaType(v)
if err != nil {
break
}
if t == "application/json" {
return &jsonV1.JSON{}, nil
} else if t == "application/x-thrift" {
return &thrift.Thrift{}, nil
}
spans[i] = zs
}
if err = transport.ReadListEnd(); err != nil {
return nil, err
}
return spans, nil
return nil, fmt.Errorf("Unknown Content-Type: %s", contentType)
}
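// A hypothetical caller, for illustration only (body is any io.Reader):
//
//   r, _ := http.NewRequest("POST", "/api/v1/spans", body)
//   r.Header.Set("Content-Type", "application/x-thrift")
//   decoder, err := ContentDecoder(r) // returns &thrift.Thrift{}, nil
//
// With no Content-Type header at all, ContentDecoder falls back to
// &jsonV1.JSON{}.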


@ -10,14 +10,15 @@ import (
"time"
"github.com/google/go-cmp/cmp"
"github.com/influxdata/telegraf/plugins/inputs/zipkin/trace"
)
type MockRecorder struct {
Data Trace
Data trace.Trace
Err error
}
func (m *MockRecorder) Record(t Trace) error {
func (m *MockRecorder) Record(t trace.Trace) error {
m.Data = t
return nil
}
@ -39,6 +40,7 @@ func TestSpanHandler(t *testing.T) {
ioutil.NopCloser(
bytes.NewReader(dat)))
r.Header.Set("Content-Type", "application/x-thrift")
handler := NewSpanHandler("/api/v1/spans")
mockRecorder := &MockRecorder{}
handler.recorder = mockRecorder
@ -51,8 +53,8 @@ func TestSpanHandler(t *testing.T) {
got := mockRecorder.Data
parentID := strconv.FormatInt(22964302721410078, 10)
want := Trace{
Span{
want := trace.Trace{
{
Name: "Child",
ID: "8090652509916334619",
TraceID: "22c4fc8ab3669045",
@ -60,18 +62,17 @@ func TestSpanHandler(t *testing.T) {
Timestamp: time.Unix(0, 1498688360851331*int64(time.Microsecond)).UTC(),
Duration: time.Duration(53106) * time.Microsecond,
ServiceName: "trivial",
Annotations: []Annotation{},
BinaryAnnotations: []BinaryAnnotation{
BinaryAnnotation{
Annotations: []trace.Annotation{},
BinaryAnnotations: []trace.BinaryAnnotation{
{
Key: "lc",
Value: "trivial",
Host: "127.0.0.1",
ServiceName: "trivial",
Type: "STRING",
},
},
},
Span{
{
Name: "Child",
ID: "103618986556047333",
TraceID: "22c4fc8ab3669045",
@ -79,18 +80,17 @@ func TestSpanHandler(t *testing.T) {
Timestamp: time.Unix(0, 1498688360904552*int64(time.Microsecond)).UTC(),
Duration: time.Duration(50410) * time.Microsecond,
ServiceName: "trivial",
Annotations: []Annotation{},
BinaryAnnotations: []BinaryAnnotation{
BinaryAnnotation{
Annotations: []trace.Annotation{},
BinaryAnnotations: []trace.BinaryAnnotation{
{
Key: "lc",
Value: "trivial",
Host: "127.0.0.1",
ServiceName: "trivial",
Type: "STRING",
},
},
},
Span{
{
Name: "Parent",
ID: "22964302721410078",
TraceID: "22c4fc8ab3669045",
@ -98,33 +98,32 @@ func TestSpanHandler(t *testing.T) {
Timestamp: time.Unix(0, 1498688360851318*int64(time.Microsecond)).UTC(),
Duration: time.Duration(103680) * time.Microsecond,
ServiceName: "trivial",
Annotations: []Annotation{
Annotation{
Annotations: []trace.Annotation{
{
Timestamp: time.Unix(0, 1498688360851325*int64(time.Microsecond)).UTC(),
Value: "Starting child #0",
Host: "127.0.0.1",
ServiceName: "trivial",
},
Annotation{
{
Timestamp: time.Unix(0, 1498688360904545*int64(time.Microsecond)).UTC(),
Value: "Starting child #1",
Host: "127.0.0.1",
ServiceName: "trivial",
},
Annotation{
{
Timestamp: time.Unix(0, 1498688360954992*int64(time.Microsecond)).UTC(),
Value: "A Log",
Host: "127.0.0.1",
ServiceName: "trivial",
},
},
BinaryAnnotations: []BinaryAnnotation{
BinaryAnnotation{
BinaryAnnotations: []trace.BinaryAnnotation{
{
Key: "lc",
Value: "trivial",
Host: "127.0.0.1",
ServiceName: "trivial",
Type: "STRING",
},
},
},


@ -0,0 +1,188 @@
[
{
"traceId": "7312f822d43d0fd8",
"id": "b26412d1ac16767d",
"name": "http:/hi2",
"parentId": "7312f822d43d0fd8",
"annotations": [
{
"timestamp": 1503031538791000,
"value": "sr",
"endpoint": {
"serviceName": "test",
"ipv4": "192.168.0.8",
"port": 8010
}
},
{
"timestamp": 1503031538794000,
"value": "ss",
"endpoint": {
"serviceName": "test",
"ipv4": "192.168.0.8",
"port": 8010
}
}
],
"binaryAnnotations": [
{
"key": "mvc.controller.class",
"value": "Demo2Application",
"endpoint": {
"serviceName": "test",
"ipv4": "192.168.0.8",
"port": 8010
}
},
{
"key": "mvc.controller.method",
"value": "hi2",
"endpoint": {
"serviceName": "test",
"ipv4": "192.168.0.8",
"port": 8010
}
},
{
"key": "spring.instance_id",
"value": "192.168.0.8:test:8010",
"endpoint": {
"serviceName": "test",
"ipv4": "192.168.0.8",
"port": 8010
}
}
]
},
{
"traceId": "7312f822d43d0fd8",
"id": "b26412d1ac16767d",
"name": "http:/hi2",
"parentId": "7312f822d43d0fd8",
"timestamp": 1503031538786000,
"duration": 10000,
"annotations": [
{
"timestamp": 1503031538786000,
"value": "cs",
"endpoint": {
"serviceName": "test",
"ipv4": "192.168.0.8",
"port": 8010
}
},
{
"timestamp": 1503031538796000,
"value": "cr",
"endpoint": {
"serviceName": "test",
"ipv4": "192.168.0.8",
"port": 8010
}
}
],
"binaryAnnotations": [
{
"key": "http.host",
"value": "localhost",
"endpoint": {
"serviceName": "test",
"ipv4": "192.168.0.8",
"port": 8010
}
},
{
"key": "http.method",
"value": "GET",
"endpoint": {
"serviceName": "test",
"ipv4": "192.168.0.8",
"port": 8010
}
},
{
"key": "http.path",
"value": "/hi2",
"endpoint": {
"serviceName": "test",
"ipv4": "192.168.0.8",
"port": 8010
}
},
{
"key": "http.url",
"value": "http://localhost:8010/hi2",
"endpoint": {
"serviceName": "test",
"ipv4": "192.168.0.8",
"port": 8010
}
},
{
"key": "spring.instance_id",
"value": "192.168.0.8:test:8010",
"endpoint": {
"serviceName": "test",
"ipv4": "192.168.0.8",
"port": 8010
}
}
]
},
{
"traceId": "7312f822d43d0fd8",
"id": "7312f822d43d0fd8",
"name": "http:/hi",
"timestamp": 1503031538778000,
"duration": 23393,
"annotations": [
{
"timestamp": 1503031538778000,
"value": "sr",
"endpoint": {
"serviceName": "test",
"ipv4": "192.168.0.8",
"port": 8010
}
},
{
"timestamp": 1503031538801000,
"value": "ss",
"endpoint": {
"serviceName": "test",
"ipv4": "192.168.0.8",
"port": 8010
}
}
],
"binaryAnnotations": [
{
"key": "mvc.controller.class",
"value": "Demo2Application",
"endpoint": {
"serviceName": "test",
"ipv4": "192.168.0.8",
"port": 8010
}
},
{
"key": "mvc.controller.method",
"value": "hi",
"endpoint": {
"serviceName": "test",
"ipv4": "192.168.0.8",
"port": 8010
}
},
{
"key": "spring.instance_id",
"value": "192.168.0.8:test:8010",
"endpoint": {
"serviceName": "test",
"ipv4": "192.168.0.8",
"port": 8010
}
}
]
}
]


@ -0,0 +1,41 @@
package trace
import (
"time"
)
// Trace is an array (or a series) of spans
type Trace []Span
// Span represents a specific zipkin span. It holds the majority of the same
// data as a zipkin span sent via the thrift protocol, but is presented in a
// format which is more straightforward for storage purposes.
type Span struct {
ID string
TraceID string // zipkin traceid high concat with traceid
Name string
ParentID string
ServiceName string
Timestamp time.Time // time.Now() is used if the zipkin timestamp is missing
Duration time.Duration
Annotations []Annotation
BinaryAnnotations []BinaryAnnotation
}
// BinaryAnnotation represents a zipkin binary annotation. It contains
// all of the same fields as might be found in its zipkin counterpart.
type BinaryAnnotation struct {
Key string
Value string
Host string // annotation.endpoint.ipv4 + ":" + annotation.endpoint.port
ServiceName string
}
// Annotation represents an ordinary zipkin annotation. It contains the data fields
// which will become fields/tags in influxdb
type Annotation struct {
Timestamp time.Time
Value string
Host string // annotation.endpoint.ipv4 + ":" + annotation.endpoint.port
ServiceName string
}
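// A minimal trace.Trace literal built from these types, with values
// borrowed from the handler tests (illustrative only):
//
//   t := trace.Trace{
//       {
//           ID:          "8090652509916334619",
//           TraceID:     "22c4fc8ab3669045",
//           Name:        "Child",
//           ServiceName: "trivial",
//           Timestamp:   time.Unix(0, 1498688360851331000).UTC(),
//           Duration:    53106 * time.Microsecond,
//       },
//   }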


@ -8,11 +8,11 @@ import (
"net/http"
"strconv"
"sync"
"time"
"github.com/gorilla/mux"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/plugins/inputs/zipkin/trace"
)
const (
@ -32,7 +32,7 @@ const (
// Recorder represents a type which can record zipkin trace data as well as
// any accompanying errors, and process that data.
type Recorder interface {
Record(Trace) error
Record(trace.Trace) error
Error(error)
}
@ -42,43 +42,6 @@ type Handler interface {
Register(router *mux.Router, recorder Recorder) error
}
// BinaryAnnotation represents a zipkin binary annotation. It contains
// all of the same fields as might be found in its zipkin counterpart.
type BinaryAnnotation struct {
Key string
Value string
Host string // annotation.endpoint.ipv4 + ":" + annotation.endpoint.port
ServiceName string
Type string
}
// Annotation represents an ordinary zipkin annotation. It contains the data fields
// which will become fields/tags in influxdb
type Annotation struct {
Timestamp time.Time
Value string
Host string // annotation.endpoint.ipv4 + ":" + annotation.endpoint.port
ServiceName string
}
//Span represents a specific zipkin span. It holds the majority of the same
// data as a zipkin span sent via the thrift protocol, but is presented in a
// format which is more straightforward for storage purposes.
type Span struct {
ID string
TraceID string // zipkin traceid high concat with traceid
Name string
ParentID string
ServiceName string
Timestamp time.Time // If zipkin input is nil then time.Now()
Duration time.Duration
Annotations []Annotation
BinaryAnnotations []BinaryAnnotation
}
// Trace is an array (or a series) of spans
type Trace []Span
const sampleConfig = `
# path = "/api/v1/spans" # URL path for span data
# port = 9411 # Port on which Telegraf listens
@ -122,7 +85,9 @@ func (z *Zipkin) Start(acc telegraf.Accumulator) error {
router := mux.NewRouter()
converter := NewLineProtocolConverter(acc)
z.handler.Register(router, converter)
if err := z.handler.Register(router, converter); err != nil {
return err
}
z.server = &http.Server{
Handler: router,


@ -16,14 +16,16 @@ func TestZipkinPlugin(t *testing.T) {
mockAcc := testutil.Accumulator{}
tests := []struct {
name string
thriftDataFile string //path name to a binary thrift data file which contains test data
wantErr bool
want []testutil.Metric
name string
datafile string // data file which contains test data
contentType string
wantErr bool
want []testutil.Metric
}{
{
name: "threespan",
thriftDataFile: "testdata/threespans.dat",
name: "threespan",
datafile: "testdata/threespans.dat",
contentType: "application/x-thrift",
want: []testutil.Metric{
testutil.Metric{
Measurement: "zipkin",
@ -170,8 +172,9 @@ func TestZipkinPlugin(t *testing.T) {
wantErr: false,
},
{
name: "distributed_trace_sample",
thriftDataFile: "testdata/distributed_trace_sample.dat",
name: "distributed_trace_sample",
datafile: "testdata/distributed_trace_sample.dat",
contentType: "application/x-thrift",
want: []testutil.Metric{
testutil.Metric{
Measurement: "zipkin",
@ -185,7 +188,6 @@ func TestZipkinPlugin(t *testing.T) {
Fields: map[string]interface{}{
"duration_ns": (time.Duration(1) * time.Microsecond).Nanoseconds(),
},
//Time: time.Unix(1, 0).UTC(),
Time: time.Unix(0, 1433330263415871*int64(time.Microsecond)).UTC(),
},
testutil.Metric{
@ -202,7 +204,6 @@ func TestZipkinPlugin(t *testing.T) {
Fields: map[string]interface{}{
"duration_ns": (time.Duration(1) * time.Microsecond).Nanoseconds(),
},
//Time: time.Unix(1, 0).UTC(),
Time: time.Unix(0, 1433330263415871*int64(time.Microsecond)).UTC(),
},
testutil.Metric{
@ -223,6 +224,337 @@ func TestZipkinPlugin(t *testing.T) {
},
},
},
{
name: "JSON rather than thrift",
datafile: "testdata/json/brave-tracer-example.json",
contentType: "application/json",
want: []testutil.Metric{
{
Measurement: "zipkin",
Tags: map[string]string{
"id": "12854419928166856317",
"name": "http:/hi2",
"parent_id": "8291962692415852504",
"service_name": "test",
"trace_id": "7312f822d43d0fd8",
},
Fields: map[string]interface{}{
"duration_ns": int64(3000000),
},
Time: time.Unix(0, 1503031538791000*int64(time.Microsecond)).UTC(),
},
{
Measurement: "zipkin",
Tags: map[string]string{
"annotation": "sr",
"endpoint_host": "192.168.0.8:8010",
"id": "12854419928166856317",
"name": "http:/hi2",
"parent_id": "8291962692415852504",
"service_name": "test",
"trace_id": "7312f822d43d0fd8",
},
Fields: map[string]interface{}{
"duration_ns": int64(3000000),
},
Time: time.Unix(0, 1503031538791000*int64(time.Microsecond)).UTC(),
},
{
Measurement: "zipkin",
Tags: map[string]string{
"annotation": "ss",
"endpoint_host": "192.168.0.8:8010",
"id": "12854419928166856317",
"name": "http:/hi2",
"parent_id": "8291962692415852504",
"service_name": "test",
"trace_id": "7312f822d43d0fd8",
},
Fields: map[string]interface{}{
"duration_ns": int64(3000000),
},
Time: time.Unix(0, 1503031538791000*int64(time.Microsecond)).UTC(),
},
{
Measurement: "zipkin",
Tags: map[string]string{
"annotation": "Demo2Application",
"annotation_key": "mvc.controller.class",
"endpoint_host": "192.168.0.8:8010",
"id": "12854419928166856317",
"name": "http:/hi2",
"parent_id": "8291962692415852504",
"service_name": "test",
"trace_id": "7312f822d43d0fd8",
},
Fields: map[string]interface{}{
"duration_ns": int64(3000000),
},
Time: time.Unix(0, 1503031538791000*int64(time.Microsecond)).UTC(),
},
{
Measurement: "zipkin",
Tags: map[string]string{
"annotation": "hi2",
"annotation_key": "mvc.controller.method",
"endpoint_host": "192.168.0.8:8010",
"id": "12854419928166856317",
"name": "http:/hi2",
"parent_id": "8291962692415852504",
"service_name": "test",
"trace_id": "7312f822d43d0fd8",
},
Fields: map[string]interface{}{
"duration_ns": int64(3000000),
},
Time: time.Unix(0, 1503031538791000*int64(time.Microsecond)).UTC(),
},
{
Measurement: "zipkin",
Tags: map[string]string{
"annotation": "192.168.0.8:test:8010",
"annotation_key": "spring.instance_id",
"endpoint_host": "192.168.0.8:8010",
"id": "12854419928166856317",
"name": "http:/hi2",
"parent_id": "8291962692415852504",
"service_name": "test",
"trace_id": "7312f822d43d0fd8",
},
Fields: map[string]interface{}{
"duration_ns": int64(3000000),
},
Time: time.Unix(0, 1503031538791000*int64(time.Microsecond)).UTC(),
},
{
Measurement: "zipkin",
Tags: map[string]string{
"id": "12854419928166856317",
"name": "http:/hi2",
"parent_id": "8291962692415852504",
"service_name": "test",
"trace_id": "7312f822d43d0fd8",
},
Fields: map[string]interface{}{
"duration_ns": int64(10000000),
},
Time: time.Unix(0, 1503031538786000*int64(time.Microsecond)).UTC(),
},
{
Measurement: "zipkin",
Tags: map[string]string{
"annotation": "cs",
"endpoint_host": "192.168.0.8:8010",
"id": "12854419928166856317",
"name": "http:/hi2",
"parent_id": "8291962692415852504",
"service_name": "test",
"trace_id": "7312f822d43d0fd8",
},
Fields: map[string]interface{}{
"duration_ns": int64(10000000),
},
Time: time.Unix(0, 1503031538786000*int64(time.Microsecond)).UTC(),
},
{
Measurement: "zipkin",
Tags: map[string]string{
"annotation": "cr",
"endpoint_host": "192.168.0.8:8010",
"id": "12854419928166856317",
"name": "http:/hi2",
"parent_id": "8291962692415852504",
"service_name": "test",
"trace_id": "7312f822d43d0fd8",
},
Fields: map[string]interface{}{
"duration_ns": int64(10000000),
},
Time: time.Unix(0, 1503031538786000*int64(time.Microsecond)).UTC(),
},
{
Measurement: "zipkin",
Tags: map[string]string{
"annotation": "localhost",
"annotation_key": "http.host",
"endpoint_host": "192.168.0.8:8010",
"id": "12854419928166856317",
"name": "http:/hi2",
"parent_id": "8291962692415852504",
"service_name": "test",
"trace_id": "7312f822d43d0fd8",
},
Fields: map[string]interface{}{
"duration_ns": int64(10000000),
},
Time: time.Unix(0, 1503031538786000*int64(time.Microsecond)).UTC(),
},
{
Measurement: "zipkin",
Tags: map[string]string{
"annotation": "GET",
"annotation_key": "http.method",
"endpoint_host": "192.168.0.8:8010",
"id": "12854419928166856317",
"name": "http:/hi2",
"parent_id": "8291962692415852504",
"service_name": "test",
"trace_id": "7312f822d43d0fd8",
},
Fields: map[string]interface{}{
"duration_ns": int64(10000000),
},
Time: time.Unix(0, 1503031538786000*int64(time.Microsecond)).UTC(),
},
{
Measurement: "zipkin",
Tags: map[string]string{
"annotation": "/hi2",
"annotation_key": "http.path",
"endpoint_host": "192.168.0.8:8010",
"id": "12854419928166856317",
"name": "http:/hi2",
"parent_id": "8291962692415852504",
"service_name": "test",
"trace_id": "7312f822d43d0fd8",
},
Fields: map[string]interface{}{
"duration_ns": int64(10000000),
},
Time: time.Unix(0, 1503031538786000*int64(time.Microsecond)).UTC(),
},
{
Measurement: "zipkin",
Tags: map[string]string{
"annotation": "http://localhost:8010/hi2",
"annotation_key": "http.url",
"endpoint_host": "192.168.0.8:8010",
"id": "12854419928166856317",
"name": "http:/hi2",
"parent_id": "8291962692415852504",
"service_name": "test",
"trace_id": "7312f822d43d0fd8",
},
Fields: map[string]interface{}{
"duration_ns": int64(10000000),
},
Time: time.Unix(0, 1503031538786000*int64(time.Microsecond)).UTC(),
},
{
Measurement: "zipkin",
Tags: map[string]string{
"annotation": "192.168.0.8:test:8010",
"annotation_key": "spring.instance_id",
"endpoint_host": "192.168.0.8:8010",
"id": "12854419928166856317",
"name": "http:/hi2",
"parent_id": "8291962692415852504",
"service_name": "test",
"trace_id": "7312f822d43d0fd8",
},
Fields: map[string]interface{}{
"duration_ns": int64(10000000),
},
Time: time.Unix(0, 1503031538786000*int64(time.Microsecond)).UTC(),
},
{
Measurement: "zipkin",
Tags: map[string]string{
"id": "8291962692415852504",
"name": "http:/hi",
"parent_id": "8291962692415852504",
"service_name": "test",
"trace_id": "7312f822d43d0fd8",
},
Fields: map[string]interface{}{
"duration_ns": int64(23393000),
},
Time: time.Unix(0, 1503031538778000*int64(time.Microsecond)).UTC(),
},
{
Measurement: "zipkin",
Tags: map[string]string{
"annotation": "sr",
"endpoint_host": "192.168.0.8:8010",
"id": "8291962692415852504",
"name": "http:/hi",
"parent_id": "8291962692415852504",
"service_name": "test",
"trace_id": "7312f822d43d0fd8",
},
Fields: map[string]interface{}{
"duration_ns": int64(23393000),
},
Time: time.Unix(0, 1503031538778000*int64(time.Microsecond)).UTC(),
},
{
Measurement: "zipkin",
Tags: map[string]string{
"annotation": "ss",
"endpoint_host": "192.168.0.8:8010",
"id": "8291962692415852504",
"name": "http:/hi",
"parent_id": "8291962692415852504",
"service_name": "test",
"trace_id": "7312f822d43d0fd8",
},
Fields: map[string]interface{}{
"duration_ns": int64(23393000),
},
Time: time.Unix(0, 1503031538778000*int64(time.Microsecond)).UTC(),
},
{
Measurement: "zipkin",
Tags: map[string]string{
"annotation": "Demo2Application",
"annotation_key": "mvc.controller.class",
"endpoint_host": "192.168.0.8:8010",
"id": "8291962692415852504",
"name": "http:/hi",
"parent_id": "8291962692415852504",
"service_name": "test",
"trace_id": "7312f822d43d0fd8",
},
Fields: map[string]interface{}{
"duration_ns": int64(23393000),
},
Time: time.Unix(0, 1503031538778000*int64(time.Microsecond)).UTC(),
},
{
Measurement: "zipkin",
Tags: map[string]string{
"annotation": "hi",
"annotation_key": "mvc.controller.method",
"endpoint_host": "192.168.0.8:8010",
"id": "8291962692415852504",
"name": "http:/hi",
"parent_id": "8291962692415852504",
"service_name": "test",
"trace_id": "7312f822d43d0fd8",
},
Fields: map[string]interface{}{
"duration_ns": int64(23393000),
},
Time: time.Unix(0, 1503031538778000*int64(time.Microsecond)).UTC(),
},
{
Measurement: "zipkin",
Tags: map[string]string{
"annotation": "192.168.0.8:test:8010",
"annotation_key": "spring.instance_id",
"endpoint_host": "192.168.0.8:8010",
"id": "8291962692415852504",
"name": "http:/hi",
"parent_id": "8291962692415852504",
"service_name": "test",
"trace_id": "7312f822d43d0fd8",
},
Fields: map[string]interface{}{
"duration_ns": int64(23393000),
},
Time: time.Unix(0, 1503031538778000*int64(time.Microsecond)).UTC(),
},
},
},
}
z := &Zipkin{
@ -240,7 +572,7 @@ func TestZipkinPlugin(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
mockAcc.ClearMetrics()
if err := postThriftData(tt.thriftDataFile, z.address); err != nil {
if err := postThriftData(tt.datafile, z.address, tt.contentType); err != nil {
t.Fatalf("Posting data to http endpoint /api/v1/spans failed. Error: %s\n", err)
}
mockAcc.Wait(len(tt.want)) // Since the server runs concurrently, wait for the expected number of data points to be added to the accumulator.
@ -252,7 +584,6 @@ func TestZipkinPlugin(t *testing.T) {
for _, m := range mockAcc.Metrics {
got = append(got, *m)
}
if !cmp.Equal(tt.want, got) {
t.Fatalf("Got != Want\n %s", cmp.Diff(tt.want, got))
}
@ -266,19 +597,18 @@ func TestZipkinPlugin(t *testing.T) {
}
}
func postThriftData(datafile, address string) error {
func postThriftData(datafile, address, contentType string) error {
dat, err := ioutil.ReadFile(datafile)
if err != nil {
return fmt.Errorf("could not read from data file %s", datafile)
}
req, err := http.NewRequest("POST", fmt.Sprintf("http://%s/api/v1/spans", address), bytes.NewReader(dat))
if err != nil {
return fmt.Errorf("HTTP request creation failed")
}
req.Header.Set("Content-Type", "application/x-thrift")
req.Header.Set("Content-Type", contentType)
client := &http.Client{}
_, err = client.Do(req)
if err != nil {


@ -48,6 +48,9 @@ func (a *Amon) Connect() error {
return fmt.Errorf("serverkey and amon_instance are required fields for amon output")
}
a.client = &http.Client{
Transport: &http.Transport{
Proxy: http.ProxyFromEnvironment,
},
Timeout: a.Timeout.Duration,
}
return nil

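The Connect change above is one of several in this diff (Datadog and Librato get the same treatment) that wire http.ProxyFromEnvironment into the output's transport, so the client honors the standard HTTP_PROXY, HTTPS_PROXY, and NO_PROXY environment variables. A minimal sketch of the same construction, with a hypothetical helper name:

package main

import (
    "net/http"
    "time"
)

// newProxyAwareClient mirrors the pattern above: ProxyFromEnvironment
// selects a proxy (or none) from the HTTP_PROXY, HTTPS_PROXY, and
// NO_PROXY environment variables for each outgoing request URL.
func newProxyAwareClient(timeout time.Duration) *http.Client {
    return &http.Client{
        Transport: &http.Transport{
            Proxy: http.ProxyFromEnvironment,
        },
        Timeout: timeout,
    }
}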

@ -193,6 +193,25 @@ func BuildMetricDatum(point telegraf.Metric) []*cloudwatch.MetricDatum {
continue
}
// Do CloudWatch boundary checking
// Constraints at: http://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_MetricDatum.html
if math.IsNaN(value) {
datums = datums[:len(datums)-1]
continue
}
if math.IsInf(value, 0) {
datums = datums[:len(datums)-1]
continue
}
if value > 0 && value < float64(8.515920e-109) {
datums = datums[:len(datums)-1]
continue
}
if value > float64(1.174271e+108) {
datums = datums[:len(datums)-1]
continue
}
datums[i] = &cloudwatch.MetricDatum{
MetricName: aws.String(strings.Join([]string{point.Name(), k}, "_")),
Value: aws.Float64(value),

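The boundary checks above silently drop any datum whose value CloudWatch would reject, per the MetricDatum constraints linked in the comment. A hypothetical condensation of the same rules as one predicate; note the shipped code only guards the positive range, while this sketch checks the magnitude as the AWS documentation describes (assumes import "math"):

// isAcceptedByCloudWatch reports whether a float value survives the
// checks above: no NaN, no +/-Inf, and a magnitude inside
// [8.515920e-109, 1.174271e+108]; zero is accepted.
func isAcceptedByCloudWatch(value float64) bool {
    if math.IsNaN(value) || math.IsInf(value, 0) {
        return false
    }
    abs := math.Abs(value)
    if abs == 0 {
        return true
    }
    return abs >= 8.515920e-109 && abs <= 1.174271e+108
}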

@ -1,6 +1,8 @@
package cloudwatch
import (
"fmt"
"math"
"sort"
"testing"
@ -51,22 +53,32 @@ func TestBuildDimensions(t *testing.T) {
func TestBuildMetricDatums(t *testing.T) {
assert := assert.New(t)
zero := 0.0
validMetrics := []telegraf.Metric{
testutil.TestMetric(1),
testutil.TestMetric(int32(1)),
testutil.TestMetric(int64(1)),
testutil.TestMetric(float64(1)),
testutil.TestMetric(float64(0)),
testutil.TestMetric(math.Copysign(zero, -1)), // the CW documentation does not call out -0 as rejected
testutil.TestMetric(float64(8.515920e-109)),
testutil.TestMetric(float64(1.174271e+108)), // largest should be 1.174271e+108
testutil.TestMetric(true),
}
invalidMetrics := []telegraf.Metric{
testutil.TestMetric("Foo"),
testutil.TestMetric(math.Log(-1.0)),
testutil.TestMetric(float64(8.515919e-109)), // smallest should be 8.515920e-109
testutil.TestMetric(float64(1.174272e+108)), // largest should be 1.174271e+108
}
for _, point := range validMetrics {
datums := BuildMetricDatum(point)
assert.Equal(1, len(datums), "Valid type should create a Datum")
assert.Equal(1, len(datums), fmt.Sprintf("Valid point should create a Datum {value: %v}", point))
}
for _, point := range invalidMetrics {
datums := BuildMetricDatum(point)
assert.Equal(0, len(datums), fmt.Sprintf("Invalid point should not create a Datum {value: %v}", point))
}
nonValidPoint := testutil.TestMetric("Foo")
assert.Equal(0, len(BuildMetricDatum(nonValidPoint)), "Invalid type should not create a Datum")
}
func TestPartitionDatums(t *testing.T) {
@ -78,10 +90,13 @@ func TestPartitionDatums(t *testing.T) {
Value: aws.Float64(1),
}
zeroDatum := []*cloudwatch.MetricDatum{}
oneDatum := []*cloudwatch.MetricDatum{&testDatum}
twoDatum := []*cloudwatch.MetricDatum{&testDatum, &testDatum}
threeDatum := []*cloudwatch.MetricDatum{&testDatum, &testDatum, &testDatum}
assert.Equal([][]*cloudwatch.MetricDatum{}, PartitionDatums(2, zeroDatum))
assert.Equal([][]*cloudwatch.MetricDatum{oneDatum}, PartitionDatums(2, oneDatum))
assert.Equal([][]*cloudwatch.MetricDatum{oneDatum}, PartitionDatums(2, oneDatum))
assert.Equal([][]*cloudwatch.MetricDatum{twoDatum}, PartitionDatums(2, twoDatum))
assert.Equal([][]*cloudwatch.MetricDatum{twoDatum, oneDatum}, PartitionDatums(2, threeDatum))

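The new assertions pin down the chunking contract for PartitionDatums: an empty, non-nil outer slice for empty input, and at most size datums per partition. The implementation lives in cloudwatch.go and is not shown in this diff; a minimal sketch consistent with the assertions above:

// partitionDatums splits datums into chunks of at most size entries.
// The outer slice starts non-nil so empty input compares equal to
// [][]*cloudwatch.MetricDatum{} under reflect.DeepEqual.
func partitionDatums(size int, datums []*cloudwatch.MetricDatum) [][]*cloudwatch.MetricDatum {
    partitions := [][]*cloudwatch.MetricDatum{}
    for i := 0; i < len(datums); i += size {
        end := i + size
        if end > len(datums) {
            end = len(datums)
        }
        partitions = append(partitions, datums[i:end])
    }
    return partitions
}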

@ -8,6 +8,7 @@ import (
"net/http"
"net/url"
"sort"
"strings"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
@ -55,7 +56,11 @@ func (d *Datadog) Connect() error {
if d.Apikey == "" {
return fmt.Errorf("apikey is a required field for datadog output")
}
d.client = &http.Client{
Transport: &http.Transport{
Proxy: http.ProxyFromEnvironment,
},
Timeout: d.Timeout.Duration,
}
return nil
@ -96,6 +101,7 @@ func (d *Datadog) Write(metrics []telegraf.Metric) error {
}
}
redactedApiKey := "****************"
ts.Series = make([]*Metric, metricCounter)
copy(ts.Series, tempSeries[0:])
tsBytes, err := json.Marshal(ts)
@ -104,13 +110,13 @@ func (d *Datadog) Write(metrics []telegraf.Metric) error {
}
req, err := http.NewRequest("POST", d.authenticatedUrl(), bytes.NewBuffer(tsBytes))
if err != nil {
return fmt.Errorf("unable to create http.Request, %s\n", err.Error())
return fmt.Errorf("unable to create http.Request, %s\n", strings.Replace(err.Error(), d.Apikey, redactedApiKey, -1))
}
req.Header.Add("Content-Type", "application/json")
resp, err := d.client.Do(req)
if err != nil {
return fmt.Errorf("error POSTing metrics, %s\n", err.Error())
return fmt.Errorf("error POSTing metrics, %s\n", strings.Replace(err.Error(), d.Apikey, redactedApiKey, -1))
}
defer resp.Body.Close()

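Both error paths above now scrub the API key from the error text before it is returned, since the key is embedded in the authenticated URL and would otherwise leak into logs. The same pattern as a small hypothetical helper (assumes import "strings"):

// redactAPIKey masks every occurrence of the key in msg, so transport
// errors that echo the request URL are safe to log.
func redactAPIKey(msg, apikey string) string {
    if apikey == "" {
        return msg
    }
    return strings.Replace(msg, apikey, "****************", -1)
}

With it, the write path would read fmt.Errorf("error POSTing metrics, %s", redactAPIKey(err.Error(), d.Apikey)).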

@ -7,9 +7,7 @@ This plugin writes to [InfluxDB](https://www.influxdb.com) via HTTP or UDP.
```toml
# Configuration for influxdb server to send metrics to
[[outputs.influxdb]]
## The HTTP or UDP URL for your InfluxDB instance. Each item should be
## of the form:
## scheme "://" host [ ":" port]
## The full HTTP or UDP URL for your InfluxDB instance.
##
## Multiple urls can be specified as part of the same cluster,
## this means that only ONE of the urls will be written to each interval.
@ -44,6 +42,9 @@ This plugin writes to [InfluxDB](https://www.influxdb.com) via HTTP or UDP.
## HTTP Proxy Config
# http_proxy = "http://corporate.proxy:3128"
## Optional HTTP headers
# http_headers = {"X-Special-Header" = "Special-Value"}
## Compress each HTTP request payload using GZIP.
# content_encoding = "gzip"
```
@ -70,4 +71,5 @@ to write to. Each URL should start with either `http://` or `udp://`
* `ssl_key`: SSL key
* `insecure_skip_verify`: Use SSL but skip chain & host verification (default: false)
* `http_proxy`: HTTP Proxy URI
* `http_headers`: HTTP headers to add to each HTTP request
* content_encoding: Compress each HTTP request payload using gzip when set to "gzip"


@ -4,13 +4,7 @@ import "io"
type Client interface {
Query(command string) error
Write(b []byte) (int, error)
WriteWithParams(b []byte, params WriteParams) (int, error)
WriteStream(b io.Reader, contentLength int) (int, error)
WriteStreamWithParams(b io.Reader, contentLength int, params WriteParams) (int, error)
WriteStream(b io.Reader) error
Close() error
}

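With the byte-count and params variants removed, every caller funnels through the single streaming method and checks only the error. A minimal caller sketch against the reduced interface (assumes this package's Client plus import "bytes"):

// writeLine is a hypothetical convenience wrapper over the new
// interface: wrap the payload in a reader; no byte count comes back.
func writeLine(c Client, line string) error {
    return c.WriteStream(bytes.NewReader([]byte(line)))
}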

@ -10,6 +10,7 @@ import (
"io/ioutil"
"net/http"
"net/url"
"path"
"time"
)
@ -53,6 +54,7 @@ func NewHTTP(config HTTPConfig, defaultWP WriteParams) (Client, error) {
}
} else {
transport = http.Transport{
Proxy: http.ProxyFromEnvironment,
TLSClientConfig: config.TLSConfig,
}
}
@ -68,6 +70,8 @@ func NewHTTP(config HTTPConfig, defaultWP WriteParams) (Client, error) {
}, nil
}
type HTTPHeaders map[string]string
type HTTPConfig struct {
// URL should be of the form "http://host:port" (REQUIRED)
URL string
@ -95,6 +99,9 @@ type HTTPConfig struct {
// Proxy URL should be of the form "http://host:port"
HTTPProxy string
// HTTP headers to append to HTTP requests.
HTTPHeaders HTTPHeaders
// The content encoding mechanism to use for each request.
ContentEncoding string
}
@ -130,60 +137,13 @@ func (c *httpClient) Query(command string) error {
return c.doRequest(req, http.StatusOK)
}
func (c *httpClient) Write(b []byte) (int, error) {
req, err := c.makeWriteRequest(bytes.NewReader(b), len(b), c.writeURL)
func (c *httpClient) WriteStream(r io.Reader) error {
req, err := c.makeWriteRequest(r, c.writeURL)
if err != nil {
return 0, nil
return err
}
err = c.doRequest(req, http.StatusNoContent)
if err == nil {
return len(b), nil
}
return 0, err
}
func (c *httpClient) WriteWithParams(b []byte, wp WriteParams) (int, error) {
req, err := c.makeWriteRequest(bytes.NewReader(b), len(b), writeURL(c.url, wp))
if err != nil {
return 0, nil
}
err = c.doRequest(req, http.StatusNoContent)
if err == nil {
return len(b), nil
}
return 0, err
}
func (c *httpClient) WriteStream(r io.Reader, contentLength int) (int, error) {
req, err := c.makeWriteRequest(r, contentLength, c.writeURL)
if err != nil {
return 0, nil
}
err = c.doRequest(req, http.StatusNoContent)
if err == nil {
return contentLength, nil
}
return 0, err
}
func (c *httpClient) WriteStreamWithParams(
r io.Reader,
contentLength int,
wp WriteParams,
) (int, error) {
req, err := c.makeWriteRequest(r, contentLength, writeURL(c.url, wp))
if err != nil {
return 0, nil
}
err = c.doRequest(req, http.StatusNoContent)
if err == nil {
return contentLength, nil
}
return 0, err
return c.doRequest(req, http.StatusNoContent)
}
func (c *httpClient) doRequest(
@ -225,7 +185,6 @@ func (c *httpClient) doRequest(
func (c *httpClient) makeWriteRequest(
body io.Reader,
contentLength int,
writeURL string,
) (*http.Request, error) {
req, err := c.makeRequest(writeURL, body)
@ -234,8 +193,6 @@ func (c *httpClient) makeWriteRequest(
}
if c.config.ContentEncoding == "gzip" {
req.Header.Set("Content-Encoding", "gzip")
} else {
req.Header.Set("Content-Length", fmt.Sprint(contentLength))
}
return req, nil
}
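When content_encoding = "gzip" is configured, the request advertises Content-Encoding: gzip and no longer sets Content-Length, since the compressed size of a streamed body is not known up front. A stdlib-only sketch of gzip-compressing a payload in memory, the shape of body this encoding implies (hypothetical helper; the client's actual compression step is outside this hunk; assumes imports "bytes", "compress/gzip", and "io"):

// gzipBody compresses a payload in memory and returns a reader holding
// the compressed bytes. Hypothetical helper, not part of this diff.
func gzipBody(data []byte) (io.Reader, error) {
    var buf bytes.Buffer
    zw := gzip.NewWriter(&buf)
    if _, err := zw.Write(data); err != nil {
        return nil, err
    }
    if err := zw.Close(); err != nil {
        return nil, err
    }
    return &buf, nil
}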
@ -253,6 +210,11 @@ func (c *httpClient) makeRequest(uri string, body io.Reader) (*http.Request, err
if err != nil {
return nil, err
}
for header, value := range c.config.HTTPHeaders {
req.Header.Set(header, value)
}
req.Header.Set("Content-Type", "text/plain")
req.Header.Set("User-Agent", c.config.UserAgent)
if c.config.Username != "" && c.config.Password != "" {
@ -294,8 +256,11 @@ func writeURL(u *url.URL, wp WriteParams) string {
}
u.RawQuery = params.Encode()
u.Path = "write"
return u.String()
p := u.Path
u.Path = path.Join(p, "write")
s := u.String()
u.Path = p
return s
}
func queryURL(u *url.URL, command string) string {
@ -303,6 +268,9 @@ func queryURL(u *url.URL, command string) string {
params.Set("q", command)
u.RawQuery = params.Encode()
u.Path = "query"
return u.String()
p := u.Path
u.Path = path.Join(p, "query")
s := u.String()
u.Path = p
return s
}

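The switch to path.Join is what the new TestHTTPClient_PathPrefix below exercises: "write" and "query" are appended to whatever path the configured URL already carries, and the original path is restored afterwards because the same url.URL is reused across requests. A worked example with hypothetical values:

package main

import (
    "fmt"
    "net/url"
    "path"
)

func main() {
    u, _ := url.Parse("http://localhost:8086/some/random/prefix")
    p := u.Path                    // save "/some/random/prefix"
    u.Path = path.Join(p, "write") // "/some/random/prefix/write"
    s := u.String()
    u.Path = p // restore: u is shared between write and query calls
    fmt.Println(s) // http://localhost:8086/some/random/prefix/write
}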

@ -55,6 +55,13 @@ func TestHTTPClient_Write(t *testing.T) {
fmt.Fprintln(w, `{"results":[{}],"error":"basic auth incorrect"}`)
}
// test that user-specified http header is set properly
if r.Header.Get("X-Test-Header") != "Test-Value" {
w.WriteHeader(http.StatusTeapot)
w.Header().Set("Content-Type", "application/json")
fmt.Fprintln(w, `{"results":[{}],"error":"wrong http header value"}`)
}
// Validate Content-Length Header
if r.ContentLength != 13 {
w.WriteHeader(http.StatusTeapot)
@ -90,6 +97,9 @@ func TestHTTPClient_Write(t *testing.T) {
UserAgent: "test-agent",
Username: "test-user",
Password: "test-password",
HTTPHeaders: HTTPHeaders{
"X-Test-Header": "Test-Value",
},
}
wp := WriteParams{
Database: "test",
@ -100,66 +110,8 @@ func TestHTTPClient_Write(t *testing.T) {
client, err := NewHTTP(config, wp)
defer client.Close()
assert.NoError(t, err)
n, err := client.Write([]byte("cpu value=99\n"))
assert.Equal(t, 13, n)
assert.NoError(t, err)
_, err = client.WriteStream(bytes.NewReader([]byte("cpu value=99\n")), 13)
assert.NoError(t, err)
}
func TestHTTPClient_WriteParamsOverride(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
switch r.URL.Path {
case "/write":
// test that database is set properly
if r.FormValue("db") != "override" {
w.WriteHeader(http.StatusTeapot)
w.Header().Set("Content-Type", "application/json")
fmt.Fprintln(w, `{"results":[{}],"error":"wrong db name"}`)
}
// Validate the request body:
buf := make([]byte, 100)
n, _ := r.Body.Read(buf)
expected := "cpu value=99"
got := string(buf[0 : n-1])
if expected != got {
w.WriteHeader(http.StatusTeapot)
w.Header().Set("Content-Type", "application/json")
msg := fmt.Sprintf(`{"results":[{}],"error":"expected [%s], got [%s]"}`, expected, got)
fmt.Fprintln(w, msg)
}
w.WriteHeader(http.StatusNoContent)
w.Header().Set("Content-Type", "application/json")
case "/query":
w.WriteHeader(http.StatusOK)
w.Header().Set("Content-Type", "application/json")
fmt.Fprintln(w, `{"results":[{}]}`)
}
}))
defer ts.Close()
config := HTTPConfig{
URL: ts.URL,
}
defaultWP := WriteParams{
Database: "test",
}
client, err := NewHTTP(config, defaultWP)
defer client.Close()
assert.NoError(t, err)
// test that WriteWithParams overrides the default write params
wp := WriteParams{
Database: "override",
}
n, err := client.WriteWithParams([]byte("cpu value=99\n"), wp)
assert.Equal(t, 13, n)
assert.NoError(t, err)
_, err = client.WriteStreamWithParams(bytes.NewReader([]byte("cpu value=99\n")), 13, wp)
err = client.WriteStream(bytes.NewReader([]byte("cpu value=99\n")))
assert.NoError(t, err)
}
@ -187,23 +139,7 @@ func TestHTTPClient_Write_Errors(t *testing.T) {
assert.NoError(t, err)
lp := []byte("cpu value=99\n")
n, err := client.Write(lp)
assert.Equal(t, 0, n)
assert.Error(t, err)
n, err = client.WriteStream(bytes.NewReader(lp), 13)
assert.Equal(t, 0, n)
assert.Error(t, err)
wp := WriteParams{
Database: "override",
}
n, err = client.WriteWithParams(lp, wp)
assert.Equal(t, 0, n)
assert.Error(t, err)
n, err = client.WriteStreamWithParams(bytes.NewReader(lp), 13, wp)
assert.Equal(t, 0, n)
err = client.WriteStream(bytes.NewReader(lp))
assert.Error(t, err)
}
@ -363,3 +299,37 @@ func TestGzipCompression(t *testing.T) {
assert.Equal(t, []byte(influxLine), uncompressed.Bytes())
}
func TestHTTPClient_PathPrefix(t *testing.T) {
prefix := "/some/random/prefix"
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
switch r.URL.Path {
case prefix + "/write":
w.WriteHeader(http.StatusNoContent)
w.Header().Set("Content-Type", "application/json")
case prefix + "/query":
w.WriteHeader(http.StatusOK)
w.Header().Set("Content-Type", "application/json")
fmt.Fprintln(w, `{"results":[{}]}`)
default:
w.WriteHeader(http.StatusNotFound)
msg := fmt.Sprintf("Path not found: %s", r.URL.Path)
fmt.Fprintln(w, msg)
}
}))
defer ts.Close()
config := HTTPConfig{
URL: ts.URL + prefix,
}
wp := WriteParams{
Database: "test",
}
client, err := NewHTTP(config, wp)
defer client.Close()
assert.NoError(t, err)
err = client.Query("CREATE DATABASE test")
assert.NoError(t, err)
err = client.WriteStream(bytes.NewReader([]byte("cpu value=99\n")))
assert.NoError(t, err)
}


@ -1,7 +1,6 @@
package client
import (
"bytes"
"fmt"
"io"
"log"
@ -62,18 +61,8 @@ func (c *udpClient) Query(command string) error {
return nil
}
// Write will send the byte stream to the given UDP client endpoint
func (c *udpClient) Write(b []byte) (int, error) {
return c.WriteStream(bytes.NewReader(b), -1)
}
// WriteWithParams are ignored by the UDP client, will forward to WriteStream
func (c *udpClient) WriteWithParams(b []byte, wp WriteParams) (int, error) {
return c.WriteStream(bytes.NewReader(b), -1)
}
// WriteStream will send the provided data through to the client, contentLength is ignored by the UDP client
func (c *udpClient) WriteStream(r io.Reader, contentLength int) (int, error) {
func (c *udpClient) WriteStream(r io.Reader) error {
var totaln int
for {
nR, err := r.Read(c.buffer)
@ -81,14 +70,14 @@ func (c *udpClient) WriteStream(r io.Reader, contentLength int) (int, error) {
break
}
if err != io.EOF && err != nil {
return totaln, err
return err
}
if c.buffer[nR-1] == uint8('\n') {
nW, err := c.conn.Write(c.buffer[0:nR])
totaln += nW
if err != nil {
return totaln, err
return err
}
} else {
log.Printf("E! Could not fit point into UDP payload; dropping")
@ -99,7 +88,7 @@ func (c *udpClient) WriteStream(r io.Reader, contentLength int) (int, error) {
break
}
if err != io.EOF && err != nil {
return totaln, err
return err
}
if c.buffer[nR-1] == uint8('\n') {
break
@ -107,13 +96,7 @@ func (c *udpClient) WriteStream(r io.Reader, contentLength int) (int, error) {
}
}
}
return totaln, nil
}
// WriteStreamWithParams will forward the stream to the client backend, contentLength is ignored by the UDP client
// write params are ignored by the UDP client
func (c *udpClient) WriteStreamWithParams(r io.Reader, contentLength int, wp WriteParams) (int, error) {
return c.WriteStream(r, -1)
return nil
}
// Close will terminate the provided client connection


@ -9,7 +9,6 @@ import (
"github.com/influxdata/telegraf/metric"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestUDPClient(t *testing.T) {
@ -65,43 +64,6 @@ func TestUDPClient_Write(t *testing.T) {
}
}()
// test sending simple metric
n, err := client.Write([]byte("cpu value=99\n"))
assert.Equal(t, n, 13)
assert.NoError(t, err)
pkt := <-packets
assert.Equal(t, "cpu value=99\n", pkt)
wp := WriteParams{}
//
// Using WriteStream() & a metric.Reader:
config3 := UDPConfig{
URL: "udp://localhost:8199",
PayloadSize: 40,
}
client3, err := NewUDP(config3)
assert.NoError(t, err)
now := time.Unix(1484142942, 0)
m1, _ := metric.New("test", map[string]string{},
map[string]interface{}{"value": 1.1}, now)
m2, _ := metric.New("test", map[string]string{},
map[string]interface{}{"value": 1.1}, now)
m3, _ := metric.New("test", map[string]string{},
map[string]interface{}{"value": 1.1}, now)
ms := []telegraf.Metric{m1, m2, m3}
mReader := metric.NewReader(ms)
n, err = client3.WriteStreamWithParams(mReader, 10, wp)
// 3 metrics at 35 bytes each (including the newline)
assert.Equal(t, 105, n)
assert.NoError(t, err)
pkt = <-packets
assert.Equal(t, "test value=1.1 1484142942000000000\n", pkt)
pkt = <-packets
assert.Equal(t, "test value=1.1 1484142942000000000\n", pkt)
pkt = <-packets
assert.Equal(t, "test value=1.1 1484142942000000000\n", pkt)
assert.NoError(t, client.Close())
config = UDPConfig{
@ -112,17 +74,15 @@ func TestUDPClient_Write(t *testing.T) {
assert.NoError(t, err)
ts := time.Unix(1484142943, 0)
m1, _ = metric.New("test", map[string]string{},
m1, _ := metric.New("test", map[string]string{},
map[string]interface{}{"this_is_a_very_long_field_name": 1.1}, ts)
m2, _ = metric.New("test", map[string]string{},
m2, _ := metric.New("test", map[string]string{},
map[string]interface{}{"value": 1.1}, ts)
ms = []telegraf.Metric{m1, m2}
ms := []telegraf.Metric{m1, m2}
reader := metric.NewReader(ms)
n, err = client4.WriteStream(reader, 0)
err = client4.WriteStream(reader)
assert.NoError(t, err)
require.Equal(t, 35, n)
assert.NoError(t, err)
pkt = <-packets
pkt := <-packets
assert.Equal(t, "test value=1.1 1484142943000000000\n", pkt)
assert.NoError(t, client4.Close())


@ -32,9 +32,10 @@ type InfluxDB struct {
RetentionPolicy string
WriteConsistency string
Timeout internal.Duration
UDPPayload int `toml:"udp_payload"`
HTTPProxy string `toml:"http_proxy"`
ContentEncoding string `toml:"content_encoding"`
UDPPayload int `toml:"udp_payload"`
HTTPProxy string `toml:"http_proxy"`
HTTPHeaders map[string]string `toml:"http_headers"`
ContentEncoding string `toml:"content_encoding"`
// Path to CA file
SSLCA string `toml:"ssl_ca"`
@ -52,9 +53,7 @@ type InfluxDB struct {
}
var sampleConfig = `
## The HTTP or UDP URL for your InfluxDB instance. Each item should be
## of the form:
## scheme "://" host [ ":" port]
## The full HTTP or UDP URL for your InfluxDB instance.
##
## Multiple urls can be specified as part of the same cluster,
## this means that only ONE of the urls will be written to each interval.
@ -88,7 +87,10 @@ var sampleConfig = `
## HTTP Proxy Config
# http_proxy = "http://corporate.proxy:3128"
## Optional HTTP headers
# http_headers = {"X-Special-Header" = "Special-Value"}
## Compress each HTTP request payload using GZIP.
# content_encoding = "gzip"
`
@ -132,8 +134,12 @@ func (i *InfluxDB) Connect() error {
Username: i.Username,
Password: i.Password,
HTTPProxy: i.HTTPProxy,
HTTPHeaders: client.HTTPHeaders{},
ContentEncoding: i.ContentEncoding,
}
for header, value := range i.HTTPHeaders {
config.HTTPHeaders[header] = value
}
wp := client.WriteParams{
Database: i.Database,
RetentionPolicy: i.RetentionPolicy,
@ -177,12 +183,6 @@ func (i *InfluxDB) Description() string {
// Write will choose a random server in the cluster to write to until a successful write
// occurs, logging each unsuccessful. If all servers fail, return error.
func (i *InfluxDB) Write(metrics []telegraf.Metric) error {
bufsize := 0
for _, m := range metrics {
bufsize += m.Len()
}
r := metric.NewReader(metrics)
// This will get set to nil if a successful write occurs
@ -190,7 +190,7 @@ func (i *InfluxDB) Write(metrics []telegraf.Metric) error {
p := rand.Perm(len(i.clients))
for _, n := range p {
if _, e := i.clients[n].WriteStream(r, bufsize); e != nil {
if e := i.clients[n].WriteStream(r); e != nil {
// If the database was not found, try to recreate it:
if strings.Contains(e.Error(), "database not found") {
errc := i.clients[n].Query(fmt.Sprintf(`CREATE DATABASE "%s"`, qiReplacer.Replace(i.Database)))
@ -199,6 +199,7 @@ func (i *InfluxDB) Write(metrics []telegraf.Metric) error {
i.Database)
}
}
if strings.Contains(e.Error(), "field type conflict") {
log.Printf("E! Field type conflict, dropping conflicted points: %s", e)
// setting err to nil, otherwise we will keep retrying and points
@ -206,6 +207,31 @@ func (i *InfluxDB) Write(metrics []telegraf.Metric) error {
err = nil
break
}
if strings.Contains(e.Error(), "points beyond retention policy") {
log.Printf("W! Points beyond retention policy: %s", e)
// This error indicates the point is older than the
// retention policy permits, and is probably not a cause for
// concern. Retrying will not help unless the retention
// policy is modified.
err = nil
break
}
if strings.Contains(e.Error(), "unable to parse") {
log.Printf("E! Parse error; dropping points: %s", e)
// This error indicates a bug in Telegraf or InfluxDB parsing
// of line protocol. Retries will not be successful.
err = nil
break
}
if strings.Contains(e.Error(), "hinted handoff queue not empty") {
// This is an informational message
err = nil
break
}
// Log write failure
log.Printf("E! InfluxDB Output Error: %s", e)
} else {

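The added branches all apply one rule: when a retry cannot succeed (unparseable points, field type conflicts, points outside the retention window) or the response is informational (hinted handoff), the error is swallowed so the same points are not resent forever. A hypothetical condensation of those string checks (assumes import "strings"):

// isNonRetryable reports whether a write error should be dropped
// rather than retried by the output's write loop.
func isNonRetryable(err error) bool {
    for _, s := range []string{
        "field type conflict",
        "points beyond retention policy",
        "unable to parse",
        "hinted handoff queue not empty",
    } {
        if strings.Contains(err.Error(), s) {
            return true
        }
    }
    return false
}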

@ -178,28 +178,107 @@ func TestHTTPError_DatabaseNotFound(t *testing.T) {
require.NoError(t, i.Close())
}
// field type conflict does not return an error, instead we
func TestHTTPError_FieldTypeConflict(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
switch r.URL.Path {
case "/write":
w.WriteHeader(http.StatusNotFound)
w.Header().Set("Content-Type", "application/json")
fmt.Fprintln(w, `{"results":[{}],"error":"field type conflict: input field \"value\" on measurement \"test\" is type integer, already exists as type float dropped=1"}`)
}
}))
defer ts.Close()
i := InfluxDB{
URLs: []string{ts.URL},
Database: "test",
func TestHTTPError_WriteErrors(t *testing.T) {
var testCases = []struct {
name string
status int
contentType string
body string
err error
}{
{
// HTTP/1.1 400 Bad Request
// Content-Type: application/json
// X-Influxdb-Version: 1.3.3
//
// {
// "error": "partial write: points beyond retention policy dropped=1"
// }
name: "beyond retention policy is not an error",
status: http.StatusBadRequest,
contentType: "application/json",
body: `{"error":"partial write: points beyond retention policy dropped=1"}`,
err: nil,
},
{
// HTTP/1.1 400 Bad Request
// Content-Type: application/json
// X-Influxdb-Version: 1.3.3
//
// {
// "error": "unable to parse 'foo bar=': missing field value"
// }
name: "unable to parse is not an error",
status: http.StatusBadRequest,
contentType: "application/json",
body: `{"error":"unable to parse 'foo bar=': missing field value"}`,
err: nil,
},
{
// HTTP/1.1 400 Bad Request
// Content-Type: application/json
// X-Influxdb-Version: 1.3.3
//
// {
// "error": "partial write: field type conflict: input field \"bar\" on measurement \"foo\" is type float, already exists as type integer dropped=1"
// }
name: "field type conflict is not an error",
status: http.StatusBadRequest,
contentType: "application/json",
body: `{"error": "partial write: field type conflict: input field \"bar\" on measurement \"foo\" is type float, already exists as type integer dropped=1"}`,
err: nil,
},
{
// HTTP/1.1 500 Internal Server Error
// Content-Type: application/json
// X-Influxdb-Version: 1.3.3-c1.3.3
//
// {
// "error": "write failed: hinted handoff queue not empty"
// }
name: "hinted handoff queue not empty is not an error",
status: http.StatusInternalServerError,
contentType: "application/json",
body: `{"error":"write failed: hinted handoff queue not empty"}`,
err: nil,
},
{
// HTTP/1.1 500 Internal Server Error
// Content-Type: application/json
// X-Influxdb-Version: 1.3.3-c1.3.3
//
// {
// "error": "partial write"
// }
name: "plain partial write is an error",
status: http.StatusInternalServerError,
contentType: "application/json",
body: `{"error":"partial write"}`,
err: fmt.Errorf("Could not write to any InfluxDB server in cluster"),
},
}
err := i.Connect()
require.NoError(t, err)
err = i.Write(testutil.MockMetrics())
require.NoError(t, err)
require.NoError(t, i.Close())
for _, tt := range testCases {
t.Run(tt.name, func(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
rw.WriteHeader(tt.status)
rw.Header().Set("Content-Type", tt.contentType)
fmt.Fprintln(rw, tt.body)
}))
defer ts.Close()
influx := InfluxDB{
URLs: []string{ts.URL},
Database: "test",
}
err := influx.Connect()
require.NoError(t, err)
err = influx.Write(testutil.MockMetrics())
require.Equal(t, tt.err, err)
require.NoError(t, influx.Close())
})
}
}
type MockClient struct {


@ -80,6 +80,9 @@ func (l *Librato) Connect() error {
"api_user and api_token are required fields for librato output")
}
l.client = &http.Client{
Transport: &http.Transport{
Proxy: http.ProxyFromEnvironment,
},
Timeout: l.Timeout.Duration,
}
return nil


@ -5,6 +5,7 @@ import (
"log"
"net"
"net/url"
"regexp"
"sort"
"strconv"
"strings"
@ -13,6 +14,16 @@ import (
"github.com/influxdata/telegraf/plugins/outputs"
)
var (
allowedChars = regexp.MustCompile(`[^a-zA-Z0-9-_./\p{L}]`)
hyphenChars = strings.NewReplacer(
"@", "-",
"*", "-",
`%`, "-",
"#", "-",
"$", "-")
)
type OpenTSDB struct {
Prefix string
@ -24,9 +35,6 @@ type OpenTSDB struct {
Debug bool
}
var sanitizedChars = strings.NewReplacer("@", "-", "*", "-", " ", "_",
`%`, "-", "#", "-", "$", "-", ":", "_")
var sampleConfig = `
## prefix for metrics keys
prefix = "my.specific.prefix."
@ -125,8 +133,7 @@ func (o *OpenTSDB) WriteHttp(metrics []telegraf.Metric, u *url.URL) error {
}
metric := &HttpMetric{
Metric: sanitizedChars.Replace(fmt.Sprintf("%s%s_%s",
o.Prefix, m.Name(), fieldName)),
Metric: sanitize(fmt.Sprintf("%s%s_%s", o.Prefix, m.Name(), fieldName)),
Tags: tags,
Timestamp: now,
Value: value,
@ -176,7 +183,7 @@ func (o *OpenTSDB) WriteTelnet(metrics []telegraf.Metric, u *url.URL) error {
}
messageLine := fmt.Sprintf("put %s %v %s %s\n",
sanitizedChars.Replace(fmt.Sprintf("%s%s_%s", o.Prefix, m.Name(), fieldName)),
sanitize(fmt.Sprintf("%s%s_%s", o.Prefix, m.Name(), fieldName)),
now, metricValue, tags)
_, err := connection.Write([]byte(messageLine))
@ -192,7 +199,7 @@ func (o *OpenTSDB) WriteTelnet(metrics []telegraf.Metric, u *url.URL) error {
func cleanTags(tags map[string]string) map[string]string {
tagSet := make(map[string]string, len(tags))
for k, v := range tags {
tagSet[sanitizedChars.Replace(k)] = sanitizedChars.Replace(v)
tagSet[sanitize(k)] = sanitize(v)
}
return tagSet
}
@ -236,6 +243,13 @@ func (o *OpenTSDB) Close() error {
return nil
}
func sanitize(value string) string {
// Apply special hyphenation rules to preserve backwards compatibility
value = hyphenChars.Replace(value)
// Replace any remaining illegal chars
return allowedChars.ReplaceAllLiteralString(value, "_")
}
func init() {
outputs.Add("opentsdb", func() telegraf.Output {
return &OpenTSDB{}

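A few worked examples of the new sanitize (inputs hypothetical): the replacer keeps the legacy hyphen substitutions for @ * % # $, and the regexp then maps any remaining disallowed rune, such as a space or colon, to "_".

fmt.Println(sanitize("cpu@usage.idle")) // cpu-usage.idle (@ becomes -)
fmt.Println(sanitize("cpu usage:idle")) // cpu_usage_idle (space and : become _)
fmt.Println(sanitize("temp#c$heap*x"))  // temp-c-heap-x  (# $ * become -)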
Some files were not shown because too many files have changed in this diff.