Compare commits

...

340 Commits

Author SHA1 Message Date
Daniel Nelson
a1668bbf9a Set release date for 1.5.0 2017-12-14 10:59:13 -08:00
Daniel Nelson
fe91c779e9 Remove AWS credential check from cloudwatch output (#3583)
This method is reported to not work with IAM Instance Profiles, and we
do not want to make any calls that would require additional permissions.

(cherry picked from commit 5b40173bcb)
2017-12-13 17:52:45 -08:00
Daniel Nelson
425b6f7d63 Update changelog
(cherry picked from commit 15266bb7eb)
2017-12-13 11:18:34 -08:00
Ildar Svetlov
c322ddb4b0 Don't add system input uptime_format as a counter (#3578)
(cherry picked from commit d935dfa9ed)
2017-12-13 11:17:55 -08:00
Daniel Nelson
648d3bde33 Update changelog
(cherry picked from commit ff634c5056)
2017-12-13 10:58:16 -08:00
Daniel Nelson
d8da77cb42 Add idle state to processes test
(cherry picked from commit 14b31a2354)
2017-12-13 10:57:28 -08:00
Ted Zlatanov
fdb04702eb Support I (idle) process state on procfs+Linux (#3530)
(cherry picked from commit 663a5b1f50)
2017-12-13 10:56:47 -08:00
Steve Banik
ecf43f4986 Fixed typo in README.md (#3574)
(cherry picked from commit d7d224d511)
2017-12-12 11:22:33 -08:00
Daniel Nelson
e307e92e86 Update changelog
(cherry picked from commit abcad439eb)
2017-12-11 18:02:35 -08:00
Daniel Nelson
8d4a09c3ea Fix separation of multiple prometheus_client outputs (#3570)
(cherry picked from commit 8484de6c12)
2017-12-11 18:02:30 -08:00
Daniel Nelson
fd964bd4eb Use auto type detection for scanned devices in smart input (#3561)
(cherry picked from commit 93d16a4603)
2017-12-08 18:03:39 -08:00
Daniel Nelson
994e75f1f0 Update changelog
(cherry picked from commit 88746b01c3)
2017-12-08 18:02:17 -08:00
Daniel Nelson
2e2efafbfc Update sarama-cluster to latest release (#3560)
(cherry picked from commit 37095ef47d)
2017-12-08 18:02:17 -08:00
Daniel Nelson
39537ed86e Use device name instead of abs path for devices tag in smart input (#3550)
(cherry picked from commit 574034c301)
2017-12-08 13:26:15 -08:00
Daniel Nelson
558ce25c94 Log connect error only in wavefront output (#3549)
(cherry picked from commit 177e7e2c73)
2017-12-06 14:56:28 -08:00
Daniel Nelson
0438f412a9 Fix formatting in changelog 2017-12-04 13:17:23 -08:00
Daniel Nelson
ca8911fec0 Update example config 2017-12-01 11:49:07 -08:00
Daniel Nelson
2c5a5373f6 Update changelog 2017-12-01 11:42:00 -08:00
Daniel Nelson
cabe10b88a Update changelog 2017-12-01 11:23:18 -08:00
Daniel Nelson
7f66863b87 Fix HOST_MOUNT_PREFIX in docker with disk input (#3529) 2017-12-01 11:21:39 -08:00
Daniel Nelson
e400ec2b57 Update changelog 2017-11-30 18:42:14 -08:00
Daniel Nelson
44320a5421 Add option to amqp output to publish persistent messages (#3528) 2017-11-30 18:40:12 -08:00
Daniel Nelson
a9951710b3 Add time import 2017-11-29 17:05:13 -08:00
Daniel Nelson
6426bca1f8 Update changelog 2017-11-29 16:36:00 -08:00
Nathan Ferch
f92a4f528f Add input plugin for OpenBSD/FreeBSD pf (#3405) 2017-11-29 16:32:50 -08:00
Daniel Nelson
3ba5458220 Update changelog 2017-11-29 12:17:46 -08:00
Bob Shannon
beb9d7560d Add support for glob patterns in net input plugin (#3140) 2017-11-29 12:16:34 -08:00
Daniel Nelson
24d82aebe6 Update changelog 2017-11-29 12:10:56 -08:00
Daniel Nelson
7dc256e845 Update gopsutil version to include netstat fix (#3513) 2017-11-29 12:06:47 -08:00
Daniel Nelson
297897ae0a Add dcos plugin to changelog and readme 2017-11-29 11:54:33 -08:00
Daniel Nelson
414a7e34fb Add input plugin for DC/OS (#3519) 2017-11-29 11:50:32 -08:00
Patrick Hemmer
bf65e19486 Fix postfix plugin age to use ctime, not mtime (#3525) 2017-11-29 11:25:31 -08:00
Daniel Nelson
2c70958c24 Update changelog 2017-11-29 10:52:59 -08:00
Daniel Nelson
d727a6f85c Add slab to mem plugin (#3518) 2017-11-29 10:49:45 -08:00
Daniel Nelson
4e9b19f7a6 Add bond input to readme and update changelog 2017-11-28 15:19:30 -08:00
Ildar Svetlov
132fb50150 Add bond input plugin (#3424) 2017-11-28 15:16:19 -08:00
Daniel Nelson
d1ba75176d Update changelog 2017-11-28 10:10:36 -08:00
Patrick Hemmer
76240b9f18 Add postfix input plugin (#2553) 2017-11-28 10:08:41 -08:00
Daniel Nelson
06e22ee7ac Update changelog 2017-11-27 17:06:50 -08:00
Lukasz Jagiello
a18eedb970 Use deb-systemd-invoke to restart service (#3506)
From man page:
```
deb-systemd-invoke is a Debian-specific helper script which asks
       /usr/sbin/policy-rc.d before performing a systemctl call.

deb-systemd-invoke is intended to be used from maintscripts to start
       systemd unit files. It is specifically NOT intended to be used
       interactively by users. Instead, users should run systemd and use
       systemctl, or not bother about the systemd enabled state in case they
       are not running systemd.
```

This PR replaces the regular `systemctl` call with `deb-systemd-invoke`.
2017-11-27 17:05:32 -08:00
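As a hedged illustration of the logic this change describes (the real change lives in the Debian packaging shell scripts, so this Go sketch, its `restartService` helper, and the unit name are hypothetical): prefer the helper when it is installed, and fall back to plain `systemctl`.

```go
package main

import (
	"log"
	"os/exec"
)

// restartService prefers deb-systemd-invoke, which consults
// /usr/sbin/policy-rc.d before acting, and falls back to systemctl when
// the Debian helper is not installed. Illustrative only; the actual
// packaging uses shell maintainer scripts.
func restartService(unit string) error {
	if _, err := exec.LookPath("deb-systemd-invoke"); err == nil {
		return exec.Command("deb-systemd-invoke", "restart", unit).Run()
	}
	return exec.Command("systemctl", "restart", unit).Run()
}

func main() {
	if err := restartService("telegraf.service"); err != nil {
		log.Fatal(err)
	}
}
```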
Lukasz Jagiello
6514399baf Add shadow-utils dependency to rpm package (#3505) 2017-11-27 17:02:16 -08:00
Dylan Meissner
27994abcb5 Jolokia2 handles unordered mbean object name properties (#3504) 2017-11-27 13:43:19 -08:00
Daniel Nelson
a9ada5f65b Update changelog 2017-11-27 12:32:36 -08:00
Laurent Gosselin
f758d0c6c3 Fix global variable collection when using interval_slow option in mysql input (#3500) 2017-11-27 12:29:51 -08:00
Daniel Nelson
7442b5645f Update changelog 2017-11-20 16:50:18 -08:00
Daniel Nelson
d5bd426e0c Fix snmp tools output parsing when they contain Windows eols (#3396) 2017-11-20 16:48:30 -08:00
Daniel Nelson
154b263f14 Update changelog 2017-11-20 16:27:18 -08:00
Leandro Piccilli
92ca661662 Add support for tags in the index name in elasticsearch output (#3470) 2017-11-20 16:25:36 -08:00
Daniel Nelson
54b0b9e727 Update changelog 2017-11-20 14:40:45 -08:00
aromeyer
dc2c8791d0 Add opensmtpd input plugin (#3449) 2017-11-20 14:39:13 -08:00
Daniel Nelson
367bbdeb7e Update changelog 2017-11-20 14:37:09 -08:00
aromeyer
e544d742f9 Add unbound input plugin (#3434) 2017-11-20 14:32:06 -08:00
Daniel Nelson
393c4c6c2d Update changelog 2017-11-20 14:23:16 -08:00
Leandro Piccilli
4d1bc620b2 Add index by week number to Elasticsearch output (#3490) 2017-11-20 14:22:29 -08:00
Daniel Nelson
db8e767f1f Update changelog 2017-11-20 14:20:05 -08:00
Chris Goller
afe05fcfef Use hexadecimal ids and lowercase names in zipkin input (#3488) 2017-11-20 14:19:32 -08:00
Daniel Nelson
9422cca2cc Update changelog 2017-11-16 16:51:02 -08:00
erayaslan
a06ee58785 Use MAX() instead of SUM() for latency measurements in sqlserver (#3471) 2017-11-16 16:49:51 -08:00
Daniel Nelson
b13eea89b1 Update changelog and add particle webhook to readme 2017-11-16 16:11:20 -08:00
David G. Simmons
b813e2ecae Add Particle Webhook Plugin (#3477) 2017-11-16 16:03:19 -08:00
Pierre Fersing
8364417009 Whitelist allowed char classes for graphite output (#3473) 2017-11-15 14:44:20 -08:00
Daniel Nelson
136c15ba33 Skip test requiring cratedb server in short test mode 2017-11-13 15:22:57 -08:00
Daniel Nelson
19839c0167 Update changelog 2017-11-13 15:09:05 -08:00
Daniel Nelson
72682973bd Fix typo in error message 2017-11-13 15:07:54 -08:00
faye-sama
a411306fba Fail metrics parsing on unescaped quotes (#3409)
Before this change, the Fields() method on a metric parsed from a line with
unescaped quotes could panic. This change makes such lines unparseable.

Fixes #3326
2017-11-13 15:06:47 -08:00
Patrick Hemmer
cbd346117a Add tests for procstat systemd & cgroup matching (#3469) 2017-11-13 14:45:31 -08:00
Daniel Nelson
181a56018f Update changelog 2017-11-13 11:02:01 -08:00
Patrick Hemmer
6ee6d55751 Add systemd unit pid and cgroup matching to procstat (#3459) 2017-11-13 10:59:27 -08:00
Daniel Nelson
ebd73b7279 Update changelog 2017-11-10 14:39:11 -08:00
Trevor Pounds
6a57395731 Compile with Go 1.9.2 (#3458) 2017-11-10 14:39:00 -08:00
Daniel Nelson
be13f69305 Update changelog 2017-11-09 14:05:36 -08:00
Felix Geisendörfer
62ec3e50d9 Add CrateDB output plugin (#3210) 2017-11-09 14:03:16 -08:00
Daniel Nelson
07297e80a8 Set 1.4.4 release date 2017-11-08 15:21:20 -08:00
Daniel Nelson
f0578b8c83 Update changelog 2017-11-07 16:48:44 -08:00
Lukasz Jagiello
493af043d3 Add Solr input plugin (#2019) 2017-11-07 16:44:09 -08:00
Daniel Nelson
47d013132a Update changelog 2017-11-07 14:37:04 -08:00
Pierre Tessier
dcff769fed Add modification_time field to filestat input plugin (#3305) 2017-11-07 14:32:48 -08:00
Daniel Nelson
5141f8a2a0 Update contributing documentation 2017-11-07 13:59:06 -08:00
Daniel Nelson
bb14589469 Update changelog 2017-11-07 13:59:06 -08:00
Daniel Nelson
b81bea658f Always ignore autofs filesystems in disk input (#3440) 2017-11-07 11:45:09 -08:00
Daniel Nelson
2c2dc97702 Update changelog 2017-11-07 11:43:15 -08:00
Daniel Nelson
cbbdf1043b Use current time if container read time is zero value (#3437) 2017-11-07 11:41:53 -08:00
Daniel Nelson
c55f285de0 Update changelog 2017-11-07 11:36:29 -08:00
Daniel Nelson
e1295c41c8 Update gopsutil to v2.17.10 (#3441) 2017-11-07 11:26:11 -08:00
Daniel Nelson
e0df62c27b Update changelog 2017-11-06 17:42:42 -08:00
Bob Shannon
fdf12ce6b4 Redact datadog API key in log output (#3420) 2017-11-06 17:41:14 -08:00
Daniel Nelson
e5a265c8c7 Revert particle webhook changes on master 2017-11-06 10:47:10 -08:00
David G. Simmons
112955a9f5 Merge branch 'master' of https://github.com/influxdata/telegraf into dn-particle-plugin 2017-11-04 09:30:17 -04:00
David G. Simmons
da0ca8a870 Revert "Undo Revert "Revert changes since 9b0af4478""
This reverts commit 6e6aefe5da.
2017-11-04 09:19:37 -04:00
David G. Simmons
6e6aefe5da Undo Revert "Revert changes since 9b0af4478"
This reverts commit 2c31345c70.
2017-11-04 09:14:52 -04:00
David G. Simmons
ae2635b547 Readme update 2017-11-04 08:43:13 -04:00
Daniel Nelson
c14478f025 Update http_listener certs 2017-11-03 21:52:45 -07:00
Daniel Nelson
2c31345c70 Revert changes since 9b0af4478 2017-11-03 21:10:56 -07:00
David G. Simmons
4a9fa7ef4b Merge branch 'master' of https://github.com/influxdata/telegraf into dn-particle-plugin 2017-11-03 13:48:45 -04:00
David G. Simmons
7db06d2aa4 Revert "New Particle Plugin"
This reverts commit ba462f5c94.
2017-11-03 13:28:54 -04:00
David G. Simmons
871fae6eb3 Revert "bug fixes and refactoring"
This reverts commit 86961cc814.
2017-11-03 13:28:35 -04:00
David G. Simmons
8e587e74f5 Revert "Update README.md"
This reverts commit 8ed00af10a.
2017-11-03 13:28:00 -04:00
David G. Simmons
440918a03b Revert "Updated README.md"
This reverts commit a6ada03b91.
2017-11-03 13:27:06 -04:00
David G. Simmons
f64b23b724 Revert "Small fixes"
This reverts commit a987118b01.
2017-11-03 13:27:06 -04:00
David G. Simmons
c11739d143 Revert "Updated Test JSON"
This reverts commit 92caf33fff.
2017-11-03 13:27:06 -04:00
David G. Simmons
883696c224 Revert "Updated Test JSON"
This reverts commit 92caf33fff.
2017-11-03 13:16:09 -04:00
David G. Simmons
0ea0519e89 Merge branch 'master' into dn-particle-plugin 2017-11-03 12:13:49 -04:00
David G. Simmons
4596ae70a9 ignore mac-files 2017-11-03 12:07:03 -04:00
David G. Simmons
92caf33fff Updated Test JSON 2017-11-03 12:07:03 -04:00
David G. Simmons
a987118b01 Small fixes
Hoping to pass CircleCI this time
2017-11-03 12:07:03 -04:00
David G. Simmons
a6ada03b91 Updated README.md 2017-11-03 12:07:03 -04:00
David G. Simmons
8ed00af10a Update README.md 2017-11-03 12:07:03 -04:00
David Norton
86961cc814 bug fixes and refactoring 2017-11-03 12:07:03 -04:00
David G. Simmons
ba462f5c94 New Particle Plugin 2017-11-03 12:07:03 -04:00
David G. Simmons
1d1d5e6089 Updated Test JSON 2017-11-02 17:21:50 -04:00
David G. Simmons
8560c2f88d Fixed Readme 2017-11-02 17:19:37 -04:00
David G. Simmons
5d135cece3 test json update 2017-11-02 14:19:01 -04:00
Daniel Nelson
9b0af4478b Remove incorrect comment about linker options 2017-11-01 18:17:51 -07:00
Daniel Nelson
26ccc1f205 Add teamspeak to readme and update changelog 2017-11-01 13:30:43 -07:00
Patric Kanngießer
76ed70340b Add Teamspeak 3 input plugin (#3315) 2017-11-01 13:27:59 -07:00
Maximilien Richer
5f215c22fe Fix typos in comments (#3415) 2017-10-31 17:00:06 -07:00
Maximilien Richer
63842d48fd Add config to input-varnish README (#3414) 2017-10-31 16:58:45 -07:00
Daniel Nelson
777b84d1dc Clarify what it means to filter metrics from processors 2017-10-30 16:32:39 -07:00
Daniel Nelson
c116af35c7 Update changelog 2017-10-30 15:35:34 -07:00
Daniel Nelson
fcfcc803b1 Use explicit schemas in mqtt_consumer input (#3401) 2017-10-30 15:33:20 -07:00
Daniel Nelson
4d5de8698b Update changelog 2017-10-30 13:53:45 -07:00
Aditya C S
23ad959d71 Add support for SSL settings to ElasticSearch output plugin (#3406) 2017-10-30 13:52:40 -07:00
Aditya C S
d9fa916711 Update docker plugin README (#3404) 2017-10-30 12:26:39 -07:00
Daniel Nelson
53b13a20d0 Update changelog 2017-10-27 11:55:17 -07:00
Maximilien Richer
ffa8a4a716 Add instance name option to varnish plugin (#3398)
This change adds a new configuration option to allow probing of
namespaced varnish instances, usually reached using the '-n' switch on
the varnish CLI.
2017-10-27 11:53:59 -07:00
Daniel Nelson
8b4708c82a Update changelog 2017-10-26 13:37:54 -07:00
Vladimir S
88ec171293 Perform DNS lookup before ping (#3385) 2017-10-26 13:35:37 -07:00
Daniel Nelson
5885ef2c1c Update changelog 2017-10-25 15:29:56 -07:00
Daniel Nelson
a519abf13f Gather concurrently from snmp agents (#3365) 2017-10-25 15:28:55 -07:00
Daniel Nelson
6ea61b55d9 Set release date for 1.4.3 2017-10-25 14:15:10 -07:00
Daniel Nelson
206397d475 Update changelog 2017-10-24 16:31:22 -07:00
Jeremy Doupe
a6797a44d5 Add history and summary types to telegraf and prometheus plugins (#3337) 2017-10-24 16:28:52 -07:00
Daniel Nelson
13c1f1524a Update changelog 2017-10-24 16:25:49 -07:00
Daniel Nelson
9a062498e7 Use golang.org/x/sys/unix instead of syscall in diskio (#3384) 2017-10-24 16:22:31 -07:00
Daniel Nelson
f64cf89db1 Update changelog 2017-10-24 15:46:47 -07:00
Daniel Nelson
6d1777276c If the connector name cannot be unquoted, use the raw value (#3371) 2017-10-24 15:36:23 -07:00
Daniel Nelson
65580759fc Update changelog 2017-10-23 12:36:31 -07:00
Sergei Smolianinov
d2f9fc7d8c Fix ACL token usage in consul input plugin (#3376) 2017-10-23 12:31:27 -07:00
Daniel Nelson
77cc071796 Update changelog 2017-10-19 17:06:14 -07:00
Daniel Nelson
4deb6238a3 Add support for decimal timestamps to ts-epoch modifier (#3358) 2017-10-19 16:36:32 -07:00
Daniel Nelson
7088d98304 Update changelog 2017-10-19 16:27:29 -07:00
Daniel Nelson
4243403432 Remove warning when JSON contains null value (#3359) 2017-10-19 16:25:58 -07:00
Mamat Rahmat
3bbc2beeed Fix small typo in documentation (#3364) 2017-10-19 14:47:40 -07:00
Daniel Nelson
0e6a70b199 Update changelog 2017-10-18 17:43:01 -07:00
Daniel Nelson
ec4efe5b03 Use labels in prometheus output for string fields (#3350) 2017-10-18 17:42:30 -07:00
Daniel Nelson
adb1f5588c Update changelog 2017-10-18 14:53:34 -07:00
Daniel Nelson
6e5915c59f Fix prometheus passthrough for existing value types (#3351) 2017-10-18 14:51:08 -07:00
Daniel Nelson
9b59cdd10e Update changelog 2017-10-18 13:57:58 -07:00
clheikes
02baa696c3 Fix TELEGRAF_OPTS expansion in systemd service unit (#3354) 2017-10-18 13:57:32 -07:00
Daniel Nelson
a4fa19252f Update changelog 2017-10-18 12:47:58 -07:00
Daniel Nelson
7ba376964c Update changelog 2017-10-18 12:25:46 -07:00
Ayrdrie
a75ab3e190 Fix mongodb input panic when restarting mongodb (#3355) 2017-10-18 12:24:30 -07:00
Daniel Nelson
2208657d73 Add release date info to FAQ 2017-10-17 10:43:53 -07:00
Daniel Nelson
9d8e935734 Update changelog 2017-10-16 14:26:12 -07:00
Pierre Fersing
f5a9d1bc75 Fix CPU system plugin gets stuck after system suspend (#3342) 2017-10-16 14:25:00 -07:00
Daniel Nelson
4b05edea53 Update changelog 2017-10-16 14:19:16 -07:00
Craig Wickesser
246ffab3e0 Add UDP IPv6 support to statsd input (#3344) 2017-10-16 14:18:36 -07:00
Daniel Nelson
3ea41e885c Update changelog 2017-10-16 11:27:00 -07:00
Daniel Nelson
1f348037b7 Fix case sensitivity issue in sqlserver query (#3336) 2017-10-16 11:26:16 -07:00
Daniel Nelson
86f19dee2b Fix typo in ipmi_sensor readme 2017-10-16 11:10:06 -07:00
Daniel Nelson
a1796989f7 Add ipmi_sensor permission documentation 2017-10-13 13:53:18 -07:00
Daniel Nelson
6b67fedfdc Remove timing sensitive riemann test 2017-10-13 11:30:30 -07:00
Daniel Nelson
5cd3327d5f Update changelog 2017-10-13 11:12:27 -07:00
Adam Johnson
bf9f94eb9d Fix cloudwatch output requires unneeded permissions (#3335) 2017-10-13 11:04:40 -07:00
Daniel Nelson
0f9f757da7 Update changelog 2017-10-12 17:26:58 -07:00
Windkit Li
2f8d0f4d47 Fix snmpwalk address format in leofs input (#3328) 2017-10-12 17:26:14 -07:00
Daniel Nelson
024dea2ff9 Update changelog 2017-10-12 15:52:01 -07:00
Daniel Nelson
fa25e123d8 Fix container name filters in docker input (#3331) 2017-10-12 15:50:09 -07:00
Patrick Hemmer
bed14e5037 Fix documented equation for diskio average queue depth (#3334) 2017-10-12 15:08:51 -07:00
Daniel Nelson
c74c29b164 Remove suggested plugins from readme.
These are confusing since we don't support all of the examples.
2017-10-11 12:56:33 -07:00
Daniel Nelson
4e0c8e6026 Set 1.4.2 release date 2017-10-10 13:29:31 -07:00
Daniel Nelson
d7ea83f39b Update readme and changelog for basicstats aggregator 2017-10-10 12:04:41 -07:00
Toni Moreno
b641f06552 Add new basicstats aggregator (#2167) 2017-10-10 12:02:01 -07:00
Pierre Tessier
c7a6d4eaa4 Fix link for wavefront plugin in changelog (#3317) 2017-10-10 11:21:46 -07:00
Daniel Nelson
61b0336d97 Use 5 second timeout overhead when waiting for ping to complete 2017-10-09 15:09:07 -07:00
Daniel Nelson
761544f56d Add HasPoint method to testutil.Accumulator 2017-10-09 15:02:57 -07:00
Daniel Nelson
0f452ad0df Document /etc/default/telegraf file 2017-10-06 16:57:57 -07:00
Daniel Nelson
4093bc98b7 Update changelog 2017-10-06 16:17:09 -07:00
Christian Meilke
75567d5b51 Add ability to limit node stats in elasticsearch input (#3304) 2017-10-06 16:16:32 -07:00
Daniel Nelson
59bb31e765 Use golang 1.9.1 2017-10-05 16:19:53 -07:00
Daniel Nelson
13c7802b84 Update changelog 2017-10-05 16:15:43 -07:00
Daniel Nelson
cce40c515a Use chunked transfer encoding in InfluxDB output (#3307) 2017-10-05 16:14:21 -07:00
Daniel Nelson
6e1fa559a3 Update changelog 2017-10-05 16:05:51 -07:00
Daniel Nelson
f56dda0ac8 Fix panic in cpu input if number of cpus changes (#3306) 2017-10-05 16:02:21 -07:00
Daniel Nelson
4fab572b6b Release buffer back to pool earlier 2017-10-05 12:12:14 -07:00
Daniel Nelson
b9f319529f Update changelog 2017-10-04 15:30:11 -07:00
Christian Meilke
0bb32570ba Add cluster health level configuration to elasticsearch input (#3269) 2017-10-04 15:29:32 -07:00
Daniel Nelson
a4ea4c7a25 Add smart to changelog and readme 2017-10-04 15:18:15 -07:00
Rickard von Essen
e69c3f9d1c Add smart input plugin for collecting S.M.A.R.T. data (#2449) 2017-10-04 15:15:58 -07:00
Daniel Nelson
002ccf3295 Update changelog 2017-10-03 15:25:19 -07:00
Daniel Nelson
a163effa6d Add support for proxy environment variables to http_response (#3302) 2017-10-03 15:22:57 -07:00
Daniel Nelson
93ff811358 Update changelog 2017-10-03 14:37:02 -07:00
Aditya C S
dd4299e925 Collect Docker Swarm service metrics in docker input plugin (#3141) 2017-10-03 14:36:26 -07:00
Daniel Nelson
b610276485 Skip invalid urls in nginx input 2017-10-03 10:54:31 -07:00
David Norton
6aee40fac1 bug fixes and refactoring 2017-10-03 09:07:15 -04:00
Pierre Tessier
79f66dc5b3 Added newline to each metric line in wavefront output (#3290) 2017-10-02 17:42:21 -07:00
Daniel Nelson
0a55ab42b4 Update changelog 2017-10-02 17:39:32 -07:00
Jimena Cabrera Notari
aba269e94c Add extra wired tiger cache metrics to mongodb input (#3281) 2017-10-02 17:38:51 -07:00
Daniel Nelson
f67350107d Update changelog 2017-10-02 17:16:38 -07:00
Daniel Nelson
8e3ed96d6f Fix case sensitivity error in sqlserver input (#3287) 2017-10-02 17:15:34 -07:00
Daniel Nelson
771fbc311a Regenerate TLS certs due to expiration 2017-10-02 15:44:55 -07:00
David G. Simmons
d7b88b10ad New Particle Plugin 2017-10-02 16:50:23 -04:00
Daniel Nelson
cdca81c999 Fix mqtt_consumer connection_timeout test 2017-10-02 12:28:31 -07:00
Daniel Nelson
ed6f438c9d Add Wavefront output to changelog and readme 2017-09-29 16:15:48 -07:00
Pierre Tessier
366f3f560c Add Wavefront output plugin (#3160) 2017-09-29 16:13:08 -07:00
Daniel Nelson
e4f5547d37 Update example config 2017-09-29 16:09:31 -07:00
Daniel Nelson
e1bf655ef9 Add deprecation notice to jolokia sample config 2017-09-29 16:08:31 -07:00
Daniel Nelson
29b6f4168c Update changelog 2017-09-29 15:59:56 -07:00
Daniel Nelson
3d62e045af Fix format of connection_timeout in mqtt_consumer (#3286) 2017-09-29 15:58:38 -07:00
Daniel Nelson
ad4a5aa7a0 Document how to exclude kubernetes annotation 2017-09-29 14:07:19 -07:00
Daniel Nelson
f2cb1da7cf Update changelog 2017-09-29 11:50:15 -07:00
François de Metz
c3d15f0aff Add support for the rollbar occurrence webhook event. (#1692) 2017-09-29 11:49:22 -07:00
David G. Simmons
b2453e3ec3 Revert "New Particle.io Plugin for Telegraf"
This reverts commit c3b11f9cfb.
Accidentally pushed to master, instead of my fork. Backing it out.
2017-09-29 12:57:13 -04:00
David G. Simmons
c3b11f9cfb New Particle.io Plugin for Telegraf
Only the tests need to be fixed.
2017-09-29 12:45:06 -04:00
Daniel Nelson
cd1791494a Update changelog 2017-09-27 11:38:43 -07:00
Daniel Nelson
402460f038 Use underscore as default opentsdb separator
Preserves backwards compatibility
2017-09-27 11:36:41 -07:00
owlet123
f85db90780 Add configurable separator for metrics and fields in opentsdb output (#3106) 2017-09-27 11:29:40 -07:00
Daniel Nelson
9bddd50a64 Add deprecation notice to jolokia plugin 2017-09-27 10:52:10 -07:00
Daniel Nelson
b8a0b8461a Update changelog and readme for jolokia2 plugin 2017-09-26 17:42:38 -07:00
Dylan Meissner
ee26191eb5 Add redesigned Jolokia input plugin (#2278) 2017-09-26 17:34:46 -07:00
Daniel Nelson
cadafa6405 Update changelog 2017-09-26 16:03:04 -07:00
Daniel Nelson
22a9ffbb9d Allow JSON data format to contain zero metrics (#3268) 2017-09-26 15:58:33 -07:00
Daniel Nelson
2e1457a496 Update changelog 2017-09-26 15:38:22 -07:00
Daniel Nelson
8614445235 Fix parsing of JSON with a UTF8 BOM in httpjson (#3267) 2017-09-26 15:36:00 -07:00
Daniel Nelson
f23d1eb078 Update changelog 2017-09-26 15:28:07 -07:00
Daniel Nelson
ef5c12bd86 Fix dmcache tests with 32bit int 2017-09-26 15:25:57 -07:00
Daniel Nelson
c013cc1497 Fix cgroup tests with 32bit int 2017-09-26 15:25:57 -07:00
Daniel Nelson
bb665cf013 Fix ceph tests with 32bit int 2017-09-26 15:25:57 -07:00
Daniel Nelson
5dff5932fd Fix nginx_plus tests with 32bit int 2017-09-26 15:25:57 -07:00
Daniel Nelson
f823fc73f6 Allow 64bit integers in kernel_vmstat 2017-09-26 15:25:57 -07:00
Daniel Nelson
fd702e6bb8 Set 1.4.1 release date in changelog 2017-09-26 14:19:02 -07:00
Daniel Nelson
50024c1860 Update changelog 2017-09-25 16:34:04 -07:00
Lukasz Jagiello
a4b8805f7f Add support for NSQLookupd to nsq_consumer (#3215) 2017-09-25 16:33:05 -07:00
James
837e6b1a32 Add additional numeric type handling tests for postgresql_extensible (#3066) 2017-09-25 10:58:10 -07:00
Agniva De Sarker
063f3f68df Improve statsd plugin perf by using a byte buffer pool (#3254) 2017-09-25 10:55:02 -07:00
Daniel Nelson
b24663b0bd Remove nightly versioning scheme 2017-09-22 18:07:08 -07:00
Daniel Nelson
366bda45c3 Remove out of date Vagrantfile 2017-09-22 17:35:58 -07:00
Daniel Nelson
c010fb1c3c Fix build versioning; add dev.docker file 2017-09-22 17:35:58 -07:00
Daniel Nelson
08c197f73a Fix golang version 2017-09-22 17:35:58 -07:00
Daniel Nelson
cafb22d145 Fix unittest for golang 1.9 2017-09-22 17:35:58 -07:00
Christian Meilke
73df179bd6 Tag original URL for k8s services in prometheus input (#3257) 2017-09-22 17:26:19 -07:00
Daniel Nelson
c3bea59f3b Update changelog 2017-09-22 11:46:47 -07:00
Daniel Nelson
52393582d2 Unlock Statsd when stopping to prevent deadlock (#3258) 2017-09-22 11:45:45 -07:00
Daniel Nelson
ce29ca78e3 Add nginx_plus to changelog and readme 2017-09-19 11:49:55 -07:00
Patrick O'Brien
6e6ed075dc Add new nginx_plus input plugin (#3214) 2017-09-19 11:46:01 -07:00
Daniel Nelson
c0a4bd99a1 Update changelog 2017-09-19 11:27:57 -07:00
Paulo Cabido
decb09e760 Add configurable metrics endpoint to prometheus output (#3245) 2017-09-19 11:27:11 -07:00
Daniel Nelson
a63f80e017 Build with go 1.9 on circleci 2017-09-18 16:30:09 -07:00
Daniel Nelson
daee48c861 Update prometheus input documentation 2017-09-18 16:21:45 -07:00
Daniel Nelson
dea8bf7ac0 Update changelog 2017-09-18 15:07:18 -07:00
Christian Meilke
292c5229bf Add support for k8s service DNS discovery to prometheus input (#3236) 2017-09-18 15:06:11 -07:00
Daniel Nelson
0048bf2120 Update changelog 2017-09-18 14:25:17 -07:00
Daniel Nelson
b8e134cd37 Fix arm64 packages contain 32-bit executable (#3246) 2017-09-18 14:22:54 -07:00
Patrick Hemmer
0339dc7faf Add process resource limits to procstat input (#3231) 2017-09-15 11:16:44 -07:00
Daniel Nelson
575a07c985 Update input plugin example readme. 2017-09-14 15:50:55 -07:00
Daniel Nelson
b94cda6b46 Update changelog 2017-09-14 15:28:47 -07:00
Trevor Pounds
73372872c2 Fix panic in statsd p100 calculation (#3230) 2017-09-14 15:27:42 -07:00
Daniel Nelson
103ae3b710 Update changelog 2017-09-14 15:22:46 -07:00
Trevor Pounds
171332c579 Add support for timing sums in statsd input (#3234) 2017-09-14 15:21:54 -07:00
Daniel Nelson
875ab3c4b7 Update changelog 2017-09-14 15:05:03 -07:00
Mark Wilkinson - m82labs
1c5ebd4be3 Fix duplicate keys in perf counters sqlserver query (#3175) 2017-09-14 15:04:13 -07:00
Daniel Nelson
103d24bfba Update changelog 2017-09-14 15:00:55 -07:00
Daniel Nelson
d5f48e3e96 Fix skipped line with empty target in iptables (#3235) 2017-09-14 14:59:28 -07:00
Daniel Nelson
7a41d2c586 Update changelog 2017-09-14 13:06:58 -07:00
Trevor Pounds
fa1982323a Fix counter and gauge metric types. (#3232) 2017-09-14 13:05:37 -07:00
Daniel Nelson
cdf63c5776 Update changelog 2017-09-13 17:31:39 -07:00
Daniel Nelson
0a8c2e0b3b Whitelist allowed char classes for opentsdb output. (#3227) 2017-09-13 17:30:52 -07:00
Daniel Nelson
9197a59cdb Update changelog 2017-09-13 17:28:33 -07:00
Dimitris Rozakis
9c8f4afa37 Respect path prefix in influx output uri (#3224) 2017-09-13 17:27:01 -07:00
Daniel Nelson
eebee9759f Fix fluentd test 2017-09-12 17:57:55 -07:00
Daniel Nelson
ee85f9275e Update changelog 2017-09-12 17:27:50 -07:00
Daniel Nelson
4e53464fe2 Remove unneeded error check 2017-09-12 17:24:57 -07:00
Adrián López
2163981872 Add timeout option for kubernetes (#3211) 2017-09-12 17:22:15 -07:00
Daniel Nelson
c5cfde667a Update changelog 2017-09-12 17:17:41 -07:00
Daniel Nelson
8a68e7424c Fix optional field types in fluentd input 2017-09-12 17:15:19 -07:00
Daniel Nelson
cc63b3b667 Update changelog 2017-09-11 12:27:39 -07:00
DanKans
5488f4b3ac Fix MQTT input exits if Broker is not available on startup (#3202) 2017-09-11 12:24:51 -07:00
Daniel Nelson
14a4b108b4 Update changelog 2017-09-11 11:57:18 -07:00
Daniel Nelson
32f313a6a6 Add polling method to logparser and tail inputs (#3213) 2017-09-11 11:56:04 -07:00
Daniel Nelson
c720200883 Update changelog 2017-09-11 11:54:18 -07:00
DanKans
f62e543003 Fix address already in use with webhooks input during reload (#3206) 2017-09-11 11:51:45 -07:00
Daniel Nelson
be83c8c8f0 Update changelog 2017-09-08 16:02:15 -07:00
Jeff Nickoloff
c809debfd4 TLS and MTLS enhancements to HTTPListener input plugin (#3191) 2017-09-08 16:01:16 -07:00
Daniel Nelson
247c2e71fd Update changelog 2017-09-08 15:36:26 -07:00
Daniel Nelson
7b08f9d099 Add support for standard proxy env vars in outputs. (#3212) 2017-09-08 15:35:20 -07:00
Daniel Nelson
d0b690f040 Fix short tests on darwin (#3099) 2017-09-08 13:03:37 -07:00
Daniel Nelson
98ca22597d Update changelog 2017-09-06 14:29:03 -07:00
Raúl Benencia
99dfc69fbb Include mount mode option in disk metrics (#3027) 2017-09-06 14:28:11 -07:00
Daniel Nelson
144862354a Update changelog 2017-09-06 14:20:38 -07:00
Daniel Nelson
402a0f16e1 Fix typo 2017-09-06 14:19:42 -07:00
Pavel Gurkov
5d4eec606f Add Kafka output plugin topic_suffix option (#3196) 2017-09-06 14:18:26 -07:00
Daniel Nelson
ab1c11b06d Add 1.4.0 release date 2017-09-05 17:14:11 -07:00
Daniel Nelson
864ea1efaf Improve question title in FAQ 2017-09-05 17:12:36 -07:00
Daniel Nelson
4fb1c3a2bc Add FAQ doc with dns resolver information 2017-09-05 13:12:11 -07:00
Daniel Nelson
9796d3c99d Use ip address for default InfluxDB ip in config
Helps with initial setup if localhost cannot be resolved due to the pure
Go resolver.
2017-09-05 12:55:21 -07:00
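A small standard-library sketch of the reasoning here, assuming nothing beyond `net`; the snippet is illustrative, not Telegraf code. With the pure Go resolver, a hostname such as `localhost` still goes through a lookup that can fail on unusual systems, while a literal IP address bypasses resolution entirely.

```go
package main

import (
	"context"
	"fmt"
	"net"
	"time"
)

func main() {
	// Force the pure Go resolver, the case the commit message describes.
	r := &net.Resolver{PreferGo: true}
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	// This lookup can fail on hosts with unusual /etc/hosts setups...
	addrs, err := r.LookupHost(ctx, "localhost")
	fmt.Println("localhost ->", addrs, err)

	// ...whereas a literal address involves no resolver at all.
	fmt.Println("literal   ->", net.ParseIP("127.0.0.1"))
}
```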
Daniel Nelson
98e784faf3 Sort metrics before comparing in graphite test 2017-09-05 12:50:30 -07:00
rdxmb
16d6011ca1 Fix docker image name in docs (#3193) 2017-09-05 11:44:51 -07:00
Daniel Nelson
f43af72785 Update changelog 2017-08-31 13:43:47 -07:00
Daniel Nelson
28d16188b3 Fix panic when handling string fields with escapes (#3188) 2017-08-30 21:16:37 -07:00
Daniel Nelson
19f3264073 Update changelog 2017-08-29 16:27:02 -07:00
Daniel Nelson
8225bd0173 Convert bool fields to int in graphite serializer 2017-08-29 16:22:03 -07:00
Seua Polyakov
3806424aab Skip non-numerical values in graphite format (#3179) 2017-08-29 15:59:38 -07:00
Daniel Nelson
ef8876b70b Move changelog item to 1.4 2017-08-28 17:17:03 -07:00
Daniel Nelson
5fd8ab36d3 Update changelog 2017-08-28 17:08:44 -07:00
Jeff Nickoloff
ac1fa05672 Added CloudWatch metric constraint validation (#3183) 2017-08-28 16:56:03 -07:00
Daniel Nelson
73d57c8a02 Update changelog 2017-08-28 16:30:51 -07:00
Nevins
95fe0e43f5 Add support for sharding based on metric name (#3170) 2017-08-28 16:24:38 -07:00
Daniel Nelson
02f7b0d030 Update changelog 2017-08-28 16:11:00 -07:00
Dylan Meissner
a9a40cbf87 HTTP headers can be added to InfluxDB output (#3182) 2017-08-28 16:08:50 -07:00
Daniel Nelson
a98496591a Update changelog 2017-08-25 18:08:33 -07:00
Ashton Kinslow
0a6541dfa8 Fix NSQ input plugin when used with version 1.0.0-compat 2017-08-25 18:06:48 -07:00
Daniel Nelson
8ecc58639a Close response bodies in http_listener test 2017-08-25 13:58:45 -07:00
Daniel Nelson
6abecd0ac7 Update changelog 2017-08-25 12:59:19 -07:00
Rickard von Essen
0502b65316 Don't fail parsing of zpool stats if pool health is UNAVAIL on FreeBSD (#3149) 2017-08-25 12:57:35 -07:00
Daniel Nelson
e400fcf5da Update changelog 2017-08-25 11:55:59 -07:00
Jan Willem Janssen
d449833de9 Fix parsing of SHM remotes in ntpq input (#3163) 2017-08-25 11:54:06 -07:00
Daniel Nelson
58751fa4df Update fail2ban documentation 2017-08-25 11:42:07 -07:00
Daniel Nelson
656ce31d98 Fix amqp_consumer data_format documentation
closes #3164
2017-08-24 13:17:29 -07:00
Daniel Nelson
485e273187 Add links to nightly builds 2017-08-23 15:42:25 -07:00
Daniel Nelson
f95c239a3f Update changelog 2017-08-23 15:21:48 -07:00
Daniel Nelson
ae24a0754b Escape backslash within string fields (#3161) 2017-08-23 15:17:26 -07:00
Daniel Nelson
f253623231 Update changelog 2017-08-23 15:16:04 -07:00
Rickard von Essen
f0db4fd901 Enable hddtemp on all platforms (#3153)
Also disables dmcache tests on non-linux.
2017-08-23 15:14:32 -07:00
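Per-platform gating like this is typically done with Go build constraints. A hypothetical sketch (file name, package, and function are illustrative, not the actual hddtemp or dmcache layout):

```go
// +build linux

// This file (say, example_linux.go) is compiled only on Linux; a sibling
// file guarded by "// +build !linux" can provide a stub so the package
// builds everywhere.
package main

import "fmt"

// gather stands in for platform-specific collection code.
func gather() string { return "linux-only collection" }

func main() {
	fmt.Println(gather())
}
```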
Daniel Nelson
8c68bd9ddb Update changelog 2017-08-22 17:03:00 -07:00
Daniel Nelson
9fc7220c2e Don't start Telegraf on install in Amazon Linux (#3156) 2017-08-22 17:01:59 -07:00
Daniel Nelson
6597b55477 Update changelog 2017-08-22 16:55:15 -07:00
Daniel Nelson
1f4a997164 Don't retry points beyond retention policy (#3155) 2017-08-22 16:52:26 -07:00
Daniel Nelson
5224b526f4 Hide output of git describe 2017-08-22 13:32:52 -07:00
Rickard von Essen
371638ce56 Enable fail2ban on all platforms (#3151) 2017-08-22 12:58:00 -07:00
Rickard von Essen
53c5d3a290 Enable chrony for all platforms (#3152) 2017-08-22 11:49:51 -07:00
Daniel Nelson
b480022330 Update config directory documentation 2017-08-22 11:33:26 -07:00
Daniel Nelson
ccf17a9f93 Cache intermediate objects during build 2017-08-21 17:26:55 -07:00
Chris Goller
13a6b917c3 Add JSON input support to zipkin plugin (#3150) 2017-08-21 17:24:54 -07:00
Daniel Nelson
1f1e9cc49f Add win_services to the readme 2017-08-18 17:57:30 -07:00
Daniel Nelson
70c2b83f00 Update histogram aggregator documentation (#3133) 2017-08-18 13:24:05 -07:00
Daniel Nelson
4de264ffc8 Remove version test 2017-08-18 11:08:48 -07:00
Daniel Nelson
36c2c88fd2 Update example config 2017-08-17 18:54:06 -07:00
Daniel Nelson
e31d91f0f9 Add queues to rabbitmq documentation (#3135) 2017-08-17 18:52:27 -07:00
Daniel Nelson
3006ccbf2f Update master for 1.5 development 2017-08-16 16:54:15 -07:00
Daniel Nelson
8b588ea37f Update sample config 2017-08-16 16:46:40 -07:00
303 changed files with 22841 additions and 2729 deletions

CHANGELOG.md

@@ -1,4 +1,164 @@
## v1.4 [unreleased]
## v1.5 [2017-12-14]
### New Plugins
- [basicstats](./plugins/aggregators/basicstats/README.md) - Thanks to @toni-moreno
- [bond](./plugins/inputs/bond/README.md) - Thanks to @ildarsv
- [cratedb](./plugins/outputs/cratedb/README.md) - Thanks to @felixge
- [dcos](./plugins/inputs/dcos/README.md) - Thanks to @influxdata
- [jolokia2](./plugins/inputs/jolokia2/README.md) - Thanks to @dylanmei
- [nginx_plus](./plugins/inputs/nginx_plus/README.md) - Thanks to @mplonka & @poblahblahblah
- [opensmtpd](./plugins/inputs/opensmtpd/README.md) - Thanks to @aromeyer
- [particle](./plugins/inputs/webhooks/particle/README.md) - Thanks to @davidgs
- [pf](./plugins/inputs/pf/README.md) - Thanks to @nferch
- [postfix](./plugins/inputs/postfix/README.md) - Thanks to @phemmer
- [smart](./plugins/inputs/smart/README.md) - Thanks to @rickard-von-essen
- [solr](./plugins/inputs/solr/README.md) - Thanks to @ljagiello
- [teamspeak](./plugins/inputs/teamspeak/README.md) - Thanks to @p4ddy1
- [unbound](./plugins/inputs/unbound/README.md) - Thanks to @aromeyer
- [wavefront](./plugins/outputs/wavefront/README.md) - Thanks to @puckpuck
### Release Notes
- In the `kinesis` output, use of the `partition_key` and
`use_random_partitionkey` options has been deprecated in favor of the
`partition` subtable. This allows for more flexible methods to set the
partition key such as by metric name or by tag.
- With the release of the new improved `jolokia2` input, the legacy `jolokia`
plugin is deprecated and will be removed in a future release. Users of this
plugin are encouraged to update to the new `jolokia2` plugin.
### Features
- [#3170](https://github.com/influxdata/telegraf/pull/3170): Add support for sharding based on metric name.
- [#3196](https://github.com/influxdata/telegraf/pull/3196): Add Kafka output plugin topic_suffix option.
- [#3027](https://github.com/influxdata/telegraf/pull/3027): Include mount mode option in disk metrics.
- [#3191](https://github.com/influxdata/telegraf/pull/3191): TLS and MTLS enhancements to HTTPListener input plugin.
- [#3213](https://github.com/influxdata/telegraf/pull/3213): Add polling method to logparser and tail inputs.
- [#3211](https://github.com/influxdata/telegraf/pull/3211): Add timeout option for kubernetes input.
- [#3234](https://github.com/influxdata/telegraf/pull/3234): Add support for timing sums in statsd input.
- [#2617](https://github.com/influxdata/telegraf/issues/2617): Add resource limit monitoring to procstat.
- [#3236](https://github.com/influxdata/telegraf/pull/3236): Add support for k8s service DNS discovery to prometheus input.
- [#3245](https://github.com/influxdata/telegraf/pull/3245): Add configurable metrics endpoint to prometheus output.
- [#3214](https://github.com/influxdata/telegraf/pull/3214): Add new nginx_plus input plugin.
- [#3215](https://github.com/influxdata/telegraf/pull/3215): Add support for NSQLookupd to nsq_consumer.
- [#2278](https://github.com/influxdata/telegraf/pull/2278): Add redesigned Jolokia input plugin.
- [#3106](https://github.com/influxdata/telegraf/pull/3106): Add configurable separator for metrics and fields in opentsdb output.
- [#1692](https://github.com/influxdata/telegraf/pull/1692): Add support for the rollbar occurrence webhook event.
- [#3160](https://github.com/influxdata/telegraf/pull/3160): Add Wavefront output plugin.
- [#3281](https://github.com/influxdata/telegraf/pull/3281): Add extra wired tiger cache metrics to mongodb input.
- [#3141](https://github.com/influxdata/telegraf/pull/3141): Collect Docker Swarm service metrics in docker input plugin.
- [#2449](https://github.com/influxdata/telegraf/pull/2449): Add smart input plugin for collecting S.M.A.R.T. data.
- [#3269](https://github.com/influxdata/telegraf/pull/3269): Add cluster health level configuration to elasticsearch input.
- [#3304](https://github.com/influxdata/telegraf/pull/3304): Add ability to limit node stats in elasticsearch input.
- [#2167](https://github.com/influxdata/telegraf/pull/2167): Add new basicstats aggregator.
- [#3344](https://github.com/influxdata/telegraf/pull/3344): Add UDP IPv6 support to statsd input.
- [#3350](https://github.com/influxdata/telegraf/pull/3350): Use labels in prometheus output for string fields.
- [#3358](https://github.com/influxdata/telegraf/pull/3358): Add support for decimal timestamps to ts-epoch modifier.
- [#3337](https://github.com/influxdata/telegraf/pull/3337): Add histogram and summary types and use in prometheus plugins.
- [#3365](https://github.com/influxdata/telegraf/pull/3365): Gather concurrently from snmp agents.
- [#3333](https://github.com/influxdata/telegraf/issues/3333): Perform DNS lookup before ping and report result.
- [#3398](https://github.com/influxdata/telegraf/issues/3398): Add instance name option to varnish plugin.
- [#3406](https://github.com/influxdata/telegraf/pull/3406): Add support for SSL settings to ElasticSearch output plugin.
- [#3315](https://github.com/influxdata/telegraf/pull/3315): Add Teamspeak 3 input plugin.
- [#3305](https://github.com/influxdata/telegraf/pull/3305): Add modification_time field to filestat input plugin.
- [#2019](https://github.com/influxdata/telegraf/pull/2019): Add Solr input plugin.
- [#3210](https://github.com/influxdata/telegraf/pull/3210): Add CrateDB output plugin.
- [#3459](https://github.com/influxdata/telegraf/pull/3459): Add systemd unit pid and cgroup matching to procstat.
- [#3477](https://github.com/influxdata/telegraf/pull/3477): Add Particle Webhook Plugin.
- [#3471](https://github.com/influxdata/telegraf/pull/3471): Use MAX() instead of SUM() for latency measurements in sqlserver.
- [#3490](https://github.com/influxdata/telegraf/pull/3490): Add index by week number to Elasticsearch output.
- [#3434](https://github.com/influxdata/telegraf/pull/3434): Add unbound input plugin.
- [#3449](https://github.com/influxdata/telegraf/pull/3449): Add opensmtpd input plugin.
- [#3470](https://github.com/influxdata/telegraf/pull/3470): Add support for tags in the index name in elasticsearch output.
- [#2553](https://github.com/influxdata/telegraf/pull/2553): Add postfix input plugin.
- [#3424](https://github.com/influxdata/telegraf/pull/3424): Add bond input plugin.
- [#3518](https://github.com/influxdata/telegraf/pull/3518): Add slab to mem plugin.
- [#3519](https://github.com/influxdata/telegraf/pull/3519): Add input plugin for DC/OS.
- [#3140](https://github.com/influxdata/telegraf/pull/3140): Add support for glob patterns in net input plugin.
- [#3405](https://github.com/influxdata/telegraf/pull/3405): Add input plugin for OpenBSD/FreeBSD pf.
- [#3528](https://github.com/influxdata/telegraf/pull/3528): Add option to amqp output to publish persistent messages.
- [#3530](https://github.com/influxdata/telegraf/pull/3530): Support I (idle) process state on procfs+Linux.
### Bugfixes
- [#3136](https://github.com/influxdata/telegraf/issues/3136): Fix webhooks input address in use during reload.
- [#3258](https://github.com/influxdata/telegraf/issues/3258): Unlock Statsd when stopping to prevent deadlock.
- [#3319](https://github.com/influxdata/telegraf/issues/3319): Fix cloudwatch output requires unneeded permissions.
- [#3351](https://github.com/influxdata/telegraf/issues/3351): Fix prometheus passthrough for existing value types.
- [#3430](https://github.com/influxdata/telegraf/issues/3430): Always ignore autofs filesystems in disk input.
- [#3326](https://github.com/influxdata/telegraf/issues/3326): Fail metrics parsing on unescaped quotes.
- [#3473](https://github.com/influxdata/telegraf/pull/3473): Whitelist allowed char classes for graphite output.
- [#3488](https://github.com/influxdata/telegraf/pull/3488): Use hexadecimal ids and lowercase names in zipkin input.
- [#3263](https://github.com/influxdata/telegraf/issues/3263): Fix snmp-tools output parsing with Windows EOLs.
- [#3447](https://github.com/influxdata/telegraf/issues/3447): Add shadow-utils dependency to rpm package.
- [#3448](https://github.com/influxdata/telegraf/issues/3448): Use deb-systemd-invoke to restart service.
- [#3553](https://github.com/influxdata/telegraf/issues/3553): Fix kafka_consumer outside range of offsets error.
- [#3568](https://github.com/influxdata/telegraf/issues/3568): Fix separation of multiple prometheus_client outputs.
- [#3577](https://github.com/influxdata/telegraf/issues/3577): Don't add system input uptime_format as a counter.
## v1.4.5 [2017-12-01]
### Bugfixes
- [#3500](https://github.com/influxdata/telegraf/issues/3500): Fix global variable collection when using interval_slow option in mysql input.
- [#3486](https://github.com/influxdata/telegraf/issues/3486): Fix error getting net connections info in netstat input.
- [#3529](https://github.com/influxdata/telegraf/issues/3529): Fix HOST_MOUNT_PREFIX in docker with disk input.
## v1.4.4 [2017-11-08]
### Bugfixes
- [#3401](https://github.com/influxdata/telegraf/pull/3401): Use schema specified in mqtt_consumer input.
- [#3419](https://github.com/influxdata/telegraf/issues/3419): Redact datadog API key in log output.
- [#3311](https://github.com/influxdata/telegraf/issues/3311): Fix error getting pids in netstat input.
- [#3339](https://github.com/influxdata/telegraf/issues/3339): Support HOST_VAR envvar to locate /var in system input.
- [#3383](https://github.com/influxdata/telegraf/issues/3383): Use current time if docker container read time is zero value.
## v1.4.3 [2017-10-25]
### Bugfixes
- [#3327](https://github.com/influxdata/telegraf/issues/3327): Fix container name filters in docker input.
- [#3321](https://github.com/influxdata/telegraf/issues/3321): Fix snmpwalk address format in leofs input.
- [#3329](https://github.com/influxdata/telegraf/issues/3329): Fix case sensitivity issue in sqlserver query.
- [#3342](https://github.com/influxdata/telegraf/pull/3342): Fix CPU input plugin stuck after suspend on Linux.
- [#3013](https://github.com/influxdata/telegraf/issues/3013): Fix mongodb input panic when restarting mongodb.
- [#3224](https://github.com/influxdata/telegraf/pull/3224): Preserve url path prefix in influx output.
- [#3354](https://github.com/influxdata/telegraf/pull/3354): Fix TELEGRAF_OPTS expansion in systemd service unit.
- [#3357](https://github.com/influxdata/telegraf/issues/3357): Remove warning when JSON contains null value.
- [#3375](https://github.com/influxdata/telegraf/issues/3375): Fix ACL token usage in consul input plugin.
- [#3369](https://github.com/influxdata/telegraf/issues/3369): Fix unquoting error with Tomcat 6.
- [#3373](https://github.com/influxdata/telegraf/issues/3373): Fix syscall panic in diskio on some Linux systems.
## v1.4.2 [2017-10-10]
### Bugfixes
- [#3259](https://github.com/influxdata/telegraf/issues/3259): Fix error if int larger than 32-bit in /proc/vmstat.
- [#3265](https://github.com/influxdata/telegraf/issues/3265): Fix parsing of JSON with a UTF8 BOM in httpjson.
- [#2887](https://github.com/influxdata/telegraf/issues/2887): Allow JSON data format to contain zero metrics.
- [#3284](https://github.com/influxdata/telegraf/issues/3284): Fix format of connection_timeout in mqtt_consumer.
- [#3081](https://github.com/influxdata/telegraf/issues/3081): Fix case sensitivity error in sqlserver input.
- [#3297](https://github.com/influxdata/telegraf/issues/3297): Add support for proxy environment variables to http_response.
- [#1588](https://github.com/influxdata/telegraf/issues/1588): Add support for standard proxy env vars in outputs.
- [#3282](https://github.com/influxdata/telegraf/issues/3282): Fix panic in cpu input if number of cpus changes.
- [#2854](https://github.com/influxdata/telegraf/issues/2854): Use chunked transfer encoding in InfluxDB output.
## v1.4.1 [2017-09-26]
### Bugfixes
- [#3167](https://github.com/influxdata/telegraf/issues/3167): Fix MQTT input exits if Broker is not available on startup.
- [#3217](https://github.com/influxdata/telegraf/issues/3217): Fix optional field value conversions in fluentd input.
- [#3227](https://github.com/influxdata/telegraf/issues/3227): Whitelist allowed char classes for opentsdb output.
- [#3232](https://github.com/influxdata/telegraf/issues/3232): Fix counter and gauge metric types.
- [#3235](https://github.com/influxdata/telegraf/issues/3235): Fix skipped line with empty target in iptables.
- [#3175](https://github.com/influxdata/telegraf/issues/3175): Fix duplicate keys in perf counters sqlserver query.
- [#3230](https://github.com/influxdata/telegraf/issues/3230): Fix panic in statsd p100 calculation.
- [#3242](https://github.com/influxdata/telegraf/issues/3242): Fix arm64 packages contain 32-bit executable.
## v1.4 [2017-09-05]
### Release Notes
@@ -62,6 +222,7 @@
- [#2978](https://github.com/influxdata/telegraf/pull/2978): Add gzip content-encoding support to influxdb output.
- [#3127](https://github.com/influxdata/telegraf/pull/3127): Allow using system plugin in Windows.
- [#3112](https://github.com/influxdata/telegraf/pull/3112): Add tomcat input plugin.
- [#3182](https://github.com/influxdata/telegraf/pull/3182): HTTP headers can be added to InfluxDB output.
### Bugfixes
@@ -93,6 +254,16 @@
- [#2899](https://github.com/influxdata/telegraf/issues/2899): Skip compilation of logparser and tail on solaris.
- [#2951](https://github.com/influxdata/telegraf/issues/2951): Discard logging from tail library.
- [#3126](https://github.com/influxdata/telegraf/pull/3126): Remove log message on ping timeout.
- [#3144](https://github.com/influxdata/telegraf/issues/3144): Don't retry points beyond retention policy.
- [#3015](https://github.com/influxdata/telegraf/issues/3015): Don't start Telegraf on install in Amazon Linux.
- [#3153](https://github.com/influxdata/telegraf/issues/3153): Enable hddtemp input on all platforms.
- [#3142](https://github.com/influxdata/telegraf/issues/3142): Escape backslash within string fields.
- [#3162](https://github.com/influxdata/telegraf/issues/3162): Fix parsing of SHM remotes in ntpq input.
- [#3149](https://github.com/influxdata/telegraf/issues/3149): Don't fail parsing zpool stats if pool health is UNAVAIL on FreeBSD.
- [#2672](https://github.com/influxdata/telegraf/issues/2672): Fix NSQ input plugin when used with version 1.0.0-compat.
- [#2523](https://github.com/influxdata/telegraf/issues/2523): Added CloudWatch metric constraint validation.
- [#3179](https://github.com/influxdata/telegraf/issues/3179): Skip non-numerical values in graphite format.
- [#3187](https://github.com/influxdata/telegraf/issues/3187): Fix panic when handling string fields with escapes.
## v1.3.5 [2017-07-26]

CONTRIBUTING.md

@@ -12,7 +12,7 @@ but any information you can provide on how the data will look is appreciated.
See the [OpenTSDB output](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/opentsdb)
for a good example.
1. **Optional:** Help users of your plugin by including example queries for populating dashboards. Include these sample queries in the `README.md` for the plugin.
1. **Optional:** Write a [tickscript](https://docs.influxdata.com/kapacitor/v1.0/tick/syntax/) for your plugin and add it to [Kapacitor](https://github.com/influxdata/kapacitor/tree/master/examples/telegraf). Or mention @jackzampolin in a PR comment with some common queries that you would want to alert on and he will write one for you.
1. **Optional:** Write a [tickscript](https://docs.influxdata.com/kapacitor/v1.0/tick/syntax/) for your plugin and add it to [Kapacitor](https://github.com/influxdata/kapacitor/tree/master/examples/telegraf).
## GoDoc
@@ -52,7 +52,7 @@ See below for a quick example.
* Input Plugins must be added to the
`github.com/influxdata/telegraf/plugins/inputs/all/all.go` file.
* The `SampleConfig` function should return valid toml that describes how the
plugin can be configured. This is include in `telegraf -sample-config`.
plugin can be configured. This is included in `telegraf config`.
* The `Description` function should say in one line what this plugin does.
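As a rough, compilable sketch of the requirements listed above, assuming the `telegraf.Input` interface of this era (`SampleConfig`, `Description`, `Gather`) and the `inputs.Add` registration helper; the `example` plugin name and its single option are hypothetical:

```go
package example

import (
	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/inputs"
)

// Example is a hypothetical input used only to illustrate the plugin
// shape; it is not a plugin in the repository.
type Example struct {
	Ok bool `toml:"ok"`
}

// Description returns a one-line summary, as required above.
func (e *Example) Description() string { return "emits a single example field" }

// SampleConfig returns valid TOML describing the configuration; this is
// the text included in the output of `telegraf config`.
func (e *Example) SampleConfig() string {
	return `
  ## Example option
  ok = true
`
}

// Gather is called at each collection interval to add metrics.
func (e *Example) Gather(acc telegraf.Accumulator) error {
	acc.AddFields("example", map[string]interface{}{"ok": e.Ok}, nil)
	return nil
}

func init() {
	// Registration makes the plugin available once it is also listed in
	// plugins/inputs/all/all.go.
	inputs.Add("example", func() telegraf.Input { return &Example{} })
}
```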
Let's say you've written a plugin that emits metrics about processes on the
@@ -183,7 +183,7 @@ See below for a quick example.
* To be available within Telegraf itself, plugins must add themselves to the
`github.com/influxdata/telegraf/plugins/outputs/all/all.go` file.
* The `SampleConfig` function should return valid toml that describes how the
output can be configured. This is include in `telegraf -sample-config`.
output can be configured. This is included in `telegraf config`.
* The `Description` function should say in one line what this output does.
### Output Example
@@ -287,7 +287,7 @@ See below for a quick example.
* To be available within Telegraf itself, plugins must add themselves to the
`github.com/influxdata/telegraf/plugins/processors/all/all.go` file.
* The `SampleConfig` function should return valid toml that describes how the
processor can be configured. This is include in `telegraf -sample-config`.
processor can be configured. This is included in the output of `telegraf config`.
* The `Description` function should say in one line what this processor does.
### Processor Example
@@ -344,7 +344,7 @@ See below for a quick example.
* To be available within Telegraf itself, plugins must add themselves to the
`github.com/influxdata/telegraf/plugins/aggregators/all/all.go` file.
* The `SampleConfig` function should return valid toml that describes how the
aggregator can be configured. This is include in `telegraf -sample-config`.
aggregator can be configured. This is included in `telegraf config`.
* The `Description` function should say in one line what this aggregator does.
* The Aggregator plugin will need to keep caches of metrics that have passed
through it. This should be done using the builtin `HashID()` function of each
@@ -457,29 +457,28 @@ func init() {
## Unit Tests
Before opening a pull request you should run the linter checks and
the short tests.
### Execute linter
execute `make lint`
### Execute short tests
execute `make test-short`
execute `make test`
### Execute long tests
### Execute integration tests
As Telegraf collects metrics from several third-party services it becomes a
difficult task to mock each service as some of them have complicated protocols
which would take some time to replicate.
Running the integration tests requires several docker containers to be
running. You can start the containers with:
```
make docker-run
```
To overcome this situation we've decided to use docker containers to provide a
fast and reproducible environment to test those services which require it.
For other situations
(i.e: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/redis/redis_test.go)
a simple mock will suffice.
And run the full test suite with:
```
make test-all
```
To execute Telegraf tests follow these simple steps:
- Install docker following [these](https://docs.docker.com/installation/)
instructions
- execute `make test`
### Unit test troubleshooting
Try cleaning up your test environment by executing `make docker-kill` and
re-running
Use `make docker-kill` to stop the containers.

Godeps

@@ -4,12 +4,13 @@ github.com/amir/raidman c74861fe6a7bb8ede0a010ce4485bdbb4fc4c985
github.com/apache/thrift 4aaa92ece8503a6da9bc6701604f69acf2b99d07
github.com/aws/aws-sdk-go c861d27d0304a79f727e9a8a4e2ac1e74602fdc0
github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
github.com/bsm/sarama-cluster ccdc0803695fbce22f1706d04ded46cd518fd832
github.com/bsm/sarama-cluster abf039439f66c1ce78017f560b490612552f6472
github.com/cenkalti/backoff b02f2bbce11d7ea6b97f282ef1771b0fe2f65ef3
github.com/couchbase/go-couchbase bfe555a140d53dc1adf390f1a1d4b0fd4ceadb28
github.com/couchbase/gomemcached 4a25d2f4e1dea9ea7dd76dfd943407abf9b07d29
github.com/couchbase/goutils 5823a0cbaaa9008406021dc5daf80125ea30bba6
github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
github.com/dgrijalva/jwt-go dbeaa9332f19a944acb5736b4456cfcc02140e29
github.com/docker/docker f5ec1e2936dcbe7b5001c2b817188b095c700c27
github.com/docker/go-connections 990a1a1a70b0da4c4cb70e117971a4f0babfbf1a
github.com/eapache/go-resiliency b86b1ec0dd4209a588dc1285cdd471e73525c0b3
@@ -32,7 +33,7 @@ github.com/hashicorp/consul 63d2fc68239b996096a1c55a0d4b400ea4c2583f
github.com/influxdata/tail a395bf99fe07c233f41fba0735fa2b13b58588ea
github.com/influxdata/toml 5d1d907f22ead1cd47adde17ceec5bda9cacaf8f
github.com/influxdata/wlog 7c63b0a71ef8300adc255344d275e10e5c3a71ec
github.com/jackc/pgx b84338d7d62598f75859b2b146d830b22f1b9ec8
github.com/jackc/pgx 63f58fd32edb5684b9e9f4cfaac847c6b42b3917
github.com/jmespath/go-jmespath bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d
github.com/kardianos/osext c2c54e542fb797ad986b31721e1baedf214ca413
github.com/kardianos/service 6d3a0ee7d3425d9d835debc51a0ca1ffa28f4893
@@ -40,11 +41,13 @@ github.com/kballard/go-shellquote d8ec1a69a250a17bb0e419c386eac1f3711dc142
github.com/matttproud/golang_protobuf_extensions c12348ce28de40eed0136aa2b644d0ee0650e56c
github.com/Microsoft/go-winio ce2922f643c8fd76b46cadc7f404a06282678b34
github.com/miekg/dns 99f84ae56e75126dd77e5de4fae2ea034a468ca1
github.com/mitchellh/mapstructure d0303fe809921458f417bcf828397a65db30a7e4
github.com/multiplay/go-ts3 07477f49b8dfa3ada231afc7b7b17617d42afe8e
github.com/naoina/go-stringutil 6b638e95a32d0c1131db0e7fe83775cbea4a0d0b
github.com/nats-io/go-nats ea9585611a4ab58a205b9b125ebd74c389a6b898
github.com/nats-io/nats ea9585611a4ab58a205b9b125ebd74c389a6b898
github.com/nats-io/nuid 289cccf02c178dc782430d534e3c1f5b72af807f
github.com/nsqio/go-nsq a53d495e81424aaf7a7665a9d32a97715c40e953
github.com/nsqio/go-nsq eee57a3ac4174c55924125bb15eeeda8cffb6e6f
github.com/opencontainers/runc 89ab7f2ccc1e45ddf6485eaa802c35dcf321dfc8
github.com/opentracing-contrib/go-observer a52f2342449246d5bcc273e65cbdcfa5f7d6c63c
github.com/opentracing/opentracing-go 06f47b42c792fef2796e9681353e1d908c417827
@@ -60,9 +63,9 @@ github.com/prometheus/procfs 1878d9fbb537119d24b21ca07effd591627cd160
github.com/rcrowley/go-metrics 1f30fe9094a513ce4c700b9a54458bbb0c96996c
github.com/samuel/go-zookeeper 1d7be4effb13d2d908342d349d71a284a7542693
github.com/satori/go.uuid 5bf94b69c6b68ee1b541973bb8e1144db23a194b
github.com/shirou/gopsutil 9a4a9167ad3b4355dbf1c2c7a0f5f0d3fb1e9ab9
github.com/shirou/gopsutil 384a55110aa5ae052eb93ea94940548c1e305a99
github.com/shirou/w32 3c9377fc6748f222729a8270fe2775d149a249ad
github.com/Shopify/sarama c01858abb625b73a3af51d0798e4ad42c8147093
github.com/Shopify/sarama 3b1b38866a79f06deddf0487d5c27ba0697ccd65
github.com/Sirupsen/logrus 61e43dc76f7ee59a82bdf3d71033dc12bea4c77d
github.com/soniah/gosnmp 5ad50dc75ab389f8a1c9f8a67d3a1cd85f67ed15
github.com/StackExchange/wmi f3e2bae1e0cb5aef83e319133eabfee30013a4a5

Makefile

@@ -15,7 +15,6 @@ ifdef VERSION
LDFLAGS += -X main.version=$(VERSION)
endif
all:
$(MAKE) deps
$(MAKE) telegraf
@@ -25,7 +24,7 @@ deps:
gdm restore
telegraf:
go build -o $(TELEGRAF) -ldflags "$(LDFLAGS)" ./cmd/telegraf/telegraf.go
go build -i -o $(TELEGRAF) -ldflags "$(LDFLAGS)" ./cmd/telegraf/telegraf.go
go-install:
go install -ldflags "-w -s $(LDFLAGS)" ./cmd/telegraf
@@ -49,12 +48,17 @@ test-all: lint
go test ./...
package:
./scripts/build.py --package --version="$(VERSION)" --platform=linux --arch=all --upload
./scripts/build.py --package --platform=all --arch=all
clean:
-rm -f telegraf
-rm -f telegraf.exe
docker-image:
./scripts/build.py --package --platform=linux --arch=amd64
cp build/telegraf*$(COMMIT)*.deb .
docker build -f scripts/dev.docker --build-arg "package=telegraf*$(COMMIT)*.deb" -t "telegraf-dev:$(COMMIT)" .
# Run all docker containers necessary for integration tests
docker-run:
docker run --name aerospike -p "3000:3000" -d aerospike/aerospike-server:3.9.0
@@ -82,6 +86,12 @@ docker-run:
-e SLAPD_CONFIG_ROOTPW="secret" \
-p "389:389" -p "636:636" \
-d cobaugh/openldap-alpine
docker run --name cratedb \
-p "6543:5432" \
-d crate crate \
-Cnetwork.host=0.0.0.0 \
-Ctransport.host=localhost \
-Clicense.enterprise=false
# Run docker containers necessary for integration tests; skipping services provided
# by CircleCI
@@ -106,12 +116,18 @@ docker-run-circle:
-e SLAPD_CONFIG_ROOTPW="secret" \
-p "389:389" -p "636:636" \
-d cobaugh/openldap-alpine
docker run --name cratedb \
-p "6543:5432" \
-d crate crate \
-Cnetwork.host=0.0.0.0 \
-Ctransport.host=localhost \
-Clicense.enterprise=false
docker-kill:
-docker kill aerospike elasticsearch kafka memcached mqtt mysql nats nsq \
openldap postgres rabbitmq redis riemann zookeeper
openldap postgres rabbitmq redis riemann zookeeper cratedb
-docker rm aerospike elasticsearch kafka memcached mqtt mysql nats nsq \
openldap postgres rabbitmq redis riemann zookeeper
openldap postgres rabbitmq redis riemann zookeeper cratedb
.PHONY: deps telegraf telegraf.exe install test test-windows lint test-all \
package clean docker-run docker-run-circle docker-kill
package clean docker-run docker-run-circle docker-kill docker-image


@@ -5,8 +5,7 @@ and writing metrics.
Design goals are to have a minimal memory footprint with a plugin system so
that developers in the community can easily add support for collecting metrics
from well known services (like Hadoop, Postgres, or Redis) and third party
APIs (like Mailchimp, AWS CloudWatch, or Google Analytics).
from local or remote services.
Telegraf is plugin-driven and has the concept of 4 distinct plugins:
@@ -52,6 +51,33 @@ which is installed by the Makefile if you don't have it already.
4. Run `cd $GOPATH/src/github.com/influxdata/telegraf`
5. Run `make`
### Nightly Builds
These builds are generated from the master branch:
- [telegraf_nightly_amd64.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_amd64.deb)
- [telegraf_nightly_arm64.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_arm64.deb)
- [telegraf-nightly.arm64.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.arm64.rpm)
- [telegraf_nightly_armel.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_armel.deb)
- [telegraf-nightly.armel.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.armel.rpm)
- [telegraf_nightly_armhf.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_armhf.deb)
- [telegraf-nightly.armv6hl.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.armv6hl.rpm)
- [telegraf-nightly_freebsd_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_freebsd_amd64.tar.gz)
- [telegraf-nightly_freebsd_i386.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_freebsd_i386.tar.gz)
- [telegraf_nightly_i386.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_i386.deb)
- [telegraf-nightly.i386.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.i386.rpm)
- [telegraf-nightly_linux_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_amd64.tar.gz)
- [telegraf-nightly_linux_arm64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_arm64.tar.gz)
- [telegraf-nightly_linux_armel.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_armel.tar.gz)
- [telegraf-nightly_linux_armhf.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_armhf.tar.gz)
- [telegraf-nightly_linux_i386.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_i386.tar.gz)
- [telegraf-nightly_linux_s390x.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_s390x.tar.gz)
- [telegraf_nightly_s390x.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_s390x.deb)
- [telegraf-nightly.s390x.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.s390x.rpm)
- [telegraf-nightly_windows_amd64.zip](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_windows_amd64.zip)
- [telegraf-nightly_windows_i386.zip](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_windows_i386.zip)
- [telegraf-nightly.x86_64.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.x86_64.rpm)
- [telegraf-static-nightly_linux_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-static-nightly_linux_amd64.tar.gz)
## How to use it:
See usage with:
@@ -103,6 +129,7 @@ configuration options.
* [apache](./plugins/inputs/apache)
* [aws cloudwatch](./plugins/inputs/cloudwatch)
* [bcache](./plugins/inputs/bcache)
* [bond](./plugins/inputs/bond)
* [cassandra](./plugins/inputs/cassandra)
* [ceph](./plugins/inputs/ceph)
* [cgroup](./plugins/inputs/cgroup)
@@ -111,6 +138,7 @@ configuration options.
* [conntrack](./plugins/inputs/conntrack)
* [couchbase](./plugins/inputs/couchbase)
* [couchdb](./plugins/inputs/couchdb)
* [DC/OS](./plugins/inputs/dcos)
* [disque](./plugins/inputs/disque)
* [dmcache](./plugins/inputs/dmcache)
* [dns query time](./plugins/inputs/dns_query)
@@ -131,7 +159,8 @@ configuration options.
* [interrupts](./plugins/inputs/interrupts)
* [ipmi_sensor](./plugins/inputs/ipmi_sensor)
* [iptables](./plugins/inputs/iptables)
* [jolokia](./plugins/inputs/jolokia)
* [jolokia](./plugins/inputs/jolokia) (deprecated, use [jolokia2](./plugins/inputs/jolokia2))
* [jolokia2](./plugins/inputs/jolokia2)
* [kapacitor](./plugins/inputs/kapacitor)
* [kubernetes](./plugins/inputs/kubernetes)
* [leofs](./plugins/inputs/leofs)
@@ -144,15 +173,19 @@ configuration options.
* [mysql](./plugins/inputs/mysql)
* [net_response](./plugins/inputs/net_response)
* [nginx](./plugins/inputs/nginx)
* [nginx_plus](./plugins/inputs/nginx_plus)
* [nsq](./plugins/inputs/nsq)
* [nstat](./plugins/inputs/nstat)
* [ntpq](./plugins/inputs/ntpq)
* [openldap](./plugins/inputs/openldap)
* [opensmtpd](./plugins/inputs/opensmtpd)
* [pf](./plugins/inputs/pf)
* [phpfpm](./plugins/inputs/phpfpm)
* [phusion passenger](./plugins/inputs/passenger)
* [ping](./plugins/inputs/ping)
* [postgresql](./plugins/inputs/postgresql)
* [postfix](./plugins/inputs/postfix)
* [postgresql_extensible](./plugins/inputs/postgresql_extensible)
* [postgresql](./plugins/inputs/postgresql)
* [powerdns](./plugins/inputs/powerdns)
* [procstat](./plugins/inputs/procstat)
* [prometheus](./plugins/inputs/prometheus) (can be used for [Caddy server](./plugins/inputs/prometheus/README.md#usage-for-caddy-http-server))
@@ -164,15 +197,20 @@ configuration options.
* [riak](./plugins/inputs/riak)
* [salesforce](./plugins/inputs/salesforce)
* [sensors](./plugins/inputs/sensors)
* [smart](./plugins/inputs/smart)
* [snmp](./plugins/inputs/snmp)
* [snmp_legacy](./plugins/inputs/snmp_legacy)
* [solr](./plugins/inputs/solr)
* [sql server](./plugins/inputs/sqlserver) (microsoft)
* [teamspeak](./plugins/inputs/teamspeak)
* [tomcat](./plugins/inputs/tomcat)
* [twemproxy](./plugins/inputs/twemproxy)
* [unbound](./plugins/inputs/unbound)
* [varnish](./plugins/inputs/varnish)
* [zfs](./plugins/inputs/zfs)
* [zookeeper](./plugins/inputs/zookeeper)
* [win_perf_counters ](./plugins/inputs/win_perf_counters) (windows performance counters)
* [win_perf_counters](./plugins/inputs/win_perf_counters) (windows performance counters)
* [win_services](./plugins/inputs/win_services)
* [sysstat](./plugins/inputs/sysstat)
* [system](./plugins/inputs/system)
* cpu
@@ -204,8 +242,9 @@ Telegraf can also collect metrics via the following service plugins:
* [filestack](./plugins/inputs/webhooks/filestack)
* [github](./plugins/inputs/webhooks/github)
* [mandrill](./plugins/inputs/webhooks/mandrill)
* [rollbar](./plugins/inputs/webhooks/rollbar)
* [papertrail](./plugins/inputs/webhooks/papertrail)
* [particle](./plugins/inputs/webhooks/particle)
* [rollbar](./plugins/inputs/webhooks/rollbar)
* [zipkin](./plugins/inputs/zipkin)
Telegraf is able to parse the following input data formats into metrics, these
@@ -224,6 +263,7 @@ formats may be used with input plugins supporting the `data_format` option:
## Aggregator Plugins
* [basicstats](./plugins/aggregators/basicstats)
* [minmax](./plugins/aggregators/minmax)
* [histogram](./plugins/aggregators/histogram)
@@ -234,6 +274,7 @@ formats may be used with input plugins supporting the `data_format` option:
* [amqp](./plugins/outputs/amqp) (rabbitmq)
* [aws kinesis](./plugins/outputs/kinesis)
* [aws cloudwatch](./plugins/outputs/cloudwatch)
* [cratedb](./plugins/outputs/cratedb)
* [datadog](./plugins/outputs/datadog)
* [discard](./plugins/outputs/discard)
* [elasticsearch](./plugins/outputs/elasticsearch)
@@ -253,3 +294,4 @@ formats may be used with input plugins supporting the `data_format` option:
* [socket_writer](./plugins/outputs/socket_writer)
* [tcp](./plugins/outputs/socket_writer)
* [udp](./plugins/outputs/socket_writer)
* [wavefront](./plugins/outputs/wavefront)


@@ -28,6 +28,18 @@ type Accumulator interface {
tags map[string]string,
t ...time.Time)
// AddSummary is the same as AddFields, but will add the metric as a "Summary" type
AddSummary(measurement string,
fields map[string]interface{},
tags map[string]string,
t ...time.Time)
// AddHistogram is the same as AddFields, but will add the metric as a "Histogram" type
AddHistogram(measurement string,
fields map[string]interface{},
tags map[string]string,
t ...time.Time)
SetPrecision(precision, interval time.Duration)
AddError(err error)


@@ -76,6 +76,28 @@ func (ac *accumulator) AddCounter(
}
}
func (ac *accumulator) AddSummary(
measurement string,
fields map[string]interface{},
tags map[string]string,
t ...time.Time,
) {
if m := ac.maker.MakeMetric(measurement, fields, tags, telegraf.Summary, ac.getTime(t)); m != nil {
ac.metrics <- m
}
}
func (ac *accumulator) AddHistogram(
measurement string,
fields map[string]interface{},
tags map[string]string,
t ...time.Time,
) {
if m := ac.maker.MakeMetric(measurement, fields, tags, telegraf.Histogram, ac.getTime(t)); m != nil {
ac.metrics <- m
}
}
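// Illustrative sketch (not part of this change): a plugin holding an
// Accumulator can now emit a summary- or histogram-typed metric, e.g.
//
//	acc.AddHistogram("latency", map[string]interface{}{"le_500ms": 12}, nil)
//
// The measurement and field names here are hypothetical; only the value
// type passed to MakeMetric differs from a plain AddFields call.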
// AddError passes a runtime error to the accumulator.
// The error will be tagged with the plugin name and written to the log.
func (ac *accumulator) AddError(err error) {


@@ -252,7 +252,7 @@ func (a *Agent) flusher(shutdown chan struct{}, metricC chan telegraf.Metric, ag
// the flusher will flush after metrics are collected.
time.Sleep(time.Millisecond * 300)
// create an output metric channel and a gorouting that continously passes
// create an output metric channel and a goroutine that continuously passes
// each metric onto the output plugins & aggregators.
outMetricC := make(chan telegraf.Metric, 100)
var wg sync.WaitGroup


@@ -12,11 +12,11 @@ platform: x64
install:
- IF NOT EXIST "C:\Cache" mkdir C:\Cache
- IF NOT EXIST "C:\Cache\go1.8.1.msi" curl -o "C:\Cache\go1.8.1.msi" https://storage.googleapis.com/golang/go1.8.1.windows-amd64.msi
- IF NOT EXIST "C:\Cache\go1.9.2.msi" curl -o "C:\Cache\go1.9.2.msi" https://storage.googleapis.com/golang/go1.9.2.windows-amd64.msi
- IF NOT EXIST "C:\Cache\gnuwin32-bin.zip" curl -o "C:\Cache\gnuwin32-bin.zip" https://dl.influxdata.com/telegraf/ci/make-3.81-bin.zip
- IF NOT EXIST "C:\Cache\gnuwin32-dep.zip" curl -o "C:\Cache\gnuwin32-dep.zip" https://dl.influxdata.com/telegraf/ci/make-3.81-dep.zip
- IF EXIST "C:\Go" rmdir /S /Q C:\Go
- msiexec.exe /i "C:\Cache\go1.8.1.msi" /quiet
- msiexec.exe /i "C:\Cache\go1.9.2.msi" /quiet
- 7z x "C:\Cache\gnuwin32-bin.zip" -oC:\GnuWin32 -y
- 7z x "C:\Cache\gnuwin32-dep.zip" -oC:\GnuWin32 -y
- go version


@@ -1,11 +1,14 @@
machine:
go:
version: 1.8.1
services:
- docker
- memcached
- redis
- rabbitmq-server
post:
- sudo rm -rf /usr/local/go
- wget https://storage.googleapis.com/golang/go1.9.2.linux-amd64.tar.gz
- sudo tar -C /usr/local -xzf go1.9.2.linux-amd64.tar.gz
- go version
dependencies:
override:


@@ -55,11 +55,8 @@ var fUsage = flag.String("usage", "",
var fService = flag.String("service", "",
"operate on the service")
// Telegraf version, populated by the linker.
// ie, -ldflags "-X main.version=`git describe --always --tags`"
var (
nextVersion = "1.4.0"
nextVersion = "1.5.0"
version string
commit string
branch string
@@ -268,7 +265,7 @@ func (p *program) Stop(s service.Service) error {
func displayVersion() string {
if version == "" {
return fmt.Sprintf("v%s~pre%s", nextVersion, commit)
return fmt.Sprintf("v%s~%s", nextVersion, commit)
}
return "v" + version
}


@@ -39,6 +39,11 @@ metrics as they pass through Telegraf:
Both Aggregators and Processors analyze metrics as they pass through Telegraf.
Use [measurement filtering](CONFIGURATION.md#measurement-filtering)
to control which metrics are passed through a processor or aggregator. If a
metric is filtered out, it bypasses the plugin and is passed downstream
to the next plugin.
**Processor** plugins process metrics as they pass through and immediately emit
results based on the values they process. For example, this could be printing
all metrics or adding a tag to all metrics that pass through.
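A minimal sketch of such a setup, using the `printer` processor together with
the `namepass` filtering parameter described above:
```toml
# Print only the cpu metrics; everything else bypasses the processor
# and continues downstream unchanged.
[[processors.printer]]
  namepass = "cpu"
```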


@@ -24,11 +24,17 @@ Environment variables can be used anywhere in the config file, simply prepend
them with $. For strings the variable must be within quotes (ie, "$STR_VAR"),
for numbers and booleans they should be plain (ie, $INT_VAR, $BOOL_VAR)
When using the `.deb` or `.rpm` packages, you can define environment variables
in the `/etc/default/telegraf` file.
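For example, a sketch of a config fragment using the `STR_VAR`, `INT_VAR`, and
`BOOL_VAR` names from above (the agent options shown are illustrative):
```toml
[agent]
  ## strings must be quoted
  hostname = "$STR_VAR"
  ## numbers and booleans are left plain
  metric_batch_size = $INT_VAR
  debug = $BOOL_VAR
```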
## Configuration file locations
The location of the configuration file can be set via the `--config` command
line flag. Telegraf will also pick up all files matching the pattern `*.conf` if
the `-config-directory` command line flag is used.
line flag.
When the `--config-directory` command line flag is used files ending with
`.conf` in the specified directory will also be included in the Telegraf
configuration.
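For example, the following invocation (a sketch using the default paths
described below) loads the main file plus every `.conf` file in the directory:
```
telegraf --config /etc/telegraf/telegraf.conf --config-directory /etc/telegraf/telegraf.d
```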
On most systems, the default locations are `/etc/telegraf/telegraf.conf` for
the main configuration file and `/etc/telegraf/telegraf.d` for the directory of
@@ -92,9 +98,13 @@ you can configure that here.
* **name_suffix**: Specifies a suffix to attach to the measurement name.
* **tags**: A map of tags to apply to a specific input's measurements.
The [measurement filtering](#measurement-filtering) parameters can be used to
limit what metrics are emitted from the input plugin.
## Output Configuration
There are no generic configuration options available for all outputs.
The [measurement filtering](#measurement-filtering) parameters can be used to
limit what metrics are emitted from the output plugin.
## Aggregator Configuration
@@ -115,6 +125,10 @@ aggregator and will not get sent to the output plugins.
* **name_suffix**: Specifies a suffix to attach to the measurement name.
* **tags**: A map of tags to apply to a specific input's measurements.
The [measurement filtering](#measurement-filtering) parameters can be used to
limit what metrics are handled by the aggregator. Excluded metrics are passed
downstream to the next aggregator.
## Processor Configuration
The following config parameters are available for all processors:
@@ -122,6 +136,10 @@ The following config parameters are available for all processors:
* **order**: This is the order in which the processor(s) get executed. If this
is not specified then processor execution order will be random.
The [measurement filtering](#measurement-filtering) parameters can be used
to limit what metrics are handled by the processor. Excluded metrics are
passed downstream to the next processor.
#### Measurement Filtering
Filters can be configured per input, output, processor, or aggregator,
@@ -371,3 +389,15 @@ to the system load metrics due to the `namepass` parameter.
[[outputs.file]]
files = ["stdout"]
```
#### Processor Configuration Examples:
Print only the metrics with `cpu` as the measurement name, while passing all
metrics to the output:
```toml
[[processors.printer]]
namepass = "cpu"
[[outputs.file]]
files = ["/tmp/metrics.out"]
```


@@ -96,6 +96,9 @@ tars.cpu-total.us-east-1.cpu.usage_user 0.89 1455320690
tars.cpu-total.us-east-1.cpu.usage_idle 98.09 1455320690
```
Fields with string values will be skipped. Boolean fields will be converted
to 1 (true) or 0 (false).
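For example (an illustrative sketch assuming a plain `host.measurement.field`
template; the field names are hypothetical), a metric with a boolean and a
string field:
```
cpu,host=tars usage_active=true,status="ok" 1455320690000000000
```
would serialize only the boolean, with the string field skipped:
```
tars.cpu.usage_active 1 1455320690
```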
### Graphite Configuration:
```toml

docs/FAQ.md

@@ -0,0 +1,46 @@
# Frequently Asked Questions
### Q: How can I monitor the Docker Engine Host from within a container?
You will need to set up several volume mounts as well as some environment
variables:
```
docker run --name telegraf
-v /:/hostfs:ro
-v /etc:/hostfs/etc:ro
-v /proc:/hostfs/proc:ro
-v /sys:/hostfs/sys:ro
-v /var/run/utmp:/var/run/utmp:ro
-e HOST_ETC=/hostfs/etc
-e HOST_PROC=/hostfs/proc
-e HOST_SYS=/hostfs/sys
-e HOST_MOUNT_PREFIX=/hostfs
telegraf
```
### Q: Why do I get a "no such host" error resolving hostnames that other programs can resolve?
Go uses a pure Go resolver by default for [name resolution](https://golang.org/pkg/net/#hdr-Name_Resolution).
This resolver behaves differently than the C library functions but is more
efficient when used with the Go runtime.
If you encounter problems or want to use more advanced name resolution methods
that are unsupported by the pure Go resolver, you can switch to the cgo
resolver.
If running manually, set:
```
export GODEBUG=netdns=cgo
```
If running as a service, add the environment variable to `/etc/default/telegraf`:
```
GODEBUG=netdns=cgo
```
### Q: When will the next version be released?
The latest release date estimate can be viewed on the
[milestones](https://github.com/influxdata/telegraf/milestones) page.


@@ -82,6 +82,8 @@ following works:
- github.com/streadway/amqp [BSD](https://github.com/streadway/amqp/blob/master/LICENSE)
- github.com/stretchr/objx [MIT](https://github.com/stretchr/objx/blob/master/LICENSE.md)
- github.com/stretchr/testify [MIT](https://github.com/stretchr/testify/blob/master/LICENCE.txt)
- github.com/mitchellh/mapstructure [MIT](https://github.com/mitchellh/mapstructure/blob/master/LICENSE)
- github.com/multiplay/go-ts3 [BSD](https://github.com/multiplay/go-ts3/blob/master/LICENSE)
- github.com/vjeantet/grok [APACHE](https://github.com/vjeantet/grok/blob/master/LICENSE)
- github.com/wvanbergen/kafka [MIT](https://github.com/wvanbergen/kafka/blob/master/LICENSE)
- github.com/wvanbergen/kazoo-go [MIT](https://github.com/wvanbergen/kazoo-go/blob/master/MIT-LICENSE)


@@ -38,7 +38,7 @@ Telegraf can manage its own service through the --service flag:
| `telegraf.exe --service stop` | Stop the telegraf service |
Trobleshooting common error #1067
Troubleshooting common error #1067
When installing the service on Windows, always double-check that you specify the full path to the config file; otherwise the Windows service will fail to start
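For example, a sketch of an install command with an explicit config path
(adjust the path to your installation):
```
telegraf.exe --service install --config "C:\Program Files\Telegraf\telegraf.conf"
```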

File diff suppressed because it is too large


@@ -63,8 +63,8 @@
# The full HTTP or UDP endpoint URL for your InfluxDB instance.
# Multiple urls can be specified but it is assumed that they are part of the same
# cluster, this means that only ONE of the urls will be written to each interval.
# urls = ["udp://localhost:8089"] # UDP endpoint example
urls = ["http://localhost:8086"] # required
# urls = ["udp://127.0.0.1:8089"] # UDP endpoint example
urls = ["http://127.0.0.1:8086"] # required
# The target database for metrics (telegraf will create it if not exists)
database = "telegraf" # required
# Precision of writes, valid values are "ns", "us" (or "µs"), "ms", "s", "m", "h".


@@ -77,3 +77,40 @@ func compileFilterNoGlob(filters []string) Filter {
}
return &out
}
type IncludeExcludeFilter struct {
include Filter
exclude Filter
}
func NewIncludeExcludeFilter(
include []string,
exclude []string,
) (Filter, error) {
in, err := Compile(include)
if err != nil {
return nil, err
}
ex, err := Compile(exclude)
if err != nil {
return nil, err
}
return &IncludeExcludeFilter{in, ex}, nil
}
func (f *IncludeExcludeFilter) Match(s string) bool {
if f.include != nil {
if !f.include.Match(s) {
return false
}
}
if f.exclude != nil {
if f.exclude.Match(s) {
return false
}
}
return true
}
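A minimal usage sketch of the new filter (a hypothetical standalone program;
the patterns are examples only):
```go
package main

import (
	"fmt"

	"github.com/influxdata/telegraf/filter"
)

func main() {
	// Keep anything matching cpu*, except cpu_guest.
	f, err := filter.NewIncludeExcludeFilter([]string{"cpu*"}, []string{"cpu_guest"})
	if err != nil {
		panic(err)
	}
	fmt.Println(f.Match("cpu_user"))  // true: included and not excluded
	fmt.Println(f.Match("cpu_guest")) // false: matches the exclude list
	fmt.Println(f.Match("mem"))       // false: does not match the include list
}
```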


@@ -126,7 +126,7 @@ type AgentConfig struct {
// TODO(cam): Remove UTC and parameter, they are no longer
// valid for the agent config. Leaving them here for now for backwards-
// compatability
// compatibility
UTC bool `toml:"utc"`
// Debug is the option for running in debug mode
@@ -683,7 +683,7 @@ func (c *Config) LoadConfig(path string) error {
}
// trimBOM trims the Byte-Order-Marks from the beginning of the file.
// this is for Windows compatability only.
// this is for Windows compatibility only.
// see https://github.com/influxdata/telegraf/issues/1378
func trimBOM(f []byte) []byte {
return bytes.TrimPrefix(f, []byte("\xef\xbb\xbf"))


@@ -40,6 +40,7 @@ func TestSnakeCase(t *testing.T) {
var (
sleepbin, _ = exec.LookPath("sleep")
echobin, _ = exec.LookPath("echo")
shell, _ = exec.LookPath("sh")
)
func TestRunTimeout(t *testing.T) {
@@ -84,13 +85,13 @@ func TestCombinedOutput(t *testing.T) {
// test that CombinedOutputTimeout and exec.Cmd.CombinedOutput return
// the same output from a failed command.
func TestCombinedOutputError(t *testing.T) {
if sleepbin == "" {
t.Skip("'sleep' binary not available on OS, skipping.")
if shell == "" {
t.Skip("'sh' binary not available on OS, skipping.")
}
cmd := exec.Command(sleepbin, "foo")
cmd := exec.Command(shell, "-c", "false")
expected, err := cmd.CombinedOutput()
cmd2 := exec.Command(sleepbin, "foo")
cmd2 := exec.Command(shell, "-c", "false")
actual, err := CombinedOutputTimeout(cmd2, time.Second)
assert.Error(t, err)
@@ -98,10 +99,10 @@ func TestCombinedOutputError(t *testing.T) {
}
func TestRunError(t *testing.T) {
if sleepbin == "" {
t.Skip("'sleep' binary not available on OS, skipping.")
if shell == "" {
t.Skip("'sh' binary not available on OS, skipping.")
}
cmd := exec.Command(sleepbin, "foo")
cmd := exec.Command(shell, "-c", "false")
err := RunTimeout(cmd, time.Second)
assert.Error(t, err)


@@ -150,12 +150,6 @@ func makemetric(
continue
}
case string:
if strings.HasSuffix(val, `\`) {
log.Printf("D! Measurement [%s] field [%s] has a value "+
"ending with a backslash, skipping", measurement, k)
delete(fields, k)
continue
}
fields[k] = v
default:
fields[k] = v


@@ -370,16 +370,17 @@ func TestMakeMetric_TrailingSlash(t *testing.T) {
expectedTags: map[string]string{},
},
{
name: "Field value with trailing slash dropped",
name: "Field value with trailing slash okay",
measurement: `cpu`,
fields: map[string]interface{}{
"value": int64(42),
"bad": `xyzzy\`,
"ok": `xyzzy\`,
},
tags: map[string]string{},
expectedMeasurement: `cpu`,
expectedFields: map[string]interface{}{
"value": int64(42),
"ok": `xyzzy\`,
},
expectedTags: map[string]string{},
},
@@ -387,7 +388,7 @@ func TestMakeMetric_TrailingSlash(t *testing.T) {
name: "Must have one field after dropped",
measurement: `cpu`,
fields: map[string]interface{}{
"bad": `xyzzy\`,
"bad": math.NaN(),
},
tags: map[string]string{},
expectedNil: true,


@@ -13,6 +13,8 @@ const (
Counter
Gauge
Untyped
Summary
Histogram
)
type Metric interface {


@@ -20,8 +20,14 @@ var (
// stringFieldEscaper is for escaping string field values only.
// see https://docs.influxdata.com/influxdb/v1.0/write_protocols/line_protocol_tutorial/#special-characters-and-keywords
stringFieldEscaper = strings.NewReplacer(`"`, `\"`)
stringFieldUnEscaper = strings.NewReplacer(`\"`, `"`)
stringFieldEscaper = strings.NewReplacer(
`"`, `\"`,
`\`, `\\`,
)
stringFieldUnEscaper = strings.NewReplacer(
`\"`, `"`,
`\\`, `\`,
)
)
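// Illustrative example, grounded in the reader tests later in this change:
// the string field value `test\"` now serializes as "test\\\"", escaping
// both the backslash and the quote; unescaping reverses the mapping.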
func escape(s string, t string) string {


@@ -21,14 +21,14 @@ func New(
t time.Time,
mType ...telegraf.ValueType,
) (telegraf.Metric, error) {
if len(fields) == 0 {
return nil, fmt.Errorf("Metric cannot be made without any fields")
}
if len(name) == 0 {
return nil, fmt.Errorf("Metric cannot be made with an empty name")
return nil, fmt.Errorf("missing measurement name")
}
if len(fields) == 0 {
return nil, fmt.Errorf("%s: must have one or more fields", name)
}
if strings.HasSuffix(name, `\`) {
return nil, fmt.Errorf("Metric cannot have measurement name ending with a backslash")
return nil, fmt.Errorf("%s: measurement name cannot end with a backslash", name)
}
var thisType telegraf.ValueType
@@ -49,10 +49,10 @@ func New(
taglen := 0
for k, v := range tags {
if strings.HasSuffix(k, `\`) {
return nil, fmt.Errorf("Metric cannot have tag key ending with a backslash")
return nil, fmt.Errorf("%s: tag key cannot end with a backslash: %s", name, k)
}
if strings.HasSuffix(v, `\`) {
return nil, fmt.Errorf("Metric cannot have tag value ending with a backslash")
return nil, fmt.Errorf("%s: tag value cannot end with a backslash: %s", name, v)
}
if len(k) == 0 || len(v) == 0 {
@@ -77,15 +77,9 @@ func New(
// pre-allocate capacity of the fields slice
fieldlen := 0
for k, v := range fields {
for k, _ := range fields {
if strings.HasSuffix(k, `\`) {
return nil, fmt.Errorf("Metric cannot have field key ending with a backslash")
}
switch val := v.(type) {
case string:
if strings.HasSuffix(val, `\`) {
return nil, fmt.Errorf("Metric cannot have field value ending with a backslash")
}
return nil, fmt.Errorf("%s: field key cannot end with a backslash: %s", name, k)
}
// 10 bytes is completely arbitrary, but will at least prevent some
@@ -108,7 +102,8 @@ func New(
}
// indexUnescapedByte finds the index of the first byte equal to b in buf that
// is not escaped. Returns -1 if not found.
// is not escaped. Does not allow the escape char to be escaped. Returns -1 if
// not found.
func indexUnescapedByte(buf []byte, b byte) int {
var keyi int
for {
@@ -128,6 +123,46 @@ func indexUnescapedByte(buf []byte, b byte) int {
return keyi
}
// indexUnescapedByteBackslashEscaping finds the index of the first byte equal
// to b in buf that is not escaped. Allows for the escape char `\` to be
// escaped. Returns -1 if not found.
func indexUnescapedByteBackslashEscaping(buf []byte, b byte) int {
var keyi int
for {
i := bytes.IndexByte(buf[keyi:], b)
if i == -1 {
return -1
} else if i == 0 {
break
}
keyi += i
if countBackslashes(buf, keyi-1)%2 == 0 {
break
} else {
keyi++
}
}
return keyi
}
// countBackslashes counts the number of preceding backslashes starting at
// the 'start' index.
func countBackslashes(buf []byte, index int) int {
var count int
for {
if index < 0 {
return count
}
if buf[index] == '\\' {
count++
index--
} else {
break
}
}
return count
}
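// Illustrative example (not from the original source): scanning `ab\"c"` for
// the byte '"', indexUnescapedByteBackslashEscaping skips the quote at index
// 3, which is preceded by one backslash (an odd count, so it is escaped), and
// returns 5, where the quote is preceded by zero backslashes (an even count).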
type metric struct {
name []byte
tags []byte
@@ -289,7 +324,7 @@ func (m *metric) Fields() map[string]interface{} {
// end index of field value
var i3 int
if m.fields[i:][i2] == '"' {
i3 = indexUnescapedByte(m.fields[i:][i2+1:], '"')
i3 = indexUnescapedByteBackslashEscaping(m.fields[i:][i2+1:], '"')
if i3 == -1 {
i3 = len(m.fields[i:])
}


@@ -31,7 +31,7 @@ func TestNewMetric(t *testing.T) {
assert.Equal(t, tags, m.Tags())
assert.Equal(t, fields, m.Fields())
assert.Equal(t, "cpu", m.Name())
assert.Equal(t, now, m.Time())
assert.Equal(t, now.UnixNano(), m.Time().UnixNano())
assert.Equal(t, now.UnixNano(), m.UnixNano())
}
@@ -257,6 +257,8 @@ func TestNewMetric_Fields(t *testing.T) {
"string": "test",
"quote_string": `x"y`,
"backslash_quote_string": `x\"y`,
"backslash": `x\y`,
"ends_with_backslash": `x\`,
}
m, err := New("cpu", tags, fields, now)
assert.NoError(t, err)
@@ -412,7 +414,7 @@ func TestNewGaugeMetric(t *testing.T) {
assert.Equal(t, tags, m.Tags())
assert.Equal(t, fields, m.Fields())
assert.Equal(t, "cpu", m.Name())
assert.Equal(t, now, m.Time())
assert.Equal(t, now.UnixNano(), m.Time().UnixNano())
assert.Equal(t, now.UnixNano(), m.UnixNano())
}
@@ -434,7 +436,7 @@ func TestNewCounterMetric(t *testing.T) {
assert.Equal(t, tags, m.Tags())
assert.Equal(t, fields, m.Fields())
assert.Equal(t, "cpu", m.Name())
assert.Equal(t, now, m.Time())
assert.Equal(t, now.UnixNano(), m.Time().UnixNano())
assert.Equal(t, now.UnixNano(), m.UnixNano())
}
@@ -708,12 +710,6 @@ func TestNewMetric_TrailingSlash(t *testing.T) {
`value\`: "x",
},
},
{
name: "cpu",
fields: map[string]interface{}{
"value": `x\`,
},
},
{
name: "cpu",
tags: map[string]string{


@@ -326,7 +326,9 @@ func scanTagsValue(buf []byte, i int) (int, int, error) {
func scanFields(buf []byte, i int) (int, []byte, error) {
start := skipWhitespace(buf, i)
i = start
quoted := false
// track how many '"' we've seen since last '='
quotes := 0
// tracks how many '=' we've seen
equals := 0
@@ -350,13 +352,17 @@ func scanFields(buf []byte, i int) (int, []byte, error) {
// Only quote values in the field value since quotes are not significant
// in the field key
if buf[i] == '"' && equals > commas {
quoted = !quoted
i++
quotes++
if quotes > 2 {
break
}
continue
}
// If we see an =, ensure that there is at least one char before and after it
if buf[i] == '=' && !quoted {
if buf[i] == '=' && quotes != 1 {
quotes = 0
equals++
// check for "... =123" but allow "a\ =123"
@@ -398,18 +404,18 @@ func scanFields(buf []byte, i int) (int, []byte, error) {
}
}
if buf[i] == ',' && !quoted {
if buf[i] == ',' && quotes != 1 {
commas++
}
// reached end of block?
if buf[i] == ' ' && !quoted {
if buf[i] == ' ' && quotes != 1 {
break
}
i++
}
if quoted {
if quotes != 0 && quotes != 2 {
return i, buf[start:i], makeError("unbalanced quotes", buf, i)
}
@@ -647,7 +653,7 @@ func skipWhitespace(buf []byte, i int) int {
}
// makeError is a helper function for making a metric parsing error.
// reason is the reason that the error occured.
// reason is the reason why the error occurred.
// buf should be the current buffer we are parsing.
// i is the current index, to give some context on where in the buffer we are.
func makeError(reason string, buf []byte, i int) error {


@@ -4,6 +4,7 @@ import (
"io"
"io/ioutil"
"regexp"
"strings"
"testing"
"time"
@@ -180,7 +181,7 @@ func TestMetricReader_SplitWithExactLengthSplit(t *testing.T) {
}
}
// Regresssion test for when a metric requires to be split and one of the
// Regression test for when a metric requires to be split and one of the
// split metrics is larger than the buffer.
//
// Previously the metric index would be set incorrectly causing a panic.
@@ -217,7 +218,7 @@ func TestMetricReader_SplitOverflowOversized(t *testing.T) {
}
}
// Regresssion test for when a split metric exactly fits in the buffer.
// Regression test for when a split metric exactly fits in the buffer.
//
// Previously the metric would be overflow split when not required.
func TestMetricReader_SplitOverflowUneeded(t *testing.T) {
@@ -620,6 +621,83 @@ func TestMetricReader_SplitMetricChangingBuffer2(t *testing.T) {
}
}
func TestReader_Read(t *testing.T) {
epoch := time.Unix(0, 0)
type args struct {
name string
tags map[string]string
fields map[string]interface{}
t time.Time
mType []telegraf.ValueType
}
tests := []struct {
name string
args args
expected []byte
}{
{
name: "escape backslashes in string field",
args: args{
name: "cpu",
tags: map[string]string{},
fields: map[string]interface{}{"value": `test\`},
t: epoch,
},
expected: []byte(`cpu value="test\\" 0`),
},
{
name: "escape quote in string field",
args: args{
name: "cpu",
tags: map[string]string{},
fields: map[string]interface{}{"value": `test"`},
t: epoch,
},
expected: []byte(`cpu value="test\"" 0`),
},
{
name: "escape quote and backslash in string field",
args: args{
name: "cpu",
tags: map[string]string{},
fields: map[string]interface{}{"value": `test\"`},
t: epoch,
},
expected: []byte(`cpu value="test\\\"" 0`),
},
{
name: "escape multiple backslash in string field",
args: args{
name: "cpu",
tags: map[string]string{},
fields: map[string]interface{}{"value": `test\\`},
t: epoch,
},
expected: []byte(`cpu value="test\\\\" 0`),
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
buf := make([]byte, 512)
m, err := New(tt.args.name, tt.args.tags, tt.args.fields, tt.args.t, tt.args.mType...)
require.NoError(t, err)
r := NewReader([]telegraf.Metric{m})
num, err := r.Read(buf)
if err != io.EOF {
require.NoError(t, err)
}
line := string(buf[:num])
// This is done so that we can use raw strings in the test spec
noeol := strings.TrimRight(line, "\n")
require.Equal(t, string(tt.expected), noeol)
require.Equal(t, len(tt.expected)+1, num)
})
}
}
func TestMetricRoundtrip(t *testing.T) {
const lp = `nstat,bu=linux,cls=server,dc=cer,env=production,host=hostname,name=netstat,sr=database IpExtInBcastOctets=12570626154i,IpExtInBcastPkts=95541226i,IpExtInCEPkts=0i,IpExtInCsumErrors=0i,IpExtInECT0Pkts=55674i,IpExtInECT1Pkts=0i,IpExtInMcastOctets=5928296i,IpExtInMcastPkts=174365i,IpExtInNoECTPkts=17965863529i,IpExtInNoRoutes=20i,IpExtInOctets=3334866321815i,IpExtInTruncatedPkts=0i,IpExtOutBcastOctets=0i,IpExtOutBcastPkts=0i,IpExtOutMcastOctets=0i,IpExtOutMcastPkts=0i,IpExtOutOctets=31397892391399i,TcpExtArpFilter=0i,TcpExtBusyPollRxPackets=0i,TcpExtDelayedACKLocked=14094i,TcpExtDelayedACKLost=302083i,TcpExtDelayedACKs=55486507i,TcpExtEmbryonicRsts=11879i,TcpExtIPReversePathFilter=0i,TcpExtListenDrops=1736i,TcpExtListenOverflows=0i,TcpExtLockDroppedIcmps=0i,TcpExtOfoPruned=0i,TcpExtOutOfWindowIcmps=8i,TcpExtPAWSActive=0i,TcpExtPAWSEstab=974i,TcpExtPAWSPassive=0i,TcpExtPruneCalled=0i,TcpExtRcvPruned=0i,TcpExtSyncookiesFailed=12593i,TcpExtSyncookiesRecv=0i,TcpExtSyncookiesSent=0i,TcpExtTCPACKSkippedChallenge=0i,TcpExtTCPACKSkippedFinWait2=0i,TcpExtTCPACKSkippedPAWS=806i,TcpExtTCPACKSkippedSeq=519i,TcpExtTCPACKSkippedSynRecv=0i,TcpExtTCPACKSkippedTimeWait=0i,TcpExtTCPAbortFailed=0i,TcpExtTCPAbortOnClose=22i,TcpExtTCPAbortOnData=36593i,TcpExtTCPAbortOnLinger=0i,TcpExtTCPAbortOnMemory=0i,TcpExtTCPAbortOnTimeout=674i,TcpExtTCPAutoCorking=494253233i,TcpExtTCPBacklogDrop=0i,TcpExtTCPChallengeACK=281i,TcpExtTCPDSACKIgnoredNoUndo=93354i,TcpExtTCPDSACKIgnoredOld=336i,TcpExtTCPDSACKOfoRecv=0i,TcpExtTCPDSACKOfoSent=7i,TcpExtTCPDSACKOldSent=302073i,TcpExtTCPDSACKRecv=215884i,TcpExtTCPDSACKUndo=7633i,TcpExtTCPDeferAcceptDrop=0i,TcpExtTCPDirectCopyFromBacklog=0i,TcpExtTCPDirectCopyFromPrequeue=0i,TcpExtTCPFACKReorder=1320i,TcpExtTCPFastOpenActive=0i,TcpExtTCPFastOpenActiveFail=0i,TcpExtTCPFastOpenCookieReqd=0i,TcpExtTCPFastOpenListenOverflow=0i,TcpExtTCPFastOpenPassive=0i,TcpExtTCPFastOpenPassiveFail=0i,TcpExtTCPFastRetrans=350681i,TcpExtTCPForwardRetrans=142168i,TcpExtTCPFromZeroWindowAdv=4317i,TcpExtTCPFullUndo=29502i,TcpExtTCPHPAcks=10267073000i,TcpExtTCPHPHits=5629837098i,TcpExtTCPHPHitsToUser=0i,TcpExtTCPHystartDelayCwnd=285127i,TcpExtTCPHystartDelayDetect=12318i,TcpExtTCPHystartTrainCwnd=69160570i,TcpExtTCPHystartTrainDetect=3315799i,TcpExtTCPLossFailures=109i,TcpExtTCPLossProbeRecovery=110819i,TcpExtTCPLossProbes=233995i,TcpExtTCPLossUndo=5276i,TcpExtTCPLostRetransmit=397i,TcpExtTCPMD5NotFound=0i,TcpExtTCPMD5Unexpected=0i,TcpExtTCPMemoryPressures=0i,TcpExtTCPMinTTLDrop=0i,TcpExtTCPOFODrop=0i,TcpExtTCPOFOMerge=7i,TcpExtTCPOFOQueue=15196i,TcpExtTCPOrigDataSent=29055119435i,TcpExtTCPPartialUndo=21320i,TcpExtTCPPrequeueDropped=0i,TcpExtTCPPrequeued=0i,TcpExtTCPPureAcks=1236441827i,TcpExtTCPRcvCoalesce=225590473i,TcpExtTCPRcvCollapsed=0i,TcpExtTCPRenoFailures=0i,TcpExtTCPRenoRecovery=0i,TcpExtTCPRenoRecoveryFail=0i,TcpExtTCPRenoReorder=0i,TcpExtTCPReqQFullDoCookies=0i,TcpExtTCPReqQFullDrop=0i,TcpExtTCPRetransFail=41i,TcpExtTCPSACKDiscard=0i,TcpExtTCPSACKReneging=0i,TcpExtTCPSACKReorder=4307i,TcpExtTCPSYNChallenge=244i,TcpExtTCPSackFailures=1698i,TcpExtTCPSackMerged=184668i,TcpExtTCPSackRecovery=97369i,TcpExtTCPSackRecoveryFail=381i,TcpExtTCPSackShiftFallback=2697079i,TcpExtTCPSackShifted=760299i,TcpExtTCPSchedulerFailed=0i,TcpExtTCPSlowStartRetrans=9276i,TcpExtTCPSpuriousRTOs=959i,TcpExtTCPSpuriousRtxHostQueues=2973i,TcpExtTCPSynRetrans=200970i,TcpExtTCPTSReorder=15221i,TcpExtTCPTimeWaitOverflow=0i,TcpExtTCPTimeouts=70127i,TcpExtTCPToZeroWindowAdv=4317i,TcpExtTCPWantZeroWindowAdv=2133i,TcpExtTW=24809813i,TcpExtTWKilled=0i,TcpExtTWRecycled=0i 1496460785000000000
nstat,bu=linux,cls=server,dc=cer,env=production,host=hostname,name=snmp,sr=database IcmpInAddrMaskReps=0i,IcmpInAddrMasks=90i,IcmpInCsumErrors=0i,IcmpInDestUnreachs=284401i,IcmpInEchoReps=9i,IcmpInEchos=1761912i,IcmpInErrors=407i,IcmpInMsgs=2047767i,IcmpInParmProbs=0i,IcmpInRedirects=0i,IcmpInSrcQuenchs=0i,IcmpInTimeExcds=46i,IcmpInTimestampReps=0i,IcmpInTimestamps=1309i,IcmpMsgInType0=9i,IcmpMsgInType11=46i,IcmpMsgInType13=1309i,IcmpMsgInType17=90i,IcmpMsgInType3=284401i,IcmpMsgInType8=1761912i,IcmpMsgOutType0=1761912i,IcmpMsgOutType14=1248i,IcmpMsgOutType3=108709i,IcmpMsgOutType8=9i,IcmpOutAddrMaskReps=0i,IcmpOutAddrMasks=0i,IcmpOutDestUnreachs=108709i,IcmpOutEchoReps=1761912i,IcmpOutEchos=9i,IcmpOutErrors=0i,IcmpOutMsgs=1871878i,IcmpOutParmProbs=0i,IcmpOutRedirects=0i,IcmpOutSrcQuenchs=0i,IcmpOutTimeExcds=0i,IcmpOutTimestampReps=1248i,IcmpOutTimestamps=0i,IpDefaultTTL=64i,IpForwDatagrams=0i,IpForwarding=2i,IpFragCreates=0i,IpFragFails=0i,IpFragOKs=0i,IpInAddrErrors=0i,IpInDelivers=17658795773i,IpInDiscards=0i,IpInHdrErrors=0i,IpInReceives=17659269339i,IpInUnknownProtos=0i,IpOutDiscards=236976i,IpOutNoRoutes=1009i,IpOutRequests=23466783734i,IpReasmFails=0i,IpReasmOKs=0i,IpReasmReqds=0i,IpReasmTimeout=0i,TcpActiveOpens=23308977i,TcpAttemptFails=3757543i,TcpCurrEstab=280i,TcpEstabResets=184792i,TcpInCsumErrors=0i,TcpInErrs=232i,TcpInSegs=17536573089i,TcpMaxConn=-1i,TcpOutRsts=4051451i,TcpOutSegs=29836254873i,TcpPassiveOpens=176546974i,TcpRetransSegs=878085i,TcpRtoAlgorithm=1i,TcpRtoMax=120000i,TcpRtoMin=200i,UdpInCsumErrors=0i,UdpInDatagrams=24441661i,UdpInErrors=0i,UdpLiteInCsumErrors=0i,UdpLiteInDatagrams=0i,UdpLiteInErrors=0i,UdpLiteNoPorts=0i,UdpLiteOutDatagrams=0i,UdpLiteRcvbufErrors=0i,UdpLiteSndbufErrors=0i,UdpNoPorts=17660i,UdpOutDatagrams=51807896i,UdpRcvbufErrors=0i,UdpSndbufErrors=236922i 1496460785000000000


@@ -1,6 +1,7 @@
package all
import (
_ "github.com/influxdata/telegraf/plugins/aggregators/basicstats"
_ "github.com/influxdata/telegraf/plugins/aggregators/histogram"
_ "github.com/influxdata/telegraf/plugins/aggregators/minmax"
)


@@ -0,0 +1,43 @@
# BasicStats Aggregator Plugin
The BasicStats aggregator plugin gives count, max, min, mean, s2 (variance), and stdev for a set of values,
emitting the aggregate every `period` seconds.
### Configuration:
```toml
# Keep the aggregate basicstats of each metric passing through.
[[aggregators.basicstats]]
## General Aggregator Arguments:
## The period on which to flush & clear the aggregator.
period = "30s"
## If true, the original metric will be dropped by the
## aggregator and will not get sent to the output plugins.
drop_original = false
```
### Measurements & Fields:
- measurement1
- field1_count
- field1_max
- field1_min
- field1_mean
- field1_s2 (variance)
- field1_stdev (standard deviation)
### Tags:
No tags are applied by this aggregator.
### Example Output:
```
$ telegraf --config telegraf.conf --quiet
system,host=tars load1=1 1475583980000000000
system,host=tars load1=1 1475583990000000000
system,host=tars load1_count=2,load1_max=1,load1_min=1,load1_mean=1,load1_s2=0,load1_stdev=0 1475584010000000000
system,host=tars load1=1 1475584020000000000
system,host=tars load1=3 1475584030000000000
system,host=tars load1_count=2,load1_max=3,load1_min=1,load1_mean=2,load1_s2=2,load1_stdev=1.414162 1475584010000000000
```


@@ -0,0 +1,155 @@
package basicstats
import (
"math"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/aggregators"
)
type BasicStats struct {
cache map[uint64]aggregate
}
func NewBasicStats() telegraf.Aggregator {
mm := &BasicStats{}
mm.Reset()
return mm
}
type aggregate struct {
fields map[string]basicstats
name string
tags map[string]string
}
type basicstats struct {
count float64
min float64
max float64
mean float64
M2 float64 //intermediate value for variance/stdev
}
var sampleConfig = `
## General Aggregator Arguments:
## The period on which to flush & clear the aggregator.
period = "30s"
## If true, the original metric will be dropped by the
## aggregator and will not get sent to the output plugins.
drop_original = false
`
func (m *BasicStats) SampleConfig() string {
return sampleConfig
}
func (m *BasicStats) Description() string {
return "Keep the aggregate basicstats of each metric passing through."
}
func (m *BasicStats) Add(in telegraf.Metric) {
id := in.HashID()
if _, ok := m.cache[id]; !ok {
// hit an uncached metric, create caches for first time:
a := aggregate{
name: in.Name(),
tags: in.Tags(),
fields: make(map[string]basicstats),
}
for k, v := range in.Fields() {
if fv, ok := convert(v); ok {
a.fields[k] = basicstats{
count: 1,
min: fv,
max: fv,
mean: fv,
M2: 0.0,
}
}
}
m.cache[id] = a
} else {
for k, v := range in.Fields() {
if fv, ok := convert(v); ok {
if _, ok := m.cache[id].fields[k]; !ok {
// hit an uncached field of a cached metric
m.cache[id].fields[k] = basicstats{
count: 1,
min: fv,
max: fv,
mean: fv,
M2: 0.0,
}
continue
}
tmp := m.cache[id].fields[k]
//https://en.m.wikipedia.org/wiki/Algorithms_for_calculating_variance
//variable initialization
x := fv
mean := tmp.mean
M2 := tmp.M2
//counter compute
n := tmp.count + 1
tmp.count = n
//mean compute
delta := x - mean
mean = mean + delta/n
tmp.mean = mean
//variance/stdev compute
M2 = M2 + delta*(x-mean)
tmp.M2 = M2
//max/min compute
if fv < tmp.min {
tmp.min = fv
} else if fv > tmp.max {
tmp.max = fv
}
//store final data
m.cache[id].fields[k] = tmp
}
}
}
}
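// Worked example of the online (Welford) update above, matching the README
// sample output: adding 1 then 3 to a field gives n=2, delta=3-1=2,
// mean=1+2/2=2, M2=0+2*(3-2)=2, so variance=M2/(n-1)=2 and stdev=sqrt(2).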
func (m *BasicStats) Push(acc telegraf.Accumulator) {
for _, aggregate := range m.cache {
fields := map[string]interface{}{}
for k, v := range aggregate.fields {
fields[k+"_count"] = v.count
fields[k+"_min"] = v.min
fields[k+"_max"] = v.max
fields[k+"_mean"] = v.mean
//v.count always >=1
if v.count > 1 {
variance := v.M2 / (v.count - 1)
fields[k+"_s2"] = variance
fields[k+"_stdev"] = math.Sqrt(variance)
}
//if count == 1 variance and stdev are undefined, so these fields are not sent
}
acc.AddFields(aggregate.name, fields, aggregate.tags)
}
}
func (m *BasicStats) Reset() {
m.cache = make(map[uint64]aggregate)
}
func convert(in interface{}) (float64, bool) {
switch v := in.(type) {
case float64:
return v, true
case int64:
return float64(v), true
default:
return 0, false
}
}
func init() {
aggregators.Add("basicstats", func() telegraf.Aggregator {
return NewBasicStats()
})
}


@@ -0,0 +1,151 @@
package basicstats
import (
"math"
"testing"
"time"
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/testutil"
)
var m1, _ = metric.New("m1",
map[string]string{"foo": "bar"},
map[string]interface{}{
"a": int64(1),
"b": int64(1),
"c": float64(2),
"d": float64(2),
},
time.Now(),
)
var m2, _ = metric.New("m1",
map[string]string{"foo": "bar"},
map[string]interface{}{
"a": int64(1),
"b": int64(3),
"c": float64(4),
"d": float64(6),
"e": float64(200),
"ignoreme": "string",
"andme": true,
},
time.Now(),
)
func BenchmarkApply(b *testing.B) {
minmax := NewBasicStats()
for n := 0; n < b.N; n++ {
minmax.Add(m1)
minmax.Add(m2)
}
}
// Test two metrics getting added.
func TestBasicStatsWithPeriod(t *testing.T) {
acc := testutil.Accumulator{}
minmax := NewBasicStats()
minmax.Add(m1)
minmax.Add(m2)
minmax.Push(&acc)
expectedFields := map[string]interface{}{
"a_count": float64(2), //a
"a_max": float64(1),
"a_min": float64(1),
"a_mean": float64(1),
"a_stdev": float64(0),
"a_s2": float64(0),
"b_count": float64(2), //b
"b_max": float64(3),
"b_min": float64(1),
"b_mean": float64(2),
"b_s2": float64(2),
"b_stdev": math.Sqrt(2),
"c_count": float64(2), //c
"c_max": float64(4),
"c_min": float64(2),
"c_mean": float64(3),
"c_s2": float64(2),
"c_stdev": math.Sqrt(2),
"d_count": float64(2), //d
"d_max": float64(6),
"d_min": float64(2),
"d_mean": float64(4),
"d_s2": float64(8),
"d_stdev": math.Sqrt(8),
"e_count": float64(1), //e
"e_max": float64(200),
"e_min": float64(200),
"e_mean": float64(200),
}
expectedTags := map[string]string{
"foo": "bar",
}
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}
// Test two metrics getting added with a push/reset in between (simulates
// getting added in different periods.)
func TestBasicStatsDifferentPeriods(t *testing.T) {
acc := testutil.Accumulator{}
minmax := NewBasicStats()
minmax.Add(m1)
minmax.Push(&acc)
expectedFields := map[string]interface{}{
"a_count": float64(1), //a
"a_max": float64(1),
"a_min": float64(1),
"a_mean": float64(1),
"b_count": float64(1), //b
"b_max": float64(1),
"b_min": float64(1),
"b_mean": float64(1),
"c_count": float64(1), //c
"c_max": float64(2),
"c_min": float64(2),
"c_mean": float64(2),
"d_count": float64(1), //d
"d_max": float64(2),
"d_min": float64(2),
"d_mean": float64(2),
}
expectedTags := map[string]string{
"foo": "bar",
}
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
acc.ClearMetrics()
minmax.Reset()
minmax.Add(m2)
minmax.Push(&acc)
expectedFields = map[string]interface{}{
"a_count": float64(1), //a
"a_max": float64(1),
"a_min": float64(1),
"a_mean": float64(1),
"b_count": float64(1), //b
"b_max": float64(3),
"b_min": float64(3),
"b_mean": float64(3),
"c_count": float64(1), //c
"c_max": float64(4),
"c_min": float64(4),
"c_mean": float64(4),
"d_count": float64(1), //d
"d_max": float64(6),
"d_min": float64(6),
"d_mean": float64(6),
"e_count": float64(1), //e
"e_max": float64(200),
"e_min": float64(200),
"e_mean": float64(200),
}
expectedTags = map[string]string{
"foo": "bar",
}
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}


@@ -1,38 +1,25 @@
# Histogram Aggregator Plugin
#### Goal
The histogram aggregator plugin creates histograms containing the counts of
field values within a range.
This plugin was added for ability to build histograms.
Values added to a bucket are also added to the larger buckets in the
distribution. This creates a [cumulative histogram](https://en.wikipedia.org/wiki/Histogram#/media/File:Cumulative_vs_normal_histogram.svg).
#### Description
Like other Telegraf aggregators, the metric is emitted every `period` seconds.
Bucket counts however are not reset between periods and will be non-strictly
increasing while Telegraf is running.
The histogram aggregator plugin aggregates values of specified metric's
fields. The metric is emitted every `period` seconds. All you need to do
is to specify borders of histogram buckets and fields, for which you want
to aggregate histogram.
#### Design
#### How it works
The each metric is passed to the aggregator and this aggregator searches
Each metric is passed to the aggregator and this aggregator searches
histogram buckets for those fields, which have been specified in the
config. If buckets are found, the aggregator will put +1 to appropriate
bucket. Otherwise, nothing will happen. Every `period` seconds these data
will be pushed to output.
config. If buckets are found, the aggregator increments the appropriate
bucket by one; otherwise the value is counted in the `+Inf` bucket. Every `period`
seconds this data will be forwarded to the outputs.
Note, that the all hits of current bucket will be also added to all next
buckets in final result of distribution. Why does it work this way? In
configuration you define right borders for each bucket in a ascending
sequence. Internally buckets are presented as ranges with borders
(0..bucketBorder]: 0..1, 0..10, 0..50, …, 0..+Inf. So the value "+1" will be
put into those buckets, in which the metric value fell with such ranges of
buckets.
This plugin creates cumulative histograms. It means, that the hits in the
buckets will always increase from the moment of telegraf start. But if you
restart telegraf, all hits in the buckets will be reset to 0.
Also, the algorithm of hit counting to buckets was implemented on the base
of the algorithm, which is implemented in the Prometheus
The bucket-counting algorithm is based on the one implemented in the Prometheus
[client](https://github.com/prometheus/client_golang/blob/master/prometheus/histogram.go).
### Configuration
@@ -40,61 +27,44 @@ of the algorithm, which is implemented in the Prometheus
```toml
# Configuration for aggregate histogram metrics
[[aggregators.histogram]]
## General Aggregator Arguments:
## The period on which to flush & clear the aggregator.
## The period in which to flush the aggregator.
period = "30s"
## If true, the original metric will be dropped by the
## aggregator and will not get sent to the output plugins.
drop_original = false
## The example of config to aggregate histogram for all fields of specified metric.
[[aggregators.histogram.config]]
## The set of buckets.
buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0]
## The name of metric.
metric_name = "cpu"
## Example config that aggregates all fields of the metric.
# [[aggregators.histogram.config]]
# ## The set of buckets.
# buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0]
# ## The name of metric.
# measurement_name = "cpu"
## The example of config to aggregate histogram for concrete fields of specified metric.
[[aggregators.histogram.config]]
## The set of buckets.
buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
## The name of metric.
metric_name = "diskio"
## The concrete fields of metric.
metric_fields = ["io_time", "read_time", "write_time"]
## Example config that aggregates only specific fields of the metric.
# [[aggregators.histogram.config]]
# ## The set of buckets.
# buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
# ## The name of metric.
# measurement_name = "diskio"
# ## The concrete fields of metric
# fields = ["io_time", "read_time", "write_time"]
```
#### Explanation
The user is responsible for defining the bounds of the histogram bucket as
well as the measurement name and fields to aggregate.
The field `metric_fields` is the list of metric fields. For example, the
metric `cpu` has the following fields: usage_user, usage_system,
usage_idle, usage_nice, usage_iowait, usage_irq, usage_softirq, usage_steal,
usage_guest, usage_guest_nice.
Each histogram config section must contain a `buckets` and `measurement_name`
option. Optionally, if `fields` is set only the fields listed will be
aggregated. If `fields` is not set all fields are aggregated.
Note that histogram metrics will be pushed every `period` seconds.
As you know telegraf calls aggregator `Reset()` func each `period` seconds.
Histogram aggregator ignores `Reset()` and continues to count hits.
The `buckets` option contains a list of floats which specify the bucket
boundaries. Each float value defines the inclusive upper bound of the bucket.
The `+Inf` bucket is added automatically and does not need to be defined.
#### Use cases
You can specify fields using two cases:
1. The specifying only metric name. In this case all fields of metric
will be aggregated.
2. The specifying metric name and concrete field.
#### Some rules
- The setting of each histogram must be in separate section with title
`aggregators.histogram.config`.
- The each value of bucket must be float value.
- Don\`t include the border bucket `+Inf`. It will be done automatically.
### Measurements & Fields:
The postfix `bucket` will be added to each field.
The postfix `bucket` will be added to each field key.
- measurement1
- field1_bucket
@@ -102,16 +72,15 @@ The postfix `bucket` will be added to each field.
### Tags:
All measurements have tag `le`. This tag has the border value of bucket. It
means that the metric value is less or equal to the value of this tag. For
example, let assume that we have the metric value 10 and the following
buckets: [5, 10, 30, 70, 100]. Then the tag `le` will have the value 10,
because the metrics value is passed into bucket with right border value `10`.
All measurements are given the tag `le`. This tag holds the right border value
of the bucket: the metric value is less than or equal to the value of this
tag. For example, assume we have the metric value 10 and the following
buckets: [5, 10, 30, 70, 100]. Then the tag `le` will have the value 10,
because the metric value falls into the bucket with right border value `10`.
### Example Output:
The following output will be returned to the Prometheus client.
```
cpu,cpu=cpu1,host=localhost,le=0.0 usage_idle_bucket=0i 1486998330000000000
cpu,cpu=cpu1,host=localhost,le=10.0 usage_idle_bucket=0i 1486998330000000000


@@ -24,8 +24,8 @@ type HistogramAggregator struct {
// config is the config, which contains name, field of metric and histogram buckets.
type config struct {
Metric string `toml:"metric_name"`
Fields []string `toml:"metric_fields"`
Metric string `toml:"measurement_name"`
Fields []string `toml:"fields"`
Buckets buckets `toml:"buckets"`
}
@@ -65,28 +65,28 @@ func NewHistogramAggregator() telegraf.Aggregator {
}
var sampleConfig = `
## General Aggregator Arguments:
## The period on which to flush & clear the aggregator.
## The period in which to flush the aggregator.
period = "30s"
## If true, the original metric will be dropped by the
## aggregator and will not get sent to the output plugins.
drop_original = false
## The example of config to aggregate histogram for all fields of specified metric.
[[aggregators.histogram.config]]
## The set of buckets.
buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0]
## The name of metric.
metric_name = "cpu"
## Example config that aggregates all fields of the metric.
# [[aggregators.histogram.config]]
# ## The set of buckets.
# buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0]
# ## The name of metric.
# measurement_name = "cpu"
## The example of config to aggregate for specified fields of metric.
[[aggregators.histogram.config]]
## The set of buckets.
buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
## The name of metric.
metric_name = "diskio"
## The concrete fields of metric
metric_fields = ["io_time", "read_time", "write_time"]
## Example config that aggregates only specific fields of the metric.
# [[aggregators.histogram.config]]
# ## The set of buckets.
# buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
# ## The name of metric.
# measurement_name = "diskio"
# ## The concrete fields of metric
# fields = ["io_time", "read_time", "write_time"]
`
// SampleConfig returns sample of config
@@ -96,7 +96,7 @@ func (h *HistogramAggregator) SampleConfig() string {
// Description returns description of aggregator plugin
func (h *HistogramAggregator) Description() string {
return "Keep the aggregate histogram of each metric passing through."
return "Create aggregate histograms."
}
// Add adds new hit to the buckets


@@ -6,30 +6,37 @@ additional information can be found.
### Configuration:
This section contains the default TOML to configure the plugin. You can
generate it using `telegraf --usage <plugin-name>`.
```toml
# Description
[[inputs.example]]
# SampleConfig
example_option = "example_value"
```
### Measurements & Fields:
### Metrics:
Here you should add an optional description and links to where the user can
get more information about the measurements.
If the output is determined dynamically based on the input source, or there
are more metrics than can reasonably be listed, describe how the input is
mapped to the output.
- measurement1
- field1 (type, unit)
- field2 (float, percent)
- measurement2
- field3 (integer, bytes)
### Tags:
- All measurements have the following tags:
- tags:
- tag1 (optional description)
- tag2
- measurement2 has the following tags:
- fields:
- field1 (type, unit)
- field2 (float, percent)
- measurement2
- tags:
- tag3
- fields:
- field3 (integer, bytes)
### Sample Queries:
@@ -44,6 +51,10 @@ SELECT max(field1), mean(field1), min(field1) FROM measurement1 WHERE tag1=bar A
### Example Output:
This section shows example output in Line Protocol format. You can often use
`telegraf --input-filter <plugin-name> --test` or use the `file` output to get
this information.
```
measurement1,tag1=foo,tag2=bar field1=1i,field2=2.1 1453831884664956455
measurement2,tag1=foo,tag2=bar,tag3=baz field3=1i 1453831884664956455


@@ -5,6 +5,7 @@ import (
_ "github.com/influxdata/telegraf/plugins/inputs/amqp_consumer"
_ "github.com/influxdata/telegraf/plugins/inputs/apache"
_ "github.com/influxdata/telegraf/plugins/inputs/bcache"
_ "github.com/influxdata/telegraf/plugins/inputs/bond"
_ "github.com/influxdata/telegraf/plugins/inputs/cassandra"
_ "github.com/influxdata/telegraf/plugins/inputs/ceph"
_ "github.com/influxdata/telegraf/plugins/inputs/cgroup"
@@ -14,6 +15,7 @@ import (
_ "github.com/influxdata/telegraf/plugins/inputs/consul"
_ "github.com/influxdata/telegraf/plugins/inputs/couchbase"
_ "github.com/influxdata/telegraf/plugins/inputs/couchdb"
_ "github.com/influxdata/telegraf/plugins/inputs/dcos"
_ "github.com/influxdata/telegraf/plugins/inputs/disque"
_ "github.com/influxdata/telegraf/plugins/inputs/dmcache"
_ "github.com/influxdata/telegraf/plugins/inputs/dns_query"
@@ -36,6 +38,7 @@ import (
_ "github.com/influxdata/telegraf/plugins/inputs/ipmi_sensor"
_ "github.com/influxdata/telegraf/plugins/inputs/iptables"
_ "github.com/influxdata/telegraf/plugins/inputs/jolokia"
_ "github.com/influxdata/telegraf/plugins/inputs/jolokia2"
_ "github.com/influxdata/telegraf/plugins/inputs/kafka_consumer"
_ "github.com/influxdata/telegraf/plugins/inputs/kafka_consumer_legacy"
_ "github.com/influxdata/telegraf/plugins/inputs/kapacitor"
@@ -53,14 +56,18 @@ import (
_ "github.com/influxdata/telegraf/plugins/inputs/nats_consumer"
_ "github.com/influxdata/telegraf/plugins/inputs/net_response"
_ "github.com/influxdata/telegraf/plugins/inputs/nginx"
_ "github.com/influxdata/telegraf/plugins/inputs/nginx_plus"
_ "github.com/influxdata/telegraf/plugins/inputs/nsq"
_ "github.com/influxdata/telegraf/plugins/inputs/nsq_consumer"
_ "github.com/influxdata/telegraf/plugins/inputs/nstat"
_ "github.com/influxdata/telegraf/plugins/inputs/ntpq"
_ "github.com/influxdata/telegraf/plugins/inputs/openldap"
_ "github.com/influxdata/telegraf/plugins/inputs/opensmtpd"
_ "github.com/influxdata/telegraf/plugins/inputs/passenger"
_ "github.com/influxdata/telegraf/plugins/inputs/pf"
_ "github.com/influxdata/telegraf/plugins/inputs/phpfpm"
_ "github.com/influxdata/telegraf/plugins/inputs/ping"
_ "github.com/influxdata/telegraf/plugins/inputs/postfix"
_ "github.com/influxdata/telegraf/plugins/inputs/postgresql"
_ "github.com/influxdata/telegraf/plugins/inputs/postgresql_extensible"
_ "github.com/influxdata/telegraf/plugins/inputs/powerdns"
@@ -74,19 +81,23 @@ import (
_ "github.com/influxdata/telegraf/plugins/inputs/riak"
_ "github.com/influxdata/telegraf/plugins/inputs/salesforce"
_ "github.com/influxdata/telegraf/plugins/inputs/sensors"
_ "github.com/influxdata/telegraf/plugins/inputs/smart"
_ "github.com/influxdata/telegraf/plugins/inputs/snmp"
_ "github.com/influxdata/telegraf/plugins/inputs/snmp_legacy"
_ "github.com/influxdata/telegraf/plugins/inputs/socket_listener"
_ "github.com/influxdata/telegraf/plugins/inputs/solr"
_ "github.com/influxdata/telegraf/plugins/inputs/sqlserver"
_ "github.com/influxdata/telegraf/plugins/inputs/statsd"
_ "github.com/influxdata/telegraf/plugins/inputs/sysstat"
_ "github.com/influxdata/telegraf/plugins/inputs/system"
_ "github.com/influxdata/telegraf/plugins/inputs/tail"
_ "github.com/influxdata/telegraf/plugins/inputs/tcp_listener"
_ "github.com/influxdata/telegraf/plugins/inputs/teamspeak"
_ "github.com/influxdata/telegraf/plugins/inputs/tomcat"
_ "github.com/influxdata/telegraf/plugins/inputs/trig"
_ "github.com/influxdata/telegraf/plugins/inputs/twemproxy"
_ "github.com/influxdata/telegraf/plugins/inputs/udp_listener"
_ "github.com/influxdata/telegraf/plugins/inputs/unbound"
_ "github.com/influxdata/telegraf/plugins/inputs/varnish"
_ "github.com/influxdata/telegraf/plugins/inputs/webhooks"
_ "github.com/influxdata/telegraf/plugins/inputs/win_perf_counters"

View File

@@ -39,9 +39,9 @@ The following defaults are known to work with RabbitMQ:
## Use SSL but skip chain & host verification
# insecure_skip_verify = false
## Data format to output.
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "influx"
```

View File

@@ -85,10 +85,10 @@ func (a *AMQPConsumer) SampleConfig() string {
## Use SSL but skip chain & host verification
# insecure_skip_verify = false
## Data format to output.
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "influx"
`
}

View File

@@ -0,0 +1,85 @@
# Bond Input Plugin
The Bond input plugin collects the status of bond interfaces, the status of
the bond's slave interfaces, and the failure count of the bond's slave interfaces.
The plugin collects these metrics from `/proc/net/bonding/*` files.
### Configuration:
```toml
[[inputs.bond]]
## Sets 'proc' directory path
## If not specified, then default is /proc
# host_proc = "/proc"
## By default, telegraf gathers stats for all bond interfaces
## Setting interfaces will restrict the stats to the specified
## bond interfaces.
# bond_interfaces = ["bond0"]
```
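Note that `host_proc` takes precedence when set; if it is empty the plugin
falls back to the `HOST_PROC` environment variable, and finally to `/proc`
(see `loadPath` in `bond.go` below).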
### Measurements & Fields:
- bond
- active_slave (for active-backup mode)
- status
- bond_slave
- failures
- status
### Description:
```
active_slave
Currently active slave interface for active-backup mode.
status
Status of the bond interface or the bond's slave interface (down = 0, up = 1).
failures
Number of failures for the bond's slave interface.
```
### Tags:
- bond
- bond
- bond_slave
- bond
- interface
### Example output:
Configuration:
```
[[inputs.bond]]
## Sets 'proc' directory path
## If not specified, then default is /proc
host_proc = "/proc"
## By default, telegraf gathers stats for all bond interfaces
## Setting interfaces will restrict the stats to the specified
## bond interfaces.
bond_interfaces = ["bond0", "bond1"]
```
Run:
```
telegraf --config telegraf.conf --input-filter bond --test
```
Output:
```
* Plugin: inputs.bond, Collection 1
> bond,bond=bond1,host=local active_slave="eth0",status=1i 1509704525000000000
> bond_slave,bond=bond1,interface=eth0,host=local status=1i,failures=0i 1509704525000000000
> bond_slave,host=local,bond=bond1,interface=eth1 status=1i,failures=0i 1509704525000000000
> bond,bond=bond0,host=isvetlov-mac.local status=1i 1509704525000000000
> bond_slave,bond=bond0,interface=eth1,host=local status=1i,failures=0i 1509704525000000000
> bond_slave,bond=bond0,interface=eth2,host=local status=1i,failures=0i 1509704525000000000
```

plugins/inputs/bond/bond.go Normal file
View File

@@ -0,0 +1,204 @@
package bond
import (
"bufio"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"strings"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
)
// default host proc path
const defaultHostProc = "/proc"
// env host proc variable name
const envProc = "HOST_PROC"
type Bond struct {
HostProc string `toml:"host_proc"`
BondInterfaces []string `toml:"bond_interfaces"`
}
var sampleConfig = `
## Sets 'proc' directory path
## If not specified, then default is /proc
# host_proc = "/proc"
## By default, telegraf gathers stats for all bond interfaces
## Setting interfaces will restrict the stats to the specified
## bond interfaces.
# bond_interfaces = ["bond0"]
`
func (bond *Bond) Description() string {
return "Collect bond interface status, slaves statuses and failures count"
}
func (bond *Bond) SampleConfig() string {
return sampleConfig
}
func (bond *Bond) Gather(acc telegraf.Accumulator) error {
// load proc path, get default value if config value and env variable are empty
bond.loadPath()
// list bond interfaces: use the configured list if set, otherwise glob the bonding directory
bondNames, err := bond.listInterfaces()
if err != nil {
return err
}
for _, bondName := range bondNames {
bondAbsPath := bond.HostProc + "/net/bonding/" + bondName
file, err := ioutil.ReadFile(bondAbsPath)
if err != nil {
acc.AddError(fmt.Errorf("error inspecting '%s' interface: %v", bondAbsPath, err))
continue
}
rawFile := strings.TrimSpace(string(file))
err = bond.gatherBondInterface(bondName, rawFile, acc)
if err != nil {
acc.AddError(fmt.Errorf("error inspecting '%s' interface: %v", bondName, err))
}
}
return nil
}
func (bond *Bond) gatherBondInterface(bondName string, rawFile string, acc telegraf.Accumulator) error {
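// The bonding file reports bond-level fields first; each per-slave block
// begins with a "Slave Interface:" line, so split on its first occurrence.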
splitIndex := strings.Index(rawFile, "Slave Interface:")
if splitIndex == -1 {
splitIndex = len(rawFile)
}
bondPart := rawFile[:splitIndex]
slavePart := rawFile[splitIndex:]
err := bond.gatherBondPart(bondName, bondPart, acc)
if err != nil {
return err
}
err = bond.gatherSlavePart(bondName, slavePart, acc)
if err != nil {
return err
}
return nil
}
func (bond *Bond) gatherBondPart(bondName string, rawFile string, acc telegraf.Accumulator) error {
fields := make(map[string]interface{})
tags := map[string]string{
"bond": bondName,
}
scanner := bufio.NewScanner(strings.NewReader(rawFile))
for scanner.Scan() {
line := scanner.Text()
stats := strings.Split(line, ":")
if len(stats) < 2 {
continue
}
name := strings.TrimSpace(stats[0])
value := strings.TrimSpace(stats[1])
if strings.Contains(name, "Currently Active Slave") {
fields["active_slave"] = value
}
if strings.Contains(name, "MII Status") {
fields["status"] = 0
if value == "up" {
fields["status"] = 1
}
acc.AddFields("bond", fields, tags)
return nil
}
}
if err := scanner.Err(); err != nil {
return err
}
return fmt.Errorf("Couldn't find status info for '%s' ", bondName)
}
func (bond *Bond) gatherSlavePart(bondName string, rawFile string, acc telegraf.Accumulator) error {
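// Walk the per-slave blocks: remember the most recent interface name and
// MII status, then emit one bond_slave point when the block's
// "Link Failure Count" line is reached.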
var slave string
var status int
scanner := bufio.NewScanner(strings.NewReader(rawFile))
for scanner.Scan() {
line := scanner.Text()
stats := strings.Split(line, ":")
if len(stats) < 2 {
continue
}
name := strings.TrimSpace(stats[0])
value := strings.TrimSpace(stats[1])
if strings.Contains(name, "Slave Interface") {
slave = value
}
if strings.Contains(name, "MII Status") {
status = 0
if value == "up" {
status = 1
}
}
if strings.Contains(name, "Link Failure Count") {
count, err := strconv.Atoi(value)
if err != nil {
return err
}
fields := map[string]interface{}{
"status": status,
"failures": count,
}
tags := map[string]string{
"bond": bondName,
"interface": slave,
}
acc.AddFields("bond_slave", fields, tags)
}
}
if err := scanner.Err(); err != nil {
return err
}
return nil
}
// loadPath reads the proc path from the config first;
// if that is empty it falls back to the env variable
func (bond *Bond) loadPath() {
if bond.HostProc == "" {
bond.HostProc = proc(envProc, defaultHostProc)
}
}
// proc returns the path from the env variable if set, otherwise the default
func proc(env, path string) string {
// try to read full file path
if p := os.Getenv(env); p != "" {
return p
}
// return default path
return path
}
func (bond *Bond) listInterfaces() ([]string, error) {
var interfaces []string
if len(bond.BondInterfaces) > 0 {
interfaces = bond.BondInterfaces
} else {
paths, err := filepath.Glob(bond.HostProc + "/net/bonding/*")
if err != nil {
return nil, err
}
for _, p := range paths {
interfaces = append(interfaces, filepath.Base(p))
}
}
return interfaces, nil
}
func init() {
inputs.Add("bond", func() telegraf.Input {
return &Bond{}
})
}

View File

@@ -0,0 +1,77 @@
package bond
import (
"testing"
"github.com/influxdata/telegraf/testutil"
)
var sampleTest802 = `
Ethernet Channel Bonding Driver: v3.5.0 (November 4, 2008)
Bonding Mode: IEEE 802.3ad Dynamic link aggregation
Transmit Hash Policy: layer2 (0)
MII Status: up
MII Polling Interval (ms): 100
Up Delay (ms): 0
Down Delay (ms): 0
802.3ad info
LACP rate: fast
Aggregator selection policy (ad_select): stable
bond bond0 has no active aggregator
Slave Interface: eth1
MII Status: up
Link Failure Count: 0
Permanent HW addr: 00:0c:29:f5:b7:11
Aggregator ID: N/A
Slave Interface: eth2
MII Status: up
Link Failure Count: 3
Permanent HW addr: 00:0c:29:f5:b7:1b
Aggregator ID: N/A
`
var sampleTestAB = `
Ethernet Channel Bonding Driver: v3.6.0 (September 26, 2009)
Bonding Mode: fault-tolerance (active-backup)
Primary Slave: eth2 (primary_reselect always)
Currently Active Slave: eth2
MII Status: up
MII Polling Interval (ms): 100
Up Delay (ms): 0
Down Delay (ms): 0
Slave Interface: eth3
MII Status: down
Speed: 1000 Mbps
Duplex: full
Link Failure Count: 2
Permanent HW addr:
Slave queue ID: 0
Slave Interface: eth2
MII Status: up
Speed: 100 Mbps
Duplex: full
Link Failure Count: 0
Permanent HW addr:
`
func TestGatherBondInterface(t *testing.T) {
var acc testutil.Accumulator
bond := &Bond{}
bond.gatherBondInterface("bond802", sampleTest802, &acc)
acc.AssertContainsTaggedFields(t, "bond", map[string]interface{}{"status": 1}, map[string]string{"bond": "bond802"})
acc.AssertContainsTaggedFields(t, "bond_slave", map[string]interface{}{"failures": 0, "status": 1}, map[string]string{"bond": "bond802", "interface": "eth1"})
acc.AssertContainsTaggedFields(t, "bond_slave", map[string]interface{}{"failures": 3, "status": 1}, map[string]string{"bond": "bond802", "interface": "eth2"})
bond.gatherBondInterface("bondAB", sampleTestAB, &acc)
acc.AssertContainsTaggedFields(t, "bond", map[string]interface{}{"active_slave": "eth2", "status": 1}, map[string]string{"bond": "bondAB"})
acc.AssertContainsTaggedFields(t, "bond_slave", map[string]interface{}{"failures": 2, "status": 0}, map[string]string{"bond": "bondAB", "interface": "eth3"})
acc.AssertContainsTaggedFields(t, "bond_slave", map[string]interface{}{"failures": 0, "status": 1}, map[string]string{"bond": "bondAB", "interface": "eth2"})
}

View File

@@ -26,7 +26,7 @@ func TestParseSockId(t *testing.T) {
func TestParseMonDump(t *testing.T) {
dump, err := parseDump(monPerfDump)
assert.NoError(t, err)
assert.InEpsilon(t, 5678670180, dump["cluster"]["osd_kb_used"], epsilon)
assert.InEpsilon(t, int64(5678670180), dump["cluster"]["osd_kb_used"], epsilon)
assert.InEpsilon(t, 6866.540527000, dump["paxos"]["store_state_latency.sum"], epsilon)
}

View File

@@ -225,7 +225,7 @@ var fileFormats = [...]fileFormat{
}
func numberOrString(s string) interface{} {
i, err := strconv.Atoi(s)
i, err := strconv.ParseInt(s, 10, 64)
if err == nil {
return i
}
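For context on this change: `strconv.Atoi` parses into the platform-dependent `int`, which is only 32 bits on 32-bit builds, so large counters such as the cgroup values in the tests below would overflow there; `strconv.ParseInt(s, 10, 64)` always yields an `int64`. A minimal standalone sketch of the difference (not part of the diff):

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	s := "9223372036854771712" // near the int64 maximum

	// ParseInt with bitSize 64 succeeds regardless of the platform word size.
	i64, err := strconv.ParseInt(s, 10, 64)
	fmt.Println(i64, err) // 9223372036854771712 <nil>

	// Atoi returns the platform-dependent int; on a 32-bit build this value
	// does not fit and Atoi reports a range error instead.
	i, err := strconv.Atoi(s)
	fmt.Println(i, err)
}
```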

View File

@@ -31,17 +31,17 @@ func TestCgroupStatistics_1(t *testing.T) {
"path": "testdata/memory",
}
fields := map[string]interface{}{
"memory.stat.cache": 1739362304123123123,
"memory.stat.rss": 1775325184,
"memory.stat.rss_huge": 778043392,
"memory.stat.mapped_file": 421036032,
"memory.stat.dirty": -307200,
"memory.max_usage_in_bytes.0": 0,
"memory.max_usage_in_bytes.1": -1,
"memory.max_usage_in_bytes.2": 2,
"memory.limit_in_bytes": 223372036854771712,
"memory.stat.cache": int64(1739362304123123123),
"memory.stat.rss": int64(1775325184),
"memory.stat.rss_huge": int64(778043392),
"memory.stat.mapped_file": int64(421036032),
"memory.stat.dirty": int64(-307200),
"memory.max_usage_in_bytes.0": int64(0),
"memory.max_usage_in_bytes.1": int64(-1),
"memory.max_usage_in_bytes.2": int64(2),
"memory.limit_in_bytes": int64(223372036854771712),
"memory.use_hierarchy": "12-781",
"notify_on_release": 0,
"notify_on_release": int64(0),
}
acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
}
@@ -63,10 +63,10 @@ func TestCgroupStatistics_2(t *testing.T) {
"path": "testdata/cpu",
}
fields := map[string]interface{}{
"cpuacct.usage_percpu.0": -1452543795404,
"cpuacct.usage_percpu.1": 1376681271659,
"cpuacct.usage_percpu.2": 1450950799997,
"cpuacct.usage_percpu.3": -1473113374257,
"cpuacct.usage_percpu.0": int64(-1452543795404),
"cpuacct.usage_percpu.1": int64(1376681271659),
"cpuacct.usage_percpu.2": int64(1450950799997),
"cpuacct.usage_percpu.3": int64(-1473113374257),
}
acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
}
@@ -88,7 +88,7 @@ func TestCgroupStatistics_3(t *testing.T) {
"path": "testdata/memory/group_1",
}
fields := map[string]interface{}{
"memory.limit_in_bytes": 223372036854771712,
"memory.limit_in_bytes": int64(223372036854771712),
}
acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
@@ -115,7 +115,7 @@ func TestCgroupStatistics_4(t *testing.T) {
"path": "testdata/memory/group_1/group_1_1",
}
fields := map[string]interface{}{
"memory.limit_in_bytes": 223372036854771712,
"memory.limit_in_bytes": int64(223372036854771712),
}
acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
@@ -147,7 +147,7 @@ func TestCgroupStatistics_5(t *testing.T) {
"path": "testdata/memory/group_1/group_1_1",
}
fields := map[string]interface{}{
"memory.limit_in_bytes": 223372036854771712,
"memory.limit_in_bytes": int64(223372036854771712),
}
acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
@@ -174,9 +174,9 @@ func TestCgroupStatistics_6(t *testing.T) {
"path": "testdata/memory",
}
fields := map[string]interface{}{
"memory.usage_in_bytes": 3513667584,
"memory.usage_in_bytes": int64(3513667584),
"memory.use_hierarchy": "12-781",
"memory.kmem.limit_in_bytes": 9223372036854771712,
"memory.kmem.limit_in_bytes": int64(9223372036854771712),
}
acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
}

View File

@@ -1,5 +1,3 @@
// +build linux
package chrony
import (

View File

@@ -1,3 +0,0 @@
// +build !linux
package chrony

View File

@@ -1,5 +1,3 @@
// +build linux
package chrony
import (

View File

@@ -92,7 +92,7 @@ func (c *CloudWatch) SampleConfig() string {
## Collection Delay (required - must account for metrics availability via CloudWatch API)
delay = "5m"
## Recomended: use metric 'interval' that is a multiple of 'period' to avoid
## Recommended: use metric 'interval' that is a multiple of 'period' to avoid
## gaps or overlap in pulled data
interval = "5m"

View File

@@ -69,6 +69,10 @@ func (c *Consul) createAPIClient() (*api.Client, error) {
config.Datacenter = c.Datacentre
}
if c.Token != "" {
config.Token = c.Token
}
if c.Username != "" {
config.HttpAuth = &api.HttpBasicAuth{
Username: c.Username,

View File

@@ -20,7 +20,7 @@ var sampleChecks = []*api.HealthCheck{
},
}
func TestGatherHealtCheck(t *testing.T) {
func TestGatherHealthCheck(t *testing.T) {
expectedFields := map[string]interface{}{
"check_name": "foo.health",
"status": "passing",

View File

@@ -21,7 +21,7 @@ var sampleConfig = `
## http://admin:secret@couchbase-0.example.com:8091/
##
## If no servers are specified, then localhost is used as the host.
## If no protocol is specifed, HTTP is used.
## If no protocol is specified, HTTP is used.
## If no port is specified, 8091 is used.
servers = ["http://localhost:8091"]
`

View File

@@ -0,0 +1,209 @@
# DC/OS Input Plugin
This input plugin gathers metrics from a DC/OS cluster's [metrics component](https://docs.mesosphere.com/1.10/metrics/).
**Series Cardinality Warning**
Depending on the workload of your DC/OS cluster, this plugin can quickly
create a high number of series which, left unchecked, can cause high load on
your database.
- Use [measurement filtering](https://github.com/influxdata/telegraf/blob/master/docs/CONFIGURATION.md#measurement-filtering) liberally to exclude unneeded metrics, as well as the node, container, and app include/exclude options.
- Write to a database with an appropriate [retention policy](https://docs.influxdata.com/influxdb/v1.3/concepts/glossary/#retention-policy-rp).
- Limit the number of series allowed in your database using the `max-series-per-database` and `max-values-per-tag` settings.
- Consider enabling the [TSI](https://docs.influxdata.com/influxdb/v1.3/about_the_project/releasenotes-changelog/#release-notes-8) engine.
- Monitor your [series cardinality](https://docs.influxdata.com/influxdb/v1.3/troubleshooting/frequently-asked-questions/#how-can-i-query-for-series-cardinality).
### Configuration:
```toml
[[inputs.dcos]]
## The DC/OS cluster URL.
cluster_url = "https://dcos-master-1"
## The ID of the service account.
service_account_id = "telegraf"
## The private key file for the service account.
service_account_private_key = "/etc/telegraf/telegraf-sa-key.pem"
## Path containing login token. If set, will read on every gather.
# token_file = "/home/dcos/.dcos/token"
## In all filter options if both include and exclude are empty all items
## will be collected. Arrays may contain glob patterns.
##
## Node IDs to collect metrics from. If a node is excluded, no metrics will
## be collected for its containers or apps.
# node_include = []
# node_exclude = []
## Container IDs to collect container metrics from.
# container_include = []
# container_exclude = []
## Container IDs to collect app metrics from.
# app_include = []
# app_exclude = []
## Maximum concurrent connections to the cluster.
# max_connections = 10
## Maximum time to receive a response from cluster.
# response_timeout = "20s"
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
## If false, skip chain & host verification
# insecure_skip_verify = true
## Recommended filtering to reduce series cardinality.
# [inputs.dcos.tagdrop]
# path = ["/var/lib/mesos/slave/slaves/*"]
```
#### Enterprise Authentication
When using Enterprise DC/OS, it is recommended to use a service account to
authenticate with the cluster.
The plugin requires the following permissions:
```
dcos:adminrouter:ops:system-metrics full
dcos:adminrouter:ops:mesos full
```
Follow the directions to [create a service account and assign permissions](https://docs.mesosphere.com/1.10/security/service-auth/custom-service-auth/).
Quick configuration using the Enterprise CLI:
```
dcos security org service-accounts keypair telegraf-sa-key.pem telegraf-sa-cert.pem
dcos security org service-accounts create -p telegraf-sa-cert.pem -d "Telegraf DC/OS input plugin" telegraf
dcos security org users grant telegraf dcos:adminrouter:ops:system-metrics full
dcos security org users grant telegraf dcos:adminrouter:ops:mesos full
```
#### Open Source Authentication
Open Source DC/OS does not provide service accounts. Instead you can use one
of the following options:
1. [Disable authentication](https://dcos.io/docs/1.10/security/managing-authentication/#authentication-opt-out)
2. Use the `token_file` parameter to read an authentication token from a file.
The `token_file` can then be kept current by using the [dcos cli] to log in periodically.
The cli can stay logged in for at most XXX days; you will need to ensure the cli
performs a new login before this time expires.
```
dcos auth login --username foo --password bar
dcos config show core.dcos_acs_token > ~/.dcos/token
```
Another option to create a `token_file` is to generate a token using the
cluster secret. This will allow you to set the expiration date manually or
even create a never expiring token. However, if the cluster secret or the
token is compromised it cannot be revoked and may require a full reinstall of
the cluster. For more information on this technique reference
[this blog post](https://medium.com/@richardgirges/authenticating-open-source-dc-os-with-third-party-services-125fa33a5add).
### Metrics:
Please consult the [Metrics Reference](https://docs.mesosphere.com/1.10/metrics/reference/)
for details on how to interpret each field.
- dcos_node
- tags:
- cluster
- hostname
- path (filesystem fields only)
- interface (network fields only)
- fields:
- system_uptime (float)
- cpu_cores (float)
- cpu_total (float)
- cpu_user (float)
- cpu_system (float)
- cpu_idle (float)
- cpu_wait (float)
- load_1min (float)
- load_5min (float)
- load_15min (float)
- filesystem_capacity_total_bytes (int)
- filesystem_capacity_used_bytes (int)
- filesystem_capacity_free_bytes (int)
- filesystem_inode_total (float)
- filesystem_inode_used (float)
- filesystem_inode_free (float)
- memory_total_bytes (int)
- memory_free_bytes (int)
- memory_buffers_bytes (int)
- memory_cached_bytes (int)
- swap_total_bytes (int)
- swap_free_bytes (int)
- swap_used_bytes (int)
- network_in_bytes (int)
- network_out_bytes (int)
- network_in_packets (float)
- network_out_packets (float)
- network_in_dropped (float)
- network_out_dropped (float)
- network_in_errors (float)
- network_out_errors (float)
- process_count (float)
- dcos_container
- tags:
- cluster
- hostname
- container_id
- task_name
- fields:
- cpus_limit (float)
- cpus_system_time (float)
- cpus_throttled_time (float)
- cpus_user_time (float)
- disk_limit_bytes (int)
- disk_used_bytes (int)
- mem_limit_bytes (int)
- mem_total_bytes (int)
- net_rx_bytes (int)
- net_rx_dropped (float)
- net_rx_errors (float)
- net_rx_packets (float)
- net_tx_bytes (int)
- net_tx_dropped (float)
- net_tx_errors (float)
- net_tx_packets (float)
- dcos_app
- tags:
- cluster
- hostname
- container_id
- task_name
- fields:
- fields are application specific
### Example Output:
```
dcos_node,cluster=enterprise,hostname=192.168.122.18,path=/boot filesystem_capacity_free_bytes=918188032i,filesystem_capacity_total_bytes=1063256064i,filesystem_capacity_used_bytes=145068032i,filesystem_inode_free=523958,filesystem_inode_total=524288,filesystem_inode_used=330 1511859222000000000
dcos_node,cluster=enterprise,hostname=192.168.122.18,interface=dummy0 network_in_bytes=0i,network_in_dropped=0,network_in_errors=0,network_in_packets=0,network_out_bytes=0i,network_out_dropped=0,network_out_errors=0,network_out_packets=0 1511859222000000000
dcos_node,cluster=enterprise,hostname=192.168.122.18,interface=docker0 network_in_bytes=0i,network_in_dropped=0,network_in_errors=0,network_in_packets=0,network_out_bytes=0i,network_out_dropped=0,network_out_errors=0,network_out_packets=0 1511859222000000000
dcos_node,cluster=enterprise,hostname=192.168.122.18 cpu_cores=2,cpu_idle=81.62,cpu_system=4.19,cpu_total=13.670000000000002,cpu_user=9.48,cpu_wait=0,load_15min=0.7,load_1min=0.22,load_5min=0.6,memory_buffers_bytes=970752i,memory_cached_bytes=1830473728i,memory_free_bytes=1178636288i,memory_total_bytes=3975073792i,process_count=198,swap_free_bytes=859828224i,swap_total_bytes=859828224i,swap_used_bytes=0i,system_uptime=18874 1511859222000000000
dcos_node,cluster=enterprise,hostname=192.168.122.18,interface=lo network_in_bytes=1090992450i,network_in_dropped=0,network_in_errors=0,network_in_packets=1546938,network_out_bytes=1090992450i,network_out_dropped=0,network_out_errors=0,network_out_packets=1546938 1511859222000000000
dcos_node,cluster=enterprise,hostname=192.168.122.18,path=/ filesystem_capacity_free_bytes=1668378624i,filesystem_capacity_total_bytes=6641680384i,filesystem_capacity_used_bytes=4973301760i,filesystem_inode_free=3107856,filesystem_inode_total=3248128,filesystem_inode_used=140272 1511859222000000000
dcos_node,cluster=enterprise,hostname=192.168.122.18,interface=minuteman network_in_bytes=0i,network_in_dropped=0,network_in_errors=0,network_in_packets=0,network_out_bytes=210i,network_out_dropped=0,network_out_errors=0,network_out_packets=3 1511859222000000000
dcos_node,cluster=enterprise,hostname=192.168.122.18,interface=eth0 network_in_bytes=539886216i,network_in_dropped=1,network_in_errors=0,network_in_packets=979808,network_out_bytes=112395836i,network_out_dropped=0,network_out_errors=0,network_out_packets=891239 1511859222000000000
dcos_node,cluster=enterprise,hostname=192.168.122.18,interface=spartan network_in_bytes=0i,network_in_dropped=0,network_in_errors=0,network_in_packets=0,network_out_bytes=210i,network_out_dropped=0,network_out_errors=0,network_out_packets=3 1511859222000000000
dcos_node,cluster=enterprise,hostname=192.168.122.18,path=/var/lib/docker/overlay filesystem_capacity_free_bytes=1668378624i,filesystem_capacity_total_bytes=6641680384i,filesystem_capacity_used_bytes=4973301760i,filesystem_inode_free=3107856,filesystem_inode_total=3248128,filesystem_inode_used=140272 1511859222000000000
dcos_node,cluster=enterprise,hostname=192.168.122.18,interface=vtep1024 network_in_bytes=0i,network_in_dropped=0,network_in_errors=0,network_in_packets=0,network_out_bytes=0i,network_out_dropped=0,network_out_errors=0,network_out_packets=0 1511859222000000000
dcos_node,cluster=enterprise,hostname=192.168.122.18,path=/var/lib/docker/plugins filesystem_capacity_free_bytes=1668378624i,filesystem_capacity_total_bytes=6641680384i,filesystem_capacity_used_bytes=4973301760i,filesystem_inode_free=3107856,filesystem_inode_total=3248128,filesystem_inode_used=140272 1511859222000000000
dcos_node,cluster=enterprise,hostname=192.168.122.18,interface=d-dcos network_in_bytes=0i,network_in_dropped=0,network_in_errors=0,network_in_packets=0,network_out_bytes=0i,network_out_dropped=0,network_out_errors=0,network_out_packets=0 1511859222000000000
dcos_app,cluster=enterprise,container_id=9a78d34a-3bbf-467e-81cf-a57737f154ee,hostname=192.168.122.18 container_received_bytes_per_sec=0,container_throttled_bytes_per_sec=0 1511859222000000000
dcos_container,cluster=enterprise,container_id=cbf19b77-3b8d-4bcf-b81f-824b67279629,hostname=192.168.122.18 cpus_limit=0.3,cpus_system_time=307.31,cpus_throttled_time=102.029930607,cpus_user_time=268.57,disk_limit_bytes=268435456i,disk_used_bytes=30953472i,mem_limit_bytes=570425344i,mem_total_bytes=13316096i,net_rx_bytes=0i,net_rx_dropped=0,net_rx_errors=0,net_rx_packets=0,net_tx_bytes=0i,net_tx_dropped=0,net_tx_errors=0,net_tx_packets=0 1511859222000000000
dcos_app,cluster=enterprise,container_id=cbf19b77-3b8d-4bcf-b81f-824b67279629,hostname=192.168.122.18 container_received_bytes_per_sec=0,container_throttled_bytes_per_sec=0 1511859222000000000
dcos_container,cluster=enterprise,container_id=5725e219-f66e-40a8-b3ab-519d85f4c4dc,hostname=192.168.122.18,task_name=hello-world cpus_limit=0.6,cpus_system_time=25.6,cpus_throttled_time=327.977109217,cpus_user_time=566.54,disk_limit_bytes=0i,disk_used_bytes=0i,mem_limit_bytes=1107296256i,mem_total_bytes=335941632i,net_rx_bytes=0i,net_rx_dropped=0,net_rx_errors=0,net_rx_packets=0,net_tx_bytes=0i,net_tx_dropped=0,net_tx_errors=0,net_tx_packets=0 1511859222000000000
dcos_app,cluster=enterprise,container_id=5725e219-f66e-40a8-b3ab-519d85f4c4dc,hostname=192.168.122.18 container_received_bytes_per_sec=0,container_throttled_bytes_per_sec=0 1511859222000000000
dcos_app,cluster=enterprise,container_id=c76e1488-4fb7-4010-a4cf-25725f8173f9,hostname=192.168.122.18 container_received_bytes_per_sec=0,container_throttled_bytes_per_sec=0 1511859222000000000
dcos_container,cluster=enterprise,container_id=cbe0b2f9-061f-44ac-8f15-4844229e8231,hostname=192.168.122.18,task_name=telegraf cpus_limit=0.2,cpus_system_time=8.109999999,cpus_throttled_time=93.183916045,cpus_user_time=17.97,disk_limit_bytes=0i,disk_used_bytes=0i,mem_limit_bytes=167772160i,mem_total_bytes=0i,net_rx_bytes=0i,net_rx_dropped=0,net_rx_errors=0,net_rx_packets=0,net_tx_bytes=0i,net_tx_dropped=0,net_tx_errors=0,net_tx_packets=0 1511859222000000000
dcos_container,cluster=enterprise,container_id=b64115de-3d2a-431d-a805-76e7c46453f1,hostname=192.168.122.18 cpus_limit=0.2,cpus_system_time=2.69,cpus_throttled_time=20.064861214,cpus_user_time=6.56,disk_limit_bytes=268435456i,disk_used_bytes=29360128i,mem_limit_bytes=297795584i,mem_total_bytes=13733888i,net_rx_bytes=0i,net_rx_dropped=0,net_rx_errors=0,net_rx_packets=0,net_tx_bytes=0i,net_tx_dropped=0,net_tx_errors=0,net_tx_packets=0 1511859222000000000
dcos_app,cluster=enterprise,container_id=b64115de-3d2a-431d-a805-76e7c46453f1,hostname=192.168.122.18 container_received_bytes_per_sec=0,container_throttled_bytes_per_sec=0 1511859222000000000
```

View File

@@ -0,0 +1,332 @@
package dcos
import (
"bytes"
"context"
"crypto/tls"
"encoding/json"
"fmt"
"net/http"
"net/url"
"time"
jwt "github.com/dgrijalva/jwt-go"
)
const (
// How long to stay logged in for
loginDuration = 65 * time.Minute
)
// Client is an interface for communicating with the DC/OS API.
type Client interface {
SetToken(token string)
Login(ctx context.Context, sa *ServiceAccount) (*AuthToken, error)
GetSummary(ctx context.Context) (*Summary, error)
GetContainers(ctx context.Context, node string) ([]Container, error)
GetNodeMetrics(ctx context.Context, node string) (*Metrics, error)
GetContainerMetrics(ctx context.Context, node, container string) (*Metrics, error)
GetAppMetrics(ctx context.Context, node, container string) (*Metrics, error)
}
type APIError struct {
StatusCode int
Title string
Description string
}
// Login is request data for logging in.
type Login struct {
UID string `json:"uid"`
Exp int64 `json:"exp"`
Token string `json:"token"`
}
// LoginError is the response when login fails.
type LoginError struct {
Title string `json:"title"`
Description string `json:"description"`
}
// LoginAuth is the response to a successful login.
type LoginAuth struct {
Token string `json:"token"`
}
// Slave is a node in the cluster.
type Slave struct {
ID string `json:"id"`
}
// Summary provides high level cluster wide information.
type Summary struct {
Cluster string
Slaves []Slave
}
// Container is a container on a node.
type Container struct {
ID string
}
type DataPoint struct {
Name string `json:"name"`
Tags map[string]string `json:"tags"`
Unit string `json:"unit"`
Value float64 `json:"value"`
}
// Metrics are the DCOS metrics
type Metrics struct {
Datapoints []DataPoint `json:"datapoints"`
Dimensions map[string]interface{} `json:"dimensions"`
}
// AuthToken is the authentication token.
type AuthToken struct {
Text string
Expire time.Time
}
// ClusterClient is a Client that uses the cluster URL.
type ClusterClient struct {
clusterURL *url.URL
httpClient *http.Client
credentials *Credentials
token string
semaphore chan struct{}
}
type claims struct {
UID string `json:"uid"`
jwt.StandardClaims
}
func (e APIError) Error() string {
if e.Description != "" {
return fmt.Sprintf("%s: %s", e.Title, e.Description)
}
return e.Title
}
func NewClusterClient(
clusterURL *url.URL,
timeout time.Duration,
maxConns int,
tlsConfig *tls.Config,
) *ClusterClient {
httpClient := &http.Client{
Transport: &http.Transport{
MaxIdleConns: maxConns,
TLSClientConfig: tlsConfig,
},
Timeout: timeout,
}
semaphore := make(chan struct{}, maxConns)
c := &ClusterClient{
clusterURL: clusterURL,
httpClient: httpClient,
semaphore: semaphore,
}
return c
}
func (c *ClusterClient) SetToken(token string) {
c.token = token
}
func (c *ClusterClient) Login(ctx context.Context, sa *ServiceAccount) (*AuthToken, error) {
token, err := c.createLoginToken(sa)
if err != nil {
return nil, err
}
exp := time.Now().Add(loginDuration)
body := &Login{
UID: sa.AccountID,
Exp: exp.Unix(),
Token: token,
}
octets, err := json.Marshal(body)
if err != nil {
return nil, err
}
req, err := http.NewRequest("POST", c.url("/acs/api/v1/auth/login"), bytes.NewBuffer(octets))
if err != nil {
return nil, err
}
req.Header.Add("Content-Type", "application/json")
req = req.WithContext(ctx)
resp, err := c.httpClient.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
if resp.StatusCode == http.StatusOK {
auth := &LoginAuth{}
dec := json.NewDecoder(resp.Body)
err = dec.Decode(auth)
if err != nil {
return nil, err
}
token := &AuthToken{
Text: auth.Token,
Expire: exp,
}
return token, nil
}
loginError := &LoginError{}
dec := json.NewDecoder(resp.Body)
err = dec.Decode(loginError)
if err != nil {
err := &APIError{
StatusCode: resp.StatusCode,
Title: resp.Status,
}
return nil, err
}
err = &APIError{
StatusCode: resp.StatusCode,
Title: loginError.Title,
Description: loginError.Description,
}
return nil, err
}
func (c *ClusterClient) GetSummary(ctx context.Context) (*Summary, error) {
summary := &Summary{}
err := c.doGet(ctx, c.url("/mesos/master/state-summary"), summary)
if err != nil {
return nil, err
}
return summary, nil
}
func (c *ClusterClient) GetContainers(ctx context.Context, node string) ([]Container, error) {
list := []string{}
path := fmt.Sprintf("/system/v1/agent/%s/metrics/v0/containers", node)
err := c.doGet(ctx, c.url(path), &list)
if err != nil {
return nil, err
}
containers := make([]Container, 0, len(list))
for _, c := range list {
containers = append(containers, Container{ID: c})
}
return containers, nil
}
func (c *ClusterClient) getMetrics(ctx context.Context, url string) (*Metrics, error) {
metrics := &Metrics{}
err := c.doGet(ctx, url, metrics)
if err != nil {
return nil, err
}
return metrics, nil
}
func (c *ClusterClient) GetNodeMetrics(ctx context.Context, node string) (*Metrics, error) {
path := fmt.Sprintf("/system/v1/agent/%s/metrics/v0/node", node)
return c.getMetrics(ctx, c.url(path))
}
func (c *ClusterClient) GetContainerMetrics(ctx context.Context, node, container string) (*Metrics, error) {
path := fmt.Sprintf("/system/v1/agent/%s/metrics/v0/containers/%s", node, container)
return c.getMetrics(ctx, c.url(path))
}
func (c *ClusterClient) GetAppMetrics(ctx context.Context, node, container string) (*Metrics, error) {
path := fmt.Sprintf("/system/v1/agent/%s/metrics/v0/containers/%s/app", node, container)
return c.getMetrics(ctx, c.url(path))
}
func createGetRequest(url string, token string) (*http.Request, error) {
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return nil, err
}
if token != "" {
req.Header.Add("Authorization", "token="+token)
}
req.Header.Add("Accept", "application/json")
return req, nil
}
func (c *ClusterClient) doGet(ctx context.Context, url string, v interface{}) error {
req, err := createGetRequest(url, c.token)
if err != nil {
return err
}
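// Acquire a slot on the connection semaphore (capacity maxConns) before
// issuing the request, or give up early if the context is cancelled while
// waiting; the slot is released once the response body has been closed.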
select {
case c.semaphore <- struct{}{}:
break
case <-ctx.Done():
return ctx.Err()
}
resp, err := c.httpClient.Do(req.WithContext(ctx))
if err != nil {
<-c.semaphore
return err
}
defer func() {
resp.Body.Close()
<-c.semaphore
}()
// Clear invalid token if unauthorized
if resp.StatusCode == http.StatusUnauthorized {
c.token = ""
}
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
return &APIError{
StatusCode: resp.StatusCode,
Title: resp.Status,
}
}
if resp.StatusCode == http.StatusNoContent {
return nil
}
err = json.NewDecoder(resp.Body).Decode(v)
return err
}
func (c *ClusterClient) url(path string) string {
url := c.clusterURL
url.Path = path
return url.String()
}
func (c *ClusterClient) createLoginToken(sa *ServiceAccount) (string, error) {
token := jwt.NewWithClaims(jwt.SigningMethodRS256, claims{
UID: sa.AccountID,
StandardClaims: jwt.StandardClaims{
// How long we have to log in with this token
ExpiresAt: int64(5 * time.Minute / time.Second),
},
})
return token.SignedString(sa.PrivateKey)
}
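To make the login flow above concrete, here is a minimal sketch of a caller, assuming the `dcos` package is imported directly; the URL and key path are placeholders and error handling is abbreviated:

```go
package main

import (
	"context"
	"fmt"
	"io/ioutil"
	"net/url"
	"time"

	jwt "github.com/dgrijalva/jwt-go"

	"github.com/influxdata/telegraf/plugins/inputs/dcos"
)

func main() {
	// Placeholder cluster URL and key path for illustration only.
	u, _ := url.Parse("https://dcos-master-1")
	pem, _ := ioutil.ReadFile("/etc/telegraf/telegraf-sa-key.pem")
	key, _ := jwt.ParseRSAPrivateKeyFromPEM(pem)

	// One connection slot, default timeout, no custom TLS config.
	client := dcos.NewClusterClient(u, 20*time.Second, 1, nil)
	sa := &dcos.ServiceAccount{AccountID: "telegraf", PrivateKey: key}

	// Exchange a short-lived signed JWT for a session token, then use it.
	auth, err := client.Login(context.Background(), sa)
	if err != nil {
		fmt.Println("login failed:", err)
		return
	}
	client.SetToken(auth.Text)

	summary, err := client.GetSummary(context.Background())
	fmt.Println(summary, err)
}
```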

View File

@@ -0,0 +1,232 @@
package dcos
import (
"context"
"fmt"
"net/http"
"net/http/httptest"
"net/url"
"testing"
jwt "github.com/dgrijalva/jwt-go"
"github.com/stretchr/testify/require"
)
const (
privateKey = `-----BEGIN RSA PRIVATE KEY-----
MIICXQIBAAKBgQCwlGyzVp9cqtwiNCgCnaR0kilPZhr4xFBcnXxvQ8/uzOHaWKxj
XWR38cKR3gPh5+4iSmzMdo3HDJM5ks6imXGnp+LPOA5iNewnpLNs7UxA2arwKH/6
4qIaAXAtf5jE46wZIMgc2EW9wGL3dxC0JY8EXPpBFB/3J8gADkorFR8lwwIDAQAB
AoGBAJaFHxfMmjHK77U0UnrQWFSKFy64cftmlL4t/Nl3q7L68PdIKULWZIMeEWZ4
I0UZiFOwr4em83oejQ1ByGSwekEuiWaKUI85IaHfcbt+ogp9hY/XbOEo56OPQUAd
bEZv1JqJOqta9Ug1/E1P9LjEEyZ5F5ubx7813rxAE31qKtKJAkEA1zaMlCWIr+Rj
hGvzv5rlHH3wbOB4kQFXO4nqj3J/ttzR5QiJW24STMDcbNngFlVcDVju56LrNTiD
dPh9qvl7nwJBANILguR4u33OMksEZTYB7nQZSurqXsq6382zH7pTl29ANQTROHaM
PKC8dnDWq8RGTqKuvWblIzzGIKqIMovZo10CQC96T0UXirITFolOL3XjvAuvFO1Q
EAkdXJs77805m0dCK+P1IChVfiAEpBw3bKJArpAbQIlFfdI953JUp5SieU0CQEub
BSSEKMjh/cxu6peEHnb/262vayuCFKkQPu1sxWewLuVrAe36EKCy9dcsDmv5+rgo
Odjdxc9Madm4aKlaT6kCQQCpAgeblDrrxTrNQ+Typzo37PlnQrvI+0EceAUuJ72G
P0a+YZUeHNRqT2pPN9lMTAZGGi3CtcF2XScbLNEBeXge
-----END RSA PRIVATE KEY-----`
)
func TestLogin(t *testing.T) {
var tests = []struct {
name string
responseCode int
responseBody string
expectedError error
expectedToken string
}{
{
name: "Login successful",
responseCode: 200,
responseBody: `{"token": "XXX.YYY.ZZZ"}`,
expectedError: nil,
expectedToken: "XXX.YYY.ZZZ",
},
{
name: "Unauthorized Error",
responseCode: http.StatusUnauthorized,
responseBody: `{"title": "x", "description": "y"}`,
expectedError: &APIError{http.StatusUnauthorized, "x", "y"},
expectedToken: "",
},
}
key, err := jwt.ParseRSAPrivateKeyFromPEM([]byte(privateKey))
require.NoError(t, err)
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(tt.responseCode)
fmt.Fprintln(w, tt.responseBody)
})
ts := httptest.NewServer(handler)
u, err := url.Parse(ts.URL)
require.NoError(t, err)
ctx := context.Background()
sa := &ServiceAccount{
AccountID: "telegraf",
PrivateKey: key,
}
client := NewClusterClient(u, defaultResponseTimeout, 1, nil)
auth, err := client.Login(ctx, sa)
require.Equal(t, tt.expectedError, err)
if tt.expectedToken != "" {
require.Equal(t, tt.expectedToken, auth.Text)
} else {
require.Nil(t, auth)
}
ts.Close()
})
}
}
func TestGetSummary(t *testing.T) {
var tests = []struct {
name string
responseCode int
responseBody string
expectedValue *Summary
expectedError error
}{
{
name: "No nodes",
responseCode: 200,
responseBody: `{"cluster": "a", "slaves": []}`,
expectedValue: &Summary{Cluster: "a", Slaves: []Slave{}},
expectedError: nil,
},
{
name: "Unauthorized Error",
responseCode: http.StatusUnauthorized,
responseBody: `<html></html>`,
expectedValue: nil,
expectedError: &APIError{StatusCode: http.StatusUnauthorized, Title: "401 Unauthorized"},
},
{
name: "Has nodes",
responseCode: 200,
responseBody: `{"cluster": "a", "slaves": [{"id": "a"}, {"id": "b"}]}`,
expectedValue: &Summary{
Cluster: "a",
Slaves: []Slave{
Slave{ID: "a"},
Slave{ID: "b"},
},
},
expectedError: nil,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// check the path
w.WriteHeader(tt.responseCode)
fmt.Fprintln(w, tt.responseBody)
})
ts := httptest.NewServer(handler)
u, err := url.Parse(ts.URL)
require.NoError(t, err)
ctx := context.Background()
client := NewClusterClient(u, defaultResponseTimeout, 1, nil)
summary, err := client.GetSummary(ctx)
require.Equal(t, tt.expectedError, err)
require.Equal(t, tt.expectedValue, summary)
ts.Close()
})
}
}
func TestGetNodeMetrics(t *testing.T) {
var tests = []struct {
name string
responseCode int
responseBody string
expectedValue *Metrics
expectedError error
}{
{
name: "Empty Body",
responseCode: 200,
responseBody: `{}`,
expectedValue: &Metrics{},
expectedError: nil,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// check the path
w.WriteHeader(tt.responseCode)
fmt.Fprintln(w, tt.responseBody)
})
ts := httptest.NewServer(handler)
u, err := url.Parse(ts.URL)
require.NoError(t, err)
ctx := context.Background()
client := NewClusterClient(u, defaultResponseTimeout, 1, nil)
m, err := client.GetNodeMetrics(ctx, "foo")
require.Equal(t, tt.expectedError, err)
require.Equal(t, tt.expectedValue, m)
ts.Close()
})
}
}
func TestGetContainerMetrics(t *testing.T) {
var tests = []struct {
name string
responseCode int
responseBody string
expectedValue *Metrics
expectedError error
}{
{
name: "204 No Contents",
responseCode: 204,
responseBody: ``,
expectedValue: &Metrics{},
expectedError: nil,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// check the path
w.WriteHeader(tt.responseCode)
fmt.Fprintln(w, tt.responseBody)
})
ts := httptest.NewServer(handler)
u, err := url.Parse(ts.URL)
require.NoError(t, err)
ctx := context.Background()
client := NewClusterClient(u, defaultResponseTimeout, 1, nil)
m, err := client.GetContainerMetrics(ctx, "foo", "bar")
require.Equal(t, tt.expectedError, err)
require.Equal(t, tt.expectedValue, m)
ts.Close()
})
}
}

View File

@@ -0,0 +1,72 @@
package dcos
import (
"context"
"crypto/rsa"
"fmt"
"io/ioutil"
"strings"
"time"
"unicode/utf8"
)
const (
// How long before expiration to renew token
relogDuration = 5 * time.Minute
)
type Credentials interface {
Token(ctx context.Context, client Client) (string, error)
IsExpired() bool
}
type ServiceAccount struct {
AccountID string
PrivateKey *rsa.PrivateKey
auth *AuthToken
}
type TokenCreds struct {
Path string
}
type NullCreds struct {
}
func (c *ServiceAccount) Token(ctx context.Context, client Client) (string, error) {
auth, err := client.Login(ctx, c)
if err != nil {
return "", err
}
c.auth = auth
return auth.Text, nil
}
func (c *ServiceAccount) IsExpired() bool {
return c.auth.Text != "" || c.auth.Expire.Add(relogDuration).After(time.Now())
}
func (c *TokenCreds) Token(ctx context.Context, client Client) (string, error) {
octets, err := ioutil.ReadFile(c.Path)
if err != nil {
return "", fmt.Errorf("Error reading token file %q: %s", c.Path, err)
}
if !utf8.Valid(octets) {
return "", fmt.Errorf("Token file does not contain utf-8 encoded text: %s", c.Path)
}
token := strings.TrimSpace(string(octets))
return token, nil
}
func (c *TokenCreds) IsExpired() bool {
return true
}
func (c *NullCreds) Token(ctx context.Context, client Client) (string, error) {
return "", nil
}
func (c *NullCreds) IsExpired() bool {
return true
}
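As a usage note, a minimal sketch of the token-file path (the file location is a placeholder): `TokenCreds` simply re-reads and trims the file, and since its `IsExpired` always returns true the token is re-read on every gather:

```go
package main

import (
	"context"
	"fmt"

	"github.com/influxdata/telegraf/plugins/inputs/dcos"
)

func main() {
	// The client argument is unused by TokenCreds, so nil is fine here.
	creds := &dcos.TokenCreds{Path: "/home/dcos/.dcos/token"}
	token, err := creds.Token(context.Background(), nil)
	fmt.Println(token, err)
}
```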

plugins/inputs/dcos/dcos.go Normal file
View File

@@ -0,0 +1,435 @@
package dcos
import (
"context"
"io/ioutil"
"net/url"
"sort"
"strings"
"sync"
"time"
jwt "github.com/dgrijalva/jwt-go"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/filter"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/inputs"
)
const (
defaultMaxConnections = 10
defaultResponseTimeout = 20 * time.Second
)
var (
nodeDimensions = []string{
"hostname",
"path",
"interface",
}
containerDimensions = []string{
"hostname",
"container_id",
"task_name",
}
appDimensions = []string{
"hostname",
"container_id",
"task_name",
}
)
type DCOS struct {
ClusterURL string `toml:"cluster_url"`
ServiceAccountID string `toml:"service_account_id"`
ServiceAccountPrivateKey string
TokenFile string
NodeInclude []string
NodeExclude []string
ContainerInclude []string
ContainerExclude []string
AppInclude []string
AppExclude []string
MaxConnections int
ResponseTimeout internal.Duration
SSLCA string `toml:"ssl_ca"`
SSLCert string `toml:"ssl_cert"`
SSLKey string `toml:"ssl_key"`
InsecureSkipVerify bool `toml:"insecure_skip_verify"`
client Client
creds Credentials
initialized bool
nodeFilter filter.Filter
containerFilter filter.Filter
appFilter filter.Filter
taskNameFilter filter.Filter
}
func (d *DCOS) Description() string {
return "Input plugin for DC/OS metrics"
}
var sampleConfig = `
## The DC/OS cluster URL.
cluster_url = "https://dcos-ee-master-1"
## The ID of the service account.
service_account_id = "telegraf"
## The private key file for the service account.
service_account_private_key = "/etc/telegraf/telegraf-sa-key.pem"
## Path containing login token. If set, will read on every gather.
# token_file = "/home/dcos/.dcos/token"
## In all filter options if both include and exclude are empty all items
## will be collected. Arrays may contain glob patterns.
##
## Node IDs to collect metrics from. If a node is excluded, no metrics will
## be collected for its containers or apps.
# node_include = []
# node_exclude = []
## Container IDs to collect container metrics from.
# container_include = []
# container_exclude = []
## Container IDs to collect app metrics from.
# app_include = []
# app_exclude = []
## Maximum concurrent connections to the cluster.
# max_connections = 10
## Maximum time to receive a response from cluster.
# response_timeout = "20s"
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
## If false, skip chain & host verification
# insecure_skip_verify = true
## Recommended filtering to reduce series cardinality.
# [inputs.dcos.tagdrop]
# path = ["/var/lib/mesos/slave/slaves/*"]
`
func (d *DCOS) SampleConfig() string {
return sampleConfig
}
func (d *DCOS) Gather(acc telegraf.Accumulator) error {
err := d.init()
if err != nil {
return err
}
ctx := context.Background()
token, err := d.creds.Token(ctx, d.client)
if err != nil {
return err
}
d.client.SetToken(token)
summary, err := d.client.GetSummary(ctx)
if err != nil {
return err
}
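// Fan out one goroutine per slave node; each node gather also collects the
// node's container and app metrics, reporting failures via the accumulator.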
var wg sync.WaitGroup
for _, node := range summary.Slaves {
wg.Add(1)
go func(node string) {
defer wg.Done()
d.GatherNode(ctx, acc, summary.Cluster, node)
}(node.ID)
}
wg.Wait()
return nil
}
func (d *DCOS) GatherNode(ctx context.Context, acc telegraf.Accumulator, cluster, node string) {
if !d.nodeFilter.Match(node) {
return
}
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
m, err := d.client.GetNodeMetrics(ctx, node)
if err != nil {
acc.AddError(err)
return
}
d.addNodeMetrics(acc, cluster, m)
}()
d.GatherContainers(ctx, acc, cluster, node)
wg.Wait()
}
func (d *DCOS) GatherContainers(ctx context.Context, acc telegraf.Accumulator, cluster, node string) {
containers, err := d.client.GetContainers(ctx, node)
if err != nil {
acc.AddError(err)
return
}
var wg sync.WaitGroup
for _, container := range containers {
if d.containerFilter.Match(container.ID) {
wg.Add(1)
go func(container string) {
defer wg.Done()
m, err := d.client.GetContainerMetrics(ctx, node, container)
if err != nil {
if err, ok := err.(APIError); ok && err.StatusCode == 404 {
return
}
acc.AddError(err)
return
}
d.addContainerMetrics(acc, cluster, m)
}(container.ID)
}
if d.appFilter.Match(container.ID) {
wg.Add(1)
go func(container string) {
defer wg.Done()
m, err := d.client.GetAppMetrics(ctx, node, container)
if err != nil {
if err, ok := err.(APIError); ok && err.StatusCode == 404 {
return
}
acc.AddError(err)
return
}
d.addAppMetrics(acc, cluster, m)
}(container.ID)
}
}
wg.Wait()
}
type point struct {
tags map[string]string
labels map[string]string
fields map[string]interface{}
}
func (d *DCOS) createPoints(acc telegraf.Accumulator, m *Metrics) []*point {
points := make(map[string]*point)
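// Group datapoints by their full tag set so that datapoints sharing
// identical tags are merged into a single point (one series per tag set).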
for _, dp := range m.Datapoints {
fieldKey := strings.Replace(dp.Name, ".", "_", -1)
tags := dp.Tags
if tags == nil {
tags = make(map[string]string)
}
if dp.Unit == "bytes" && !strings.HasSuffix(fieldKey, "_bytes") {
fieldKey = fieldKey + "_bytes"
}
if strings.HasPrefix(fieldKey, "dcos_metrics_module_") {
fieldKey = strings.TrimPrefix(fieldKey, "dcos_metrics_module_")
}
tagset := make([]string, 0, len(tags))
for k, v := range tags {
tagset = append(tagset, k+"="+v)
}
sort.Strings(tagset)
seriesParts := make([]string, 0, len(tagset))
seriesParts = append(seriesParts, tagset...)
seriesKey := strings.Join(seriesParts, ",")
p, ok := points[seriesKey]
if !ok {
p = &point{}
p.tags = tags
p.labels = make(map[string]string)
p.fields = make(map[string]interface{})
points[seriesKey] = p
}
if dp.Unit == "bytes" {
p.fields[fieldKey] = int64(dp.Value)
} else {
p.fields[fieldKey] = dp.Value
}
}
results := make([]*point, 0, len(points))
for _, p := range points {
for k, v := range m.Dimensions {
switch v := v.(type) {
case string:
p.tags[k] = v
case map[string]string:
if k == "labels" {
for k, v := range v {
p.labels[k] = v
}
}
}
}
results = append(results, p)
}
return results
}
func (d *DCOS) addMetrics(acc telegraf.Accumulator, cluster, mname string, m *Metrics, tagDimensions []string) {
tm := time.Now()
points := d.createPoints(acc, m)
for _, p := range points {
tags := make(map[string]string)
tags["cluster"] = cluster
for _, tagkey := range tagDimensions {
v, ok := p.tags[tagkey]
if ok {
tags[tagkey] = v
}
}
for k, v := range p.labels {
tags[k] = v
}
acc.AddFields(mname, p.fields, tags, tm)
}
}
func (d *DCOS) addNodeMetrics(acc telegraf.Accumulator, cluster string, m *Metrics) {
d.addMetrics(acc, cluster, "dcos_node", m, nodeDimensions)
}
func (d *DCOS) addContainerMetrics(acc telegraf.Accumulator, cluster string, m *Metrics) {
d.addMetrics(acc, cluster, "dcos_container", m, containerDimensions)
}
func (d *DCOS) addAppMetrics(acc telegraf.Accumulator, cluster string, m *Metrics) {
d.addMetrics(acc, cluster, "dcos_app", m, appDimensions)
}
func (d *DCOS) init() error {
if !d.initialized {
err := d.createFilters()
if err != nil {
return err
}
if d.client == nil {
client, err := d.createClient()
if err != nil {
return err
}
d.client = client
}
if d.creds == nil {
creds, err := d.createCredentials()
if err != nil {
return err
}
d.creds = creds
}
d.initialized = true
}
return nil
}
func (d *DCOS) createClient() (Client, error) {
tlsCfg, err := internal.GetTLSConfig(
d.SSLCert, d.SSLKey, d.SSLCA, d.InsecureSkipVerify)
if err != nil {
return nil, err
}
url, err := url.Parse(d.ClusterURL)
if err != nil {
return nil, err
}
client := NewClusterClient(
url,
d.ResponseTimeout.Duration,
d.MaxConnections,
tlsCfg,
)
return client, nil
}
func (d *DCOS) createCredentials() (Credentials, error) {
if d.ServiceAccountID != "" && d.ServiceAccountPrivateKey != "" {
bs, err := ioutil.ReadFile(d.ServiceAccountPrivateKey)
if err != nil {
return nil, err
}
privateKey, err := jwt.ParseRSAPrivateKeyFromPEM(bs)
if err != nil {
return nil, err
}
creds := &ServiceAccount{
AccountID: d.ServiceAccountID,
PrivateKey: privateKey,
}
return creds, nil
} else if d.TokenFile != "" {
creds := &TokenCreds{
Path: d.TokenFile,
}
return creds, nil
} else {
creds := &NullCreds{}
return creds, nil
}
}
func (d *DCOS) createFilters() error {
var err error
d.nodeFilter, err = filter.NewIncludeExcludeFilter(
d.NodeInclude, d.NodeExclude)
if err != nil {
return err
}
d.containerFilter, err = filter.NewIncludeExcludeFilter(
d.ContainerInclude, d.ContainerExclude)
if err != nil {
return err
}
d.appFilter, err = filter.NewIncludeExcludeFilter(
d.AppInclude, d.AppExclude)
if err != nil {
return err
}
return nil
}
func init() {
inputs.Add("dcos", func() telegraf.Input {
return &DCOS{
MaxConnections: defaultMaxConnections,
ResponseTimeout: internal.Duration{
Duration: defaultResponseTimeout,
},
}
})
}
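The series grouping in `createPoints` keys each datapoint by its sorted tag set, so fields that share identical tags land on one point. A standalone sketch of that key construction (illustrative only, not the plugin API):

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

// seriesKey mirrors the key createPoints builds: sorted "k=v" pairs joined
// with commas, so identical tag sets produce identical keys.
func seriesKey(tags map[string]string) string {
	pairs := make([]string, 0, len(tags))
	for k, v := range tags {
		pairs = append(pairs, k+"="+v)
	}
	sort.Strings(pairs)
	return strings.Join(pairs, ",")
}

func main() {
	a := map[string]string{"interface": "eth0", "path": "/"}
	b := map[string]string{"path": "/", "interface": "eth0"}
	fmt.Println(seriesKey(a) == seriesKey(b)) // true: same series
}
```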

View File

@@ -0,0 +1,441 @@
package dcos
import (
"context"
"fmt"
"testing"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/require"
)
type mockClient struct {
SetTokenF func(token string)
LoginF func(ctx context.Context, sa *ServiceAccount) (*AuthToken, error)
GetSummaryF func(ctx context.Context) (*Summary, error)
GetContainersF func(ctx context.Context, node string) ([]Container, error)
GetNodeMetricsF func(ctx context.Context, node string) (*Metrics, error)
GetContainerMetricsF func(ctx context.Context, node, container string) (*Metrics, error)
GetAppMetricsF func(ctx context.Context, node, container string) (*Metrics, error)
}
func (c *mockClient) SetToken(token string) {
c.SetTokenF(token)
}
func (c *mockClient) Login(ctx context.Context, sa *ServiceAccount) (*AuthToken, error) {
return c.LoginF(ctx, sa)
}
func (c *mockClient) GetSummary(ctx context.Context) (*Summary, error) {
return c.GetSummaryF(ctx)
}
func (c *mockClient) GetContainers(ctx context.Context, node string) ([]Container, error) {
return c.GetContainersF(ctx, node)
}
func (c *mockClient) GetNodeMetrics(ctx context.Context, node string) (*Metrics, error) {
return c.GetNodeMetricsF(ctx, node)
}
func (c *mockClient) GetContainerMetrics(ctx context.Context, node, container string) (*Metrics, error) {
return c.GetContainerMetricsF(ctx, node, container)
}
func (c *mockClient) GetAppMetrics(ctx context.Context, node, container string) (*Metrics, error) {
return c.GetAppMetricsF(ctx, node, container)
}
func TestAddNodeMetrics(t *testing.T) {
var tests = []struct {
name string
metrics *Metrics
check func(*testutil.Accumulator) []bool
}{
{
name: "basic datapoint conversion",
metrics: &Metrics{
Datapoints: []DataPoint{
{
Name: "process.count",
Unit: "count",
Value: 42.0,
},
},
},
check: func(acc *testutil.Accumulator) []bool {
return []bool{acc.HasPoint(
"dcos_node",
map[string]string{
"cluster": "a",
},
"process_count", 42.0,
)}
},
},
{
name: "path added as tag",
metrics: &Metrics{
Datapoints: []DataPoint{
{
Name: "filesystem.inode.free",
Tags: map[string]string{
"path": "/var/lib",
},
Unit: "count",
Value: 42.0,
},
},
},
check: func(acc *testutil.Accumulator) []bool {
return []bool{acc.HasPoint(
"dcos_node",
map[string]string{
"cluster": "a",
"path": "/var/lib",
},
"filesystem_inode_free", 42.0,
)}
},
},
{
name: "interface added as tag",
metrics: &Metrics{
Datapoints: []DataPoint{
{
Name: "network.out.dropped",
Tags: map[string]string{
"interface": "eth0",
},
Unit: "count",
Value: 42.0,
},
},
},
check: func(acc *testutil.Accumulator) []bool {
return []bool{acc.HasPoint(
"dcos_node",
map[string]string{
"cluster": "a",
"interface": "eth0",
},
"network_out_dropped", 42.0,
)}
},
},
{
name: "bytes unit appended to fieldkey",
metrics: &Metrics{
Datapoints: []DataPoint{
{
Name: "network.in",
Tags: map[string]string{
"interface": "eth0",
},
Unit: "bytes",
Value: 42.0,
},
},
},
check: func(acc *testutil.Accumulator) []bool {
return []bool{acc.HasPoint(
"dcos_node",
map[string]string{
"cluster": "a",
"interface": "eth0",
},
"network_in_bytes", int64(42),
)}
},
},
{
name: "dimensions added as tags",
metrics: &Metrics{
Datapoints: []DataPoint{
{
Name: "process.count",
Tags: map[string]string{},
Unit: "count",
Value: 42.0,
},
{
Name: "memory.total",
Tags: map[string]string{},
Unit: "bytes",
Value: 42,
},
},
Dimensions: map[string]interface{}{
"cluster_id": "c0760bbd-9e9d-434b-bd4a-39c7cdef8a63",
"hostname": "192.168.122.18",
"mesos_id": "2dfbbd28-29d2-411d-92c4-e2f84c38688e-S1",
},
},
check: func(acc *testutil.Accumulator) []bool {
return []bool{
acc.HasPoint(
"dcos_node",
map[string]string{
"cluster": "a",
"hostname": "192.168.122.18",
},
"process_count", 42.0),
acc.HasPoint(
"dcos_node",
map[string]string{
"cluster": "a",
"hostname": "192.168.122.18",
},
"memory_total_bytes", int64(42)),
}
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
var acc testutil.Accumulator
dcos := &DCOS{}
dcos.addNodeMetrics(&acc, "a", tt.metrics)
for i, ok := range tt.check(&acc) {
require.True(t, ok, fmt.Sprintf("Index was not true: %d", i))
}
})
}
}
func TestAddContainerMetrics(t *testing.T) {
var tests = []struct {
name string
metrics *Metrics
check func(*testutil.Accumulator) []bool
}{
{
name: "container",
metrics: &Metrics{
Datapoints: []DataPoint{
{
Name: "net.rx.errors",
Tags: map[string]string{
"container_id": "f25c457b-fceb-44f0-8f5b-38be34cbb6fb",
"executor_id": "telegraf.192fb45f-cc0c-11e7-af48-ea183c0b541a",
"executor_name": "Command Executor (Task: telegraf.192fb45f-cc0c-11e7-af48-ea183c0b541a) (Command: NO EXECUTABLE)",
"framework_id": "ab2f3a8b-06db-4e8c-95b6-fb1940874a30-0001",
"source": "telegraf.192fb45f-cc0c-11e7-af48-ea183c0b541a",
},
Unit: "count",
Value: 42.0,
},
},
Dimensions: map[string]interface{}{
"cluster_id": "c0760bbd-9e9d-434b-bd4a-39c7cdef8a63",
"container_id": "f25c457b-fceb-44f0-8f5b-38be34cbb6fb",
"executor_id": "telegraf.192fb45f-cc0c-11e7-af48-ea183c0b541a",
"framework_id": "ab2f3a8b-06db-4e8c-95b6-fb1940874a30-0001",
"framework_name": "marathon",
"framework_principal": "dcos_marathon",
"framework_role": "slave_public",
"hostname": "192.168.122.18",
"labels": map[string]string{
"DCOS_SPACE": "/telegraf",
},
"mesos_id": "2dfbbd28-29d2-411d-92c4-e2f84c38688e-S1",
"task_id": "telegraf.192fb45f-cc0c-11e7-af48-ea183c0b541a",
"task_name": "telegraf",
},
},
check: func(acc *testutil.Accumulator) []bool {
return []bool{
acc.HasPoint(
"dcos_container",
map[string]string{
"cluster": "a",
"container_id": "f25c457b-fceb-44f0-8f5b-38be34cbb6fb",
"hostname": "192.168.122.18",
"task_name": "telegraf",
"DCOS_SPACE": "/telegraf",
},
"net_rx_errors",
42.0,
),
}
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
var acc testutil.Accumulator
dcos := &DCOS{}
dcos.addContainerMetrics(&acc, "a", tt.metrics)
for i, ok := range tt.check(&acc) {
require.True(t, ok, fmt.Sprintf("Index was not true: %d", i))
}
})
}
}
func TestAddAppMetrics(t *testing.T) {
var tests = []struct {
name string
metrics *Metrics
check func(*testutil.Accumulator) []bool
}{
{
name: "tags are optional",
metrics: &Metrics{
Datapoints: []DataPoint{
{
Name: "dcos.metrics.module.container_throttled_bytes_per_sec",
Unit: "",
Value: 42.0,
},
},
},
check: func(acc *testutil.Accumulator) []bool {
return []bool{
acc.HasPoint(
"dcos_app",
map[string]string{
"cluster": "a",
},
"container_throttled_bytes_per_sec", 42.0,
),
}
},
},
{
name: "dimensions are tagged",
metrics: &Metrics{
Datapoints: []DataPoint{
{
Name: "dcos.metrics.module.container_throttled_bytes_per_sec",
Unit: "",
Value: 42.0,
},
},
Dimensions: map[string]interface{}{
"cluster_id": "c0760bbd-9e9d-434b-bd4a-39c7cdef8a63",
"container_id": "02d31175-1c01-4459-8520-ef8b1339bc52",
"hostname": "192.168.122.18",
"mesos_id": "2dfbbd28-29d2-411d-92c4-e2f84c38688e-S1",
},
},
check: func(acc *testutil.Accumulator) []bool {
return []bool{
acc.HasPoint(
"dcos_app",
map[string]string{
"cluster": "a",
"container_id": "02d31175-1c01-4459-8520-ef8b1339bc52",
"hostname": "192.168.122.18",
},
"container_throttled_bytes_per_sec", 42.0,
),
}
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
var acc testutil.Accumulator
dcos := &DCOS{}
dcos.addAppMetrics(&acc, "a", tt.metrics)
for i, ok := range tt.check(&acc) {
require.True(t, ok, fmt.Sprintf("Index was not true: %d", i))
}
})
}
}
func TestGatherFilterNode(t *testing.T) {
var tests = []struct {
name string
nodeInclude []string
nodeExclude []string
client Client
check func(*testutil.Accumulator) []bool
}{
{
name: "cluster without nodes has no metrics",
client: &mockClient{
SetTokenF: func(token string) {},
GetSummaryF: func(ctx context.Context) (*Summary, error) {
return &Summary{
Cluster: "a",
Slaves: []Slave{},
}, nil
},
},
check: func(acc *testutil.Accumulator) []bool {
return []bool{
acc.NMetrics() == 0,
}
},
},
{
name: "node include",
nodeInclude: []string{"x"},
client: &mockClient{
SetTokenF: func(token string) {},
GetSummaryF: func(ctx context.Context) (*Summary, error) {
return &Summary{
Cluster: "a",
Slaves: []Slave{
Slave{ID: "x"},
Slave{ID: "y"},
},
}, nil
},
GetContainersF: func(ctx context.Context, node string) ([]Container, error) {
return []Container{}, nil
},
GetNodeMetricsF: func(ctx context.Context, node string) (*Metrics, error) {
return &Metrics{
Datapoints: []DataPoint{
{
Name: "value",
Value: 42.0,
},
},
Dimensions: map[string]interface{}{
"hostname": "x",
},
}, nil
},
},
check: func(acc *testutil.Accumulator) []bool {
return []bool{
acc.HasPoint(
"dcos_node",
map[string]string{
"cluster": "a",
"hostname": "x",
},
"value", 42.0,
),
acc.NMetrics() == 1,
}
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
var acc testutil.Accumulator
dcos := &DCOS{
NodeInclude: tt.nodeInclude,
NodeExclude: tt.nodeExclude,
client: tt.client,
}
err := dcos.Gather(&acc)
require.NoError(t, err)
for i, ok := range tt.check(&acc) {
require.True(t, ok, fmt.Sprintf("Index was not true: %d", i))
}
})
}
}
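
Taken together, the cases above pin down the datapoint-to-field conversion: dots in the metric name become underscores, a `bytes` unit is appended to the field key (with the value emitted as an integer), and dimensions such as `hostname` become tags. A minimal sketch of the key conversion, for illustration only (`fieldKey` is a hypothetical helper, not the plugin's actual function):

```go
package main

import (
	"fmt"
	"strings"
)

// fieldKey mirrors the behavior the tests above assert:
// "network.in" with unit "bytes" -> "network_in_bytes".
func fieldKey(name, unit string) string {
	key := strings.Replace(name, ".", "_", -1)
	if unit == "bytes" && !strings.HasSuffix(key, "_bytes") {
		key += "_bytes"
	}
	return key
}

func main() {
	fmt.Println(fieldKey("process.count", "count")) // process_count
	fmt.Println(fieldKey("network.in", "bytes"))    // network_in_bytes
	fmt.Println(fieldKey("memory.total", "bytes"))  // memory_total_bytes
}
```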

View File

@@ -16,21 +16,21 @@ const metricName = "dmcache"
type cacheStatus struct {
device string
length int
length int64
target string
metadataBlocksize int
metadataUsed int
metadataTotal int
cacheBlocksize int
cacheUsed int
cacheTotal int
readHits int
readMisses int
writeHits int
writeMisses int
demotions int
promotions int
dirty int
metadataBlocksize int64
metadataUsed int64
metadataTotal int64
cacheBlocksize int64
cacheUsed int64
cacheTotal int64
readHits int64
readMisses int64
writeHits int64
writeMisses int64
demotions int64
promotions int64
dirty int64
}
func (c *DMCache) Gather(acc telegraf.Accumulator) error {
@@ -69,12 +69,12 @@ func parseDMSetupStatus(line string) (cacheStatus, error) {
}
status.device = strings.TrimRight(values[0], ":")
status.length, err = strconv.Atoi(values[2])
status.length, err = strconv.ParseInt(values[2], 10, 64)
if err != nil {
return cacheStatus{}, err
}
status.target = values[3]
status.metadataBlocksize, err = strconv.Atoi(values[4])
status.metadataBlocksize, err = strconv.ParseInt(values[4], 10, 64)
if err != nil {
return cacheStatus{}, err
}
@@ -82,15 +82,15 @@ func parseDMSetupStatus(line string) (cacheStatus, error) {
if len(metadata) != 2 {
return cacheStatus{}, parseError
}
status.metadataUsed, err = strconv.Atoi(metadata[0])
status.metadataUsed, err = strconv.ParseInt(metadata[0], 10, 64)
if err != nil {
return cacheStatus{}, err
}
status.metadataTotal, err = strconv.Atoi(metadata[1])
status.metadataTotal, err = strconv.ParseInt(metadata[1], 10, 64)
if err != nil {
return cacheStatus{}, err
}
status.cacheBlocksize, err = strconv.Atoi(values[6])
status.cacheBlocksize, err = strconv.ParseInt(values[6], 10, 64)
if err != nil {
return cacheStatus{}, err
}
@@ -98,39 +98,39 @@ func parseDMSetupStatus(line string) (cacheStatus, error) {
if len(cache) != 2 {
return cacheStatus{}, parseError
}
status.cacheUsed, err = strconv.Atoi(cache[0])
status.cacheUsed, err = strconv.ParseInt(cache[0], 10, 64)
if err != nil {
return cacheStatus{}, err
}
status.cacheTotal, err = strconv.Atoi(cache[1])
status.cacheTotal, err = strconv.ParseInt(cache[1], 10, 64)
if err != nil {
return cacheStatus{}, err
}
status.readHits, err = strconv.Atoi(values[8])
status.readHits, err = strconv.ParseInt(values[8], 10, 64)
if err != nil {
return cacheStatus{}, err
}
status.readMisses, err = strconv.Atoi(values[9])
status.readMisses, err = strconv.ParseInt(values[9], 10, 64)
if err != nil {
return cacheStatus{}, err
}
status.writeHits, err = strconv.Atoi(values[10])
status.writeHits, err = strconv.ParseInt(values[10], 10, 64)
if err != nil {
return cacheStatus{}, err
}
status.writeMisses, err = strconv.Atoi(values[11])
status.writeMisses, err = strconv.ParseInt(values[11], 10, 64)
if err != nil {
return cacheStatus{}, err
}
status.demotions, err = strconv.Atoi(values[12])
status.demotions, err = strconv.ParseInt(values[12], 10, 64)
if err != nil {
return cacheStatus{}, err
}
status.promotions, err = strconv.Atoi(values[13])
status.promotions, err = strconv.ParseInt(values[13], 10, 64)
if err != nil {
return cacheStatus{}, err
}
status.dirty, err = strconv.Atoi(values[14])
status.dirty, err = strconv.ParseInt(values[14], 10, 64)
if err != nil {
return cacheStatus{}, err
}
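
The motivation for the `Atoi` to `ParseInt` switch above: `strconv.Atoi` returns a platform-sized `int`, so on 32-bit builds values such as the 4883791872-sector length in the tests below overflow. A small standalone illustration:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	// strconv.Atoi yields an int, which is 32 bits wide on 32-bit
	// platforms and would fail with "value out of range" for this
	// input there. ParseInt with bitSize 64 always yields an int64.
	v, err := strconv.ParseInt("4883791872", 10, 64)
	if err != nil {
		panic(err)
	}
	fmt.Println(v) // 4883791872
}
```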

View File

@@ -1,3 +1,5 @@
// +build linux
package dmcache
import (
@@ -33,20 +35,20 @@ func TestPerDeviceGoodOutput(t *testing.T) {
"device": "cs-1",
}
fields1 := map[string]interface{}{
"length": 4883791872,
"metadata_blocksize": 8,
"metadata_used": 1018,
"metadata_total": 1501122,
"cache_blocksize": 512,
"cache_used": 7,
"cache_total": 464962,
"read_hits": 139,
"read_misses": 352643,
"write_hits": 15,
"write_misses": 46,
"demotions": 0,
"promotions": 7,
"dirty": 0,
"length": int64(4883791872),
"metadata_blocksize": int64(8),
"metadata_used": int64(1018),
"metadata_total": int64(1501122),
"cache_blocksize": int64(512),
"cache_used": int64(7),
"cache_total": int64(464962),
"read_hits": int64(139),
"read_misses": int64(352643),
"write_hits": int64(15),
"write_misses": int64(46),
"demotions": int64(0),
"promotions": int64(7),
"dirty": int64(0),
}
acc.AssertContainsTaggedFields(t, measurement, fields1, tags1)
@@ -54,20 +56,20 @@ func TestPerDeviceGoodOutput(t *testing.T) {
"device": "cs-2",
}
fields2 := map[string]interface{}{
"length": 4294967296,
"metadata_blocksize": 8,
"metadata_used": 72352,
"metadata_total": 1310720,
"cache_blocksize": 128,
"cache_used": 26,
"cache_total": 24327168,
"read_hits": 2409,
"read_misses": 286,
"write_hits": 265,
"write_misses": 524682,
"demotions": 0,
"promotions": 0,
"dirty": 0,
"length": int64(4294967296),
"metadata_blocksize": int64(8),
"metadata_used": int64(72352),
"metadata_total": int64(1310720),
"cache_blocksize": int64(128),
"cache_used": int64(26),
"cache_total": int64(24327168),
"read_hits": int64(2409),
"read_misses": int64(286),
"write_hits": int64(265),
"write_misses": int64(524682),
"demotions": int64(0),
"promotions": int64(0),
"dirty": int64(0),
}
acc.AssertContainsTaggedFields(t, measurement, fields2, tags2)
@@ -76,20 +78,20 @@ func TestPerDeviceGoodOutput(t *testing.T) {
}
fields3 := map[string]interface{}{
"length": 9178759168,
"metadata_blocksize": 16,
"metadata_used": 73370,
"metadata_total": 2811842,
"cache_blocksize": 640,
"cache_used": 33,
"cache_total": 24792130,
"read_hits": 2548,
"read_misses": 352929,
"write_hits": 280,
"write_misses": 524728,
"demotions": 0,
"promotions": 7,
"dirty": 0,
"length": int64(9178759168),
"metadata_blocksize": int64(16),
"metadata_used": int64(73370),
"metadata_total": int64(2811842),
"cache_blocksize": int64(640),
"cache_used": int64(33),
"cache_total": int64(24792130),
"read_hits": int64(2548),
"read_misses": int64(352929),
"write_hits": int64(280),
"write_misses": int64(524728),
"demotions": int64(0),
"promotions": int64(7),
"dirty": int64(0),
}
acc.AssertContainsTaggedFields(t, measurement, fields3, tags3)
}
@@ -111,20 +113,20 @@ func TestNotPerDeviceGoodOutput(t *testing.T) {
}
fields := map[string]interface{}{
"length": 9178759168,
"metadata_blocksize": 16,
"metadata_used": 73370,
"metadata_total": 2811842,
"cache_blocksize": 640,
"cache_used": 33,
"cache_total": 24792130,
"read_hits": 2548,
"read_misses": 352929,
"write_hits": 280,
"write_misses": 524728,
"demotions": 0,
"promotions": 7,
"dirty": 0,
"length": int64(9178759168),
"metadata_blocksize": int64(16),
"metadata_used": int64(73370),
"metadata_total": int64(2811842),
"cache_blocksize": int64(640),
"cache_used": int64(33),
"cache_total": int64(24792130),
"read_hits": int64(2548),
"read_misses": int64(352929),
"write_hits": int64(280),
"write_misses": int64(524728),
"demotions": int64(0),
"promotions": int64(7),
"dirty": int64(0),
}
acc.AssertContainsTaggedFields(t, measurement, fields, tags)
}

View File

@@ -17,7 +17,7 @@ type DnsQuery struct {
// Domains or subdomains to query
Domains []string
// Network protocl name
// Network protocol name
Network string
// Server to query

View File

@@ -17,6 +17,11 @@ to gather stats from the [Engine API](https://docs.docker.com/engine/api/v1.20/)
## To use environment variables (ie, docker-machine), set endpoint = "ENV"
endpoint = "unix:///var/run/docker.sock"
## Set to true to collect Swarm metrics (desired_replicas, running_replicas)
## Note: configure this in one of the manager nodes in a Swarm cluster.
## Configuring it in multiple Swarm managers results in duplicated metrics.
gather_services = false
## Only collect metrics for these containers. Values will be appended to
## container_name_include.
## Deprecated (1.4.0), use container_name_include
@@ -57,6 +62,15 @@ to gather stats from the [Engine API](https://docs.docker.com/engine/api/v1.20/)
When using the `"ENV"` endpoint, the connection is configured using the
[cli Docker environment variables](https://godoc.org/github.com/moby/moby/client#NewEnvClient).
#### Kubernetes Labels
Kubernetes may add many labels to your containers; if they are not needed, you
may prefer to exclude them:
```
docker_label_exclude = ["annotation.kubernetes*"]
```
### Measurements & Fields:
Every effort was made to preserve the names based on the JSON response from the
@@ -152,6 +166,9 @@ based on the availability of per-cpu stats on your system.
- available
- total
- used
- docker_swarm
- tasks_desired
- tasks_running
### Tags:
@@ -182,6 +199,10 @@ based on the availability of per-cpu stats on your system.
- network
- docker_container_blkio specific:
- device
- docker_swarm specific:
- service_id
- service_name
- service_mode
### Example Output:
@@ -233,4 +254,7 @@ io_service_bytes_recursive_sync=77824i,io_service_bytes_recursive_total=80293888
io_service_bytes_recursive_write=368640i,io_serviced_recursive_async=6562i,\
io_serviced_recursive_read=6492i,io_serviced_recursive_sync=37i,\
io_serviced_recursive_total=6599i,io_serviced_recursive_write=107i 1453409536840126713
```
>docker_swarm,
service_id=xaup2o9krw36j2dy1mjx1arjw,service_mode=replicated,service_name=test,\
tasks_desired=3,tasks_running=3 1508968160000000000
```

View File

@@ -6,6 +6,7 @@ import (
"net/http"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/swarm"
docker "github.com/docker/docker/client"
"github.com/docker/go-connections/sockets"
)
@@ -20,6 +21,9 @@ type Client interface {
ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error)
ContainerStats(ctx context.Context, containerID string, stream bool) (types.ContainerStats, error)
ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error)
ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error)
TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error)
NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error)
}
func NewEnvClient() (Client, error) {
@@ -65,3 +69,12 @@ func (c *SocketClient) ContainerStats(ctx context.Context, containerID string, s
func (c *SocketClient) ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error) {
return c.client.ContainerInspect(ctx, containerID)
}
func (c *SocketClient) ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) {
return c.client.ServiceList(ctx, options)
}
func (c *SocketClient) TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) {
return c.client.TaskList(ctx, options)
}
func (c *SocketClient) NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) {
return c.client.NodeList(ctx, options)
}

View File

@@ -6,6 +6,7 @@ import (
"encoding/json"
"fmt"
"io"
"log"
"net/http"
"regexp"
"strconv"
@@ -14,38 +15,29 @@ import (
"time"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/swarm"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/filter"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/inputs"
)
type DockerLabelFilter struct {
labelInclude filter.Filter
labelExclude filter.Filter
}
type DockerContainerFilter struct {
containerInclude filter.Filter
containerExclude filter.Filter
}
// Docker object
type Docker struct {
Endpoint string
ContainerNames []string
GatherServices bool `toml:"gather_services"`
Timeout internal.Duration
PerDevice bool `toml:"perdevice"`
Total bool `toml:"total"`
TagEnvironment []string `toml:"tag_env"`
LabelInclude []string `toml:"docker_label_include"`
LabelExclude []string `toml:"docker_label_exclude"`
LabelFilter DockerLabelFilter
ContainerInclude []string `toml:"container_name_include"`
ContainerExclude []string `toml:"container_name_exclude"`
ContainerFilter DockerContainerFilter
SSLCA string `toml:"ssl_ca"`
SSLCert string `toml:"ssl_cert"`
@@ -55,10 +47,12 @@ type Docker struct {
newEnvClient func() (Client, error)
newClient func(string, *tls.Config) (Client, error)
client Client
httpClient *http.Client
engine_host string
filtersCreated bool
client Client
httpClient *http.Client
engine_host string
filtersCreated bool
labelFilter filter.Filter
containerFilter filter.Filter
}
// KB, MB, GB, TB, PB...human friendly
@@ -82,6 +76,9 @@ var sampleConfig = `
## To use environment variables (ie, docker-machine), set endpoint = "ENV"
endpoint = "unix:///var/run/docker.sock"
## Set to true to collect Swarm metrics (desired_replicas, running_replicas)
gather_services = false
## Only collect metrics for these containers, collect all if empty
container_names = []
@@ -160,6 +157,13 @@ func (d *Docker) Gather(acc telegraf.Accumulator) error {
acc.AddError(err)
}
if d.GatherServices {
err := d.gatherSwarmInfo(acc)
if err != nil {
acc.AddError(err)
}
}
// List containers
opts := types.ContainerListOptions{}
ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration)
@@ -187,6 +191,75 @@ func (d *Docker) Gather(acc telegraf.Accumulator) error {
return nil
}
func (d *Docker) gatherSwarmInfo(acc telegraf.Accumulator) error {
ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration)
defer cancel()
services, err := d.client.ServiceList(ctx, types.ServiceListOptions{})
if err != nil {
return err
}
if len(services) > 0 {
tasks, err := d.client.TaskList(ctx, types.TaskListOptions{})
if err != nil {
return err
}
nodes, err := d.client.NodeList(ctx, types.NodeListOptions{})
if err != nil {
return err
}
running := map[string]int{}
tasksNoShutdown := map[string]int{}
activeNodes := make(map[string]struct{})
for _, n := range nodes {
if n.Status.State != swarm.NodeStateDown {
activeNodes[n.ID] = struct{}{}
}
}
for _, task := range tasks {
if task.DesiredState != swarm.TaskStateShutdown {
tasksNoShutdown[task.ServiceID]++
}
if task.Status.State == swarm.TaskStateRunning {
running[task.ServiceID]++
}
}
for _, service := range services {
tags := map[string]string{}
fields := make(map[string]interface{})
now := time.Now()
tags["service_id"] = service.ID
tags["service_name"] = service.Spec.Name
if service.Spec.Mode.Replicated != nil && service.Spec.Mode.Replicated.Replicas != nil {
tags["service_mode"] = "replicated"
fields["tasks_running"] = running[service.ID]
fields["tasks_desired"] = *service.Spec.Mode.Replicated.Replicas
} else if service.Spec.Mode.Global != nil {
tags["service_mode"] = "global"
fields["tasks_running"] = running[service.ID]
fields["tasks_desired"] = tasksNoShutdown[service.ID]
} else {
log.Printf("E! Unknown Replicas Mode")
}
// Add metrics
acc.AddFields("docker_swarm",
fields,
tags,
now)
}
}
return nil
}
func (d *Docker) gatherInfo(acc telegraf.Accumulator) error {
// Init vars
dataFields := make(map[string]interface{})
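
To make the accounting in `gatherSwarmInfo` above concrete: for a replicated service `tasks_desired` comes from the spec's replica count, while for a global service it is the number of tasks whose desired state is not shutdown; `tasks_running` is always counted from the task list. A toy illustration with assumed values:

```go
package main

import "fmt"

func main() {
	// Assumed values for one replicated service "svc": the spec asks
	// for 3 replicas, but only 2 tasks are currently running.
	replicas := uint64(3)
	running := map[string]int{"svc": 2}

	fields := map[string]interface{}{
		"tasks_desired": replicas,       // from Spec.Mode.Replicated.Replicas
		"tasks_running": running["svc"], // counted from TaskList
	}
	fmt.Println(fields) // tasks_desired:3 tasks_running:2
}
```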
@@ -291,12 +364,8 @@ func (d *Docker) gatherContainer(
"container_version": imageVersion,
}
if len(d.ContainerInclude) > 0 || len(d.ContainerExclude) > 0 {
if len(d.ContainerInclude) == 0 || !d.ContainerFilter.containerInclude.Match(cname) {
if len(d.ContainerExclude) == 0 || d.ContainerFilter.containerExclude.Match(cname) {
return nil
}
}
if !d.containerFilter.Match(cname) {
return nil
}
ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration)
@@ -317,10 +386,8 @@ func (d *Docker) gatherContainer(
// Add labels to tags
for k, label := range container.Labels {
if len(d.LabelInclude) == 0 || d.LabelFilter.labelInclude.Match(k) {
if len(d.LabelExclude) == 0 || !d.LabelFilter.labelExclude.Match(k) {
tags[k] = label
}
if d.labelFilter.Match(k) {
tags[k] = label
}
}
@@ -355,7 +422,11 @@ func gatherContainerStats(
total bool,
daemonOSType string,
) {
now := stat.Read
tm := stat.Read
if tm.Before(time.Unix(0, 0)) {
tm = time.Now()
}
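
The guard introduced above handles stats whose `Read` timestamp is unset: the zero `time.Time` predates the Unix epoch, so such metrics are stamped with the current time instead of year 1. In isolation:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	var tm time.Time // zero value, year 1, well before the Unix epoch
	if tm.Before(time.Unix(0, 0)) {
		tm = time.Now()
	}
	fmt.Println(tm) // the current time, not 0001-01-01
}
```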
memfields := map[string]interface{}{
"container_id": id,
@@ -415,7 +486,7 @@ func gatherContainerStats(
memfields["private_working_set"] = stat.MemoryStats.PrivateWorkingSet
}
acc.AddFields("docker_container_mem", memfields, tags, now)
acc.AddFields("docker_container_mem", memfields, tags, tm)
cpufields := map[string]interface{}{
"usage_total": stat.CPUStats.CPUUsage.TotalUsage,
@@ -440,7 +511,7 @@ func gatherContainerStats(
cputags := copyTags(tags)
cputags["cpu"] = "cpu-total"
acc.AddFields("docker_container_cpu", cpufields, cputags, now)
acc.AddFields("docker_container_cpu", cpufields, cputags, tm)
// If we have OnlineCPUs field, then use it to restrict stats gathering to only Online CPUs
// (https://github.com/moby/moby/commit/115f91d7575d6de6c7781a96a082f144fd17e400)
@@ -458,7 +529,7 @@ func gatherContainerStats(
"usage_total": percpu,
"container_id": id,
}
acc.AddFields("docker_container_cpu", fields, percputags, now)
acc.AddFields("docker_container_cpu", fields, percputags, tm)
}
totalNetworkStatMap := make(map[string]interface{})
@@ -478,7 +549,7 @@ func gatherContainerStats(
if perDevice {
nettags := copyTags(tags)
nettags["network"] = network
acc.AddFields("docker_container_net", netfields, nettags, now)
acc.AddFields("docker_container_net", netfields, nettags, tm)
}
if total {
for field, value := range netfields {
@@ -511,17 +582,17 @@ func gatherContainerStats(
nettags := copyTags(tags)
nettags["network"] = "total"
totalNetworkStatMap["container_id"] = id
acc.AddFields("docker_container_net", totalNetworkStatMap, nettags, now)
acc.AddFields("docker_container_net", totalNetworkStatMap, nettags, tm)
}
gatherBlockIOMetrics(stat, acc, tags, now, id, perDevice, total)
gatherBlockIOMetrics(stat, acc, tags, tm, id, perDevice, total)
}
func gatherBlockIOMetrics(
stat *types.StatsJSON,
acc telegraf.Accumulator,
tags map[string]string,
now time.Time,
tm time.Time,
id string,
perDevice bool,
total bool,
@@ -592,7 +663,7 @@ func gatherBlockIOMetrics(
if perDevice {
iotags := copyTags(tags)
iotags["device"] = device
acc.AddFields("docker_container_blkio", fields, iotags, now)
acc.AddFields("docker_container_blkio", fields, iotags, tm)
}
if total {
for field, value := range fields {
@@ -623,7 +694,7 @@ func gatherBlockIOMetrics(
totalStatMap["container_id"] = id
iotags := copyTags(tags)
iotags["device"] = "total"
acc.AddFields("docker_container_blkio", totalStatMap, iotags, now)
acc.AddFields("docker_container_blkio", totalStatMap, iotags, tm)
}
}
@@ -666,46 +737,25 @@ func parseSize(sizeStr string) (int64, error) {
}
func (d *Docker) createContainerFilters() error {
// Backwards compatibility for deprecated `container_names` parameter.
if len(d.ContainerNames) > 0 {
d.ContainerInclude = append(d.ContainerInclude, d.ContainerNames...)
}
if len(d.ContainerInclude) != 0 {
var err error
d.ContainerFilter.containerInclude, err = filter.Compile(d.ContainerInclude)
if err != nil {
return err
}
filter, err := filter.NewIncludeExcludeFilter(d.ContainerInclude, d.ContainerExclude)
if err != nil {
return err
}
if len(d.ContainerExclude) != 0 {
var err error
d.ContainerFilter.containerExclude, err = filter.Compile(d.ContainerExclude)
if err != nil {
return err
}
}
d.containerFilter = filter
return nil
}
func (d *Docker) createLabelFilters() error {
if len(d.LabelInclude) != 0 {
var err error
d.LabelFilter.labelInclude, err = filter.Compile(d.LabelInclude)
if err != nil {
return err
}
filter, err := filter.NewIncludeExcludeFilter(d.LabelInclude, d.LabelExclude)
if err != nil {
return err
}
if len(d.LabelExclude) != 0 {
var err error
d.LabelFilter.labelExclude, err = filter.Compile(d.LabelExclude)
if err != nil {
return err
}
}
d.labelFilter = filter
return nil
}
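
The rewrite above replaces the hand-rolled include/exclude logic with `filter.NewIncludeExcludeFilter`, whose semantics the tests below rely on: an empty include list matches everything, and an exclude match always wins. A quick usage sketch:

```go
package main

import (
	"fmt"

	"github.com/influxdata/telegraf/filter"
)

func main() {
	f, err := filter.NewIncludeExcludeFilter([]string{"a*"}, []string{"*b"})
	if err != nil {
		panic(err)
	}
	fmt.Println(f.Match("aa")) // true: included, not excluded
	fmt.Println(f.Match("ab")) // false: exclude wins over include
	fmt.Println(f.Match("bb")) // false: not included
}
```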

View File

@@ -8,6 +8,7 @@ import (
"github.com/influxdata/telegraf/testutil"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/swarm"
"github.com/stretchr/testify/require"
)
@@ -16,6 +17,9 @@ type MockClient struct {
ContainerListF func(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error)
ContainerStatsF func(ctx context.Context, containerID string, stream bool) (types.ContainerStats, error)
ContainerInspectF func(ctx context.Context, containerID string) (types.ContainerJSON, error)
ServiceListF func(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error)
TaskListF func(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error)
NodeListF func(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error)
}
func (c *MockClient) Info(ctx context.Context) (types.Info, error) {
@@ -44,21 +48,53 @@ func (c *MockClient) ContainerInspect(
return c.ContainerInspectF(ctx, containerID)
}
func (c *MockClient) ServiceList(
ctx context.Context,
options types.ServiceListOptions,
) ([]swarm.Service, error) {
return c.ServiceListF(ctx, options)
}
func (c *MockClient) TaskList(
ctx context.Context,
options types.TaskListOptions,
) ([]swarm.Task, error) {
return c.TaskListF(ctx, options)
}
func (c *MockClient) NodeList(
ctx context.Context,
options types.NodeListOptions,
) ([]swarm.Node, error) {
return c.NodeListF(ctx, options)
}
var baseClient = MockClient{
InfoF: func(context.Context) (types.Info, error) {
return info, nil
},
ContainerListF: func(context.Context, types.ContainerListOptions) ([]types.Container, error) {
return containerList, nil
},
ContainerStatsF: func(context.Context, string, bool) (types.ContainerStats, error) {
return containerStats(), nil
},
ContainerInspectF: func(context.Context, string) (types.ContainerJSON, error) {
return containerInspect, nil
},
ServiceListF: func(context.Context, types.ServiceListOptions) ([]swarm.Service, error) {
return ServiceList, nil
},
TaskListF: func(context.Context, types.TaskListOptions) ([]swarm.Task, error) {
return TaskList, nil
},
NodeListF: func(context.Context, types.NodeListOptions) ([]swarm.Node, error) {
return NodeList, nil
},
}
func newClient(host string, tlsConfig *tls.Config) (Client, error) {
return &MockClient{
InfoF: func(context.Context) (types.Info, error) {
return info, nil
},
ContainerListF: func(context.Context, types.ContainerListOptions) ([]types.Container, error) {
return containerList, nil
},
ContainerStatsF: func(context.Context, string, bool) (types.ContainerStats, error) {
return containerStats(), nil
},
ContainerInspectF: func(context.Context, string) (types.ContainerJSON, error) {
return containerInspect, nil
},
}, nil
return &baseClient, nil
}
func TestDockerGatherContainerStats(t *testing.T) {
@@ -227,6 +263,15 @@ func TestDocker_WindowsMemoryContainerStats(t *testing.T) {
ContainerInspectF: func(ctx context.Context, containerID string) (types.ContainerJSON, error) {
return containerInspect, nil
},
ServiceListF: func(context.Context, types.ServiceListOptions) ([]swarm.Service, error) {
return ServiceList, nil
},
TaskListF: func(context.Context, types.TaskListOptions) ([]swarm.Task, error) {
return TaskList, nil
},
NodeListF: func(context.Context, types.NodeListOptions) ([]swarm.Node, error) {
return NodeList, nil
},
}, nil
},
}
@@ -234,82 +279,291 @@ func TestDocker_WindowsMemoryContainerStats(t *testing.T) {
require.NoError(t, err)
}
func TestDockerGatherLabels(t *testing.T) {
var gatherLabelsTests = []struct {
include []string
exclude []string
expected []string
notexpected []string
func TestContainerLabels(t *testing.T) {
var tests = []struct {
name string
container types.Container
include []string
exclude []string
expected map[string]string
}{
{[]string{}, []string{}, []string{"label1", "label2"}, []string{}},
{[]string{"*"}, []string{}, []string{"label1", "label2"}, []string{}},
{[]string{"lab*"}, []string{}, []string{"label1", "label2"}, []string{}},
{[]string{"label1"}, []string{}, []string{"label1"}, []string{"label2"}},
{[]string{"label1*"}, []string{}, []string{"label1"}, []string{"label2"}},
{[]string{}, []string{"*"}, []string{}, []string{"label1", "label2"}},
{[]string{}, []string{"lab*"}, []string{}, []string{"label1", "label2"}},
{[]string{}, []string{"label1"}, []string{"label2"}, []string{"label1"}},
{[]string{"*"}, []string{"*"}, []string{}, []string{"label1", "label2"}},
{
name: "Nil filters matches all",
container: types.Container{
Labels: map[string]string{
"a": "x",
},
},
include: nil,
exclude: nil,
expected: map[string]string{
"a": "x",
},
},
{
name: "Empty filters matches all",
container: types.Container{
Labels: map[string]string{
"a": "x",
},
},
include: []string{},
exclude: []string{},
expected: map[string]string{
"a": "x",
},
},
{
name: "Must match include",
container: types.Container{
Labels: map[string]string{
"a": "x",
"b": "y",
},
},
include: []string{"a"},
exclude: []string{},
expected: map[string]string{
"a": "x",
},
},
{
name: "Must not match exclude",
container: types.Container{
Labels: map[string]string{
"a": "x",
"b": "y",
},
},
include: []string{},
exclude: []string{"b"},
expected: map[string]string{
"a": "x",
},
},
{
name: "Include Glob",
container: types.Container{
Labels: map[string]string{
"aa": "x",
"ab": "y",
"bb": "z",
},
},
include: []string{"a*"},
exclude: []string{},
expected: map[string]string{
"aa": "x",
"ab": "y",
},
},
{
name: "Exclude Glob",
container: types.Container{
Labels: map[string]string{
"aa": "x",
"ab": "y",
"bb": "z",
},
},
include: []string{},
exclude: []string{"a*"},
expected: map[string]string{
"bb": "z",
},
},
{
name: "Excluded Includes",
container: types.Container{
Labels: map[string]string{
"aa": "x",
"ab": "y",
"bb": "z",
},
},
include: []string{"a*"},
exclude: []string{"*b"},
expected: map[string]string{
"aa": "x",
},
},
}
for _, tt := range gatherLabelsTests {
t.Run("", func(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
var acc testutil.Accumulator
d := Docker{
newClient: newClient,
newClientFunc := func(host string, tlsConfig *tls.Config) (Client, error) {
client := baseClient
client.ContainerListF = func(context.Context, types.ContainerListOptions) ([]types.Container, error) {
return []types.Container{tt.container}, nil
}
return &client, nil
}
for _, label := range tt.include {
d.LabelInclude = append(d.LabelInclude, label)
}
for _, label := range tt.exclude {
d.LabelExclude = append(d.LabelExclude, label)
d := Docker{
newClient: newClientFunc,
LabelInclude: tt.include,
LabelExclude: tt.exclude,
}
err := d.Gather(&acc)
require.NoError(t, err)
for _, label := range tt.expected {
if !acc.HasTag("docker_container_cpu", label) {
t.Errorf("Didn't get expected label of %s. Test was: Include: %s Exclude %s",
label, tt.include, tt.exclude)
// Grab tags from a container metric
var actual map[string]string
for _, metric := range acc.Metrics {
if metric.Measurement == "docker_container_cpu" {
actual = metric.Tags
}
}
for _, label := range tt.notexpected {
if acc.HasTag("docker_container_cpu", label) {
t.Errorf("Got unexpected label of %s. Test was: Include: %s Exclude %s",
label, tt.include, tt.exclude)
}
for k, v := range tt.expected {
require.Equal(t, v, actual[k])
}
})
}
}
func TestContainerNames(t *testing.T) {
var gatherContainerNames = []struct {
include []string
exclude []string
expected []string
notexpected []string
var tests = []struct {
name string
containers [][]string
include []string
exclude []string
expected []string
}{
{[]string{}, []string{}, []string{"etcd", "etcd2"}, []string{}},
{[]string{"*"}, []string{}, []string{"etcd", "etcd2"}, []string{}},
{[]string{"etc*"}, []string{}, []string{"etcd", "etcd2"}, []string{}},
{[]string{"etcd"}, []string{}, []string{"etcd"}, []string{"etcd2"}},
{[]string{"etcd2*"}, []string{}, []string{"etcd2"}, []string{"etcd"}},
{[]string{}, []string{"etc*"}, []string{}, []string{"etcd", "etcd2"}},
{[]string{}, []string{"etcd"}, []string{"etcd2"}, []string{"etcd"}},
{[]string{"*"}, []string{"*"}, []string{"etcd", "etcd2"}, []string{}},
{[]string{}, []string{"*"}, []string{""}, []string{"etcd", "etcd2"}},
{
name: "Nil filters matches all",
containers: [][]string{
{"/etcd"},
{"/etcd2"},
},
include: nil,
exclude: nil,
expected: []string{"etcd", "etcd2"},
},
{
name: "Empty filters matches all",
containers: [][]string{
{"/etcd"},
{"/etcd2"},
},
include: []string{},
exclude: []string{},
expected: []string{"etcd", "etcd2"},
},
{
name: "Match all containers",
containers: [][]string{
{"/etcd"},
{"/etcd2"},
},
include: []string{"*"},
exclude: []string{},
expected: []string{"etcd", "etcd2"},
},
{
name: "Include prefix match",
containers: [][]string{
{"/etcd"},
{"/etcd2"},
},
include: []string{"etc*"},
exclude: []string{},
expected: []string{"etcd", "etcd2"},
},
{
name: "Exact match",
containers: [][]string{
{"/etcd"},
{"/etcd2"},
},
include: []string{"etcd"},
exclude: []string{},
expected: []string{"etcd"},
},
{
name: "Star matches zero length",
containers: [][]string{
{"/etcd"},
{"/etcd2"},
},
include: []string{"etcd2*"},
exclude: []string{},
expected: []string{"etcd2"},
},
{
name: "Exclude matches all",
containers: [][]string{
{"/etcd"},
{"/etcd2"},
},
include: []string{},
exclude: []string{"etc*"},
expected: []string{},
},
{
name: "Exclude single",
containers: [][]string{
{"/etcd"},
{"/etcd2"},
},
include: []string{},
exclude: []string{"etcd"},
expected: []string{"etcd2"},
},
{
name: "Exclude all",
containers: [][]string{
{"/etcd"},
{"/etcd2"},
},
include: []string{"*"},
exclude: []string{"*"},
expected: []string{},
},
{
name: "Exclude item matching include",
containers: [][]string{
{"acme"},
{"foo"},
{"acme-test"},
},
include: []string{"acme*"},
exclude: []string{"*test*"},
expected: []string{"acme"},
},
{
name: "Exclude item no wildcards",
containers: [][]string{
{"acme"},
{"acme-test"},
},
include: []string{"acme*"},
exclude: []string{"test"},
expected: []string{"acme", "acme-test"},
},
}
for _, tt := range gatherContainerNames {
t.Run("", func(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
var acc testutil.Accumulator
newClientFunc := func(host string, tlsConfig *tls.Config) (Client, error) {
client := baseClient
client.ContainerListF = func(context.Context, types.ContainerListOptions) ([]types.Container, error) {
var containers []types.Container
for _, names := range tt.containers {
containers = append(containers, types.Container{
Names: names,
})
}
return containers, nil
}
return &client, nil
}
d := Docker{
newClient: newClient,
newClient: newClientFunc,
ContainerInclude: tt.include,
ContainerExclude: tt.exclude,
}
@@ -317,39 +571,21 @@ func TestContainerNames(t *testing.T) {
err := d.Gather(&acc)
require.NoError(t, err)
// Set of expected names
var expected = make(map[string]bool)
for _, v := range tt.expected {
expected[v] = true
}
// Set of actual names
var actual = make(map[string]bool)
for _, metric := range acc.Metrics {
if metric.Measurement == "docker_container_cpu" {
if val, ok := metric.Tags["container_name"]; ok {
var found bool = false
for _, cname := range tt.expected {
if val == cname {
found = true
break
}
}
if !found {
t.Errorf("Got unexpected container of %s. Test was -> Include: %s, Exclude: %s", val, tt.include, tt.exclude)
}
}
if name, ok := metric.Tags["container_name"]; ok {
actual[name] = true
}
}
for _, metric := range acc.Metrics {
if metric.Measurement == "docker_container_cpu" {
if val, ok := metric.Tags["container_name"]; ok {
var found bool = false
for _, cname := range tt.notexpected {
if val == cname {
found = true
break
}
}
if found {
t.Errorf("Got unexpected container of %s. Test was -> Include: %s, Exclude: %s", val, tt.include, tt.exclude)
}
}
}
}
require.Equal(t, expected, actual)
})
}
}
@@ -436,3 +672,42 @@ func TestDockerGatherInfo(t *testing.T) {
},
)
}
func TestDockerGatherSwarmInfo(t *testing.T) {
var acc testutil.Accumulator
d := Docker{
newClient: newClient,
}
err := acc.GatherError(d.Gather)
require.NoError(t, err)
d.gatherSwarmInfo(&acc)
// test docker_swarm measurement
acc.AssertContainsTaggedFields(t,
"docker_swarm",
map[string]interface{}{
"tasks_running": int(2),
"tasks_desired": uint64(2),
},
map[string]string{
"service_id": "qolkls9g5iasdiuihcyz9rnx2",
"service_name": "test1",
"service_mode": "replicated",
},
)
acc.AssertContainsTaggedFields(t,
"docker_swarm",
map[string]interface{}{
"tasks_running": int(1),
"tasks_desired": int(1),
},
map[string]string{
"service_id": "qolkls9g5iasdiuihcyz9rn3",
"service_name": "test2",
"service_mode": "global",
},
)
}

View File

@@ -8,6 +8,7 @@ import (
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/registry"
"github.com/docker/docker/api/types/swarm"
)
var info = types.Info{
@@ -133,6 +134,79 @@ var containerList = []types.Container{
},
}
var two = uint64(2)
var ServiceList = []swarm.Service{
swarm.Service{
ID: "qolkls9g5iasdiuihcyz9rnx2",
Spec: swarm.ServiceSpec{
Annotations: swarm.Annotations{
Name: "test1",
},
Mode: swarm.ServiceMode{
Replicated: &swarm.ReplicatedService{
Replicas: &two,
},
},
},
},
swarm.Service{
ID: "qolkls9g5iasdiuihcyz9rn3",
Spec: swarm.ServiceSpec{
Annotations: swarm.Annotations{
Name: "test2",
},
Mode: swarm.ServiceMode{
Global: &swarm.GlobalService{},
},
},
},
}
var TaskList = []swarm.Task{
swarm.Task{
ID: "kwh0lv7hwwbh",
ServiceID: "qolkls9g5iasdiuihcyz9rnx2",
NodeID: "0cl4jturcyd1ks3fwpd010kor",
Status: swarm.TaskStatus{
State: "running",
},
DesiredState: "running",
},
swarm.Task{
ID: "u78m5ojbivc3",
ServiceID: "qolkls9g5iasdiuihcyz9rnx2",
NodeID: "0cl4jturcyd1ks3fwpd010kor",
Status: swarm.TaskStatus{
State: "running",
},
DesiredState: "running",
},
swarm.Task{
ID: "1n1uilkhr98l",
ServiceID: "qolkls9g5iasdiuihcyz9rn3",
NodeID: "0cl4jturcyd1ks3fwpd010kor",
Status: swarm.TaskStatus{
State: "running",
},
DesiredState: "running",
},
}
var NodeList = []swarm.Node{
swarm.Node{
ID: "0cl4jturcyd1ks3fwpd010kor",
Status: swarm.NodeStatus{
State: "ready",
},
},
swarm.Node{
ID: "0cl4jturcyd1ks3fwpd010kor",
Status: swarm.NodeStatus{
State: "ready",
},
},
}
func containerStats() types.ContainerStats {
var stat types.ContainerStats
jsonStat := `

View File

@@ -23,10 +23,21 @@ or [cluster-stats](https://www.elastic.co/guide/en/elasticsearch/reference/curre
## Set cluster_health to true when you want to also obtain cluster health stats
cluster_health = false
## Set cluster_stats to true when you want to obtain cluster stats from the
## Master node.
## Adjust cluster_health_level when you want to also obtain detailed health stats
## The options are
## - indices (default)
## - cluster
# cluster_health_level = "indices"
## Set cluster_stats to true when you want to also obtain cluster stats from the
## Master node.
cluster_stats = false
## node_stats is a list of sub-stats that you want to have gathered. Valid options
## are "indices", "os", "process", "jvm", "thread_pool", "fs", "transport", "http",
## "breakers". Per default, all stats are gathered.
# node_stats = ["jvm", "http"]
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"

View File

@@ -3,17 +3,16 @@ package elasticsearch
import (
"encoding/json"
"fmt"
"net/http"
"regexp"
"sync"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/inputs"
jsonparser "github.com/influxdata/telegraf/plugins/parsers/json"
"io/ioutil"
"net/http"
"regexp"
"strings"
"sync"
"time"
)
// mask for masking username/password from error messages
@@ -94,10 +93,21 @@ const sampleConfig = `
## Set cluster_health to true when you want to also obtain cluster health stats
cluster_health = false
## Adjust cluster_health_level when you want to also obtain detailed health stats
## The options are
## - indices (default)
## - cluster
# cluster_health_level = "indices"
## Set cluster_stats to true when you want to also obtain cluster stats from the
## Master node.
cluster_stats = false
## node_stats is a list of sub-stats that you want to have gathered. Valid options
## are "indices", "os", "process", "jvm", "thread_pool", "fs", "transport", "http",
## "breakers". Per default, all stats are gathered.
# node_stats = ["jvm", "http"]
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
@@ -113,7 +123,9 @@ type Elasticsearch struct {
Servers []string
HttpTimeout internal.Duration
ClusterHealth bool
ClusterHealthLevel string
ClusterStats bool
NodeStats []string
SSLCA string `toml:"ssl_ca"` // Path to CA file
SSLCert string `toml:"ssl_cert"` // Path to host cert file
SSLKey string `toml:"ssl_key"` // Path to cert key file
@@ -126,7 +138,8 @@ type Elasticsearch struct {
// NewElasticsearch return a new instance of Elasticsearch
func NewElasticsearch() *Elasticsearch {
return &Elasticsearch{
HttpTimeout: internal.Duration{Duration: time.Second * 5},
HttpTimeout: internal.Duration{Duration: time.Second * 5},
ClusterHealthLevel: "indices",
}
}
@@ -158,12 +171,7 @@ func (e *Elasticsearch) Gather(acc telegraf.Accumulator) error {
for _, serv := range e.Servers {
go func(s string, acc telegraf.Accumulator) {
defer wg.Done()
var url string
if e.Local {
url = s + statsPathLocal
} else {
url = s + statsPath
}
url := e.nodeStatsUrl(s)
e.isMaster = false
if e.ClusterStats {
@@ -182,7 +190,10 @@ func (e *Elasticsearch) Gather(acc telegraf.Accumulator) error {
}
if e.ClusterHealth {
url = s + "/_cluster/health?level=indices"
url = s + "/_cluster/health"
if e.ClusterHealthLevel != "" {
url = url + "?level=" + e.ClusterHealthLevel
}
if err := e.gatherClusterHealth(url, acc); err != nil {
acc.AddError(fmt.Errorf(mask.ReplaceAllString(err.Error(), "http(s)://XXX:XXX@")))
return
@@ -219,6 +230,22 @@ func (e *Elasticsearch) createHttpClient() (*http.Client, error) {
return client, nil
}
func (e *Elasticsearch) nodeStatsUrl(baseUrl string) string {
var url string
if e.Local {
url = baseUrl + statsPathLocal
} else {
url = baseUrl + statsPath
}
if len(e.NodeStats) == 0 {
return url
}
return fmt.Sprintf("%s/%s", url, strings.Join(e.NodeStats, ","))
}
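
Concretely, `nodeStatsUrl` above appends the comma-joined sub-stat list to the stats path, and the cluster-health change earlier in this file appends `?level=` the same way. A standalone sketch, assuming `statsPath` is `"/_nodes/stats"` (the constant is not shown in this hunk):

```go
package main

import (
	"fmt"
	"strings"
)

// Assumed value of the statsPath constant referenced above.
const statsPath = "/_nodes/stats"

func nodeStatsUrl(baseUrl string, nodeStats []string) string {
	url := baseUrl + statsPath
	if len(nodeStats) == 0 {
		return url
	}
	return fmt.Sprintf("%s/%s", url, strings.Join(nodeStats, ","))
}

func main() {
	// http://localhost:9200/_nodes/stats/jvm,http
	fmt.Println(nodeStatsUrl("http://localhost:9200", []string{"jvm", "http"}))
}
```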
func (e *Elasticsearch) gatherNodeStats(url string, acc telegraf.Accumulator) error {
nodeStats := &struct {
ClusterName string `json:"cluster_name"`
@@ -259,6 +286,11 @@ func (e *Elasticsearch) gatherNodeStats(url string, acc telegraf.Accumulator) er
now := time.Now()
for p, s := range stats {
// skip if one of the individual node stats is not present in the
// original result
if s == nil {
continue
}
f := jsonparser.JSONFlattener{}
// parse Json, ignoring strings and bools
err := f.FlattenJSON("", s)

View File

@@ -13,6 +13,16 @@ import (
"github.com/stretchr/testify/require"
)
func defaultTags() map[string]string {
return map[string]string{
"cluster_name": "es-testcluster",
"node_attribute_master": "true",
"node_id": "SDFsfSDFsdfFSDSDfSFDSDF",
"node_name": "test.host.com",
"node_host": "test",
}
}
type transportMock struct {
statusCode int
body string
@@ -45,15 +55,9 @@ func checkIsMaster(es *Elasticsearch, expected bool, t *testing.T) {
assert.Fail(t, msg)
}
}
func checkNodeStatsResult(t *testing.T, acc *testutil.Accumulator) {
tags := map[string]string{
"cluster_name": "es-testcluster",
"node_attribute_master": "true",
"node_id": "SDFsfSDFsdfFSDSDfSFDSDF",
"node_name": "test.host.com",
"node_host": "test",
}
func checkNodeStatsResult(t *testing.T, acc *testutil.Accumulator) {
tags := defaultTags()
acc.AssertContainsTaggedFields(t, "elasticsearch_indices", nodestatsIndicesExpected, tags)
acc.AssertContainsTaggedFields(t, "elasticsearch_os", nodestatsOsExpected, tags)
acc.AssertContainsTaggedFields(t, "elasticsearch_process", nodestatsProcessExpected, tags)
@@ -79,6 +83,31 @@ func TestGather(t *testing.T) {
checkNodeStatsResult(t, &acc)
}
func TestGatherIndividualStats(t *testing.T) {
es := newElasticsearchWithClient()
es.Servers = []string{"http://example.com:9200"}
es.NodeStats = []string{"jvm", "process"}
es.client.Transport = newTransportMock(http.StatusOK, nodeStatsResponseJVMProcess)
var acc testutil.Accumulator
if err := acc.GatherError(es.Gather); err != nil {
t.Fatal(err)
}
checkIsMaster(es, false, t)
tags := defaultTags()
acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_indices", nodestatsIndicesExpected, tags)
acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_os", nodestatsOsExpected, tags)
acc.AssertContainsTaggedFields(t, "elasticsearch_process", nodestatsProcessExpected, tags)
acc.AssertContainsTaggedFields(t, "elasticsearch_jvm", nodestatsJvmExpected, tags)
acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_thread_pool", nodestatsThreadPoolExpected, tags)
acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_fs", nodestatsFsExpected, tags)
acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_transport", nodestatsTransportExpected, tags)
acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_http", nodestatsHttpExpected, tags)
acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_breakers", nodestatsBreakersExpected, tags)
}
func TestGatherNodeStats(t *testing.T) {
es := newElasticsearchWithClient()
es.Servers = []string{"http://example.com:9200"}
@@ -93,10 +122,11 @@ func TestGatherNodeStats(t *testing.T) {
checkNodeStatsResult(t, &acc)
}
func TestGatherClusterHealth(t *testing.T) {
func TestGatherClusterHealthEmptyClusterHealth(t *testing.T) {
es := newElasticsearchWithClient()
es.Servers = []string{"http://example.com:9200"}
es.ClusterHealth = true
es.ClusterHealthLevel = ""
es.client.Transport = newTransportMock(http.StatusOK, clusterHealthResponse)
var acc testutil.Accumulator
@@ -104,6 +134,56 @@ func TestGatherClusterHealth(t *testing.T) {
checkIsMaster(es, false, t)
acc.AssertContainsTaggedFields(t, "elasticsearch_cluster_health",
clusterHealthExpected,
map[string]string{"name": "elasticsearch_telegraf"})
acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_indices",
v1IndexExpected,
map[string]string{"index": "v1"})
acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_indices",
v2IndexExpected,
map[string]string{"index": "v2"})
}
func TestGatherClusterHealthSpecificClusterHealth(t *testing.T) {
es := newElasticsearchWithClient()
es.Servers = []string{"http://example.com:9200"}
es.ClusterHealth = true
es.ClusterHealthLevel = "cluster"
es.client.Transport = newTransportMock(http.StatusOK, clusterHealthResponse)
var acc testutil.Accumulator
require.NoError(t, es.gatherClusterHealth("junk", &acc))
checkIsMaster(es, false, t)
acc.AssertContainsTaggedFields(t, "elasticsearch_cluster_health",
clusterHealthExpected,
map[string]string{"name": "elasticsearch_telegraf"})
acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_indices",
v1IndexExpected,
map[string]string{"index": "v1"})
acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_indices",
v2IndexExpected,
map[string]string{"index": "v2"})
}
func TestGatherClusterHealthAlsoIndicesHealth(t *testing.T) {
es := newElasticsearchWithClient()
es.Servers = []string{"http://example.com:9200"}
es.ClusterHealth = true
es.ClusterHealthLevel = "indices"
es.client.Transport = newTransportMock(http.StatusOK, clusterHealthResponseWithIndices)
var acc testutil.Accumulator
require.NoError(t, es.gatherClusterHealth("junk", &acc))
checkIsMaster(es, false, t)
acc.AssertContainsTaggedFields(t, "elasticsearch_cluster_health",
clusterHealthExpected,
map[string]string{"name": "elasticsearch_telegraf"})
@@ -185,7 +265,6 @@ func TestGatherClusterStatsNonMaster(t *testing.T) {
// ensure flag is clear so Cluster Stats would not be done
checkIsMaster(es, false, t)
checkNodeStatsResult(t, &acc)
}
func newElasticsearchWithClient() *Elasticsearch {

View File

@@ -1,6 +1,21 @@
package elasticsearch
const clusterHealthResponse = `
{
"cluster_name": "elasticsearch_telegraf",
"status": "green",
"timed_out": false,
"number_of_nodes": 3,
"number_of_data_nodes": 3,
"active_primary_shards": 5,
"active_shards": 15,
"relocating_shards": 0,
"initializing_shards": 0,
"unassigned_shards": 0
}
`
const clusterHealthResponseWithIndices = `
{
"cluster_name": "elasticsearch_telegraf",
"status": "green",
@@ -489,6 +504,100 @@ const nodeStatsResponse = `
}
`
const nodeStatsResponseJVMProcess = `
{
"cluster_name": "es-testcluster",
"nodes": {
"SDFsfSDFsdfFSDSDfSFDSDF": {
"timestamp": 1436365550135,
"name": "test.host.com",
"transport_address": "inet[/127.0.0.1:9300]",
"host": "test",
"ip": [
"inet[/127.0.0.1:9300]",
"NONE"
],
"attributes": {
"master": "true"
},
"process": {
"timestamp": 1436460392945,
"open_file_descriptors": 160,
"cpu": {
"percent": 2,
"sys_in_millis": 1870,
"user_in_millis": 13610,
"total_in_millis": 15480
},
"mem": {
"total_virtual_in_bytes": 4747890688
}
},
"jvm": {
"timestamp": 1436460392945,
"uptime_in_millis": 202245,
"mem": {
"heap_used_in_bytes": 52709568,
"heap_used_percent": 5,
"heap_committed_in_bytes": 259522560,
"heap_max_in_bytes": 1038876672,
"non_heap_used_in_bytes": 39634576,
"non_heap_committed_in_bytes": 40841216,
"pools": {
"young": {
"used_in_bytes": 32685760,
"max_in_bytes": 279183360,
"peak_used_in_bytes": 71630848,
"peak_max_in_bytes": 279183360
},
"survivor": {
"used_in_bytes": 8912880,
"max_in_bytes": 34865152,
"peak_used_in_bytes": 8912888,
"peak_max_in_bytes": 34865152
},
"old": {
"used_in_bytes": 11110928,
"max_in_bytes": 724828160,
"peak_used_in_bytes": 14354608,
"peak_max_in_bytes": 724828160
}
}
},
"threads": {
"count": 44,
"peak_count": 45
},
"gc": {
"collectors": {
"young": {
"collection_count": 2,
"collection_time_in_millis": 98
},
"old": {
"collection_count": 1,
"collection_time_in_millis": 24
}
}
},
"buffer_pools": {
"direct": {
"count": 40,
"used_in_bytes": 6304239,
"total_capacity_in_bytes": 6304239
},
"mapped": {
"count": 0,
"used_in_bytes": 0,
"total_capacity_in_bytes": 0
}
}
}
}
}
}
`
var nodestatsIndicesExpected = map[string]interface{}{
"id_cache_memory_size_in_bytes": float64(0),
"completion_size_in_bytes": float64(0),

View File

@@ -1,19 +1,19 @@
# Fail2ban Plugin
# Fail2ban Input Plugin
The fail2ban plugin gathers counts of failed and banned ip addresses from fail2ban.
The fail2ban plugin gathers the count of failed and banned ip addresses using [fail2ban](https://www.fail2ban.org).
This plugin run fail2ban-client command, and fail2ban-client require root access.
You have to grant telegraf to run fail2ban-client:
This plugin runs the `fail2ban-client` command which generally requires root access.
Acquiring the required permissions can be done using several methods:
- Run telegraf as root. (deprecate)
- Configure sudo to grant telegraf to fail2ban-client.
- Use sudo to run fail2ban-client.
- Run telegraf as root. (not recommended)
### Using sudo
You may edit your sudo configuration with the following:
``` sudo
telegraf ALL=(root) NOPASSWD: /usr/bin/fail2ban-client status *
telegraf ALL=(root) NOEXEC: NOPASSWD: /usr/bin/fail2ban-client status, /usr/bin/fail2ban-client status *
```
### Configuration:
@@ -21,10 +21,7 @@ telegraf ALL=(root) NOPASSWD: /usr/bin/fail2ban-client status *
``` toml
# Read metrics from fail2ban.
[[inputs.fail2ban]]
## fail2ban-client require root access.
## Setting 'use_sudo' to true will make use of sudo to run fail2ban-client.
## Users must configure sudo to allow telegraf user to run fail2ban-client with no password.
## This plugin run only "fail2ban-client status".
## Use sudo to run fail2ban-client
use_sudo = false
```
@@ -38,7 +35,7 @@ telegraf ALL=(root) NOPASSWD: /usr/bin/fail2ban-client status *
- All measurements have the following tags:
- jail
### Example Output:
```
@@ -55,6 +52,5 @@ Status for the jail: sshd
```
```
$ ./telegraf --config telegraf.conf --input-filter fail2ban --test
fail2ban,jail=sshd failed=5i,banned=2i 1495868667000000000
```

View File

@@ -1,5 +1,3 @@
// +build linux
package fail2ban
import (
@@ -8,9 +6,10 @@ import (
"os/exec"
"strings"
"strconv"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
"strconv"
)
var (
@@ -23,10 +22,7 @@ type Fail2ban struct {
}
var sampleConfig = `
## fail2ban-client require root access.
## Setting 'use_sudo' to true will make use of sudo to run fail2ban-client.
## Users must configure sudo to allow telegraf user to run fail2ban-client with no password.
## This plugin run only "fail2ban-client status".
## Use sudo to run fail2ban-client
use_sudo = false
`

View File

@@ -1,3 +0,0 @@
// +build !linux
package fail2ban

View File

@@ -20,6 +20,7 @@ The filestat plugin gathers metrics about file existence, size, and other stats.
- filestat
- exists (int, 0 | 1)
- size_bytes (int, bytes)
- modification_time (int, unixtime)
- md5 (optional, string)
### Tags:
@@ -32,6 +33,6 @@ The filestat plugin gathers metrics about file existence, size, and other stats.
```
$ telegraf --config /etc/telegraf/telegraf.conf --input-filter filestat --test
* Plugin: filestat, Collection 1
> filestat,file=/tmp/foo/bar,host=tyrion exists=0i 1461203374493128216
> filestat,file=/Users/sparrc/ws/telegraf.conf,host=tyrion exists=1i,size=47894i 1461203374493199335
> filestat,file=/tmp/foo/bar,host=tyrion exists=0i 1507218518192154351
> filestat,file=/Users/sparrc/ws/telegraf.conf,host=tyrion exists=1i,size=47894i,modification_time=1507152973123456789i 1507218518192154351
```

View File

@@ -86,6 +86,7 @@ func (f *FileStat) Gather(acc telegraf.Accumulator) error {
fileName)
} else {
fields["size_bytes"] = fileInfo.Size()
fields["modification_time"] = fileInfo.ModTime().UnixNano()
}
if f.Md5 {
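
The hunk above ends mid-function, but the new field is straightforward; a standalone illustration of recording a file's modification time as Unix nanoseconds, as the added line does:

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	info, err := os.Stat("/etc/hosts") // any existing file
	if err != nil {
		return // mirrors the plugin: no field when the file is absent
	}
	fmt.Println(info.ModTime().UnixNano()) // e.g. 1507152973123456789
}
```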

View File

@@ -5,6 +5,8 @@ import (
"strings"
"testing"
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
)
@@ -24,28 +26,19 @@ func TestGatherNoMd5(t *testing.T) {
tags1 := map[string]string{
"file": dir + "log1.log",
}
fields1 := map[string]interface{}{
"size_bytes": int64(0),
"exists": int64(1),
}
acc.AssertContainsTaggedFields(t, "filestat", fields1, tags1)
require.True(t, acc.HasPoint("filestat", tags1, "size_bytes", int64(0)))
require.True(t, acc.HasPoint("filestat", tags1, "exists", int64(1)))
tags2 := map[string]string{
"file": dir + "log2.log",
}
fields2 := map[string]interface{}{
"size_bytes": int64(0),
"exists": int64(1),
}
acc.AssertContainsTaggedFields(t, "filestat", fields2, tags2)
require.True(t, acc.HasPoint("filestat", tags2, "size_bytes", int64(0)))
require.True(t, acc.HasPoint("filestat", tags2, "exists", int64(1)))
tags3 := map[string]string{
"file": "/non/existant/file",
}
fields3 := map[string]interface{}{
"exists": int64(0),
}
acc.AssertContainsTaggedFields(t, "filestat", fields3, tags3)
require.True(t, acc.HasPoint("filestat", tags3, "exists", int64(0)))
}
func TestGatherExplicitFiles(t *testing.T) {
@@ -64,30 +57,21 @@ func TestGatherExplicitFiles(t *testing.T) {
tags1 := map[string]string{
"file": dir + "log1.log",
}
fields1 := map[string]interface{}{
"size_bytes": int64(0),
"exists": int64(1),
"md5_sum": "d41d8cd98f00b204e9800998ecf8427e",
}
acc.AssertContainsTaggedFields(t, "filestat", fields1, tags1)
require.True(t, acc.HasPoint("filestat", tags1, "size_bytes", int64(0)))
require.True(t, acc.HasPoint("filestat", tags1, "exists", int64(1)))
require.True(t, acc.HasPoint("filestat", tags1, "md5_sum", "d41d8cd98f00b204e9800998ecf8427e"))
tags2 := map[string]string{
"file": dir + "log2.log",
}
fields2 := map[string]interface{}{
"size_bytes": int64(0),
"exists": int64(1),
"md5_sum": "d41d8cd98f00b204e9800998ecf8427e",
}
acc.AssertContainsTaggedFields(t, "filestat", fields2, tags2)
require.True(t, acc.HasPoint("filestat", tags2, "size_bytes", int64(0)))
require.True(t, acc.HasPoint("filestat", tags2, "exists", int64(1)))
require.True(t, acc.HasPoint("filestat", tags2, "md5_sum", "d41d8cd98f00b204e9800998ecf8427e"))
tags3 := map[string]string{
"file": "/non/existant/file",
}
fields3 := map[string]interface{}{
"exists": int64(0),
}
acc.AssertContainsTaggedFields(t, "filestat", fields3, tags3)
require.True(t, acc.HasPoint("filestat", tags3, "exists", int64(0)))
}
func TestGatherGlob(t *testing.T) {
@@ -104,22 +88,16 @@ func TestGatherGlob(t *testing.T) {
tags1 := map[string]string{
"file": dir + "log1.log",
}
fields1 := map[string]interface{}{
"size_bytes": int64(0),
"exists": int64(1),
"md5_sum": "d41d8cd98f00b204e9800998ecf8427e",
}
acc.AssertContainsTaggedFields(t, "filestat", fields1, tags1)
require.True(t, acc.HasPoint("filestat", tags1, "size_bytes", int64(0)))
require.True(t, acc.HasPoint("filestat", tags1, "exists", int64(1)))
require.True(t, acc.HasPoint("filestat", tags1, "md5_sum", "d41d8cd98f00b204e9800998ecf8427e"))
tags2 := map[string]string{
"file": dir + "log2.log",
}
fields2 := map[string]interface{}{
"size_bytes": int64(0),
"exists": int64(1),
"md5_sum": "d41d8cd98f00b204e9800998ecf8427e",
}
acc.AssertContainsTaggedFields(t, "filestat", fields2, tags2)
require.True(t, acc.HasPoint("filestat", tags2, "size_bytes", int64(0)))
require.True(t, acc.HasPoint("filestat", tags2, "exists", int64(1)))
require.True(t, acc.HasPoint("filestat", tags2, "md5_sum", "d41d8cd98f00b204e9800998ecf8427e"))
}
func TestGatherSuperAsterisk(t *testing.T) {
@@ -136,32 +114,57 @@ func TestGatherSuperAsterisk(t *testing.T) {
tags1 := map[string]string{
"file": dir + "log1.log",
}
fields1 := map[string]interface{}{
"size_bytes": int64(0),
"exists": int64(1),
"md5_sum": "d41d8cd98f00b204e9800998ecf8427e",
}
acc.AssertContainsTaggedFields(t, "filestat", fields1, tags1)
require.True(t, acc.HasPoint("filestat", tags1, "size_bytes", int64(0)))
require.True(t, acc.HasPoint("filestat", tags1, "exists", int64(1)))
require.True(t, acc.HasPoint("filestat", tags1, "md5_sum", "d41d8cd98f00b204e9800998ecf8427e"))
tags2 := map[string]string{
"file": dir + "log2.log",
}
fields2 := map[string]interface{}{
"size_bytes": int64(0),
"exists": int64(1),
"md5_sum": "d41d8cd98f00b204e9800998ecf8427e",
}
acc.AssertContainsTaggedFields(t, "filestat", fields2, tags2)
require.True(t, acc.HasPoint("filestat", tags2, "size_bytes", int64(0)))
require.True(t, acc.HasPoint("filestat", tags2, "exists", int64(1)))
require.True(t, acc.HasPoint("filestat", tags2, "md5_sum", "d41d8cd98f00b204e9800998ecf8427e"))
tags3 := map[string]string{
"file": dir + "test.conf",
}
fields3 := map[string]interface{}{
"size_bytes": int64(104),
"exists": int64(1),
"md5_sum": "5a7e9b77fa25e7bb411dbd17cf403c1f",
}
acc.AssertContainsTaggedFields(t, "filestat", fields3, tags3)
require.True(t, acc.HasPoint("filestat", tags3, "size_bytes", int64(104)))
require.True(t, acc.HasPoint("filestat", tags3, "exists", int64(1)))
require.True(t, acc.HasPoint("filestat", tags3, "md5_sum", "5a7e9b77fa25e7bb411dbd17cf403c1f"))
}
func TestModificationTime(t *testing.T) {
dir := getTestdataDir()
fs := NewFileStat()
fs.Files = []string{
dir + "log1.log",
}
acc := testutil.Accumulator{}
acc.GatherError(fs.Gather)
tags1 := map[string]string{
"file": dir + "log1.log",
}
require.True(t, acc.HasPoint("filestat", tags1, "size_bytes", int64(0)))
require.True(t, acc.HasPoint("filestat", tags1, "exists", int64(1)))
require.True(t, acc.HasInt64Field("filestat", "modification_time"))
}
func TestNoModificationTime(t *testing.T) {
fs := NewFileStat()
fs.Files = []string{
"/non/existant/file",
}
acc := testutil.Accumulator{}
acc.GatherError(fs.Gather)
tags1 := map[string]string{
"file": "/non/existant/file",
}
require.True(t, acc.HasPoint("filestat", tags1, "exists", int64(0)))
require.False(t, acc.HasInt64Field("filestat", "modification_time"))
}
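
The two tests above pin down when the field appears. As a minimal standalone sketch (assuming the plugin stats each file with `os.Stat`; the path below is hypothetical), this is where a `modification_time`-style value comes from and why it cannot exist for a missing file:

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	// Hypothetical path; os.Stat fails when the file does not exist,
	// which is why the tests expect no modification_time field then.
	info, err := os.Stat("testdata/log1.log")
	if err != nil {
		fmt.Println("exists=0, no modification_time")
		return
	}
	fmt.Println("exists=1, modification_time =", info.ModTime().UnixNano())
}
```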
func TestGetMd5(t *testing.T) {

View File

@@ -22,11 +22,11 @@ example configuration with `@id` parameter for http plugin:
[[inputs.fluentd]]
## This plugin reads information exposed by fluentd (using /api/plugins.json endpoint).
##
## Endpoint:
## Endpoint:
## - only one URI is allowed
## - https is not supported
endpoint = "http://localhost:24220/api/plugins.json"
## Define which plugins have to be excluded (based on "type" field - e.g. monitor_agent)
exclude = [
"monitor_agent",

View File

@@ -18,11 +18,11 @@ const (
sampleConfig = `
## This plugin reads information exposed by fluentd (using /api/plugins.json endpoint).
##
## Endpoint:
## Endpoint:
## - only one URI is allowed
## - https is not supported
endpoint = "http://localhost:24220/api/plugins.json"
## Define which plugins have to be excluded (based on "type" field - e.g. monitor_agent)
exclude = [
"monitor_agent",
@@ -148,15 +148,15 @@ func (h *Fluentd) Gather(acc telegraf.Accumulator) error {
}
if p.BufferQueueLength != nil {
tmpFields["buffer_queue_length"] = p.BufferQueueLength
tmpFields["buffer_queue_length"] = *p.BufferQueueLength
}
if p.RetryCount != nil {
tmpFields["retry_count"] = p.RetryCount
tmpFields["retry_count"] = *p.RetryCount
}
if p.BufferTotalQueuedSize != nil {
tmpFields["buffer_total_queued_size"] = p.BufferTotalQueuedSize
tmpFields["buffer_total_queued_size"] = *p.BufferTotalQueuedSize
}
if !((p.BufferQueueLength == nil) && (p.RetryCount == nil) && (p.BufferTotalQueuedSize == nil)) {
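
The dereferences in this hunk matter because the accumulator serializes whatever lands in the fields map, and a pointer is not a usable metric value. A tiny standalone illustration:

```go
package main

import "fmt"

func main() {
	retry := int64(3)
	fields := map[string]interface{}{}

	fields["retry_count"] = &retry // before the fix: a *int64, prints as an address
	fmt.Printf("%v\n", fields["retry_count"])

	fields["retry_count"] = retry // after the fix: the int64 value outputs expect
	fmt.Printf("%v\n", fields["retry_count"])
}
```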

View File

@@ -122,12 +122,6 @@ func Test_parse(t *testing.T) {
}
func Test_Gather(t *testing.T) {
if testing.Short() {
t.Skip("Skipping Gather function test")
}
t.Log("Testing Gather function")
t.Logf("Start HTTP mock (%s) with sampleJSON", fluentdTest.Endpoint)
ts := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
@@ -157,13 +151,13 @@ func Test_Gather(t *testing.T) {
assert.Equal(t, expectedOutput[0].PluginID, acc.Metrics[0].Tags["plugin_id"])
assert.Equal(t, expectedOutput[0].PluginType, acc.Metrics[0].Tags["plugin_type"])
assert.Equal(t, expectedOutput[0].PluginCategory, acc.Metrics[0].Tags["plugin_category"])
assert.Equal(t, expectedOutput[0].RetryCount, acc.Metrics[0].Fields["retry_count"])
assert.Equal(t, *expectedOutput[0].RetryCount, acc.Metrics[0].Fields["retry_count"])
assert.Equal(t, expectedOutput[1].PluginID, acc.Metrics[1].Tags["plugin_id"])
assert.Equal(t, expectedOutput[1].PluginType, acc.Metrics[1].Tags["plugin_type"])
assert.Equal(t, expectedOutput[1].PluginCategory, acc.Metrics[1].Tags["plugin_category"])
assert.Equal(t, expectedOutput[1].RetryCount, acc.Metrics[1].Fields["retry_count"])
assert.Equal(t, expectedOutput[1].BufferQueueLength, acc.Metrics[1].Fields["buffer_queue_length"])
assert.Equal(t, expectedOutput[1].BufferTotalQueuedSize, acc.Metrics[1].Fields["buffer_total_queued_size"])
assert.Equal(t, *expectedOutput[1].RetryCount, acc.Metrics[1].Fields["retry_count"])
assert.Equal(t, *expectedOutput[1].BufferQueueLength, acc.Metrics[1].Fields["buffer_queue_length"])
assert.Equal(t, *expectedOutput[1].BufferTotalQueuedSize, acc.Metrics[1].Fields["buffer_total_queued_size"])
}

View File

@@ -1,5 +1,3 @@
// +build linux
package hddtemp
import (

View File

@@ -1,3 +0,0 @@
// +build !linux
package hddtemp

View File

@@ -8,6 +8,10 @@ The `/write` endpoint supports the `precision` query parameter and can be set to
When chaining Telegraf instances using this plugin, CREATE DATABASE requests receive a 200 OK response with message body `{"results":[]}` but they are not relayed. The output configuration of the Telegraf instance which ultimately submits data to InfluxDB determines the destination database.
Enable TLS by specifying the file names of a service TLS certificate and key.
Enable mutually authenticated TLS and authorize client connections against a signing certificate authority by including a list of allowed CA certificate file names in `tls_allowed_cacerts`.
See: [Telegraf Input Data Formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#influx).
**Example:**
@@ -28,4 +32,11 @@ This is a sample configuration for the plugin.
## timeouts
read_timeout = "10s"
write_timeout = "10s"
## HTTPS
tls_cert= "/etc/telegraf/cert.pem"
tls_key = "/etc/telegraf/key.pem"
## MTLS
tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
```
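
A minimal client-side sketch (hypothetical certificate paths and listener address) of writing line protocol to a listener configured with the mutual-TLS options above:

```go
package main

import (
	"bytes"
	"crypto/tls"
	"crypto/x509"
	"io/ioutil"
	"net/http"
)

func main() {
	// Hypothetical paths; the CA must match the listener's certificate, and the
	// client keypair must be signed by a CA listed in tls_allowed_cacerts.
	caPEM, err := ioutil.ReadFile("/etc/telegraf/ca.pem")
	if err != nil {
		panic(err)
	}
	cas := x509.NewCertPool()
	cas.AppendCertsFromPEM(caPEM)

	clientCert, err := tls.LoadX509KeyPair("/etc/telegraf/client.pem", "/etc/telegraf/client-key.pem")
	if err != nil {
		panic(err)
	}

	client := &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{
				RootCAs:      cas,
				Certificates: []tls.Certificate{clientCert},
			},
		},
	}
	body := bytes.NewBufferString("cpu_load_short,host=server01 value=12.0\n")
	resp, err := client.Post("https://localhost:8186/write?db=mydb", "text/plain", body)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close() // expect 204 No Content on success
}
```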

View File

@@ -3,7 +3,10 @@ package http_listener
import (
"bytes"
"compress/gzip"
"crypto/tls"
"crypto/x509"
"io"
"io/ioutil"
"log"
"net"
"net/http"
@@ -37,6 +40,10 @@ type HTTPListener struct {
MaxLineSize int
Port int
TlsAllowedCacerts []string
TlsCert string
TlsKey string
mu sync.Mutex
wg sync.WaitGroup
@@ -75,6 +82,14 @@ const sampleConfig = `
## Maximum line size allowed to be sent in bytes.
## 0 means to use the default of 65536 bytes (64 kibibytes)
max_line_size = 0
## Set one or more allowed client CA certificate file names to
## enable mutually authenticated TLS connections
tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
## Add service certificate and key
tls_cert = "/etc/telegraf/cert.pem"
tls_key = "/etc/telegraf/key.pem"
`
func (h *HTTPListener) SampleConfig() string {
@@ -117,10 +132,33 @@ func (h *HTTPListener) Start(acc telegraf.Accumulator) error {
h.MaxLineSize = DEFAULT_MAX_LINE_SIZE
}
if h.ReadTimeout.Duration < time.Second {
h.ReadTimeout.Duration = time.Second * 10
}
if h.WriteTimeout.Duration < time.Second {
h.WriteTimeout.Duration = time.Second * 10
}
h.acc = acc
h.pool = NewPool(200, h.MaxLineSize)
var listener, err = net.Listen("tcp", h.ServiceAddress)
tlsConf := h.getTLSConfig()
server := &http.Server{
Addr: h.ServiceAddress,
Handler: h,
ReadTimeout: h.ReadTimeout.Duration,
WriteTimeout: h.WriteTimeout.Duration,
TLSConfig: tlsConf,
}
var err error
var listener net.Listener
if tlsConf != nil {
listener, err = tls.Listen("tcp", h.ServiceAddress, tlsConf)
} else {
listener, err = net.Listen("tcp", h.ServiceAddress)
}
if err != nil {
return err
}
@@ -130,7 +168,7 @@ func (h *HTTPListener) Start(acc telegraf.Accumulator) error {
h.wg.Add(1)
go func() {
defer h.wg.Done()
h.httpListen()
server.Serve(h.listener)
}()
log.Printf("I! Started HTTP listener service on %s\n", h.ServiceAddress)
@@ -149,27 +187,6 @@ func (h *HTTPListener) Stop() {
log.Println("I! Stopped HTTP listener service on ", h.ServiceAddress)
}
// httpListen sets up an http.Server and calls server.Serve.
// like server.Serve, httpListen will always return a non-nil error, for this
// reason, the error returned should probably be ignored.
// see https://golang.org/pkg/net/http/#Server.Serve
func (h *HTTPListener) httpListen() error {
if h.ReadTimeout.Duration < time.Second {
h.ReadTimeout.Duration = time.Second * 10
}
if h.WriteTimeout.Duration < time.Second {
h.WriteTimeout.Duration = time.Second * 10
}
var server = http.Server{
Handler: h,
ReadTimeout: h.ReadTimeout.Duration,
WriteTimeout: h.WriteTimeout.Duration,
}
return server.Serve(h.listener)
}
func (h *HTTPListener) ServeHTTP(res http.ResponseWriter, req *http.Request) {
h.RequestsRecv.Incr(1)
defer h.RequestsServed.Incr(1)
@@ -327,6 +344,38 @@ func badRequest(res http.ResponseWriter) {
res.Write([]byte(`{"error":"http: bad request"}`))
}
func (h *HTTPListener) getTLSConfig() *tls.Config {
tlsConf := &tls.Config{
InsecureSkipVerify: false,
Renegotiation: tls.RenegotiateNever,
}
if len(h.TlsCert) == 0 || len(h.TlsKey) == 0 {
return nil
}
cert, err := tls.LoadX509KeyPair(h.TlsCert, h.TlsKey)
if err != nil {
return nil
}
tlsConf.Certificates = []tls.Certificate{cert}
if h.TlsAllowedCacerts != nil {
tlsConf.ClientAuth = tls.RequireAndVerifyClientCert
clientPool := x509.NewCertPool()
for _, ca := range h.TlsAllowedCacerts {
c, err := ioutil.ReadFile(ca)
if err != nil {
continue
}
clientPool.AppendCertsFromPEM(c)
}
tlsConf.ClientCAs = clientPool
}
return tlsConf
}
func init() {
inputs.Add("http_listener", func() telegraf.Input {
return &HTTPListener{

View File

@@ -2,6 +2,9 @@ package http_listener
import (
"bytes"
"crypto/tls"
"crypto/x509"
"io"
"io/ioutil"
"net/http"
"net/url"
@@ -29,6 +32,84 @@ cpu_load_short,host=server06 value=12.0 1422568543702900257
badMsg = "blahblahblah: 42\n"
emptyMsg = ""
serviceRootPEM = `-----BEGIN CERTIFICATE-----
MIIBxzCCATCgAwIBAgIJAJb7HqN2BzWWMA0GCSqGSIb3DQEBCwUAMBYxFDASBgNV
BAMMC1RlbGVncmFmIENBMB4XDTE3MTEwNDA0MzEwN1oXDTI3MTEwMjA0MzEwN1ow
FjEUMBIGA1UEAwwLVGVsZWdyYWYgQ0EwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJ
AoGBANbkUkK6JQC3rbLcXhLJTS9SX6uXyFwl7bUfpAN5Hm5EqfvG3PnLrogfTGLr
Tq5CRAu/gbbdcMoL9TLv/aaDVnrpV0FslKhqYmkOgT28bdmA7Qtr539aQpMKCfcW
WCnoMcBD5u5h9MsRqpdq+0Mjlsf1H2hSf07jHk5R1T4l8RMXAgMBAAGjHTAbMAwG
A1UdEwQFMAMBAf8wCwYDVR0PBAQDAgEGMA0GCSqGSIb3DQEBCwUAA4GBANSrwvpU
t8ihIhpHqgJZ34DM92CZZ3ZHmH/KyqlnuGzjjpnVZiXVrLDTOzrA0ziVhmefY29w
roHjENbFm54HW97ogxeURuO8HRHIVh2U0rkyVxOfGZiUdINHqsZdSnDY07bzCtSr
Z/KsfWXM5llD1Ig1FyBHpKjyUvfzr73sjm/4
-----END CERTIFICATE-----`
serviceCertPEM = `-----BEGIN CERTIFICATE-----
MIIBzzCCATigAwIBAgIBATANBgkqhkiG9w0BAQsFADAWMRQwEgYDVQQDDAtUZWxl
Z3JhZiBDQTAeFw0xNzExMDQwNDMxMDdaFw0yNzExMDIwNDMxMDdaMBQxEjAQBgNV
BAMMCWxvY2FsaG9zdDCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAsJRss1af
XKrcIjQoAp2kdJIpT2Ya+MRQXJ18b0PP7szh2lisY11kd/HCkd4D4efuIkpszHaN
xwyTOZLOoplxp6fizzgOYjXsJ6SzbO1MQNmq8Ch/+uKiGgFwLX+YxOOsGSDIHNhF
vcBi93cQtCWPBFz6QRQf9yfIAA5KKxUfJcMCAwEAAaMvMC0wCQYDVR0TBAIwADAL
BgNVHQ8EBAMCBSAwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDQYJKoZIhvcNAQELBQAD
gYEAiC3WI4y9vfYz53gw7FKnNK7BBdwRc43x7Pd+5J/cclWyUZPdmcj1UNmv/3rj
2qcMmX06UdgPoHppzNAJePvMVk0vjMBUe9MmYlafMz0h4ma/it5iuldXwmejFcdL
6wWQp7gVTileCEmq9sNvfQN1FmT3EWf4IMdO2MNat/1If0g=
-----END CERTIFICATE-----`
serviceKeyPEM = `-----BEGIN RSA PRIVATE KEY-----
MIICXQIBAAKBgQCwlGyzVp9cqtwiNCgCnaR0kilPZhr4xFBcnXxvQ8/uzOHaWKxj
XWR38cKR3gPh5+4iSmzMdo3HDJM5ks6imXGnp+LPOA5iNewnpLNs7UxA2arwKH/6
4qIaAXAtf5jE46wZIMgc2EW9wGL3dxC0JY8EXPpBFB/3J8gADkorFR8lwwIDAQAB
AoGBAJaFHxfMmjHK77U0UnrQWFSKFy64cftmlL4t/Nl3q7L68PdIKULWZIMeEWZ4
I0UZiFOwr4em83oejQ1ByGSwekEuiWaKUI85IaHfcbt+ogp9hY/XbOEo56OPQUAd
bEZv1JqJOqta9Ug1/E1P9LjEEyZ5F5ubx7813rxAE31qKtKJAkEA1zaMlCWIr+Rj
hGvzv5rlHH3wbOB4kQFXO4nqj3J/ttzR5QiJW24STMDcbNngFlVcDVju56LrNTiD
dPh9qvl7nwJBANILguR4u33OMksEZTYB7nQZSurqXsq6382zH7pTl29ANQTROHaM
PKC8dnDWq8RGTqKuvWblIzzGIKqIMovZo10CQC96T0UXirITFolOL3XjvAuvFO1Q
EAkdXJs77805m0dCK+P1IChVfiAEpBw3bKJArpAbQIlFfdI953JUp5SieU0CQEub
BSSEKMjh/cxu6peEHnb/262vayuCFKkQPu1sxWewLuVrAe36EKCy9dcsDmv5+rgo
Odjdxc9Madm4aKlaT6kCQQCpAgeblDrrxTrNQ+Typzo37PlnQrvI+0EceAUuJ72G
P0a+YZUeHNRqT2pPN9lMTAZGGi3CtcF2XScbLNEBeXge
-----END RSA PRIVATE KEY-----`
clientRootPEM = serviceRootPEM
clientCertPEM = `-----BEGIN CERTIFICATE-----
MIIBzjCCATegAwIBAgIBAjANBgkqhkiG9w0BAQsFADAWMRQwEgYDVQQDDAtUZWxl
Z3JhZiBDQTAeFw0xNzExMDQwNDMxMDdaFw0yNzExMDIwNDMxMDdaMBMxETAPBgNV
BAMMCHRlbGVncmFmMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDP2IMqyOqI
sJjwBprrz8WPzmlrpyYikQ4XSCSJB3DSTIO+igqMpBUTj3vLlOzsHfVVot1WRqc6
3esM4JE92rc6S73xi4g8L/r8cPIHW4hvFJdMti4UkJBWim8ArSbFqnZjcR19G3tG
LUOiXAUG3nWzMzoEsPruvV1dkKRbJVE4MwIDAQABoy8wLTAJBgNVHRMEAjAAMAsG
A1UdDwQEAwIHgDATBgNVHSUEDDAKBggrBgEFBQcDAjANBgkqhkiG9w0BAQsFAAOB
gQCHxMk38XNxL9nPFBYo3JqITJCFswu6/NLHwDBXCuZKl53rUuFWduiO+1OuScKQ
sQ79W0jHsWRKGOUFrF5/Gdnh8AlkVaITVlcmhdAOFCEbeGpeEvLuuK6grckPitxy
bRF5oM4TCLKKAha60Ir41rk2bomZM9+NZu+Bm+csDqCoxQ==
-----END CERTIFICATE-----`
clientKeyPEM = `-----BEGIN RSA PRIVATE KEY-----
MIICXAIBAAKBgQDP2IMqyOqIsJjwBprrz8WPzmlrpyYikQ4XSCSJB3DSTIO+igqM
pBUTj3vLlOzsHfVVot1WRqc63esM4JE92rc6S73xi4g8L/r8cPIHW4hvFJdMti4U
kJBWim8ArSbFqnZjcR19G3tGLUOiXAUG3nWzMzoEsPruvV1dkKRbJVE4MwIDAQAB
AoGAFzb/r4+xYoMXEfgq5ZvXXTCY5cVNpR6+jCsqqYODPnn9XRLeCsdo8z5bfWms
7NKLzHzca/6IPzL6Rf3vOxFq1YyIZfYVHH+d63/9blAm3Iajjp1W2yW5aj9BJjTb
nm6F0RfuW/SjrZ9IXxTZhSpCklPmUzVZpzvwV3KGeVTVCEECQQDoavCeOwLuqDpt
0aM9GMFUpOU7kLPDuicSwCDaTae4kN2rS17Zki41YXe8A8+509IEN7mK09Vq9HxY
SX6EmV1FAkEA5O9QcCHEa8P12EmUC8oqD2bjq6o7JjUIRlKinwZTlooMJYZw98gA
FVSngTUvLVCVIvSdjldXPOGgfYiccTZrFwJAfHS3gKOtAEuJbkEyHodhD4h1UB4+
hPLr9Xh4ny2yQH0ilpV3px5GLEOTMFUCKUoqTiPg8VxaDjn5U/WXED5n2QJAR4J1
NsFlcGACj+/TvacFYlA6N2nyFeokzoqLX28Ddxdh2erXqJ4hYIhT1ik9tkLggs2z
1T1084BquCuO6lIcOwJBALX4xChoMUF9k0IxSQzlz//seQYDkQNsE7y9IgAOXkzp
RaR4pzgPbnKj7atG+2dBnffWfE+1Mcy0INDAO6WxPg0=
-----END RSA PRIVATE KEY-----`
)
var (
initClient sync.Once
client *http.Client
initServiceCertFiles sync.Once
allowedCAFiles []string
serviceCAFiles []string
serviceCertFile string
serviceKeyFile string
)
func newTestHTTPListener() *HTTPListener {
@@ -38,9 +119,79 @@ func newTestHTTPListener() *HTTPListener {
return listener
}
func createURL(listener *HTTPListener, path string, rawquery string) string {
func newTestHTTPSListener() *HTTPListener {
initServiceCertFiles.Do(func() {
acaf, err := ioutil.TempFile("", "allowedCAFile.crt")
if err != nil {
panic(err)
}
defer acaf.Close()
_, err = io.Copy(acaf, bytes.NewReader([]byte(clientRootPEM)))
allowedCAFiles = []string{acaf.Name()}
scaf, err := ioutil.TempFile("", "serviceCAFile.crt")
if err != nil {
panic(err)
}
defer scaf.Close()
_, err = io.Copy(scaf, bytes.NewReader([]byte(serviceRootPEM)))
serviceCAFiles = []string{scaf.Name()}
scf, err := ioutil.TempFile("", "serviceCertFile.crt")
if err != nil {
panic(err)
}
defer scf.Close()
_, err = io.Copy(scf, bytes.NewReader([]byte(serviceCertPEM)))
serviceCertFile = scf.Name()
skf, err := ioutil.TempFile("", "serviceKeyFile.crt")
if err != nil {
panic(err)
}
defer skf.Close()
_, err = io.Copy(skf, bytes.NewReader([]byte(serviceKeyPEM)))
serviceKeyFile = skf.Name()
})
listener := &HTTPListener{
ServiceAddress: ":0",
TlsAllowedCacerts: allowedCAFiles,
TlsCert: serviceCertFile,
TlsKey: serviceKeyFile,
}
return listener
}
func getHTTPSClient() *http.Client {
initClient.Do(func() {
cas := x509.NewCertPool()
cas.AppendCertsFromPEM([]byte(serviceRootPEM))
clientCert, err := tls.X509KeyPair([]byte(clientCertPEM), []byte(clientKeyPEM))
if err != nil {
panic(err)
}
client = &http.Client{
Transport: &http.Transport{
TLSClientConfig: &tls.Config{
RootCAs: cas,
Certificates: []tls.Certificate{clientCert},
MinVersion: tls.VersionTLS12,
MaxVersion: tls.VersionTLS12,
CipherSuites: []uint16{tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256},
Renegotiation: tls.RenegotiateNever,
InsecureSkipVerify: false,
},
},
}
})
return client
}
func createURL(listener *HTTPListener, scheme string, path string, rawquery string) string {
u := url.URL{
Scheme: "http",
Scheme: scheme,
Host: "localhost:" + strconv.Itoa(listener.Port),
Path: path,
RawQuery: rawquery,
@@ -48,6 +199,45 @@ func createURL(listener *HTTPListener, path string, rawquery string) string {
return u.String()
}
func TestWriteHTTPSNoClientAuth(t *testing.T) {
listener := newTestHTTPSListener()
listener.TlsAllowedCacerts = nil
acc := &testutil.Accumulator{}
require.NoError(t, listener.Start(acc))
defer listener.Stop()
cas := x509.NewCertPool()
cas.AppendCertsFromPEM([]byte(serviceRootPEM))
noClientAuthClient := &http.Client{
Transport: &http.Transport{
TLSClientConfig: &tls.Config{
RootCAs: cas,
},
},
}
// post single message to listener
resp, err := noClientAuthClient.Post(createURL(listener, "https", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsg)))
require.NoError(t, err)
resp.Body.Close()
require.EqualValues(t, 204, resp.StatusCode)
}
func TestWriteHTTPSWithClientAuth(t *testing.T) {
listener := newTestHTTPSListener()
acc := &testutil.Accumulator{}
require.NoError(t, listener.Start(acc))
defer listener.Stop()
// post single message to listener
resp, err := getHTTPSClient().Post(createURL(listener, "https", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsg)))
require.NoError(t, err)
resp.Body.Close()
require.EqualValues(t, 204, resp.StatusCode)
}
func TestWriteHTTP(t *testing.T) {
listener := newTestHTTPListener()
@@ -56,8 +246,9 @@ func TestWriteHTTP(t *testing.T) {
defer listener.Stop()
// post single message to listener
resp, err := http.Post(createURL(listener, "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsg)))
resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsg)))
require.NoError(t, err)
resp.Body.Close()
require.EqualValues(t, 204, resp.StatusCode)
acc.Wait(1)
@@ -67,8 +258,9 @@ func TestWriteHTTP(t *testing.T) {
)
// post multiple message to listener
resp, err = http.Post(createURL(listener, "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgs)))
resp, err = http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgs)))
require.NoError(t, err)
resp.Body.Close()
require.EqualValues(t, 204, resp.StatusCode)
acc.Wait(2)
@@ -82,8 +274,9 @@ func TestWriteHTTP(t *testing.T) {
}
// Post a gigantic metric to the listener and verify that an error is returned:
resp, err = http.Post(createURL(listener, "/write", "db=mydb"), "", bytes.NewBuffer([]byte(hugeMetric)))
resp, err = http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(hugeMetric)))
require.NoError(t, err)
resp.Body.Close()
require.EqualValues(t, 400, resp.StatusCode)
acc.Wait(3)
@@ -102,8 +295,9 @@ func TestWriteHTTPNoNewline(t *testing.T) {
defer listener.Stop()
// post single message to listener
resp, err := http.Post(createURL(listener, "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgNoNewline)))
resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgNoNewline)))
require.NoError(t, err)
resp.Body.Close()
require.EqualValues(t, 204, resp.StatusCode)
acc.Wait(1)
@@ -124,8 +318,9 @@ func TestWriteHTTPMaxLineSizeIncrease(t *testing.T) {
defer listener.Stop()
// Post a gigantic metric to the listener and verify that it writes OK this time:
resp, err := http.Post(createURL(listener, "/write", "db=mydb"), "", bytes.NewBuffer([]byte(hugeMetric)))
resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(hugeMetric)))
require.NoError(t, err)
resp.Body.Close()
require.EqualValues(t, 204, resp.StatusCode)
}
@@ -139,8 +334,9 @@ func TestWriteHTTPVerySmallMaxBody(t *testing.T) {
require.NoError(t, listener.Start(acc))
defer listener.Stop()
resp, err := http.Post(createURL(listener, "/write", ""), "", bytes.NewBuffer([]byte(hugeMetric)))
resp, err := http.Post(createURL(listener, "http", "/write", ""), "", bytes.NewBuffer([]byte(hugeMetric)))
require.NoError(t, err)
resp.Body.Close()
require.EqualValues(t, 413, resp.StatusCode)
}
@@ -154,8 +350,9 @@ func TestWriteHTTPVerySmallMaxLineSize(t *testing.T) {
require.NoError(t, listener.Start(acc))
defer listener.Stop()
resp, err := http.Post(createURL(listener, "/write", ""), "", bytes.NewBuffer([]byte(testMsgs)))
resp, err := http.Post(createURL(listener, "http", "/write", ""), "", bytes.NewBuffer([]byte(testMsgs)))
require.NoError(t, err)
resp.Body.Close()
require.EqualValues(t, 204, resp.StatusCode)
hostTags := []string{"server02", "server03",
@@ -179,8 +376,9 @@ func TestWriteHTTPLargeLinesSkipped(t *testing.T) {
require.NoError(t, listener.Start(acc))
defer listener.Stop()
resp, err := http.Post(createURL(listener, "/write", ""), "", bytes.NewBuffer([]byte(hugeMetric+testMsgs)))
resp, err := http.Post(createURL(listener, "http", "/write", ""), "", bytes.NewBuffer([]byte(hugeMetric+testMsgs)))
require.NoError(t, err)
resp.Body.Close()
require.EqualValues(t, 400, resp.StatusCode)
hostTags := []string{"server02", "server03",
@@ -205,7 +403,7 @@ func TestWriteHTTPGzippedData(t *testing.T) {
data, err := ioutil.ReadFile("./testdata/testmsgs.gz")
require.NoError(t, err)
req, err := http.NewRequest("POST", createURL(listener, "/write", ""), bytes.NewBuffer(data))
req, err := http.NewRequest("POST", createURL(listener, "http", "/write", ""), bytes.NewBuffer(data))
require.NoError(t, err)
req.Header.Set("Content-Encoding", "gzip")
@@ -240,8 +438,9 @@ func TestWriteHTTPHighTraffic(t *testing.T) {
go func(innerwg *sync.WaitGroup) {
defer innerwg.Done()
for i := 0; i < 500; i++ {
resp, err := http.Post(createURL(listener, "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgs)))
resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgs)))
require.NoError(t, err)
resp.Body.Close()
require.EqualValues(t, 204, resp.StatusCode)
}
}(&wg)
@@ -262,8 +461,9 @@ func TestReceive404ForInvalidEndpoint(t *testing.T) {
defer listener.Stop()
// post single message to listener
resp, err := http.Post(createURL(listener, "/foobar", ""), "", bytes.NewBuffer([]byte(testMsg)))
resp, err := http.Post(createURL(listener, "http", "/foobar", ""), "", bytes.NewBuffer([]byte(testMsg)))
require.NoError(t, err)
resp.Body.Close()
require.EqualValues(t, 404, resp.StatusCode)
}
@@ -275,8 +475,9 @@ func TestWriteHTTPInvalid(t *testing.T) {
defer listener.Stop()
// post single message to listener
resp, err := http.Post(createURL(listener, "/write", "db=mydb"), "", bytes.NewBuffer([]byte(badMsg)))
resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(badMsg)))
require.NoError(t, err)
resp.Body.Close()
require.EqualValues(t, 400, resp.StatusCode)
}
@@ -288,8 +489,9 @@ func TestWriteHTTPEmpty(t *testing.T) {
defer listener.Stop()
// post single message to listener
resp, err := http.Post(createURL(listener, "/write", "db=mydb"), "", bytes.NewBuffer([]byte(emptyMsg)))
resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(emptyMsg)))
require.NoError(t, err)
resp.Body.Close()
require.EqualValues(t, 204, resp.StatusCode)
}
@@ -302,13 +504,14 @@ func TestQueryAndPingHTTP(t *testing.T) {
// post query to listener
resp, err := http.Post(
createURL(listener, "/query", "db=&q=CREATE+DATABASE+IF+NOT+EXISTS+%22mydb%22"), "", nil)
createURL(listener, "http", "/query", "db=&q=CREATE+DATABASE+IF+NOT+EXISTS+%22mydb%22"), "", nil)
require.NoError(t, err)
require.EqualValues(t, 200, resp.StatusCode)
// post ping to listener
resp, err = http.Post(createURL(listener, "/ping", ""), "", nil)
resp, err = http.Post(createURL(listener, "http", "/ping", ""), "", nil)
require.NoError(t, err)
resp.Body.Close()
require.EqualValues(t, 204, resp.StatusCode)
}
@@ -321,8 +524,9 @@ func TestWriteWithPrecision(t *testing.T) {
msg := "xyzzy value=42 1422568543\n"
resp, err := http.Post(
createURL(listener, "/write", "precision=s"), "", bytes.NewBuffer([]byte(msg)))
createURL(listener, "http", "/write", "precision=s"), "", bytes.NewBuffer([]byte(msg)))
require.NoError(t, err)
resp.Body.Close()
require.EqualValues(t, 204, resp.StatusCode)
acc.Wait(1)

View File

@@ -98,6 +98,7 @@ func (h *HTTPResponse) createHttpClient() (*http.Client, error) {
}
client := &http.Client{
Transport: &http.Transport{
Proxy: http.ProxyFromEnvironment,
DisableKeepAlives: true,
TLSClientConfig: tlsCfg,
},
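
A quick standalone check (hypothetical proxy address) of what `http.ProxyFromEnvironment` adds here: the transport now routes requests through `HTTP_PROXY`/`HTTPS_PROXY` when those variables are set.

```go
package main

import (
	"fmt"
	"net/http"
	"os"
)

func main() {
	os.Setenv("HTTP_PROXY", "http://127.0.0.1:3128") // hypothetical proxy
	req, err := http.NewRequest("GET", "http://example.com/", nil)
	if err != nil {
		panic(err)
	}
	proxyURL, err := http.ProxyFromEnvironment(req)
	if err != nil {
		panic(err)
	}
	fmt.Println(proxyURL) // http://127.0.0.1:3128
}
```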

View File

@@ -1,6 +1,7 @@
package httpjson
import (
"bytes"
"fmt"
"io/ioutil"
"net/http"
@@ -15,6 +16,10 @@ import (
"github.com/influxdata/telegraf/plugins/parsers"
)
var (
utf8BOM = []byte("\xef\xbb\xbf")
)
// HttpJson struct
type HttpJson struct {
Name string
@@ -170,7 +175,6 @@ func (h *HttpJson) gatherServer(
serverURL string,
) error {
resp, responseTime, err := h.sendRequest(serverURL)
if err != nil {
return err
}
@@ -266,6 +270,7 @@ func (h *HttpJson) sendRequest(serverURL string) (string, float64, error) {
if err != nil {
return string(body), responseTime, err
}
body = bytes.TrimPrefix(body, utf8BOM)
// Process response
if resp.StatusCode != http.StatusOK {
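
The BOM handling is easy to verify standalone; `bytes.TrimPrefix` is a no-op when the prefix is absent, so plain responses pass through untouched:

```go
package main

import (
	"bytes"
	"fmt"
)

func main() {
	utf8BOM := []byte("\xef\xbb\xbf")
	body := []byte("\xef\xbb\xbf[{\"value\":17}]")
	body = bytes.TrimPrefix(body, utf8BOM) // strip BOM so the JSON parser sees '[' first
	fmt.Println(string(body))              // [{"value":17}]
}
```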

View File

@@ -477,15 +477,13 @@ func TestHttpJsonBadJson(t *testing.T) {
assert.Equal(t, 0, acc.NFields())
}
// Test response to empty string as response objectgT
// Test response to empty string as response object
func TestHttpJsonEmptyResponse(t *testing.T) {
httpjson := genMockHttpJson(empty, 200)
var acc testutil.Accumulator
err := acc.GatherError(httpjson[0].Gather)
assert.Error(t, err)
assert.Equal(t, 0, acc.NFields())
assert.NoError(t, err)
}
// Test that the proper values are ignored or collected
@@ -560,3 +558,18 @@ func TestHttpJsonArray200Tags(t *testing.T) {
}
}
}
var jsonBOM = []byte("\xef\xbb\xbf[{\"value\":17}]")
// TestHttpJsonBOM tests that UTF-8 JSON with a BOM can be parsed
func TestHttpJsonBOM(t *testing.T) {
httpjson := genMockHttpJson(string(jsonBOM), 200)
for _, service := range httpjson {
if service.Name == "other_webapp" {
var acc testutil.Accumulator
err := acc.GatherError(service.Gather)
require.NoError(t, err)
}
}
}

View File

@@ -1,8 +1,7 @@
# Telegraf ipmi plugin
# IPMI Sensor Input Plugin
Get bare metal metrics using the command line utility `ipmitool`
see ipmitool(https://sourceforge.net/projects/ipmitool/files/ipmitool/)
Get bare metal metrics using the command line utility
[`ipmitool`](https://sourceforge.net/projects/ipmitool/files/ipmitool/).
If no servers are specified, the plugin will query the local machine sensor stats via the following command:
@@ -16,18 +15,7 @@ When one or more servers are specified, the plugin will use the following comman
ipmitool -I lan -H SERVER -U USERID -P PASSW0RD sdr
```
## Measurements
- ipmi_sensor:
* Tags: `name`, `unit`
* Fields:
- status
- value
The `server` tag will be made available when retrieving stats from remote server(s).
## Configuration
### Configuration
```toml
# Read metrics from the bare metal servers via IPMI
@@ -52,26 +40,49 @@ The `server` tag will be made available when retrieving stats from remote server
timeout = "20s"
```
## Output
### Measurements
- ipmi_sensor:
- tags:
- name
- unit
- server (only when retrieving stats from remote servers)
- fields:
- status (int)
- value (float)
#### Permissions
When gathering from the local system, Telegraf needs permission to access the
ipmi device node. When using udev, you can create the device node granting
`rw` permissions to the `telegraf` group by adding the following rule to
`/etc/udev/rules.d/52-telegraf-ipmi.rules`:
```
KERNEL=="ipmi*", MODE="660", GROUP="telegraf"
```
### Example Output
When retrieving stats from a remote server:
```
> ipmi_sensor,server=10.20.2.203,unit=degrees_c,name=ambient_temp status=1i,value=20 1458488465012559455
> ipmi_sensor,server=10.20.2.203,unit=feet,name=altitude status=1i,value=80 1458488465012688613
> ipmi_sensor,server=10.20.2.203,unit=watts,name=avg_power status=1i,value=220 1458488465012776511
> ipmi_sensor,server=10.20.2.203,unit=volts,name=planar_3.3v status=1i,value=3.28 1458488465012861875
> ipmi_sensor,server=10.20.2.203,unit=volts,name=planar_vbat status=1i,value=3.04 1458488465013072508
> ipmi_sensor,server=10.20.2.203,unit=rpm,name=fan_1a_tach status=1i,value=2610 1458488465013137932
> ipmi_sensor,server=10.20.2.203,unit=rpm,name=fan_1b_tach status=1i,value=1775 1458488465013279896
ipmi_sensor,server=10.20.2.203,unit=degrees_c,name=ambient_temp status=1i,value=20 1458488465012559455
ipmi_sensor,server=10.20.2.203,unit=feet,name=altitude status=1i,value=80 1458488465012688613
ipmi_sensor,server=10.20.2.203,unit=watts,name=avg_power status=1i,value=220 1458488465012776511
ipmi_sensor,server=10.20.2.203,unit=volts,name=planar_3.3v status=1i,value=3.28 1458488465012861875
ipmi_sensor,server=10.20.2.203,unit=volts,name=planar_vbat status=1i,value=3.04 1458488465013072508
ipmi_sensor,server=10.20.2.203,unit=rpm,name=fan_1a_tach status=1i,value=2610 1458488465013137932
ipmi_sensor,server=10.20.2.203,unit=rpm,name=fan_1b_tach status=1i,value=1775 1458488465013279896
```
When retrieving stats from the local machine (no server specified):
```
> ipmi_sensor,unit=degrees_c,name=ambient_temp status=1i,value=20 1458488465012559455
> ipmi_sensor,unit=feet,name=altitude status=1i,value=80 1458488465012688613
> ipmi_sensor,unit=watts,name=avg_power status=1i,value=220 1458488465012776511
> ipmi_sensor,unit=volts,name=planar_3.3v status=1i,value=3.28 1458488465012861875
> ipmi_sensor,unit=volts,name=planar_vbat status=1i,value=3.04 1458488465013072508
> ipmi_sensor,unit=rpm,name=fan_1a_tach status=1i,value=2610 1458488465013137932
> ipmi_sensor,unit=rpm,name=fan_1b_tach status=1i,value=1775 1458488465013279896
ipmi_sensor,unit=degrees_c,name=ambient_temp status=1i,value=20 1458488465012559455
ipmi_sensor,unit=feet,name=altitude status=1i,value=80 1458488465012688613
ipmi_sensor,unit=watts,name=avg_power status=1i,value=220 1458488465012776511
ipmi_sensor,unit=volts,name=planar_3.3v status=1i,value=3.28 1458488465012861875
ipmi_sensor,unit=volts,name=planar_vbat status=1i,value=3.04 1458488465013072508
ipmi_sensor,unit=rpm,name=fan_1a_tach status=1i,value=2610 1458488465013137932
ipmi_sensor,unit=rpm,name=fan_1b_tach status=1i,value=1775 1458488465013279896
```

View File

@@ -35,7 +35,7 @@ var sampleConfig = `
##
# servers = ["USERID:PASSW0RD@lan(192.168.1.1)"]
## Recomended: use metric 'interval' that is a multiple of 'timeout' to avoid
## Recommended: use metric 'interval' that is a multiple of 'timeout' to avoid
## gaps or overlap in pulled data
interval = "30s"

View File

@@ -95,7 +95,7 @@ const measurement = "iptables"
var errParse = errors.New("Cannot parse iptables list information")
var chainNameRe = regexp.MustCompile(`^Chain\s+(\S+)`)
var fieldsHeaderRe = regexp.MustCompile(`^\s*pkts\s+bytes\s+`)
var commentRe = regexp.MustCompile(`\s*/\*\s*(.+?)\s*\*/\s*`)
var valuesRe = regexp.MustCompile(`^\s*(\d+)\s+(\d+)\s+.*?/\*\s*(.+?)\s*\*/\s*`)
func (ipt *Iptables) parseAndGather(data string, acc telegraf.Accumulator) error {
lines := strings.Split(data, "\n")
@@ -110,21 +110,14 @@ func (ipt *Iptables) parseAndGather(data string, acc telegraf.Accumulator) error
return errParse
}
for _, line := range lines[2:] {
tokens := strings.Fields(line)
if len(tokens) < 10 {
matches := valuesRe.FindStringSubmatch(line)
if len(matches) != 4 {
continue
}
pkts := tokens[0]
bytes := tokens[1]
end := strings.Join(tokens[9:], " ")
matches := commentRe.FindStringSubmatch(end)
if matches == nil {
continue
}
comment := matches[1]
pkts := matches[1]
bytes := matches[2]
comment := matches[3]
tags := map[string]string{"table": ipt.Table, "chain": mchain[1], "ruleid": comment}
fields := make(map[string]interface{})
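
A standalone check (with a made-up rule line) of the single-regexp parse this hunk switches to; packets, bytes, and the `/* comment */` rule id are captured in one pass instead of tokenizing on whitespace:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	valuesRe := regexp.MustCompile(`^\s*(\d+)\s+(\d+)\s+.*?/\*\s*(.+?)\s*\*/\s*`)
	line := `  123   456 ACCEPT tcp -- eth0 * 0.0.0.0/0 0.0.0.0/0 /* ssh_in */`
	if m := valuesRe.FindStringSubmatch(line); len(m) == 4 {
		fmt.Println("pkts:", m[1], "bytes:", m[2], "ruleid:", m[3])
	}
}
```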

View File

@@ -81,7 +81,7 @@ func TestIptables_Gather(t *testing.T) {
K 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0
`},
},
{ // 8 - Multiple rows, multipe chains => no error
{ // 8 - Multiple rows, multiple chains => no error
table: "filter",
chains: []string{"INPUT", "FORWARD"},
values: []string{
@@ -154,68 +154,85 @@ func TestIptables_Gather(t *testing.T) {
tags: []map[string]string{},
fields: [][]map[string]interface{}{},
},
{ // 11 - all target and ports
table: "all_recv",
chains: []string{"accountfwd"},
values: []string{
`Chain accountfwd (1 references)
pkts bytes target prot opt in out source destination
123 456 all -- eth0 * 0.0.0.0/0 0.0.0.0/0 /* all_recv */
`},
tags: []map[string]string{
map[string]string{"table": "all_recv", "chain": "accountfwd", "ruleid": "all_recv"},
},
fields: [][]map[string]interface{}{
{map[string]interface{}{"pkts": uint64(123), "bytes": uint64(456)}},
},
},
}
for i, tt := range tests {
i++
ipt := &Iptables{
Table: tt.table,
Chains: tt.chains,
lister: func(table, chain string) (string, error) {
if len(tt.values) > 0 {
v := tt.values[0]
tt.values = tt.values[1:]
return v, nil
}
return "", nil
},
}
acc := new(testutil.Accumulator)
err := acc.GatherError(ipt.Gather)
if !reflect.DeepEqual(tt.err, err) {
t.Errorf("%d: expected error '%#v' got '%#v'", i, tt.err, err)
}
if tt.table == "" {
n := acc.NFields()
if n != 0 {
t.Errorf("%d: expected 0 fields if empty table got %d", i, n)
t.Run(tt.table, func(t *testing.T) {
i++
ipt := &Iptables{
Table: tt.table,
Chains: tt.chains,
lister: func(table, chain string) (string, error) {
if len(tt.values) > 0 {
v := tt.values[0]
tt.values = tt.values[1:]
return v, nil
}
return "", nil
},
}
continue
}
if len(tt.chains) == 0 {
n := acc.NFields()
if n != 0 {
t.Errorf("%d: expected 0 fields if empty chains got %d", i, n)
acc := new(testutil.Accumulator)
err := acc.GatherError(ipt.Gather)
if !reflect.DeepEqual(tt.err, err) {
t.Errorf("%d: expected error '%#v' got '%#v'", i, tt.err, err)
}
continue
}
if len(tt.tags) == 0 {
n := acc.NFields()
if n != 0 {
t.Errorf("%d: expected 0 values got %d", i, n)
if tt.table == "" {
n := acc.NFields()
if n != 0 {
t.Errorf("%d: expected 0 fields if empty table got %d", i, n)
}
return
}
continue
}
n := 0
for j, tags := range tt.tags {
for k, fields := range tt.fields[j] {
if len(acc.Metrics) < n+1 {
t.Errorf("%d: expected at least %d values got %d", i, n+1, len(acc.Metrics))
break
if len(tt.chains) == 0 {
n := acc.NFields()
if n != 0 {
t.Errorf("%d: expected 0 fields if empty chains got %d", i, n)
}
m := acc.Metrics[n]
if !reflect.DeepEqual(m.Measurement, measurement) {
t.Errorf("%d %d %d: expected measurement '%#v' got '%#v'\n", i, j, k, measurement, m.Measurement)
}
if !reflect.DeepEqual(m.Tags, tags) {
t.Errorf("%d %d %d: expected tags\n%#v got\n%#v\n", i, j, k, tags, m.Tags)
}
if !reflect.DeepEqual(m.Fields, fields) {
t.Errorf("%d %d %d: expected fields\n%#v got\n%#v\n", i, j, k, fields, m.Fields)
}
n++
return
}
}
if len(tt.tags) == 0 {
n := acc.NFields()
if n != 0 {
t.Errorf("%d: expected 0 values got %d", i, n)
}
return
}
n := 0
for j, tags := range tt.tags {
for k, fields := range tt.fields[j] {
if len(acc.Metrics) < n+1 {
t.Errorf("%d: expected at least %d values got %d", i, n+1, len(acc.Metrics))
break
}
m := acc.Metrics[n]
if !reflect.DeepEqual(m.Measurement, measurement) {
t.Errorf("%d %d %d: expected measurement '%#v' got '%#v'\n", i, j, k, measurement, m.Measurement)
}
if !reflect.DeepEqual(m.Tags, tags) {
t.Errorf("%d %d %d: expected tags\n%#v got\n%#v\n", i, j, k, tags, m.Tags)
}
if !reflect.DeepEqual(m.Fields, fields) {
t.Errorf("%d %d %d: expected fields\n%#v got\n%#v\n", i, j, k, fields, m.Fields)
}
n++
}
}
})
}
}

View File

@@ -1,5 +1,7 @@
# Telegraf plugin: Jolokia
**Deprecated in version 1.5:** Please use the [jolokia2](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2) plugin.
#### Configuration
```toml

View File

@@ -5,6 +5,7 @@ import (
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"net/url"
"time"
@@ -59,6 +60,10 @@ type Jolokia struct {
}
const sampleConfig = `
# DEPRECATED: the jolokia plugin has been deprecated in favor of the
# jolokia2 plugin
# see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2
## This is the context root used to compose the jolokia url
## NOTE that Jolokia requires a trailing slash at the end of the context root
## NOTE that your jolokia security policy must allow for POST requests.
@@ -254,6 +259,10 @@ func (j *Jolokia) extractValues(measurement string, value interface{}, fields ma
func (j *Jolokia) Gather(acc telegraf.Accumulator) error {
if j.jClient == nil {
log.Println("W! DEPRECATED: the jolokia plugin has been deprecated " +
"in favor of the jolokia2 plugin " +
"(https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2)")
tr := &http.Transport{ResponseHeaderTimeout: j.ResponseHeaderTimeout.Duration}
j.jClient = &JolokiaClientImpl{&http.Client{
Transport: tr,

View File

@@ -0,0 +1,169 @@
# Jolokia2 Input Plugins
The [Jolokia](http://jolokia.org) _agent_ and _proxy_ input plugins collect JMX metrics from an HTTP endpoint using Jolokia's [JSON-over-HTTP protocol](https://jolokia.org/reference/html/protocol.html).
## Jolokia Agent Configuration
The `jolokia2_agent` input plugin reads JMX metrics from one or more [Jolokia agent](https://jolokia.org/agent/jvm.html) REST endpoints.
```toml
[[inputs.jolokia2_agent]]
urls = ["http://agent:8080/jolokia"]
[[inputs.jolokia2_agent.metric]]
name = "jvm_runtime"
mbean = "java.lang:type=Runtime"
paths = ["Uptime"]
```
Optionally, specify SSL options for communicating with agents:
```toml
[[inputs.jolokia2_agent]]
urls = ["https://agent:8080/jolokia"]
ssl_ca = "/var/private/ca.pem"
ssl_cert = "/var/private/client.pem"
ssl_key = "/var/private/client-key.pem"
#insecure_skip_verify = false
[[inputs.jolokia2_agent.metric]]
name = "jvm_runtime"
mbean = "java.lang:type=Runtime"
paths = ["Uptime"]
```
## Jolokia Proxy Configuration
The `jolokia2_proxy` input plugin reads JMX metrics from one or more _targets_ by interacting with a [Jolokia proxy](https://jolokia.org/features/proxy.html) REST endpoint.
```toml
[[inputs.jolokia2_proxy]]
url = "http://proxy:8080/jolokia"
#default_target_username = ""
#default_target_password = ""
[[inputs.jolokia2_proxy.target]]
url = "service:jmx:rmi:///jndi/rmi://targethost:9999/jmxrmi"
# username = ""
# password = ""
[[inputs.jolokia2_proxy.metric]]
name = "jvm_runtime"
mbean = "java.lang:type=Runtime"
paths = ["Uptime"]
```
Optionally, specify SSL options for communicating with proxies:
```toml
[[inputs.jolokia2_proxy]]
url = "https://proxy:8080/jolokia"
ssl_ca = "/var/private/ca.pem"
ssl_cert = "/var/private/client.pem"
ssl_key = "/var/private/client-key.pem"
#insecure_skip_verify = false
#default_target_username = ""
#default_target_password = ""
[[inputs.jolokia2_proxy.target]]
url = "service:jmx:rmi:///jndi/rmi://targethost:9999/jmxrmi"
# username = ""
# password = ""
[[inputs.jolokia2_proxy.metric]]
name = "jvm_runtime"
mbean = "java.lang:type=Runtime"
paths = ["Uptime"]
```
## Jolokia Metric Configuration
Each `metric` declaration generates a Jolokia request to fetch telemetry from a JMX MBean.
| Key | Required | Description |
|----------------|----------|-------------|
| `mbean` | yes | The object name of a JMX MBean. MBean property-key values can contain a wildcard `*`, allowing you to fetch multiple MBeans with one declaration. |
| `paths` | no | A list of MBean attributes to read. |
| `tag_keys` | no | A list of MBean property-key names to convert into tags. The property-key name becomes the tag name, while the property-key value becomes the tag value. |
| `tag_prefix` | no | A string to prepend to the tag names produced by this `metric` declaration. |
| `field_name` | no | A string to set as the name of the field produced by this metric; can contain substitutions. |
| `field_prefix` | no | A string to prepend to the field names produced by this `metric` declaration; can contain substitutions. |
Use `paths` to refine which fields to collect.
```toml
[[inputs.jolokia2_agent.metric]]
name = "jvm_memory"
mbean = "java.lang:type=Memory"
paths = ["HeapMemoryUsage", "NonHeapMemoryUsage", "ObjectPendingFinalizationCount"]
```
The preceding `jvm_memory` `metric` declaration produces the following output:
```
jvm_memory HeapMemoryUsage.committed=4294967296,HeapMemoryUsage.init=4294967296,HeapMemoryUsage.max=4294967296,HeapMemoryUsage.used=1750658992,NonHeapMemoryUsage.committed=67350528,NonHeapMemoryUsage.init=2555904,NonHeapMemoryUsage.max=-1,NonHeapMemoryUsage.used=65821352,ObjectPendingFinalizationCount=0 1503762436000000000
```
Use `*` wildcards against `mbean` property-key values to create distinct series by capturing values into `tag_keys`.
```toml
[[inputs.jolokia2_agent.metric]]
name = "jvm_garbage_collector"
mbean = "java.lang:name=*,type=GarbageCollector"
paths = ["CollectionTime", "CollectionCount"]
tag_keys = ["name"]
```
Since `name=*` matches both `G1 Old Generation` and `G1 Young Generation`, and `name` is used as a tag, the preceding `jvm_garbage_collector` `metric` declaration produces two metrics.
```
jvm_garbage_collector,name=G1\ Old\ Generation CollectionCount=0,CollectionTime=0 1503762520000000000
jvm_garbage_collector,name=G1\ Young\ Generation CollectionTime=32,CollectionCount=2 1503762520000000000
```
Use `tag_prefix` along with `tag_keys` to add detail to tag names.
```toml
[[inputs.jolokia2_agent.metric]]
name = "jvm_memory_pool"
mbean = "java.lang:name=*,type=MemoryPool"
paths = ["Usage", "PeakUsage", "CollectionUsage"]
tag_keys = ["name"]
tag_prefix = "pool_"
```
The preceding `jvm_memory_pool` `metric` declaration produces six metrics, each with a distinct `pool_name` tag.
```
jvm_memory_pool,pool_name=Compressed\ Class\ Space PeakUsage.max=1073741824,PeakUsage.committed=3145728,PeakUsage.init=0,Usage.committed=3145728,Usage.init=0,PeakUsage.used=3017976,Usage.max=1073741824,Usage.used=3017976 1503764025000000000
jvm_memory_pool,pool_name=Code\ Cache PeakUsage.init=2555904,PeakUsage.committed=6291456,Usage.committed=6291456,PeakUsage.used=6202752,PeakUsage.max=251658240,Usage.used=6210368,Usage.max=251658240,Usage.init=2555904 1503764025000000000
jvm_memory_pool,pool_name=G1\ Eden\ Space CollectionUsage.max=-1,PeakUsage.committed=56623104,PeakUsage.init=56623104,PeakUsage.used=53477376,Usage.max=-1,Usage.committed=49283072,Usage.used=19922944,CollectionUsage.committed=49283072,CollectionUsage.init=56623104,CollectionUsage.used=0,PeakUsage.max=-1,Usage.init=56623104 1503764025000000000
jvm_memory_pool,pool_name=G1\ Old\ Gen CollectionUsage.max=1073741824,CollectionUsage.committed=0,PeakUsage.max=1073741824,PeakUsage.committed=1017118720,PeakUsage.init=1017118720,PeakUsage.used=137032208,Usage.max=1073741824,CollectionUsage.init=1017118720,Usage.committed=1017118720,Usage.init=1017118720,Usage.used=134708752,CollectionUsage.used=0 1503764025000000000
jvm_memory_pool,pool_name=G1\ Survivor\ Space Usage.max=-1,Usage.init=0,CollectionUsage.max=-1,CollectionUsage.committed=7340032,CollectionUsage.used=7340032,PeakUsage.committed=7340032,Usage.committed=7340032,Usage.used=7340032,CollectionUsage.init=0,PeakUsage.max=-1,PeakUsage.init=0,PeakUsage.used=7340032 1503764025000000000
jvm_memory_pool,pool_name=Metaspace PeakUsage.init=0,PeakUsage.used=21852224,PeakUsage.max=-1,Usage.max=-1,Usage.committed=22282240,Usage.init=0,Usage.used=21852224,PeakUsage.committed=22282240 1503764025000000000
```
Use substitutions to create fields and field prefixes with MBean property-keys captured by wildcards. In the following example, `$1` represents the value of the property-key `name`, and `$2` represents the value of the property-key `topic`.
```toml
[[inputs.jolokia2_agent.metric]]
name = "kafka_topic"
mbean = "kafka.server:name=*,topic=*,type=BrokerTopicMetrics"
field_prefix = "$1"
tag_keys = ["topic"]
```
The preceding `kafka_topic` `metric` declaration produces a metric per Kafka topic. The `name` MBean property-key is used as a field prefix to help gather fields together into a single metric.
```
kafka_topic,topic=my-topic BytesOutPerSec.MeanRate=0,FailedProduceRequestsPerSec.MeanRate=0,BytesOutPerSec.EventType="bytes",BytesRejectedPerSec.Count=0,FailedProduceRequestsPerSec.RateUnit="SECONDS",FailedProduceRequestsPerSec.EventType="requests",MessagesInPerSec.RateUnit="SECONDS",BytesInPerSec.EventType="bytes",BytesOutPerSec.RateUnit="SECONDS",BytesInPerSec.OneMinuteRate=0,FailedFetchRequestsPerSec.EventType="requests",TotalFetchRequestsPerSec.MeanRate=146.301533938701,BytesOutPerSec.FifteenMinuteRate=0,TotalProduceRequestsPerSec.MeanRate=0,BytesRejectedPerSec.FifteenMinuteRate=0,MessagesInPerSec.FiveMinuteRate=0,BytesInPerSec.Count=0,BytesRejectedPerSec.MeanRate=0,FailedFetchRequestsPerSec.MeanRate=0,FailedFetchRequestsPerSec.FiveMinuteRate=0,FailedFetchRequestsPerSec.FifteenMinuteRate=0,FailedProduceRequestsPerSec.Count=0,TotalFetchRequestsPerSec.FifteenMinuteRate=128.59314292334466,TotalFetchRequestsPerSec.OneMinuteRate=126.71551273850747,TotalFetchRequestsPerSec.Count=1353483,TotalProduceRequestsPerSec.FifteenMinuteRate=0,FailedFetchRequestsPerSec.OneMinuteRate=0,FailedFetchRequestsPerSec.Count=0,FailedProduceRequestsPerSec.FifteenMinuteRate=0,TotalFetchRequestsPerSec.FiveMinuteRate=130.8516148751592,TotalFetchRequestsPerSec.RateUnit="SECONDS",BytesRejectedPerSec.RateUnit="SECONDS",BytesInPerSec.MeanRate=0,FailedFetchRequestsPerSec.RateUnit="SECONDS",BytesRejectedPerSec.OneMinuteRate=0,BytesOutPerSec.Count=0,BytesOutPerSec.OneMinuteRate=0,MessagesInPerSec.FifteenMinuteRate=0,MessagesInPerSec.MeanRate=0,BytesInPerSec.FiveMinuteRate=0,TotalProduceRequestsPerSec.RateUnit="SECONDS",FailedProduceRequestsPerSec.OneMinuteRate=0,TotalProduceRequestsPerSec.EventType="requests",BytesRejectedPerSec.FiveMinuteRate=0,BytesRejectedPerSec.EventType="bytes",BytesOutPerSec.FiveMinuteRate=0,FailedProduceRequestsPerSec.FiveMinuteRate=0,MessagesInPerSec.Count=0,TotalProduceRequestsPerSec.FiveMinuteRate=0,TotalProduceRequestsPerSec.OneMinuteRate=0,MessagesInPerSec.EventType="messages",MessagesInPerSec.OneMinuteRate=0,TotalFetchRequestsPerSec.EventType="requests",BytesInPerSec.RateUnit="SECONDS",BytesInPerSec.FifteenMinuteRate=0,TotalProduceRequestsPerSec.Count=0 1503767532000000000
```
Both `jolokia2_agent` and `jolokia2_proxy` plugins support default configurations that apply to every `metric` declaration.
| Key | Default Value | Description |
|---------------------------|---------------|-------------|
| `default_field_separator` | `.` | A character to use to join MBean attributes when creating fields. |
| `default_field_prefix` | _None_ | A string to prepend to the field names produced by all `metric` declarations. |
| `default_tag_prefix` | _None_ | A string to prepend to the tag names produced by all `metric` declarations. |

View File

@@ -0,0 +1,271 @@
package jolokia2
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"path"
"time"
"github.com/influxdata/telegraf/internal"
)
type Client struct {
URL string
client *http.Client
config *ClientConfig
}
type ClientConfig struct {
ResponseTimeout time.Duration
Username string
Password string
SSLCA string
SSLCert string
SSLKey string
InsecureSkipVerify bool
ProxyConfig *ProxyConfig
}
type ProxyConfig struct {
DefaultTargetUsername string
DefaultTargetPassword string
Targets []ProxyTargetConfig
}
type ProxyTargetConfig struct {
Username string
Password string
URL string
}
type ReadRequest struct {
Mbean string
Attributes []string
Path string
}
type ReadResponse struct {
Status int
Value interface{}
RequestMbean string
RequestAttributes []string
RequestPath string
RequestTarget string
}
// Jolokia JSON request object. Example: {
// "type": "read",
// "mbean: "java.lang:type="Runtime",
// "attribute": "Uptime",
// "target": {
// "url: "service:jmx:rmi:///jndi/rmi://target:9010/jmxrmi"
// }
// }
type jolokiaRequest struct {
Type string `json:"type"`
Mbean string `json:"mbean"`
Attribute interface{} `json:"attribute,omitempty"`
Path string `json:"path,omitempty"`
Target *jolokiaTarget `json:"target,omitempty"`
}
type jolokiaTarget struct {
URL string `json:"url"`
User string `json:"user,omitempty"`
Password string `json:"password,omitempty"`
}
// Jolokia JSON response object. Example: {
// "request": {
// "type": "read"
// "mbean": "java.lang:type=Runtime",
// "attribute": "Uptime",
// "target": {
// "url": "service:jmx:rmi:///jndi/rmi://target:9010/jmxrmi"
// }
// },
// "value": 1214083,
// "timestamp": 1488059309,
// "status": 200
// }
type jolokiaResponse struct {
Request jolokiaRequest `json:"request"`
Value interface{} `json:"value"`
Status int `json:"status"`
}
func NewClient(url string, config *ClientConfig) (*Client, error) {
tlsConfig, err := internal.GetTLSConfig(
config.SSLCert, config.SSLKey, config.SSLCA, config.InsecureSkipVerify)
if err != nil {
return nil, err
}
transport := &http.Transport{
ResponseHeaderTimeout: config.ResponseTimeout,
TLSClientConfig: tlsConfig,
}
client := &http.Client{
Transport: transport,
Timeout: config.ResponseTimeout,
}
return &Client{
URL: url,
config: config,
client: client,
}, nil
}
func (c *Client) read(requests []ReadRequest) ([]ReadResponse, error) {
jrequests := makeJolokiaRequests(requests, c.config.ProxyConfig)
requestBody, err := json.Marshal(jrequests)
if err != nil {
return nil, err
}
requestUrl, err := formatReadUrl(c.URL, c.config.Username, c.config.Password)
if err != nil {
return nil, err
}
req, err := http.NewRequest("POST", requestUrl, bytes.NewBuffer(requestBody))
if err != nil {
return nil, err
}
req.Header.Add("Content-type", "application/json")
resp, err := c.client.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("Response from url \"%s\" has status code %d (%s), expected %d (%s)",
c.URL, resp.StatusCode, http.StatusText(resp.StatusCode), http.StatusOK, http.StatusText(http.StatusOK))
}
responseBody, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
var jresponses []jolokiaResponse
if err = json.Unmarshal([]byte(responseBody), &jresponses); err != nil {
return nil, fmt.Errorf("Error decoding JSON response: %s: %s", err, responseBody)
}
return makeReadResponses(jresponses), nil
}
func makeJolokiaRequests(rrequests []ReadRequest, proxyConfig *ProxyConfig) []jolokiaRequest {
jrequests := make([]jolokiaRequest, 0)
if proxyConfig == nil {
for _, rr := range rrequests {
jrequests = append(jrequests, makeJolokiaRequest(rr, nil))
}
} else {
for _, t := range proxyConfig.Targets {
if t.Username == "" {
t.Username = proxyConfig.DefaultTargetUsername
}
if t.Password == "" {
t.Password = proxyConfig.DefaultTargetPassword
}
for _, rr := range rrequests {
jtarget := &jolokiaTarget{
URL: t.URL,
User: t.Username,
Password: t.Password,
}
jrequests = append(jrequests, makeJolokiaRequest(rr, jtarget))
}
}
}
return jrequests
}
func makeJolokiaRequest(rrequest ReadRequest, jtarget *jolokiaTarget) jolokiaRequest {
jrequest := jolokiaRequest{
Type: "read",
Mbean: rrequest.Mbean,
Path: rrequest.Path,
Target: jtarget,
}
if len(rrequest.Attributes) == 1 {
jrequest.Attribute = rrequest.Attributes[0]
}
if len(rrequest.Attributes) > 1 {
jrequest.Attribute = rrequest.Attributes
}
return jrequest
}
func makeReadResponses(jresponses []jolokiaResponse) []ReadResponse {
rresponses := make([]ReadResponse, 0)
for _, jr := range jresponses {
rrequest := ReadRequest{
Mbean: jr.Request.Mbean,
Path: jr.Request.Path,
Attributes: []string{},
}
attrValue := jr.Request.Attribute
if attrValue != nil {
attribute, ok := attrValue.(string)
if ok {
rrequest.Attributes = []string{attribute}
} else {
attributes, _ := attrValue.([]interface{})
rrequest.Attributes = make([]string, len(attributes))
for i, attr := range attributes {
rrequest.Attributes[i] = attr.(string)
}
}
}
rresponse := ReadResponse{
Value: jr.Value,
Status: jr.Status,
RequestMbean: rrequest.Mbean,
RequestAttributes: rrequest.Attributes,
RequestPath: rrequest.Path,
}
if jtarget := jr.Request.Target; jtarget != nil {
rresponse.RequestTarget = jtarget.URL
}
rresponses = append(rresponses, rresponse)
}
return rresponses
}
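
`makeReadResponses` has to undo a JSON decoding ambiguity: the echoed `attribute` arrives as either a bare string or a `[]interface{}`. A standalone sketch of the same normalization:

```go
package main

import "fmt"

func normalizeAttributes(attrValue interface{}) []string {
	if attrValue == nil {
		return []string{}
	}
	if s, ok := attrValue.(string); ok {
		return []string{s} // single attribute echoed as a bare string
	}
	attrs, _ := attrValue.([]interface{})
	out := make([]string, len(attrs))
	for i, a := range attrs {
		out[i], _ = a.(string) // multiple attributes echoed as a JSON array
	}
	return out
}

func main() {
	fmt.Println(normalizeAttributes("Uptime"))
	fmt.Println(normalizeAttributes([]interface{}{"HeapMemoryUsage", "Uptime"}))
}
```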
func formatReadUrl(configUrl, username, password string) (string, error) {
parsedUrl, err := url.Parse(configUrl)
if err != nil {
return "", err
}
readUrl := url.URL{
Host: parsedUrl.Host,
Scheme: parsedUrl.Scheme,
}
if username != "" || password != "" {
readUrl.User = url.UserPassword(username, password)
}
readUrl.Path = path.Join(parsedUrl.Path, "read")
// url.Values returned by Query() is a copy; encode it back onto the URL.
query := readUrl.Query()
query.Add("ignoreErrors", "true")
readUrl.RawQuery = query.Encode()
return readUrl.String(), nil
}
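
For reference, a standalone sketch of the URL this builds (with the query fix applied above; the credentials are hypothetical):

```go
package main

import (
	"fmt"
	"net/url"
	"path"
)

func main() {
	parsed, err := url.Parse("http://localhost:8080/jolokia")
	if err != nil {
		panic(err)
	}
	readUrl := url.URL{Host: parsed.Host, Scheme: parsed.Scheme}
	readUrl.User = url.UserPassword("sally", "seashore")
	readUrl.Path = path.Join(parsed.Path, "read")
	q := readUrl.Query()
	q.Add("ignoreErrors", "true")
	readUrl.RawQuery = q.Encode()
	fmt.Println(readUrl.String())
	// http://sally:seashore@localhost:8080/jolokia/read?ignoreErrors=true
}
```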

View File

@@ -0,0 +1,129 @@
package jolokia2
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
"testing"
"github.com/influxdata/telegraf/testutil"
)
func TestJolokia2_ClientAuthRequest(t *testing.T) {
var username string
var password string
var requests []map[string]interface{}
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
username, password, _ = r.BasicAuth()
body, _ := ioutil.ReadAll(r.Body)
err := json.Unmarshal(body, &requests)
if err != nil {
t.Error(err)
}
w.WriteHeader(http.StatusOK)
}))
defer server.Close()
plugin := setupPlugin(t, fmt.Sprintf(`
[jolokia2_agent]
urls = ["%s/jolokia"]
username = "sally"
password = "seashore"
[[jolokia2_agent.metric]]
name = "hello"
mbean = "hello:foo=bar"
`, server.URL))
var acc testutil.Accumulator
plugin.Gather(&acc)
if username != "sally" {
t.Errorf("Expected to post with username %s, but was %s", "sally", username)
}
if password != "seashore" {
t.Errorf("Expected to post with password %s, but was %s", "seashore", password)
}
if len(requests) == 0 {
t.Fatal("Expected to post a request body, but was empty.")
}
request := requests[0]
if expect := "hello:foo=bar"; request["mbean"] != expect {
t.Errorf("Expected to query mbean %s, but was %s", expect, request["mbean"])
}
}
func TestJolokia2_ClientProxyAuthRequest(t *testing.T) {
var requests []map[string]interface{}
var username string
var password string
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
username, password, _ = r.BasicAuth()
body, _ := ioutil.ReadAll(r.Body)
err := json.Unmarshal(body, &requests)
if err != nil {
t.Error(err)
}
w.WriteHeader(http.StatusOK)
}))
defer server.Close()
plugin := setupPlugin(t, fmt.Sprintf(`
[jolokia2_proxy]
url = "%s/jolokia"
username = "sally"
password = "seashore"
[[jolokia2_proxy.target]]
url = "service:jmx:rmi:///jndi/rmi://target:9010/jmxrmi"
username = "jack"
password = "benimble"
[[jolokia2_proxy.metric]]
name = "hello"
mbean = "hello:foo=bar"
`, server.URL))
var acc testutil.Accumulator
plugin.Gather(&acc)
if username != "sally" {
t.Errorf("Expected to post with username %s, but was %s", "sally", username)
}
if password != "seashore" {
t.Errorf("Expected to post with password %s, but was %s", "seashore", password)
}
if len(requests) == 0 {
t.Fatal("Expected to post a request body, but was empty.")
}
request := requests[0]
if expect := "hello:foo=bar"; request["mbean"] != expect {
t.Errorf("Expected to query mbean %s, but was %s", expect, request["mbean"])
}
target, ok := request["target"].(map[string]interface{})
if !ok {
t.Fatal("Expected a proxy target, but was empty.")
}
if expect := "service:jmx:rmi:///jndi/rmi://target:9010/jmxrmi"; target["url"] != expect {
t.Errorf("Expected proxy target url %s, but was %s", expect, target["url"])
}
if expect := "jack"; target["user"] != expect {
t.Errorf("Expected proxy target username %s, but was %s", expect, target["user"])
}
if expect := "benimble"; target["password"] != expect {
t.Errorf("Expected proxy target password %s, but was %s", expect, target["password"])
}
}

View File

@@ -0,0 +1,40 @@
[[inputs.jolokia2_agent]]
urls = ["http://localhost:8080/jolokia"]
[[inputs.jolokia2_agent.metric]]
name = "java_runtime"
mbean = "java.lang:type=Runtime"
paths = ["Uptime"]
[[inputs.jolokia2_agent.metric]]
name = "java_memory"
mbean = "java.lang:type=Memory"
paths = ["HeapMemoryUsage", "NonHeapMemoryUsage", "ObjectPendingFinalizationCount"]
[[inputs.jolokia2_agent.metric]]
name = "java_garbage_collector"
mbean = "java.lang:name=G1*,type=GarbageCollector"
paths = ["CollectionTime", "CollectionCount"]
tag_keys = ["name"]
[[inputs.jolokia2_agent.metric]]
name = "java_last_garbage_collection"
mbean = "java.lang:name=G1 Young Generation,type=GarbageCollector"
paths = ["LastGcInfo/duration", "LastGcInfo/GcThreadCount", "LastGcInfo/memoryUsageAfterGc"]
[[inputs.jolokia2_agent.metric]]
name = "java_threading"
mbean = "java.lang:type=Threading"
paths = ["TotalStartedThreadCount", "ThreadCount", "DaemonThreadCount", "PeakThreadCount"]
[[inputs.jolokia2_agent.metric]]
name = "java_class_loading"
mbean = "java.lang:type=ClassLoading"
paths = ["LoadedClassCount", "UnloadedClassCount", "TotalLoadedClassCount"]
[[inputs.jolokia2_agent.metric]]
name = "java_memory_pool"
mbean = "java.lang:name=*,type=MemoryPool"
paths = ["Usage", "PeakUsage", "CollectionUsage"]
tag_keys = ["name"]


@@ -0,0 +1,55 @@
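# Example configuration for collecting Kafka broker metrics through a Jolokia agent.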
[[inputs.jolokia2_agent]]
name_prefix = "kafka_"
urls = ["http://localhost:8080/jolokia"]
[[inputs.jolokia2_agent.metric]]
name = "controller"
mbean = "kafka.controller:name=*,type=*"
field_prefix = "$1."
[[inputs.jolokia2_agent.metric]]
name = "replica_manager"
mbean = "kafka.server:name=*,type=ReplicaManager"
field_prefix = "$1."
[[inputs.jolokia2_agent.metric]]
name = "purgatory"
mbean = "kafka.server:delayedOperation=*,name=*,type=DelayedOperationPurgatory"
field_prefix = "$1."
field_name = "$2"
[[inputs.jolokia2_agent.metric]]
name = "client"
mbean = "kafka.server:client-id=*,type=*"
tag_keys = ["client-id", "type"]
[[inputs.jolokia2_agent.metric]]
name = "request"
mbean = "kafka.network:name=*,request=*,type=RequestMetrics"
field_prefix = "$1."
tag_keys = ["request"]
[[inputs.jolokia2_agent.metric]]
name = "topics"
mbean = "kafka.server:name=*,type=BrokerTopicMetrics"
field_prefix = "$1."
[[inputs.jolokia2_agent.metric]]
name = "topic"
mbean = "kafka.server:name=*,topic=*,type=BrokerTopicMetrics"
field_prefix = "$1."
tag_keys = ["topic"]
[[inputs.jolokia2_agent.metric]]
name = "partition"
mbean = "kafka.log:name=*,partition=*,topic=*,type=Log"
field_name = "$1"
tag_keys = ["topic", "partition"]
[[inputs.jolokia2_agent.metric]]
name = "partition"
mbean = "kafka.cluster:name=UnderReplicated,partition=*,topic=*,type=Partition"
field_name = "UnderReplicatedPartitions"
tag_keys = ["topic", "partition"]


@@ -0,0 +1,266 @@
package jolokia2
import (
"fmt"
"sort"
"strings"
"github.com/influxdata/telegraf"
)
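// defaultFieldName is the fallback field name used when no field name
// can be derived from the metric configuration.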
const defaultFieldName = "value"
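// Gatherer translates metric definitions into Jolokia read requests and
// converts the responses into Telegraf points.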
type Gatherer struct {
metrics []Metric
requests []ReadRequest
}
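// NewGatherer creates a Gatherer for the given metrics, precomputing the
// read requests to send to the Jolokia endpoint.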
func NewGatherer(metrics []Metric) *Gatherer {
return &Gatherer{
metrics: metrics,
requests: makeReadRequests(metrics),
}
}
// Gather adds points to an accumulator from responses returned
// by a Jolokia agent.
func (g *Gatherer) Gather(client *Client, acc telegraf.Accumulator) error {
var tags map[string]string
if client.config.ProxyConfig != nil {
tags = map[string]string{"jolokia_proxy_url": client.URL}
} else {
tags = map[string]string{"jolokia_agent_url": client.URL}
}
responses, err := client.read(g.requests)
if err != nil {
return err
}
g.gatherResponses(responses, tags, acc)
return nil
}
// gatherResponses adds points to an accumulator from the ReadResponse objects
// returned by a Jolokia agent.
func (g *Gatherer) gatherResponses(responses []ReadResponse, tags map[string]string, acc telegraf.Accumulator) {
series := make(map[string][]point)
for _, metric := range g.metrics {
points, ok := series[metric.Name]
if !ok {
points = make([]point, 0)
}
responsePoints, responseErrors := g.generatePoints(metric, responses)
points = append(points, responsePoints...)
for _, err := range responseErrors {
acc.AddError(err)
}
series[metric.Name] = points
}
for measurement, points := range series {
for _, point := range compactPoints(points) {
acc.AddFields(measurement,
point.Fields, mergeTags(point.Tags, tags))
}
}
}
// generatePoints creates points for the supplied metric from the ReadResponse
// objects returned by the Jolokia client.
func (g *Gatherer) generatePoints(metric Metric, responses []ReadResponse) ([]point, []error) {
points := make([]point, 0)
errors := make([]error, 0)
for _, response := range responses {
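// Skip 404 responses silently; report any other non-200 status as an error.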
switch response.Status {
case 200:
// status OK; process the response below
case 404:
continue
default:
errors = append(errors, fmt.Errorf("Unexpected status in response from target %s: %d",
response.RequestTarget, response.Status))
continue
}
if !metricMatchesResponse(metric, response) {
continue
}
pb := newPointBuilder(metric, response.RequestAttributes, response.RequestPath)
for _, point := range pb.Build(metric.Mbean, response.Value) {
if response.RequestTarget != "" {
point.Tags["jolokia_agent_url"] = response.RequestTarget
}
points = append(points, point)
}
}
return points, errors
}
// mergeTags combines two tag sets into a single tag set.
func mergeTags(metricTags, outerTags map[string]string) map[string]string {
tags := make(map[string]string)
for k, v := range outerTags {
tags[k] = v
}
for k, v := range metricTags {
tags[k] = v
}
return tags
}
// metricMatchesResponse returns true when the name, attributes, and path
// of a Metric match the corresponding elements in a ReadResponse object
// returned by a Jolokia agent.
func metricMatchesResponse(metric Metric, response ReadResponse) bool {
if !metric.MatchObjectName(response.RequestMbean) {
return false
}
if len(metric.Paths) == 0 {
return len(response.RequestAttributes) == 0
}
for _, attribute := range response.RequestAttributes {
if metric.MatchAttributeAndPath(attribute, response.RequestPath) {
return true
}
}
return false
}
// compactPoints attempts to reduce the number of points by merging
// points that share a tag set. When a match is found, the fields from
// one point are moved to the other, and the emptied point is dropped.
func compactPoints(points []point) []point {
compactedPoints := make([]point, 0)
for _, sourcePoint := range points {
keepPoint := true
for _, compactPoint := range compactedPoints {
if !tagSetsMatch(sourcePoint.Tags, compactPoint.Tags) {
continue
}
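// Tag sets match: merge this point's fields into the existing point
// (Fields is a map, so the update is visible in compactedPoints) and
// drop the now-redundant source point.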
keepPoint = false
for key, val := range sourcePoint.Fields {
compactPoint.Fields[key] = val
}
}
if keepPoint {
compactedPoints = append(compactedPoints, sourcePoint)
}
}
return compactedPoints
}
// tagSetsMatch returns true if two maps are equivalent.
func tagSetsMatch(a, b map[string]string) bool {
if len(a) != len(b) {
return false
}
for ak, av := range a {
bv, ok := b[ak]
if !ok {
return false
}
if av != bv {
return false
}
}
return true
}
// makeReadRequests creates ReadRequest objects from metric definitions.
func makeReadRequests(metrics []Metric) []ReadRequest {
var requests []ReadRequest
for _, metric := range metrics {
if len(metric.Paths) == 0 {
requests = append(requests, ReadRequest{
Mbean: metric.Mbean,
Attributes: []string{},
})
} else {
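// Group the configured paths by their leading segment (the mbean
// attribute); any remaining segments become the request path.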
attributes := make(map[string][]string)
for _, path := range metric.Paths {
segments := strings.Split(path, "/")
attribute := segments[0]
if _, ok := attributes[attribute]; !ok {
attributes[attribute] = make([]string, 0)
}
if len(segments) > 1 {
paths := attributes[attribute]
attributes[attribute] = append(paths, strings.Join(segments[1:], "/"))
}
}
rootAttributes := findRequestAttributesWithoutPaths(attributes)
if len(rootAttributes) > 0 {
requests = append(requests, ReadRequest{
Mbean: metric.Mbean,
Attributes: rootAttributes,
})
}
for _, deepAttribute := range findRequestAttributesWithPaths(attributes) {
for _, path := range attributes[deepAttribute] {
requests = append(requests, ReadRequest{
Mbean: metric.Mbean,
Attributes: []string{deepAttribute},
Path: path,
})
}
}
}
}
return requests
}
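// findRequestAttributesWithoutPaths returns the sorted names of attributes
// that have no path segments; these can be batched into a single request.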
func findRequestAttributesWithoutPaths(attributes map[string][]string) []string {
results := make([]string, 0)
for attr, paths := range attributes {
if len(paths) == 0 {
results = append(results, attr)
}
}
sort.Strings(results)
return results
}
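// findRequestAttributesWithPaths returns the sorted names of attributes
// that have path segments; each attribute/path pair needs its own request.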
func findRequestAttributesWithPaths(attributes map[string][]string) []string {
results := make([]string, 0)
for attr, paths := range attributes {
if len(paths) != 0 {
results = append(results, attr)
}
}
sort.Strings(results)
return results
}

Some files were not shown because too many files have changed in this diff.