Compare commits


708 Commits

Author SHA1 Message Date
Daniel Nelson
1e51969813 Set 1.5.3 release date
(cherry picked from commit 2160779126)
2018-03-14 16:34:56 -07:00
Daniel Nelson
51b097a7c6 Use previous image on appveyor
(cherry picked from commit f1b681cbdc)
2018-03-14 16:31:18 -07:00
Daniel Nelson
77dfb8c9c5 Update changelog
(cherry picked from commit 6e5e2f713d)
2018-03-14 14:25:53 -07:00
Jonas Hahnfeld
1398f8e678 Add output of stderr in case of error to exec log message (#3862)
If the command failed with a non-zero exit status, there might be an error
message on stderr. Append the first line to the error message to ease the
search for its cause.

(cherry picked from commit 8e515688eb)
2018-03-14 14:25:53 -07:00
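
A minimal sketch of the approach in Go (not the actual plugin code; the `run` helper is hypothetical): capture stderr separately and fold its first line into the returned error.

```
package main

import (
	"bytes"
	"fmt"
	"os/exec"
	"strings"
)

// run executes the command and, on a non-zero exit status, appends the
// first line of stderr to the returned error to ease diagnosis.
func run(name string, args ...string) ([]byte, error) {
	var stdout, stderr bytes.Buffer
	cmd := exec.Command(name, args...)
	cmd.Stdout = &stdout
	cmd.Stderr = &stderr
	if err := cmd.Run(); err != nil {
		// Take only the first line so a noisy program cannot flood the log.
		line := strings.SplitN(stderr.String(), "\n", 2)[0]
		if line != "" {
			return nil, fmt.Errorf("%s: %s", err, line)
		}
		return nil, err
	}
	return stdout.Bytes(), nil
}

func main() {
	_, err := run("sh", "-c", "echo oops >&2; exit 1")
	fmt.Println(err) // e.g. "exit status 1: oops"
}
```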
Daniel Nelson
d96483bffb Use Go 1.9.4 in builds 2018-03-09 14:37:53 -08:00
Daniel Nelson
5e534676a0 Update changelog
(cherry picked from commit f7207f514e)
2018-03-08 10:55:02 -08:00
Dennis Schön
9329200afa Fix uptime metric in passenger input plugin (#3871)
(cherry picked from commit f1c8abd68c)
2018-03-08 10:55:02 -08:00
Daniel Nelson
645b8b905d Update changelog
(cherry picked from commit e4ce057885)
2018-03-07 14:17:36 -08:00
dilshatm
ea7d884c09 Fix collation difference in sqlserver input (#3786)
(cherry picked from commit a6d366fb84)
2018-03-07 14:17:33 -08:00
Daniel Nelson
7f94cb58e4 Update changelog
(cherry picked from commit 5928219454)
2018-02-25 01:07:16 -08:00
Daniel Nelson
d8f2d4af0f Disable keepalive in mqtt output. (#3779)
This functionality currently has race conditions that can result in the
output deadlocking.

(cherry picked from commit 8c932abff6)
2018-02-25 01:07:16 -08:00
Daniel Nelson
d8dae1b1ab Fix memory leak in postgresql_extensible 2018-02-20 11:58:43 -08:00
Daniel Nelson
770cf4e0b6 Update changelog
(cherry picked from commit a00d5b48f8)
2018-02-09 12:13:49 -08:00
efficks
8cb5391f4e Fix ping plugin not reporting zero durations (#3778)
(cherry picked from commit f5ea13a9ab)
2018-02-09 12:13:48 -08:00
Daniel Nelson
c5ddb65ad9 Update changelog
(cherry picked from commit 9a1d69a2ae)
2018-02-05 11:23:03 -08:00
Philipp Weber
d671299e96 Remove userinfo from url tag in prometheus input (#3743)
(cherry picked from commit b7a68eef56)
2018-02-05 11:23:02 -08:00
Daniel Nelson
f59231941f Update changelog
(cherry picked from commit 32732d42f8)
2018-01-30 18:09:24 -08:00
Daniel Nelson
100bdfba6c Set path to / if HOST_MOUNT_PREFIX matches full path (#3736)
(cherry picked from commit 10e51e4b49)
2018-01-30 18:08:50 -08:00
Daniel Nelson
67440c95bb Set release date for 1.5.2
(cherry picked from commit 3a85e7b1f0)
2018-01-30 14:02:50 -08:00
Daniel Nelson
39de63d03c Update changelog
(cherry picked from commit 5d87ad85a1)
2018-01-30 14:01:17 -08:00
Daniel Nelson
56edd339e7 Exclude master_replid fields from redis input (#3725)
(cherry picked from commit c28d0e1b16)
2018-01-30 14:01:13 -08:00
Daniel Nelson
df768f83af Update changelog
(cherry picked from commit f9c0aa1e23)
2018-01-25 13:47:39 -08:00
Pierre Tessier
8733d3826a Add timeout to wavefront output write (#3711)
(cherry picked from commit 3e4c91880a)
2018-01-25 13:47:39 -08:00
Daniel Nelson
2bb97154db Update changelog
(cherry picked from commit 899c3a2ae1)
2018-01-22 12:06:30 -08:00
Daniel Nelson
a8d9e458ab Remove graphite serializer replacement of dot with underscore in field key (#3705)
(cherry picked from commit 4558aeddeb)
2018-01-22 12:06:26 -08:00
Daniel Nelson
b464adb08c Update changelog
(cherry picked from commit 36c9113917)
2018-01-22 12:01:31 -08:00
Daniel Nelson
4bd67824ae Avoid loop creation in second processor pass (#3656)
(cherry picked from commit 5270aa451c)
2018-01-22 12:01:24 -08:00
Daniel Nelson
f5894a6a2f Limit wait time for writes in mqtt output (#3699)
(cherry picked from commit 91fc2765b1)
2018-01-22 12:01:20 -08:00
Daniel Nelson
1790b26651 Update changelog
(cherry picked from commit 5bac08662e)
2018-01-18 17:39:22 -08:00
Piotr Popieluch
bb3ee1fd39 Align aggregator period with internal ticker to avoid skipping metrics (#3693)
By the time aggregator.run() was called, about 600ms had already passed since setting `now`, which was skewing the aggregation intervals and skipping metrics.

(cherry picked from commit 601dc99606)
2018-01-18 17:39:17 -08:00
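
A hedged sketch of the idea, not the Telegraf implementation: derive each aggregation window from the tick time itself rather than from a `now` captured before the loop starts, so startup delay cannot skew the window boundaries.

```
package main

import (
	"fmt"
	"time"
)

func main() {
	period := time.Second

	// Simulate the ~600ms of startup work that used to happen between
	// capturing 'now' and entering the aggregation loop.
	time.Sleep(600 * time.Millisecond)

	ticker := time.NewTicker(period)
	defer ticker.Stop()

	tick := <-ticker.C
	// Derive the window from the tick itself, not from the stale 'now':
	// the boundaries stay aligned no matter how late the loop started.
	start := tick.Truncate(period)
	end := start.Add(period)
	fmt.Printf("aggregating window [%s, %s)\n",
		start.Format(time.RFC3339), end.Format(time.RFC3339))
}
```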
Daniel Nelson
82df5bf2d8 Update changelog
(cherry picked from commit 0f55d9eba2)
2018-01-17 15:28:52 -08:00
Piotr Popieluch
8b566b2b9f Reconnect before sending graphite metrics if disconnected (#3680)
(cherry picked from commit f374a295d9)
2018-01-17 15:28:52 -08:00
Daniel Nelson
059a751a71 Update changelog
(cherry picked from commit ad921a3840)
2018-01-17 14:39:10 -08:00
Michael Boudreau
dcaa0ca8db Fix index out of bounds error in solr input plugin (#3683)
(cherry picked from commit 9d559292a5)
2018-01-17 14:39:05 -08:00
Daniel Nelson
8777e32d9f Update changelog
(cherry picked from commit 6e24056757)
2018-01-16 13:47:23 -08:00
Noah Crowley
667940afac Ignore empty lines in Graphite plaintext (#3684)
(cherry picked from commit 87830a1c38)
2018-01-16 13:46:58 -08:00
Daniel Nelson
0605af7c99 Pin crate docker image for testing
(cherry picked from commit 3cf0ba1ccf)
2018-01-10 14:11:36 -08:00
Daniel Nelson
4e89c17c0f Set 1.5.1 release date 2018-01-10 13:28:28 -08:00
Daniel Nelson
45b7db7de1 Add link to docs for configuring the openldap monitoring backend
(cherry picked from commit 37757b7782)
2018-01-04 15:35:45 -08:00
Daniel Nelson
cc478f035e Update changelog
(cherry picked from commit 315fd1e987)
2018-01-04 15:30:23 -08:00
Daniel Nelson
fe6239cf9f Escape environment variables during config toml parsing (#3637)
(cherry picked from commit b0c2bb870e)
2018-01-04 15:30:17 -08:00
Daniel Nelson
865917f523 Update changelog
(cherry picked from commit 07cb749e04)
2018-01-03 13:45:00 -08:00
kerams
4aa8d72644 Fix deliver_get field in rabbitmq input (#3633)
(cherry picked from commit acea7109d4)
2018-01-03 13:44:55 -08:00
Daniel Nelson
384ef6af6b Update changelog
(cherry picked from commit 81f42e8b17)
2018-01-02 16:36:18 -08:00
Daniel Nelson
07985e6524 Add information about how to set permissions for postfix input (#3594)
(cherry picked from commit a440ed8d8c)
2018-01-02 14:10:00 -08:00
Daniel Nelson
f8597f78f4 Update changelog
(cherry picked from commit 06c21fb9f7)
2017-12-28 16:24:26 -08:00
Daniel Nelson
83faea7a31 Set content-type charset in influxdb output and allow it to be overridden (#3593)
(cherry picked from commit 4f7afb8cb5)
2017-12-28 16:24:21 -08:00
Daniel Nelson
223bbf0df7 Update changelog
(cherry picked from commit ef6e5c5a85)
2017-12-28 16:19:34 -08:00
Daniel Nelson
55f35f291d Fix DC/OS login expiration time (#3625)
(cherry picked from commit 005face7c0)
2017-12-28 16:19:25 -08:00
Daniel Nelson
6852231c1b Update changelog
(cherry picked from commit 1011cd0c94)
2017-12-28 16:16:30 -08:00
Daniel Nelson
ce4ca43a5d Fix name error in jolokia2_agent sample config (#3624)
(cherry picked from commit 6c075c4346)
2017-12-28 16:16:30 -08:00
timhallinflux
5d6622eb44 Update README.md
added `network` for a tiny bit more context
2017-12-14 14:58:37 -08:00
Daniel Nelson
a1668bbf9a Set release date for 1.5.0 2017-12-14 10:59:13 -08:00
Daniel Nelson
fe91c779e9 Remove AWS credential check from cloudwatch output (#3583)
This method is reported to not work with IAM Instance Profiles, and we
do not want to make any calls that would require additional permissions.

(cherry picked from commit 5b40173bcb)
2017-12-13 17:52:45 -08:00
Daniel Nelson
425b6f7d63 Update changelog
(cherry picked from commit 15266bb7eb)
2017-12-13 11:18:34 -08:00
Ildar Svetlov
c322ddb4b0 Don't add system input uptime_format as a counter (#3578)
(cherry picked from commit d935dfa9ed)
2017-12-13 11:17:55 -08:00
Daniel Nelson
648d3bde33 Update changelog
(cherry picked from commit ff634c5056)
2017-12-13 10:58:16 -08:00
Daniel Nelson
d8da77cb42 Add idle state to processes test
(cherry picked from commit 14b31a2354)
2017-12-13 10:57:28 -08:00
Ted Zlatanov
fdb04702eb Support I (idle) process state on procfs+Linux (#3530)
(cherry picked from commit 663a5b1f50)
2017-12-13 10:56:47 -08:00
Steve Banik
ecf43f4986 Fixed typo in README.md (#3574)
(cherry picked from commit d7d224d511)
2017-12-12 11:22:33 -08:00
Daniel Nelson
e307e92e86 Update changelog
(cherry picked from commit abcad439eb)
2017-12-11 18:02:35 -08:00
Daniel Nelson
8d4a09c3ea Fix separation of multiple prometheus_client outputs (#3570)
(cherry picked from commit 8484de6c12)
2017-12-11 18:02:30 -08:00
Daniel Nelson
fd964bd4eb Use auto type detection for scanned devices in smart input (#3561)
(cherry picked from commit 93d16a4603)
2017-12-08 18:03:39 -08:00
Daniel Nelson
994e75f1f0 Update changelog
(cherry picked from commit 88746b01c3)
2017-12-08 18:02:17 -08:00
Daniel Nelson
2e2efafbfc Update sarama-cluster to latest release (#3560)
(cherry picked from commit 37095ef47d)
2017-12-08 18:02:17 -08:00
Daniel Nelson
39537ed86e Use device name instead of abs path for devices tag in smart input (#3550)
(cherry picked from commit 574034c301)
2017-12-08 13:26:15 -08:00
Daniel Nelson
558ce25c94 Log connect error only in wavefront output (#3549)
(cherry picked from commit 177e7e2c73)
2017-12-06 14:56:28 -08:00
Daniel Nelson
0438f412a9 Fix formatting in changelog 2017-12-04 13:17:23 -08:00
Daniel Nelson
ca8911fec0 Update example config 2017-12-01 11:49:07 -08:00
Daniel Nelson
2c5a5373f6 Update changelog 2017-12-01 11:42:00 -08:00
Daniel Nelson
cabe10b88a Update changelog 2017-12-01 11:23:18 -08:00
Daniel Nelson
7f66863b87 Fix HOST_MOUNT_PREFIX in docker with disk input (#3529) 2017-12-01 11:21:39 -08:00
Daniel Nelson
e400ec2b57 Update changelog 2017-11-30 18:42:14 -08:00
Daniel Nelson
44320a5421 Add option to amqp output to publish persistent messages (#3528) 2017-11-30 18:40:12 -08:00
Daniel Nelson
a9951710b3 Add time import 2017-11-29 17:05:13 -08:00
Daniel Nelson
6426bca1f8 Update changelog 2017-11-29 16:36:00 -08:00
Nathan Ferch
f92a4f528f Add input plugin for OpenBSD/FreeBSD pf (#3405) 2017-11-29 16:32:50 -08:00
Daniel Nelson
3ba5458220 Update changelog 2017-11-29 12:17:46 -08:00
Bob Shannon
beb9d7560d Add support for glob patterns in net input plugin (#3140) 2017-11-29 12:16:34 -08:00
Daniel Nelson
24d82aebe6 Update changelog 2017-11-29 12:10:56 -08:00
Daniel Nelson
7dc256e845 Update gopsutil version to include netstat fix (#3513) 2017-11-29 12:06:47 -08:00
Daniel Nelson
297897ae0a Add dcos plugin to changelog and readme 2017-11-29 11:54:33 -08:00
Daniel Nelson
414a7e34fb Add input plugin for DC/OS (#3519) 2017-11-29 11:50:32 -08:00
Patrick Hemmer
bf65e19486 Fix postfix plugin age to use ctime, not mtime (#3525) 2017-11-29 11:25:31 -08:00
Daniel Nelson
2c70958c24 Update changelog 2017-11-29 10:52:59 -08:00
Daniel Nelson
d727a6f85c Add slab to mem plugin (#3518) 2017-11-29 10:49:45 -08:00
Daniel Nelson
4e9b19f7a6 Add bond input to readme and update changelog 2017-11-28 15:19:30 -08:00
Ildar Svetlov
132fb50150 Add bond input plugin (#3424) 2017-11-28 15:16:19 -08:00
Daniel Nelson
d1ba75176d Update changelog 2017-11-28 10:10:36 -08:00
Patrick Hemmer
76240b9f18 Add postfix input plugin (#2553) 2017-11-28 10:08:41 -08:00
Daniel Nelson
06e22ee7ac Update changelog 2017-11-27 17:06:50 -08:00
Lukasz Jagiello
a18eedb970 Use deb-systemd-invoke to restart service (#3506)
From man page:
```
deb-systemd-invoke is a Debian-specific helper script which asks
/usr/sbin/policy-rc.d before performing a systemctl call.

deb-systemd-invoke is intended to be used from maintscripts to start
systemd unit files. It is specifically NOT intended to be used
interactively by users. Instead, users should run systemd and use
systemctl, or not bother about the systemd enabled state in case they
are not running systemd.
```

This PR replaces the regular `systemctl` call with `deb-systemd-invoke`.
2017-11-27 17:05:32 -08:00
Lukasz Jagiello
6514399baf Add shadow-utils dependency to rpm package (#3505) 2017-11-27 17:02:16 -08:00
Dylan Meissner
27994abcb5 Jolokia2 handles unordered mbean object name properties (#3504) 2017-11-27 13:43:19 -08:00
Daniel Nelson
a9ada5f65b Update changelog 2017-11-27 12:32:36 -08:00
Laurent Gosselin
f758d0c6c3 Fix global variable collection when using interval_slow option in mysql input (#3500) 2017-11-27 12:29:51 -08:00
Daniel Nelson
7442b5645f Update changelog 2017-11-20 16:50:18 -08:00
Daniel Nelson
d5bd426e0c Fix snmp tools output parsing when they contain Windows eols (#3396) 2017-11-20 16:48:30 -08:00
Daniel Nelson
154b263f14 Update changelog 2017-11-20 16:27:18 -08:00
Leandro Piccilli
92ca661662 Add support for tags in the index name in elasticsearch output (#3470) 2017-11-20 16:25:36 -08:00
Daniel Nelson
54b0b9e727 Update changelog 2017-11-20 14:40:45 -08:00
aromeyer
dc2c8791d0 Add opensmtpd input plugin (#3449) 2017-11-20 14:39:13 -08:00
Daniel Nelson
367bbdeb7e Update changelog 2017-11-20 14:37:09 -08:00
aromeyer
e544d742f9 Add unbound input plugin (#3434) 2017-11-20 14:32:06 -08:00
Daniel Nelson
393c4c6c2d Update changelog 2017-11-20 14:23:16 -08:00
Leandro Piccilli
4d1bc620b2 Add index by week number to Elasticsearch output (#3490) 2017-11-20 14:22:29 -08:00
Daniel Nelson
db8e767f1f Update changelog 2017-11-20 14:20:05 -08:00
Chris Goller
afe05fcfef Use hexadecimal ids and lowercase names in zipkin input (#3488) 2017-11-20 14:19:32 -08:00
Daniel Nelson
9422cca2cc Update changelog 2017-11-16 16:51:02 -08:00
erayaslan
a06ee58785 Use MAX() instead of SUM() for latency measurements in sqlserver (#3471) 2017-11-16 16:49:51 -08:00
Daniel Nelson
b13eea89b1 Update changelog and add particle webhook to readme 2017-11-16 16:11:20 -08:00
David G. Simmons
b813e2ecae Add Particle Webhook Plugin (#3477) 2017-11-16 16:03:19 -08:00
Pierre Fersing
8364417009 Whitelist allowed char classes for graphite output (#3473) 2017-11-15 14:44:20 -08:00
Daniel Nelson
136c15ba33 Skip test requiring cratedb server in short test mode 2017-11-13 15:22:57 -08:00
Daniel Nelson
19839c0167 Update changelog 2017-11-13 15:09:05 -08:00
Daniel Nelson
72682973bd Fix typo in error message 2017-11-13 15:07:54 -08:00
faye-sama
a411306fba Fail metrics parsing on unescaped quotes (#3409)
Before this change, the Fields() method on a metric parsed from a line with
unescaped quotes could panic. This change makes such a line unparseable.

Fixes #3326
2017-11-13 15:06:47 -08:00
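
A minimal sketch of the check this implies (the `hasUnescapedQuote` helper is hypothetical, not the parser's real code): treat a quote inside a string field as valid only when preceded by a backslash, and reject the line otherwise.

```
package main

import "fmt"

// hasUnescapedQuote reports whether the inner bytes of a string field
// contain a '"' that is not preceded by a backslash. Such input should
// fail parsing instead of producing a metric whose Fields() may panic.
func hasUnescapedQuote(s string) bool {
	for i := 0; i < len(s); i++ {
		if s[i] == '\\' { // skip the escaped byte
			i++
			continue
		}
		if s[i] == '"' {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(hasUnescapedQuote(`say \"hi\"`)) // false: quotes escaped
	fmt.Println(hasUnescapedQuote(`say "hi"`))   // true: must not parse
}
```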
Patrick Hemmer
cbd346117a Add tests for procstat systemd & cgroup matching (#3469) 2017-11-13 14:45:31 -08:00
Daniel Nelson
181a56018f Update changelog 2017-11-13 11:02:01 -08:00
Patrick Hemmer
6ee6d55751 Add systemd unit pid and cgroup matching to procstat (#3459) 2017-11-13 10:59:27 -08:00
Daniel Nelson
ebd73b7279 Update changelog 2017-11-10 14:39:11 -08:00
Trevor Pounds
6a57395731 Compile with Go 1.9.2 (#3458) 2017-11-10 14:39:00 -08:00
Daniel Nelson
be13f69305 Update changelog 2017-11-09 14:05:36 -08:00
Felix Geisendörfer
62ec3e50d9 Add CrateDB output plugin (#3210) 2017-11-09 14:03:16 -08:00
Daniel Nelson
07297e80a8 Set 1.4.4 release date 2017-11-08 15:21:20 -08:00
Daniel Nelson
f0578b8c83 Update changelog 2017-11-07 16:48:44 -08:00
Lukasz Jagiello
493af043d3 Add Solr input plugin (#2019) 2017-11-07 16:44:09 -08:00
Daniel Nelson
47d013132a Update changelog 2017-11-07 14:37:04 -08:00
Pierre Tessier
dcff769fed Add modification_time field to filestat input plugin (#3305) 2017-11-07 14:32:48 -08:00
Daniel Nelson
5141f8a2a0 Update contributing documentation 2017-11-07 13:59:06 -08:00
Daniel Nelson
bb14589469 Update changelog 2017-11-07 13:59:06 -08:00
Daniel Nelson
b81bea658f Always ignore autofs filesystems in disk input (#3440) 2017-11-07 11:45:09 -08:00
Daniel Nelson
2c2dc97702 Update changelog 2017-11-07 11:43:15 -08:00
Daniel Nelson
cbbdf1043b Use current time if container read time is zero value (#3437) 2017-11-07 11:41:53 -08:00
Daniel Nelson
c55f285de0 Update changelog 2017-11-07 11:36:29 -08:00
Daniel Nelson
e1295c41c8 Update gopsutil to v2.17.10 (#3441) 2017-11-07 11:26:11 -08:00
Daniel Nelson
e0df62c27b Update changelog 2017-11-06 17:42:42 -08:00
Bob Shannon
fdf12ce6b4 Redact datadog API key in log output (#3420) 2017-11-06 17:41:14 -08:00
Daniel Nelson
e5a265c8c7 Revert particle webhook changes on master 2017-11-06 10:47:10 -08:00
David G. Simmons
112955a9f5 Merge branch 'master' of https://github.com/influxdata/telegraf into dn-particle-plugin 2017-11-04 09:30:17 -04:00
David G. Simmons
da0ca8a870 Revert "Undo Revert "Revert changes since 9b0af4478""
This reverts commit 6e6aefe5da.
2017-11-04 09:19:37 -04:00
David G. Simmons
6e6aefe5da Undo Revert "Revert changes since 9b0af4478"
This reverts commit 2c31345c70.
2017-11-04 09:14:52 -04:00
David G. Simmons
ae2635b547 Readme update 2017-11-04 08:43:13 -04:00
Daniel Nelson
c14478f025 Update http_listener certs 2017-11-03 21:52:45 -07:00
Daniel Nelson
2c31345c70 Revert changes since 9b0af4478 2017-11-03 21:10:56 -07:00
David G. Simmons
4a9fa7ef4b Merge branch 'master' of https://github.com/influxdata/telegraf into dn-particle-plugin 2017-11-03 13:48:45 -04:00
David G. Simmons
7db06d2aa4 Revert "New Particle Plugin"
This reverts commit ba462f5c94.
2017-11-03 13:28:54 -04:00
David G. Simmons
871fae6eb3 Revert "bug fixes and refactoring"
This reverts commit 86961cc814.
2017-11-03 13:28:35 -04:00
David G. Simmons
8e587e74f5 Revert "Update README.md"
This reverts commit 8ed00af10a.
2017-11-03 13:28:00 -04:00
David G. Simmons
440918a03b Revert "Updated README.md"
This reverts commit a6ada03b91.
2017-11-03 13:27:06 -04:00
David G. Simmons
f64b23b724 Revert "Small fixes"
This reverts commit a987118b01.
2017-11-03 13:27:06 -04:00
David G. Simmons
c11739d143 Revert "Updated Test JSON"
This reverts commit 92caf33fff.
2017-11-03 13:27:06 -04:00
David G. Simmons
883696c224 Revert "Updated Test JSON"
This reverts commit 92caf33fff.
2017-11-03 13:16:09 -04:00
David G. Simmons
0ea0519e89 Merge branch 'master' into dn-particle-plugin 2017-11-03 12:13:49 -04:00
David G. Simmons
4596ae70a9 ignore mac-files 2017-11-03 12:07:03 -04:00
David G. Simmons
92caf33fff Updated Test JSON 2017-11-03 12:07:03 -04:00
David G. Simmons
a987118b01 Small fixes
Hoping to pass CircleCI this time
2017-11-03 12:07:03 -04:00
David G. Simmons
a6ada03b91 Updated README.md 2017-11-03 12:07:03 -04:00
David G. Simmons
8ed00af10a Update README.md 2017-11-03 12:07:03 -04:00
David Norton
86961cc814 bug fixes and refactoring 2017-11-03 12:07:03 -04:00
David G. Simmons
ba462f5c94 New Particle Plugin 2017-11-03 12:07:03 -04:00
David G. Simmons
1d1d5e6089 Updated Test JSON 2017-11-02 17:21:50 -04:00
David G. Simmons
8560c2f88d Fixed Readme 2017-11-02 17:19:37 -04:00
David G. Simmons
5d135cece3 test json update 2017-11-02 14:19:01 -04:00
Daniel Nelson
9b0af4478b Remove incorrect comment about linker options 2017-11-01 18:17:51 -07:00
Daniel Nelson
26ccc1f205 Add teamspeak to readme and update changelog 2017-11-01 13:30:43 -07:00
Patric Kanngießer
76ed70340b Add Teamspeak 3 input plugin (#3315) 2017-11-01 13:27:59 -07:00
Maximilien Richer
5f215c22fe Fix typos in comments (#3415) 2017-10-31 17:00:06 -07:00
Maximilien Richer
63842d48fd Add config to input-varnish README (#3414) 2017-10-31 16:58:45 -07:00
Daniel Nelson
777b84d1dc Clarify what it means to filter metrics from processors 2017-10-30 16:32:39 -07:00
Daniel Nelson
c116af35c7 Update changelog 2017-10-30 15:35:34 -07:00
Daniel Nelson
fcfcc803b1 Use explicit schemas in mqtt_consumer input (#3401) 2017-10-30 15:33:20 -07:00
Daniel Nelson
4d5de8698b Update changelog 2017-10-30 13:53:45 -07:00
Aditya C S
23ad959d71 Add support for SSL settings to ElasticSearch output plugin (#3406) 2017-10-30 13:52:40 -07:00
Aditya C S
d9fa916711 Update docker plugin README (#3404) 2017-10-30 12:26:39 -07:00
Daniel Nelson
53b13a20d0 Update changelog 2017-10-27 11:55:17 -07:00
Maximilien Richer
ffa8a4a716 Add instance name option to varnish plugin (#3398)
This change adds a new configuration option to allow probing of
namespaced varnish instances, which are usually reached using the '-n'
switch on the varnish CLI.
2017-10-27 11:53:59 -07:00
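
A hedged sketch of how such an option might plug into the command invocation; the `instanceName` parameter is an assumption for illustration, not the plugin's actual field name.

```
package main

import (
	"fmt"
	"os/exec"
)

// varnishStatCmd builds a varnishstat invocation; when instanceName is
// set, it is appended as the '-n' switch to probe a namespaced instance.
func varnishStatCmd(binary, instanceName string) *exec.Cmd {
	args := []string{"-1"} // one-shot dump of all counters
	if instanceName != "" {
		args = append(args, "-n", instanceName)
	}
	return exec.Command(binary, args...)
}

func main() {
	cmd := varnishStatCmd("varnishstat", "cache1")
	fmt.Println(cmd.Args) // [varnishstat -1 -n cache1]
}
```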
Daniel Nelson
8b4708c82a Update changelog 2017-10-26 13:37:54 -07:00
Vladimir S
88ec171293 Perform DNS lookup before ping (#3385) 2017-10-26 13:35:37 -07:00
Daniel Nelson
5885ef2c1c Update changelog 2017-10-25 15:29:56 -07:00
Daniel Nelson
a519abf13f Gather concurrently from snmp agents (#3365) 2017-10-25 15:28:55 -07:00
Daniel Nelson
6ea61b55d9 Set release date for 1.4.3 2017-10-25 14:15:10 -07:00
Daniel Nelson
206397d475 Update changelog 2017-10-24 16:31:22 -07:00
Jeremy Doupe
a6797a44d5 Add history and summary types to telegraf and prometheus plugins (#3337) 2017-10-24 16:28:52 -07:00
Daniel Nelson
13c1f1524a Update changelog 2017-10-24 16:25:49 -07:00
Daniel Nelson
9a062498e7 Use golang.org/x/sys/unix instead of syscall in diskio (#3384) 2017-10-24 16:22:31 -07:00
Daniel Nelson
f64cf89db1 Update changelog 2017-10-24 15:46:47 -07:00
Daniel Nelson
6d1777276c If the connector name cannot be unquoted, use the raw value (#3371) 2017-10-24 15:36:23 -07:00
Daniel Nelson
65580759fc Update changelog 2017-10-23 12:36:31 -07:00
Sergei Smolianinov
d2f9fc7d8c Fix ACL token usage in consul input plugin (#3376) 2017-10-23 12:31:27 -07:00
Daniel Nelson
77cc071796 Update changelog 2017-10-19 17:06:14 -07:00
Daniel Nelson
4deb6238a3 Add support for decimal timestamps to ts-epoch modifier (#3358) 2017-10-19 16:36:32 -07:00
Daniel Nelson
7088d98304 Update changelog 2017-10-19 16:27:29 -07:00
Daniel Nelson
4243403432 Remove warning when JSON contains null value (#3359) 2017-10-19 16:25:58 -07:00
Mamat Rahmat
3bbc2beeed Fix small typo in documentation (#3364) 2017-10-19 14:47:40 -07:00
Daniel Nelson
0e6a70b199 Update changelog 2017-10-18 17:43:01 -07:00
Daniel Nelson
ec4efe5b03 Use labels in prometheus output for string fields (#3350) 2017-10-18 17:42:30 -07:00
Daniel Nelson
adb1f5588c Update changelog 2017-10-18 14:53:34 -07:00
Daniel Nelson
6e5915c59f Fix prometheus passthrough for existing value types (#3351) 2017-10-18 14:51:08 -07:00
Daniel Nelson
9b59cdd10e Update changelog 2017-10-18 13:57:58 -07:00
clheikes
02baa696c3 Fix TELEGRAF_OPTS expansion in systemd service unit (#3354) 2017-10-18 13:57:32 -07:00
Daniel Nelson
a4fa19252f Update changelog 2017-10-18 12:47:58 -07:00
Daniel Nelson
7ba376964c Update changelog 2017-10-18 12:25:46 -07:00
Ayrdrie
a75ab3e190 Fix mongodb input panic when restarting mongodb (#3355) 2017-10-18 12:24:30 -07:00
Daniel Nelson
2208657d73 Add release date info to FAQ 2017-10-17 10:43:53 -07:00
Daniel Nelson
9d8e935734 Update changelog 2017-10-16 14:26:12 -07:00
Pierre Fersing
f5a9d1bc75 Fix CPU system plugin gets stuck after system suspend (#3342) 2017-10-16 14:25:00 -07:00
Daniel Nelson
4b05edea53 Update changelog 2017-10-16 14:19:16 -07:00
Craig Wickesser
246ffab3e0 Add UDP IPv6 support to statsd input (#3344) 2017-10-16 14:18:36 -07:00
Daniel Nelson
3ea41e885c Update changelog 2017-10-16 11:27:00 -07:00
Daniel Nelson
1f348037b7 Fix case sensitivity issue in sqlserver query (#3336) 2017-10-16 11:26:16 -07:00
Daniel Nelson
86f19dee2b Fix typo in ipmi_sensor readme 2017-10-16 11:10:06 -07:00
Daniel Nelson
a1796989f7 Add ipmi_sensor permission documentation 2017-10-13 13:53:18 -07:00
Daniel Nelson
6b67fedfdc Remove timing sensitive riemann test 2017-10-13 11:30:30 -07:00
Daniel Nelson
5cd3327d5f Update changelog 2017-10-13 11:12:27 -07:00
Adam Johnson
bf9f94eb9d Fix cloudwatch output requires unneeded permissions (#3335) 2017-10-13 11:04:40 -07:00
Daniel Nelson
0f9f757da7 Update changelog 2017-10-12 17:26:58 -07:00
Windkit Li
2f8d0f4d47 Fix snmpwalk address format in leofs input (#3328) 2017-10-12 17:26:14 -07:00
Daniel Nelson
024dea2ff9 Update changelog 2017-10-12 15:52:01 -07:00
Daniel Nelson
fa25e123d8 Fix container name filters in docker input (#3331) 2017-10-12 15:50:09 -07:00
Patrick Hemmer
bed14e5037 Fix documented equation for diskio average queue depth (#3334) 2017-10-12 15:08:51 -07:00
Daniel Nelson
c74c29b164 Remove suggested plugins from readme.
These are confusing since we don't support all of the examples.
2017-10-11 12:56:33 -07:00
Daniel Nelson
4e0c8e6026 Set 1.4.2 release date 2017-10-10 13:29:31 -07:00
Daniel Nelson
d7ea83f39b Update readme and changelog for basicstats aggregator 2017-10-10 12:04:41 -07:00
Toni Moreno
b641f06552 Add new basicstats aggregator (#2167) 2017-10-10 12:02:01 -07:00
Pierre Tessier
c7a6d4eaa4 Fix link for wavefront plugin in changelog (#3317) 2017-10-10 11:21:46 -07:00
Daniel Nelson
61b0336d97 Use 5 second timeout overhead when waiting for ping to complete 2017-10-09 15:09:07 -07:00
Daniel Nelson
761544f56d Add HasPoint method to testutil.Accumulator 2017-10-09 15:02:57 -07:00
Daniel Nelson
0f452ad0df Document /etc/default/telegraf file 2017-10-06 16:57:57 -07:00
Daniel Nelson
4093bc98b7 Update changelog 2017-10-06 16:17:09 -07:00
Christian Meilke
75567d5b51 Add ability to limit node stats in elasticsearch input (#3304) 2017-10-06 16:16:32 -07:00
Daniel Nelson
59bb31e765 Use golang 1.9.1 2017-10-05 16:19:53 -07:00
Daniel Nelson
13c7802b84 Update changelog 2017-10-05 16:15:43 -07:00
Daniel Nelson
cce40c515a Use chunked transfer encoding in InfluxDB output (#3307) 2017-10-05 16:14:21 -07:00
Daniel Nelson
6e1fa559a3 Update changelog 2017-10-05 16:05:51 -07:00
Daniel Nelson
f56dda0ac8 Fix panic in cpu input if number of cpus changes (#3306) 2017-10-05 16:02:21 -07:00
Daniel Nelson
4fab572b6b Release buffer back to pool earlier 2017-10-05 12:12:14 -07:00
Daniel Nelson
b9f319529f Update changelog 2017-10-04 15:30:11 -07:00
Christian Meilke
0bb32570ba Add cluster health level configuration to elasticsearch input (#3269) 2017-10-04 15:29:32 -07:00
Daniel Nelson
a4ea4c7a25 Add smart to changelog and readme 2017-10-04 15:18:15 -07:00
Rickard von Essen
e69c3f9d1c Add smart input plugin for collecting S.M.A.R.T. data (#2449) 2017-10-04 15:15:58 -07:00
Daniel Nelson
002ccf3295 Update changelog 2017-10-03 15:25:19 -07:00
Daniel Nelson
a163effa6d Add support for proxy environment variables to http_response (#3302) 2017-10-03 15:22:57 -07:00
Daniel Nelson
93ff811358 Update changelog 2017-10-03 14:37:02 -07:00
Aditya C S
dd4299e925 Collect Docker Swarm service metrics in docker input plugin (#3141) 2017-10-03 14:36:26 -07:00
Daniel Nelson
b610276485 Skip invalid urls in nginx input 2017-10-03 10:54:31 -07:00
David Norton
6aee40fac1 bug fixes and refactoring 2017-10-03 09:07:15 -04:00
Pierre Tessier
79f66dc5b3 Added newline to each metric line in wavefront output (#3290) 2017-10-02 17:42:21 -07:00
Daniel Nelson
0a55ab42b4 Update changelog 2017-10-02 17:39:32 -07:00
Jimena Cabrera Notari
aba269e94c Add extra wired tiger cache metrics to mongodb input (#3281) 2017-10-02 17:38:51 -07:00
Daniel Nelson
f67350107d Update changelog 2017-10-02 17:16:38 -07:00
Daniel Nelson
8e3ed96d6f Fix case sensitivity error in sqlserver input (#3287) 2017-10-02 17:15:34 -07:00
Daniel Nelson
771fbc311a Regenerate TLS certs due to expiration 2017-10-02 15:44:55 -07:00
David G. Simmons
d7b88b10ad New Particle Plugin 2017-10-02 16:50:23 -04:00
Daniel Nelson
cdca81c999 Fix mqtt_consumer connection_timeout test 2017-10-02 12:28:31 -07:00
Daniel Nelson
ed6f438c9d Add Wavefront output to changelog and readme 2017-09-29 16:15:48 -07:00
Pierre Tessier
366f3f560c Add Wavefront output plugin (#3160) 2017-09-29 16:13:08 -07:00
Daniel Nelson
e4f5547d37 Update example config 2017-09-29 16:09:31 -07:00
Daniel Nelson
e1bf655ef9 Add deprecation notice to jolokia sample config 2017-09-29 16:08:31 -07:00
Daniel Nelson
29b6f4168c Update changelog 2017-09-29 15:59:56 -07:00
Daniel Nelson
3d62e045af Fix format of connection_timeout in mqtt_consumer (#3286) 2017-09-29 15:58:38 -07:00
Daniel Nelson
ad4a5aa7a0 Document how to exclude kubernetes annotation 2017-09-29 14:07:19 -07:00
Daniel Nelson
f2cb1da7cf Update changelog 2017-09-29 11:50:15 -07:00
François de Metz
c3d15f0aff Add support for the rollbar occurrence webhook event. (#1692) 2017-09-29 11:49:22 -07:00
David G. Simmons
b2453e3ec3 Revert "New Particle.io Plugin for Telegraf"
This reverts commit c3b11f9cfb.
Accidentally pushed to master instead of my fork. Backing it out.
2017-09-29 12:57:13 -04:00
David G. Simmons
c3b11f9cfb New Particle.io Plugin for Telegraf
Only the tests need to be fixed.
2017-09-29 12:45:06 -04:00
Daniel Nelson
cd1791494a Update changelog 2017-09-27 11:38:43 -07:00
Daniel Nelson
402460f038 Use underscore as default opentsdb separator
Preserves backwards compatibility
2017-09-27 11:36:41 -07:00
owlet123
f85db90780 Add configurable separator for metrics and fields in opentsdb output (#3106) 2017-09-27 11:29:40 -07:00
Daniel Nelson
9bddd50a64 Add deprecation notice to jolokia plugin 2017-09-27 10:52:10 -07:00
Daniel Nelson
b8a0b8461a Update changelog and readme for jolokia2 plugin 2017-09-26 17:42:38 -07:00
Dylan Meissner
ee26191eb5 Add redesigned Jolokia input plugin (#2278) 2017-09-26 17:34:46 -07:00
Daniel Nelson
cadafa6405 Update changelog 2017-09-26 16:03:04 -07:00
Daniel Nelson
22a9ffbb9d Allow JSON data format to contain zero metrics (#3268) 2017-09-26 15:58:33 -07:00
Daniel Nelson
2e1457a496 Update changelog 2017-09-26 15:38:22 -07:00
Daniel Nelson
8614445235 Fix parsing of JSON with a UTF8 BOM in httpjson (#3267) 2017-09-26 15:36:00 -07:00
Daniel Nelson
f23d1eb078 Update changelog 2017-09-26 15:28:07 -07:00
Daniel Nelson
ef5c12bd86 Fix dmcache tests with 32bit int 2017-09-26 15:25:57 -07:00
Daniel Nelson
c013cc1497 Fix cgroup tests with 32bit int 2017-09-26 15:25:57 -07:00
Daniel Nelson
bb665cf013 Fix ceph tests with 32bit int 2017-09-26 15:25:57 -07:00
Daniel Nelson
5dff5932fd Fix nginx_plus tests with 32bit int 2017-09-26 15:25:57 -07:00
Daniel Nelson
f823fc73f6 Allow 64bit integers in kernel_vmstat 2017-09-26 15:25:57 -07:00
Daniel Nelson
fd702e6bb8 Set 1.4.1 release date in changelog 2017-09-26 14:19:02 -07:00
Daniel Nelson
50024c1860 Update changelog 2017-09-25 16:34:04 -07:00
Lukasz Jagiello
a4b8805f7f Add support for NSQLookupd to nsq_consumer (#3215) 2017-09-25 16:33:05 -07:00
James
837e6b1a32 Add additional numeric type handling tests for postgresql_extensible (#3066) 2017-09-25 10:58:10 -07:00
Agniva De Sarker
063f3f68df Improve statsd plugin perf by using a byte buffer pool (#3254) 2017-09-25 10:55:02 -07:00
Daniel Nelson
b24663b0bd Remove nightly versioning scheme 2017-09-22 18:07:08 -07:00
Daniel Nelson
366bda45c3 Remove out of date Vagrantfile 2017-09-22 17:35:58 -07:00
Daniel Nelson
c010fb1c3c Fix build versioning; add dev.docker file 2017-09-22 17:35:58 -07:00
Daniel Nelson
08c197f73a Fix golang version 2017-09-22 17:35:58 -07:00
Daniel Nelson
cafb22d145 Fix unittest for golang 1.9 2017-09-22 17:35:58 -07:00
Christian Meilke
73df179bd6 Tag original URL for k8s services in prometheus input (#3257) 2017-09-22 17:26:19 -07:00
Daniel Nelson
c3bea59f3b Update changelog 2017-09-22 11:46:47 -07:00
Daniel Nelson
52393582d2 Unlock Statsd when stopping to prevent deadlock (#3258) 2017-09-22 11:45:45 -07:00
Daniel Nelson
ce29ca78e3 Add nginx_plus to changelog and readme 2017-09-19 11:49:55 -07:00
Patrick O'Brien
6e6ed075dc Add new nginx_plus input plugin (#3214) 2017-09-19 11:46:01 -07:00
Daniel Nelson
c0a4bd99a1 Update changelog 2017-09-19 11:27:57 -07:00
Paulo Cabido
decb09e760 Add configurable metrics endpoint to prometheus output (#3245) 2017-09-19 11:27:11 -07:00
Daniel Nelson
a63f80e017 Build with go 1.9 on circleci 2017-09-18 16:30:09 -07:00
Daniel Nelson
daee48c861 Update prometheus input documentation 2017-09-18 16:21:45 -07:00
Daniel Nelson
dea8bf7ac0 Update changelog 2017-09-18 15:07:18 -07:00
Christian Meilke
292c5229bf Add support for k8s service DNS discovery to prometheus input (#3236) 2017-09-18 15:06:11 -07:00
Daniel Nelson
0048bf2120 Update changelog 2017-09-18 14:25:17 -07:00
Daniel Nelson
b8e134cd37 Fix arm64 packages contain 32-bit executable (#3246) 2017-09-18 14:22:54 -07:00
Patrick Hemmer
0339dc7faf Add process resource limits to procstat input (#3231) 2017-09-15 11:16:44 -07:00
Daniel Nelson
575a07c985 Update input plugin example readme. 2017-09-14 15:50:55 -07:00
Daniel Nelson
b94cda6b46 Update changelog 2017-09-14 15:28:47 -07:00
Trevor Pounds
73372872c2 Fix panic in statsd p100 calculation (#3230) 2017-09-14 15:27:42 -07:00
Daniel Nelson
103ae3b710 Update changelog 2017-09-14 15:22:46 -07:00
Trevor Pounds
171332c579 Add support for timing sums in statsd input (#3234) 2017-09-14 15:21:54 -07:00
Daniel Nelson
875ab3c4b7 Update changelog 2017-09-14 15:05:03 -07:00
Mark Wilkinson - m82labs
1c5ebd4be3 Fix duplicate keys in perf counters sqlserver query (#3175) 2017-09-14 15:04:13 -07:00
Daniel Nelson
103d24bfba Update changelog 2017-09-14 15:00:55 -07:00
Daniel Nelson
d5f48e3e96 Fix skipped line with empty target in iptables (#3235) 2017-09-14 14:59:28 -07:00
Daniel Nelson
7a41d2c586 Update changelog 2017-09-14 13:06:58 -07:00
Trevor Pounds
fa1982323a Fix counter and gauge metric types. (#3232) 2017-09-14 13:05:37 -07:00
Daniel Nelson
cdf63c5776 Update changelog 2017-09-13 17:31:39 -07:00
Daniel Nelson
0a8c2e0b3b Whitelist allowed char classes for opentsdb output. (#3227) 2017-09-13 17:30:52 -07:00
Daniel Nelson
9197a59cdb Update changelog 2017-09-13 17:28:33 -07:00
Dimitris Rozakis
9c8f4afa37 Respect path prefix in influx output uri (#3224) 2017-09-13 17:27:01 -07:00
Daniel Nelson
eebee9759f Fix fluentd test 2017-09-12 17:57:55 -07:00
Daniel Nelson
ee85f9275e Update changelog 2017-09-12 17:27:50 -07:00
Daniel Nelson
4e53464fe2 Remove unneeded error check 2017-09-12 17:24:57 -07:00
Adrián López
2163981872 Add timeout option for kubernetes (#3211) 2017-09-12 17:22:15 -07:00
Daniel Nelson
c5cfde667a Update changelog 2017-09-12 17:17:41 -07:00
Daniel Nelson
8a68e7424c Fix optional field types in fluentd input 2017-09-12 17:15:19 -07:00
Daniel Nelson
cc63b3b667 Update changelog 2017-09-11 12:27:39 -07:00
DanKans
5488f4b3ac Fix MQTT input exits if Broker is not available on startup (#3202) 2017-09-11 12:24:51 -07:00
Daniel Nelson
14a4b108b4 Update changelog 2017-09-11 11:57:18 -07:00
Daniel Nelson
32f313a6a6 Add polling method to logparser and tail inputs (#3213) 2017-09-11 11:56:04 -07:00
Daniel Nelson
c720200883 Update changelog 2017-09-11 11:54:18 -07:00
DanKans
f62e543003 Fix address already in use with webhooks input during reload (#3206) 2017-09-11 11:51:45 -07:00
Daniel Nelson
be83c8c8f0 Update changelog 2017-09-08 16:02:15 -07:00
Jeff Nickoloff
c809debfd4 TLS and MTLS enhancements to HTTPListener input plugin (#3191) 2017-09-08 16:01:16 -07:00
Daniel Nelson
247c2e71fd Update changelog 2017-09-08 15:36:26 -07:00
Daniel Nelson
7b08f9d099 Add support for standard proxy env vars in outputs. (#3212) 2017-09-08 15:35:20 -07:00
Daniel Nelson
d0b690f040 Fix short tests on darwin (#3099) 2017-09-08 13:03:37 -07:00
Daniel Nelson
98ca22597d Update changelog 2017-09-06 14:29:03 -07:00
Raúl Benencia
99dfc69fbb Include mount mode option in disk metrics (#3027) 2017-09-06 14:28:11 -07:00
Daniel Nelson
144862354a Update changelog 2017-09-06 14:20:38 -07:00
Daniel Nelson
402a0f16e1 Fix typo 2017-09-06 14:19:42 -07:00
Pavel Gurkov
5d4eec606f Add Kafka output plugin topic_suffix option (#3196) 2017-09-06 14:18:26 -07:00
Daniel Nelson
ab1c11b06d Add 1.4.0 release date 2017-09-05 17:14:11 -07:00
Daniel Nelson
864ea1efaf Improve question title in FAQ 2017-09-05 17:12:36 -07:00
Daniel Nelson
4fb1c3a2bc Add FAQ doc with dns resolver information 2017-09-05 13:12:11 -07:00
Daniel Nelson
9796d3c99d Use ip address for default InfluxDB ip in config
Helps with initial setup if localhost cannot be resolved due to the pure
Go resolver.
2017-09-05 12:55:21 -07:00
Daniel Nelson
98e784faf3 Sort metrics before comparing in graphite test 2017-09-05 12:50:30 -07:00
rdxmb
16d6011ca1 Fix docker image name in docs (#3193) 2017-09-05 11:44:51 -07:00
Daniel Nelson
f43af72785 Update changelog 2017-08-31 13:43:47 -07:00
Daniel Nelson
28d16188b3 Fix panic when handling string fields with escapes (#3188) 2017-08-30 21:16:37 -07:00
Daniel Nelson
19f3264073 Update changelog 2017-08-29 16:27:02 -07:00
Daniel Nelson
8225bd0173 Convert bool fields to int in graphite serializer 2017-08-29 16:22:03 -07:00
Seua Polyakov
3806424aab Skip non-numerical values in graphite format (#3179) 2017-08-29 15:59:38 -07:00
Daniel Nelson
ef8876b70b Move changelog item to 1.4 2017-08-28 17:17:03 -07:00
Daniel Nelson
5fd8ab36d3 Update changelog 2017-08-28 17:08:44 -07:00
Jeff Nickoloff
ac1fa05672 Added CloudWatch metric constraint validation (#3183) 2017-08-28 16:56:03 -07:00
Daniel Nelson
73d57c8a02 Update changelog 2017-08-28 16:30:51 -07:00
Nevins
95fe0e43f5 Add support for sharding based on metric name (#3170) 2017-08-28 16:24:38 -07:00
Daniel Nelson
02f7b0d030 Update changelog 2017-08-28 16:11:00 -07:00
Dylan Meissner
a9a40cbf87 HTTP headers can be added to InfluxDB output (#3182) 2017-08-28 16:08:50 -07:00
Daniel Nelson
a98496591a Update changelog 2017-08-25 18:08:33 -07:00
Ashton Kinslow
0a6541dfa8 Fix NSQ input plugin when used with version 1.0.0-compat 2017-08-25 18:06:48 -07:00
Daniel Nelson
8ecc58639a Close response bodies in http_listener test 2017-08-25 13:58:45 -07:00
Daniel Nelson
6abecd0ac7 Update changelog 2017-08-25 12:59:19 -07:00
Rickard von Essen
0502b65316 Don't fail parsing of zpool stats if pool health is UNAVAIL on FreeBSD (#3149) 2017-08-25 12:57:35 -07:00
Daniel Nelson
e400fcf5da Update changelog 2017-08-25 11:55:59 -07:00
Jan Willem Janssen
d449833de9 Fix parsing of SHM remotes in ntpq input (#3163) 2017-08-25 11:54:06 -07:00
Daniel Nelson
58751fa4df Update fail2ban documentation 2017-08-25 11:42:07 -07:00
Daniel Nelson
656ce31d98 Fix amqp_consumer data_format documentation
closes #3164
2017-08-24 13:17:29 -07:00
Daniel Nelson
485e273187 Add links to nightly builds 2017-08-23 15:42:25 -07:00
Daniel Nelson
f95c239a3f Update changelog 2017-08-23 15:21:48 -07:00
Daniel Nelson
ae24a0754b Escape backslash within string fields (#3161) 2017-08-23 15:17:26 -07:00
Daniel Nelson
f253623231 Update changelog 2017-08-23 15:16:04 -07:00
Rickard von Essen
f0db4fd901 Enable hddtemp on all platforms (#3153)
Also disables dmcache tests on non-linux.
2017-08-23 15:14:32 -07:00
Daniel Nelson
8c68bd9ddb Update changelog 2017-08-22 17:03:00 -07:00
Daniel Nelson
9fc7220c2e Don't start Telegraf on install in Amazon Linux (#3156) 2017-08-22 17:01:59 -07:00
Daniel Nelson
6597b55477 Update changelog 2017-08-22 16:55:15 -07:00
Daniel Nelson
1f4a997164 Don't retry points beyond retention policy (#3155) 2017-08-22 16:52:26 -07:00
Daniel Nelson
5224b526f4 Hide output of git describe 2017-08-22 13:32:52 -07:00
Rickard von Essen
371638ce56 Enable fail2ban on all platforms (#3151) 2017-08-22 12:58:00 -07:00
Rickard von Essen
53c5d3a290 Enable chrony for all platforms (#3152) 2017-08-22 11:49:51 -07:00
Daniel Nelson
b480022330 Update config directory documentation 2017-08-22 11:33:26 -07:00
Daniel Nelson
ccf17a9f93 Cache intermediate objects during build 2017-08-21 17:26:55 -07:00
Chris Goller
13a6b917c3 Add JSON input support to zipkin plugin (#3150) 2017-08-21 17:24:54 -07:00
Daniel Nelson
1f1e9cc49f Add win_services to the readme 2017-08-18 17:57:30 -07:00
Daniel Nelson
70c2b83f00 Update histogram aggregator documentation (#3133) 2017-08-18 13:24:05 -07:00
Daniel Nelson
4de264ffc8 Remove version test 2017-08-18 11:08:48 -07:00
Daniel Nelson
36c2c88fd2 Update example config 2017-08-17 18:54:06 -07:00
Daniel Nelson
e31d91f0f9 Add queues to rabbitmq documentation (#3135) 2017-08-17 18:52:27 -07:00
Daniel Nelson
3006ccbf2f Update master for 1.5 development 2017-08-16 16:54:15 -07:00
Daniel Nelson
8b588ea37f Update sample config 2017-08-16 16:46:40 -07:00
Daniel Nelson
7608251633 Add tomcat input to changelog and readme 2017-08-16 15:36:56 -07:00
Daniel Nelson
1e9d7cd6e9 Add error status handle to tomcat input 2017-08-16 15:33:47 -07:00
mlindes
a91457e001 Add tomcat input plugin (#3112) 2017-08-16 15:33:20 -07:00
Daniel Nelson
fd3a9bf46a Update changelog 2017-08-16 12:26:00 -07:00
Daniel Nelson
ca394fcfb2 Discard logging from tail library (#3128) 2017-08-16 12:06:07 -07:00
Daniel Nelson
3819607511 Allow using system plugin in Windows (#3127) 2017-08-16 12:05:46 -07:00
Daniel Nelson
eb0215c382 Remove log message on ping timeout (#3126) 2017-08-16 11:59:41 -07:00
Daniel Nelson
09153c815c Move http_response headers to end of configuration.
If the subtable comes before other options, they will be placed in the
subtable.
2017-08-15 11:50:08 -07:00
Daniel Nelson
9bc13f143e Test for nil metric before reading tags in logparser 2017-08-15 11:43:16 -07:00
Daniel Nelson
032348c7a5 Update changelog 2017-08-14 14:51:28 -07:00
Bob Shannon
5fbdd09aaf Add gzip content-encoding support to influxdb output (#2978) 2017-08-14 14:50:15 -07:00
Daniel Nelson
7d5dae5a08 Improve apache input docs (#3120) 2017-08-11 17:50:51 -07:00
Daniel Nelson
54be037911 Use double hyphen in cli examples 2017-08-11 16:26:54 -07:00
Daniel Nelson
5003809e97 Merge LDFLAGS from env into build 2017-08-11 16:26:54 -07:00
G-Research
1b50f14d55 Build NTPQ input on Windows (#3117) 2017-08-11 13:36:25 -07:00
Patrick Hemmer
b0109b3550 Add weighted_io_time to diskio input (#3119) 2017-08-11 11:49:42 -07:00
Daniel Nelson
257b460f61 Update changelog 2017-08-10 12:41:09 -07:00
Daniel Nelson
287a44de5e Skip compilation of logparser and tail on solaris (#3113)
Allows compilation for solaris
2017-08-10 12:36:11 -07:00
Daniel Nelson
73897d1f1c Update changelog 2017-08-10 10:22:11 -07:00
Daniel Nelson
1e2d594af0 Converge to typed value in prometheus output (#3104) 2017-08-10 10:19:28 -07:00
Daniel Nelson
83c003e594 Update changelog 2017-08-09 11:48:36 -07:00
Daniel Nelson
84ce9629a8 Tweak formatting of varnish README 2017-08-09 11:48:12 -07:00
Daniel Nelson
3c14b46f6f Fix ordering of all target 2017-08-09 11:47:55 -07:00
Benjamin Stromski
8a2373e8c8 Add option to run varnish under sudo (#3097) 2017-08-09 11:38:54 -07:00
Daniel Nelson
cb04fa1e9c Add diskio %util sample query 2017-08-09 11:28:27 -07:00
Seva Poliakov
92af42a847 Remove tag_env duplicate from docker README (#3109) 2017-08-09 10:21:22 -07:00
Daniel Nelson
bceb020d72 Update changelog and readme 2017-08-08 11:50:16 -07:00
Rodolphe Blancho
d9deb266df Add salesforce input plugin (#3075) 2017-08-08 11:48:01 -07:00
Slawomir Skowron
f3435f1c59 Add TCP listener for statsd input (#2293) 2017-08-08 11:41:26 -07:00
Daniel Nelson
f9573ad969 Remove Godeps_windows from build.py 2017-08-07 17:43:06 -07:00
Daniel Nelson
40aacd9046 Fix artifact redirection 2017-08-07 17:41:52 -07:00
Daniel Nelson
5e73f3e816 Only upload nightly if on master branch 2017-08-07 17:24:35 -07:00
Daniel Nelson
a1e7a5f474 Upload as nightly builds if PACKAGE set 2017-08-07 17:16:34 -07:00
Daniel Nelson
828c5817f9 Update changelog 2017-08-07 16:18:01 -07:00
Daniel Nelson
3e27134872 Add path tag to logparser containing path of logfile (#3098) 2017-08-07 16:16:31 -07:00
Daniel Nelson
1fb5373962 Build releases with -w -s ldflags 2017-08-07 15:47:20 -07:00
Daniel Nelson
75e6ebcf93 Update changelog 2017-08-07 14:39:22 -07:00
Vlasta Hajek
e21f2de8b8 Add Windows Services input plugin (#3023) 2017-08-07 14:36:15 -07:00
Daniel Nelson
795f02ab88 Cleanup Makefile (#3089) 2017-08-03 11:54:05 -07:00
Daniel Nelson
360d03e301 Update changelog and readme 2017-08-02 18:02:41 -07:00
Daniel Nelson
137b312fa9 Add Zipkin input plugin (#3080) 2017-08-02 17:58:26 -07:00
Daniel Nelson
ce12913bc2 Update precision documentation and examples
Precision is no longer used by the InfluxDB output.

closes #3079
2017-08-01 15:02:36 -07:00
Daniel Nelson
d82c5062b8 Add Appveyor continuous integration (#3074) 2017-07-31 16:12:09 -07:00
Daniel Nelson
6666e6a5a7 Update changelog 2017-07-31 11:37:32 -07:00
Vladislav Mugultyanov
9c0aadf445 Add histogram aggregator plugin (#2387) 2017-07-31 11:33:51 -07:00
Daniel Nelson
3bd14ed229 Update changelog 2017-07-31 11:30:27 -07:00
DanKans
5e95367f6c Sanitize password from couchbase metric (#3033) 2017-07-31 11:29:14 -07:00
Jeff Ashton
c31e7d0b91 Fix win_perf_counters tests (#3068) 2017-07-31 11:03:26 -07:00
Oscar Sironi
f8c84302a4 Add config file path troubleshooting advice for Windows (#3071) 2017-07-31 10:58:12 -07:00
Daniel Nelson
9143670d6e Update changelog 2017-07-27 17:19:33 -07:00
Daniel Nelson
f0bd69d904 Add tls options to docker input (#3063) 2017-07-27 17:18:44 -07:00
Daniel Nelson
7179290dea Update changelog 2017-07-27 15:21:52 -07:00
Daniel Nelson
c4297f40ad Allow iptable entries with trailing text (#3060) 2017-07-27 15:21:06 -07:00
Daniel Nelson
0d4c954e01 Update changelog 2017-07-27 15:15:11 -07:00
Daniel Nelson
d6cf9f4f30 Fix docker memory and cpu reporting in Windows (#3043) 2017-07-27 15:12:29 -07:00
Daniel Nelson
5f88be022c Add circleci parameter to build packages 2017-07-26 17:13:50 -07:00
Daniel Nelson
284ab79a37 Set 1.3.5 release date 2017-07-26 15:53:49 -07:00
Daniel Nelson
2bd6c80506 Update changelog 2017-07-25 17:12:45 -07:00
Daniel Nelson
0ca936a12e Default to localhost if zookeeper has no servers set (#3056) 2017-07-25 17:08:32 -07:00
Daniel Nelson
a26fc52181 Fix panic in logparser if file cannot be opened (#3055) 2017-07-25 17:08:03 -07:00
Daniel Nelson
83f575fcea Add redis_version field to redis input (#3054) 2017-07-25 17:07:43 -07:00
Daniel Nelson
ffd1f25b75 Update changelog 2017-07-25 16:09:48 -07:00
Daniel Nelson
1658404cea Update changelog 2017-07-25 15:43:13 -07:00
Daniel Nelson
82ea04f188 Fix prometheus output cannot be reloaded (#3053) 2017-07-25 15:41:18 -07:00
xin053
273d0b85b0 Correct spelling of toml field in mysql input (#3051) 2017-07-25 10:57:27 -07:00
Théophile Helleboid - chtitux
f3917ec5ff Fix typo in postgresql_extensible/README.md (#3052) 2017-07-25 10:39:14 -07:00
Daniel Nelson
428455e032 Update changelog 2017-07-24 18:26:29 -07:00
Daniel Nelson
573bd4aa32 Start first aggregator period at startup time (#3050)
Fixes issue where metrics collected immediately after startup would not
be aggregated.
2017-07-24 18:25:05 -07:00
Oskar
ab5205f8c3 Fix go vet under windows (#3046) 2017-07-24 12:36:33 -07:00
Daniel Nelson
85aa212467 Update changelog 2017-07-21 16:57:28 -07:00
Daniel Nelson
840d19db35 Add network option to dns_query (#3042) 2017-07-21 16:56:08 -07:00
Daniel Nelson
1c267e9b16 Update changelog 2017-07-21 15:46:22 -07:00
Andy Cobaugh
1ff6e92193 Add input plugin for OpenLDAP (#2612) 2017-07-21 15:44:20 -07:00
Daniel Nelson
c82c0e596b Update changelog 2017-07-21 14:31:25 -07:00
Daniel Nelson
31ce98fa91 Don't match pattern on any error (#3040)
This prevents a pattern with no wildcards from matching when permission
is denied.
2017-07-21 14:28:14 -07:00
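
A minimal sketch of the rule described above, assuming a glob-style matcher (the `match` helper is hypothetical): a wildcard-free pattern counts as a match only when os.Stat positively confirms the path exists, so a permission error no longer passes.

```
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// match resolves a pattern to paths. A pattern without wildcards is only
// treated as a literal path when os.Stat confirms it exists; any other
// error (such as permission denied) must not count as a match.
func match(pattern string) ([]string, error) {
	if !strings.ContainsAny(pattern, "*?[") {
		if _, err := os.Stat(pattern); err != nil {
			return nil, err // do not match on *any* error
		}
		return []string{pattern}, nil
	}
	return filepath.Glob(pattern)
}

func main() {
	paths, err := match("/etc/hostname")
	fmt.Println(paths, err)
}
```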
Daniel Nelson
4d66db1603 Update changelog 2017-07-21 14:26:39 -07:00
Yann Cézard
681d20083a Only report cpu usage for online cpus in docker input (#3035) 2017-07-21 14:25:17 -07:00
Daniel Nelson
4ee74ff54b Document GNU make requirement 2017-07-21 11:15:00 -07:00
Daniel Nelson
16073e4172 Update changelog 2017-07-21 10:57:39 -07:00
Daniel Nelson
3c204d409d Line wrap documentation 2017-07-21 10:57:12 -07:00
DanKans
d903a9142d Fix filtering when both pass and drop match an item (#3036)
Adjusts the logic in the functions responsible for passing metrics so that
they are processed correctly when pass and drop are defined together.
2017-07-21 10:53:57 -07:00
Daniel Nelson
a2d4453269 Update changelog 2017-07-19 13:09:49 -07:00
DanKans
34c042c7dc Fix combined tagdrop/tagpass filtering (#3031) 2017-07-19 13:08:40 -07:00
Daniel Nelson
4dfe2312d0 Switch skipped kafka test 2017-07-18 18:18:57 -07:00
Daniel Nelson
c740dce36d Update download information in readme 2017-07-18 13:54:38 -07:00
Daniel Nelson
475a926d43 Update changelog 2017-07-18 11:03:07 -07:00
DanKans
d2626f1da6 Fix ntpq parse issue when using dns_lookup (#3026) 2017-07-18 11:01:08 -07:00
soldierkam
f5a8415c78 Add read timeout to socket_listener 2017-07-17 18:34:36 -07:00
Daniel Nelson
1d416a4213 Remove command in example output 2017-07-17 15:08:17 -07:00
Daniel Nelson
731ab9773d Update changelog 2017-07-17 12:01:35 -07:00
Daniel Nelson
d8f7b76253 Prevent startup if intervals are 0 2017-07-17 11:58:47 -07:00
Daniel Nelson
dbe2f79019 Update changelog 2017-07-14 10:45:32 -07:00
Bob Shannon
ef63908541 Add result_type field to net_response input plugin (#2990) 2017-07-14 10:43:36 -07:00
Daniel Nelson
27e47614c6 Add credits for new plugins to changelog 2017-07-13 16:14:18 -07:00
Daniel Nelson
dc4a133b11 Update changelog 2017-07-13 16:00:09 -07:00
DanKans
f4d67d8c3c Add fluentd input plugin (#2661) 2017-07-13 15:58:20 -07:00
Daniel Nelson
785798611e Update changelog 2017-07-13 15:39:45 -07:00
Daniel Nelson
b165ce4cd5 Prevent possible deadlock when using aggregators (#3016)
Looping the metrics back through the same channel could result in a
deadlock. By using a new channel and locking the processor, we can ensure
that all stages make continual progress.
2017-07-13 15:34:21 -07:00
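
A minimal sketch of the shape of the fix, not the Telegraf code: results flow into a distinct downstream channel instead of being looped back into the channel the stage reads from, which could deadlock once that channel fills.

```
package main

import "fmt"

func main() {
	in := make(chan int, 1)
	out := make(chan int, 1) // separate channel instead of looping into 'in'
	done := make(chan struct{})

	go func() { // aggregator stage
		for m := range in {
			out <- m * 10 // would risk deadlock if this were 'in <- m*10'
		}
		close(out)
	}()

	go func() { // output stage
		for m := range out {
			fmt.Println("write:", m)
		}
		close(done)
	}()

	for i := 0; i < 5; i++ {
		in <- i
	}
	close(in)
	<-done
}
```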
Daniel Nelson
d9d1ca5a46 Add release date for 1.3.4 2017-07-12 17:15:38 -07:00
Daniel Nelson
2c10806fef Update changelog 2017-07-12 12:04:43 -07:00
Daniel Nelson
5d2c093105 Prevent Write from being called concurrently (#3011) 2017-07-12 12:03:23 -07:00
Daniel Nelson
f68bab1667 Update changelog 2017-07-11 15:55:44 -07:00
Daniel Nelson
1388e2cf92 Do not allow metrics with trailing slashes (#3007)
It is not possible to encode a measurement, tag, or field whose last
character is a backslash, because the backslash is an unescapable
character. Because of the tight coupling between line protocol and the
internal metric model, prevent metrics like this from being created.

Measurements with a trailing backslash are not allowed and the point will
be dropped. Tags and fields with a trailing backslash will be dropped from
the point.
2017-07-11 15:54:38 -07:00
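
A minimal sketch of the validity check this implies (helper name hypothetical): since the final backslash cannot be escaped, a trailing backslash makes the value unencodable.

```
package main

import (
	"fmt"
	"strings"
)

// validMeasurement reports whether a measurement name can be encoded in
// line protocol. A trailing backslash cannot be escaped, so it makes the
// whole point unencodable; an offending tag or field can instead simply
// be dropped from the point.
func validMeasurement(name string) bool {
	return !strings.HasSuffix(name, `\`)
}

func main() {
	fmt.Println(validMeasurement(`cpu`))  // true
	fmt.Println(validMeasurement(`cpu\`)) // false: drop the point
}
```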
Daniel Nelson
af318f4959 Update changelog 2017-07-11 14:10:09 -07:00
JSH
9f244cf1ac Fix chrony plugin does not track system time offset (#2989) 2017-07-11 14:08:40 -07:00
Daniel Nelson
885aa8e6e1 Update changelog 2017-07-10 19:07:28 -07:00
Daniel Nelson
945446b36f Fix handling of escapes within fieldset (#3003)
Line protocol does not require or allow escaping of the backslash itself;
the only requirement for a byte to be escaped is that it is an escapable
character immediately preceded by a backslash.
2017-07-10 19:05:18 -07:00
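
A hedged sketch of an unescape routine following this rule (not the actual parser): a backslash is consumed only when the byte after it is escapable in the current context; otherwise it is kept literally.

```
package main

import (
	"fmt"
	"strings"
)

// unescape drops a backslash only when the byte following it is escapable
// in the current context; any other backslash is kept literally, since
// line protocol neither requires nor allows escaping the backslash itself.
func unescape(s, escapable string) string {
	out := make([]byte, 0, len(s))
	for i := 0; i < len(s); i++ {
		if s[i] == '\\' && i+1 < len(s) && strings.IndexByte(escapable, s[i+1]) >= 0 {
			i++ // skip the backslash, keep the escaped byte
		}
		out = append(out, s[i])
	}
	return string(out)
}

func main() {
	// For tag keys, ',', '=' and space are the escapable bytes.
	fmt.Println(unescape(`temp\=c`, `,= `)) // temp=c
	fmt.Println(unescape(`C:\temp`, `,= `)) // C:\temp (kept as-is)
}
```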
Daniel Nelson
4209ebfa6e Update changelog 2017-07-10 12:23:16 -07:00
Daniel Nelson
79f8ed874a Update elastic version to 5.0.41 (#2999) 2017-07-10 12:18:56 -07:00
Daniel Nelson
739d97639a Update dependencies 2017-07-10 12:01:22 -07:00
Wesley Merkel
ac8e28f436 Add link to Graylog input to README.md (#2995) 2017-07-10 11:22:37 -07:00
Daniel Nelson
2740a3ba44 Update changelog 2017-07-05 14:29:59 -07:00
Song Wenhao
0f850400f2 Display error message if prometheus output fails to listen (#2984) 2017-07-05 14:28:44 -07:00
Daniel Nelson
74a764d549 Update changelog 2017-06-29 16:17:08 -07:00
Aleksey Shirokih
a8a637809e Change default prometheus_client port (#2973) 2017-06-29 14:03:42 -07:00
Daniel Nelson
75dbf2b0f8 Set release date for 1.3.3 2017-06-28 13:05:06 -07:00
Daniel Nelson
90909ae708 Fix build on Windows (#2972) 2017-06-27 16:31:28 -07:00
Daniel Nelson
d40e441240 Use git sha1 as version if not tagged (#2969) 2017-06-27 13:24:06 -07:00
Adam Perlin
cc3d420551 Fix several bugs in minecraft input (#2970) 2017-06-27 13:14:07 -07:00
Daniel Nelson
f2bb4acd4a Update changelog 2017-06-26 15:25:06 -07:00
Bob Shannon
a7595c918a Fix panic in elasticsearch input if cannot determine master (#2954) 2017-06-26 15:23:53 -07:00
Daniel Nelson
a52f90122b Update changelog 2017-06-26 15:15:31 -07:00
Bob Shannon
d217cdc1a6 Add optional usage_active and time_active CPU metrics (#2943) 2017-06-26 15:13:38 -07:00
Daniel Nelson
d5b6f92f3f Log aerospike field value on error 2017-06-26 14:48:22 -07:00
Daniel Nelson
1a636abaaf Update changelog 2017-06-26 14:31:17 -07:00
vodolaz095
1fdbfa4719 Add support for RethinkDB 1.0 handshake protocol (#2963)
Allows the rethinkdb input plugin to work with RethinkDB 2.3.5+ databases
that require username/password authorization and handshake protocol v1.0

* remove top level header not required in sample config
2017-06-26 14:29:48 -07:00
Daniel Nelson
22fc130e97 Update changelog 2017-06-23 16:56:36 -07:00
Ayrdrie
a726579d50 Add Minecraft input plugin (#2960) 2017-06-23 16:54:12 -07:00
Daniel Nelson
d774c2a170 Update changelog 2017-06-23 11:13:00 -07:00
MatthewCh
6d5bb35f84 Support HOST_PROC in processes and linux_sysctl_fs inputs (#2924) 2017-06-23 11:11:33 -07:00
Daniel Nelson
e028f10586 Update changelog 2017-06-23 11:04:13 -07:00
Daniel Nelson
9276318faf Fix bug parsing default timestamps with modified precision (#2949) 2017-06-23 10:59:04 -07:00
Daniel Nelson
82a04d904d Use strings.Join in statsd input (#2947) 2017-06-21 16:24:23 -07:00
Daniel Nelson
364da9a83d Update changelog 2017-06-21 12:46:57 -07:00
grugrut
ca9cec2c84 Add input plugin for Fail2ban (#2875) 2017-06-21 12:42:13 -07:00
Daniel Nelson
9211985c63 Update changelog 2017-06-21 12:39:09 -07:00
Daniel Nelson
929ba0a637 Remove label value sanitization in prometheus output (#2939) 2017-06-21 12:36:29 -07:00
Daniel Nelson
dcdcb70cb1 Update changelog 2017-06-19 11:52:53 -07:00
Eugene Shilin
cb5a12de3d Add standard SSL options to mysql input (#2933) 2017-06-19 11:42:43 -07:00
Artem Kovardin
193e8fa5ad More explicit 404 error in cassandra input (#2936) 2017-06-19 11:06:49 -07:00
trastle
00b37a7c0d Update README for Prometheus Client Output (#2452) 2017-06-19 11:04:08 -07:00
Daniel Nelson
736322dfc9 Set default ping count in Windows
fixes #2934
2017-06-16 13:39:55 -07:00
Daniel Nelson
ba364988de Document that ping_interval is non-linux only 2017-06-16 13:32:04 -07:00
Daniel Nelson
a729a44284 Update changelog 2017-06-16 13:18:27 -07:00
Daniel Nelson
3ecfd32df5 Allow dos line endings in tail and logparser (#2920)
Parsing line protocol delimited by DOS line endings is still illegal in
most cases.
2017-06-16 13:16:48 -07:00
Daniel Nelson
ea1888bd26 Update changelog 2017-06-16 12:06:40 -07:00
Simone Rotondo
674c24f987 Add HTTP Proxy support to influxdb output (#2929) 2017-06-16 12:05:08 -07:00
Daniel Nelson
ca72df5868 Update 1.3.2 release date 2017-06-14 12:16:47 -07:00
Daniel Nelson
ea787b83bf Update changelog 2017-06-13 18:07:12 -07:00
Daniel Nelson
949072e8dc Ensure prometheus metrics have same set of labels (#2857) 2017-06-13 18:04:26 -07:00
Daniel Nelson
246f342e6a Update changelog 2017-06-13 17:19:33 -07:00
Daniel Nelson
619b5d4c14 Change node_name to be a tag in aerospike input (#2918) 2017-06-13 17:09:38 -07:00
Daniel Nelson
b0efc22140 Update changelog 2017-06-13 14:10:33 -07:00
Heston Kan
5d1efdbfda Add min/max response time on linux/darwin to ping (#2908) 2017-06-13 14:09:17 -07:00
Daniel Nelson
e3ccd473d2 Update changelog 2017-06-13 13:44:07 -07:00
Dheeraj Dwivedi
f0cbfe4d67 Add secure connection support to graphite output (#2602) 2017-06-13 13:42:11 -07:00
Daniel Nelson
40d8e582ee Update changelog 2017-06-12 18:32:50 -07:00
Daniel Nelson
02b55fe77f Update aws-sdk-go dependency to latest release. (#2912) 2017-06-12 18:31:27 -07:00
Daniel Nelson
0c53de6700 Update changelog 2017-06-08 16:55:27 -07:00
Daniel Nelson
b277e6e2d7 Fix support for mongodb/leofs urls without scheme (#2900)
This was broken by changes to url.Parse in Go 1.8. This change allows the
string but prompts the user to move to the correct URL string.
2017-06-08 16:52:01 -07:00
Daniel Nelson
de4a312eba Update changelog 2017-06-08 13:20:44 -07:00
Matteo Cerutti
4b3b16ef1a Add wildcard support for container inclusion/exclusion (#2793) 2017-06-08 13:17:31 -07:00
Daniel Nelson
4c534433aa Skip kafka_consumer_integration_test due to issue on CircleCI 2017-06-07 18:31:52 -07:00
Daniel Nelson
f9447d01d4 Add release note to changelog regarding kafka_consumer 2017-06-07 18:27:12 -07:00
Seuf
2092443cd7 Add Kafka 0.9+ consumer support (#2487) 2017-06-07 18:22:28 -07:00
Bob Shannon
1c73caba04 Add SSL/TLS support to nginx input plugin (#2883) 2017-06-07 17:52:10 -07:00
Daniel Nelson
84dbf8bb25 Update changelog 2017-06-07 13:46:06 -07:00
Daniel Nelson
a275e6792a Fix metric splitting edge cases (#2896)
Metrics needing one extra byte to fit the output buffer would not be split, so we would emit lines without a line ending. Metrics which overflowed by exactly one field length would be split one field too late, causing truncated fields.
2017-06-07 13:37:54 -07:00
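
A minimal sketch of the fitting rule, not the actual splitter: each line must fit the buffer including its trailing newline, otherwise a chunk boundary is inserted; dropping the `+1` reproduces the off-by-one described above.

```
package main

import "fmt"

// splitPoints chunks serialized lines so each chunk, *including* its
// trailing '\n', fits maxSize. Checking the fit without counting the
// newline byte is exactly the off-by-one this change fixed.
func splitPoints(lines []string, maxSize int) [][]string {
	var chunks [][]string
	var cur []string
	size := 0
	for _, l := range lines {
		need := len(l) + 1 // +1 for the line ending
		if size+need > maxSize && len(cur) > 0 {
			chunks = append(chunks, cur)
			cur, size = nil, 0
		}
		cur = append(cur, l)
		size += need
	}
	if len(cur) > 0 {
		chunks = append(chunks, cur)
	}
	return chunks
}

func main() {
	fmt.Println(splitPoints([]string{"cpu a=1", "cpu b=2"}, 8))
	// [[cpu a=1] [cpu b=2]]: each line needs 8 bytes with its newline
}
```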
Daniel Nelson
de7fb2acfe Update changelog 2017-06-06 13:55:11 -07:00
Frederick Roth
91f2764cd5 Add result_type field for http_response input (#2814) 2017-06-06 13:39:07 -07:00
Daniel Nelson
4e91b18bbe Update changelog 2017-06-06 11:56:19 -07:00
Mariusz Brzeski
56a7ffe0e4 Fix timeout option in Windows ping input sample configuration (#2885) 2017-06-06 11:55:01 -07:00
Daniel Nelson
f9462d4fff Update changelog 2017-06-05 14:47:34 -07:00
Sebastian Borza
035905d65e Add timezone support to logparser timestamps (#2882) 2017-06-05 14:45:11 -07:00
Daniel Nelson
a47e6e6efe Update changelog 2017-06-05 12:46:50 -07:00
Daniel Nelson
5bab4616ff Fix udp metric splitting (#2880) 2017-06-05 12:44:29 -07:00
Daniel Nelson
37e01808b5 Set 1.3.1 release date 2017-05-31 15:00:31 -07:00
Daniel Nelson
0b6db905ff Generate sha256 hashes when packaging 2017-05-31 12:29:39 -07:00
Daniel Nelson
9529199a44 Update changelog 2017-05-30 17:40:37 -07:00
Daniel Nelson
be03abd464 Fix length calculation of split metric buffer (#2869) 2017-05-30 17:38:32 -07:00
Daniel Nelson
04aa732e94 Update changelog 2017-05-30 11:04:39 -07:00
Steve Nardone
e7f9db297e Fix panic in mongo input (#2848) 2017-05-30 11:02:26 -07:00
Daniel Nelson
24ea9fdc4d Update changelog 2017-05-26 12:12:18 -07:00
Matteo Cerutti
02d168705c MySQL input: log and continue on field parse error (#2855) 2017-05-26 12:09:43 -07:00
Daniel Nelson
7d7206b3e2 Update changelog 2017-05-25 16:20:29 -07:00
Daniel Nelson
03ca3975b5 Update gopsutil version
fixes #2856
2017-05-25 16:11:49 -07:00
Daniel Nelson
e1088b9eee Update changelog 2017-05-25 13:39:16 -07:00
Daniel Nelson
f47924ffc5 Fix influxdb output database quoting (#2851) 2017-05-25 13:25:52 -07:00
Olivier Lambert
a96f85c847 Add documentation for fetching metrics on Caddy HTTP and Prometheus (#2853) 2017-05-25 13:07:49 -07:00
Sylvain Boily
9148871608 Documentation privilege requirements for specific procstat metrics (#2787) 2017-05-25 13:06:27 -07:00
Matteo Cerutti
7d198f0a68 Add timeout option to ipmi_sensor plugin - solves #2817 (#2818) 2017-05-22 13:41:34 -07:00
Daniel Nelson
1459fab4d6 Remove changelog item from pull request template
The person who merges the PR is now expected to update the CHANGELOG.
2017-05-22 12:06:48 -07:00
Daniel Nelson
b0bd4d55f5 Update CHANGELOG with fixed issue #1137 2017-05-22 12:01:22 -07:00
Steven Burgart
9ab688d62c Fix multiple plugin loading in win_perf_counters (#2800) 2017-05-22 11:58:00 -07:00
Daniel Nelson
8fdc2aec80 Update dependency license file 2017-05-19 18:03:49 -07:00
Lukasz Jagiello
91690b1d3e Consul plugin README typo (#2829) 2017-05-19 11:37:31 -07:00
Daniel Nelson
c61cd73eff Update changelog 2017-05-18 18:11:49 -07:00
rsingh2411
93e638d63e Add Docker container environment variables as tags (whitelisted only) #2580 (#2581) 2017-05-18 16:58:34 -07:00
mced
501c22478e [enh] Set db_version to 0 if the version query fails (#2819) 2017-05-18 13:52:56 -07:00
Daniel Nelson
7155e90f66 Update changelog for #2815 2017-05-16 17:37:51 -07:00
Timo Mihaljov
c53d9fa9b7 Handle process termination during read from /proc (#2816)
Fixes #2815.
2017-05-16 17:33:35 -07:00
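A minimal sketch of the tolerance this describes, assuming a missing `/proc/<pid>` file simply means the process exited between listing and reading; `readProcFile` is a hypothetical helper, not the input's actual code:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"
)

// readProcFile returns the file contents, or (nil, nil) when the process
// vanished mid-gather. Reads racing a process exit can also surface ESRCH,
// which callers may want to treat the same way.
func readProcFile(path string) ([]byte, error) {
	data, err := ioutil.ReadFile(path)
	if err != nil {
		if os.IsNotExist(err) {
			return nil, nil // process terminated; not an error
		}
		return nil, err
	}
	return data, nil
}

func main() {
	data, err := readProcFile("/proc/1/stat")
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	if data != nil {
		fmt.Printf("%s", data)
	}
}
```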
Frederick Roth
ac5ac3161f Fixed inconsistency between HasIntField and IntField (#2813) 2017-05-16 15:25:30 -07:00
Daniel Nelson
bfeb3020a3 Add release date for 1.3.0 2017-05-15 19:52:35 -07:00
Daniel Nelson
b01ecdccff Add back the changelog entry for 2141 2017-05-15 12:54:03 -07:00
Daniel Nelson
da99777f6f Only split metrics if there is an udp output (#2799) 2017-05-12 15:34:05 -07:00
Zack Zatkin-Gold
dd537b3382 Fix telegraf example arguments (#2788)
Many of the examples provided in the documentation use a single
dash for the command line arguments, but the telegraf executable
expects two dashes.

There are also some inconsistencies in the ordering of the command
line argument examples.  I've ordered them so that the examples will
show: config, config-directory, input-filter, test
2017-05-12 15:22:29 -07:00
Sebastian Borza
f74687dcc0 split metrics based on UDPPayload size (#2795) 2017-05-12 14:45:50 -07:00
Daniel Nelson
a47aa0dcc2 Merge branch 'reuse-transport' 2017-05-10 18:19:21 -07:00
Daniel Nelson
17d883c602 Ensure keep-alive is not used in http_response input.
Using keep-alive would change the timing for already established
connections.  Prior to this commit, keep-alive took effect only when using
a response_string_match, because the response body was otherwise not closed.
2017-05-10 14:40:55 -07:00
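A minimal sketch of the resulting behavior, assuming a plain `net/http` client: disable keep-alive on the transport so every gather measures a fresh connection, and always drain and close the response body. This shows the idea, not the plugin's exact code:

```go
package main

import (
	"io"
	"io/ioutil"
	"net/http"
	"time"
)

// newClient builds a client that never reuses connections, so each request
// pays (and therefore measures) full connection setup time.
func newClient(timeout time.Duration) *http.Client {
	return &http.Client{
		Transport: &http.Transport{DisableKeepAlives: true},
		Timeout:   timeout,
	}
}

func main() {
	client := newClient(5 * time.Second)
	resp, err := client.Get("http://localhost:8080/")
	if err != nil {
		return
	}
	// Drain and close the body; leaving it unclosed is what made connection
	// reuse behave inconsistently before this change.
	io.Copy(ioutil.Discard, resp.Body)
	resp.Body.Close()
}
```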
Daniel Nelson
a1446a60f7 Update changelog 2017-05-10 13:11:33 -07:00
Daniel Nelson
1931aac284 Fix http_response input creation of transport on every gather 2017-05-09 16:23:38 -07:00
Daniel Nelson
b88eb0f59d Fix prometheus input creation of transport on every gather 2017-05-09 16:21:49 -07:00
Daniel Nelson
e7ad2d0463 Fix apache input creation of transport on every gather. 2017-05-09 16:19:56 -07:00
Daniel Nelson
c28ffb11cb Merge branch 'update-readme' 2017-05-09 13:50:19 -07:00
Daniel Nelson
018fd5ce5b Add missing plugins to README 2017-05-09 13:50:12 -07:00
Daniel Nelson
cd0ec0185a Update contributing section
Hoping this will encourage more non-plugin contributions.
2017-05-09 13:50:12 -07:00
Adrian Sadłocha
8124cfa3ed Improve PostgreSQL plugin documentation (#2777) 2017-05-09 12:58:43 -07:00
Lukasz Jagiello
5af985ef5f Add support for self-signed certs to InfluxDB input plugin (#2773) 2017-05-08 15:20:24 -07:00
Sylvain Boily
1ebd1aaa41 Systemd does not see all shutdowns as failures (#2716) 2017-05-08 11:48:29 -07:00
Daniel Nelson
de3f52b990 Update cloudwatch documentation
Mention that some metrics are available only at intervals larger than 5
minutes.  Update dead links to their new locations and update the example config.

closes #1907
2017-05-08 11:31:20 -07:00
Daniel Nelson
4200018a0b Enable s390x builds
closes #2766
2017-05-05 14:39:56 -07:00
Daniel Nelson
67cd1669cc Add SLES11 support to rpm package (#2768) 2017-05-05 14:29:40 -07:00
Sébastien
a8cfe03ba8 fix systemd path in order to add compatibility with SuSe (#2499) 2017-05-05 14:04:33 -07:00
ceseuron
e2983383e4 Fixed sqlserver input to work with case sensitive server collation. (#2749)
Fixed a problem in the sqlserver input where database properties were not returned by Telegraf when SQL Server was set up with a case-sensitive server-level collation.

* Added bugfix entry to CHANGELOG.md for sqlserver collation input fix.
2017-05-04 10:47:03 -07:00
Daniel Nelson
8cf0dc769b Add 1.4 section to changelog 2017-05-03 17:29:34 -07:00
Daniel Nelson
613de8a80d Remove documentation in kafka_consumer for metric_buffer 2017-05-03 11:51:49 -07:00
Damien Krotkine
f5c890cc1d reflect zookeeper chroot config in readme (#2759) 2017-05-03 11:50:08 -07:00
Daniel Nelson
f7f1eaef65 Return an error if no valid patterns. (#2753) 2017-05-02 14:54:38 -07:00
Alexander Blagoev
188703e204 Improve redis input documentation (#2708) 2017-05-02 11:43:07 -07:00
Patrick Hemmer
52c19af0ba fix close on closed socket_writer (#2748) 2017-05-02 11:06:49 -07:00
Daniel Nelson
5c88965084 Add initial documentation for rabbitmq input. (#2745) 2017-05-01 18:55:48 -07:00
Daniel Nelson
6e76731b7e Don't log error creating database on connect (#2740)
closes #2739
2017-04-28 15:58:46 -07:00
Daniel Nelson
c7a0e40c87 Update telegraf.conf 2017-04-28 13:47:32 -07:00
Daniel Nelson
086a2f5f12 Clarify retention policy option for influxdb output
closes #2696
2017-04-28 13:46:23 -07:00
Daniel Nelson
1da1c4753e Clarify retention policy option for influxdb output
closes #2696
2017-04-28 13:40:58 -07:00
Daniel Nelson
a083e1af7d Use go 1.8.1 for CI and Release builds (#2732) 2017-04-27 16:18:11 -07:00
Daniel Nelson
052e88ad5e Fix grammar 2017-04-27 14:59:18 -07:00
Daniel Nelson
b9ce455bba Update telegraf.conf 2017-04-27 11:53:32 -07:00
Seuf
cd103c85db Added SASL options for output kafka plugin (#2721) 2017-04-27 11:50:25 -07:00
Ross McDonald
a3feacbd2f Kapacitor input plugin (#2031) 2017-04-27 11:47:22 -07:00
Daniel Nelson
e1a734c525 Fix logfile documentation 2017-04-27 11:38:49 -07:00
Daniel Nelson
53ab56de72 Update haproxy README 2017-04-27 11:23:37 -07:00
Seuf
4e2fe598ac Added SSL configuration for input haproxy (#2723) 2017-04-27 11:20:41 -07:00
Daniel Nelson
5fe5c46c6d Fix amqp output block on write if disconnected (#2727)
fixes #2603
2017-04-27 11:10:30 -07:00
Damien Krotkine
153304d92b it's -> its (#2728) 2017-04-27 11:10:00 -07:00
Damien Krotkine
cb9aecbf04 it's -> its (#2729) 2017-04-27 11:06:40 -07:00
Nevins
c66e2896c6 add option to randomize Kinesis partition key (#2705) 2017-04-26 10:54:24 -07:00
Jeff Zellner
9b874dff8d Update README.md (#2719) 2017-04-25 13:17:15 -07:00
Daniel Nelson
b243faa22b Don't close stdout on config reload. (#2707)
fixes #2528
2017-04-24 16:18:58 -07:00
Patrick Hemmer
8f5cd6c2ae add keep-alive support to socket_listener & socket_writer (#2697)
closes #2635
2017-04-24 13:14:42 -07:00
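A minimal sketch of what enabling TCP keep-alive on an accepted connection looks like in Go; the config wiring in the plugins (option names, periods) is not shown, and `enableKeepAlive` is a hypothetical helper:

```go
package main

import (
	"log"
	"net"
	"time"
)

// enableKeepAlive turns on TCP keep-alive probes so a listener eventually
// notices peers that disappeared without closing the connection.
func enableKeepAlive(conn net.Conn, period time.Duration) error {
	tcp, ok := conn.(*net.TCPConn)
	if !ok {
		return nil // e.g. a unix socket: nothing to do
	}
	if err := tcp.SetKeepAlive(true); err != nil {
		return err
	}
	return tcp.SetKeepAlivePeriod(period)
}

func main() {
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		log.Fatal(err)
	}
	defer ln.Close()
	go func() {
		if c, err := net.Dial("tcp", ln.Addr().String()); err == nil {
			c.Close() // dial ourselves so the example is self-contained
		}
	}()
	conn, err := ln.Accept()
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	if err := enableKeepAlive(conn, 5*time.Minute); err != nil {
		log.Println("keep-alive:", err)
	}
}
```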
Alexander Blagoev
3c28b93514 Improve procstat input documentation (#2699)
closes #1895
2017-04-24 11:18:55 -07:00
Patrick Hemmer
06baf7cf78 use AddError everywhere (#2372) 2017-04-24 11:13:26 -07:00
Alexander Blagoev
801f6cb8a0 System net input documentation (#2698)
closes #2166
2017-04-24 11:03:53 -07:00
Daniel Nelson
3684ec6315 Update EXAMPLE_README.md 2017-04-21 14:27:36 -07:00
Daniel Nelson
da0773151b Use C locale when running sadf (#2690)
fixes #1911
2017-04-21 10:55:54 -07:00
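A minimal sketch of forcing the C locale for a child process so numeric output stays parseable regardless of the system locale; the `sadf` flags here are illustrative only, not the plugin's actual invocation:

```go
package main

import (
	"fmt"
	"os"
	"os/exec"
)

func main() {
	// LC_ALL=C forces locale-independent formatting, so parsing does not
	// break on locales that use ',' as the decimal separator.
	cmd := exec.Command("sadf", "-p", "--", "-u") // flags for illustration
	cmd.Env = []string{"LC_ALL=C", "PATH=" + os.Getenv("PATH")}
	out, err := cmd.Output()
	if err != nil {
		fmt.Fprintln(os.Stderr, "error:", err)
		return
	}
	fmt.Printf("%s", out)
}
```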
Daniel Nelson
38e1c1de77 Update commit hash of tail fork 2017-04-20 16:29:39 -07:00
Daniel Nelson
799c8bed29 Add fix for network aliases to changelog
Change was made in gopsutil
2017-04-20 15:34:30 -07:00
Alexander Blagoev
a237301932 Memcached input documentation (#2685)
Closes #2615
2017-04-20 11:25:22 -07:00
Oleg Grytsynevych
b03d78d00f win_perf_counters: Format errors reported by pdh.dll in human-readable format (#2338) 2017-04-20 11:22:44 -07:00
Martin
748ca7d503 Fixed install/remove of telegraf on non-systemd Debian/Ubuntu systems (#2360) 2017-04-20 11:19:33 -07:00
Daniel Nelson
bf30ef89ee Fix ipmi_sensor config is shared between all plugin instances (#2684) 2017-04-19 17:02:44 -07:00
Daniel Nelson
3690e1b9bf Add diskio for darwin to changelog 2017-04-19 13:42:24 -07:00
Patrick Hemmer
2542ef6d62 change jolokia input to use bulk requests (#2253) 2017-04-18 13:00:41 -07:00
Nikolay Denev
eb7ef5392e Simplify system.DiskUsage() (#2630) 2017-04-18 11:42:58 -07:00
Ross McDonald
70b3e763e7 Add input for receiving papertrail webhooks (#2038) 2017-04-17 13:49:36 -07:00
François de Metz
58ee962679 GitHub webhooks: check signature (#2493) 2017-04-17 11:42:03 -07:00
Daniel Nelson
dc5779e2a7 Rename heap_objects_bytes to heap_objects in internal plugin. (#2674)

This field does not contain bytes

fixes #2671
2017-04-14 17:32:14 -07:00
Daniel Nelson
b968759d10 Use variadic disk.IOCounters() function 2017-04-14 13:48:02 -07:00
Daniel Nelson
b90a5b48a1 Improve logparser README (#2664) 2017-04-14 13:47:43 -07:00
calerogers
a12e082dbe Refactor interrupts plugin code (#2670) 2017-04-14 13:40:36 -07:00
calerogers
cadd845b36 Irqstat input plugin (#2494)
closes #2469
2017-04-13 15:53:02 -07:00
ingosus
dff216c44d Feature #1820: add testing without outputs (#2446) 2017-04-13 12:59:28 -07:00
Gregory Kman
45c9b867f6 Update ping-input-plugin Readme (#2651) 2017-04-12 17:46:48 -07:00
Chris Goffinet
9388fff1f7 Fixed content-type header in output plugin OpenTSDB (#2663) 2017-04-12 17:40:10 -07:00
Daniel Nelson
3e0c55bff9 Update grok version (#2662) 2017-04-12 17:10:17 -07:00
Jesús Roncero
49ab4e26f8 Nagios plugin documentation fix (#2659) 2017-04-12 12:04:44 -07:00
Daniel Nelson
360b10c4de Clarify precision documentation (#2655) 2017-04-12 10:42:11 -07:00
Daniel Nelson
2c98e5ae66 Add collectd parser (#2654) 2017-04-12 10:41:26 -07:00
Nick Irvine
0193cbee51 Add max_message_len in kafka_consumer input (#2636) 2017-04-11 12:05:39 -07:00
Daniel Nelson
f55af7d21f Use name filter for IOCounters in diskio (#2649)
Use IOCountersForNames for disk counters.
2017-04-11 11:41:09 -07:00
Patrick Hemmer
516dffa4c4 set default measurement name on snmp input (#2639) 2017-04-10 16:45:02 -07:00
Daniel Nelson
62b5c1f7e7 Add support for precision in http_listener (#2644) 2017-04-10 16:39:40 -07:00
Daniel Nelson
07c428ef89 Use random port in http_listener tests 2017-04-10 14:39:39 -07:00
Vladimir S
aa722fac9b Add dmcache input plugin (#1667) 2017-04-07 15:39:43 -07:00
Rajaseelan Ganeswaran
7cc4ca2341 Add sample config stanza for CPU (#2620) 2017-04-06 14:44:02 -07:00
Victor Yunevich
92fa20cef2 ipmi_sensor: allow @ symbol in password (#2633) 2017-04-06 14:40:34 -07:00
Daniel Nelson
c9f8308f27 Update filtering documentation (#2631) 2017-04-06 12:06:08 -07:00
James
5ffc9fd379 fix postgresql connection leak (#2611) 2017-04-04 17:37:44 -07:00
Daniel Nelson
8bf193dc06 Update httpjson documentation (#2619)
closes  #2536
2017-04-03 18:34:04 -07:00
Patrick Hemmer
f2805fd4aa socket_listener: clean up unix socket file on start & stop (#2618) 2017-04-03 18:06:51 -07:00
Shakeel Sorathia
35e4390168 Docker: optionally add labels as tags (#2425) 2017-04-03 13:43:15 -07:00
Patrick Hemmer
51c99d5b67 add support for linux sysctl fs metrics (#2609) 2017-03-31 14:01:02 -07:00
Daniel Nelson
540f98e228 Fix possible deadlock when output cannot write. (#2610) 2017-03-31 12:45:28 -07:00
Dmitry Ulyanov
c980c92cd5 Added pprof tool (#2512) 2017-03-29 18:28:43 -07:00
Daniel Nelson
9495b615f5 Update changelog for #2587 2017-03-29 17:15:11 -07:00
tjmcs
fb1c7d0154 Adds a new json_timestamp_units configuration parameter (#2587) 2017-03-29 17:12:29 -07:00
Patrick Hemmer
03ee6022f3 fix race in testutil Accumulator.Wait() (#2598) 2017-03-29 17:03:06 -07:00
djjorjinho
cc5b2f68b6 fix timestamp parsing on prometheus plugin (#2596) 2017-03-29 15:04:29 -07:00
Daniel Nelson
2d7f612bd7 Use fork of hpcloud/tail (#2595) 2017-03-29 14:25:33 -07:00
Daniel Nelson
9e036b2d65 Remove wait loop in riemann tests
This test case still has a race condition, but I believe it occurs only
when the test does not complete quickly enough.
2017-03-28 13:05:10 -07:00
mgresser
1100a98f11 Removed duplicate evictions metric (#2577) 2017-03-28 10:47:00 -07:00
Daniel Nelson
37689f4df6 Add elasticsearch output to changelog 2017-03-28 10:22:28 -07:00
Daniel Nelson
78c7f4e4af Add write timeout to Riemann output (#2576) 2017-03-27 15:49:45 -07:00
Daniel Nelson
84a9f91f5c Skip elasticsearch output integration test in short mode 2017-03-27 15:05:06 -07:00
Daniel Nelson
5612df48f9 Update telegraf.conf 2017-03-27 14:49:04 -07:00
Daniel Nelson
0fa9001453 Clarify influxdb output url format
closes #2568
2017-03-24 16:04:18 -07:00
Patrick Hemmer
995546e7c6 snmp: support table indexes as tags (#2366) 2017-03-24 12:06:52 -07:00
Patrick Hemmer
1402c158b7 remove sleep from tests (#2555) 2017-03-24 12:03:36 -07:00
Oskar
616b66f5cb Multi instances in win_perf_counters (#2352) 2017-03-22 12:04:58 -07:00
Daniel Nelson
70a0a84882 Really fix procstat initialization 2017-03-21 11:40:51 -07:00
Daniel Nelson
5c33c760c7 Fix procstat initialization 2017-03-21 10:59:41 -07:00
Leandro Piccilli
bb28fb256b Add Elasticsearch 5.x output (#2332) 2017-03-20 17:47:57 -07:00
Daniel Nelson
a962e958eb Refactor procstat input (#2540)
fixes #1636 
fixes #2315
2017-03-17 16:49:11 -07:00
Patrick Hemmer
8514acdc3c return error on unsupported serializer data format (#2542) 2017-03-17 10:14:03 -07:00
Antoine Augusti
426182b81a Update default value for Cloudwatch rate limit (#2520) 2017-03-15 15:20:18 -07:00
Daniel Nelson
7a5d857846 Add support for new SSL configuration to mongodb (#2522)
closes #2519
2017-03-10 11:27:55 -08:00
519 changed files with 41394 additions and 4975 deletions


@@ -1,5 +1,5 @@
### Required for all PRs:
- [ ] CHANGELOG.md updated (we recommend not updating this until the PR has been approved by a maintainer)
- [ ] Sign [CLA](https://influxdata.com/community/cla/) (if not already signed)
- [ ] README.md updated (if adding a new plugin)
- [ ] Signed [CLA](https://influxdata.com/community/cla/).
- [ ] Associated README.md updated.
- [ ] Has appropriate unit tests.


@@ -1,4 +1,356 @@
## v1.3 [unreleased]
## v1.5.3 [2018-03-14]
### Bugfixes
- [#3729](https://github.com/influxdata/telegraf/issues/3729): Set path to / if HOST_MOUNT_PREFIX matches full path.
- [#3739](https://github.com/influxdata/telegraf/issues/3739): Remove userinfo from url tag in prometheus input.
- [#3778](https://github.com/influxdata/telegraf/issues/3778): Fix ping plugin not reporting zero durations.
- [#3807](https://github.com/influxdata/telegraf/issues/3807): Fix memory leak in postgresql_extensible.
- [#3697](https://github.com/influxdata/telegraf/issues/3697): Disable keepalive in mqtt output to prevent deadlock.
- [#3786](https://github.com/influxdata/telegraf/pull/3786): Fix collation difference in sqlserver input.
- [#3871](https://github.com/influxdata/telegraf/pull/3871): Fix uptime metric in passenger input plugin.
- [#3851](https://github.com/influxdata/telegraf/issues/3851): Add output of stderr in case of error to exec log message.
## v1.5.2 [2018-01-30]
### Bugfixes
- [#3684](https://github.com/influxdata/telegraf/pull/3684): Ignore empty lines in Graphite plaintext.
- [#3604](https://github.com/influxdata/telegraf/issues/3604): Fix index out of bounds error in solr input plugin.
- [#3680](https://github.com/influxdata/telegraf/pull/3680): Reconnect before sending graphite metrics if disconnected.
- [#3693](https://github.com/influxdata/telegraf/pull/3693): Align aggregator period with internal ticker to avoid skipping metrics.
- [#3629](https://github.com/influxdata/telegraf/issues/3629): Fix a potential deadlock when using aggregators.
- [#3697](https://github.com/influxdata/telegraf/issues/3697): Limit wait time for writes in mqtt output.
- [#3698](https://github.com/influxdata/telegraf/issues/3698): Revert change in graphite output where dot in field key was replaced by underscore.
- [#3710](https://github.com/influxdata/telegraf/issues/3710): Add timeout to wavefront output write.
- [#3725](https://github.com/influxdata/telegraf/issues/3725): Exclude master_replid fields from redis input.
## v1.5.1 [2018-01-10]
### Bugfixes
- [#3624](https://github.com/influxdata/telegraf/pull/3624): Fix name error in jolokia2_agent sample config.
- [#3625](https://github.com/influxdata/telegraf/pull/3625): Fix DC/OS login expiration time.
- [#3593](https://github.com/influxdata/telegraf/pull/3593): Set Content-Type charset in influxdb output and allow it be overridden.
- [#3594](https://github.com/influxdata/telegraf/pull/3594): Document permissions setup for postfix input.
- [#3633](https://github.com/influxdata/telegraf/pull/3633): Fix deliver_get field in rabbitmq input.
- [#3607](https://github.com/influxdata/telegraf/issues/3607): Escape environment variables during config toml parsing.
## v1.5 [2017-12-14]
### New Plugins
- [basicstats](./plugins/aggregators/basicstats/README.md) - Thanks to @toni-moreno
- [bond](./plugins/inputs/bond/README.md) - Thanks to @ildarsv
- [cratedb](./plugins/outputs/cratedb/README.md) - Thanks to @felixge
- [dcos](./plugins/inputs/dcos/README.md) - Thanks to @influxdata
- [jolokia2](./plugins/inputs/jolokia2/README.md) - Thanks to @dylanmei
- [nginx_plus](./plugins/inputs/nginx_plus/README.md) - Thanks to @mplonka & @poblahblahblah
- [opensmtpd](./plugins/inputs/opensmtpd/README.md) - Thanks to @aromeyer
- [particle](./plugins/inputs/webhooks/particle/README.md) - Thanks to @davidgs
- [pf](./plugins/inputs/pf/README.md) - Thanks to @nferch
- [postfix](./plugins/inputs/postfix/README.md) - Thanks to @phemmer
- [smart](./plugins/inputs/smart/README.md) - Thanks to @rickard-von-essen
- [solr](./plugins/inputs/solr/README.md) - Thanks to @ljagiello
- [teamspeak](./plugins/inputs/teamspeak/README.md) - Thanks to @p4ddy1
- [unbound](./plugins/inputs/unbound/README.md) - Thanks to @aromeyer
- [wavefront](./plugins/outputs/wavefront/README.md) - Thanks to @puckpuck
### Release Notes
- In the `kinesis` output, use of the `partition_key` and
`use_random_partitionkey` options has been deprecated in favor of the
`partition` subtable. This allows for more flexible methods to set the
partition key such as by metric name or by tag.
- With the release of the new improved `jolokia2` input, the legacy `jolokia`
plugin is deprecated and will be removed in a future release. Users of this
plugin are encouraged to update to the new `jolokia2` plugin.
### Features
- [#3170](https://github.com/influxdata/telegraf/pull/3170): Add support for sharding based on metric name.
- [#3196](https://github.com/influxdata/telegraf/pull/3196): Add Kafka output plugin topic_suffix option.
- [#3027](https://github.com/influxdata/telegraf/pull/3027): Include mount mode option in disk metrics.
- [#3191](https://github.com/influxdata/telegraf/pull/3191): TLS and MTLS enhancements to HTTPListener input plugin.
- [#3213](https://github.com/influxdata/telegraf/pull/3213): Add polling method to logparser and tail inputs.
- [#3211](https://github.com/influxdata/telegraf/pull/3211): Add timeout option for kubernetes input.
- [#3234](https://github.com/influxdata/telegraf/pull/3234): Add support for timing sums in statsd input.
- [#2617](https://github.com/influxdata/telegraf/issues/2617): Add resource limit monitoring to procstat.
- [#3236](https://github.com/influxdata/telegraf/pull/3236): Add support for k8s service DNS discovery to prometheus input.
- [#3245](https://github.com/influxdata/telegraf/pull/3245): Add configurable metrics endpoint to prometheus output.
- [#3214](https://github.com/influxdata/telegraf/pull/3214): Add new nginx_plus input plugin.
- [#3215](https://github.com/influxdata/telegraf/pull/3215): Add support for NSQLookupd to nsq_consumer.
- [#2278](https://github.com/influxdata/telegraf/pull/2278): Add redesigned Jolokia input plugin.
- [#3106](https://github.com/influxdata/telegraf/pull/3106): Add configurable separator for metrics and fields in opentsdb output.
- [#1692](https://github.com/influxdata/telegraf/pull/1692): Add support for the rollbar occurrence webhook event.
- [#3160](https://github.com/influxdata/telegraf/pull/3160): Add Wavefront output plugin.
- [#3281](https://github.com/influxdata/telegraf/pull/3281): Add extra wired tiger cache metrics to mongodb input.
- [#3141](https://github.com/influxdata/telegraf/pull/3141): Collect Docker Swarm service metrics in docker input plugin.
- [#2449](https://github.com/influxdata/telegraf/pull/2449): Add smart input plugin for collecting S.M.A.R.T. data.
- [#3269](https://github.com/influxdata/telegraf/pull/3269): Add cluster health level configuration to elasticsearch input.
- [#3304](https://github.com/influxdata/telegraf/pull/3304): Add ability to limit node stats in elasticsearch input.
- [#2167](https://github.com/influxdata/telegraf/pull/2167): Add new basicstats aggregator.
- [#3344](https://github.com/influxdata/telegraf/pull/3344): Add UDP IPv6 support to statsd input.
- [#3350](https://github.com/influxdata/telegraf/pull/3350): Use labels in prometheus output for string fields.
- [#3358](https://github.com/influxdata/telegraf/pull/3358): Add support for decimal timestamps to ts-epoch modifier.
- [#3337](https://github.com/influxdata/telegraf/pull/3337): Add histogram and summary types and use in prometheus plugins.
- [#3365](https://github.com/influxdata/telegraf/pull/3365): Gather concurrently from snmp agents.
- [#3333](https://github.com/influxdata/telegraf/issues/3333): Perform DNS lookup before ping and report result.
- [#3398](https://github.com/influxdata/telegraf/issues/3398): Add instance name option to varnish plugin.
- [#3406](https://github.com/influxdata/telegraf/pull/3406): Add support for SSL settings to ElasticSearch output plugin.
- [#3315](https://github.com/influxdata/telegraf/pull/3315): Add Teamspeak 3 input plugin.
- [#3305](https://github.com/influxdata/telegraf/pull/3305): Add modification_time field to filestat input plugin.
- [#2019](https://github.com/influxdata/telegraf/pull/2019): Add Solr input plugin.
- [#3210](https://github.com/influxdata/telegraf/pull/3210): Add CrateDB output plugin.
- [#3459](https://github.com/influxdata/telegraf/pull/3459): Add systemd unit pid and cgroup matching to procstat.
- [#3477](https://github.com/influxdata/telegraf/pull/3477): Add Particle Webhook Plugin.
- [#3471](https://github.com/influxdata/telegraf/pull/3471): Use MAX() instead of SUM() for latency measurements in sqlserver.
- [#3490](https://github.com/influxdata/telegraf/pull/3490): Add index by week number to Elasticsearch output.
- [#3434](https://github.com/influxdata/telegraf/pull/3434): Add unbound input plugin.
- [#3449](https://github.com/influxdata/telegraf/pull/3449): Add opensmtpd input plugin.
- [#3470](https://github.com/influxdata/telegraf/pull/3470): Add support for tags in the index name in elasticsearch output.
- [#2553](https://github.com/influxdata/telegraf/pull/2553): Add postfix input plugin.
- [#3424](https://github.com/influxdata/telegraf/pull/3424): Add bond input plugin.
- [#3518](https://github.com/influxdata/telegraf/pull/3518): Add slab to mem plugin.
- [#3519](https://github.com/influxdata/telegraf/pull/3519): Add input plugin for DC/OS.
- [#3140](https://github.com/influxdata/telegraf/pull/3140): Add support for glob patterns in net input plugin.
- [#3405](https://github.com/influxdata/telegraf/pull/3405): Add input plugin for OpenBSD/FreeBSD pf.
- [#3528](https://github.com/influxdata/telegraf/pull/3528): Add option to amqp output to publish persistent messages.
- [#3530](https://github.com/influxdata/telegraf/pull/3530): Support I (idle) process state on procfs+Linux.
### Bugfixes
- [#3136](https://github.com/influxdata/telegraf/issues/3136): Fix webhooks input address in use during reload.
- [#3258](https://github.com/influxdata/telegraf/issues/3258): Unlock Statsd when stopping to prevent deadlock.
- [#3319](https://github.com/influxdata/telegraf/issues/3319): Fix cloudwatch output requires unneeded permissions.
- [#3351](https://github.com/influxdata/telegraf/issues/3351): Fix prometheus passthrough for existing value types.
- [#3430](https://github.com/influxdata/telegraf/issues/3430): Always ignore autofs filesystems in disk input.
- [#3326](https://github.com/influxdata/telegraf/issues/3326): Fail metrics parsing on unescaped quotes.
- [#3473](https://github.com/influxdata/telegraf/pull/3473): Whitelist allowed char classes for graphite output.
- [#3488](https://github.com/influxdata/telegraf/pull/3488): Use hexadecimal ids and lowercase names in zipkin input.
- [#3263](https://github.com/influxdata/telegraf/issues/3263): Fix snmp-tools output parsing with Windows EOLs.
- [#3447](https://github.com/influxdata/telegraf/issues/3447): Add shadow-utils dependency to rpm package.
- [#3448](https://github.com/influxdata/telegraf/issues/3448): Use deb-systemd-invoke to restart service.
- [#3553](https://github.com/influxdata/telegraf/issues/3553): Fix kafka_consumer outside range of offsets error.
- [#3568](https://github.com/influxdata/telegraf/issues/3568): Fix separation of multiple prometheus_client outputs.
- [#3577](https://github.com/influxdata/telegraf/issues/3577): Don't add system input uptime_format as a counter.
## v1.4.5 [2017-12-01]
### Bugfixes
- [#3500](https://github.com/influxdata/telegraf/issues/3500): Fix global variable collection when using interval_slow option in mysql input.
- [#3486](https://github.com/influxdata/telegraf/issues/3486): Fix error getting net connections info in netstat input.
- [#3529](https://github.com/influxdata/telegraf/issues/3529): Fix HOST_MOUNT_PREFIX in docker with disk input.
## v1.4.4 [2017-11-08]
### Bugfixes
- [#3401](https://github.com/influxdata/telegraf/pull/3401): Use schema specified in mqtt_consumer input.
- [#3419](https://github.com/influxdata/telegraf/issues/3419): Redact datadog API key in log output.
- [#3311](https://github.com/influxdata/telegraf/issues/3311): Fix error getting pids in netstat input.
- [#3339](https://github.com/influxdata/telegraf/issues/3339): Support HOST_VAR envvar to locate /var in system input.
- [#3383](https://github.com/influxdata/telegraf/issues/3383): Use current time if docker container read time is zero value.
## v1.4.3 [2017-10-25]
### Bugfixes
- [#3327](https://github.com/influxdata/telegraf/issues/3327): Fix container name filters in docker input.
- [#3321](https://github.com/influxdata/telegraf/issues/3321): Fix snmpwalk address format in leofs input.
- [#3329](https://github.com/influxdata/telegraf/issues/3329): Fix case sensitivity issue in sqlserver query.
- [#3342](https://github.com/influxdata/telegraf/pull/3342): Fix CPU input plugin stuck after suspend on Linux.
- [#3013](https://github.com/influxdata/telegraf/issues/3013): Fix mongodb input panic when restarting mongodb.
- [#3224](https://github.com/influxdata/telegraf/pull/3224): Preserve url path prefix in influx output.
- [#3354](https://github.com/influxdata/telegraf/pull/3354): Fix TELEGRAF_OPTS expansion in systemd service unit.
- [#3357](https://github.com/influxdata/telegraf/issues/3357): Remove warning when JSON contains null value.
- [#3375](https://github.com/influxdata/telegraf/issues/3375): Fix ACL token usage in consul input plugin.
- [#3369](https://github.com/influxdata/telegraf/issues/3369): Fix unquoting error with Tomcat 6.
- [#3373](https://github.com/influxdata/telegraf/issues/3373): Fix syscall panic in diskio on some Linux systems.
## v1.4.2 [2017-10-10]
### Bugfixes
- [#3259](https://github.com/influxdata/telegraf/issues/3259): Fix error if int larger than 32-bit in /proc/vmstat.
- [#3265](https://github.com/influxdata/telegraf/issues/3265): Fix parsing of JSON with a UTF8 BOM in httpjson.
- [#2887](https://github.com/influxdata/telegraf/issues/2887): Allow JSON data format to contain zero metrics.
- [#3284](https://github.com/influxdata/telegraf/issues/3284): Fix format of connection_timeout in mqtt_consumer.
- [#3081](https://github.com/influxdata/telegraf/issues/3081): Fix case sensitivity error in sqlserver input.
- [#3297](https://github.com/influxdata/telegraf/issues/3297): Add support for proxy environment variables to http_response.
- [#1588](https://github.com/influxdata/telegraf/issues/1588): Add support for standard proxy env vars in outputs.
- [#3282](https://github.com/influxdata/telegraf/issues/3282): Fix panic in cpu input if number of cpus changes.
- [#2854](https://github.com/influxdata/telegraf/issues/2854): Use chunked transfer encoding in InfluxDB output.
## v1.4.1 [2017-09-26]
### Bugfixes
- [#3167](https://github.com/influxdata/telegraf/issues/3167): Fix MQTT input exits if Broker is not available on startup.
- [#3217](https://github.com/influxdata/telegraf/issues/3217): Fix optional field value conversions in fluentd input.
- [#3227](https://github.com/influxdata/telegraf/issues/3227): Whitelist allowed char classes for opentsdb output.
- [#3232](https://github.com/influxdata/telegraf/issues/3232): Fix counter and gauge metric types.
- [#3235](https://github.com/influxdata/telegraf/issues/3235): Fix skipped line with empty target in iptables.
- [#3175](https://github.com/influxdata/telegraf/issues/3175): Fix duplicate keys in perf counters sqlserver query.
- [#3230](https://github.com/influxdata/telegraf/issues/3230): Fix panic in statsd p100 calculation.
- [#3242](https://github.com/influxdata/telegraf/issues/3242): Fix arm64 packages contain 32-bit executable.
## v1.4 [2017-09-05]
### Release Notes
- The `kafka_consumer` input has been updated to support Kafka 0.9 and
above style consumer offset handling. The previous version of this plugin
supporting Kafka 0.8 and below is available as the `kafka_consumer_legacy`
plugin.
- In the `aerospike` input the `node_name` field has been changed to be a tag
for both the `aerospike_node` and `aerospike_namespace` measurements.
- The default prometheus_client port has been changed to 9273.
### New Plugins
- [fail2ban](./plugins/inputs/fail2ban/README.md) - Thanks to @grugrut
- [fluentd](./plugins/inputs/fluentd/README.md) - Thanks to @DanKans
- [histogram](./plugins/aggregators/histogram/README.md) - Thanks to @vlamug
- [minecraft](./plugins/inputs/minecraft/README.md) - Thanks to @adamperlin & @Ayrdrie
- [openldap](./plugins/inputs/openldap/README.md) - Thanks to @cobaugh
- [salesforce](./plugins/inputs/salesforce/README.md) - Thanks to @rody
- [tomcat](./plugins/inputs/tomcat/README.md) - Thanks to @mlindes
- [win_services](./plugins/inputs/win_services/README.md) - Thanks to @vlastahajek
- [zipkin](./plugins/inputs/zipkin/README.md) - Thanks to @adamperlin & @Ayrdrie
### Features
- [#2487](https://github.com/influxdata/telegraf/pull/2487): Add Kafka 0.9+ consumer support
- [#2773](https://github.com/influxdata/telegraf/pull/2773): Add support for self-signed certs to InfluxDB input plugin
- [#2293](https://github.com/influxdata/telegraf/pull/2293): Add TCP listener for statsd input
- [#2581](https://github.com/influxdata/telegraf/pull/2581): Add Docker container environment variables as tags. Only whitelisted
- [#2817](https://github.com/influxdata/telegraf/pull/2817): Add timeout option to IPMI sensor plugin
- [#2883](https://github.com/influxdata/telegraf/pull/2883): Add support for an optional SSL/TLS configuration to nginx input plugin
- [#2882](https://github.com/influxdata/telegraf/pull/2882): Add timezone support for logparser timestamps.
- [#2814](https://github.com/influxdata/telegraf/pull/2814): Add result_type field for http_response input.
- [#2734](https://github.com/influxdata/telegraf/pull/2734): Add include/exclude filters for docker containers.
- [#2602](https://github.com/influxdata/telegraf/pull/2602): Add secure connection support to graphite output.
- [#2908](https://github.com/influxdata/telegraf/pull/2908): Add min/max response time on linux/darwin to ping.
- [#2929](https://github.com/influxdata/telegraf/pull/2929): Add HTTP Proxy support to influxdb output.
- [#2933](https://github.com/influxdata/telegraf/pull/2933): Add standard SSL options to mysql input.
- [#2875](https://github.com/influxdata/telegraf/pull/2875): Add input plugin for fail2ban.
- [#2924](https://github.com/influxdata/telegraf/pull/2924): Support HOST_PROC in processes and linux_sysctl_fs inputs.
- [#2960](https://github.com/influxdata/telegraf/pull/2960): Add Minecraft input plugin.
- [#2963](https://github.com/influxdata/telegraf/pull/2963): Add support for RethinkDB 1.0 handshake protocol.
- [#2943](https://github.com/influxdata/telegraf/pull/2943): Add optional usage_active and time_active CPU metrics.
- [#2973](https://github.com/influxdata/telegraf/pull/2973): Change default prometheus_client port.
- [#2661](https://github.com/influxdata/telegraf/pull/2661): Add fluentd input plugin.
- [#2990](https://github.com/influxdata/telegraf/pull/2990): Add result_type field to net_response input plugin.
- [#2571](https://github.com/influxdata/telegraf/pull/2571): Add read timeout to socket_listener
- [#2612](https://github.com/influxdata/telegraf/pull/2612): Add input plugin for OpenLDAP.
- [#3042](https://github.com/influxdata/telegraf/pull/3042): Add network option to dns_query.
- [#3054](https://github.com/influxdata/telegraf/pull/3054): Add redis_version field to redis input.
- [#3063](https://github.com/influxdata/telegraf/pull/3063): Add tls options to docker input.
- [#2387](https://github.com/influxdata/telegraf/pull/2387): Add histogram aggregator plugin.
- [#3080](https://github.com/influxdata/telegraf/pull/3080): Add zipkin input plugin.
- [#3023](https://github.com/influxdata/telegraf/pull/3023): Add Windows Services input plugin.
- [#3098](https://github.com/influxdata/telegraf/pull/3098): Add path tag to logparser containing path of logfile.
- [#3075](https://github.com/influxdata/telegraf/pull/3075): Add salesforce input plugin.
- [#3097](https://github.com/influxdata/telegraf/pull/3097): Add option to run varnish under sudo.
- [#3119](https://github.com/influxdata/telegraf/pull/3119): Add weighted_io_time to diskio input.
- [#2978](https://github.com/influxdata/telegraf/pull/2978): Add gzip content-encoding support to influxdb output.
- [#3127](https://github.com/influxdata/telegraf/pull/3127): Allow using system plugin in Windows.
- [#3112](https://github.com/influxdata/telegraf/pull/3112): Add tomcat input plugin.
- [#3182](https://github.com/influxdata/telegraf/pull/3182): HTTP headers can be added to InfluxDB output.
### Bugfixes
- [#2607](https://github.com/influxdata/telegraf/issues/2607): Improve logging of errors in Cassandra input.
- [#2819](https://github.com/influxdata/telegraf/pull/2819): [enh] Set db_version to 0 if the version query fails
- [#2749](https://github.com/influxdata/telegraf/pull/2749): Fixed sqlserver input to work with case sensitive server collation.
- [#2716](https://github.com/influxdata/telegraf/pull/2716): Systemd does not see all shutdowns as failures
- [#2782](https://github.com/influxdata/telegraf/pull/2782): Reuse transports in input plugins
- [#2815](https://github.com/influxdata/telegraf/issues/2815): Fix processes input failing with "no such process".
- [#1137](https://github.com/influxdata/telegraf/issues/1137): Fix multiple plugin loading in win_perf_counters.
- [#2855](https://github.com/influxdata/telegraf/pull/2855): MySQL input: log and continue on field parse error.
- [#2885](https://github.com/influxdata/telegraf/pull/2885): Fix timeout option in Windows ping input sample configuration.
- [#2911](https://github.com/influxdata/telegraf/issues/2911): Fix Kinesis output plugin in govcloud.
- [#2917](https://github.com/influxdata/telegraf/issues/2917): Fix Aerospike input adds all nodes to a single series.
- [#2452](https://github.com/influxdata/telegraf/pull/2452): Improve Prometheus Client output documentation.
- [#2984](https://github.com/influxdata/telegraf/pull/2984): Display error message if prometheus output fails to listen.
- [#2997](https://github.com/influxdata/telegraf/issues/2997): Fix elasticsearch output content type detection warning.
- [#2914](https://github.com/influxdata/telegraf/issues/2914): Prevent possible deadlock when using aggregators.
- [#2860](https://github.com/influxdata/telegraf/issues/2860): Fix combined tagdrop/tagpass filtering.
- [#3036](https://github.com/influxdata/telegraf/pull/3036): Fix filtering when both pass and drop match an item.
- [#2964](https://github.com/influxdata/telegraf/issues/2964): Only report cpu usage for online cpus in docker input.
- [#3050](https://github.com/influxdata/telegraf/pull/3050): Start first aggregator period at startup time.
- [#2906](https://github.com/influxdata/telegraf/issues/2906): Fix panic in logparser if file cannot be opened.
- [#2886](https://github.com/influxdata/telegraf/issues/2886): Default to localhost if zookeeper has no servers set.
- [#2457](https://github.com/influxdata/telegraf/issues/2457): Fix docker memory and cpu reporting in Windows.
- [#3058](https://github.com/influxdata/telegraf/issues/3058): Allow iptable entries with trailing text.
- [#1680](https://github.com/influxdata/telegraf/issues/1680): Sanitize password from couchbase metric.
- [#3104](https://github.com/influxdata/telegraf/issues/3104): Converge to typed value in prometheus output.
- [#2899](https://github.com/influxdata/telegraf/issues/2899): Skip compilation of logparser and tail on Solaris.
- [#2951](https://github.com/influxdata/telegraf/issues/2951): Discard logging from tail library.
- [#3126](https://github.com/influxdata/telegraf/pull/3126): Remove log message on ping timeout.
- [#3144](https://github.com/influxdata/telegraf/issues/3144): Don't retry points beyond retention policy.
- [#3015](https://github.com/influxdata/telegraf/issues/3015): Don't start Telegraf on install in Amazon Linux.
- [#3153](https://github.com/influxdata/telegraf/issues/3153): Enable hddtemp input on all platforms.
- [#3142](https://github.com/influxdata/telegraf/issues/3142): Escape backslash within string fields.
- [#3162](https://github.com/influxdata/telegraf/issues/3162): Fix parsing of SHM remotes in ntpq input.
- [#3149](https://github.com/influxdata/telegraf/issues/3149): Don't fail parsing zpool stats if pool health is UNAVAIL on FreeBSD.
- [#2672](https://github.com/influxdata/telegraf/issues/2672): Fix NSQ input plugin when used with version 1.0.0-compat.
- [#2523](https://github.com/influxdata/telegraf/issues/2523): Added CloudWatch metric constraint validation.
- [#3179](https://github.com/influxdata/telegraf/issues/3179): Skip non-numerical values in graphite format.
- [#3187](https://github.com/influxdata/telegraf/issues/3187): Fix panic when handling string fields with escapes.
## v1.3.5 [2017-07-26]
### Bugfixes
- [#3049](https://github.com/influxdata/telegraf/issues/3049): Fix prometheus output cannot be reloaded.
- [#3037](https://github.com/influxdata/telegraf/issues/3037): Fix filestat reporting exists when cannot list directory.
- [#2386](https://github.com/influxdata/telegraf/issues/2386): Fix ntpq parse issue when using dns_lookup.
- [#2554](https://github.com/influxdata/telegraf/issues/2554): Fix panic when agent.interval = "0s".
## v1.3.4 [2017-07-12]
### Bugfixes
- [#3001](https://github.com/influxdata/telegraf/issues/3001): Fix handling of escape characters within fields.
- [#2988](https://github.com/influxdata/telegraf/issues/2988): Fix chrony plugin does not track system time offset.
- [#3004](https://github.com/influxdata/telegraf/issues/3004): Do not allow metrics with trailing slashes.
- [#3011](https://github.com/influxdata/telegraf/issues/3011): Prevent Write from being called concurrently.
## v1.3.3 [2017-06-28]
### Bugfixes
- [#2915](https://github.com/influxdata/telegraf/issues/2915): Allow dos line endings in tail and logparser.
- [#2937](https://github.com/influxdata/telegraf/issues/2937): Remove label value sanitization in prometheus output.
- [#2948](https://github.com/influxdata/telegraf/issues/2948): Fix bug parsing default timestamps with modified precision.
- [#2954](https://github.com/influxdata/telegraf/issues/2954): Fix panic in elasticsearch input if cannot determine master.
## v1.3.2 [2017-06-14]
### Bugfixes
- [#2862](https://github.com/influxdata/telegraf/issues/2862): Fix InfluxDB UDP metric splitting.
- [#2888](https://github.com/influxdata/telegraf/issues/2888): Fix mongodb/leofs urls without scheme.
- [#2822](https://github.com/influxdata/telegraf/issues/2822): Fix inconsistent label dimensions in prometheus output.
## v1.3.1 [2017-05-31]
### Bugfixes
- [#2749](https://github.com/influxdata/telegraf/pull/2749): Fixed sqlserver input to work with case sensitive server collation.
- [#2782](https://github.com/influxdata/telegraf/pull/2782): Reuse transports in input plugins
- [#2815](https://github.com/influxdata/telegraf/issues/2815): Fix processes input failing with "no such process".
- [#2851](https://github.com/influxdata/telegraf/pull/2851): Fix InfluxDB output database quoting.
- [#2856](https://github.com/influxdata/telegraf/issues/2856): Fix net input on older Linux kernels.
- [#2848](https://github.com/influxdata/telegraf/pull/2848): Fix panic in mongo input.
- [#2869](https://github.com/influxdata/telegraf/pull/2869): Fix length calculation of split metric buffer.
## v1.3 [2017-05-15]
### Release Notes
@@ -41,6 +393,9 @@ be deprecated eventually.
### Features
- [#2721](https://github.com/influxdata/telegraf/pull/2721): Added SASL options for kafka output plugin.
- [#2723](https://github.com/influxdata/telegraf/pull/2723): Added SSL configuration for input haproxy.
- [#2494](https://github.com/influxdata/telegraf/pull/2494): Add interrupts input plugin.
- [#2094](https://github.com/influxdata/telegraf/pull/2094): Add generic socket listener & writer.
- [#2204](https://github.com/influxdata/telegraf/pull/2204): Extend http_response to support searching for a substring in response. Return 1 if found, else 0.
- [#2137](https://github.com/influxdata/telegraf/pull/2137): Added userstats to mysql input plugin.
@@ -56,10 +411,33 @@ be deprecated eventually.
- [#2339](https://github.com/influxdata/telegraf/pull/2339): Increment gather_errors for all errors emitted by inputs.
- [#2071](https://github.com/influxdata/telegraf/issues/2071): Use official docker SDK.
- [#1678](https://github.com/influxdata/telegraf/pull/1678): Add AMQP consumer input plugin
- [#2512](https://github.com/influxdata/telegraf/pull/2512): Added pprof tool.
- [#2501](https://github.com/influxdata/telegraf/pull/2501): Support DEAD(X) state in system input plugin.
- [#2522](https://github.com/influxdata/telegraf/pull/2522): Add support for mongodb client certificates.
- [#1948](https://github.com/influxdata/telegraf/pull/1948): Support adding SNMP table indexes as tags.
- [#2332](https://github.com/influxdata/telegraf/pull/2332): Add Elasticsearch 5.x output
- [#2587](https://github.com/influxdata/telegraf/pull/2587): Add json timestamp units configurability
- [#2597](https://github.com/influxdata/telegraf/issues/2597): Add support for Linux sysctl-fs metrics.
- [#2425](https://github.com/influxdata/telegraf/pull/2425): Support to include/exclude docker container labels as tags
- [#1667](https://github.com/influxdata/telegraf/pull/1667): dmcache input plugin
- [#2637](https://github.com/influxdata/telegraf/issues/2637): Add support for precision in http_listener
- [#2636](https://github.com/influxdata/telegraf/pull/2636): Add `message_len_max` option to `kafka_consumer` input
- [#1100](https://github.com/influxdata/telegraf/issues/1100): Add collectd parser
- [#1820](https://github.com/influxdata/telegraf/issues/1820): Easier plugin testing without outputs
- [#2493](https://github.com/influxdata/telegraf/pull/2493): Check signature in the GitHub webhook plugin
- [#2038](https://github.com/influxdata/telegraf/issues/2038): Add papertrail support to webhooks
- [#2253](https://github.com/influxdata/telegraf/pull/2253): Change jolokia plugin to use bulk requests.
- [#2575](https://github.com/influxdata/telegraf/issues/2575): Add diskio input for Darwin
- [#2705](https://github.com/influxdata/telegraf/pull/2705): Kinesis output: add use_random_partitionkey option
- [#2635](https://github.com/influxdata/telegraf/issues/2635): Add TCP keep-alive to socket_listener & socket_writer
- [#2031](https://github.com/influxdata/telegraf/pull/2031): Add Kapacitor input plugin
- [#2732](https://github.com/influxdata/telegraf/pull/2732): Use go 1.8.1
- [#2712](https://github.com/influxdata/telegraf/issues/2712): Documentation for rabbitmq input plugin
- [#2141](https://github.com/influxdata/telegraf/pull/2141): Logparser handles newly-created files.
### Bugfixes
- [#2633](https://github.com/influxdata/telegraf/pull/2633): ipmi_sensor: allow @ symbol in password
- [#2077](https://github.com/influxdata/telegraf/issues/2077): SQL Server Input - Arithmetic overflow error converting numeric to data type int.
- [#2262](https://github.com/influxdata/telegraf/issues/2262): Flush jitter can inhibit metric collection.
- [#2318](https://github.com/influxdata/telegraf/issues/2318): haproxy input - Add missing fields.
@@ -67,6 +445,7 @@ be deprecated eventually.
- [#2356](https://github.com/influxdata/telegraf/issues/2356): cpu input panic when /proc/stat is empty.
- [#2341](https://github.com/influxdata/telegraf/issues/2341): telegraf swallowing panics in --test mode.
- [#2358](https://github.com/influxdata/telegraf/pull/2358): Create pidfile with 644 permissions & defer file deletion.
- [#2360](https://github.com/influxdata/telegraf/pull/2360): Fixed install/remove of telegraf on non-systemd Debian/Ubuntu systems
- [#2282](https://github.com/influxdata/telegraf/issues/2282): Reloading telegraf freezes prometheus output.
- [#2390](https://github.com/influxdata/telegraf/issues/2390): Empty tag value causes error on InfluxDB output.
- [#2380](https://github.com/influxdata/telegraf/issues/2380): buffer_size field value is negative number from "internal" plugin.
@@ -78,7 +457,23 @@ be deprecated eventually.
- [#2483](https://github.com/influxdata/telegraf/pull/2483): Fix win_perf_counters capping values at 100.
- [#2498](https://github.com/influxdata/telegraf/pull/2498): Exporting Ipmi.Path to be set by config.
- [#2500](https://github.com/influxdata/telegraf/pull/2500): Remove warning when parsing empty content
- [#2520](https://github.com/influxdata/telegraf/pull/2520): Update default value for Cloudwatch rate limit
- [#2513](https://github.com/influxdata/telegraf/issues/2513): create /etc/telegraf/telegraf.d directory in tarball.
- [#2541](https://github.com/influxdata/telegraf/issues/2541): Return error on unsupported serializer data format.
- [#1827](https://github.com/influxdata/telegraf/issues/1827): Fix Windows Performance Counters multi instance identifier
- [#2576](https://github.com/influxdata/telegraf/pull/2576): Add write timeout to Riemann output
- [#2596](https://github.com/influxdata/telegraf/pull/2596): Fix timestamp parsing on prometheus plugin
- [#2610](https://github.com/influxdata/telegraf/pull/2610): Fix deadlock when output cannot write
- [#2410](https://github.com/influxdata/telegraf/issues/2410): Fix connection leak in postgresql.
- [#2628](https://github.com/influxdata/telegraf/issues/2628): Set default measurement name for snmp input.
- [#2649](https://github.com/influxdata/telegraf/pull/2649): Improve performance of diskio with many disks
- [#2671](https://github.com/influxdata/telegraf/issues/2671): The internal input plugin uses the wrong units for `heap_objects`
- [#2684](https://github.com/influxdata/telegraf/pull/2684): Fix ipmi_sensor config is shared between all plugin instances
- [#2450](https://github.com/influxdata/telegraf/issues/2450): Network statistics not collected when system has alias interfaces
- [#1911](https://github.com/influxdata/telegraf/issues/1911): Sysstat plugin needs LANG=C or similar locale
- [#2528](https://github.com/influxdata/telegraf/issues/2528): File output closes standard streams on reload.
- [#2603](https://github.com/influxdata/telegraf/issues/2603): AMQP output disconnect blocks all outputs
- [#2706](https://github.com/influxdata/telegraf/issues/2706): Improve documentation for redis input plugin
## v1.2.1 [2017-02-01]


@@ -12,7 +12,7 @@ but any information you can provide on how the data will look is appreciated.
See the [OpenTSDB output](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/opentsdb)
for a good example.
1. **Optional:** Help users of your plugin by including example queries for populating dashboards. Include these sample queries in the `README.md` for the plugin.
1. **Optional:** Write a [tickscript](https://docs.influxdata.com/kapacitor/v1.0/tick/syntax/) for your plugin and add it to [Kapacitor](https://github.com/influxdata/kapacitor/tree/master/examples/telegraf). Or mention @jackzampolin in a PR comment with some common queries that you would want to alert on and he will write one for you.
1. **Optional:** Write a [tickscript](https://docs.influxdata.com/kapacitor/v1.0/tick/syntax/) for your plugin and add it to [Kapacitor](https://github.com/influxdata/kapacitor/tree/master/examples/telegraf).
## GoDoc
@@ -52,7 +52,7 @@ See below for a quick example.
* Input Plugins must be added to the
`github.com/influxdata/telegraf/plugins/inputs/all/all.go` file.
* The `SampleConfig` function should return valid toml that describes how the
plugin can be configured. This is included in `telegraf -sample-config`.
plugin can be configured. This is included in `telegraf config`.
* The `Description` function should say in one line what this plugin does.
Let's say you've written a plugin that emits metrics about processes on the
@@ -124,7 +124,7 @@ You should also add the following to your SampleConfig() return:
```toml
## Data format to consume.
## Each data format has it's own unique set of configuration options, read
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "influx"
@@ -183,7 +183,7 @@ See below for a quick example.
* To be available within Telegraf itself, plugins must add themselves to the
`github.com/influxdata/telegraf/plugins/outputs/all/all.go` file.
* The `SampleConfig` function should return valid toml that describes how the
output can be configured. This is included in `telegraf -sample-config`.
output can be configured. This is included in `telegraf config`.
* The `Description` function should say in one line what this output does.
### Output Example
@@ -254,7 +254,7 @@ You should also add the following to your SampleConfig() return:
```toml
## Data format to output.
## Each data format has it's own unique set of configuration options, read
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "influx"
@@ -287,7 +287,7 @@ See below for a quick example.
* To be available within Telegraf itself, plugins must add themselves to the
`github.com/influxdata/telegraf/plugins/processors/all/all.go` file.
* The `SampleConfig` function should return valid toml that describes how the
processor can be configured. This is included in `telegraf -sample-config`.
processor can be configured. This is included in the output of `telegraf config`.
* The `Description` function should say in one line what this processor does.
### Processor Example
@@ -344,7 +344,7 @@ See below for a quick example.
* To be available within Telegraf itself, plugins must add themselves to the
`github.com/influxdata/telegraf/plugins/aggregators/all/all.go` file.
* The `SampleConfig` function should return valid toml that describes how the
aggregator can be configured. This is included in `telegraf -sample-config`.
aggregator can be configured. This is included in `telegraf config`.
* The `Description` function should say in one line what this aggregator does.
* The Aggregator plugin will need to keep caches of metrics that have passed
through it. This should be done using the builtin `HashID()` function of each
@@ -457,29 +457,28 @@ func init() {
## Unit Tests
Before opening a pull request you should run the linter checks and
the short tests.
### Execute linter
execute `make lint`
### Execute short tests
execute `make test-short`
execute `make test`
### Execute long tests
### Execute integration tests
As Telegraf collects metrics from several third-party services it becomes a
difficult task to mock each service as some of them have complicated protocols
which would take some time to replicate.
Running the integration tests requires several docker containers to be
running. You can start the containers with:
```
make docker-run
```
To overcome this situation we've decided to use docker containers to provide a
fast and reproducible environment to test those services which require it.
For other situations
(i.e: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/redis/redis_test.go)
a simple mock will suffice.
And run the full test suite with:
```
make test-all
```
To execute Telegraf tests follow these simple steps:
- Install docker following [these](https://docs.docker.com/installation/)
instructions
- execute `make test`
### Unit test troubleshooting
Try cleaning up your test environment by executing `make docker-kill` and
re-running
Use `make docker-kill` to stop the containers.

Godeps

@@ -1,62 +1,92 @@
github.com/Shopify/sarama 574d3147eee384229bf96a5d12c207fe7b5234f3
github.com/Sirupsen/logrus 61e43dc76f7ee59a82bdf3d71033dc12bea4c77d
collectd.org 2ce144541b8903101fb8f1483cc0497a68798122
github.com/aerospike/aerospike-client-go 95e1ad7791bdbca44707fedbb29be42024900d9c
github.com/amir/raidman c74861fe6a7bb8ede0a010ce4485bdbb4fc4c985
github.com/aws/aws-sdk-go 7524cb911daddd6e5c9195def8e59ae892bef8d9
github.com/apache/thrift 4aaa92ece8503a6da9bc6701604f69acf2b99d07
github.com/aws/aws-sdk-go c861d27d0304a79f727e9a8a4e2ac1e74602fdc0
github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
github.com/bsm/sarama-cluster abf039439f66c1ce78017f560b490612552f6472
github.com/cenkalti/backoff b02f2bbce11d7ea6b97f282ef1771b0fe2f65ef3
github.com/couchbase/go-couchbase bfe555a140d53dc1adf390f1a1d4b0fd4ceadb28
github.com/couchbase/gomemcached 4a25d2f4e1dea9ea7dd76dfd943407abf9b07d29
github.com/couchbase/goutils 5823a0cbaaa9008406021dc5daf80125ea30bba6
github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
github.com/docker/docker b89aff1afa1f61993ab2ba18fd62d9375a195f5d
github.com/dgrijalva/jwt-go dbeaa9332f19a944acb5736b4456cfcc02140e29
github.com/docker/docker f5ec1e2936dcbe7b5001c2b817188b095c700c27
github.com/docker/go-connections 990a1a1a70b0da4c4cb70e117971a4f0babfbf1a
github.com/eapache/go-resiliency b86b1ec0dd4209a588dc1285cdd471e73525c0b3
github.com/eapache/go-xerial-snappy bb955e01b9346ac19dc29eb16586c90ded99a98c
github.com/eapache/queue 44cc805cf13205b55f69e14bcb69867d1ae92f98
github.com/eclipse/paho.mqtt.golang d4f545eb108a2d19f9b1a735689dbfb719bc21fb
github.com/go-logfmt/logfmt 390ab7935ee28ec6b286364bba9b4dd6410cb3d5
github.com/go-sql-driver/mysql 2e00b5cd70399450106cec6431c2e2ce3cae5034
github.com/gobwas/glob bea32b9cd2d6f55753d94a28e959b13f0244797a
github.com/go-ini/ini 9144852efba7c4daf409943ee90767da62d55438
github.com/gogo/protobuf 7b6c6391c4ff245962047fc1e2c6e08b1cdfa0e8
github.com/golang/protobuf 8ee79997227bf9b34611aee7946ae64735e6fd93
github.com/golang/snappy 7db9049039a047d955fe8c19b83c8ff5abd765c7
github.com/go-ole/go-ole be49f7c07711fcb603cff39e1de7c67926dc0ba7
github.com/google/go-cmp f94e52cad91c65a63acc1e75d4be223ea22e99bc
github.com/gorilla/mux 392c28fe23e1c45ddba891b0320b3b5df220beea
github.com/go-sql-driver/mysql 2e00b5cd70399450106cec6431c2e2ce3cae5034
github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478
github.com/hashicorp/consul 63d2fc68239b996096a1c55a0d4b400ea4c2583f
github.com/hpcloud/tail 915e5feba042395f5fda4dbe9c0e99aeab3088b3
github.com/influxdata/tail a395bf99fe07c233f41fba0735fa2b13b58588ea
github.com/influxdata/toml 5d1d907f22ead1cd47adde17ceec5bda9cacaf8f
github.com/influxdata/wlog 7c63b0a71ef8300adc255344d275e10e5c3a71ec
github.com/jackc/pgx c8080fc4a1bfa44bf90383ad0fdce2f68b7d313c
github.com/jackc/pgx 63f58fd32edb5684b9e9f4cfaac847c6b42b3917
github.com/jmespath/go-jmespath bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d
github.com/kardianos/osext c2c54e542fb797ad986b31721e1baedf214ca413
github.com/kardianos/service 6d3a0ee7d3425d9d835debc51a0ca1ffa28f4893
github.com/kballard/go-shellquote d8ec1a69a250a17bb0e419c386eac1f3711dc142
github.com/klauspost/crc32 cb6bfca970f6908083f26f39a79009d608efd5cd
github.com/matttproud/golang_protobuf_extensions c12348ce28de40eed0136aa2b644d0ee0650e56c
github.com/Microsoft/go-winio ce2922f643c8fd76b46cadc7f404a06282678b34
github.com/miekg/dns 99f84ae56e75126dd77e5de4fae2ea034a468ca1
github.com/mitchellh/mapstructure d0303fe809921458f417bcf828397a65db30a7e4
github.com/multiplay/go-ts3 07477f49b8dfa3ada231afc7b7b17617d42afe8e
github.com/naoina/go-stringutil 6b638e95a32d0c1131db0e7fe83775cbea4a0d0b
github.com/nats-io/go-nats ea9585611a4ab58a205b9b125ebd74c389a6b898
github.com/nats-io/nats ea9585611a4ab58a205b9b125ebd74c389a6b898
github.com/nats-io/nuid 289cccf02c178dc782430d534e3c1f5b72af807f
github.com/nsqio/go-nsq a53d495e81424aaf7a7665a9d32a97715c40e953
github.com/nsqio/go-nsq eee57a3ac4174c55924125bb15eeeda8cffb6e6f
github.com/opencontainers/runc 89ab7f2ccc1e45ddf6485eaa802c35dcf321dfc8
github.com/opentracing-contrib/go-observer a52f2342449246d5bcc273e65cbdcfa5f7d6c63c
github.com/opentracing/opentracing-go 06f47b42c792fef2796e9681353e1d908c417827
github.com/openzipkin/zipkin-go-opentracing 1cafbdfde94fbf2b373534764e0863aa3bd0bf7b
github.com/pierrec/lz4 5c9560bfa9ace2bf86080bf40d46b34ae44604df
github.com/pierrec/xxHash 5a004441f897722c627870a981d02b29924215fa
github.com/pkg/errors 645ef00459ed84a119197bfb8d8205042c6df63d
github.com/pmezard/go-difflib/difflib 792786c7400a136282c1664665ae0a8db921c6c2
github.com/prometheus/client_golang c317fb74746eac4fc65fe3909195f4cf67c5562a
github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
github.com/prometheus/common dd2f054febf4a6c00f2343686efb775948a8bff4
github.com/prometheus/procfs 1878d9fbb537119d24b21ca07effd591627cd160
github.com/rcrowley/go-metrics 1f30fe9094a513ce4c700b9a54458bbb0c96996c
github.com/samuel/go-zookeeper 1d7be4effb13d2d908342d349d71a284a7542693
github.com/shirou/gopsutil d371ba1293cb48fedc6850526ea48b3846c54f2c
github.com/satori/go.uuid 5bf94b69c6b68ee1b541973bb8e1144db23a194b
github.com/shirou/gopsutil 384a55110aa5ae052eb93ea94940548c1e305a99
github.com/shirou/w32 3c9377fc6748f222729a8270fe2775d149a249ad
github.com/Shopify/sarama 3b1b38866a79f06deddf0487d5c27ba0697ccd65
github.com/Sirupsen/logrus 61e43dc76f7ee59a82bdf3d71033dc12bea4c77d
github.com/soniah/gosnmp 5ad50dc75ab389f8a1c9f8a67d3a1cd85f67ed15
github.com/StackExchange/wmi f3e2bae1e0cb5aef83e319133eabfee30013a4a5
github.com/streadway/amqp 63795daa9a446c920826655f26ba31c81c860fd6
github.com/stretchr/objx 1a9d0bb9f541897e62256577b352fdbc1fb4fd94
github.com/stretchr/testify 4d4bfba8f1d1027c4fdbe371823030df51419987
github.com/vjeantet/grok 83bfdfdfd1a8146795b28e547a8e3c8b28a466c2
github.com/vjeantet/grok d73e972b60935c7fec0b4ffbc904ed39ecaf7efe
github.com/wvanbergen/kafka bc265fedb9ff5b5c5d3c0fdcef4a819b3523d3ee
github.com/wvanbergen/kazoo-go 968957352185472eacb69215fa3dbfcfdbac1096
github.com/yuin/gopher-lua 66c871e454fcf10251c61bf8eff02d0978cae75a
github.com/zensqlmonitor/go-mssqldb ffe5510c6fa5e15e6d983210ab501c815b56b363
golang.org/x/crypto dc137beb6cce2043eb6b5f223ab8bf51c32459f4
golang.org/x/net f2499483f923065a842d38eb4c7f1927e6fc6e6d
golang.org/x/sys 739734461d1c916b6c72a63d7efda2b27edb369f
golang.org/x/text 506f9d5c962f284575e88337e7d9296d27e729d3
gopkg.in/dancannon/gorethink.v1 edc7a6a68e2d8015f5ffe1b2560eed989f8a45be
gopkg.in/asn1-ber.v1 4e86f4367175e39f69d9358a5f17b4dda270378d
gopkg.in/fatih/pool.v2 6e328e67893eb46323ad06f0e92cb9536babbabc
gopkg.in/fsnotify.v1 a8a77c9133d2d6fd8334f3260d06f60e8d80a5fb
gopkg.in/gorethink/gorethink.v3 7ab832f7b65573104a555d84a27992ae9ea1f659
gopkg.in/ldap.v2 8168ee085ee43257585e50c6441aadf54ecb2c9f
gopkg.in/mgo.v2 3f83fa5005286a7fe593b055f0d7771a7dce4655
gopkg.in/olivere/elastic.v5 3113f9b9ad37509fe5f8a0e5e91c96fdc4435e26
gopkg.in/tomb.v1 dd632973f1e7218eb1089048e0798ec9ae7dceb8
gopkg.in/yaml.v2 4c78c975fe7c825c6d1466c42be594d1d6f3aba6

Godeps_windows (deleted file)

@@ -1,11 +0,0 @@
github.com/Microsoft/go-winio ce2922f643c8fd76b46cadc7f404a06282678b34
github.com/StackExchange/wmi f3e2bae1e0cb5aef83e319133eabfee30013a4a5
github.com/go-ole/go-ole be49f7c07711fcb603cff39e1de7c67926dc0ba7
github.com/shirou/w32 3c9377fc6748f222729a8270fe2775d149a249ad
golang.org/x/sys a646d33e2ee3172a661fc09bca23bb4889a41bc8
github.com/go-ini/ini 9144852efba7c4daf409943ee90767da62d55438
github.com/jmespath/go-jmespath bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d
github.com/pmezard/go-difflib/difflib 792786c7400a136282c1664665ae0a8db921c6c2
github.com/stretchr/objx 1a9d0bb9f541897e62256577b352fdbc1fb4fd94
gopkg.in/fsnotify.v1 a8a77c9133d2d6fd8334f3260d06f60e8d80a5fb
gopkg.in/tomb.v1 dd632973f1e7218eb1089048e0798ec9ae7dceb8

Makefile (147 changed lines)

@@ -1,56 +1,77 @@
VERSION := $(shell sh -c 'git describe --always --tags')
BRANCH := $(shell sh -c 'git rev-parse --abbrev-ref HEAD')
COMMIT := $(shell sh -c 'git rev-parse --short HEAD')
PREFIX := /usr/local
VERSION := $(shell git describe --exact-match --tags 2>/dev/null)
BRANCH := $(shell git rev-parse --abbrev-ref HEAD)
COMMIT := $(shell git rev-parse --short HEAD)
ifdef GOBIN
PATH := $(GOBIN):$(PATH)
else
PATH := $(subst :,/bin:,$(GOPATH))/bin:$(PATH)
endif
# Standard Telegraf build
default: prepare build
TELEGRAF := telegraf$(shell go tool dist env | grep -q 'GOOS=.windows.' && echo .exe)
# Windows build
windows: prepare-windows build-windows
LDFLAGS := $(LDFLAGS) -X main.commit=$(COMMIT) -X main.branch=$(BRANCH)
ifdef VERSION
LDFLAGS += -X main.version=$(VERSION)
endif
# Only run the build (no dependency grabbing)
build:
go install -ldflags \
"-X main.version=$(VERSION) -X main.commit=$(COMMIT) -X main.branch=$(BRANCH)" ./...
all:
$(MAKE) deps
$(MAKE) telegraf
build-windows:
GOOS=windows GOARCH=amd64 go build -o telegraf.exe -ldflags \
"-X main.version=$(VERSION) -X main.commit=$(COMMIT) -X main.branch=$(BRANCH)" \
./cmd/telegraf/telegraf.go
deps:
go get github.com/sparrc/gdm
gdm restore
build-for-docker:
CGO_ENABLED=0 GOOS=linux go build -installsuffix cgo -o telegraf -ldflags \
"-s -X main.version=$(VERSION) -X main.commit=$(COMMIT) -X main.branch=$(BRANCH)" \
./cmd/telegraf/telegraf.go
telegraf:
go build -i -o $(TELEGRAF) -ldflags "$(LDFLAGS)" ./cmd/telegraf/telegraf.go
go-install:
go install -ldflags "-w -s $(LDFLAGS)" ./cmd/telegraf
install: telegraf
mkdir -p $(DESTDIR)$(PREFIX)/bin/
cp $(TELEGRAF) $(DESTDIR)$(PREFIX)/bin/
test:
go test -short ./...
test-windows:
go test ./plugins/inputs/ping/...
go test ./plugins/inputs/win_perf_counters/...
go test ./plugins/inputs/win_services/...
lint:
go vet ./...
test-all: lint
go test ./...
# run package script
package:
./scripts/build.py --package --version="$(VERSION)" --platform=linux --arch=all --upload
./scripts/build.py --package --platform=all --arch=all
# Get dependencies and use gdm to checkout changesets
prepare:
go get github.com/sparrc/gdm
gdm restore
clean:
-rm -f telegraf
-rm -f telegraf.exe
# Use the windows godeps file to prepare dependencies
prepare-windows:
go get github.com/sparrc/gdm
gdm restore
gdm restore -f Godeps_windows
docker-image:
./scripts/build.py --package --platform=linux --arch=amd64
cp build/telegraf*$(COMMIT)*.deb .
docker build -f scripts/dev.docker --build-arg "package=telegraf*$(COMMIT)*.deb" -t "telegraf-dev:$(COMMIT)" .
# Run all docker containers necessary for unit tests
# Run all docker containers necessary for integration tests
docker-run:
docker run --name aerospike -p "3000:3000" -d aerospike/aerospike-server:3.9.0
docker run --name zookeeper -p "2181:2181" -d wurstmeister/zookeeper
docker run --name kafka \
-e ADVERTISED_HOST=localhost \
-e ADVERTISED_PORT=9092 \
-p "2181:2181" -p "9092:9092" \
-d spotify/kafka
--link zookeeper:zookeeper \
-e KAFKA_ADVERTISED_HOST_NAME=localhost \
-e KAFKA_ADVERTISED_PORT=9092 \
-e KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181 \
-e KAFKA_CREATE_TOPICS="test:1:1" \
-p "9092:9092" \
-d wurstmeister/kafka
docker run --name elasticsearch -p "9200:9200" -p "9300:9300" -d elasticsearch:5
docker run --name mysql -p "3306:3306" -e MYSQL_ALLOW_EMPTY_PASSWORD=yes -d mysql
docker run --name memcached -p "11211:11211" -d memcached
docker run --name postgres -p "5432:5432" -d postgres
@@ -60,37 +81,47 @@ docker-run:
docker run --name mqtt -p "1883:1883" -d ncarlier/mqtt
docker run --name riemann -p "5555:5555" -d stealthly/docker-riemann
docker run --name nats -p "4222:4222" -d nats
docker run --name openldap \
-e SLAPD_CONFIG_ROOTDN="cn=manager,cn=config" \
-e SLAPD_CONFIG_ROOTPW="secret" \
-p "389:389" -p "636:636" \
-d cobaugh/openldap-alpine
docker run --name cratedb \
-p "6543:5432" \
-d crate:2.2 \
-Cnetwork.host=0.0.0.0 \
-Ctransport.host=localhost \
-Clicense.enterprise=false
# Run docker containers necessary for CircleCI unit tests
# Run docker containers necessary for integration tests; skipping services provided
# by CircleCI
docker-run-circle:
docker run --name aerospike -p "3000:3000" -d aerospike/aerospike-server:3.9.0
docker run --name zookeeper -p "2181:2181" -d wurstmeister/zookeeper
docker run --name kafka \
-e ADVERTISED_HOST=localhost \
-e ADVERTISED_PORT=9092 \
-p "2181:2181" -p "9092:9092" \
-d spotify/kafka
--link zookeeper:zookeeper \
-e KAFKA_ADVERTISED_HOST_NAME=localhost \
-e KAFKA_ADVERTISED_PORT=9092 \
-e KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181 \
-e KAFKA_CREATE_TOPICS="test:1:1" \
-p "9092:9092" \
-d wurstmeister/kafka
docker run --name elasticsearch -p "9200:9200" -p "9300:9300" -d elasticsearch:5
docker run --name nsq -p "4150:4150" -d nsqio/nsq /nsqd
docker run --name mqtt -p "1883:1883" -d ncarlier/mqtt
docker run --name riemann -p "5555:5555" -d stealthly/docker-riemann
docker run --name nats -p "4222:4222" -d nats
docker run --name openldap \
-e SLAPD_CONFIG_ROOTDN="cn=manager,cn=config" \
-e SLAPD_CONFIG_ROOTPW="secret" \
-p "389:389" -p "636:636" \
-d cobaugh/openldap-alpine
# Kill all docker containers, ignore errors
docker-kill:
-docker kill nsq aerospike redis rabbitmq postgres memcached mysql kafka mqtt riemann nats
-docker rm nsq aerospike redis rabbitmq postgres memcached mysql kafka mqtt riemann nats
-docker kill aerospike elasticsearch kafka memcached mqtt mysql nats nsq \
openldap postgres rabbitmq redis riemann zookeeper cratedb
-docker rm aerospike elasticsearch kafka memcached mqtt mysql nats nsq \
openldap postgres rabbitmq redis riemann zookeeper cratedb
# Run full unit tests using docker containers (includes setup and teardown)
test: vet docker-kill docker-run
# Sleeping for kafka leadership election, TSDB setup, etc.
sleep 60
# SUCCESS, running tests
go test -race ./...
# Run "short" unit tests
test-short: vet
go test -short ./...
vet:
go vet ./...
.PHONY: test test-short vet build default
.PHONY: deps telegraf telegraf.exe install test test-windows lint test-all \
package clean docker-run docker-run-circle docker-kill docker-image

README.md (136 changed lines)

@@ -5,8 +5,7 @@ and writing metrics.
Design goals are to have a minimal memory footprint with a plugin system so
that developers in the community can easily add support for collecting metrics
from local or remote services.
Telegraf is plugin-driven and has the concept of 4 distinct plugins:
@@ -20,20 +19,20 @@ For more information on Processor and Aggregator plugins please [read this](./do
New plugins are designed to be easy to contribute; we'll eagerly accept pull
requests and will manage the set of plugins that Telegraf supports.
See the [contributing guide](CONTRIBUTING.md) for instructions on writing
new plugins.
## Contributing
There are many ways to contribute:
- Fix and [report bugs](https://github.com/influxdata/telegraf/issues/new)
- [Improve documentation](https://github.com/influxdata/telegraf/issues?q=is%3Aopen+label%3Adocumentation)
- [Review code and feature proposals](https://github.com/influxdata/telegraf/pulls)
- Answer questions on github and on the [Community Site](https://community.influxdata.com/)
- [Contribute plugins](CONTRIBUTING.md)
## Installation:
You can download the binaries directly from the [downloads](https://www.influxdata.com/downloads) page
or from the [releases](https://github.com/influxdata/telegraf/releases) section.
### Ansible Role:
@@ -41,52 +40,80 @@ Ansible role: https://github.com/rossmcdonald/telegraf
### From Source:
Telegraf requires golang version 1.8+; the Makefile requires GNU make.
Dependencies are managed with [gdm](https://github.com/sparrc/gdm),
which is installed by the Makefile if you don't have it already.
1. [Install Go](https://golang.org/doc/install)
2. [Setup your GOPATH](https://golang.org/doc/code.html#GOPATH)
3. Run `go get -d github.com/influxdata/telegraf`
4. Run `cd $GOPATH/src/github.com/influxdata/telegraf`
5. Run `make`
### Nightly Builds
These builds are generated from the master branch:
- [telegraf_nightly_amd64.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_amd64.deb)
- [telegraf_nightly_arm64.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_arm64.deb)
- [telegraf-nightly.arm64.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.arm64.rpm)
- [telegraf_nightly_armel.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_armel.deb)
- [telegraf-nightly.armel.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.armel.rpm)
- [telegraf_nightly_armhf.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_armhf.deb)
- [telegraf-nightly.armv6hl.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.armv6hl.rpm)
- [telegraf-nightly_freebsd_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_freebsd_amd64.tar.gz)
- [telegraf-nightly_freebsd_i386.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_freebsd_i386.tar.gz)
- [telegraf_nightly_i386.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_i386.deb)
- [telegraf-nightly.i386.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.i386.rpm)
- [telegraf-nightly_linux_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_amd64.tar.gz)
- [telegraf-nightly_linux_arm64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_arm64.tar.gz)
- [telegraf-nightly_linux_armel.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_armel.tar.gz)
- [telegraf-nightly_linux_armhf.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_armhf.tar.gz)
- [telegraf-nightly_linux_i386.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_i386.tar.gz)
- [telegraf-nightly_linux_s390x.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_s390x.tar.gz)
- [telegraf_nightly_s390x.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_s390x.deb)
- [telegraf-nightly.s390x.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.s390x.rpm)
- [telegraf-nightly_windows_amd64.zip](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_windows_amd64.zip)
- [telegraf-nightly_windows_i386.zip](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_windows_i386.zip)
- [telegraf-nightly.x86_64.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.x86_64.rpm)
- [telegraf-static-nightly_linux_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-static-nightly_linux_amd64.tar.gz)
## How to use it:

See usage with:

```
./telegraf --help
```

#### Generate a telegraf config file:

```
./telegraf config > telegraf.conf
```

#### Generate config with only cpu input & influxdb output plugins defined:

```
./telegraf --input-filter cpu --output-filter influxdb config
```

#### Run a single telegraf collection, outputting metrics to stdout:

```
./telegraf --config telegraf.conf --test
```

#### Run telegraf with all plugins defined in config file:

```
./telegraf --config telegraf.conf
```

#### Run telegraf, enabling the cpu & memory input, and influxdb output plugins:

```
./telegraf --config telegraf.conf --input-filter cpu:mem --output-filter influxdb
```
@@ -102,6 +129,7 @@ configuration options.
* [apache](./plugins/inputs/apache)
* [aws cloudwatch](./plugins/inputs/cloudwatch)
* [bcache](./plugins/inputs/bcache)
* [bond](./plugins/inputs/bond)
* [cassandra](./plugins/inputs/cassandra)
* [ceph](./plugins/inputs/ceph)
* [cgroup](./plugins/inputs/cgroup)
@@ -110,58 +138,79 @@ configuration options.
* [conntrack](./plugins/inputs/conntrack)
* [couchbase](./plugins/inputs/couchbase)
* [couchdb](./plugins/inputs/couchdb)
* [DC/OS](./plugins/inputs/dcos)
* [disque](./plugins/inputs/disque)
* [dmcache](./plugins/inputs/dmcache)
* [dns query time](./plugins/inputs/dns_query)
* [docker](./plugins/inputs/docker)
* [dovecot](./plugins/inputs/dovecot)
* [elasticsearch](./plugins/inputs/elasticsearch)
* [exec](./plugins/inputs/exec) (generic executable plugin, supports JSON, influx, graphite and nagios)
* [fail2ban](./plugins/inputs/fail2ban)
* [filestat](./plugins/inputs/filestat)
* [fluentd](./plugins/inputs/fluentd)
* [graylog](./plugins/inputs/graylog)
* [haproxy](./plugins/inputs/haproxy)
* [hddtemp](./plugins/inputs/hddtemp)
* [http_response](./plugins/inputs/http_response)
* [httpjson](./plugins/inputs/httpjson) (generic JSON-emitting http service plugin)
* [internal](./plugins/inputs/internal)
* [influxdb](./plugins/inputs/influxdb)
* [interrupts](./plugins/inputs/interrupts)
* [ipmi_sensor](./plugins/inputs/ipmi_sensor)
* [iptables](./plugins/inputs/iptables)
* [jolokia](./plugins/inputs/jolokia) (deprecated, use [jolokia2](./plugins/inputs/jolokia2))
* [jolokia2](./plugins/inputs/jolokia2)
* [kapacitor](./plugins/inputs/kapacitor)
* [kubernetes](./plugins/inputs/kubernetes)
* [leofs](./plugins/inputs/leofs)
* [lustre2](./plugins/inputs/lustre2)
* [mailchimp](./plugins/inputs/mailchimp)
* [memcached](./plugins/inputs/memcached)
* [mesos](./plugins/inputs/mesos)
* [minecraft](./plugins/inputs/minecraft)
* [mongodb](./plugins/inputs/mongodb)
* [mysql](./plugins/inputs/mysql)
* [net_response](./plugins/inputs/net_response)
* [nginx](./plugins/inputs/nginx)
* [nginx_plus](./plugins/inputs/nginx_plus)
* [nsq](./plugins/inputs/nsq)
* [nstat](./plugins/inputs/nstat)
* [ntpq](./plugins/inputs/ntpq)
* [openldap](./plugins/inputs/openldap)
* [opensmtpd](./plugins/inputs/opensmtpd)
* [pf](./plugins/inputs/pf)
* [phpfpm](./plugins/inputs/phpfpm)
* [phusion passenger](./plugins/inputs/passenger)
* [ping](./plugins/inputs/ping)
* [postfix](./plugins/inputs/postfix)
* [postgresql_extensible](./plugins/inputs/postgresql_extensible)
* [postgresql](./plugins/inputs/postgresql)
* [powerdns](./plugins/inputs/powerdns)
* [procstat](./plugins/inputs/procstat)
* [prometheus](./plugins/inputs/prometheus) (can be used for [Caddy server](./plugins/inputs/prometheus/README.md#usage-for-caddy-http-server))
* [puppetagent](./plugins/inputs/puppetagent)
* [rabbitmq](./plugins/inputs/rabbitmq)
* [raindrops](./plugins/inputs/raindrops)
* [redis](./plugins/inputs/redis)
* [rethinkdb](./plugins/inputs/rethinkdb)
* [riak](./plugins/inputs/riak)
* [salesforce](./plugins/inputs/salesforce)
* [sensors](./plugins/inputs/sensors)
* [smart](./plugins/inputs/smart)
* [snmp](./plugins/inputs/snmp)
* [snmp_legacy](./plugins/inputs/snmp_legacy)
* [solr](./plugins/inputs/solr)
* [sql server](./plugins/inputs/sqlserver) (microsoft)
* [teamspeak](./plugins/inputs/teamspeak)
* [tomcat](./plugins/inputs/tomcat)
* [twemproxy](./plugins/inputs/twemproxy)
* [unbound](./plugins/inputs/unbound)
* [varnish](./plugins/inputs/varnish)
* [zfs](./plugins/inputs/zfs)
* [zookeeper](./plugins/inputs/zookeeper)
* [win_perf_counters](./plugins/inputs/win_perf_counters) (windows performance counters)
* [win_services](./plugins/inputs/win_services)
* [sysstat](./plugins/inputs/sysstat)
* [system](./plugins/inputs/system)
* cpu
@@ -174,6 +223,7 @@ configuration options.
* processes
* kernel (/proc/stat)
* kernel (/proc/vmstat)
* linux_sysctl_fs (/proc/sys/fs)
Telegraf can also collect metrics via the following service plugins:
@@ -192,7 +242,20 @@ Telegraf can also collect metrics via the following service plugins:
* [filestack](./plugins/inputs/webhooks/filestack)
* [github](./plugins/inputs/webhooks/github)
* [mandrill](./plugins/inputs/webhooks/mandrill)
* [papertrail](./plugins/inputs/webhooks/papertrail)
* [particle](./plugins/inputs/webhooks/particle)
* [rollbar](./plugins/inputs/webhooks/rollbar)
* [zipkin](./plugins/inputs/zipkin)
Telegraf is able to parse the following input data formats into metrics; these
formats may be used with input plugins supporting the `data_format` option:
* [InfluxDB Line Protocol](./docs/DATA_FORMATS_INPUT.md#influx)
* [JSON](./docs/DATA_FORMATS_INPUT.md#json)
* [Graphite](./docs/DATA_FORMATS_INPUT.md#graphite)
* [Value](./docs/DATA_FORMATS_INPUT.md#value)
* [Nagios](./docs/DATA_FORMATS_INPUT.md#nagios)
* [Collectd](./docs/DATA_FORMATS_INPUT.md#collectd)
## Processor Plugins
@@ -200,7 +263,9 @@ Telegraf can also collect metrics via the following service plugins:
## Aggregator Plugins
* [basicstats](./plugins/aggregators/basicstats)
* [minmax](./plugins/aggregators/minmax)
* [histogram](./plugins/aggregators/histogram)
## Output Plugins
@@ -209,8 +274,10 @@ Telegraf can also collect metrics via the following service plugins:
* [amqp](./plugins/outputs/amqp) (rabbitmq)
* [aws kinesis](./plugins/outputs/kinesis)
* [aws cloudwatch](./plugins/outputs/cloudwatch)
* [cratedb](./plugins/outputs/cratedb)
* [datadog](./plugins/outputs/datadog)
* [discard](./plugins/outputs/discard)
* [elasticsearch](./plugins/outputs/elasticsearch)
* [file](./plugins/outputs/file)
* [graphite](./plugins/outputs/graphite)
* [graylog](./plugins/outputs/graylog)
@@ -227,9 +294,4 @@ Telegraf can also collect metrics via the following service plugins:
* [socket_writer](./plugins/outputs/socket_writer)
* [tcp](./plugins/outputs/socket_writer)
* [udp](./plugins/outputs/socket_writer)
* [wavefront](./plugins/outputs/wavefront)

accumulator.go

@@ -28,6 +28,18 @@ type Accumulator interface {
tags map[string]string,
t ...time.Time)
// AddSummary is the same as AddFields, but will add the metric as a "Summary" type
AddSummary(measurement string,
fields map[string]interface{},
tags map[string]string,
t ...time.Time)
// AddHistogram is the same as AddFields, but will add the metric as a "Histogram" type
AddHistogram(measurement string,
fields map[string]interface{},
tags map[string]string,
t ...time.Time)
SetPrecision(precision, interval time.Duration)
AddError(err error)

agent/accumulator.go

@@ -76,6 +76,28 @@ func (ac *accumulator) AddCounter(
}
}
func (ac *accumulator) AddSummary(
measurement string,
fields map[string]interface{},
tags map[string]string,
t ...time.Time,
) {
if m := ac.maker.MakeMetric(measurement, fields, tags, telegraf.Summary, ac.getTime(t)); m != nil {
ac.metrics <- m
}
}
func (ac *accumulator) AddHistogram(
measurement string,
fields map[string]interface{},
tags map[string]string,
t ...time.Time,
) {
if m := ac.maker.MakeMetric(measurement, fields, tags, telegraf.Histogram, ac.getTime(t)); m != nil {
ac.metrics <- m
}
}
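// Example (hypothetical plugin, not part of this change): an input's
// Gather method can now emit histogram-typed metrics, e.g.
//
//	fields := map[string]interface{}{"0.5": 12.0, "0.9": 48.0, "sum": 3021.0}
//	tags := map[string]string{"db": "telegraf"}
//	acc.AddHistogram("query_latency", fields, tags, time.Now())
//
// The measurement, field keys, and values above are made up for illustration.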
// AddError passes a runtime error to the accumulator.
// The error will be tagged with the plugin name and written to the log.
func (ac *accumulator) AddError(err error) {

agent/agent.go

@@ -247,12 +247,12 @@ func (a *Agent) flush() {
}
// flusher monitors the metrics input channel and flushes on the minimum interval
func (a *Agent) flusher(shutdown chan struct{}, metricC chan telegraf.Metric) error {
func (a *Agent) flusher(shutdown chan struct{}, metricC chan telegraf.Metric, aggC chan telegraf.Metric) error {
// Inelegant, but this sleep is to allow the Gather threads to run, so that
// the flusher will flush after metrics are collected.
time.Sleep(time.Millisecond * 300)
// create an output metric channel and a goroutine that continuously passes
// each metric onto the output plugins & aggregators.
outMetricC := make(chan telegraf.Metric, 100)
var wg sync.WaitGroup
@@ -291,6 +291,35 @@ func (a *Agent) flusher(shutdown chan struct{}, metricC chan telegraf.Metric) er
}
}()
wg.Add(1)
go func() {
defer wg.Done()
for {
select {
case <-shutdown:
if len(aggC) > 0 {
// keep going until aggC is flushed
continue
}
return
case metric := <-aggC:
metrics := []telegraf.Metric{metric}
for _, processor := range a.Config.Processors {
metrics = processor.Apply(metrics...)
}
for _, m := range metrics {
for i, o := range a.Config.Outputs {
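// the last output receives the original metric; every other output
// gets its own copy, so no two outputs share a mutable metric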
if i == len(a.Config.Outputs)-1 {
o.AddMetric(m)
} else {
o.AddMetric(m.Copy())
}
}
}
}
}
}()
ticker := time.NewTicker(a.Config.Agent.FlushInterval.Duration)
semaphore := make(chan struct{}, 1)
for {
@@ -339,6 +368,7 @@ func (a *Agent) Run(shutdown chan struct{}) error {
// channel shared between all input threads for accumulating metrics
metricC := make(chan telegraf.Metric, 100)
aggC := make(chan telegraf.Metric, 100)
// Start all ServicePlugins
for _, input := range a.Config.Inputs {
@@ -367,7 +397,7 @@ func (a *Agent) Run(shutdown chan struct{}) error {
wg.Add(1)
go func() {
defer wg.Done()
if err := a.flusher(shutdown, metricC); err != nil {
if err := a.flusher(shutdown, metricC, aggC); err != nil {
log.Printf("E! Flusher routine failed, exiting: %s\n", err.Error())
close(shutdown)
}
@@ -377,7 +407,7 @@ func (a *Agent) Run(shutdown chan struct{}) error {
for _, aggregator := range a.Config.Aggregators {
go func(agg *models.RunningAggregator) {
defer wg.Done()
acc := NewAccumulator(agg, metricC)
acc := NewAccumulator(agg, aggC)
acc.SetPrecision(a.Config.Agent.Precision.Duration,
a.Config.Agent.Interval.Duration)
agg.Run(acc, shutdown)

appveyor.yml (new file, 33 lines)

@@ -0,0 +1,33 @@
image: Previous Visual Studio 2015
version: "{build}"
cache:
- C:\Cache
clone_folder: C:\gopath\src\github.com\influxdata\telegraf
environment:
GOPATH: C:\gopath
platform: x64
install:
- IF NOT EXIST "C:\Cache" mkdir C:\Cache
- IF NOT EXIST "C:\Cache\go1.9.4.msi" curl -o "C:\Cache\go1.9.4.msi" https://storage.googleapis.com/golang/go1.9.4.windows-amd64.msi
- IF NOT EXIST "C:\Cache\gnuwin32-bin.zip" curl -o "C:\Cache\gnuwin32-bin.zip" https://dl.influxdata.com/telegraf/ci/make-3.81-bin.zip
- IF NOT EXIST "C:\Cache\gnuwin32-dep.zip" curl -o "C:\Cache\gnuwin32-dep.zip" https://dl.influxdata.com/telegraf/ci/make-3.81-dep.zip
- IF EXIST "C:\Go" rmdir /S /Q C:\Go
- msiexec.exe /i "C:\Cache\go1.9.4.msi" /quiet
- 7z x "C:\Cache\gnuwin32-bin.zip" -oC:\GnuWin32 -y
- 7z x "C:\Cache\gnuwin32-dep.zip" -oC:\GnuWin32 -y
- go version
- go env
build_script:
- cmd: C:\GnuWin32\bin\make
test_script:
- cmd: C:\GnuWin32\bin\make test-windows
artifacts:
- path: telegraf.exe

circle.yml

@@ -1,12 +1,13 @@
machine:
services:
- docker
- memcached
- redis
- rabbitmq-server
post:
- sudo service zookeeper stop
- go version
- sudo rm -rf /usr/local/go
- wget https://storage.googleapis.com/golang/go1.8.linux-amd64.tar.gz
- sudo tar -C /usr/local -xzf go1.8.linux-amd64.tar.gz
- wget https://storage.googleapis.com/golang/go1.9.4.linux-amd64.tar.gz
- sudo tar -C /usr/local -xzf go1.9.4.linux-amd64.tar.gz
- go version
dependencies:

cmd/telegraf/telegraf.go

@@ -4,6 +4,8 @@ import (
"flag"
"fmt"
"log"
"net/http"
_ "net/http/pprof" // Comment this line to disable pprof endpoint.
"os"
"os/signal"
"runtime"
@@ -24,6 +26,8 @@ import (
var fDebug = flag.Bool("debug", false,
"turn on debug logging")
var pprofAddr = flag.String("pprof-addr", "",
"pprof address to listen on, don't activate pprof if empty")
var fQuiet = flag.Bool("quiet", false,
"run in quiet mode")
var fTest = flag.Bool("test", false, "gather metrics, print them out, and exit")
@@ -47,16 +51,15 @@ var fAggregatorFilters = flag.String("aggregator-filter", "",
var fProcessorFilters = flag.String("processor-filter", "",
"filter the processors to enable, separator is :")
var fUsage = flag.String("usage", "",
"print usage for a plugin, ie, 'telegraf -usage mysql'")
"print usage for a plugin, ie, 'telegraf --usage mysql'")
var fService = flag.String("service", "",
"operate on the service")
// Telegraf version, populated by the linker.
// ie, -ldflags "-X main.version=`git describe --always --tags`"
var (
version string
commit string
branch string
nextVersion = "1.5.0"
version string
commit string
branch string
)
func init() {
@@ -77,8 +80,8 @@ Usage:
The commands & flags are:
config print out full sample configuration to stdout
version print the version to stdout
--config <file> configuration file to load
--test gather metrics once, print them to stdout, and exit
@@ -87,6 +90,7 @@ The commands & flags are:
--output-filter filter the output plugins to enable, separator is :
--usage print usage for a plugin, ie, 'telegraf --usage mysql'
--debug print metrics as they're generated to stdout
--pprof-addr pprof address to listen on, format: localhost:6060 or :6060
--quiet run in quiet mode
Examples:
@@ -98,13 +102,16 @@ Examples:
telegraf --input-filter cpu --output-filter influxdb config
# run a single telegraf collection, outputting metrics to stdout
telegraf --config telegraf.conf --test
# run telegraf with all plugins defined in config file
telegraf --config telegraf.conf
# run telegraf, enabling the cpu & memory input, and influxdb output plugins
telegraf --config telegraf.conf --input-filter cpu:mem --output-filter influxdb
# run telegraf with pprof
telegraf --config telegraf.conf --pprof-addr localhost:6060
`
var stop chan struct{}
@@ -136,13 +143,23 @@ func reloadLoop(
log.Fatal("E! " + err.Error())
}
}
if len(c.Outputs) == 0 {
if !*fTest && len(c.Outputs) == 0 {
log.Fatalf("E! Error: no outputs found, did you provide a valid config file?")
}
if len(c.Inputs) == 0 {
log.Fatalf("E! Error: no inputs found, did you provide a valid config file?")
}
if int64(c.Agent.Interval.Duration) <= 0 {
log.Fatalf("E! Agent interval must be positive, found %s",
c.Agent.Interval.Duration)
}
if int64(c.Agent.FlushInterval.Duration) <= 0 {
log.Fatalf("E! Agent flush_interval must be positive; found %s",
c.Agent.Interval.Duration)
}
ag, err := agent.NewAgent(c)
if err != nil {
log.Fatal("E! " + err.Error())
@@ -188,7 +205,7 @@ func reloadLoop(
}
}()
log.Printf("I! Starting Telegraf (version %s)\n", version)
log.Printf("I! Starting Telegraf %s\n", displayVersion())
log.Printf("I! Loaded outputs: %s", strings.Join(c.OutputNames(), " "))
log.Printf("I! Loaded inputs: %s", strings.Join(c.InputNames(), " "))
log.Printf("I! Tags enabled: %s", c.ListTags())
@@ -246,6 +263,13 @@ func (p *program) Stop(s service.Service) error {
return nil
}
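// displayVersion reports "v<version>" for tagged release builds and
// falls back to "v<nextVersion>~<commit>" when no exact tag was found
// at build time (see the Makefile's `git describe --exact-match`).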
func displayVersion() string {
if version == "" {
return fmt.Sprintf("v%s~%s", nextVersion, commit)
}
return "v" + version
}
func main() {
flag.Usage = func() { usageExit(0) }
flag.Parse()
@@ -267,10 +291,27 @@ func main() {
processorFilters = strings.Split(":"+strings.TrimSpace(*fProcessorFilters)+":", ":")
}
if *pprofAddr != "" {
go func() {
pprofHostPort := *pprofAddr
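// the normalization below is only for the log message: a bare ":port"
// address is displayed as "localhost:port"; the server itself still
// listens on *pprofAddr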
parts := strings.Split(pprofHostPort, ":")
if len(parts) == 2 && parts[0] == "" {
pprofHostPort = fmt.Sprintf("localhost:%s", parts[1])
}
pprofHostPort = "http://" + pprofHostPort + "/debug/pprof"
log.Printf("I! Starting pprof HTTP server at: %s", pprofHostPort)
if err := http.ListenAndServe(*pprofAddr, nil); err != nil {
log.Fatal("E! " + err.Error())
}
}()
}
if len(args) > 0 {
switch args[0] {
case "version":
fmt.Printf("Telegraf v%s (git: %s %s)\n", version, branch, commit)
fmt.Printf("Telegraf %s (git: %s %s)\n", displayVersion(), branch, commit)
return
case "config":
config.PrintSampleConfig(
@@ -298,7 +339,7 @@ func main() {
}
return
case *fVersion:
fmt.Printf("Telegraf v%s (git: %s %s)\n", version, branch, commit)
fmt.Printf("Telegraf %s (git: %s %s)\n", displayVersion(), branch, commit)
return
case *fSampleConfig:
config.PrintSampleConfig(

docs/AGGREGATORS_AND_PROCESSORS.md

@@ -39,6 +39,11 @@ metrics as they pass through Telegraf:
Both Aggregators and Processors analyze metrics as they pass through Telegraf.
Use [measurement filtering](CONFIGURATION.md#measurement-filtering)
to control which metrics are passed through a processor or aggregator. If a
metric is filtered out the metric bypasses the plugin and is passed downstream
to the next plugin.
**Processor** plugins process metrics as they pass through and immediately emit
results based on the values they process. For example, this could be printing
all metrics or adding a tag to all metrics that pass through.

docs/CONFIGURATION.md

@@ -24,11 +24,17 @@ Environment variables can be used anywhere in the config file, simply prepend
them with $. For strings the variable must be within quotes (ie, "$STR_VAR"),
for numbers and booleans they should be plain (ie, $INT_VAR, $BOOL_VAR)
When using the `.deb` or `.rpm` packages, you can define environment variables
in the `/etc/default/telegraf` file.
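Conceptually the substitution behaves like Go's `os.ExpandEnv` applied to the
raw file contents before the TOML is parsed; the snippet below is a rough
sketch of those semantics, not Telegraf's actual code path:
```go
package main

import (
	"fmt"
	"os"
)

func main() {
	os.Setenv("STR_VAR", "production-host")
	os.Setenv("INT_VAR", "10")

	raw := `hostname = "$STR_VAR"        # strings stay quoted
metric_batch_size = $INT_VAR # numbers and booleans are plain`

	// Every $VAR is replaced with its environment value before parsing.
	fmt.Println(os.ExpandEnv(raw))
}
```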
## Configuration file locations
The location of the configuration file can be set via the `--config` command
line flag.

When the `--config-directory` command line flag is used, files ending with
`.conf` in the specified directory will also be included in the Telegraf
configuration.
On most systems, the default locations are `/etc/telegraf/telegraf.conf` for
the main configuration file and `/etc/telegraf/telegraf.d` for the directory of
@@ -66,11 +72,14 @@ interval. Maximum flush_interval will be flush_interval + flush_jitter
This is primarily to avoid
large write spikes for users running a large number of telegraf instances.
ie, a jitter of 5s and flush_interval 10s means flushes will happen every 10-15s.
* **precision**:
By default or when set to "0s", precision will be set to the same
timestamp order as the collection interval, with the maximum being 1s.
Precision will NOT be used for service inputs. It is up to each individual
service input to set the timestamp at the appropriate precision.
Valid time units are "ns", "us" (or "µs"), "ms", "s".
* **logfile**: Specify the log file name. The empty string means to log to stderr.
* **debug**: Run telegraf in debug mode.
* **quiet**: Run telegraf in quiet mode (error messages only).
* **hostname**: Override default hostname, if empty use os.Hostname().
@@ -89,9 +98,13 @@ you can configure that here.
* **name_suffix**: Specifies a suffix to attach to the measurement name.
* **tags**: A map of tags to apply to a specific input's measurements.
The [measurement filtering](#measurement-filtering) parameters can be used to
limit what metrics are emitted from the input plugin.
## Output Configuration
There are no generic configuration options available for all outputs.
The [measurement filtering](#measurement-filtering) parameters can be used to
limit what metrics are emitted from the output plugin.
## Aggregator Configuration
@@ -112,6 +125,10 @@ aggregator and will not get sent to the output plugins.
* **name_suffix**: Specifies a suffix to attach to the measurement name.
* **tags**: A map of tags to apply to a specific input's measurements.
The [measurement filtering](#measurement-filtering) parameters can be used to
limit what metrics are handled by the aggregator. Excluded metrics are passed
downstream to the next aggregator.
## Processor Configuration
The following config parameters are available for all processors:
@@ -119,36 +136,50 @@ The following config parameters are available for all processors:
* **order**: This is the order in which the processor(s) get executed. If this
is not specified then processor execution order will be random.
The [measurement filtering](#measurement-filtering) parameters may be used
to limit what metrics are handled by the processor. Excluded metrics are
passed downstream to the next processor.
#### Measurement Filtering
Filters can be configured per input, output, processor, or aggregator;
see below for examples.
* **namepass**:
An array of glob pattern strings. Only points whose measurement name matches
a pattern in this list are emitted.
* **namedrop**:
The inverse of `namepass`. If a match is found the point is discarded. This
is tested on points after they have passed the `namepass` test.
* **fieldpass**:
An array of glob pattern strings. Only fields whose field key matches a
pattern in this list are emitted. Not available for outputs.
* **fielddrop**:
The inverse of `fieldpass`. Fields with a field key matching one of the
patterns will be discarded from the point. This is tested on points after
they have passed the `fieldpass` test. Not available for outputs.
* **tagpass**:
A table mapping tag keys to arrays of glob pattern strings. Only points
that contain a tag key in the table and a tag value matching one of its
patterns is emitted.
* **tagdrop**:
The inverse of `tagpass`. If a match is found the point is discarded. This
is tested on points after they have passed the `tagpass` test.
* **taginclude**:
An array of glob pattern strings. Only tags with a tag key matching one of
the patterns are emitted. In contrast to `tagpass`, which will pass an entire
point based on its tag, `taginclude` removes all non matching tags from the
point. This filter can be used on both inputs & outputs, but it is
_recommended_ to be used on inputs, as it is more efficient to filter out tags
at the ingestion point.
* **tagexclude**:
The inverse of `taginclude`. Tags with a tag key matching one of the patterns
will be discarded from the point.
**NOTE** Due to the way TOML is parsed, `tagpass` and `tagdrop` parameters
must be defined at the _end_ of the plugin definition, otherwise subsequent
plugin config options will be interpreted as part of the tagpass/tagdrop
tables.
#### Input Configuration Examples
@@ -168,7 +199,6 @@ fields which begin with `time_`.
[[outputs.influxdb]]
url = "http://192.168.59.103:8086" # required.
database = "telegraf" # required.
precision = "s"
# INPUTS
[[inputs.cpu]]
@@ -307,21 +337,18 @@ to avoid measurement collisions:
[[outputs.influxdb]]
urls = [ "http://localhost:8086" ]
database = "telegraf"
precision = "s"
# Drop all measurements that start with "aerospike"
namedrop = ["aerospike*"]
[[outputs.influxdb]]
urls = [ "http://localhost:8086" ]
database = "telegraf-aerospike-data"
precision = "s"
# Only accept aerospike data:
namepass = ["aerospike*"]
[[outputs.influxdb]]
urls = [ "http://localhost:8086" ]
database = "telegraf-cpu0-data"
precision = "s"
# Only store measurements where the tag "cpu" matches the value "cpu0"
[outputs.influxdb.tagpass]
cpu = ["cpu0"]
@@ -362,3 +389,15 @@ to the system load metrics due to the `namepass` parameter.
[[outputs.file]]
files = ["stdout"]
```
#### Processor Configuration Examples:
Print only the metrics with `cpu` as the measurement name, all metrics are
passed to the output:
```toml
[[processors.printer]]
namepass = "cpu"
[[outputs.file]]
files = ["/tmp/metrics.out"]
```

View File

@@ -7,6 +7,7 @@ Telegraf is able to parse the following input data formats into metrics:
1. [Graphite](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#graphite)
1. [Value](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#value), ie: 45 or "booyah"
1. [Nagios](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#nagios) (exec input only)
1. [Collectd](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#collectd)
Telegraf metrics, like InfluxDB
[points](https://docs.influxdata.com/influxdb/v0.10/write_protocols/line/),
@@ -40,7 +41,7 @@ example, in the exec plugin:
name_suffix = "_mycollector"
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "json"
@@ -67,7 +68,7 @@ metrics are parsed directly into Telegraf metrics.
name_suffix = "_mycollector"
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "influx"
@@ -117,7 +118,7 @@ For example, if you had this configuration:
name_suffix = "_mycollector"
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "json"
@@ -161,7 +162,7 @@ For example, if the following configuration:
name_suffix = "_mycollector"
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "json"
@@ -232,7 +233,7 @@ name of the plugin.
name_override = "entropy_available"
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "value"
@@ -390,7 +391,7 @@ There are many more options available,
name_suffix = "_mycollector"
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "graphite"
@@ -427,14 +428,54 @@ Note: Nagios Input Data Formats is only supported in `exec` input plugin.
```toml
[[inputs.exec]]
## Commands array
commands = ["/usr/lib/nagios/plugins/check_load", "-w 5,6,7 -c 7,8,9"]
commands = ["/usr/lib/nagios/plugins/check_load -w 5,6,7 -c 7,8,9"]
## measurement name suffix (for separating different commands)
name_suffix = "_mycollector"
## Data format to consume.
## Each data format has it's own unique set of configuration options, read
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "nagios"
```
# Collectd:
The collectd format parses the collectd binary network protocol. Tags are
created for host, instance, type, and type instance. All collectd values are
added as float64 fields.
For more information about the binary network protocol see
[here](https://collectd.org/wiki/index.php/Binary_protocol).
You can control the cryptographic settings with parser options. Create an
authentication file and set `collectd_auth_file` to the path of the file, then
set the desired security level in `collectd_security_level`.
Additional information including client setup can be found
[here](https://collectd.org/wiki/index.php/Networking_introduction#Cryptographic_setup).
You can also change the path to the TypesDB or add additional TypesDB files
using `collectd_typesdb`.
#### Collectd Configuration:
```toml
[[inputs.socket_listener]]
service_address = "udp://127.0.0.1:25826"
name_prefix = "collectd_"
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "collectd"
## Authentication file for cryptographic security levels
collectd_auth_file = "/etc/collectd/auth_file"
## One of none (default), sign, or encrypt
collectd_security_level = "encrypt"
## Path to TypesDB specifications
collectd_typesdb = ["/usr/share/collectd/types.db"]
```

docs/DATA_FORMATS_OUTPUT.md

@@ -36,7 +36,7 @@ config option, for example, in the `file` output plugin:
files = ["stdout"]
## Data format to output.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "influx"
@@ -60,7 +60,7 @@ metrics are serialized directly into InfluxDB line-protocol.
files = ["stdout", "/tmp/metrics.out"]
## Data format to output.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "influx"
@@ -96,6 +96,9 @@ tars.cpu-total.us-east-1.cpu.usage_user 0.89 1455320690
tars.cpu-total.us-east-1.cpu.usage_idle 98.09 1455320690
```
Fields with string values will be skipped. Boolean fields will be converted
to 1 (true) or 0 (false).
### Graphite Configuration:
```toml
@@ -104,7 +107,7 @@ tars.cpu-total.us-east-1.cpu.usage_idle 98.09 1455320690
files = ["stdout", "/tmp/metrics.out"]
## Data format to output.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "graphite"
@@ -143,8 +146,18 @@ The JSON data format serialized Telegraf metrics in json format. The format is:
files = ["stdout", "/tmp/metrics.out"]
## Data format to output.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "json"
json_timestamp_units = "1ns"
```
By default, the timestamp attached to each metric in the JSON output is in
seconds. The precision of this timestamp can be adjusted for any output by
adding the optional `json_timestamp_units` parameter to the configuration for
that output. This parameter can be used to set the timestamp units to
nanoseconds (`ns`), microseconds (`us` or `µs`), milliseconds (`ms`), or
seconds (`s`). Note that this parameter is truncated down to the nearest
power of 10, so a `json_timestamp_units` of `15ms` will output timestamps in
hundredths of a second (`10ms`).
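The rounding can be pictured as walking down the power-of-ten durations until
one fits; a small sketch of that arithmetic (an assumption about the behavior
described above, not the serializer's actual code):
```go
package main

import (
	"fmt"
	"time"
)

// truncateToPowerOfTen returns the largest power-of-ten duration
// (1ns, 10ns, 100ns, ...) that does not exceed d.
func truncateToPowerOfTen(d time.Duration) time.Duration {
	units := time.Nanosecond
	for u := time.Nanosecond; u <= d; u *= 10 {
		units = u
	}
	return units
}

func main() {
	fmt.Println(truncateToPowerOfTen(15 * time.Millisecond)) // 10ms
}
```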

docs/FAQ.md (new file, 46 lines)

@@ -0,0 +1,46 @@
# Frequently Asked Questions
### Q: How can I monitor the Docker Engine Host from within a container?
You will need to setup several volume mounts as well as some environment
variables:
```
docker run --name telegraf \
  -v /:/hostfs:ro \
  -v /etc:/hostfs/etc:ro \
  -v /proc:/hostfs/proc:ro \
  -v /sys:/hostfs/sys:ro \
  -v /var/run/utmp:/var/run/utmp:ro \
  -e HOST_ETC=/hostfs/etc \
  -e HOST_PROC=/hostfs/proc \
  -e HOST_SYS=/hostfs/sys \
  -e HOST_MOUNT_PREFIX=/hostfs \
  telegraf
```
### Q: Why do I get a "no such host" error resolving hostnames that other programs can resolve?
Go uses a pure Go resolver by default for [name resolution](https://golang.org/pkg/net/#hdr-Name_Resolution).
This resolver behaves differently than the C library functions but is more
efficient when used with the Go runtime.
If you encounter problems or want to use more advanced name resolution methods
that are unsupported by the pure Go resolver, you can switch to the cgo
resolver.
If running manually, set:
```
export GODEBUG=netdns=cgo
```
If running as a service add the environment variable to `/etc/default/telegraf`:
```
GODEBUG=netdns=cgo
```
### Q: When will the next version be released?
The latest release date estimate can be viewed on the
[milestones](https://github.com/influxdata/telegraf/milestones) page.

docs/LICENSE_OF_DEPENDENCIES.md

@@ -1,33 +1,104 @@
# Licenses of dependencies
When distributed in a binary form, Telegraf may contain portions of the
following works:
- collectd.org [MIT](https://github.com/collectd/go-collectd/blob/master/LICENSE)
- github.com/aerospike/aerospike-client-go [APACHE](https://github.com/aerospike/aerospike-client-go/blob/master/LICENSE)
- github.com/amir/raidman [PUBLIC DOMAIN](https://github.com/amir/raidman/blob/master/UNLICENSE)
- github.com/armon/go-metrics [MIT](https://github.com/armon/go-metrics/blob/master/LICENSE)
- github.com/aws/aws-sdk-go [APACHE](https://github.com/aws/aws-sdk-go/blob/master/LICENSE.txt)
- github.com/beorn7/perks [MIT](https://github.com/beorn7/perks/blob/master/LICENSE)
- github.com/boltdb/bolt [MIT](https://github.com/boltdb/bolt/blob/master/LICENSE)
- github.com/bsm/sarama-cluster [MIT](https://github.com/bsm/sarama-cluster/blob/master/LICENSE)
- github.com/cenkalti/backoff [MIT](https://github.com/cenkalti/backoff/blob/master/LICENSE)
- github.com/chuckpreslar/rcon [MIT](https://github.com/chuckpreslar/rcon#license)
- github.com/couchbase/go-couchbase [MIT](https://github.com/couchbase/go-couchbase/blob/master/LICENSE)
- github.com/couchbase/gomemcached [MIT](https://github.com/couchbase/gomemcached/blob/master/LICENSE)
- github.com/couchbase/goutils [MIT](https://github.com/couchbase/go-couchbase/blob/master/LICENSE)
- github.com/dancannon/gorethink [APACHE](https://github.com/dancannon/gorethink/blob/master/LICENSE)
- github.com/davecgh/go-spew [ISC](https://github.com/davecgh/go-spew/blob/master/LICENSE)
- github.com/docker/docker [APACHE](https://github.com/docker/docker/blob/master/LICENSE)
- github.com/docker/cli [APACHE](https://github.com/docker/cli/blob/master/LICENSE)
- github.com/eapache/go-resiliency [MIT](https://github.com/eapache/go-resiliency/blob/master/LICENSE)
- github.com/eapache/go-xerial-snappy [MIT](https://github.com/eapache/go-xerial-snappy/blob/master/LICENSE)
- github.com/eapache/queue [MIT](https://github.com/eapache/queue/blob/master/LICENSE)
- github.com/eclipse/paho.mqtt.golang [ECLIPSE](https://github.com/eclipse/paho.mqtt.golang/blob/master/LICENSE)
- github.com/fsouza/go-dockerclient [BSD](https://github.com/fsouza/go-dockerclient/blob/master/LICENSE)
- github.com/gobwas/glob [MIT](https://github.com/gobwas/glob/blob/master/LICENSE)
- github.com/google/go-cmp [BSD](https://github.com/google/go-cmp/blob/master/LICENSE)
- github.com/gogo/protobuf [BSD](https://github.com/gogo/protobuf/blob/master/LICENSE)
- github.com/golang/protobuf [BSD](https://github.com/golang/protobuf/blob/master/LICENSE)
- github.com/golang/snappy [BSD](https://github.com/golang/snappy/blob/master/LICENSE)
- github.com/go-logfmt/logfmt [MIT](https://github.com/go-logfmt/logfmt/blob/master/LICENSE)
- github.com/gorilla/mux [BSD](https://github.com/gorilla/mux/blob/master/LICENSE)
- github.com/go-ini/ini [APACHE](https://github.com/go-ini/ini/blob/master/LICENSE)
- github.com/go-ole/go-ole [MIT](http://mattn.mit-license.org/2013)
- github.com/go-sql-driver/mysql [MPL](https://github.com/go-sql-driver/mysql/blob/master/LICENSE)
- github.com/hailocab/go-hostpool [MIT](https://github.com/hailocab/go-hostpool/blob/master/LICENSE)
- github.com/hashicorp/consul [MPL](https://github.com/hashicorp/consul/blob/master/LICENSE)
- github.com/hashicorp/go-msgpack [BSD](https://github.com/hashicorp/go-msgpack/blob/master/LICENSE)
- github.com/hashicorp/raft-boltdb [MPL](https://github.com/hashicorp/raft-boltdb/blob/master/LICENSE)
- github.com/hashicorp/raft [MPL](https://github.com/hashicorp/raft/blob/master/LICENSE)
- github.com/influxdata/tail [MIT](https://github.com/influxdata/tail/blob/master/LICENSE.txt)
- github.com/influxdata/toml [MIT](https://github.com/influxdata/toml/blob/master/LICENSE)
- github.com/influxdata/wlog [MIT](https://github.com/influxdata/wlog/blob/master/LICENSE)
- github.com/jackc/pgx [MIT](https://github.com/jackc/pgx/blob/master/LICENSE)
- github.com/jmespath/go-jmespath [APACHE](https://github.com/jmespath/go-jmespath/blob/master/LICENSE)
- github.com/kardianos/osext [BSD](https://github.com/kardianos/osext/blob/master/LICENSE)
- github.com/kardianos/service [ZLIB](https://github.com/kardianos/service/blob/master/LICENSE) (License not named but matches word for word with ZLib)
- github.com/kballard/go-shellquote [MIT](https://github.com/kballard/go-shellquote/blob/master/LICENSE)
- github.com/lib/pq [MIT](https://github.com/lib/pq/blob/master/LICENSE.md)
- github.com/matttproud/golang_protobuf_extensions [APACHE](https://github.com/matttproud/golang_protobuf_extensions/blob/master/LICENSE)
- github.com/Microsoft/go-winio [MIT](https://github.com/Microsoft/go-winio/blob/master/LICENSE)
- github.com/miekg/dns [BSD](https://github.com/miekg/dns/blob/master/LICENSE)
- github.com/naoina/go-stringutil [MIT](https://github.com/naoina/go-stringutil/blob/master/LICENSE)
- github.com/naoina/toml [MIT](https://github.com/naoina/toml/blob/master/LICENSE)
- github.com/nats-io/go-nats [MIT](https://github.com/nats-io/go-nats/blob/master/LICENSE)
- github.com/nats-io/nats [MIT](https://github.com/nats-io/nats/blob/master/LICENSE)
- github.com/nats-io/nuid [MIT](https://github.com/nats-io/nuid/blob/master/LICENSE)
- github.com/nsqio/go-nsq [MIT](https://github.com/nsqio/go-nsq/blob/master/LICENSE)
- github.com/opentracing-contrib/go-observer [APACHE](https://github.com/opentracing-contrib/go-observer/blob/master/LICENSE)
- github.com/opentracing/opentracing-go [MIT](https://github.com/opentracing/opentracing-go/blob/master/LICENSE)
- github.com/openzipkin/zipkin-go-opentracing [MIT](https://github.com/openzipkin/zipkin-go-opentracing/blob/master/LICENSE)
- github.com/pierrec/lz4 [BSD](https://github.com/pierrec/lz4/blob/master/LICENSE)
- github.com/pierrec/xxHash [BSD](https://github.com/pierrec/xxHash/blob/master/LICENSE)
- github.com/pkg/errors [BSD](https://github.com/pkg/errors/blob/master/LICENSE)
- github.com/pmezard/go-difflib [BSD](https://github.com/pmezard/go-difflib/blob/master/LICENSE)
- github.com/prometheus/client_golang [APACHE](https://github.com/prometheus/client_golang/blob/master/LICENSE)
- github.com/prometheus/client_model [APACHE](https://github.com/prometheus/client_model/blob/master/LICENSE)
- github.com/prometheus/common [APACHE](https://github.com/prometheus/common/blob/master/LICENSE)
- github.com/prometheus/procfs [APACHE](https://github.com/prometheus/procfs/blob/master/LICENSE)
- github.com/rcrowley/go-metrics [BSD](https://github.com/rcrowley/go-metrics/blob/master/LICENSE)
- github.com/samuel/go-zookeeper [BSD](https://github.com/samuel/go-zookeeper/blob/master/LICENSE)
- github.com/satori/go.uuid [MIT](https://github.com/satori/go.uuid/blob/master/LICENSE)
- github.com/shirou/gopsutil [BSD](https://github.com/shirou/gopsutil/blob/master/LICENSE)
- github.com/shirou/w32 [BSD](https://github.com/shirou/w32/blob/master/LICENSE)
- github.com/Shopify/sarama [MIT](https://github.com/Shopify/sarama/blob/master/MIT-LICENSE)
- github.com/Sirupsen/logrus [MIT](https://github.com/Sirupsen/logrus/blob/master/LICENSE)
- github.com/StackExchange/wmi [MIT](https://github.com/StackExchange/wmi/blob/master/LICENSE)
- github.com/stretchr/objx [MIT](https://github.com/stretchr/objx/blob/master/LICENSE.md)
- github.com/soniah/gosnmp [BSD](https://github.com/soniah/gosnmp/blob/master/LICENSE)
- github.com/streadway/amqp [BSD](https://github.com/streadway/amqp/blob/master/LICENSE)
- github.com/stretchr/testify [MIT](https://github.com/stretchr/testify/blob/master/LICENCE.txt)
- github.com/mitchellh/mapstructure [MIT](https://github.com/mitchellh/mapstructure/blob/master/LICENSE)
- github.com/multiplay/go-ts3 [BSD](https://github.com/multiplay/go-ts3/blob/master/LICENSE)
- github.com/vjeantet/grok [APACHE](https://github.com/vjeantet/grok/blob/master/LICENSE)
- github.com/wvanbergen/kafka [MIT](https://github.com/wvanbergen/kafka/blob/master/LICENSE)
- github.com/wvanbergen/kazoo-go [MIT](https://github.com/wvanbergen/kazoo-go/blob/master/MIT-LICENSE)
- github.com/yuin/gopher-lua [MIT](https://github.com/yuin/gopher-lua/blob/master/LICENSE)
- github.com/zensqlmonitor/go-mssqldb [BSD](https://github.com/zensqlmonitor/go-mssqldb/blob/master/LICENSE.txt)
- golang.org/x/crypto [BSD](https://github.com/golang/crypto/blob/master/LICENSE)
- golang.org/x/net [BSD](https://go.googlesource.com/net/+/master/LICENSE)
- golang.org/x/text [BSD](https://go.googlesource.com/text/+/master/LICENSE)
- golang.org/x/sys [BSD](https://go.googlesource.com/sys/+/master/LICENSE)
- gopkg.in/asn1-ber.v1 [MIT](https://github.com/go-asn1-ber/asn1-ber/blob/v1.2/LICENSE)
- gopkg.in/dancannon/gorethink.v1 [APACHE](https://github.com/dancannon/gorethink/blob/v1.1.2/LICENSE)
- gopkg.in/fatih/pool.v2 [MIT](https://github.com/fatih/pool/blob/v2.0.0/LICENSE)
- gopkg.in/fsnotify.v1 [BSD](https://github.com/fsnotify/fsnotify/blob/v1.4.2/LICENSE)
- gopkg.in/ldap.v2 [MIT](https://github.com/go-ldap/ldap/blob/v2.5.0/LICENSE)
- gopkg.in/mgo.v2 [BSD](https://github.com/go-mgo/mgo/blob/v2/LICENSE)
- gopkg.in/olivere/elastic.v5 [MIT](https://github.com/olivere/elastic/blob/v5.0.38/LICENSE)
- gopkg.in/tomb.v1 [BSD](https://github.com/go-tomb/tomb/blob/v1/LICENSE)
- gopkg.in/yaml.v2 [APACHE](https://github.com/go-yaml/yaml/blob/v2/LICENSE)

docs/PROFILING.md Normal file
View File

@@ -0,0 +1,24 @@
# Telegraf profiling
Telegraf uses the standard Go package `net/http/pprof`, which serves runtime profiling data over HTTP in the format expected by the pprof visualization tool.
By default, profiling is turned off.
To enable profiling, pass an address to the `--pprof-addr` flag, for example:
```
telegraf --config telegraf.conf --pprof-addr localhost:6060
```
Different paths serve different kinds of profiling information.
To look at the heap profile:
`go tool pprof http://localhost:6060/debug/pprof/heap`
or to look at a 30-second CPU profile:
`go tool pprof http://localhost:6060/debug/pprof/profile?seconds=30`
To view all available profiles, open `http://localhost:6060/debug/pprof/` in your browser.
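Because these endpoints come straight from the standard library, the same profiles can be exposed by any Go program. A minimal sketch of the mechanism (hypothetical; not Telegraf's actual wiring):
```go
package main

import (
	"log"
	"net/http"
	// Importing net/http/pprof for its side effects registers the
	// /debug/pprof/* handlers on http.DefaultServeMux.
	_ "net/http/pprof"
)

func main() {
	// Comparable to what --pprof-addr localhost:6060 enables in Telegraf.
	log.Fatal(http.ListenAndServe("localhost:6060", nil))
}
```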

View File

@@ -37,3 +37,9 @@ Telegraf can manage its own service through the --service flag:
| `telegraf.exe --service start` | Start the telegraf service |
| `telegraf.exe --service stop` | Stop the telegraf service |
Troubleshooting common error #1067
When installing Telegraf as a Windows service, always double-check that the full path to the config file is specified; otherwise the Windows service will fail to start:
--config C:\"Program Files"\Telegraf\telegraf.conf

File diff suppressed because it is too large

View File

@@ -63,8 +63,8 @@
# The full HTTP or UDP endpoint URL for your InfluxDB instance.
# Multiple urls can be specified but it is assumed that they are part of the same
# cluster; this means that only ONE of the urls will be written to each interval.
# urls = ["udp://localhost:8089"] # UDP endpoint example
urls = ["http://localhost:8086"] # required
# urls = ["udp://127.0.0.1:8089"] # UDP endpoint example
urls = ["http://127.0.0.1:8086"] # required
# The target database for metrics (telegraf will create it if not exists)
database = "telegraf" # required
# Precision of writes, valid values are "ns", "us" (or "µs"), "ms", "s", "m", "h".

View File

@@ -77,3 +77,40 @@ func compileFilterNoGlob(filters []string) Filter {
}
return &out
}
// IncludeExcludeFilter combines an include list and an exclude list:
// a string matches only if it passes the include filter (when set) and
// is not matched by the exclude filter (when set).
type IncludeExcludeFilter struct {
	include Filter
	exclude Filter
}

// NewIncludeExcludeFilter compiles both lists into a combined filter.
func NewIncludeExcludeFilter(
	include []string,
	exclude []string,
) (Filter, error) {
	in, err := Compile(include)
	if err != nil {
		return nil, err
	}

	ex, err := Compile(exclude)
	if err != nil {
		return nil, err
	}

	return &IncludeExcludeFilter{in, ex}, nil
}

// Match reports whether s is accepted by the include filter (if any)
// and not matched by the exclude filter (if any).
func (f *IncludeExcludeFilter) Match(s string) bool {
	if f.include != nil {
		if !f.include.Match(s) {
			return false
		}
	}

	if f.exclude != nil {
		if f.exclude.Match(s) {
			return false
		}
	}

	return true
}
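A brief usage sketch (hypothetical values, assuming this package's glob-style `Compile` semantics): keep anything matching `cpu*` except `cpu_guest`:
```go
f, err := NewIncludeExcludeFilter([]string{"cpu*"}, []string{"cpu_guest"})
if err != nil {
	log.Fatal(err)
}
f.Match("cpu_idle")  // true: included and not excluded
f.Match("cpu_guest") // false: caught by the exclude list
f.Match("mem_free")  // false: not in the include list
```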

View File

@@ -45,9 +45,11 @@ func (b *Buffer) Add(metrics ...telegraf.Metric) {
select {
case b.buf <- metrics[i]:
default:
b.mu.Lock()
MetricsDropped.Incr(1)
<-b.buf
b.buf <- metrics[i]
b.mu.Unlock()
}
}
}

View File

@@ -6,6 +6,7 @@ import (
"fmt"
"io/ioutil"
"log"
"math"
"os"
"path/filepath"
"regexp"
@@ -39,6 +40,11 @@ var (
// envVarRe is a regex to find environment variables in the config file
envVarRe = regexp.MustCompile(`\$\w+`)
envVarEscaper = strings.NewReplacer(
`"`, `\"`,
`\`, `\\`,
)
)
// Config specifies the URL/user/password for the database that telegraf
@@ -84,8 +90,8 @@ type AgentConfig struct {
// ie, if Interval=10s then always collect on :00, :10, :20, etc.
RoundInterval bool
// By default, precision will be set to the same timestamp order as the
// collection interval, with the maximum being 1s.
// By default or when set to "0s", precision will be set to the same
// timestamp order as the collection interval, with the maximum being 1s.
// ie, when interval = "10s", precision will be "1s"
// when interval = "250ms", precision will be "1ms"
// Precision will NOT be used for service inputs. It is up to each individual
@@ -125,7 +131,7 @@ type AgentConfig struct {
// TODO(cam): Remove UTC and parameter, they are no longer
// valid for the agent config. Leaving them here for now for backwards-
// compatability
// compatibility
UTC bool `toml:"utc"`
// Debug is the option for running in debug mode
@@ -229,10 +235,13 @@ var header = `# Telegraf Configuration
## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
flush_jitter = "0s"
## By default, precision will be set to the same timestamp order as the
## collection interval, with the maximum being 1s.
## Precision will NOT be used for service inputs, such as logparser and statsd.
## Valid values are "ns", "us" (or "µs"), "ms", "s".
## By default or when set to "0s", precision will be set to the same
## timestamp order as the collection interval, with the maximum being 1s.
## ie, when interval = "10s", precision will be "1s"
## when interval = "250ms", precision will be "1ms"
## Precision will NOT be used for service inputs. It is up to each individual
## service input to set the timestamp at the appropriate precision.
## Valid time units are "ns", "us" (or "µs"), "ms", "s".
precision = ""
## Logging configuration:
@@ -679,12 +688,17 @@ func (c *Config) LoadConfig(path string) error {
}
// trimBOM trims the Byte-Order-Marks from the beginning of the file.
// this is for Windows compatability only.
// this is for Windows compatibility only.
// see https://github.com/influxdata/telegraf/issues/1378
func trimBOM(f []byte) []byte {
return bytes.TrimPrefix(f, []byte("\xef\xbb\xbf"))
}
// escapeEnv escapes a value for inserting into a TOML string.
func escapeEnv(value string) string {
return envVarEscaper.Replace(value)
}
// parseFile loads a TOML configuration from a provided path and
// returns the AST produced from the TOML parser. When loading the file, it
// will find environment variables and replace them.
@@ -698,8 +712,9 @@ func parseFile(fpath string) (*ast.Table, error) {
env_vars := envVarRe.FindAll(contents, -1)
for _, env_var := range env_vars {
env_val := os.Getenv(strings.TrimPrefix(string(env_var), "$"))
if env_val != "" {
env_val, ok := os.LookupEnv(strings.TrimPrefix(string(env_var), "$"))
if ok {
env_val = escapeEnv(env_val)
contents = bytes.Replace(contents, env_var, []byte(env_val), 1)
}
}
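The switch from `os.Getenv` to `os.LookupEnv` here is what makes substitution safe for empty values: `Getenv` returns `""` for both an unset variable and one explicitly set to empty, while `LookupEnv`'s boolean distinguishes the two, so a `$VAR` reference is replaced whenever the variable exists, even if it is empty. A standalone sketch of the distinction:
```go
os.Setenv("EMPTY", "")
if _, ok := os.LookupEnv("EMPTY"); ok {
	fmt.Println("EMPTY exists and will be substituted (with an empty string)")
}
if _, ok := os.LookupEnv("UNSET"); !ok {
	fmt.Println("UNSET does not exist; the $UNSET reference is left as-is")
}
```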
@@ -1229,6 +1244,34 @@ func buildParser(name string, tbl *ast.Table) (parsers.Parser, error) {
}
}
if node, ok := tbl.Fields["collectd_auth_file"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
c.CollectdAuthFile = str.Value
}
}
}
if node, ok := tbl.Fields["collectd_security_level"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
c.CollectdSecurityLevel = str.Value
}
}
}
if node, ok := tbl.Fields["collectd_typesdb"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
c.CollectdTypesDB = append(c.CollectdTypesDB, str.Value)
}
}
}
}
}
c.MetricName = name
delete(tbl.Fields, "data_format")
@@ -1236,6 +1279,9 @@ func buildParser(name string, tbl *ast.Table) (parsers.Parser, error) {
delete(tbl.Fields, "templates")
delete(tbl.Fields, "tag_keys")
delete(tbl.Fields, "data_type")
delete(tbl.Fields, "collectd_auth_file")
delete(tbl.Fields, "collectd_security_level")
delete(tbl.Fields, "collectd_typesdb")
return parsers.NewParser(c)
}
@@ -1244,7 +1290,7 @@ func buildParser(name string, tbl *ast.Table) (parsers.Parser, error) {
// a serializers.Serializer object, and creates it, which can then be added onto
// an Output object.
func buildSerializer(name string, tbl *ast.Table) (serializers.Serializer, error) {
c := &serializers.Config{}
c := &serializers.Config{TimestampUnits: time.Duration(1 * time.Second)}
if node, ok := tbl.Fields["data_format"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
@@ -1274,9 +1320,26 @@ func buildSerializer(name string, tbl *ast.Table) (serializers.Serializer, error
}
}
if node, ok := tbl.Fields["json_timestamp_units"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
timestampVal, err := time.ParseDuration(str.Value)
if err != nil {
return nil, fmt.Errorf("Unable to parse json_timestamp_units as a duration, %s", err)
}
// now that we have a duration, truncate it to the nearest
// power of ten (just in case)
nearest_exponent := int64(math.Log10(float64(timestampVal.Nanoseconds())))
new_nanoseconds := int64(math.Pow(10.0, float64(nearest_exponent)))
c.TimestampUnits = time.Duration(new_nanoseconds)
}
}
}
delete(tbl.Fields, "data_format")
delete(tbl.Fields, "prefix")
delete(tbl.Fields, "template")
delete(tbl.Fields, "json_timestamp_units")
return serializers.NewSerializer(c)
}
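The log10/pow step above snaps whatever duration is configured down to the nearest power of ten, so only valid timestamp units can result. A worked standalone sketch of the same arithmetic:
```go
d, _ := time.ParseDuration("15ms")                        // 15,000,000 ns
exp := int64(math.Log10(float64(d.Nanoseconds())))        // log10(1.5e7) ≈ 7.18, truncated to 7
units := time.Duration(int64(math.Pow(10, float64(exp)))) // 10^7 ns
fmt.Println(units)                                        // "10ms"
```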

View File

@@ -60,7 +60,7 @@
# Kafka topic for producer messages
topic = "telegraf"
# Telegraf tag to use as a routing key
# ie, if this tag exists, it's value will be used as the routing key
# ie, if this tag exists, its value will be used as the routing key
routing_tag = "host"
@@ -143,19 +143,31 @@
[[inputs.diskio]]
# no configuration
# read metrics from a Kafka topic
# read metrics from a Kafka 0.9+ topic
[[inputs.kafka_consumer]]
# topic(s) to consume
## kafka brokers
brokers = ["localhost:9092"]
## topic(s) to consume
topics = ["telegraf"]
## the name of the consumer group
consumer_group = "telegraf_metrics_consumers"
## Offset (must be either "oldest" or "newest")
offset = "oldest"
# read metrics from a Kafka legacy topic
[[inputs.kafka_consumer_legacy]]
## topic(s) to consume
topics = ["telegraf"]
# an array of Zookeeper connection strings
zookeeper_peers = ["localhost:2181"]
# the name of the consumer group
## the name of the consumer group
consumer_group = "telegraf_metrics_consumers"
# Maximum number of points to buffer between collection intervals
point_buffer = 100000
# Offset (must be either "oldest" or "newest")
## Offset (must be either "oldest" or "newest")
offset = "oldest"
# Read metrics from a LeoFS Server via SNMP
[[inputs.leofs]]
# An array of URI to gather stats about LeoFS.

View File

@@ -1,37 +0,0 @@
package errchan
import (
"fmt"
"strings"
)
type ErrChan struct {
C chan error
}
// New returns an error channel of max length 'n'
// errors can be sent to the ErrChan.C channel, and will be returned when
// ErrChan.Error() is called.
func New(n int) *ErrChan {
return &ErrChan{
C: make(chan error, n),
}
}
// Error closes the ErrChan.C channel and returns an error if there are any
// non-nil errors, otherwise returns nil.
func (e *ErrChan) Error() error {
close(e.C)
var out string
for err := range e.C {
if err != nil {
out += "[" + err.Error() + "], "
}
}
if out != "" {
return fmt.Errorf("Errors encountered: " + strings.TrimRight(out, ", "))
}
return nil
}

View File

@@ -45,7 +45,7 @@ func (g *GlobPath) Match() map[string]os.FileInfo {
if !g.hasMeta {
out := make(map[string]os.FileInfo)
info, err := os.Stat(g.path)
if !os.IsNotExist(err) {
if err == nil {
out[g.path] = info
}
return out
@@ -55,7 +55,7 @@ func (g *GlobPath) Match() map[string]os.FileInfo {
files, _ := filepath.Glob(g.path)
for _, file := range files {
info, err := os.Stat(file)
if !os.IsNotExist(err) {
if err == nil {
out[file] = info
}
}

View File

@@ -1,6 +1,7 @@
package globpath
import (
"os"
"runtime"
"strings"
"testing"
@@ -70,3 +71,20 @@ func getTestdataDir() string {
_, filename, _, _ := runtime.Caller(1)
return strings.Replace(filename, "globpath_test.go", "testdata", 1)
}
func TestMatch_ErrPermission(t *testing.T) {
tests := []struct {
input string
expected map[string]os.FileInfo
}{
{"/root/foo", map[string]os.FileInfo{}},
{"/root/f*", map[string]os.FileInfo{}},
}
for _, test := range tests {
glob, err := Compile(test.input)
require.NoError(t, err)
actual := glob.Match()
require.Equal(t, test.expected, actual)
}
}

View File

@@ -40,6 +40,7 @@ func TestSnakeCase(t *testing.T) {
var (
sleepbin, _ = exec.LookPath("sleep")
echobin, _ = exec.LookPath("echo")
shell, _ = exec.LookPath("sh")
)
func TestRunTimeout(t *testing.T) {
@@ -84,13 +85,13 @@ func TestCombinedOutput(t *testing.T) {
// test that CombinedOutputTimeout and exec.Cmd.CombinedOutput return
// the same output from a failed command.
func TestCombinedOutputError(t *testing.T) {
if sleepbin == "" {
t.Skip("'sleep' binary not available on OS, skipping.")
if shell == "" {
t.Skip("'sh' binary not available on OS, skipping.")
}
cmd := exec.Command(sleepbin, "foo")
cmd := exec.Command(shell, "-c", "false")
expected, err := cmd.CombinedOutput()
cmd2 := exec.Command(sleepbin, "foo")
cmd2 := exec.Command(shell, "-c", "false")
actual, err := CombinedOutputTimeout(cmd2, time.Second)
assert.Error(t, err)
@@ -98,10 +99,10 @@ func TestCombinedOutputError(t *testing.T) {
}
func TestRunError(t *testing.T) {
if sleepbin == "" {
t.Skip("'sleep' binary not available on OS, skipping.")
if shell == "" {
t.Skip("'sh' binary not available on OS, skipping.")
}
cmd := exec.Command(sleepbin, "foo")
cmd := exec.Command(shell, "-c", "false")
err := RunTimeout(cmd, time.Second)
assert.Error(t, err)

View File

@@ -132,6 +132,7 @@ func (f *Filter) Apply(
return true
}
// IsActive reports whether the filter is active
func (f *Filter) IsActive() bool {
return f.isActive
}
@@ -139,43 +140,66 @@ func (f *Filter) IsActive() bool {
// shouldNamePass returns true if the metric should pass, false if should drop
// based on the drop/pass filter parameters
func (f *Filter) shouldNamePass(key string) bool {
if f.namePass != nil {
pass := func(f *Filter) bool {
if f.namePass.Match(key) {
return true
}
return false
}
if f.nameDrop != nil {
drop := func(f *Filter) bool {
if f.nameDrop.Match(key) {
return false
}
return true
}
if f.namePass != nil && f.nameDrop != nil {
return pass(f) && drop(f)
} else if f.namePass != nil {
return pass(f)
} else if f.nameDrop != nil {
return drop(f)
}
return true
}
// shouldFieldPass returns true if the metric should pass, false if should drop
// based on the drop/pass filter parameters
func (f *Filter) shouldFieldPass(key string) bool {
if f.fieldPass != nil {
pass := func(f *Filter) bool {
if f.fieldPass.Match(key) {
return true
}
return false
}
if f.fieldDrop != nil {
drop := func(f *Filter) bool {
if f.fieldDrop.Match(key) {
return false
}
return true
}
if f.fieldPass != nil && f.fieldDrop != nil {
return pass(f) && drop(f)
} else if f.fieldPass != nil {
return pass(f)
} else if f.fieldDrop != nil {
return drop(f)
}
return true
}
// shouldTagsPass returns true if the metric should pass, false if should drop
// based on the tagdrop/tagpass filter parameters
func (f *Filter) shouldTagsPass(tags map[string]string) bool {
if f.TagPass != nil {
pass := func(f *Filter) bool {
for _, pat := range f.TagPass {
if pat.filter == nil {
continue
@@ -189,7 +213,7 @@ func (f *Filter) shouldTagsPass(tags map[string]string) bool {
return false
}
if f.TagDrop != nil {
drop := func(f *Filter) bool {
for _, pat := range f.TagDrop {
if pat.filter == nil {
continue
@@ -203,6 +227,18 @@ func (f *Filter) shouldTagsPass(tags map[string]string) bool {
return true
}
// Add additional logic in case where both parameters are set.
// see: https://github.com/influxdata/telegraf/issues/2860
if f.TagPass != nil && f.TagDrop != nil {
// return true only when the tag passes and will not be dropped (true, true).
// when the same tag would be both passed and dropped, it is dropped (true, false).
return pass(f) && drop(f)
} else if f.TagPass != nil {
return pass(f)
} else if f.TagDrop != nil {
return drop(f)
}
return true
}
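In effect, setting both tagpass and tagdrop behaves like a logical AND of the two checks. Walking through the data used by the tagpass/tagdrop test below:
```go
// TagPass = {tag1: ["1", "4"]}, TagDrop = {tag1: ["4"], tag2: ["3"]}
//   {tag1:"1", tag2:"3"} -> pass ok, but TagDrop matches tag2=3 -> dropped
//   {tag1:"1", tag2:"2"} -> pass ok, no drop match              -> kept
//   {tag1:"2", tag2:"1"} -> TagPass does not match              -> dropped
//   {tag1:"4", tag2:"1"} -> pass ok, but TagDrop matches tag1=4 -> dropped
```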

View File

@@ -357,3 +357,88 @@ func TestFilter_FilterTagsMatches(t *testing.T) {
"mytag": "foobar",
}, pretags)
}
// TestFilter_FilterNamePassAndDrop checks the case when both
// parameters are defined
// see: https://github.com/influxdata/telegraf/issues/2860
func TestFilter_FilterNamePassAndDrop(t *testing.T) {
inputData := []string{"name1", "name2", "name3", "name4"}
expectedResult := []bool{false, true, false, false}
f := Filter{
NamePass: []string{"name1", "name2"},
NameDrop: []string{"name1", "name3"},
}
require.NoError(t, f.Compile())
for i, name := range inputData {
assert.Equal(t, f.shouldNamePass(name), expectedResult[i])
}
}
// TestFilter_FilterFieldPassAndDrop checks the case when both
// parameters are defined
// see: https://github.com/influxdata/telegraf/issues/2860
func TestFilter_FilterFieldPassAndDrop(t *testing.T) {
inputData := []string{"field1", "field2", "field3", "field4"}
expectedResult := []bool{false, true, false, false}
f := Filter{
FieldPass: []string{"field1", "field2"},
FieldDrop: []string{"field1", "field3"},
}
require.NoError(t, f.Compile())
for i, field := range inputData {
assert.Equal(t, f.shouldFieldPass(field), expectedResult[i])
}
}
// TestFilter_FilterTagsPassAndDrop checks the case when both
// parameters are defined
// see: https://github.com/influxdata/telegraf/issues/2860
func TestFilter_FilterTagsPassAndDrop(t *testing.T) {
inputData := []map[string]string{
{"tag1": "1", "tag2": "3"},
{"tag1": "1", "tag2": "2"},
{"tag1": "2", "tag2": "1"},
{"tag1": "4", "tag2": "1"},
}
expectedResult := []bool{false, true, false, false}
filterPass := []TagFilter{
TagFilter{
Name: "tag1",
Filter: []string{"1", "4"},
},
}
filterDrop := []TagFilter{
TagFilter{
Name: "tag1",
Filter: []string{"4"},
},
TagFilter{
Name: "tag2",
Filter: []string{"3"},
},
}
f := Filter{
TagDrop: filterDrop,
TagPass: filterPass,
}
require.NoError(t, f.Compile())
for i, tag := range inputData {
assert.Equal(t, f.shouldTagsPass(tag), expectedResult[i])
}
}

View File

@@ -3,6 +3,7 @@ package models
import (
"log"
"math"
"strings"
"time"
"github.com/influxdata/telegraf"
@@ -77,7 +78,27 @@ func makemetric(
}
}
for k, v := range tags {
if strings.HasSuffix(k, `\`) {
log.Printf("D! Measurement [%s] tag [%s] "+
"ends with a backslash, skipping", measurement, k)
delete(tags, k)
continue
} else if strings.HasSuffix(v, `\`) {
log.Printf("D! Measurement [%s] tag [%s] has a value "+
"ending with a backslash, skipping", measurement, k)
delete(tags, k)
continue
}
}
for k, v := range fields {
if strings.HasSuffix(k, `\`) {
log.Printf("D! Measurement [%s] field [%s] "+
"ends with a backslash, skipping", measurement, k)
delete(fields, k)
continue
}
// Validate uint64 and float64 fields
// convert all int & uint types to int64
switch val := v.(type) {
@@ -128,6 +149,8 @@ func makemetric(
delete(fields, k)
continue
}
case string:
fields[k] = v
default:
fields[k] = v
}

View File

@@ -9,6 +9,7 @@ import (
"github.com/influxdata/telegraf"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestMakeMetricNoFields(t *testing.T) {
@@ -332,6 +333,129 @@ func TestMakeMetricNameSuffix(t *testing.T) {
)
}
func TestMakeMetric_TrailingSlash(t *testing.T) {
now := time.Now()
tests := []struct {
name string
measurement string
fields map[string]interface{}
tags map[string]string
expectedNil bool
expectedMeasurement string
expectedFields map[string]interface{}
expectedTags map[string]string
}{
{
name: "Measurement cannot have trailing slash",
measurement: `cpu\`,
fields: map[string]interface{}{
"value": int64(42),
},
tags: map[string]string{},
expectedNil: true,
},
{
name: "Field key with trailing slash dropped",
measurement: `cpu`,
fields: map[string]interface{}{
"value": int64(42),
`bad\`: `xyzzy`,
},
tags: map[string]string{},
expectedMeasurement: `cpu`,
expectedFields: map[string]interface{}{
"value": int64(42),
},
expectedTags: map[string]string{},
},
{
name: "Field value with trailing slash okay",
measurement: `cpu`,
fields: map[string]interface{}{
"value": int64(42),
"ok": `xyzzy\`,
},
tags: map[string]string{},
expectedMeasurement: `cpu`,
expectedFields: map[string]interface{}{
"value": int64(42),
"ok": `xyzzy\`,
},
expectedTags: map[string]string{},
},
{
name: "Must have one field after dropped",
measurement: `cpu`,
fields: map[string]interface{}{
"bad": math.NaN(),
},
tags: map[string]string{},
expectedNil: true,
},
{
name: "Tag key with trailing slash dropped",
measurement: `cpu`,
fields: map[string]interface{}{
"value": int64(42),
},
tags: map[string]string{
`host\`: "localhost",
"a": "x",
},
expectedMeasurement: `cpu`,
expectedFields: map[string]interface{}{
"value": int64(42),
},
expectedTags: map[string]string{
"a": "x",
},
},
{
name: "Tag value with trailing slash dropped",
measurement: `cpu`,
fields: map[string]interface{}{
"value": int64(42),
},
tags: map[string]string{
`host`: `localhost\`,
"a": "x",
},
expectedMeasurement: `cpu`,
expectedFields: map[string]interface{}{
"value": int64(42),
},
expectedTags: map[string]string{
"a": "x",
},
},
}
ri := NewRunningInput(&testInput{}, &InputConfig{
Name: "TestRunningInput",
})
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
m := ri.MakeMetric(
tc.measurement,
tc.fields,
tc.tags,
telegraf.Untyped,
now)
if tc.expectedNil {
require.Nil(t, m)
} else {
require.NotNil(t, m)
require.Equal(t, tc.expectedMeasurement, m.Name())
require.Equal(t, tc.expectedFields, m.Fields())
require.Equal(t, tc.expectedTags, m.Tags())
}
})
}
}
type testInput struct{}
func (t *testInput) Description() string { return "" }

View File

@@ -2,6 +2,7 @@ package models
import (
"log"
"sync"
"time"
"github.com/influxdata/telegraf"
@@ -34,6 +35,9 @@ type RunningOutput struct {
metrics *buffer.Buffer
failMetrics *buffer.Buffer
// Guards against concurrent calls to the Output as described in #3009
sync.Mutex
}
func NewRunningOutput(
@@ -169,6 +173,8 @@ func (ro *RunningOutput) write(metrics []telegraf.Metric) error {
if nMetrics == 0 {
return nil
}
ro.Lock()
defer ro.Unlock()
start := time.Now()
err := ro.Output.Write(metrics)
elapsed := time.Since(start)

View File

@@ -1,11 +1,15 @@
package models
import (
"sync"
"github.com/influxdata/telegraf"
)
type RunningProcessor struct {
Name string
Name string
sync.Mutex
Processor telegraf.Processor
Config *ProcessorConfig
}
@@ -24,6 +28,9 @@ type ProcessorConfig struct {
}
func (rp *RunningProcessor) Apply(in ...telegraf.Metric) []telegraf.Metric {
rp.Lock()
defer rp.Unlock()
ret := []telegraf.Metric{}
for _, metric := range in {

View File

@@ -4,11 +4,14 @@ import (
"io"
"log"
"os"
"regexp"
"time"
"github.com/influxdata/wlog"
)
var prefixRegex = regexp.MustCompile("^[DIWE]!")
// newTelegrafWriter returns a logging-wrapped writer.
func newTelegrafWriter(w io.Writer) io.Writer {
return &telegrafLog{
@@ -21,7 +24,13 @@ type telegrafLog struct {
}
func (t *telegrafLog) Write(b []byte) (n int, err error) {
return t.writer.Write(append([]byte(time.Now().UTC().Format(time.RFC3339)+" "), b...))
var line []byte
if !prefixRegex.Match(b) {
line = append([]byte(time.Now().UTC().Format(time.RFC3339)+" I! "), b...)
} else {
line = append([]byte(time.Now().UTC().Format(time.RFC3339)+" "), b...)
}
return t.writer.Write(line)
}
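The regex gate means a bare `log.Printf` message is promoted to info level, while an explicit `D!`/`I!`/`W!`/`E!` prefix is preserved. A small standalone sketch of the resulting behavior (timestamp hard-coded for illustration):
```go
prefixRegex := regexp.MustCompile("^[DIWE]!")
for _, msg := range []string{"E! disk full", "starting agent"} {
	prefix := "2018-03-14T16:34:56Z "
	if !prefixRegex.MatchString(msg) {
		prefix += "I! " // no level given: default to info
	}
	fmt.Println(prefix + msg)
}
// Output:
// 2018-03-14T16:34:56Z E! disk full
// 2018-03-14T16:34:56Z I! starting agent
```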
// SetupLogging configures the logging output.

View File

@@ -51,6 +51,19 @@ func TestErrorWriteLogToFile(t *testing.T) {
assert.Equal(t, f[19:], []byte("Z E! TEST\n"))
}
func TestAddDefaultLogLevel(t *testing.T) {
tmpfile, err := ioutil.TempFile("", "")
assert.NoError(t, err)
defer func() { os.Remove(tmpfile.Name()) }()
SetupLogging(true, false, tmpfile.Name())
log.Printf("TEST")
f, err := ioutil.ReadFile(tmpfile.Name())
assert.NoError(t, err)
assert.Equal(t, f[19:], []byte("Z I! TEST\n"))
}
func BenchmarkTelegrafLogWrite(b *testing.B) {
var msg = []byte("test")
var buf bytes.Buffer

View File

@@ -13,6 +13,8 @@ const (
Counter
Gauge
Untyped
Summary
Histogram
)
type Metric interface {

View File

@@ -20,8 +20,14 @@ var (
// stringFieldEscaper is for escaping string field values only.
// see https://docs.influxdata.com/influxdb/v1.0/write_protocols/line_protocol_tutorial/#special-characters-and-keywords
stringFieldEscaper = strings.NewReplacer(`"`, `\"`)
stringFieldUnEscaper = strings.NewReplacer(`\"`, `"`)
stringFieldEscaper = strings.NewReplacer(
`"`, `\"`,
`\`, `\\`,
)
stringFieldUnEscaper = strings.NewReplacer(
`\"`, `"`,
`\\`, `\`,
)
)
func escape(s string, t string) string {
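With the backslash added to both replacers, string field values round-trip through line protocol. A quick sketch of what the expanded escaper produces (these pairs match the reader tests further down):
```go
escaper := strings.NewReplacer(`"`, `\"`, `\`, `\\`)
fmt.Println(escaper.Replace(`test\`))  // test\\
fmt.Println(escaper.Replace(`test"`))  // test\"
fmt.Println(escaper.Replace(`test\"`)) // test\\\"
```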

View File

@@ -6,6 +6,7 @@ import (
"hash/fnv"
"sort"
"strconv"
"strings"
"time"
"github.com/influxdata/telegraf"
@@ -20,11 +21,14 @@ func New(
t time.Time,
mType ...telegraf.ValueType,
) (telegraf.Metric, error) {
if len(fields) == 0 {
return nil, fmt.Errorf("Metric cannot be made without any fields")
}
if len(name) == 0 {
return nil, fmt.Errorf("Metric cannot be made with an empty name")
return nil, fmt.Errorf("missing measurement name")
}
if len(fields) == 0 {
return nil, fmt.Errorf("%s: must have one or more fields", name)
}
if strings.HasSuffix(name, `\`) {
return nil, fmt.Errorf("%s: measurement name cannot end with a backslash", name)
}
var thisType telegraf.ValueType
@@ -44,6 +48,13 @@ func New(
// pre-allocate exact size of the tags slice
taglen := 0
for k, v := range tags {
if strings.HasSuffix(k, `\`) {
return nil, fmt.Errorf("%s: tag key cannot end with a backslash: %s", name, k)
}
if strings.HasSuffix(v, `\`) {
return nil, fmt.Errorf("%s: tag value cannot end with a backslash: %s", name, v)
}
if len(k) == 0 || len(v) == 0 {
continue
}
@@ -67,6 +78,10 @@ func New(
// pre-allocate capacity of the fields slice
fieldlen := 0
for k, _ := range fields {
if strings.HasSuffix(k, `\`) {
return nil, fmt.Errorf("%s: field key cannot end with a backslash: %s", name, k)
}
// 10 bytes is completely arbitrary, but will at least prevent some
// amount of allocations. There's a small possibility this will create
// slightly more allocations for a metric that has many short fields.
@@ -87,8 +102,31 @@ func New(
}
// indexUnescapedByte finds the index of the first byte equal to b in buf that
// is not escaped. Returns -1 if not found.
// is not escaped. Does not allow the escape char to be escaped. Returns -1 if
// not found.
func indexUnescapedByte(buf []byte, b byte) int {
var keyi int
for {
i := bytes.IndexByte(buf[keyi:], b)
if i == -1 {
return -1
} else if i == 0 {
break
}
keyi += i
if buf[keyi-1] != '\\' {
break
} else {
keyi++
}
}
return keyi
}
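A hypothetical walkthrough of the non-backslash-escaping variant above:
```go
// indexUnescapedByte([]byte(`foo\,bar,baz`), ',') returns 8: the comma at
// index 4 is preceded by a backslash, so the scan skips past it and stops
// at the unescaped comma at index 8.
//
// indexUnescapedByte([]byte(`foo\\bar`), 'b') returns -1: the second
// backslash is treated as escaping the 'b' (the escape char itself cannot
// be escaped here), and no later 'b' exists.
```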
// indexUnescapedByteBackslashEscaping finds the index of the first byte equal
// to b in buf that is not escaped. Allows for the escape char `\` to be
// escaped. Returns -1 if not found.
func indexUnescapedByteBackslashEscaping(buf []byte, b byte) int {
var keyi int
for {
i := bytes.IndexByte(buf[keyi:], b)
@@ -218,7 +256,7 @@ func (m *metric) SerializeTo(dst []byte) int {
}
func (m *metric) Split(maxSize int) []telegraf.Metric {
if m.Len() < maxSize {
if m.Len() <= maxSize {
return []telegraf.Metric{m}
}
var out []telegraf.Metric
@@ -248,7 +286,7 @@ func (m *metric) Split(maxSize int) []telegraf.Metric {
// if true, then we need to create a metric _not_ including the currently
// selected field
if len(m.fields[i:j])+len(fields)+constant > maxSize {
if len(m.fields[i:j])+len(fields)+constant >= maxSize {
// if false, then we'll create a metric including the currently
// selected field anyways. This means that the given maxSize is too
// small for a single field to fit.
@@ -286,7 +324,7 @@ func (m *metric) Fields() map[string]interface{} {
// end index of field value
var i3 int
if m.fields[i:][i2] == '"' {
i3 = indexUnescapedByte(m.fields[i:][i2+1:], '"')
i3 = indexUnescapedByteBackslashEscaping(m.fields[i:][i2+1:], '"')
if i3 == -1 {
i3 = len(m.fields[i:])
}

View File

@@ -10,6 +10,7 @@ import (
"github.com/influxdata/telegraf"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestNewMetric(t *testing.T) {
@@ -30,7 +31,7 @@ func TestNewMetric(t *testing.T) {
assert.Equal(t, tags, m.Tags())
assert.Equal(t, fields, m.Fields())
assert.Equal(t, "cpu", m.Name())
assert.Equal(t, now, m.Time())
assert.Equal(t, now.UnixNano(), m.Time().UnixNano())
assert.Equal(t, now.UnixNano(), m.UnixNano())
}
@@ -249,11 +250,15 @@ func TestNewMetric_Fields(t *testing.T) {
"host": "localhost",
}
fields := map[string]interface{}{
"float": float64(1),
"int": int64(1),
"bool": true,
"false": false,
"string": "test",
"float": float64(1),
"int": int64(1),
"bool": true,
"false": false,
"string": "test",
"quote_string": `x"y`,
"backslash_quote_string": `x\"y`,
"backslash": `x\y`,
"ends_with_backslash": `x\`,
}
m, err := New("cpu", tags, fields, now)
assert.NoError(t, err)
@@ -366,7 +371,7 @@ func TestIndexUnescapedByte(t *testing.T) {
{
in: []byte(`foo\\bar`),
b: 'b',
expected: 5,
expected: -1,
},
{
in: []byte(`foobar`),
@@ -409,7 +414,7 @@ func TestNewGaugeMetric(t *testing.T) {
assert.Equal(t, tags, m.Tags())
assert.Equal(t, fields, m.Fields())
assert.Equal(t, "cpu", m.Name())
assert.Equal(t, now, m.Time())
assert.Equal(t, now.UnixNano(), m.Time().UnixNano())
assert.Equal(t, now.UnixNano(), m.UnixNano())
}
@@ -431,7 +436,7 @@ func TestNewCounterMetric(t *testing.T) {
assert.Equal(t, tags, m.Tags())
assert.Equal(t, fields, m.Fields())
assert.Equal(t, "cpu", m.Name())
assert.Equal(t, now, m.Time())
assert.Equal(t, now.UnixNano(), m.Time().UnixNano())
assert.Equal(t, now.UnixNano(), m.UnixNano())
}
@@ -458,7 +463,7 @@ func TestSplitMetric(t *testing.T) {
assert.Len(t, split70, 3)
split60 := m.Split(60)
assert.Len(t, split60, 4)
assert.Len(t, split60, 5)
}
// test splitting metric into various max lengths
@@ -578,6 +583,42 @@ func TestSplitMetric_OneField(t *testing.T) {
assert.Equal(t, "cpu,host=localhost float=100001 1480940990034083306\n", split[0].String())
}
func TestSplitMetric_ExactSize(t *testing.T) {
now := time.Unix(0, 1480940990034083306)
tags := map[string]string{
"host": "localhost",
}
fields := map[string]interface{}{
"float": float64(100001),
"int": int64(100001),
"bool": true,
"false": false,
"string": "test",
}
m, err := New("cpu", tags, fields, now)
assert.NoError(t, err)
actual := m.Split(m.Len())
// check that no copy was made
require.Equal(t, &m, &actual[0])
}
func TestSplitMetric_NoRoomForNewline(t *testing.T) {
now := time.Unix(0, 1480940990034083306)
tags := map[string]string{
"host": "localhost",
}
fields := map[string]interface{}{
"float": float64(100001),
"int": int64(100001),
"bool": true,
"false": false,
}
m, err := New("cpu", tags, fields, now)
assert.NoError(t, err)
actual := m.Split(m.Len() - 1)
require.Equal(t, 2, len(actual))
}
func TestNewMetricAggregate(t *testing.T) {
now := time.Now()
@@ -648,3 +689,49 @@ func TestEmptyTagValueOrKey(t *testing.T) {
assert.NoError(t, err)
}
func TestNewMetric_TrailingSlash(t *testing.T) {
now := time.Now()
tests := []struct {
name string
tags map[string]string
fields map[string]interface{}
}{
{
name: `cpu\`,
fields: map[string]interface{}{
"value": int64(42),
},
},
{
name: "cpu",
fields: map[string]interface{}{
`value\`: "x",
},
},
{
name: "cpu",
tags: map[string]string{
`host\`: "localhost",
},
fields: map[string]interface{}{
"value": int64(42),
},
},
{
name: "cpu",
tags: map[string]string{
"host": `localhost\`,
},
fields: map[string]interface{}{
"value": int64(42),
},
},
}
for _, tc := range tests {
_, err := New(tc.name, tc.tags, tc.fields, now)
assert.Error(t, err)
}
}

View File

@@ -4,6 +4,7 @@ import (
"bytes"
"errors"
"fmt"
"strconv"
"time"
"github.com/influxdata/telegraf"
@@ -40,10 +41,18 @@ const (
)
func Parse(buf []byte) ([]telegraf.Metric, error) {
return ParseWithDefaultTime(buf, time.Now())
return ParseWithDefaultTimePrecision(buf, time.Now(), "")
}
func ParseWithDefaultTime(buf []byte, t time.Time) ([]telegraf.Metric, error) {
return ParseWithDefaultTimePrecision(buf, t, "")
}
func ParseWithDefaultTimePrecision(
buf []byte,
t time.Time,
precision string,
) ([]telegraf.Metric, error) {
if len(buf) == 0 {
return []telegraf.Metric{}, nil
}
@@ -63,7 +72,7 @@ func ParseWithDefaultTime(buf []byte, t time.Time) ([]telegraf.Metric, error) {
continue
}
m, err := parseMetric(buf[i:i+j], t)
m, err := parseMetric(buf[i:i+j], t, precision)
if err != nil {
i += j + 1 // increment i past the previous newline
errStr += " " + err.Error()
@@ -80,7 +89,10 @@ func ParseWithDefaultTime(buf []byte, t time.Time) ([]telegraf.Metric, error) {
return metrics, nil
}
func parseMetric(buf []byte, defaultTime time.Time) (telegraf.Metric, error) {
func parseMetric(buf []byte,
defaultTime time.Time,
precision string,
) (telegraf.Metric, error) {
var dTime string
// scan the first block which is measurement[,tag1=value1,tag2=value=2...]
pos, key, err := scanKey(buf, 0)
@@ -114,9 +126,23 @@ func parseMetric(buf []byte, defaultTime time.Time) (telegraf.Metric, error) {
return nil, err
}
// apply precision multiplier
var nsec int64
multiplier := getPrecisionMultiplier(precision)
if len(ts) > 0 && multiplier > 1 {
tsint, err := parseIntBytes(ts, 10, 64)
if err != nil {
return nil, err
}
nsec := multiplier * tsint
ts = []byte(strconv.FormatInt(nsec, 10))
}
m := &metric{
fields: fields,
t: ts,
nsec: nsec,
}
// parse out the measurement name
@@ -300,7 +326,9 @@ func scanTagsValue(buf []byte, i int) (int, int, error) {
func scanFields(buf []byte, i int) (int, []byte, error) {
start := skipWhitespace(buf, i)
i = start
quoted := false
// track how many '"' we've seen since the last '='
quotes := 0
// tracks how many '=' we've seen
equals := 0
@@ -324,13 +352,17 @@ func scanFields(buf []byte, i int) (int, []byte, error) {
// Only quote values in the field value since quotes are not significant
// in the field key
if buf[i] == '"' && equals > commas {
quoted = !quoted
i++
quotes++
if quotes > 2 {
break
}
continue
}
// If we see an =, ensure that there is at least one char before and after it
if buf[i] == '=' && !quoted {
if buf[i] == '=' && quotes != 1 {
quotes = 0
equals++
// check for "... =123" but allow "a\ =123"
@@ -372,18 +404,18 @@ func scanFields(buf []byte, i int) (int, []byte, error) {
}
}
if buf[i] == ',' && !quoted {
if buf[i] == ',' && quotes != 1 {
commas++
}
// reached end of block?
if buf[i] == ' ' && !quoted {
if buf[i] == ' ' && quotes != 1 {
break
}
i++
}
if quoted {
if quotes != 0 && quotes != 2 {
return i, buf[start:i], makeError("unbalanced quotes", buf, i)
}
@@ -621,10 +653,28 @@ func skipWhitespace(buf []byte, i int) int {
}
// makeError is a helper function for making a metric parsing error.
// reason is the reason that the error occured.
// reason is the reason why the error occurred.
// buf should be the current buffer we are parsing.
// i is the current index, to give some context on where in the buffer we are.
func makeError(reason string, buf []byte, i int) error {
return fmt.Errorf("metric parsing error, reason: [%s], buffer: [%s], index: [%d]",
reason, buf, i)
}
// getPrecisionMultiplier will return a multiplier for the precision specified.
func getPrecisionMultiplier(precision string) int64 {
d := time.Nanosecond
switch precision {
case "u":
d = time.Microsecond
case "ms":
d = time.Millisecond
case "s":
d = time.Second
case "m":
d = time.Minute
case "h":
d = time.Hour
}
return int64(d)
}
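The multiplier simply rescales the raw timestamp into nanoseconds before it is stored. A worked sketch using the millisecond case from the precision tests below:
```go
multiplier := getPrecisionMultiplier("ms") // int64(time.Millisecond) == 1e6
tsint := int64(1491847420123)              // raw timestamp from the line
nsec := multiplier * tsint                 // 1491847420123000000 ns
```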

View File

@@ -364,6 +364,41 @@ func TestParseNegativeTimestamps(t *testing.T) {
}
}
func TestParsePrecision(t *testing.T) {
for _, tt := range []struct {
line string
precision string
expected int64
}{
{"test v=42 1491847420", "s", 1491847420000000000},
{"test v=42 1491847420123", "ms", 1491847420123000000},
{"test v=42 1491847420123456", "u", 1491847420123456000},
{"test v=42 1491847420123456789", "ns", 1491847420123456789},
{"test v=42 1491847420123456789", "1s", 1491847420123456789},
{"test v=42 1491847420123456789", "asdf", 1491847420123456789},
} {
metrics, err := ParseWithDefaultTimePrecision(
[]byte(tt.line+"\n"), time.Now(), tt.precision)
assert.NoError(t, err)
assert.Equal(t, tt.expected, metrics[0].UnixNano())
}
}
func TestParsePrecisionUnsetTime(t *testing.T) {
for _, tt := range []struct {
line string
precision string
}{
{"test v=42", "s"},
{"test v=42", "ns"},
} {
_, err := ParseWithDefaultTimePrecision(
[]byte(tt.line+"\n"), time.Now(), tt.precision)
assert.NoError(t, err)
}
}
func TestParseMaxKeyLength(t *testing.T) {
key := ""
for {

View File

@@ -57,7 +57,7 @@ func (r *reader) Read(p []byte) (n int, err error) {
// this for-loop is the sunny-day scenario, where we are given a
// buffer that is large enough to hold at least a single metric.
// all of the cases below it are edge-cases.
if r.metrics[r.iM].Len() < len(p[i:]) {
if r.metrics[r.iM].Len() <= len(p[i:]) {
i += r.metrics[r.iM].SerializeTo(p[i:])
} else {
break
@@ -76,7 +76,7 @@ func (r *reader) Read(p []byte) (n int, err error) {
if len(tmp) > 1 {
r.splitMetrics = tmp
r.state = split
if r.splitMetrics[0].Len() < len(p) {
if r.splitMetrics[0].Len() <= len(p) {
i += r.splitMetrics[0].SerializeTo(p)
r.iSM = 1
} else {
@@ -99,7 +99,7 @@ func (r *reader) Read(p []byte) (n int, err error) {
}
case split:
if r.splitMetrics[r.iSM].Len() < len(p) {
if r.splitMetrics[r.iSM].Len() <= len(p) {
// write the current split metric
i += r.splitMetrics[r.iSM].SerializeTo(p)
r.iSM++
@@ -131,6 +131,10 @@ func (r *reader) Read(p []byte) (n int, err error) {
r.iSM++
if r.iSM == len(r.splitMetrics) {
r.iM++
if r.iM == len(r.metrics) {
r.state = done
return i, io.EOF
}
r.state = normal
} else {
r.state = split

View File

@@ -4,12 +4,13 @@ import (
"io"
"io/ioutil"
"regexp"
"strings"
"testing"
"time"
"github.com/influxdata/telegraf"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func BenchmarkMetricReader(b *testing.B) {
@@ -116,6 +117,140 @@ func TestMetricReader_OverflowMetric(t *testing.T) {
}
}
// Regression test for when a metric is the same size as the buffer.
//
// Previously EOF would not be set until the next call to Read.
func TestMetricReader_MetricSizeEqualsBufferSize(t *testing.T) {
ts := time.Unix(1481032190, 0)
m1, _ := New("foo", map[string]string{},
map[string]interface{}{"a": int64(1)}, ts)
metrics := []telegraf.Metric{m1}
r := NewReader(metrics)
buf := make([]byte, m1.Len())
for {
n, err := r.Read(buf)
// Should never read 0 bytes unless at EOF or the input buffer is 0 length
if n == 0 {
require.Equal(t, io.EOF, err)
break
}
// Lines should be terminated with a LF
if err == io.EOF {
require.Equal(t, uint8('\n'), buf[n-1])
break
}
require.NoError(t, err)
}
}
// Regression test for when a metric requires to be split and one of the
// split metrics is exactly the size of the buffer.
//
// Previously an empty string would be returned on the next Read without error,
// and then next Read call would panic.
func TestMetricReader_SplitWithExactLengthSplit(t *testing.T) {
ts := time.Unix(1481032190, 0)
m1, _ := New("foo", map[string]string{},
map[string]interface{}{"a": int64(1), "bb": int64(2)}, ts)
metrics := []telegraf.Metric{m1}
r := NewReader(metrics)
buf := make([]byte, 30)
// foo a=1i,bb=2i 1481032190000000000\n // len 35
//
// Requires this specific split order:
// foo a=1i 1481032190000000000\n // len 29
// foo bb=2i 1481032190000000000\n // len 30
for {
n, err := r.Read(buf)
// Should never read 0 bytes unless at EOF or the input buffer is 0 length
if n == 0 {
require.Equal(t, io.EOF, err)
break
}
// Lines should be terminated with a LF
if err == io.EOF {
require.Equal(t, uint8('\n'), buf[n-1])
break
}
require.NoError(t, err)
}
}
// Regression test for when a metric requires to be split and one of the
// split metrics is larger than the buffer.
//
// Previously the metric index would be set incorrectly causing a panic.
func TestMetricReader_SplitOverflowOversized(t *testing.T) {
ts := time.Unix(1481032190, 0)
m1, _ := New("foo", map[string]string{},
map[string]interface{}{
"a": int64(1),
"bbb": int64(2),
}, ts)
metrics := []telegraf.Metric{m1}
r := NewReader(metrics)
buf := make([]byte, 30)
// foo a=1i,bbb=2i 1481032190000000000\n // len 36
//
// foo a=1i 1481032190000000000\n // len 29
// foo bbb=2i 1481032190000000000\n // len 31
for {
n, err := r.Read(buf)
// Should never read 0 bytes unless at EOF or the input buffer is 0 length
if n == 0 {
require.Equal(t, io.EOF, err)
break
}
// Lines should be terminated with a LF
if err == io.EOF {
require.Equal(t, uint8('\n'), buf[n-1])
break
}
require.NoError(t, err)
}
}
// Regression test for when a split metric exactly fits in the buffer.
//
// Previously the metric would be overflow split when not required.
func TestMetricReader_SplitOverflowUneeded(t *testing.T) {
ts := time.Unix(1481032190, 0)
m1, _ := New("foo", map[string]string{},
map[string]interface{}{"a": int64(1), "b": int64(2)}, ts)
metrics := []telegraf.Metric{m1}
r := NewReader(metrics)
buf := make([]byte, 29)
// foo a=1i,b=2i 1481032190000000000\n // len 34
//
// foo a=1i 1481032190000000000\n // len 29
// foo b=2i 1481032190000000000\n // len 29
for {
n, err := r.Read(buf)
// Should never read 0 bytes unless at EOF or the input buffer is 0 length
if n == 0 {
require.Equal(t, io.EOF, err)
break
}
// Lines should be terminated with a LF
if err == io.EOF {
require.Equal(t, uint8('\n'), buf[n-1])
break
}
require.NoError(t, err)
}
}
func TestMetricReader_OverflowMultipleMetrics(t *testing.T) {
ts := time.Unix(1481032190, 0)
m, _ := New("foo", map[string]string{},
@@ -485,3 +620,94 @@ func TestMetricReader_SplitMetricChangingBuffer2(t *testing.T) {
assert.Equal(t, test.err, err, test.expRegex)
}
}
func TestReader_Read(t *testing.T) {
epoch := time.Unix(0, 0)
type args struct {
name string
tags map[string]string
fields map[string]interface{}
t time.Time
mType []telegraf.ValueType
}
tests := []struct {
name string
args args
expected []byte
}{
{
name: "escape backslashes in string field",
args: args{
name: "cpu",
tags: map[string]string{},
fields: map[string]interface{}{"value": `test\`},
t: epoch,
},
expected: []byte(`cpu value="test\\" 0`),
},
{
name: "escape quote in string field",
args: args{
name: "cpu",
tags: map[string]string{},
fields: map[string]interface{}{"value": `test"`},
t: epoch,
},
expected: []byte(`cpu value="test\"" 0`),
},
{
name: "escape quote and backslash in string field",
args: args{
name: "cpu",
tags: map[string]string{},
fields: map[string]interface{}{"value": `test\"`},
t: epoch,
},
expected: []byte(`cpu value="test\\\"" 0`),
},
{
name: "escape multiple backslash in string field",
args: args{
name: "cpu",
tags: map[string]string{},
fields: map[string]interface{}{"value": `test\\`},
t: epoch,
},
expected: []byte(`cpu value="test\\\\" 0`),
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
buf := make([]byte, 512)
m, err := New(tt.args.name, tt.args.tags, tt.args.fields, tt.args.t, tt.args.mType...)
require.NoError(t, err)
r := NewReader([]telegraf.Metric{m})
num, err := r.Read(buf)
if err != io.EOF {
require.NoError(t, err)
}
line := string(buf[:num])
// This is done so that we can use raw strings in the test spec
noeol := strings.TrimRight(line, "\n")
require.Equal(t, string(tt.expected), noeol)
require.Equal(t, len(tt.expected)+1, num)
})
}
}
func TestMetricRoundtrip(t *testing.T) {
const lp = `nstat,bu=linux,cls=server,dc=cer,env=production,host=hostname,name=netstat,sr=database IpExtInBcastOctets=12570626154i,IpExtInBcastPkts=95541226i,IpExtInCEPkts=0i,IpExtInCsumErrors=0i,IpExtInECT0Pkts=55674i,IpExtInECT1Pkts=0i,IpExtInMcastOctets=5928296i,IpExtInMcastPkts=174365i,IpExtInNoECTPkts=17965863529i,IpExtInNoRoutes=20i,IpExtInOctets=3334866321815i,IpExtInTruncatedPkts=0i,IpExtOutBcastOctets=0i,IpExtOutBcastPkts=0i,IpExtOutMcastOctets=0i,IpExtOutMcastPkts=0i,IpExtOutOctets=31397892391399i,TcpExtArpFilter=0i,TcpExtBusyPollRxPackets=0i,TcpExtDelayedACKLocked=14094i,TcpExtDelayedACKLost=302083i,TcpExtDelayedACKs=55486507i,TcpExtEmbryonicRsts=11879i,TcpExtIPReversePathFilter=0i,TcpExtListenDrops=1736i,TcpExtListenOverflows=0i,TcpExtLockDroppedIcmps=0i,TcpExtOfoPruned=0i,TcpExtOutOfWindowIcmps=8i,TcpExtPAWSActive=0i,TcpExtPAWSEstab=974i,TcpExtPAWSPassive=0i,TcpExtPruneCalled=0i,TcpExtRcvPruned=0i,TcpExtSyncookiesFailed=12593i,TcpExtSyncookiesRecv=0i,TcpExtSyncookiesSent=0i,TcpExtTCPACKSkippedChallenge=0i,TcpExtTCPACKSkippedFinWait2=0i,TcpExtTCPACKSkippedPAWS=806i,TcpExtTCPACKSkippedSeq=519i,TcpExtTCPACKSkippedSynRecv=0i,TcpExtTCPACKSkippedTimeWait=0i,TcpExtTCPAbortFailed=0i,TcpExtTCPAbortOnClose=22i,TcpExtTCPAbortOnData=36593i,TcpExtTCPAbortOnLinger=0i,TcpExtTCPAbortOnMemory=0i,TcpExtTCPAbortOnTimeout=674i,TcpExtTCPAutoCorking=494253233i,TcpExtTCPBacklogDrop=0i,TcpExtTCPChallengeACK=281i,TcpExtTCPDSACKIgnoredNoUndo=93354i,TcpExtTCPDSACKIgnoredOld=336i,TcpExtTCPDSACKOfoRecv=0i,TcpExtTCPDSACKOfoSent=7i,TcpExtTCPDSACKOldSent=302073i,TcpExtTCPDSACKRecv=215884i,TcpExtTCPDSACKUndo=7633i,TcpExtTCPDeferAcceptDrop=0i,TcpExtTCPDirectCopyFromBacklog=0i,TcpExtTCPDirectCopyFromPrequeue=0i,TcpExtTCPFACKReorder=1320i,TcpExtTCPFastOpenActive=0i,TcpExtTCPFastOpenActiveFail=0i,TcpExtTCPFastOpenCookieReqd=0i,TcpExtTCPFastOpenListenOverflow=0i,TcpExtTCPFastOpenPassive=0i,TcpExtTCPFastOpenPassiveFail=0i,TcpExtTCPFastRetrans=350681i,TcpExtTCPForwardRetrans=142168i,TcpExtTCPFromZeroWindowAdv=4317i,TcpExtTCPFullUndo=29502i,TcpExtTCPHPAcks=10267073000i,TcpExtTCPHPHits=5629837098i,TcpExtTCPHPHitsToUser=0i,TcpExtTCPHystartDelayCwnd=285127i,TcpExtTCPHystartDelayDetect=12318i,TcpExtTCPHystartTrainCwnd=69160570i,TcpExtTCPHystartTrainDetect=3315799i,TcpExtTCPLossFailures=109i,TcpExtTCPLossProbeRecovery=110819i,TcpExtTCPLossProbes=233995i,TcpExtTCPLossUndo=5276i,TcpExtTCPLostRetransmit=397i,TcpExtTCPMD5NotFound=0i,TcpExtTCPMD5Unexpected=0i,TcpExtTCPMemoryPressures=0i,TcpExtTCPMinTTLDrop=0i,TcpExtTCPOFODrop=0i,TcpExtTCPOFOMerge=7i,TcpExtTCPOFOQueue=15196i,TcpExtTCPOrigDataSent=29055119435i,TcpExtTCPPartialUndo=21320i,TcpExtTCPPrequeueDropped=0i,TcpExtTCPPrequeued=0i,TcpExtTCPPureAcks=1236441827i,TcpExtTCPRcvCoalesce=225590473i,TcpExtTCPRcvCollapsed=0i,TcpExtTCPRenoFailures=0i,TcpExtTCPRenoRecovery=0i,TcpExtTCPRenoRecoveryFail=0i,TcpExtTCPRenoReorder=0i,TcpExtTCPReqQFullDoCookies=0i,TcpExtTCPReqQFullDrop=0i,TcpExtTCPRetransFail=41i,TcpExtTCPSACKDiscard=0i,TcpExtTCPSACKReneging=0i,TcpExtTCPSACKReorder=4307i,TcpExtTCPSYNChallenge=244i,TcpExtTCPSackFailures=1698i,TcpExtTCPSackMerged=184668i,TcpExtTCPSackRecovery=97369i,TcpExtTCPSackRecoveryFail=381i,TcpExtTCPSackShiftFallback=2697079i,TcpExtTCPSackShifted=760299i,TcpExtTCPSchedulerFailed=0i,TcpExtTCPSlowStartRetrans=9276i,TcpExtTCPSpuriousRTOs=959i,TcpExtTCPSpuriousRtxHostQueues=2973i,TcpExtTCPSynRetrans=200970i,TcpExtTCPTSReorder=15221i,TcpExtTCPTimeWaitOverflow=0i,TcpExtTCPTimeouts=70127i,TcpExtTCPToZeroWindowAdv=4317i,TcpExtTCPWantZeroWindowAdv=2133i,TcpExtTW=24809813i,TcpExtTWKilled=0i,TcpExtTWRecycled=0i 1496460785000000000
nstat,bu=linux,cls=server,dc=cer,env=production,host=hostname,name=snmp,sr=database IcmpInAddrMaskReps=0i,IcmpInAddrMasks=90i,IcmpInCsumErrors=0i,IcmpInDestUnreachs=284401i,IcmpInEchoReps=9i,IcmpInEchos=1761912i,IcmpInErrors=407i,IcmpInMsgs=2047767i,IcmpInParmProbs=0i,IcmpInRedirects=0i,IcmpInSrcQuenchs=0i,IcmpInTimeExcds=46i,IcmpInTimestampReps=0i,IcmpInTimestamps=1309i,IcmpMsgInType0=9i,IcmpMsgInType11=46i,IcmpMsgInType13=1309i,IcmpMsgInType17=90i,IcmpMsgInType3=284401i,IcmpMsgInType8=1761912i,IcmpMsgOutType0=1761912i,IcmpMsgOutType14=1248i,IcmpMsgOutType3=108709i,IcmpMsgOutType8=9i,IcmpOutAddrMaskReps=0i,IcmpOutAddrMasks=0i,IcmpOutDestUnreachs=108709i,IcmpOutEchoReps=1761912i,IcmpOutEchos=9i,IcmpOutErrors=0i,IcmpOutMsgs=1871878i,IcmpOutParmProbs=0i,IcmpOutRedirects=0i,IcmpOutSrcQuenchs=0i,IcmpOutTimeExcds=0i,IcmpOutTimestampReps=1248i,IcmpOutTimestamps=0i,IpDefaultTTL=64i,IpForwDatagrams=0i,IpForwarding=2i,IpFragCreates=0i,IpFragFails=0i,IpFragOKs=0i,IpInAddrErrors=0i,IpInDelivers=17658795773i,IpInDiscards=0i,IpInHdrErrors=0i,IpInReceives=17659269339i,IpInUnknownProtos=0i,IpOutDiscards=236976i,IpOutNoRoutes=1009i,IpOutRequests=23466783734i,IpReasmFails=0i,IpReasmOKs=0i,IpReasmReqds=0i,IpReasmTimeout=0i,TcpActiveOpens=23308977i,TcpAttemptFails=3757543i,TcpCurrEstab=280i,TcpEstabResets=184792i,TcpInCsumErrors=0i,TcpInErrs=232i,TcpInSegs=17536573089i,TcpMaxConn=-1i,TcpOutRsts=4051451i,TcpOutSegs=29836254873i,TcpPassiveOpens=176546974i,TcpRetransSegs=878085i,TcpRtoAlgorithm=1i,TcpRtoMax=120000i,TcpRtoMin=200i,UdpInCsumErrors=0i,UdpInDatagrams=24441661i,UdpInErrors=0i,UdpLiteInCsumErrors=0i,UdpLiteInDatagrams=0i,UdpLiteInErrors=0i,UdpLiteNoPorts=0i,UdpLiteOutDatagrams=0i,UdpLiteRcvbufErrors=0i,UdpLiteSndbufErrors=0i,UdpNoPorts=17660i,UdpOutDatagrams=51807896i,UdpRcvbufErrors=0i,UdpSndbufErrors=236922i 1496460785000000000
`
metrics, err := Parse([]byte(lp))
require.NoError(t, err)
r := NewReader(metrics)
buf := make([]byte, 128)
_, err = r.Read(buf)
require.NoError(t, err)
metrics, err = Parse(buf)
require.NoError(t, err)
}

View File

@@ -1,5 +1,7 @@
package all
import (
_ "github.com/influxdata/telegraf/plugins/aggregators/basicstats"
_ "github.com/influxdata/telegraf/plugins/aggregators/histogram"
_ "github.com/influxdata/telegraf/plugins/aggregators/minmax"
)

View File

@@ -0,0 +1,43 @@
# BasicStats Aggregator Plugin
The BasicStats aggregator plugin gives count, max, min, mean, s2 (variance), and stdev for a set of values,
emitting the aggregate every `period` seconds.
### Configuration:
```toml
# Keep the aggregate basicstats of each metric passing through.
[[aggregators.basicstats]]
## General Aggregator Arguments:
## The period on which to flush & clear the aggregator.
period = "30s"
## If true, the original metric will be dropped by the
## aggregator and will not get sent to the output plugins.
drop_original = false
```
### Measurements & Fields:
- measurement1
- field1_count
- field1_max
- field1_min
- field1_mean
- field1_s2 (variance)
- field1_stdev (standard deviation)
### Tags:
No tags are applied by this aggregator.
### Example Output:
```
$ telegraf --config telegraf.conf --quiet
system,host=tars load1=1 1475583980000000000
system,host=tars load1=1 1475583990000000000
system,host=tars load1_count=2,load1_max=1,load1_min=1,load1_mean=1,load1_s2=0,load1_stdev=0 1475584010000000000
system,host=tars load1=1 1475584020000000000
system,host=tars load1=3 1475584030000000000
system,host=tars load1_count=2,load1_max=3,load1_min=1,load1_mean=2,load1_s2=2,load1_stdev=1.414214 1475584040000000000
```
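The `mean`, `s2`, and `stdev` fields are maintained incrementally with Welford's online algorithm (the implementation follows below). As a minimal standalone sketch of the update step, with a hypothetical `welford` type and the two `load1` samples from the second period above:
```go
package main

import (
	"fmt"
	"math"
)

// welford holds the running aggregation state for one field.
type welford struct {
	count, mean, m2 float64
}

// add folds one observation into the running mean and the M2 accumulator.
func (w *welford) add(x float64) {
	w.count++
	delta := x - w.mean
	w.mean += delta / w.count
	w.m2 += delta * (x - w.mean)
}

func main() {
	w := &welford{}
	for _, x := range []float64{1, 3} { // load1 samples of the second period
		w.add(x)
	}
	variance := w.m2 / (w.count - 1)                   // sample variance (s2)
	fmt.Println(w.mean, variance, math.Sqrt(variance)) // 2 2 1.4142135623730951
}
```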

View File

@@ -0,0 +1,155 @@
package basicstats
import (
"math"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/aggregators"
)
type BasicStats struct {
cache map[uint64]aggregate
}
func NewBasicStats() telegraf.Aggregator {
mm := &BasicStats{}
mm.Reset()
return mm
}
type aggregate struct {
fields map[string]basicstats
name string
tags map[string]string
}
type basicstats struct {
count float64
min float64
max float64
mean float64
M2 float64 //intermediate value for variance/stdev
}
var sampleConfig = `
## General Aggregator Arguments:
## The period on which to flush & clear the aggregator.
period = "30s"
## If true, the original metric will be dropped by the
## aggregator and will not get sent to the output plugins.
drop_original = false
`
func (m *BasicStats) SampleConfig() string {
return sampleConfig
}
func (m *BasicStats) Description() string {
return "Keep the aggregate basicstats of each metric passing through."
}
func (m *BasicStats) Add(in telegraf.Metric) {
id := in.HashID()
if _, ok := m.cache[id]; !ok {
// hit an uncached metric, create caches for first time:
a := aggregate{
name: in.Name(),
tags: in.Tags(),
fields: make(map[string]basicstats),
}
for k, v := range in.Fields() {
if fv, ok := convert(v); ok {
a.fields[k] = basicstats{
count: 1,
min: fv,
max: fv,
mean: fv,
M2: 0.0,
}
}
}
m.cache[id] = a
} else {
for k, v := range in.Fields() {
if fv, ok := convert(v); ok {
if _, ok := m.cache[id].fields[k]; !ok {
// hit an uncached field of a cached metric
m.cache[id].fields[k] = basicstats{
count: 1,
min: fv,
max: fv,
mean: fv,
M2: 0.0,
}
continue
}
tmp := m.cache[id].fields[k]
//https://en.m.wikipedia.org/wiki/Algorithms_for_calculating_variance
//variable initialization
x := fv
mean := tmp.mean
M2 := tmp.M2
//counter compute
n := tmp.count + 1
tmp.count = n
//mean compute
delta := x - mean
mean = mean + delta/n
tmp.mean = mean
//variance/stdev compute
M2 = M2 + delta*(x-mean)
tmp.M2 = M2
//max/min compute
if fv < tmp.min {
tmp.min = fv
} else if fv > tmp.max {
tmp.max = fv
}
//store final data
m.cache[id].fields[k] = tmp
}
}
}
}
func (m *BasicStats) Push(acc telegraf.Accumulator) {
for _, aggregate := range m.cache {
fields := map[string]interface{}{}
for k, v := range aggregate.fields {
fields[k+"_count"] = v.count
fields[k+"_min"] = v.min
fields[k+"_max"] = v.max
fields[k+"_mean"] = v.mean
//v.count always >=1
if v.count > 1 {
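// Bessel's correction: dividing by n-1 gives the unbiased sample variance.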
variance := v.M2 / (v.count - 1)
fields[k+"_s2"] = variance
fields[k+"_stdev"] = math.Sqrt(variance)
}
//if count == 1 the sample variance is undefined (n-1 == 0), so s2/stdev are not sent
}
acc.AddFields(aggregate.name, fields, aggregate.tags)
}
}
func (m *BasicStats) Reset() {
m.cache = make(map[uint64]aggregate)
}
func convert(in interface{}) (float64, bool) {
switch v := in.(type) {
case float64:
return v, true
case int64:
return float64(v), true
default:
return 0, false
}
}
func init() {
aggregators.Add("basicstats", func() telegraf.Aggregator {
return NewBasicStats()
})
}

View File

@@ -0,0 +1,151 @@
package basicstats
import (
"math"
"testing"
"time"
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/testutil"
)
var m1, _ = metric.New("m1",
map[string]string{"foo": "bar"},
map[string]interface{}{
"a": int64(1),
"b": int64(1),
"c": float64(2),
"d": float64(2),
},
time.Now(),
)
var m2, _ = metric.New("m1",
map[string]string{"foo": "bar"},
map[string]interface{}{
"a": int64(1),
"b": int64(3),
"c": float64(4),
"d": float64(6),
"e": float64(200),
"ignoreme": "string",
"andme": true,
},
time.Now(),
)
func BenchmarkApply(b *testing.B) {
minmax := NewBasicStats()
for n := 0; n < b.N; n++ {
minmax.Add(m1)
minmax.Add(m2)
}
}
// Test two metrics getting added.
func TestBasicStatsWithPeriod(t *testing.T) {
acc := testutil.Accumulator{}
minmax := NewBasicStats()
minmax.Add(m1)
minmax.Add(m2)
minmax.Push(&acc)
expectedFields := map[string]interface{}{
"a_count": float64(2), //a
"a_max": float64(1),
"a_min": float64(1),
"a_mean": float64(1),
"a_stdev": float64(0),
"a_s2": float64(0),
"b_count": float64(2), //b
"b_max": float64(3),
"b_min": float64(1),
"b_mean": float64(2),
"b_s2": float64(2),
"b_stdev": math.Sqrt(2),
"c_count": float64(2), //c
"c_max": float64(4),
"c_min": float64(2),
"c_mean": float64(3),
"c_s2": float64(2),
"c_stdev": math.Sqrt(2),
"d_count": float64(2), //d
"d_max": float64(6),
"d_min": float64(2),
"d_mean": float64(4),
"d_s2": float64(8),
"d_stdev": math.Sqrt(8),
"e_count": float64(1), //e
"e_max": float64(200),
"e_min": float64(200),
"e_mean": float64(200),
}
expectedTags := map[string]string{
"foo": "bar",
}
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}
// Test two metrics getting added with a push/reset in between (simulates
// getting added in different periods.)
func TestBasicStatsDifferentPeriods(t *testing.T) {
acc := testutil.Accumulator{}
minmax := NewBasicStats()
minmax.Add(m1)
minmax.Push(&acc)
expectedFields := map[string]interface{}{
"a_count": float64(1), //a
"a_max": float64(1),
"a_min": float64(1),
"a_mean": float64(1),
"b_count": float64(1), //b
"b_max": float64(1),
"b_min": float64(1),
"b_mean": float64(1),
"c_count": float64(1), //c
"c_max": float64(2),
"c_min": float64(2),
"c_mean": float64(2),
"d_count": float64(1), //d
"d_max": float64(2),
"d_min": float64(2),
"d_mean": float64(2),
}
expectedTags := map[string]string{
"foo": "bar",
}
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
acc.ClearMetrics()
minmax.Reset()
minmax.Add(m2)
minmax.Push(&acc)
expectedFields = map[string]interface{}{
"a_count": float64(1), //a
"a_max": float64(1),
"a_min": float64(1),
"a_mean": float64(1),
"b_count": float64(1), //b
"b_max": float64(3),
"b_min": float64(3),
"b_mean": float64(3),
"c_count": float64(1), //c
"c_max": float64(4),
"c_min": float64(4),
"c_mean": float64(4),
"d_count": float64(1), //d
"d_max": float64(6),
"d_min": float64(6),
"d_mean": float64(6),
"e_count": float64(1), //e
"e_max": float64(200),
"e_min": float64(200),
"e_mean": float64(200),
}
expectedTags = map[string]string{
"foo": "bar",
}
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}

View File

@@ -0,0 +1,97 @@
# Histogram Aggregator Plugin
The histogram aggregator plugin creates histograms containing the counts of
field values within a range.
Values added to a bucket are also added to the larger buckets in the
distribution. This creates a [cumulative histogram](https://en.wikipedia.org/wiki/Histogram#/media/File:Cumulative_vs_normal_histogram.svg).
Like other Telegraf aggregators, the metric is emitted every `period` seconds.
Bucket counts, however, are not reset between periods and will be
monotonically non-decreasing while Telegraf is running.
#### Design
Each metric passed to the aggregator is matched against the histogram
buckets that have been specified for its fields in the config. If a matching
bucket is found, its count is incremented by one; values above the largest
bucket border are counted in the `+Inf` bucket. Every `period` seconds this
data is forwarded to the outputs.
The bucket hit-counting algorithm is based on the one implemented in the
Prometheus
[client](https://github.com/prometheus/client_golang/blob/master/prometheus/histogram.go).
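As a minimal sketch of the bucket search described above (assumptions: ascending buckets with inclusive right borders, and sample field values taken from the plugin's tests further below):
```go
package main

import (
	"fmt"
	"sort"
)

func main() {
	buckets := []float64{0.0, 10.0, 20.0, 30.0, 40.0}
	// one extra slot for +Inf, mirroring the aggregator's counts layout
	counts := make([]int64, len(buckets)+1)
	for _, v := range []float64{15.3, 15.9, 105} {
		// SearchFloat64s returns the first index i with buckets[i] >= v, i.e.
		// the bucket whose inclusive right border admits v; values above the
		// largest border get len(buckets), the +Inf slot.
		counts[sort.SearchFloat64s(buckets, v)]++
	}
	fmt.Println(counts) // [0 0 2 0 0 1]
}
```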
### Configuration
```toml
# Configuration for aggregate histogram metrics
[[aggregators.histogram]]
## The period in which to flush the aggregator.
period = "30s"
## If true, the original metric will be dropped by the
## aggregator and will not get sent to the output plugins.
drop_original = false
## Example config that aggregates all fields of the metric.
# [[aggregators.histogram.config]]
# ## The set of buckets.
# buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0]
# ## The name of metric.
# measurement_name = "cpu"
## Example config that aggregates only specific fields of the metric.
# [[aggregators.histogram.config]]
# ## The set of buckets.
# buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
# ## The name of metric.
# measurement_name = "diskio"
# ## The concrete fields of metric
# fields = ["io_time", "read_time", "write_time"]
```
The user is responsible for defining the bounds of the histogram bucket as
well as the measurement name and fields to aggregate.
Each histogram config section must contain a `buckets` and `measurement_name`
option. Optionally, if `fields` is set only the fields listed will be
aggregated. If `fields` is not set all fields are aggregated.
The `buckets` option contains a list of floats which specify the bucket
boundaries. Each float value defines the inclusive upper bound of the bucket.
The `+Inf` bucket is added automatically and does not need to be defined.
### Measurements & Fields:
The postfix `bucket` will be added to each field key.
- measurement1
- field1_bucket
- field2_bucket
### Tags:
All measurements are given the tag `le`, which holds the right border of the
bucket: the metric value is less than or equal to the value of this tag. For
example, assume a metric value of 10 and the buckets [5, 10, 30, 70, 100].
The value falls into the bucket with the right border `10`, so that sample
carries the tag `le=10`.
### Example Output:
```
cpu,cpu=cpu1,host=localhost,le=0.0 usage_idle_bucket=0i 1486998330000000000
cpu,cpu=cpu1,host=localhost,le=10.0 usage_idle_bucket=0i 1486998330000000000
cpu,cpu=cpu1,host=localhost,le=20.0 usage_idle_bucket=1i 1486998330000000000
cpu,cpu=cpu1,host=localhost,le=30.0 usage_idle_bucket=2i 1486998330000000000
cpu,cpu=cpu1,host=localhost,le=40.0 usage_idle_bucket=2i 1486998330000000000
cpu,cpu=cpu1,host=localhost,le=50.0 usage_idle_bucket=2i 1486998330000000000
cpu,cpu=cpu1,host=localhost,le=60.0 usage_idle_bucket=2i 1486998330000000000
cpu,cpu=cpu1,host=localhost,le=70.0 usage_idle_bucket=2i 1486998330000000000
cpu,cpu=cpu1,host=localhost,le=80.0 usage_idle_bucket=2i 1486998330000000000
cpu,cpu=cpu1,host=localhost,le=90.0 usage_idle_bucket=2i 1486998330000000000
cpu,cpu=cpu1,host=localhost,le=100.0 usage_idle_bucket=2i 1486998330000000000
cpu,cpu=cpu1,host=localhost,le=+Inf usage_idle_bucket=2i 1486998330000000000
```
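The cumulative counts shown above can be reproduced by a running sum over the raw per-bucket hits; a small sketch of the idea, not the plugin's exact code:
```go
package main

import "fmt"

func main() {
	// raw hits per bucket for usage_idle; the last slot is +Inf
	hits := []int64{0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0}
	borders := []string{"0", "10", "20", "30", "40", "50", "60", "70", "80", "90", "100", "+Inf"}
	var total int64
	for i, h := range hits {
		total += h // each le bucket includes every smaller bucket
		fmt.Printf("le=%s usage_idle_bucket=%di\n", borders[i], total)
	}
}
```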

View File

@@ -0,0 +1,315 @@
package histogram
import (
"sort"
"strconv"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/aggregators"
)
// bucketTag is the tag that contains the right border of the bucket
const bucketTag = "le"
// bucketInf is the right bucket border for infinite values
const bucketInf = "+Inf"
// HistogramAggregator is an aggregator that holds the histogram configs and the histograms built for the configured metrics
type HistogramAggregator struct {
Configs []config `toml:"config"`
buckets bucketsByMetrics
cache map[uint64]metricHistogramCollection
}
// config contains the metric name, the fields of the metric, and the histogram buckets.
type config struct {
Metric string `toml:"measurement_name"`
Fields []string `toml:"fields"`
Buckets buckets `toml:"buckets"`
}
// bucketsByMetrics contains the buckets grouped by metric and field name
type bucketsByMetrics map[string]bucketsByFields
// bucketsByFields contains the buckets grouped by field name
type bucketsByFields map[string]buckets
// buckets contains the right borders of the buckets
type buckets []float64
// metricHistogramCollection aggregates the histogram data
type metricHistogramCollection struct {
histogramCollection map[string]counts
name string
tags map[string]string
}
// counts is the number of hits in the bucket
type counts []int64
// groupedByCountFields contains fields grouped by their count values
type groupedByCountFields struct {
name string
tags map[string]string
fieldsWithCount map[string]int64
}
// NewHistogramAggregator creates new histogram aggregator
func NewHistogramAggregator() telegraf.Aggregator {
h := &HistogramAggregator{}
h.buckets = make(bucketsByMetrics)
h.resetCache()
return h
}
var sampleConfig = `
## The period in which to flush the aggregator.
period = "30s"
## If true, the original metric will be dropped by the
## aggregator and will not get sent to the output plugins.
drop_original = false
## Example config that aggregates all fields of the metric.
# [[aggregators.histogram.config]]
# ## The set of buckets.
# buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0]
# ## The name of metric.
# measurement_name = "cpu"
## Example config that aggregates only specific fields of the metric.
# [[aggregators.histogram.config]]
# ## The set of buckets.
# buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
# ## The name of metric.
# measurement_name = "diskio"
# ## The concrete fields of metric
# fields = ["io_time", "read_time", "write_time"]
`
// SampleConfig returns sample of config
func (h *HistogramAggregator) SampleConfig() string {
return sampleConfig
}
// Description returns description of aggregator plugin
func (h *HistogramAggregator) Description() string {
return "Create aggregate histograms."
}
// Add adds new hit to the buckets
func (h *HistogramAggregator) Add(in telegraf.Metric) {
bucketsByField := make(map[string][]float64)
for field := range in.Fields() {
buckets := h.getBuckets(in.Name(), field)
if buckets != nil {
bucketsByField[field] = buckets
}
}
if len(bucketsByField) == 0 {
return
}
id := in.HashID()
agr, ok := h.cache[id]
if !ok {
agr = metricHistogramCollection{
name: in.Name(),
tags: in.Tags(),
histogramCollection: make(map[string]counts),
}
}
for field, value := range in.Fields() {
if buckets, ok := bucketsByField[field]; ok {
if agr.histogramCollection[field] == nil {
agr.histogramCollection[field] = make(counts, len(buckets)+1)
}
if value, ok := convert(value); ok {
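// SearchFloat64s returns the first index i with buckets[i] >= value, so
// values above the largest border land at len(buckets), the +Inf slot.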
index := sort.SearchFloat64s(buckets, value)
agr.histogramCollection[field][index]++
}
}
}
h.cache[id] = agr
}
// Push returns histogram values for metrics
func (h *HistogramAggregator) Push(acc telegraf.Accumulator) {
metricsWithGroupedFields := []groupedByCountFields{}
for _, aggregate := range h.cache {
for field, counts := range aggregate.histogramCollection {
h.groupFieldsByBuckets(&metricsWithGroupedFields, aggregate.name, field, copyTags(aggregate.tags), counts)
}
}
for _, metric := range metricsWithGroupedFields {
acc.AddFields(metric.name, makeFieldsWithCount(metric.fieldsWithCount), metric.tags)
}
}
// groupFieldsByBuckets groups fields by metric buckets which are represented as tags
func (h *HistogramAggregator) groupFieldsByBuckets(
metricsWithGroupedFields *[]groupedByCountFields,
name string,
field string,
tags map[string]string,
counts []int64,
) {
count := int64(0)
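// a running sum over the buckets makes the emitted histogram cumulative:
// each le bucket's count includes the hits of all smaller buckets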
for index, bucket := range h.getBuckets(name, field) {
count += counts[index]
tags[bucketTag] = strconv.FormatFloat(bucket, 'f', -1, 64)
h.groupField(metricsWithGroupedFields, name, field, count, copyTags(tags))
}
count += counts[len(counts)-1]
tags[bucketTag] = bucketInf
h.groupField(metricsWithGroupedFields, name, field, count, tags)
}
// groupField groups field by count value
func (h *HistogramAggregator) groupField(
metricsWithGroupedFields *[]groupedByCountFields,
name string,
field string,
count int64,
tags map[string]string,
) {
for key, metric := range *metricsWithGroupedFields {
if name == metric.name && isTagsIdentical(tags, metric.tags) {
(*metricsWithGroupedFields)[key].fieldsWithCount[field] = count
return
}
}
fieldsWithCount := map[string]int64{
field: count,
}
*metricsWithGroupedFields = append(
*metricsWithGroupedFields,
groupedByCountFields{name: name, tags: tags, fieldsWithCount: fieldsWithCount},
)
}
// Reset does nothing, because the counts must be collected over a long time;
// clearing them every period would leave each histogram with only a small
// slice of the distribution.
func (h *HistogramAggregator) Reset() {}
// resetCache resets cached counts(hits) in the buckets
func (h *HistogramAggregator) resetCache() {
h.cache = make(map[uint64]metricHistogramCollection)
}
// getBuckets finds buckets and returns them
func (h *HistogramAggregator) getBuckets(metric string, field string) []float64 {
if buckets, ok := h.buckets[metric][field]; ok {
return buckets
}
for _, config := range h.Configs {
if config.Metric == metric {
if !isBucketExists(field, config) {
continue
}
if _, ok := h.buckets[metric]; !ok {
h.buckets[metric] = make(bucketsByFields)
}
h.buckets[metric][field] = sortBuckets(config.Buckets)
}
}
return h.buckets[metric][field]
}
// isBucketExists checks whether buckets exist for the passed field
func isBucketExists(field string, cfg config) bool {
if len(cfg.Fields) == 0 {
return true
}
for _, fl := range cfg.Fields {
if fl == field {
return true
}
}
return false
}
// sortBuckets sorts the buckets if it is needed
func sortBuckets(buckets []float64) []float64 {
for i, bucket := range buckets {
if i < len(buckets)-1 && bucket >= buckets[i+1] {
sort.Float64s(buckets)
break
}
}
return buckets
}
// convert converts interface to concrete type
func convert(in interface{}) (float64, bool) {
switch v := in.(type) {
case float64:
return v, true
case int64:
return float64(v), true
default:
return 0, false
}
}
// copyTags copies tags
func copyTags(tags map[string]string) map[string]string {
copiedTags := map[string]string{}
for key, val := range tags {
copiedTags[key] = val
}
return copiedTags
}
// isTagsIdentical checks whether two tag maps are identical
func isTagsIdentical(originalTags, checkedTags map[string]string) bool {
if len(originalTags) != len(checkedTags) {
return false
}
for tagName, tagValue := range originalTags {
if tagValue != checkedTags[tagName] {
return false
}
}
return true
}
// makeFieldsWithCount assigns count value to all metric fields
func makeFieldsWithCount(fieldsWithCountIn map[string]int64) map[string]interface{} {
fieldsWithCountOut := map[string]interface{}{}
for field, count := range fieldsWithCountIn {
fieldsWithCountOut[field+"_bucket"] = count
}
return fieldsWithCountOut
}
// init initializes histogram aggregator plugin
func init() {
aggregators.Add("histogram", func() telegraf.Aggregator {
return NewHistogramAggregator()
})
}

View File

@@ -0,0 +1,210 @@
package histogram
import (
"fmt"
"testing"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
)
// NewTestHistogram creates new test histogram aggregation with specified config
func NewTestHistogram(cfg []config) telegraf.Aggregator {
htm := &HistogramAggregator{Configs: cfg}
htm.buckets = make(bucketsByMetrics)
htm.resetCache()
return htm
}
// firstMetric1 is the first test metric
var firstMetric1, _ = metric.New(
"first_metric_name",
map[string]string{"tag_name": "tag_value"},
map[string]interface{}{
"a": float64(15.3),
"b": float64(40),
},
time.Now(),
)
// firstMetric2 is the first test metric with different values
var firstMetric2, _ = metric.New(
"first_metric_name",
map[string]string{"tag_name": "tag_value"},
map[string]interface{}{
"a": float64(15.9),
"c": float64(40),
},
time.Now(),
)
// secondMetric is the second metric
var secondMetric, _ = metric.New(
"second_metric_name",
map[string]string{"tag_name": "tag_value"},
map[string]interface{}{
"a": float64(105),
"ignoreme": "string",
"andme": true,
},
time.Now(),
)
// BenchmarkApply runs benchmarks
func BenchmarkApply(b *testing.B) {
histogram := NewHistogramAggregator()
for n := 0; n < b.N; n++ {
histogram.Add(firstMetric1)
histogram.Add(firstMetric2)
histogram.Add(secondMetric)
}
}
// TestHistogramWithPeriodAndOneField tests metrics for one period and for one field
func TestHistogramWithPeriodAndOneField(t *testing.T) {
var cfg []config
cfg = append(cfg, config{Metric: "first_metric_name", Fields: []string{"a"}, Buckets: []float64{0.0, 10.0, 20.0, 30.0, 40.0}})
histogram := NewTestHistogram(cfg)
acc := &testutil.Accumulator{}
histogram.Add(firstMetric1)
histogram.Add(firstMetric2)
histogram.Push(acc)
if len(acc.Metrics) != 6 {
assert.Fail(t, "Incorrect number of metrics")
}
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(0)}, "0")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(0)}, "10")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2)}, "20")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2)}, "30")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2)}, "40")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2)}, bucketInf)
}
// TestHistogramWithPeriodAndAllFields tests two metrics for one period and for all fields
func TestHistogramWithPeriodAndAllFields(t *testing.T) {
var cfg []config
cfg = append(cfg, config{Metric: "first_metric_name", Buckets: []float64{0.0, 15.5, 20.0, 30.0, 40.0}})
cfg = append(cfg, config{Metric: "second_metric_name", Buckets: []float64{0.0, 4.0, 10.0, 23.0, 30.0}})
histogram := NewTestHistogram(cfg)
acc := &testutil.Accumulator{}
histogram.Add(firstMetric1)
histogram.Add(firstMetric2)
histogram.Add(secondMetric)
histogram.Push(acc)
if len(acc.Metrics) != 12 {
assert.Fail(t, "Incorrect number of metrics")
}
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(0), "b_bucket": int64(0), "c_bucket": int64(0)}, "0")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(1), "b_bucket": int64(0), "c_bucket": int64(0)}, "15.5")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2), "b_bucket": int64(0), "c_bucket": int64(0)}, "20")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2), "b_bucket": int64(0), "c_bucket": int64(0)}, "30")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2), "b_bucket": int64(1), "c_bucket": int64(1)}, "40")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2), "b_bucket": int64(1), "c_bucket": int64(1)}, bucketInf)
assertContainsTaggedField(t, acc, "second_metric_name", map[string]interface{}{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, "0")
assertContainsTaggedField(t, acc, "second_metric_name", map[string]interface{}{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, "4")
assertContainsTaggedField(t, acc, "second_metric_name", map[string]interface{}{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, "10")
assertContainsTaggedField(t, acc, "second_metric_name", map[string]interface{}{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, "23")
assertContainsTaggedField(t, acc, "second_metric_name", map[string]interface{}{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, "30")
assertContainsTaggedField(t, acc, "second_metric_name", map[string]interface{}{"a_bucket": int64(1), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, bucketInf)
}
// TestHistogramDifferentPeriodsAndAllFields tests two metrics getting added with a push/reset in between (simulates
// getting added in different periods) for all fields
func TestHistogramDifferentPeriodsAndAllFields(t *testing.T) {
var cfg []config
cfg = append(cfg, config{Metric: "first_metric_name", Buckets: []float64{0.0, 10.0, 20.0, 30.0, 40.0}})
histogram := NewTestHistogram(cfg)
acc := &testutil.Accumulator{}
histogram.Add(firstMetric1)
histogram.Push(acc)
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(0), "b_bucket": int64(0)}, "0")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(0), "b_bucket": int64(0)}, "10")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(1), "b_bucket": int64(0)}, "20")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(1), "b_bucket": int64(0)}, "30")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(1), "b_bucket": int64(1)}, "40")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(1), "b_bucket": int64(1)}, bucketInf)
acc.ClearMetrics()
histogram.Add(firstMetric2)
histogram.Push(acc)
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(0), "b_bucket": int64(0), "c_bucket": int64(0)}, "0")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(0), "b_bucket": int64(0), "c_bucket": int64(0)}, "10")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2), "b_bucket": int64(0), "c_bucket": int64(0)}, "20")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2), "b_bucket": int64(0), "c_bucket": int64(0)}, "30")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2), "b_bucket": int64(1), "c_bucket": int64(1)}, "40")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2), "b_bucket": int64(1), "c_bucket": int64(1)}, bucketInf)
}
// TestWrongBucketsOrder tests the calling panic with incorrect order of buckets
func TestWrongBucketsOrder(t *testing.T) {
defer func() {
if r := recover(); r != nil {
assert.Equal(
t,
"histogram buckets must be in increasing order: 90.00 >= 20.00, metrics: first_metric_name, field: a",
fmt.Sprint(r),
)
}
}()
var cfg []config
cfg = append(cfg, config{Metric: "first_metric_name", Buckets: []float64{0.0, 90.0, 20.0, 30.0, 40.0}})
histogram := NewTestHistogram(cfg)
histogram.Add(firstMetric2)
}
// assertContainsTaggedField is a helper function for checking histogram data
func assertContainsTaggedField(t *testing.T, acc *testutil.Accumulator, metricName string, fields map[string]interface{}, le string) {
acc.Lock()
defer acc.Unlock()
for _, checkedMetric := range acc.Metrics {
// check metric name
if checkedMetric.Measurement != metricName {
continue
}
// check "le" tag
if checkedMetric.Tags[bucketTag] != le {
continue
}
// check fields
isFieldsIdentical := true
for field := range fields {
if _, ok := checkedMetric.Fields[field]; !ok {
isFieldsIdentical = false
break
}
}
if !isFieldsIdentical {
continue
}
// check fields with their counts
if assert.Equal(t, fields, checkedMetric.Fields) {
return
}
assert.Fail(t, fmt.Sprintf("incorrect fields %v of metric %s", fields, metricName))
}
assert.Fail(t, fmt.Sprintf("unknown measurement '%s' with tags: %v, fields: %v", metricName, map[string]string{"le": le}, fields))
}

View File

@@ -1,45 +1,61 @@
# Example Input Plugin
The example plugin gathers metrics about example things
The example plugin gathers metrics about example things. This description
explains at a high level what the plugin does and provides links to where
additional information can be found.
### Configuration:
This section contains the default TOML to configure the plugin. You can
generate it using `telegraf --usage <plugin-name>`.
```toml
# Description
[[inputs.example]]
# SampleConfig
example_option = "example_value"
```
### Measurements & Fields:
### Metrics:
<optional description>
Here you should add an optional description and links to where the user can
get more information about the measurements.
If the output is determined dynamically based on the input source, or there
are more metrics than can reasonably be listed, describe how the input is
mapped to the output.
- measurement1
- field1 (type, unit)
- field2 (float, percent)
- measurement2
- field3 (integer, bytes)
### Tags:
- All measurements have the following tags:
- tags:
- tag1 (optional description)
- tag2
- measurement2 has the following tags:
- fields:
- field1 (type, unit)
- field2 (float, percent)
- measurement2
- tags:
- tag3
- fields:
- field3 (integer, bytes)
### Sample Queries:
These are some useful queries (to generate dashboards or other) to run against data from this plugin:
This section should contain some useful InfluxDB queries that can be used to
get started with the plugin or to generate dashboards. For each query listed,
describe at a high level what data is returned.
Get the max, mean, and min for the measurement in the last hour:
```
SELECT max(field1), mean(field1), min(field1) FROM measurement1 WHERE tag1=bar AND time > now() - 1h GROUP BY tag
```
### Example Output:
This section shows example output in Line Protocol format. You can often use
`telegraf --input-filter <plugin-name> --test` or use the `file` output to get
this information.
```
$ ./telegraf -config telegraf.conf -input-filter example -test
measurement1,tag1=foo,tag2=bar field1=1i,field2=2.1 1453831884664956455
measurement2,tag1=foo,tag2=bar,tag3=baz field3=1i 1453831884664956455
```

File diff suppressed because one or more lines are too long

View File

@@ -10,7 +10,6 @@ import (
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal/errchan"
"github.com/influxdata/telegraf/plugins/inputs"
as "github.com/aerospike/aerospike-client-go"
@@ -41,17 +40,16 @@ func (a *Aerospike) Gather(acc telegraf.Accumulator) error {
}
var wg sync.WaitGroup
errChan := errchan.New(len(a.Servers))
wg.Add(len(a.Servers))
for _, server := range a.Servers {
go func(serv string) {
defer wg.Done()
errChan.C <- a.gatherServer(serv, acc)
acc.AddError(a.gatherServer(serv, acc))
}(server)
}
wg.Wait()
return errChan.Error()
return nil
}
func (a *Aerospike) gatherServer(hostport string, acc telegraf.Accumulator) error {
@@ -75,10 +73,9 @@ func (a *Aerospike) gatherServer(hostport string, acc telegraf.Accumulator) erro
for _, n := range nodes {
tags := map[string]string{
"aerospike_host": hostport,
"node_name": n.GetName(),
}
fields := map[string]interface{}{
"node_name": n.GetName(),
}
fields := make(map[string]interface{})
stats, err := as.RequestNodeStats(n)
if err != nil {
return err
@@ -88,7 +85,7 @@ func (a *Aerospike) gatherServer(hostport string, acc telegraf.Accumulator) erro
if err == nil {
fields[strings.Replace(k, "-", "_", -1)] = val
} else {
log.Printf("I! skipping aerospike field %v with int64 overflow", k)
log.Printf("I! skipping aerospike field %v with int64 overflow: %q", k, v)
}
}
acc.AddFields("aerospike_node", fields, tags, time.Now())
@@ -102,11 +99,10 @@ func (a *Aerospike) gatherServer(hostport string, acc telegraf.Accumulator) erro
for _, namespace := range namespaces {
nTags := map[string]string{
"aerospike_host": hostport,
"node_name": n.GetName(),
}
nTags["namespace"] = namespace
nFields := map[string]interface{}{
"node_name": n.GetName(),
}
nFields := make(map[string]interface{})
info, err := as.RequestNodeInfo(n, "namespace/"+namespace)
if err != nil {
continue
@@ -121,7 +117,7 @@ func (a *Aerospike) gatherServer(hostport string, acc telegraf.Accumulator) erro
if err == nil {
nFields[strings.Replace(parts[0], "-", "_", -1)] = val
} else {
log.Printf("I! skipping aerospike field %v with int64 overflow", parts[0])
log.Printf("I! skipping aerospike field %v with int64 overflow: %q", parts[0], parts[1])
}
}
acc.AddFields("aerospike_namespace", nFields, nTags, time.Now())

View File

@@ -19,12 +19,14 @@ func TestAerospikeStatistics(t *testing.T) {
var acc testutil.Accumulator
err := a.Gather(&acc)
err := acc.GatherError(a.Gather)
require.NoError(t, err)
assert.True(t, acc.HasMeasurement("aerospike_node"))
assert.True(t, acc.HasTag("aerospike_node", "node_name"))
assert.True(t, acc.HasMeasurement("aerospike_namespace"))
assert.True(t, acc.HasIntField("aerospike_node", "batch_error"))
assert.True(t, acc.HasTag("aerospike_namespace", "node_name"))
assert.True(t, acc.HasInt64Field("aerospike_node", "batch_error"))
}
func TestAerospikeStatisticsPartialErr(t *testing.T) {
@@ -41,12 +43,11 @@ func TestAerospikeStatisticsPartialErr(t *testing.T) {
var acc testutil.Accumulator
err := a.Gather(&acc)
require.Error(t, err)
require.Error(t, acc.GatherError(a.Gather))
assert.True(t, acc.HasMeasurement("aerospike_node"))
assert.True(t, acc.HasMeasurement("aerospike_namespace"))
assert.True(t, acc.HasIntField("aerospike_node", "batch_error"))
assert.True(t, acc.HasInt64Field("aerospike_node", "batch_error"))
}
func TestAerospikeParseValue(t *testing.T) {

View File

@@ -5,6 +5,7 @@ import (
_ "github.com/influxdata/telegraf/plugins/inputs/amqp_consumer"
_ "github.com/influxdata/telegraf/plugins/inputs/apache"
_ "github.com/influxdata/telegraf/plugins/inputs/bcache"
_ "github.com/influxdata/telegraf/plugins/inputs/bond"
_ "github.com/influxdata/telegraf/plugins/inputs/cassandra"
_ "github.com/influxdata/telegraf/plugins/inputs/ceph"
_ "github.com/influxdata/telegraf/plugins/inputs/cgroup"
@@ -14,13 +15,17 @@ import (
_ "github.com/influxdata/telegraf/plugins/inputs/consul"
_ "github.com/influxdata/telegraf/plugins/inputs/couchbase"
_ "github.com/influxdata/telegraf/plugins/inputs/couchdb"
_ "github.com/influxdata/telegraf/plugins/inputs/dcos"
_ "github.com/influxdata/telegraf/plugins/inputs/disque"
_ "github.com/influxdata/telegraf/plugins/inputs/dmcache"
_ "github.com/influxdata/telegraf/plugins/inputs/dns_query"
_ "github.com/influxdata/telegraf/plugins/inputs/docker"
_ "github.com/influxdata/telegraf/plugins/inputs/dovecot"
_ "github.com/influxdata/telegraf/plugins/inputs/elasticsearch"
_ "github.com/influxdata/telegraf/plugins/inputs/exec"
_ "github.com/influxdata/telegraf/plugins/inputs/fail2ban"
_ "github.com/influxdata/telegraf/plugins/inputs/filestat"
_ "github.com/influxdata/telegraf/plugins/inputs/fluentd"
_ "github.com/influxdata/telegraf/plugins/inputs/graylog"
_ "github.com/influxdata/telegraf/plugins/inputs/haproxy"
_ "github.com/influxdata/telegraf/plugins/inputs/hddtemp"
@@ -29,10 +34,14 @@ import (
_ "github.com/influxdata/telegraf/plugins/inputs/httpjson"
_ "github.com/influxdata/telegraf/plugins/inputs/influxdb"
_ "github.com/influxdata/telegraf/plugins/inputs/internal"
_ "github.com/influxdata/telegraf/plugins/inputs/interrupts"
_ "github.com/influxdata/telegraf/plugins/inputs/ipmi_sensor"
_ "github.com/influxdata/telegraf/plugins/inputs/iptables"
_ "github.com/influxdata/telegraf/plugins/inputs/jolokia"
_ "github.com/influxdata/telegraf/plugins/inputs/jolokia2"
_ "github.com/influxdata/telegraf/plugins/inputs/kafka_consumer"
_ "github.com/influxdata/telegraf/plugins/inputs/kafka_consumer_legacy"
_ "github.com/influxdata/telegraf/plugins/inputs/kapacitor"
_ "github.com/influxdata/telegraf/plugins/inputs/kubernetes"
_ "github.com/influxdata/telegraf/plugins/inputs/leofs"
_ "github.com/influxdata/telegraf/plugins/inputs/logparser"
@@ -40,19 +49,25 @@ import (
_ "github.com/influxdata/telegraf/plugins/inputs/mailchimp"
_ "github.com/influxdata/telegraf/plugins/inputs/memcached"
_ "github.com/influxdata/telegraf/plugins/inputs/mesos"
_ "github.com/influxdata/telegraf/plugins/inputs/minecraft"
_ "github.com/influxdata/telegraf/plugins/inputs/mongodb"
_ "github.com/influxdata/telegraf/plugins/inputs/mqtt_consumer"
_ "github.com/influxdata/telegraf/plugins/inputs/mysql"
_ "github.com/influxdata/telegraf/plugins/inputs/nats_consumer"
_ "github.com/influxdata/telegraf/plugins/inputs/net_response"
_ "github.com/influxdata/telegraf/plugins/inputs/nginx"
_ "github.com/influxdata/telegraf/plugins/inputs/nginx_plus"
_ "github.com/influxdata/telegraf/plugins/inputs/nsq"
_ "github.com/influxdata/telegraf/plugins/inputs/nsq_consumer"
_ "github.com/influxdata/telegraf/plugins/inputs/nstat"
_ "github.com/influxdata/telegraf/plugins/inputs/ntpq"
_ "github.com/influxdata/telegraf/plugins/inputs/openldap"
_ "github.com/influxdata/telegraf/plugins/inputs/opensmtpd"
_ "github.com/influxdata/telegraf/plugins/inputs/passenger"
_ "github.com/influxdata/telegraf/plugins/inputs/pf"
_ "github.com/influxdata/telegraf/plugins/inputs/phpfpm"
_ "github.com/influxdata/telegraf/plugins/inputs/ping"
_ "github.com/influxdata/telegraf/plugins/inputs/postfix"
_ "github.com/influxdata/telegraf/plugins/inputs/postgresql"
_ "github.com/influxdata/telegraf/plugins/inputs/postgresql_extensible"
_ "github.com/influxdata/telegraf/plugins/inputs/powerdns"
@@ -64,22 +79,30 @@ import (
_ "github.com/influxdata/telegraf/plugins/inputs/redis"
_ "github.com/influxdata/telegraf/plugins/inputs/rethinkdb"
_ "github.com/influxdata/telegraf/plugins/inputs/riak"
_ "github.com/influxdata/telegraf/plugins/inputs/salesforce"
_ "github.com/influxdata/telegraf/plugins/inputs/sensors"
_ "github.com/influxdata/telegraf/plugins/inputs/smart"
_ "github.com/influxdata/telegraf/plugins/inputs/snmp"
_ "github.com/influxdata/telegraf/plugins/inputs/snmp_legacy"
_ "github.com/influxdata/telegraf/plugins/inputs/socket_listener"
_ "github.com/influxdata/telegraf/plugins/inputs/solr"
_ "github.com/influxdata/telegraf/plugins/inputs/sqlserver"
_ "github.com/influxdata/telegraf/plugins/inputs/statsd"
_ "github.com/influxdata/telegraf/plugins/inputs/sysstat"
_ "github.com/influxdata/telegraf/plugins/inputs/system"
_ "github.com/influxdata/telegraf/plugins/inputs/tail"
_ "github.com/influxdata/telegraf/plugins/inputs/tcp_listener"
_ "github.com/influxdata/telegraf/plugins/inputs/teamspeak"
_ "github.com/influxdata/telegraf/plugins/inputs/tomcat"
_ "github.com/influxdata/telegraf/plugins/inputs/trig"
_ "github.com/influxdata/telegraf/plugins/inputs/twemproxy"
_ "github.com/influxdata/telegraf/plugins/inputs/udp_listener"
_ "github.com/influxdata/telegraf/plugins/inputs/unbound"
_ "github.com/influxdata/telegraf/plugins/inputs/varnish"
_ "github.com/influxdata/telegraf/plugins/inputs/webhooks"
_ "github.com/influxdata/telegraf/plugins/inputs/win_perf_counters"
_ "github.com/influxdata/telegraf/plugins/inputs/win_services"
_ "github.com/influxdata/telegraf/plugins/inputs/zfs"
_ "github.com/influxdata/telegraf/plugins/inputs/zipkin"
_ "github.com/influxdata/telegraf/plugins/inputs/zookeeper"
)

View File

@@ -39,9 +39,9 @@ The following defaults are known to work with RabbitMQ:
## Use SSL but skip chain & host verification
# insecure_skip_verify = false
## Data format to output.
## Each data format has it's own unique set of configuration options, read
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "influx"
```

View File

@@ -85,10 +85,10 @@ func (a *AMQPConsumer) SampleConfig() string {
## Use SSL but skip chain & host verification
# insecure_skip_verify = false
## Data format to output.
## Each data format has it's own unique set of configuration options, read
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "influx"
`
}

View File

@@ -1,55 +1,84 @@
# Telegraf plugin: Apache
# Apache Input Plugin
#### Plugin arguments:
- **urls** []string: List of apache-status URLs to collect from. Default is "http://localhost/server-status?auto".
- **username** string: Username for HTTP basic authentication
- **password** string: Password for HTTP basic authentication
- **timeout** duration: time that the HTTP connection will remain waiting for response. Default 4 seconds ("4s")
The Apache plugin collects server performance information using the [`mod_status`](https://httpd.apache.org/docs/2.4/mod/mod_status.html) module of the [Apache HTTP Server](https://httpd.apache.org/).
##### Optional SSL Config
Typically, the `mod_status` module is configured to expose a page at the `/server-status?auto` location of the Apache server. The [ExtendedStatus](https://httpd.apache.org/docs/2.4/mod/core.html#extendedstatus) option must be enabled in order to collect all available fields. For information about how to configure your server, refer to the [module documentation](https://httpd.apache.org/docs/2.4/mod/mod_status.html#enable).
- **ssl_ca** string: the full path for the SSL CA certicate
- **ssl_cert** string: the full path for the SSL certificate
- **ssl_key** string: the full path for the key file
- **insecure_skip_verify** bool: if true HTTP client will skip all SSL verifications related to peer and host. Default to false
### Configuration:
#### Description
```toml
# Read Apache status information (mod_status)
[[inputs.apache]]
## An array of URLs to gather from, must be directed at the machine
## readable version of the mod_status page including the auto query string.
## Default is "http://localhost/server-status?auto".
urls = ["http://localhost/server-status?auto"]
The Apache plugin collects from the /server-status?auto URL. See
[apache.org/server-status?auto](http://www.apache.org/server-status?auto) for an
example. And
[here](http://httpd.apache.org/docs/2.2/mod/mod_status.html) for the apache
mod_status documentation.
## Credentials for basic HTTP authentication.
# username = "myuser"
# password = "mypassword"
# Measurements:
## Maximum time to receive response.
# response_timeout = "5s"
Meta:
- tags: `port=<port>`, `server=url`
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
## Use SSL but skip chain & host verification
# insecure_skip_verify = false
```
- apache_TotalAccesses
- apache_TotalkBytes
- apache_CPULoad
- apache_Uptime
- apache_ReqPerSec
- apache_BytesPerSec
- apache_BytesPerReq
- apache_BusyWorkers
- apache_IdleWorkers
- apache_ConnsTotal
- apache_ConnsAsyncWriting
- apache_ConnsAsyncKeepAlive
- apache_ConnsAsyncClosing
### Measurements & Fields:
### Scoreboard measurements
- apache
- BusyWorkers (float)
- BytesPerReq (float)
- BytesPerSec (float)
- ConnsAsyncClosing (float)
- ConnsAsyncKeepAlive (float)
- ConnsAsyncWriting (float)
- ConnsTotal (float)
- CPUChildrenSystem (float)
- CPUChildrenUser (float)
- CPULoad (float)
- CPUSystem (float)
- CPUUser (float)
- IdleWorkers (float)
- Load1 (float)
- Load5 (float)
- Load15 (float)
- ParentServerConfigGeneration (float)
- ParentServerMPMGeneration (float)
- ReqPerSec (float)
- ServerUptimeSeconds (float)
- TotalAccesses (float)
- TotalkBytes (float)
- Uptime (float)
- apache_scboard_waiting
- apache_scboard_starting
- apache_scboard_reading
- apache_scboard_sending
- apache_scboard_keepalive
- apache_scboard_dnslookup
- apache_scboard_closing
- apache_scboard_logging
- apache_scboard_finishing
- apache_scboard_idle_cleanup
- apache_scboard_open
The following fields are collected from the `Scoreboard`, and represent the number of requests in the given state:
- apache
- scboard_closing (float)
- scboard_dnslookup (float)
- scboard_finishing (float)
- scboard_idle_cleanup (float)
- scboard_keepalive (float)
- scboard_logging (float)
- scboard_open (float)
- scboard_reading (float)
- scboard_sending (float)
- scboard_starting (float)
- scboard_waiting (float)
### Tags:
- All measurements have the following tags:
- port
- server
### Example Output:
```
apache,port=80,server=debian-stretch-apache BusyWorkers=1,BytesPerReq=0,BytesPerSec=0,CPUChildrenSystem=0,CPUChildrenUser=0,CPULoad=0.00995025,CPUSystem=0.01,CPUUser=0.01,ConnsAsyncClosing=0,ConnsAsyncKeepAlive=0,ConnsAsyncWriting=0,ConnsTotal=0,IdleWorkers=49,Load1=0.01,Load15=0,Load5=0,ParentServerConfigGeneration=3,ParentServerMPMGeneration=2,ReqPerSec=0.00497512,ServerUptimeSeconds=201,TotalAccesses=1,TotalkBytes=0,Uptime=201,scboard_closing=0,scboard_dnslookup=0,scboard_finishing=0,scboard_idle_cleanup=0,scboard_keepalive=0,scboard_logging=0,scboard_open=100,scboard_reading=0,scboard_sending=1,scboard_starting=0,scboard_waiting=49 1502489900000000000
```
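The `?auto` page is plain `key: value` text. A rough sketch of how such a body reduces to the field names above, with keys stripped of spaces (e.g. `Total Accesses` becomes `TotalAccesses`); this is a simplified illustration, not the plugin's exact code:
```go
package main

import (
	"bufio"
	"fmt"
	"strings"
)

func main() {
	// a fragment of mod_status ?auto output
	body := "Total Accesses: 1\nCPULoad: .00995025\nBusyWorkers: 1\nIdleWorkers: 49\n"
	fields := make(map[string]string)
	sc := bufio.NewScanner(strings.NewReader(body))
	for sc.Scan() {
		parts := strings.SplitN(sc.Text(), ":", 2)
		if len(parts) != 2 {
			continue
		}
		// spaces are stripped from the key: "Total Accesses" -> "TotalAccesses"
		key := strings.Replace(strings.TrimSpace(parts[0]), " ", "", -1)
		fields[key] = strings.TrimSpace(parts[1])
	}
	fmt.Println(fields["TotalAccesses"], fields["BusyWorkers"]) // 1 1
}
```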

View File

@@ -8,6 +8,7 @@ import (
"net/url"
"strconv"
"strings"
"sync"
"time"
"github.com/influxdata/telegraf"
@@ -28,18 +29,22 @@ type Apache struct {
SSLKey string `toml:"ssl_key"`
// Use SSL but skip chain & host verification
InsecureSkipVerify bool
client *http.Client
}
var sampleConfig = `
## An array of Apache status URI to gather stats.
## An array of URLs to gather from, must be directed at the machine
## readable version of the mod_status page including the auto query string.
## Default is "http://localhost/server-status?auto".
urls = ["http://localhost/server-status?auto"]
## user credentials for basic HTTP authentication
username = "myuser"
password = "mypassword"
## Timeout to the complete conection and reponse time in seconds
response_timeout = "25s" ## default to 5 seconds
## Credentials for basic HTTP authentication.
# username = "myuser"
# password = "mypassword"
## Maximum time to receive response.
# response_timeout = "5s"
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
@@ -65,55 +70,51 @@ func (n *Apache) Gather(acc telegraf.Accumulator) error {
n.ResponseTimeout.Duration = time.Second * 5
}
var outerr error
var errch = make(chan error)
for _, u := range n.Urls {
addr, err := url.Parse(u)
if err != nil {
return fmt.Errorf("Unable to parse address '%s': %s", u, err)
}
go func(addr *url.URL) {
errch <- n.gatherUrl(addr, acc)
}(addr)
}
// Drain channel, waiting for all requests to finish and save last error.
for range n.Urls {
if err := <-errch; err != nil {
outerr = err
}
}
return outerr
}
func (n *Apache) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error {
var tr *http.Transport
if addr.Scheme == "https" {
tlsCfg, err := internal.GetTLSConfig(
n.SSLCert, n.SSLKey, n.SSLCA, n.InsecureSkipVerify)
if n.client == nil {
client, err := n.createHttpClient()
if err != nil {
return err
}
tr = &http.Transport{
ResponseHeaderTimeout: time.Duration(3 * time.Second),
TLSClientConfig: tlsCfg,
}
} else {
tr = &http.Transport{
ResponseHeaderTimeout: time.Duration(3 * time.Second),
n.client = client
}
var wg sync.WaitGroup
wg.Add(len(n.Urls))
for _, u := range n.Urls {
addr, err := url.Parse(u)
if err != nil {
acc.AddError(fmt.Errorf("Unable to parse address '%s': %s", u, err))
continue
}
go func(addr *url.URL) {
defer wg.Done()
acc.AddError(n.gatherUrl(addr, acc))
}(addr)
}
wg.Wait()
return nil
}
func (n *Apache) createHttpClient() (*http.Client, error) {
tlsCfg, err := internal.GetTLSConfig(
n.SSLCert, n.SSLKey, n.SSLCA, n.InsecureSkipVerify)
if err != nil {
return nil, err
}
client := &http.Client{
Transport: tr,
Timeout: n.ResponseTimeout.Duration,
Transport: &http.Transport{
TLSClientConfig: tlsCfg,
},
Timeout: n.ResponseTimeout.Duration,
}
return client, nil
}
func (n *Apache) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error {
req, err := http.NewRequest("GET", addr.String(), nil)
if err != nil {
return fmt.Errorf("error on new request to %s : %s\n", addr.String(), err)
@@ -123,7 +124,7 @@ func (n *Apache) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error {
req.SetBasicAuth(n.Username, n.Password)
}
resp, err := client.Do(req)
resp, err := n.client.Do(req)
if err != nil {
return fmt.Errorf("error on request to %s : %s\n", addr.String(), err)
}

View File

@@ -41,7 +41,7 @@ func TestHTTPApache(t *testing.T) {
}
var acc testutil.Accumulator
err := a.Gather(&acc)
err := acc.GatherError(a.Gather)
require.NoError(t, err)
fields := map[string]interface{}{

View File

@@ -70,7 +70,7 @@ Using this configuration:
When run with:
```
./telegraf -config telegraf.conf -input-filter bcache -test
./telegraf --config telegraf.conf --input-filter bcache --test
```
It produces:

View File

@@ -0,0 +1,85 @@
# Bond Input Plugin
The Bond input plugin collects the status of network bond interfaces and of
their slave interfaces, along with the failure count of each slave interface.
The plugin collects these metrics from `/proc/net/bonding/*` files.
### Configuration:
```toml
[[inputs.bond]]
## Sets 'proc' directory path
## If not specified, then default is /proc
# host_proc = "/proc"
## By default, telegraf gathers stats for all bond interfaces
## Setting interfaces will restrict the stats to the specified
## bond interfaces.
# bond_interfaces = ["bond0"]
```
### Measurements & Fields:
- bond
- active_slave (for active-backup mode)
- status
- bond_slave
- failures
- status
### Description:
```
active_slave
Currently active slave interface for active-backup mode.
status
Status of the bond interface or the bond's slave interface (down = 0, up = 1).
failures
Number of failures for the bond's slave interface.
```
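As a minimal sketch of how an `MII Status` line from `/proc/net/bonding/*` maps to the 0/1 `status` field described above (the `statusFromLine` helper is hypothetical; the plugin's full parser follows below):
```go
package main

import (
	"fmt"
	"strings"
)

// statusFromLine converts an "MII Status: up" style line into the 0/1
// status field described above; ok reports whether the line matched.
func statusFromLine(line string) (status int, ok bool) {
	parts := strings.SplitN(line, ":", 2)
	if len(parts) != 2 || strings.TrimSpace(parts[0]) != "MII Status" {
		return 0, false
	}
	if strings.TrimSpace(parts[1]) == "up" {
		return 1, true
	}
	return 0, true
}

func main() {
	s, _ := statusFromLine("MII Status: up")
	fmt.Println(s) // 1
}
```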
### Tags:
- bond
- bond
- bond_slave
- bond
- interface
### Example output:
Configuration:
```
[[inputs.bond]]
## Sets 'proc' directory path
## If not specified, then default is /proc
host_proc = "/proc"
## By default, telegraf gathers stats for all bond interfaces
## Setting interfaces will restrict the stats to the specified
## bond interfaces.
bond_interfaces = ["bond0", "bond1"]
```
Run:
```
telegraf --config telegraf.conf --input-filter bond --test
```
Output:
```
* Plugin: inputs.bond, Collection 1
> bond,bond=bond1,host=local active_slave="eth0",status=1i 1509704525000000000
> bond_slave,bond=bond1,interface=eth0,host=local status=1i,failures=0i 1509704525000000000
> bond_slave,host=local,bond=bond1,interface=eth1 status=1i,failures=0i 1509704525000000000
> bond,bond=bond0,host=local status=1i 1509704525000000000
> bond_slave,bond=bond0,interface=eth1,host=local status=1i,failures=0i 1509704525000000000
> bond_slave,bond=bond0,interface=eth2,host=local status=1i,failures=0i 1509704525000000000
```

plugins/inputs/bond/bond.go (new file, 204 lines)
View File

@@ -0,0 +1,204 @@
package bond
import (
"bufio"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"strings"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
)
// default host proc path
const defaultHostProc = "/proc"
// env host proc variable name
const envProc = "HOST_PROC"
type Bond struct {
HostProc string `toml:"host_proc"`
BondInterfaces []string `toml:"bond_interfaces"`
}
var sampleConfig = `
## Sets 'proc' directory path
## If not specified, then default is /proc
# host_proc = "/proc"
## By default, telegraf gathers stats for all bond interfaces
## Setting interfaces will restrict the stats to the specified
## bond interfaces.
# bond_interfaces = ["bond0"]
`
func (bond *Bond) Description() string {
return "Collect bond interface status, slaves statuses and failures count"
}
func (bond *Bond) SampleConfig() string {
return sampleConfig
}
func (bond *Bond) Gather(acc telegraf.Accumulator) error {
// load proc path, get default value if config value and env variable are empty
bond.loadPath()
// list bond interfaces from bonding directory or gather all interfaces.
bondNames, err := bond.listInterfaces()
if err != nil {
return err
}
for _, bondName := range bondNames {
bondAbsPath := bond.HostProc + "/net/bonding/" + bondName
file, err := ioutil.ReadFile(bondAbsPath)
if err != nil {
acc.AddError(fmt.Errorf("error inspecting '%s' interface: %v", bondAbsPath, err))
continue
}
rawFile := strings.TrimSpace(string(file))
err = bond.gatherBondInterface(bondName, rawFile, acc)
if err != nil {
acc.AddError(fmt.Errorf("error inspecting '%s' interface: %v", bondName, err))
}
}
return nil
}
func (bond *Bond) gatherBondInterface(bondName string, rawFile string, acc telegraf.Accumulator) error {
splitIndex := strings.Index(rawFile, "Slave Interface:")
if splitIndex == -1 {
splitIndex = len(rawFile)
}
bondPart := rawFile[:splitIndex]
slavePart := rawFile[splitIndex:]
err := bond.gatherBondPart(bondName, bondPart, acc)
if err != nil {
return err
}
err = bond.gatherSlavePart(bondName, slavePart, acc)
if err != nil {
return err
}
return nil
}
func (bond *Bond) gatherBondPart(bondName string, rawFile string, acc telegraf.Accumulator) error {
fields := make(map[string]interface{})
tags := map[string]string{
"bond": bondName,
}
scanner := bufio.NewScanner(strings.NewReader(rawFile))
for scanner.Scan() {
line := scanner.Text()
stats := strings.Split(line, ":")
if len(stats) < 2 {
continue
}
name := strings.TrimSpace(stats[0])
value := strings.TrimSpace(stats[1])
if strings.Contains(name, "Currently Active Slave") {
fields["active_slave"] = value
}
if strings.Contains(name, "MII Status") {
fields["status"] = 0
if value == "up" {
fields["status"] = 1
}
acc.AddFields("bond", fields, tags)
return nil
}
}
if err := scanner.Err(); err != nil {
return err
}
return fmt.Errorf("Couldn't find status info for '%s' ", bondName)
}
func (bond *Bond) gatherSlavePart(bondName string, rawFile string, acc telegraf.Accumulator) error {
var slave string
var status int
scanner := bufio.NewScanner(strings.NewReader(rawFile))
for scanner.Scan() {
line := scanner.Text()
stats := strings.Split(line, ":")
if len(stats) < 2 {
continue
}
name := strings.TrimSpace(stats[0])
value := strings.TrimSpace(stats[1])
if strings.Contains(name, "Slave Interface") {
slave = value
}
if strings.Contains(name, "MII Status") {
status = 0
if value == "up" {
status = 1
}
}
if strings.Contains(name, "Link Failure Count") {
count, err := strconv.Atoi(value)
if err != nil {
return err
}
fields := map[string]interface{}{
"status": status,
"failures": count,
}
tags := map[string]string{
"bond": bondName,
"interface": slave,
}
acc.AddFields("bond_slave", fields, tags)
}
}
if err := scanner.Err(); err != nil {
return err
}
return nil
}
// loadPath can be used to read path firstly from config
// if it is empty then try read from env variable
func (bond *Bond) loadPath() {
if bond.HostProc == "" {
bond.HostProc = proc(envProc, defaultHostProc)
}
}
// proc can be used to read file paths from env
func proc(env, path string) string {
// try to read full file path
if p := os.Getenv(env); p != "" {
return p
}
// return default path
return path
}
func (bond *Bond) listInterfaces() ([]string, error) {
var interfaces []string
if len(bond.BondInterfaces) > 0 {
interfaces = bond.BondInterfaces
} else {
paths, err := filepath.Glob(bond.HostProc + "/net/bonding/*")
if err != nil {
return nil, err
}
for _, p := range paths {
interfaces = append(interfaces, filepath.Base(p))
}
}
return interfaces, nil
}
func init() {
inputs.Add("bond", func() telegraf.Input {
return &Bond{}
})
}

View File

@@ -0,0 +1,77 @@
package bond
import (
"testing"
"github.com/influxdata/telegraf/testutil"
)
var sampleTest802 = `
Ethernet Channel Bonding Driver: v3.5.0 (November 4, 2008)
Bonding Mode: IEEE 802.3ad Dynamic link aggregation
Transmit Hash Policy: layer2 (0)
MII Status: up
MII Polling Interval (ms): 100
Up Delay (ms): 0
Down Delay (ms): 0
802.3ad info
LACP rate: fast
Aggregator selection policy (ad_select): stable
bond bond0 has no active aggregator
Slave Interface: eth1
MII Status: up
Link Failure Count: 0
Permanent HW addr: 00:0c:29:f5:b7:11
Aggregator ID: N/A
Slave Interface: eth2
MII Status: up
Link Failure Count: 3
Permanent HW addr: 00:0c:29:f5:b7:1b
Aggregator ID: N/A
`
var sampleTestAB = `
Ethernet Channel Bonding Driver: v3.6.0 (September 26, 2009)
Bonding Mode: fault-tolerance (active-backup)
Primary Slave: eth2 (primary_reselect always)
Currently Active Slave: eth2
MII Status: up
MII Polling Interval (ms): 100
Up Delay (ms): 0
Down Delay (ms): 0
Slave Interface: eth3
MII Status: down
Speed: 1000 Mbps
Duplex: full
Link Failure Count: 2
Permanent HW addr:
Slave queue ID: 0
Slave Interface: eth2
MII Status: up
Speed: 100 Mbps
Duplex: full
Link Failure Count: 0
Permanent HW addr:
`
func TestGatherBondInterface(t *testing.T) {
var acc testutil.Accumulator
bond := &Bond{}
bond.gatherBondInterface("bond802", sampleTest802, &acc)
acc.AssertContainsTaggedFields(t, "bond", map[string]interface{}{"status": 1}, map[string]string{"bond": "bond802"})
acc.AssertContainsTaggedFields(t, "bond_slave", map[string]interface{}{"failures": 0, "status": 1}, map[string]string{"bond": "bond802", "interface": "eth1"})
acc.AssertContainsTaggedFields(t, "bond_slave", map[string]interface{}{"failures": 3, "status": 1}, map[string]string{"bond": "bond802", "interface": "eth2"})
bond.gatherBondInterface("bondAB", sampleTestAB, &acc)
acc.AssertContainsTaggedFields(t, "bond", map[string]interface{}{"active_slave": "eth2", "status": 1}, map[string]string{"bond": "bondAB"})
acc.AssertContainsTaggedFields(t, "bond_slave", map[string]interface{}{"failures": 2, "status": 0}, map[string]string{"bond": "bondAB", "interface": "eth3"})
acc.AssertContainsTaggedFields(t, "bond_slave", map[string]interface{}{"failures": 0, "status": 1}, map[string]string{"bond": "bondAB", "interface": "eth2"})
}


@@ -7,7 +7,6 @@ import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
"io/ioutil"
"log"
"net/http"
"net/url"
"strings"
@@ -123,8 +122,8 @@ func (j javaMetric) addTagsFields(out map[string]interface{}) {
}
j.acc.AddFields(tokens["class"]+tokens["type"], fields, tags)
} else {
fmt.Printf("Missing key 'value' in '%s' output response\n%v\n",
j.metric, out)
j.acc.AddError(fmt.Errorf("Missing key 'value' in '%s' output response\n%v\n",
j.metric, out))
}
}
@@ -155,8 +154,8 @@ func (c cassandraMetric) addTagsFields(out map[string]interface{}) {
addCassandraMetric(k, c, v.(map[string]interface{}))
}
} else {
fmt.Printf("Missing key 'value' in '%s' output response\n%v\n",
c.metric, out)
c.acc.AddError(fmt.Errorf("Missing key 'value' in '%s' output response\n%v\n",
c.metric, out))
return
}
} else {
@@ -164,8 +163,8 @@ func (c cassandraMetric) addTagsFields(out map[string]interface{}) {
addCassandraMetric(r.(map[string]interface{})["mbean"].(string),
c, values.(map[string]interface{}))
} else {
fmt.Printf("Missing key 'value' in '%s' output response\n%v\n",
c.metric, out)
c.acc.AddError(fmt.Errorf("Missing key 'value' in '%s' output response\n%v\n",
c.metric, out))
return
}
}
@@ -274,8 +273,8 @@ func (c *Cassandra) Gather(acc telegraf.Accumulator) error {
m = newCassandraMetric(serverTokens["host"], metric, acc)
} else {
// unsupported metric type
log.Printf("I! Unsupported Cassandra metric [%s], skipping",
metric)
acc.AddError(fmt.Errorf("E! Unsupported Cassandra metric [%s], skipping",
metric))
continue
}
@@ -283,7 +282,8 @@ func (c *Cassandra) Gather(acc telegraf.Accumulator) error {
requestUrl, err := url.Parse("http://" + serverTokens["host"] + ":" +
serverTokens["port"] + context + metric)
if err != nil {
return err
acc.AddError(err)
continue
}
if serverTokens["user"] != "" && serverTokens["passwd"] != "" {
requestUrl.User = url.UserPassword(serverTokens["user"],
@@ -291,8 +291,12 @@ func (c *Cassandra) Gather(acc telegraf.Accumulator) error {
}
out, err := c.getAttr(requestUrl)
if err != nil {
acc.AddError(err)
continue
}
if out["status"] != 200.0 {
fmt.Printf("URL returned with status %v\n", out["status"])
acc.AddError(fmt.Errorf("URL returned with status %v - %s\n", out["status"], requestUrl))
continue
}
m.addTagsFields(out)


@@ -151,7 +151,7 @@ func TestHttpJsonJavaMultiValue(t *testing.T) {
var acc testutil.Accumulator
acc.SetDebug(true)
err := cassandra.Gather(&acc)
err := acc.GatherError(cassandra.Gather)
assert.Nil(t, err)
assert.Equal(t, 2, len(acc.Metrics))
@@ -180,7 +180,7 @@ func TestHttpJsonJavaMultiType(t *testing.T) {
var acc testutil.Accumulator
acc.SetDebug(true)
err := cassandra.Gather(&acc)
err := acc.GatherError(cassandra.Gather)
assert.Nil(t, err)
assert.Equal(t, 2, len(acc.Metrics))
@@ -197,16 +197,17 @@ func TestHttpJsonJavaMultiType(t *testing.T) {
}
// Test that the proper values are ignored or collected
func TestHttpJsonOn404(t *testing.T) {
func TestHttp404(t *testing.T) {
jolokia := genJolokiaClientStub(validJavaMultiValueJSON, 404, Servers,
jolokia := genJolokiaClientStub(invalidJSON, 404, Servers,
[]string{HeapMetric})
var acc testutil.Accumulator
err := jolokia.Gather(&acc)
err := acc.GatherError(jolokia.Gather)
assert.Nil(t, err)
assert.Error(t, err)
assert.Equal(t, 0, len(acc.Metrics))
assert.Contains(t, err.Error(), "has status code 404")
}
// Test that the proper values are ignored or collected for class=Cassandra
@@ -214,7 +215,7 @@ func TestHttpJsonCassandraMultiValue(t *testing.T) {
cassandra := genJolokiaClientStub(validCassandraMultiValueJSON, 200, Servers, []string{ReadLatencyMetric})
var acc testutil.Accumulator
err := cassandra.Gather(&acc)
err := acc.GatherError(cassandra.Gather)
assert.Nil(t, err)
assert.Equal(t, 1, len(acc.Metrics))
@@ -246,7 +247,7 @@ func TestHttpJsonCassandraNestedMultiValue(t *testing.T) {
var acc testutil.Accumulator
acc.SetDebug(true)
err := cassandra.Gather(&acc)
err := acc.GatherError(cassandra.Gather)
assert.Nil(t, err)
assert.Equal(t, 2, len(acc.Metrics))


@@ -200,7 +200,7 @@ All measurements will have the following tags:
*Admin Socket Stats*
<pre>
telegraf -test -config /etc/telegraf/telegraf.conf -config-directory /etc/telegraf/telegraf.d -input-filter ceph
telegraf --config /etc/telegraf/telegraf.conf --config-directory /etc/telegraf/telegraf.d --input-filter ceph --test
* Plugin: ceph, Collection 1
> ceph,collection=paxos, id=node-2,role=openstack,type=mon accept_timeout=0,begin=14931264,begin_bytes.avgcount=14931264,begin_bytes.sum=180309683362,begin_keys.avgcount=0,begin_keys.sum=0,begin_latency.avgcount=14931264,begin_latency.sum=9293.29589,collect=1,collect_bytes.avgcount=1,collect_bytes.sum=24,collect_keys.avgcount=1,collect_keys.sum=1,collect_latency.avgcount=1,collect_latency.sum=0.00028,collect_timeout=0,collect_uncommitted=0,commit=14931264,commit_bytes.avgcount=0,commit_bytes.sum=0,commit_keys.avgcount=0,commit_keys.sum=0,commit_latency.avgcount=0,commit_latency.sum=0,lease_ack_timeout=0,lease_timeout=0,new_pn=0,new_pn_latency.avgcount=0,new_pn_latency.sum=0,refresh=14931264,refresh_latency.avgcount=14931264,refresh_latency.sum=8706.98498,restart=4,share_state=0,share_state_bytes.avgcount=0,share_state_bytes.sum=0,share_state_keys.avgcount=0,share_state_keys.sum=0,start_leader=0,start_peon=1,store_state=14931264,store_state_bytes.avgcount=14931264,store_state_bytes.sum=353119959211,store_state_keys.avgcount=14931264,store_state_keys.sum=289807523,store_state_latency.avgcount=14931264,store_state_latency.sum=10952.835724 1462821234814535148
> ceph,collection=throttle-mon_client_bytes,id=node-2,type=mon get=1413017,get_or_fail_fail=0,get_or_fail_success=0,get_sum=71211705,max=104857600,put=1413013,put_sum=71211459,take=0,take_sum=0,val=246,wait.avgcount=0,wait.sum=0 1462821234814737219


@@ -101,12 +101,12 @@ func (c *Ceph) gatherAdminSocketStats(acc telegraf.Accumulator) error {
for _, s := range sockets {
dump, err := perfDump(c.CephBinary, s)
if err != nil {
log.Printf("E! error reading from socket '%s': %v", s.socket, err)
acc.AddError(fmt.Errorf("E! error reading from socket '%s': %v", s.socket, err))
continue
}
data, err := parseDump(dump)
if err != nil {
log.Printf("E! error parsing dump from socket '%s': %v", s.socket, err)
acc.AddError(fmt.Errorf("E! error parsing dump from socket '%s': %v", s.socket, err))
continue
}
for tag, metrics := range data {


@@ -26,7 +26,7 @@ func TestParseSockId(t *testing.T) {
func TestParseMonDump(t *testing.T) {
dump, err := parseDump(monPerfDump)
assert.NoError(t, err)
assert.InEpsilon(t, 5678670180, dump["cluster"]["osd_kb_used"], epsilon)
assert.InEpsilon(t, int64(5678670180), dump["cluster"]["osd_kb_used"], epsilon)
assert.InEpsilon(t, 6866.540527000, dump["paxos"]["store_state_latency.sum"], epsilon)
}


@@ -22,10 +22,11 @@ func (g *CGroup) Gather(acc telegraf.Accumulator) error {
for dir := range list {
if dir.err != nil {
return dir.err
acc.AddError(dir.err)
continue
}
if err := g.gatherDir(dir.path, acc); err != nil {
return err
acc.AddError(err)
}
}
@@ -224,7 +225,7 @@ var fileFormats = [...]fileFormat{
}
func numberOrString(s string) interface{} {
i, err := strconv.Atoi(s)
i, err := strconv.ParseInt(s, 10, 64)
if err == nil {
return i
}
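
The `ParseInt(s, 10, 64)` switch above pins the parsed type to `int64` on every platform; `Atoi` returns the platform-sized `int`, so values such as `223372036854771712` from the cgroup files would overflow on 32-bit builds. That is also why the tests below wrap their expected values in `int64(...)`. A minimal sketch:

```go
package main

import (
	"fmt"
	"strconv"
)

// numberOrString mirrors the updated helper: always parse into int64 so
// field types are identical on 32- and 64-bit platforms, falling back to
// the raw string when the value is not an integer.
func numberOrString(s string) interface{} {
	if i, err := strconv.ParseInt(s, 10, 64); err == nil {
		return i
	}
	return s
}

func main() {
	fmt.Println(numberOrString("223372036854771712")) // int64, even on 32-bit
	fmt.Println(numberOrString("12-781"))             // kept as a string
}
```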


@@ -24,24 +24,24 @@ var cg1 = &CGroup{
func TestCgroupStatistics_1(t *testing.T) {
var acc testutil.Accumulator
err := cg1.Gather(&acc)
err := acc.GatherError(cg1.Gather)
require.NoError(t, err)
tags := map[string]string{
"path": "testdata/memory",
}
fields := map[string]interface{}{
"memory.stat.cache": 1739362304123123123,
"memory.stat.rss": 1775325184,
"memory.stat.rss_huge": 778043392,
"memory.stat.mapped_file": 421036032,
"memory.stat.dirty": -307200,
"memory.max_usage_in_bytes.0": 0,
"memory.max_usage_in_bytes.1": -1,
"memory.max_usage_in_bytes.2": 2,
"memory.limit_in_bytes": 223372036854771712,
"memory.stat.cache": int64(1739362304123123123),
"memory.stat.rss": int64(1775325184),
"memory.stat.rss_huge": int64(778043392),
"memory.stat.mapped_file": int64(421036032),
"memory.stat.dirty": int64(-307200),
"memory.max_usage_in_bytes.0": int64(0),
"memory.max_usage_in_bytes.1": int64(-1),
"memory.max_usage_in_bytes.2": int64(2),
"memory.limit_in_bytes": int64(223372036854771712),
"memory.use_hierarchy": "12-781",
"notify_on_release": 0,
"notify_on_release": int64(0),
}
acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
}
@@ -56,17 +56,17 @@ var cg2 = &CGroup{
func TestCgroupStatistics_2(t *testing.T) {
var acc testutil.Accumulator
err := cg2.Gather(&acc)
err := acc.GatherError(cg2.Gather)
require.NoError(t, err)
tags := map[string]string{
"path": "testdata/cpu",
}
fields := map[string]interface{}{
"cpuacct.usage_percpu.0": -1452543795404,
"cpuacct.usage_percpu.1": 1376681271659,
"cpuacct.usage_percpu.2": 1450950799997,
"cpuacct.usage_percpu.3": -1473113374257,
"cpuacct.usage_percpu.0": int64(-1452543795404),
"cpuacct.usage_percpu.1": int64(1376681271659),
"cpuacct.usage_percpu.2": int64(1450950799997),
"cpuacct.usage_percpu.3": int64(-1473113374257),
}
acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
}
@@ -81,14 +81,14 @@ var cg3 = &CGroup{
func TestCgroupStatistics_3(t *testing.T) {
var acc testutil.Accumulator
err := cg3.Gather(&acc)
err := acc.GatherError(cg3.Gather)
require.NoError(t, err)
tags := map[string]string{
"path": "testdata/memory/group_1",
}
fields := map[string]interface{}{
"memory.limit_in_bytes": 223372036854771712,
"memory.limit_in_bytes": int64(223372036854771712),
}
acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
@@ -108,14 +108,14 @@ var cg4 = &CGroup{
func TestCgroupStatistics_4(t *testing.T) {
var acc testutil.Accumulator
err := cg4.Gather(&acc)
err := acc.GatherError(cg4.Gather)
require.NoError(t, err)
tags := map[string]string{
"path": "testdata/memory/group_1/group_1_1",
}
fields := map[string]interface{}{
"memory.limit_in_bytes": 223372036854771712,
"memory.limit_in_bytes": int64(223372036854771712),
}
acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
@@ -140,14 +140,14 @@ var cg5 = &CGroup{
func TestCgroupStatistics_5(t *testing.T) {
var acc testutil.Accumulator
err := cg5.Gather(&acc)
err := acc.GatherError(cg5.Gather)
require.NoError(t, err)
tags := map[string]string{
"path": "testdata/memory/group_1/group_1_1",
}
fields := map[string]interface{}{
"memory.limit_in_bytes": 223372036854771712,
"memory.limit_in_bytes": int64(223372036854771712),
}
acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
@@ -167,16 +167,16 @@ var cg6 = &CGroup{
func TestCgroupStatistics_6(t *testing.T) {
var acc testutil.Accumulator
err := cg6.Gather(&acc)
err := acc.GatherError(cg6.Gather)
require.NoError(t, err)
tags := map[string]string{
"path": "testdata/memory",
}
fields := map[string]interface{}{
"memory.usage_in_bytes": 3513667584,
"memory.usage_in_bytes": int64(3513667584),
"memory.use_hierarchy": "12-781",
"memory.kmem.limit_in_bytes": 9223372036854771712,
"memory.kmem.limit_in_bytes": int64(9223372036854771712),
}
acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
}


@@ -63,6 +63,7 @@ Delete second or Not synchronised.
### Measurements & Fields:
- chrony
- system_time (float, seconds)
- last_offset (float, seconds)
- rms_offset (float, seconds)
- frequency (float, ppm)
@@ -82,9 +83,9 @@ Delete second or Not synchronised.
### Example Output:
```
$ telegraf -config telegraf.conf -input-filter chrony -test
$ telegraf --config telegraf.conf --input-filter chrony --test
* Plugin: chrony, Collection 1
> chrony,leap_status=normal,reference_id=192.168.1.1,stratum=3 frequency=-35.657,last_offset=-0.000013616,residual_freq=-0,rms_offset=0.000027073,root_delay=0.000644,root_dispersion=0.003444,skew=0.001,update_interval=1031.2 1463750789687639161
> chrony,leap_status=normal,reference_id=192.168.1.1,stratum=3 frequency=-35.657,system_time=0.000027073,last_offset=-0.000013616,residual_freq=-0,rms_offset=0.000027073,root_delay=0.000644,root_dispersion=0.003444,skew=0.001,update_interval=1031.2 1463750789687639161
```


@@ -1,5 +1,3 @@
// +build linux
package chrony
import (
@@ -92,7 +90,7 @@ func processChronycOutput(out string) (map[string]interface{}, map[string]string
}
name := strings.ToLower(strings.Replace(strings.TrimSpace(stats[0]), " ", "_", -1))
// ignore reference time
if strings.Contains(name, "time") {
if strings.Contains(name, "ref_time") {
continue
}
valueFields := strings.Fields(stats[1])
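
The narrowed match above is the substance of this fix: the old check `strings.Contains(name, "time")` also matched `system_time` and silently dropped it, which is why the README diff above adds the `system_time` field. A small sketch of the normalization and skip logic (the label strings are illustrative chronyc output):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	labels := []string{"System time", "Last offset", "Ref time (UTC)"}
	for _, label := range labels {
		// Same normalization as the plugin: trim, snake_case, lower-case.
		name := strings.ToLower(strings.Replace(strings.TrimSpace(label), " ", "_", -1))
		if strings.Contains(name, "ref_time") {
			continue // only the reference time line is skipped now
		}
		fmt.Println(name) // system_time, last_offset
	}
}
```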


@@ -1,3 +0,0 @@
// +build !linux
package chrony


@@ -1,5 +1,3 @@
// +build linux
package chrony
import (
@@ -31,6 +29,7 @@ func TestGather(t *testing.T) {
"stratum": "3",
}
fields := map[string]interface{}{
"system_time": 0.000020390,
"last_offset": 0.000012651,
"rms_offset": 0.000025577,
"frequency": -16.001,


@@ -9,8 +9,8 @@ API endpoint. In the following order the plugin will attempt to authenticate.
1. Assumed credentials via STS if `role_arn` attribute is specified (source credentials are evaluated from subsequent rules)
2. Explicit credentials from `access_key`, `secret_key`, and `token` attributes
3. Shared profile from `profile` attribute
4. [Environment Variables](https://github.com/aws/aws-sdk-go/wiki/configuring-sdk#environment-variables)
5. [Shared Credentials](https://github.com/aws/aws-sdk-go/wiki/configuring-sdk#shared-credentials-file)
4. [Environment Variables](https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#environment-variables)
5. [Shared Credentials](https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#shared-credentials-file)
6. [EC2 Instance Profile](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html)
### Configuration:
@@ -20,9 +20,24 @@ API endpoint. In the following order the plugin will attempt to authenticate.
## Amazon Region (required)
region = "us-east-1"
## Amazon Credentials
## Credentials are loaded in the following order
## 1) Assumed credentials via STS if role_arn is specified
## 2) explicit credentials from 'access_key' and 'secret_key'
## 3) shared profile from 'profile'
## 4) environment variables
## 5) shared credentials file
## 6) EC2 Instance Profile
#access_key = ""
#secret_key = ""
#token = ""
#role_arn = ""
#profile = ""
#shared_credential_file = ""
# The minimum period for Cloudwatch metrics is 1 minute (60s). However not all
# metrics are made available to the 1 minute period. Some are collected at
# 3 minute and 5 minutes intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring.
# 3 minute, 5 minute, or larger intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring.
# Note that if a period is configured that is smaller than the minimum for a
# particular metric, that metric will not be returned by the Cloudwatch API
# and will not be collected by Telegraf.
@@ -42,9 +57,10 @@ API endpoint. In the following order the plugin will attempt to authenticate.
namespace = "AWS/ELB"
## Maximum requests per second. Note that the global default AWS rate limit is
## 10 reqs/sec, so if you define multiple namespaces, these should add up to a
## maximum of 10. Optional - default value is 10.
ratelimit = 10
## 400 reqs/sec, so if you define multiple namespaces, these should add up to a
## maximum of 400. Optional - default value is 200.
## See http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_limits.html
ratelimit = 200
## Metrics to Pull (optional)
## Defaults to all Metrics in Namespace if nothing is provided
@@ -56,10 +72,6 @@ API endpoint. In the following order the plugin will attempt to authenticate.
[[inputs.cloudwatch.metrics.dimensions]]
name = "LoadBalancerName"
value = "p-example"
[[inputs.cloudwatch.metrics.dimensions]]
name = "AvailabilityZone"
value = "*"
```
#### Requirements and Terminology
@@ -133,6 +145,6 @@ Tag Dimension names are represented in [snake case](https://en.wikipedia.org/wik
### Example Output:
```
$ ./telegraf -config telegraf.conf -input-filter cloudwatch -test
$ ./telegraf --config telegraf.conf --input-filter cloudwatch --test
> cloudwatch_aws_elb,load_balancer_name=p-example,region=us-east-1,unit=seconds latency_average=0.004810798017284538,latency_maximum=0.1100282669067383,latency_minimum=0.0006084442138671875,latency_sample_count=4029,latency_sum=19.382705211639404 1459542420000000000
```


@@ -13,7 +13,6 @@ import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
internalaws "github.com/influxdata/telegraf/internal/config/aws"
"github.com/influxdata/telegraf/internal/errchan"
"github.com/influxdata/telegraf/internal/limiter"
"github.com/influxdata/telegraf/plugins/inputs"
)
@@ -82,7 +81,7 @@ func (c *CloudWatch) SampleConfig() string {
# The minimum period for Cloudwatch metrics is 1 minute (60s). However not all
# metrics are made available to the 1 minute period. Some are collected at
# 3 minute and 5 minutes intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring.
# 3 minute, 5 minute, or larger intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring.
# Note that if a period is configured that is smaller than the minimum for a
# particular metric, that metric will not be returned by the Cloudwatch API
# and will not be collected by Telegraf.
@@ -93,7 +92,7 @@ func (c *CloudWatch) SampleConfig() string {
## Collection Delay (required - must account for metrics availability via CloudWatch API)
delay = "5m"
## Recomended: use metric 'interval' that is a multiple of 'period' to avoid
## Recommended: use metric 'interval' that is a multiple of 'period' to avoid
## gaps or overlap in pulled data
interval = "5m"
@@ -105,9 +104,10 @@ func (c *CloudWatch) SampleConfig() string {
namespace = "AWS/ELB"
## Maximum requests per second. Note that the global default AWS rate limit is
## 10 reqs/sec, so if you define multiple namespaces, these should add up to a
## maximum of 10. Optional - default value is 10.
ratelimit = 10
## 400 reqs/sec, so if you define multiple namespaces, these should add up to a
## maximum of 400. Optional - default value is 200.
## See http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_limits.html
ratelimit = 200
## Metrics to Pull (optional)
## Defaults to all Metrics in Namespace if nothing is provided
@@ -185,8 +185,6 @@ func (c *CloudWatch) Gather(acc telegraf.Accumulator) error {
if err != nil {
return err
}
metricCount := len(metrics)
errChan := errchan.New(metricCount)
now := time.Now()
@@ -201,12 +199,12 @@ func (c *CloudWatch) Gather(acc telegraf.Accumulator) error {
<-lmtr.C
go func(inm *cloudwatch.Metric) {
defer wg.Done()
c.gatherMetric(acc, inm, now, errChan.C)
acc.AddError(c.gatherMetric(acc, inm, now))
}(m)
}
wg.Wait()
return errChan.Error()
return nil
}
func init() {
@@ -214,7 +212,7 @@ func init() {
ttl, _ := time.ParseDuration("1h")
return &CloudWatch{
CacheTTL: internal.Duration{Duration: ttl},
RateLimit: 10,
RateLimit: 200,
}
})
}
@@ -284,13 +282,11 @@ func (c *CloudWatch) gatherMetric(
acc telegraf.Accumulator,
metric *cloudwatch.Metric,
now time.Time,
errChan chan error,
) {
) error {
params := c.getStatisticsInput(metric, now)
resp, err := c.client.GetMetricStatistics(params)
if err != nil {
errChan <- err
return
return err
}
for _, point := range resp.Datapoints {
@@ -325,7 +321,7 @@ func (c *CloudWatch) gatherMetric(
acc.AddFields(formatMeasurement(c.Namespace), fields, tags, *point.Timestamp)
}
errChan <- nil
return nil
}
/*
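
With `errchan` removed, Gather above still paces its goroutine launches through `<-lmtr.C`. A standalone sketch of that rate-limited fan-out, using a plain `time.Ticker` as a stand-in for the plugin's `internal/limiter` package:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	const rateLimit = 5 // launches per second
	lmtr := time.NewTicker(time.Second / rateLimit)
	defer lmtr.Stop()

	metrics := []string{"Latency", "RequestCount", "HealthyHostCount"}
	var wg sync.WaitGroup
	for _, m := range metrics {
		<-lmtr.C // wait for the next slot before launching a request
		wg.Add(1)
		go func(metric string) {
			defer wg.Done()
			fmt.Println("fetching", metric, time.Now().Format("15:04:05.000"))
		}(m)
	}
	wg.Wait()
}
```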


@@ -58,13 +58,13 @@ func TestGather(t *testing.T) {
Namespace: "AWS/ELB",
Delay: internalDuration,
Period: internalDuration,
RateLimit: 10,
RateLimit: 200,
}
var acc testutil.Accumulator
c.client = &mockGatherCloudWatchClient{}
c.Gather(&acc)
acc.GatherError(c.Gather)
fields := map[string]interface{}{}
fields["latency_minimum"] = 0.1
@@ -146,7 +146,7 @@ func TestSelectMetrics(t *testing.T) {
Namespace: "AWS/ELB",
Delay: internalDuration,
Period: internalDuration,
RateLimit: 10,
RateLimit: 200,
Metrics: []*Metric{
&Metric{
MetricNames: []string{"Latency", "RequestCount"},
@@ -207,14 +207,13 @@ func TestGenerateStatisticsInputParams(t *testing.T) {
}
func TestMetricsCacheTimeout(t *testing.T) {
ttl, _ := time.ParseDuration("5ms")
cache := &MetricCache{
Metrics: []*cloudwatch.Metric{},
Fetched: time.Now(),
TTL: ttl,
TTL: time.Minute,
}
assert.True(t, cache.IsValid())
time.Sleep(ttl)
cache.Fetched = time.Now().Add(-time.Minute)
assert.False(t, cache.IsValid())
}


@@ -51,6 +51,6 @@ This input does not use tags.
### Example Output:
```
$ ./telegraf -config telegraf.conf -input-filter conntrack -test
$ ./telegraf --config telegraf.conf --input-filter conntrack --test
conntrack,host=myhost ip_conntrack_count=2,ip_conntrack_max=262144 1461620427667995735
```


@@ -11,7 +11,6 @@ import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
"log"
"path/filepath"
)
@@ -93,15 +92,15 @@ func (c *Conntrack) Gather(acc telegraf.Accumulator) error {
contents, err := ioutil.ReadFile(fName)
if err != nil {
log.Printf("E! failed to read file '%s': %v", fName, err)
acc.AddError(fmt.Errorf("E! failed to read file '%s': %v", fName, err))
continue
}
v := strings.TrimSpace(string(contents))
fields[metricKey], err = strconv.ParseFloat(v, 64)
if err != nil {
log.Printf("E! failed to parse metric, expected number but "+
" found '%s': %v", v, err)
acc.AddError(fmt.Errorf("E! failed to parse metric, expected number but "+
" found '%s': %v", v, err))
}
}
}


@@ -1,6 +1,6 @@
# Telegraf Input Plugin: Consul
This plugin will collect statistics about all helath checks registered in the Consul. It uses [Consul API](https://www.consul.io/docs/agent/http/health.html#health_state)
This plugin will collect statistics about all health checks registered in the Consul. It uses [Consul API](https://www.consul.io/docs/agent/http/health.html#health_state)
to query the data. It will not report the [telemetry](https://www.consul.io/docs/agent/telemetry.html) but Consul can report those stats already using StatsD protocol if needed.
## Configuration:
@@ -46,7 +46,7 @@ the health check at this sample.
## Example output
```
$ telegraf --config ./telegraf.conf -input-filter consul -test
$ telegraf --config ./telegraf.conf --input-filter consul --test
* Plugin: consul, Collection 1
> consul_health_checks,host=wolfpit,node=consul-server-node,check_id="serfHealth" check_name="Serf Health Status",service_id="",status="passing",passing=1i,critical=0i,warning=0i 1464698464486439902
> consul_health_checks,host=wolfpit,node=consul-server-node,service_name=www.example.com,check_id="service:www-example-com.test01" check_name="Service 'www.example.com' check",service_id="www-example-com.test01",status="critical",passing=0i,critical=1i,warning=0i 1464698464486519036


@@ -69,6 +69,10 @@ func (c *Consul) createAPIClient() (*api.Client, error) {
config.Datacenter = c.Datacentre
}
if c.Token != "" {
config.Token = c.Token
}
if c.Username != "" {
config.HttpAuth = &api.HttpBasicAuth{
Username: c.Username,


@@ -20,7 +20,7 @@ var sampleChecks = []*api.HealthCheck{
},
}
func TestGatherHealtCheck(t *testing.T) {
func TestGatherHealthCheck(t *testing.T) {
expectedFields := map[string]interface{}{
"check_name": "foo.health",
"status": "passing",


@@ -22,7 +22,7 @@
### couchbase_node
Tags:
- cluster: whatever you called it in `servers` in the configuration, e.g.: `http://couchbase-0.example.com/`
- cluster: sanitized string from `servers` configuration field e.g.: `http://user:password@couchbase-0.example.com:8091/endpoint` -> `http://couchbase-0.example.com:8091/endpoint`
- hostname: Couchbase's name for the node and port, e.g., `172.16.10.187:8091`
Fields:
@@ -48,7 +48,7 @@ Fields:
## Example output
```
$ telegraf -config telegraf.conf -input-filter couchbase -test
$ telegraf --config telegraf.conf --input-filter couchbase --test
* Plugin: couchbase, Collection 1
> couchbase_node,cluster=https://couchbase-0.example.com/,hostname=172.16.10.187:8091 memory_free=22927384576,memory_total=64424656896 1458381183695864929
> couchbase_node,cluster=https://couchbase-0.example.com/,hostname=172.16.10.65:8091 memory_free=23520161792,memory_total=64424656896 1458381183695972112


@@ -1,10 +1,12 @@
package couchbase
import (
"regexp"
"sync"
couchbase "github.com/couchbase/go-couchbase"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
"sync"
)
type Couchbase struct {
@@ -19,11 +21,13 @@ var sampleConfig = `
## http://admin:secret@couchbase-0.example.com:8091/
##
## If no servers are specified, then localhost is used as the host.
## If no protocol is specifed, HTTP is used.
## If no protocol is specified, HTTP is used.
## If no port is specified, 8091 is used.
servers = ["http://localhost:8091"]
`
var regexpURI = regexp.MustCompile(`(\S+://)?(\S+\:\S+@)`)
func (r *Couchbase) SampleConfig() string {
return sampleConfig
}
@@ -42,19 +46,17 @@ func (r *Couchbase) Gather(acc telegraf.Accumulator) error {
var wg sync.WaitGroup
var outerr error
for _, serv := range r.Servers {
wg.Add(1)
go func(serv string) {
defer wg.Done()
outerr = r.gatherServer(serv, acc, nil)
acc.AddError(r.gatherServer(serv, acc, nil))
}(serv)
}
wg.Wait()
return outerr
return nil
}
func (r *Couchbase) gatherServer(addr string, acc telegraf.Accumulator, pool *couchbase.Pool) error {
@@ -73,15 +75,17 @@ func (r *Couchbase) gatherServer(addr string, acc telegraf.Accumulator, pool *co
}
pool = &p
}
for i := 0; i < len(pool.Nodes); i++ {
node := pool.Nodes[i]
tags := map[string]string{"cluster": addr, "hostname": node.Hostname}
tags := map[string]string{"cluster": regexpURI.ReplaceAllString(addr, "${1}"), "hostname": node.Hostname}
fields := make(map[string]interface{})
fields["memory_free"] = node.MemoryFree
fields["memory_total"] = node.MemoryTotal
acc.AddFields("couchbase_node", fields, tags)
}
for bucketName, _ := range pool.BucketMap {
for bucketName := range pool.BucketMap {
tags := map[string]string{"cluster": addr, "bucket": bucketName}
bs := pool.BucketMap[bucketName].BasicStats
fields := make(map[string]interface{})
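
A self-contained demonstration of the `regexpURI` credential scrubbing introduced above, reproducing the transformation from the README diff:

```go
package main

import (
	"fmt"
	"regexp"
)

// Same pattern as the plugin: strip a "user:password@" segment while
// keeping the scheme captured in group 1.
var regexpURI = regexp.MustCompile(`(\S+://)?(\S+\:\S+@)`)

func main() {
	addr := "http://user:password@couchbase-0.example.com:8091/endpoint"
	fmt.Println(regexpURI.ReplaceAllString(addr, "${1}"))
	// Output: http://couchbase-0.example.com:8091/endpoint
}
```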



@@ -63,7 +63,7 @@ httpd statistics:
### Example output:
```
➜ telegraf git:(master) ✗ ./telegraf -config ./config.conf -input-filter couchdb -test
➜ telegraf git:(master) ✗ ./telegraf --config ./config.conf --input-filter couchdb --test
* Plugin: couchdb,
Collection 1
> couchdb,server=http://localhost:5984/_stats couchdb_auth_cache_hits_current=0,


@@ -2,13 +2,11 @@ package couchdb
import (
"encoding/json"
"errors"
"fmt"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
"net/http"
"reflect"
"strings"
"sync"
"time"
)
@@ -83,34 +81,20 @@ func (*CouchDB) SampleConfig() string {
}
func (c *CouchDB) Gather(accumulator telegraf.Accumulator) error {
errorChannel := make(chan error, len(c.HOSTs))
var wg sync.WaitGroup
for _, u := range c.HOSTs {
wg.Add(1)
go func(host string) {
defer wg.Done()
if err := c.fetchAndInsertData(accumulator, host); err != nil {
errorChannel <- fmt.Errorf("[host=%s]: %s", host, err)
accumulator.AddError(fmt.Errorf("[host=%s]: %s", host, err))
}
}(u)
}
wg.Wait()
close(errorChannel)
// If there weren't any errors, we can return nil now.
if len(errorChannel) == 0 {
return nil
}
// There were errors, so join them all together as one big error.
errorStrings := make([]string, 0, len(errorChannel))
for err := range errorChannel {
errorStrings = append(errorStrings, err.Error())
}
return errors.New(strings.Join(errorStrings, "\n"))
return nil
}
var tr = &http.Transport{
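
This couchdb change is representative of the error-handling migration running through these commits: per-host goroutines stop funneling errors into a channel and instead report them through the accumulator, so Gather returns nil and one bad host cannot abort a collection interval. A minimal standalone sketch of the pattern; `accumulator` here is a simplified stand-in for `telegraf.Accumulator`, not the real interface:

```go
package main

import (
	"fmt"
	"sync"
)

// accumulator is reduced to the one method this pattern relies on.
type accumulator interface {
	AddError(err error)
}

type logAccumulator struct{ mu sync.Mutex }

func (a *logAccumulator) AddError(err error) {
	a.mu.Lock()
	defer a.mu.Unlock()
	fmt.Println("E!", err)
}

// gather fans out over hosts; failures are reported per host via
// AddError, and the function itself always returns nil so a single
// unreachable host does not abort the whole interval.
func gather(acc accumulator, hosts []string, fetch func(string) error) error {
	var wg sync.WaitGroup
	for _, h := range hosts {
		wg.Add(1)
		go func(host string) {
			defer wg.Done()
			if err := fetch(host); err != nil {
				acc.AddError(fmt.Errorf("[host=%s]: %s", host, err))
			}
		}(h)
	}
	wg.Wait()
	return nil
}

func main() {
	hosts := []string{"http://db-1:5984", "http://db-2:5984"}
	_ = gather(&logAccumulator{}, hosts, func(host string) error {
		return fmt.Errorf("connection refused") // simulate a failed fetch
	})
}
```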


@@ -316,5 +316,5 @@ func TestBasic(t *testing.T) {
}
var acc testutil.Accumulator
require.NoError(t, plugin.Gather(&acc))
require.NoError(t, acc.GatherError(plugin.Gather))
}


@@ -0,0 +1,209 @@
# DC/OS Input Plugin
This input plugin gathers metrics from a DC/OS cluster's [metrics component](https://docs.mesosphere.com/1.10/metrics/).
**Series Cardinality Warning**
Depending on the workload of your DC/OS cluster, this plugin can quickly
create a high number of series which, left unchecked, can cause high load on
your database.
- Use [measurement filtering](https://github.com/influxdata/telegraf/blob/master/docs/CONFIGURATION.md#measurement-filtering) liberally to exclude unneeded metrics, as well as the node, container, and app include/exclude options.
- Write to a database with an appropriate [retention policy](https://docs.influxdata.com/influxdb/v1.3/concepts/glossary/#retention-policy-rp).
- Limit the number of series allowed in your database using the `max-series-per-database` and `max-values-per-tag` settings.
- Consider enabling the [TSI](https://docs.influxdata.com/influxdb/v1.3/about_the_project/releasenotes-changelog/#release-notes-8) engine.
- Monitor your [series cardinality](https://docs.influxdata.com/influxdb/v1.3/troubleshooting/frequently-asked-questions/#how-can-i-query-for-series-cardinality).
### Configuration:
```toml
[[inputs.dcos]]
## The DC/OS cluster URL.
cluster_url = "https://dcos-master-1"
## The ID of the service account.
service_account_id = "telegraf"
## The private key file for the service account.
service_account_private_key = "/etc/telegraf/telegraf-sa-key.pem"
## Path containing login token. If set, will read on every gather.
# token_file = "/home/dcos/.dcos/token"
## In all filter options, if both include and exclude are empty, all items
## will be collected. Arrays may contain glob patterns.
##
## Node IDs to collect metrics from. If a node is excluded, no metrics will
## be collected for its containers or apps.
# node_include = []
# node_exclude = []
## Container IDs to collect container metrics from.
# container_include = []
# container_exclude = []
## Container IDs to collect app metrics from.
# app_include = []
# app_exclude = []
## Maximum concurrent connections to the cluster.
# max_connections = 10
## Maximum time to receive a response from cluster.
# response_timeout = "20s"
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
## If false, skip chain & host verification
# insecure_skip_verify = true
## Recommended filtering to reduce series cardinality.
# [inputs.dcos.tagdrop]
# path = ["/var/lib/mesos/slave/slaves/*"]
```
#### Enterprise Authentication
When using Enterprise DC/OS, it is recommended to use a service account to
authenticate with the cluster.
The plugin requires the following permissions:
```
dcos:adminrouter:ops:system-metrics full
dcos:adminrouter:ops:mesos full
```
Follow the directions to [create a service account and assign permissions](https://docs.mesosphere.com/1.10/security/service-auth/custom-service-auth/).
Quick configuration using the Enterprise CLI:
```
dcos security org service-accounts keypair telegraf-sa-key.pem telegraf-sa-cert.pem
dcos security org service-accounts create -p telegraf-sa-cert.pem -d "Telegraf DC/OS input plugin" telegraf
dcos security org users grant telegraf dcos:adminrouter:ops:system-metrics full
dcos security org users grant telegraf dcos:adminrouter:ops:mesos full
```
#### Open Source Authentication
Open Source DC/OS does not provide service accounts. Instead you can use one
of the following options:
1. [Disable authentication](https://dcos.io/docs/1.10/security/managing-authentication/#authentication-opt-out)
2. Use the `token_file` parameter to read an authentication token from a file.
The `token_file` can then be kept current by using the [dcos cli] to log in periodically.
The CLI can log in for at most XXX days; you will need to ensure the CLI
performs a new login before this time expires.
```
dcos auth login --username foo --password bar
dcos config show core.dcos_acs_token > ~/.dcos/token
```
Another option to create a `token_file` is to generate a token using the
cluster secret. This will allow you to set the expiration date manually or
even create a never-expiring token. However, if the cluster secret or the
token is compromised it cannot be revoked, and recovery may require a full
reinstall of the cluster. For more information on this technique, see
[this blog post](https://medium.com/@richardgirges/authenticating-open-source-dc-os-with-third-party-services-125fa33a5add).
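
For illustration only, a sketch of that technique using the same `jwt-go` library this plugin already depends on, assuming the cluster accepts HS256 tokens signed with the cluster secret and carrying `uid`/`exp` claims; consult the linked post for the exact claim set your cluster expects:

```go
package main

import (
	"fmt"
	"time"

	jwt "github.com/dgrijalva/jwt-go"
)

func main() {
	secret := []byte("<cluster secret>") // placeholder, never commit a real secret
	token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{
		"uid": "telegraf",
		// Distant expiry; per the linked post, the expiration can be set
		// manually or omitted entirely for a never-expiring token.
		"exp": time.Now().Add(365 * 24 * time.Hour).Unix(),
	})
	signed, err := token.SignedString(secret)
	if err != nil {
		panic(err)
	}
	fmt.Println(signed) // write this string to the file named by token_file
}
```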
### Metrics:
Please consult the [Metrics Reference](https://docs.mesosphere.com/1.10/metrics/reference/)
for details on how to interpret each field.
- dcos_node
- tags:
- cluster
- hostname
- path (filesystem fields only)
- interface (network fields only)
- fields:
- system_uptime (float)
- cpu_cores (float)
- cpu_total (float)
- cpu_user (float)
- cpu_system (float)
- cpu_idle (float)
- cpu_wait (float)
- load_1min (float)
- load_5min (float)
- load_15min (float)
- filesystem_capacity_total_bytes (int)
- filesystem_capacity_used_bytes (int)
- filesystem_capacity_free_bytes (int)
- filesystem_inode_total (float)
- filesystem_inode_used (float)
- filesystem_inode_free (float)
- memory_total_bytes (int)
- memory_free_bytes (int)
- memory_buffers_bytes (int)
- memory_cached_bytes (int)
- swap_total_bytes (int)
- swap_free_bytes (int)
- swap_used_bytes (int)
- network_in_bytes (int)
- network_out_bytes (int)
- network_in_packets (float)
- network_out_packets (float)
- network_in_dropped (float)
- network_out_dropped (float)
- network_in_errors (float)
- network_out_errors (float)
- process_count (float)
- dcos_container
- tags:
- cluster
- hostname
- container_id
- task_name
- fields:
- cpus_limit (float)
- cpus_system_time (float)
- cpus_throttled_time (float)
- cpus_user_time (float)
- disk_limit_bytes (int)
- disk_used_bytes (int)
- mem_limit_bytes (int)
- mem_total_bytes (int)
- net_rx_bytes (int)
- net_rx_dropped (float)
- net_rx_errors (float)
- net_rx_packets (float)
- net_tx_bytes (int)
- net_tx_dropped (float)
- net_tx_errors (float)
- net_tx_packets (float)
- dcos_app
- tags:
- cluster
- hostname
- container_id
- task_name
- fields:
- fields are application specific
### Example Output:
```
dcos_node,cluster=enterprise,hostname=192.168.122.18,path=/boot filesystem_capacity_free_bytes=918188032i,filesystem_capacity_total_bytes=1063256064i,filesystem_capacity_used_bytes=145068032i,filesystem_inode_free=523958,filesystem_inode_total=524288,filesystem_inode_used=330 1511859222000000000
dcos_node,cluster=enterprise,hostname=192.168.122.18,interface=dummy0 network_in_bytes=0i,network_in_dropped=0,network_in_errors=0,network_in_packets=0,network_out_bytes=0i,network_out_dropped=0,network_out_errors=0,network_out_packets=0 1511859222000000000
dcos_node,cluster=enterprise,hostname=192.168.122.18,interface=docker0 network_in_bytes=0i,network_in_dropped=0,network_in_errors=0,network_in_packets=0,network_out_bytes=0i,network_out_dropped=0,network_out_errors=0,network_out_packets=0 1511859222000000000
dcos_node,cluster=enterprise,hostname=192.168.122.18 cpu_cores=2,cpu_idle=81.62,cpu_system=4.19,cpu_total=13.670000000000002,cpu_user=9.48,cpu_wait=0,load_15min=0.7,load_1min=0.22,load_5min=0.6,memory_buffers_bytes=970752i,memory_cached_bytes=1830473728i,memory_free_bytes=1178636288i,memory_total_bytes=3975073792i,process_count=198,swap_free_bytes=859828224i,swap_total_bytes=859828224i,swap_used_bytes=0i,system_uptime=18874 1511859222000000000
dcos_node,cluster=enterprise,hostname=192.168.122.18,interface=lo network_in_bytes=1090992450i,network_in_dropped=0,network_in_errors=0,network_in_packets=1546938,network_out_bytes=1090992450i,network_out_dropped=0,network_out_errors=0,network_out_packets=1546938 1511859222000000000
dcos_node,cluster=enterprise,hostname=192.168.122.18,path=/ filesystem_capacity_free_bytes=1668378624i,filesystem_capacity_total_bytes=6641680384i,filesystem_capacity_used_bytes=4973301760i,filesystem_inode_free=3107856,filesystem_inode_total=3248128,filesystem_inode_used=140272 1511859222000000000
dcos_node,cluster=enterprise,hostname=192.168.122.18,interface=minuteman network_in_bytes=0i,network_in_dropped=0,network_in_errors=0,network_in_packets=0,network_out_bytes=210i,network_out_dropped=0,network_out_errors=0,network_out_packets=3 1511859222000000000
dcos_node,cluster=enterprise,hostname=192.168.122.18,interface=eth0 network_in_bytes=539886216i,network_in_dropped=1,network_in_errors=0,network_in_packets=979808,network_out_bytes=112395836i,network_out_dropped=0,network_out_errors=0,network_out_packets=891239 1511859222000000000
dcos_node,cluster=enterprise,hostname=192.168.122.18,interface=spartan network_in_bytes=0i,network_in_dropped=0,network_in_errors=0,network_in_packets=0,network_out_bytes=210i,network_out_dropped=0,network_out_errors=0,network_out_packets=3 1511859222000000000
dcos_node,cluster=enterprise,hostname=192.168.122.18,path=/var/lib/docker/overlay filesystem_capacity_free_bytes=1668378624i,filesystem_capacity_total_bytes=6641680384i,filesystem_capacity_used_bytes=4973301760i,filesystem_inode_free=3107856,filesystem_inode_total=3248128,filesystem_inode_used=140272 1511859222000000000
dcos_node,cluster=enterprise,hostname=192.168.122.18,interface=vtep1024 network_in_bytes=0i,network_in_dropped=0,network_in_errors=0,network_in_packets=0,network_out_bytes=0i,network_out_dropped=0,network_out_errors=0,network_out_packets=0 1511859222000000000
dcos_node,cluster=enterprise,hostname=192.168.122.18,path=/var/lib/docker/plugins filesystem_capacity_free_bytes=1668378624i,filesystem_capacity_total_bytes=6641680384i,filesystem_capacity_used_bytes=4973301760i,filesystem_inode_free=3107856,filesystem_inode_total=3248128,filesystem_inode_used=140272 1511859222000000000
dcos_node,cluster=enterprise,hostname=192.168.122.18,interface=d-dcos network_in_bytes=0i,network_in_dropped=0,network_in_errors=0,network_in_packets=0,network_out_bytes=0i,network_out_dropped=0,network_out_errors=0,network_out_packets=0 1511859222000000000
dcos_app,cluster=enterprise,container_id=9a78d34a-3bbf-467e-81cf-a57737f154ee,hostname=192.168.122.18 container_received_bytes_per_sec=0,container_throttled_bytes_per_sec=0 1511859222000000000
dcos_container,cluster=enterprise,container_id=cbf19b77-3b8d-4bcf-b81f-824b67279629,hostname=192.168.122.18 cpus_limit=0.3,cpus_system_time=307.31,cpus_throttled_time=102.029930607,cpus_user_time=268.57,disk_limit_bytes=268435456i,disk_used_bytes=30953472i,mem_limit_bytes=570425344i,mem_total_bytes=13316096i,net_rx_bytes=0i,net_rx_dropped=0,net_rx_errors=0,net_rx_packets=0,net_tx_bytes=0i,net_tx_dropped=0,net_tx_errors=0,net_tx_packets=0 1511859222000000000
dcos_app,cluster=enterprise,container_id=cbf19b77-3b8d-4bcf-b81f-824b67279629,hostname=192.168.122.18 container_received_bytes_per_sec=0,container_throttled_bytes_per_sec=0 1511859222000000000
dcos_container,cluster=enterprise,container_id=5725e219-f66e-40a8-b3ab-519d85f4c4dc,hostname=192.168.122.18,task_name=hello-world cpus_limit=0.6,cpus_system_time=25.6,cpus_throttled_time=327.977109217,cpus_user_time=566.54,disk_limit_bytes=0i,disk_used_bytes=0i,mem_limit_bytes=1107296256i,mem_total_bytes=335941632i,net_rx_bytes=0i,net_rx_dropped=0,net_rx_errors=0,net_rx_packets=0,net_tx_bytes=0i,net_tx_dropped=0,net_tx_errors=0,net_tx_packets=0 1511859222000000000
dcos_app,cluster=enterprise,container_id=5725e219-f66e-40a8-b3ab-519d85f4c4dc,hostname=192.168.122.18 container_received_bytes_per_sec=0,container_throttled_bytes_per_sec=0 1511859222000000000
dcos_app,cluster=enterprise,container_id=c76e1488-4fb7-4010-a4cf-25725f8173f9,hostname=192.168.122.18 container_received_bytes_per_sec=0,container_throttled_bytes_per_sec=0 1511859222000000000
dcos_container,cluster=enterprise,container_id=cbe0b2f9-061f-44ac-8f15-4844229e8231,hostname=192.168.122.18,task_name=telegraf cpus_limit=0.2,cpus_system_time=8.109999999,cpus_throttled_time=93.183916045,cpus_user_time=17.97,disk_limit_bytes=0i,disk_used_bytes=0i,mem_limit_bytes=167772160i,mem_total_bytes=0i,net_rx_bytes=0i,net_rx_dropped=0,net_rx_errors=0,net_rx_packets=0,net_tx_bytes=0i,net_tx_dropped=0,net_tx_errors=0,net_tx_packets=0 1511859222000000000
dcos_container,cluster=enterprise,container_id=b64115de-3d2a-431d-a805-76e7c46453f1,hostname=192.168.122.18 cpus_limit=0.2,cpus_system_time=2.69,cpus_throttled_time=20.064861214,cpus_user_time=6.56,disk_limit_bytes=268435456i,disk_used_bytes=29360128i,mem_limit_bytes=297795584i,mem_total_bytes=13733888i,net_rx_bytes=0i,net_rx_dropped=0,net_rx_errors=0,net_rx_packets=0,net_tx_bytes=0i,net_tx_dropped=0,net_tx_errors=0,net_tx_packets=0 1511859222000000000
dcos_app,cluster=enterprise,container_id=b64115de-3d2a-431d-a805-76e7c46453f1,hostname=192.168.122.18 container_received_bytes_per_sec=0,container_throttled_bytes_per_sec=0 1511859222000000000
```


@@ -0,0 +1,332 @@
package dcos
import (
"bytes"
"context"
"crypto/tls"
"encoding/json"
"fmt"
"net/http"
"net/url"
"time"
jwt "github.com/dgrijalva/jwt-go"
)
const (
// How long to stay logged in for
loginDuration = 65 * time.Minute
)
// Client is an interface for communicating with the DC/OS API.
type Client interface {
SetToken(token string)
Login(ctx context.Context, sa *ServiceAccount) (*AuthToken, error)
GetSummary(ctx context.Context) (*Summary, error)
GetContainers(ctx context.Context, node string) ([]Container, error)
GetNodeMetrics(ctx context.Context, node string) (*Metrics, error)
GetContainerMetrics(ctx context.Context, node, container string) (*Metrics, error)
GetAppMetrics(ctx context.Context, node, container string) (*Metrics, error)
}
type APIError struct {
StatusCode int
Title string
Description string
}
// Login is request data for logging in.
type Login struct {
UID string `json:"uid"`
Exp int64 `json:"exp"`
Token string `json:"token"`
}
// LoginError is the response when login fails.
type LoginError struct {
Title string `json:"title"`
Description string `json:"description"`
}
// LoginAuth is the response to a successful login.
type LoginAuth struct {
Token string `json:"token"`
}
// Slave is a node in the cluster.
type Slave struct {
ID string `json:"id"`
}
// Summary provides high level cluster wide information.
type Summary struct {
Cluster string
Slaves []Slave
}
// Container is a container on a node.
type Container struct {
ID string
}
type DataPoint struct {
Name string `json:"name"`
Tags map[string]string `json:"tags"`
Unit string `json:"unit"`
Value float64 `json:"value"`
}
// Metrics are the DCOS metrics
type Metrics struct {
Datapoints []DataPoint `json:"datapoints"`
Dimensions map[string]interface{} `json:"dimensions"`
}
// AuthToken is the authentication token.
type AuthToken struct {
Text string
Expire time.Time
}
// ClusterClient is a Client that uses the cluster URL.
type ClusterClient struct {
clusterURL *url.URL
httpClient *http.Client
credentials *Credentials
token string
semaphore chan struct{}
}
type claims struct {
UID string `json:"uid"`
jwt.StandardClaims
}
func (e APIError) Error() string {
if e.Description != "" {
return fmt.Sprintf("%s: %s", e.Title, e.Description)
}
return e.Title
}
func NewClusterClient(
clusterURL *url.URL,
timeout time.Duration,
maxConns int,
tlsConfig *tls.Config,
) *ClusterClient {
httpClient := &http.Client{
Transport: &http.Transport{
MaxIdleConns: maxConns,
TLSClientConfig: tlsConfig,
},
Timeout: timeout,
}
semaphore := make(chan struct{}, maxConns)
c := &ClusterClient{
clusterURL: clusterURL,
httpClient: httpClient,
semaphore: semaphore,
}
return c
}
func (c *ClusterClient) SetToken(token string) {
c.token = token
}
func (c *ClusterClient) Login(ctx context.Context, sa *ServiceAccount) (*AuthToken, error) {
token, err := c.createLoginToken(sa)
if err != nil {
return nil, err
}
exp := time.Now().Add(loginDuration)
body := &Login{
UID: sa.AccountID,
Exp: exp.Unix(),
Token: token,
}
octets, err := json.Marshal(body)
if err != nil {
return nil, err
}
req, err := http.NewRequest("POST", c.url("/acs/api/v1/auth/login"), bytes.NewBuffer(octets))
if err != nil {
return nil, err
}
req.Header.Add("Content-Type", "application/json")
req = req.WithContext(ctx)
resp, err := c.httpClient.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
if resp.StatusCode == http.StatusOK {
auth := &LoginAuth{}
dec := json.NewDecoder(resp.Body)
err = dec.Decode(auth)
if err != nil {
return nil, err
}
token := &AuthToken{
Text: auth.Token,
Expire: exp,
}
return token, nil
}
loginError := &LoginError{}
dec := json.NewDecoder(resp.Body)
err = dec.Decode(loginError)
if err != nil {
err := &APIError{
StatusCode: resp.StatusCode,
Title: resp.Status,
}
return nil, err
}
err = &APIError{
StatusCode: resp.StatusCode,
Title: loginError.Title,
Description: loginError.Description,
}
return nil, err
}
func (c *ClusterClient) GetSummary(ctx context.Context) (*Summary, error) {
summary := &Summary{}
err := c.doGet(ctx, c.url("/mesos/master/state-summary"), summary)
if err != nil {
return nil, err
}
return summary, nil
}
func (c *ClusterClient) GetContainers(ctx context.Context, node string) ([]Container, error) {
list := []string{}
path := fmt.Sprintf("/system/v1/agent/%s/metrics/v0/containers", node)
err := c.doGet(ctx, c.url(path), &list)
if err != nil {
return nil, err
}
containers := make([]Container, 0, len(list))
for _, c := range list {
containers = append(containers, Container{ID: c})
}
return containers, nil
}
func (c *ClusterClient) getMetrics(ctx context.Context, url string) (*Metrics, error) {
metrics := &Metrics{}
err := c.doGet(ctx, url, metrics)
if err != nil {
return nil, err
}
return metrics, nil
}
func (c *ClusterClient) GetNodeMetrics(ctx context.Context, node string) (*Metrics, error) {
path := fmt.Sprintf("/system/v1/agent/%s/metrics/v0/node", node)
return c.getMetrics(ctx, c.url(path))
}
func (c *ClusterClient) GetContainerMetrics(ctx context.Context, node, container string) (*Metrics, error) {
path := fmt.Sprintf("/system/v1/agent/%s/metrics/v0/containers/%s", node, container)
return c.getMetrics(ctx, c.url(path))
}
func (c *ClusterClient) GetAppMetrics(ctx context.Context, node, container string) (*Metrics, error) {
path := fmt.Sprintf("/system/v1/agent/%s/metrics/v0/containers/%s/app", node, container)
return c.getMetrics(ctx, c.url(path))
}
func createGetRequest(url string, token string) (*http.Request, error) {
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return nil, err
}
if token != "" {
req.Header.Add("Authorization", "token="+token)
}
req.Header.Add("Accept", "application/json")
return req, nil
}
func (c *ClusterClient) doGet(ctx context.Context, url string, v interface{}) error {
req, err := createGetRequest(url, c.token)
if err != nil {
return err
}
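// Acquire a connection slot (c.semaphore has capacity maxConns), or give
// up early if the caller's context is cancelled; the slot is released in
// the deferred function below once the response body has been closed.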
select {
case c.semaphore <- struct{}{}:
break
case <-ctx.Done():
return ctx.Err()
}
resp, err := c.httpClient.Do(req.WithContext(ctx))
if err != nil {
<-c.semaphore
return err
}
defer func() {
resp.Body.Close()
<-c.semaphore
}()
// Clear invalid token if unauthorized
if resp.StatusCode == http.StatusUnauthorized {
c.token = ""
}
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
return &APIError{
StatusCode: resp.StatusCode,
Title: resp.Status,
}
}
if resp.StatusCode == http.StatusNoContent {
return nil
}
err = json.NewDecoder(resp.Body).Decode(v)
return err
}
func (c *ClusterClient) url(path string) string {
// Copy the struct before setting Path so concurrent requests do not race
// on the shared clusterURL.
url := *c.clusterURL
url.Path = path
return url.String()
}
func (c *ClusterClient) createLoginToken(sa *ServiceAccount) (string, error) {
token := jwt.NewWithClaims(jwt.SigningMethodRS256, claims{
UID: sa.AccountID,
StandardClaims: jwt.StandardClaims{
// How long we have to log in with this token
ExpiresAt: time.Now().Add(5 * time.Minute).Unix(),
},
})
return token.SignedString(sa.PrivateKey)
}
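
Putting the client together, a sketch of the intended call sequence; this is assumed to live inside the `dcos` package so it can reach the types above, and it is not part of the committed code:

```go
// listNodes logs in with a service account, stores the session token on
// the client, and prints the ID of every agent in the cluster summary.
func listNodes(clusterURL *url.URL, sa *ServiceAccount) error {
	client := NewClusterClient(clusterURL, 20*time.Second, 10, nil)
	ctx := context.Background()

	auth, err := client.Login(ctx, sa)
	if err != nil {
		return err
	}
	client.SetToken(auth.Text)

	summary, err := client.GetSummary(ctx)
	if err != nil {
		return err
	}
	for _, slave := range summary.Slaves {
		fmt.Println(slave.ID)
	}
	return nil
}
```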


@@ -0,0 +1,232 @@
package dcos
import (
"context"
"fmt"
"net/http"
"net/http/httptest"
"net/url"
"testing"
jwt "github.com/dgrijalva/jwt-go"
"github.com/stretchr/testify/require"
)
const (
privateKey = `-----BEGIN RSA PRIVATE KEY-----
MIICXQIBAAKBgQCwlGyzVp9cqtwiNCgCnaR0kilPZhr4xFBcnXxvQ8/uzOHaWKxj
XWR38cKR3gPh5+4iSmzMdo3HDJM5ks6imXGnp+LPOA5iNewnpLNs7UxA2arwKH/6
4qIaAXAtf5jE46wZIMgc2EW9wGL3dxC0JY8EXPpBFB/3J8gADkorFR8lwwIDAQAB
AoGBAJaFHxfMmjHK77U0UnrQWFSKFy64cftmlL4t/Nl3q7L68PdIKULWZIMeEWZ4
I0UZiFOwr4em83oejQ1ByGSwekEuiWaKUI85IaHfcbt+ogp9hY/XbOEo56OPQUAd
bEZv1JqJOqta9Ug1/E1P9LjEEyZ5F5ubx7813rxAE31qKtKJAkEA1zaMlCWIr+Rj
hGvzv5rlHH3wbOB4kQFXO4nqj3J/ttzR5QiJW24STMDcbNngFlVcDVju56LrNTiD
dPh9qvl7nwJBANILguR4u33OMksEZTYB7nQZSurqXsq6382zH7pTl29ANQTROHaM
PKC8dnDWq8RGTqKuvWblIzzGIKqIMovZo10CQC96T0UXirITFolOL3XjvAuvFO1Q
EAkdXJs77805m0dCK+P1IChVfiAEpBw3bKJArpAbQIlFfdI953JUp5SieU0CQEub
BSSEKMjh/cxu6peEHnb/262vayuCFKkQPu1sxWewLuVrAe36EKCy9dcsDmv5+rgo
Odjdxc9Madm4aKlaT6kCQQCpAgeblDrrxTrNQ+Typzo37PlnQrvI+0EceAUuJ72G
P0a+YZUeHNRqT2pPN9lMTAZGGi3CtcF2XScbLNEBeXge
-----END RSA PRIVATE KEY-----`
)
func TestLogin(t *testing.T) {
var tests = []struct {
name string
responseCode int
responseBody string
expectedError error
expectedToken string
}{
{
name: "Login successful",
responseCode: 200,
responseBody: `{"token": "XXX.YYY.ZZZ"}`,
expectedError: nil,
expectedToken: "XXX.YYY.ZZZ",
},
{
name: "Unauthorized Error",
responseCode: http.StatusUnauthorized,
responseBody: `{"title": "x", "description": "y"}`,
expectedError: &APIError{http.StatusUnauthorized, "x", "y"},
expectedToken: "",
},
}
key, err := jwt.ParseRSAPrivateKeyFromPEM([]byte(privateKey))
require.NoError(t, err)
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(tt.responseCode)
fmt.Fprintln(w, tt.responseBody)
})
ts := httptest.NewServer(handler)
u, err := url.Parse(ts.URL)
require.NoError(t, err)
ctx := context.Background()
sa := &ServiceAccount{
AccountID: "telegraf",
PrivateKey: key,
}
client := NewClusterClient(u, defaultResponseTimeout, 1, nil)
auth, err := client.Login(ctx, sa)
require.Equal(t, tt.expectedError, err)
if tt.expectedToken != "" {
require.Equal(t, tt.expectedToken, auth.Text)
} else {
require.Nil(t, auth)
}
ts.Close()
})
}
}
func TestGetSummary(t *testing.T) {
var tests = []struct {
name string
responseCode int
responseBody string
expectedValue *Summary
expectedError error
}{
{
name: "No nodes",
responseCode: 200,
responseBody: `{"cluster": "a", "slaves": []}`,
expectedValue: &Summary{Cluster: "a", Slaves: []Slave{}},
expectedError: nil,
},
{
name: "Unauthorized Error",
responseCode: http.StatusUnauthorized,
responseBody: `<html></html>`,
expectedValue: nil,
expectedError: &APIError{StatusCode: http.StatusUnauthorized, Title: "401 Unauthorized"},
},
{
name: "Has nodes",
responseCode: 200,
responseBody: `{"cluster": "a", "slaves": [{"id": "a"}, {"id": "b"}]}`,
expectedValue: &Summary{
Cluster: "a",
Slaves: []Slave{
Slave{ID: "a"},
Slave{ID: "b"},
},
},
expectedError: nil,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// check the path
w.WriteHeader(tt.responseCode)
fmt.Fprintln(w, tt.responseBody)
})
ts := httptest.NewServer(handler)
u, err := url.Parse(ts.URL)
require.NoError(t, err)
ctx := context.Background()
client := NewClusterClient(u, defaultResponseTimeout, 1, nil)
summary, err := client.GetSummary(ctx)
require.Equal(t, tt.expectedError, err)
require.Equal(t, tt.expectedValue, summary)
ts.Close()
})
}
}
func TestGetNodeMetrics(t *testing.T) {
var tests = []struct {
name string
responseCode int
responseBody string
expectedValue *Metrics
expectedError error
}{
{
name: "Empty Body",
responseCode: 200,
responseBody: `{}`,
expectedValue: &Metrics{},
expectedError: nil,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// check the path
w.WriteHeader(tt.responseCode)
fmt.Fprintln(w, tt.responseBody)
})
ts := httptest.NewServer(handler)
u, err := url.Parse(ts.URL)
require.NoError(t, err)
ctx := context.Background()
client := NewClusterClient(u, defaultResponseTimeout, 1, nil)
m, err := client.GetNodeMetrics(ctx, "foo")
require.Equal(t, tt.expectedError, err)
require.Equal(t, tt.expectedValue, m)
ts.Close()
})
}
}
func TestGetContainerMetrics(t *testing.T) {
var tests = []struct {
name string
responseCode int
responseBody string
expectedValue *Metrics
expectedError error
}{
{
name: "204 No Contents",
responseCode: 204,
responseBody: ``,
expectedValue: &Metrics{},
expectedError: nil,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// check the path
w.WriteHeader(tt.responseCode)
fmt.Fprintln(w, tt.responseBody)
})
ts := httptest.NewServer(handler)
u, err := url.Parse(ts.URL)
require.NoError(t, err)
ctx := context.Background()
client := NewClusterClient(u, defaultResponseTimeout, 1, nil)
m, err := client.GetContainerMetrics(ctx, "foo", "bar")
require.Equal(t, tt.expectedError, err)
require.Equal(t, tt.expectedValue, m)
ts.Close()
})
}
}


@@ -0,0 +1,72 @@
package dcos
import (
"context"
"crypto/rsa"
"fmt"
"io/ioutil"
"strings"
"time"
"unicode/utf8"
)
const (
// How long before expiration to renew token
relogDuration = 5 * time.Minute
)
type Credentials interface {
Token(ctx context.Context, client Client) (string, error)
IsExpired() bool
}
type ServiceAccount struct {
AccountID string
PrivateKey *rsa.PrivateKey
auth *AuthToken
}
type TokenCreds struct {
Path string
}
type NullCreds struct {
}
func (c *ServiceAccount) Token(ctx context.Context, client Client) (string, error) {
auth, err := client.Login(ctx, c)
if err != nil {
return "", err
}
c.auth = auth
return auth.Text, nil
}
func (c *ServiceAccount) IsExpired() bool {
// Expired when no token has been acquired yet, or when the token is
// within relogDuration of its expiry time.
return c.auth == nil || c.auth.Text == "" || time.Now().After(c.auth.Expire.Add(-relogDuration))
}
func (c *TokenCreds) Token(ctx context.Context, client Client) (string, error) {
octets, err := ioutil.ReadFile(c.Path)
if err != nil {
return "", fmt.Errorf("Error reading token file %q: %s", c.Path, err)
}
if !utf8.Valid(octets) {
return "", fmt.Errorf("Token file does not contain utf-8 encoded text: %s", c.Path)
}
token := strings.TrimSpace(string(octets))
return token, nil
}
func (c *TokenCreds) IsExpired() bool {
return true
}
func (c *NullCreds) Token(ctx context.Context, client Client) (string, error) {
return "", nil
}
func (c *NullCreds) IsExpired() bool {
return true
}

plugins/inputs/dcos/dcos.go

@@ -0,0 +1,435 @@
package dcos
import (
"context"
"io/ioutil"
"net/url"
"sort"
"strings"
"sync"
"time"
jwt "github.com/dgrijalva/jwt-go"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/filter"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/inputs"
)
const (
defaultMaxConnections = 10
defaultResponseTimeout = 20 * time.Second
)
var (
nodeDimensions = []string{
"hostname",
"path",
"interface",
}
containerDimensions = []string{
"hostname",
"container_id",
"task_name",
}
appDimensions = []string{
"hostname",
"container_id",
"task_name",
}
)
type DCOS struct {
ClusterURL string `toml:"cluster_url"`
ServiceAccountID string `toml:"service_account_id"`
ServiceAccountPrivateKey string
TokenFile string
NodeInclude []string
NodeExclude []string
ContainerInclude []string
ContainerExclude []string
AppInclude []string
AppExclude []string
MaxConnections int
ResponseTimeout internal.Duration
SSLCA string `toml:"ssl_ca"`
SSLCert string `toml:"ssl_cert"`
SSLKey string `toml:"ssl_key"`
InsecureSkipVerify bool `toml:"insecure_skip_verify"`
client Client
creds Credentials
initialized bool
nodeFilter filter.Filter
containerFilter filter.Filter
appFilter filter.Filter
taskNameFilter filter.Filter
}
func (d *DCOS) Description() string {
return "Input plugin for DC/OS metrics"
}
var sampleConfig = `
## The DC/OS cluster URL.
cluster_url = "https://dcos-ee-master-1"
## The ID of the service account.
service_account_id = "telegraf"
## The private key file for the service account.
service_account_private_key = "/etc/telegraf/telegraf-sa-key.pem"
## Path containing login token. If set, will read on every gather.
# token_file = "/home/dcos/.dcos/token"
## In all filter options, if both include and exclude are empty, all items
## will be collected. Arrays may contain glob patterns.
##
## Node IDs to collect metrics from. If a node is excluded, no metrics will
## be collected for its containers or apps.
# node_include = []
# node_exclude = []
## Container IDs to collect container metrics from.
# container_include = []
# container_exclude = []
## Container IDs to collect app metrics from.
# app_include = []
# app_exclude = []
## Maximum concurrent connections to the cluster.
# max_connections = 10
## Maximum time to wait for a response from the cluster.
# response_timeout = "20s"
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
## If true, skip chain & host verification
# insecure_skip_verify = true
## Recommended filtering to reduce series cardinality.
# [inputs.dcos.tagdrop]
# path = ["/var/lib/mesos/slave/slaves/*"]
`
func (d *DCOS) SampleConfig() string {
return sampleConfig
}
func (d *DCOS) Gather(acc telegraf.Accumulator) error {
err := d.init()
if err != nil {
return err
}
ctx := context.Background()
token, err := d.creds.Token(ctx, d.client)
if err != nil {
return err
}
d.client.SetToken(token)
summary, err := d.client.GetSummary(ctx)
if err != nil {
return err
}
var wg sync.WaitGroup
for _, node := range summary.Slaves {
wg.Add(1)
go func(node string) {
defer wg.Done()
d.GatherNode(ctx, acc, summary.Cluster, node)
}(node.ID)
}
wg.Wait()
return nil
}
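// GatherNode collects metrics for a single node, fetching the node-level
// metrics concurrently with the node's container metrics.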
func (d *DCOS) GatherNode(ctx context.Context, acc telegraf.Accumulator, cluster, node string) {
if !d.nodeFilter.Match(node) {
return
}
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
m, err := d.client.GetNodeMetrics(ctx, node)
if err != nil {
acc.AddError(err)
return
}
d.addNodeMetrics(acc, cluster, m)
}()
d.GatherContainers(ctx, acc, cluster, node)
wg.Wait()
}
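// GatherContainers collects container and app metrics for each container on
// the node that passes the respective filters. A 404 from a metrics endpoint
// is ignored, as the container may have gone away between listing and
// collection.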
func (d *DCOS) GatherContainers(ctx context.Context, acc telegraf.Accumulator, cluster, node string) {
containers, err := d.client.GetContainers(ctx, node)
if err != nil {
acc.AddError(err)
return
}
var wg sync.WaitGroup
for _, container := range containers {
if d.containerFilter.Match(container.ID) {
wg.Add(1)
go func(container string) {
defer wg.Done()
m, err := d.client.GetContainerMetrics(ctx, node, container)
if err != nil {
if err, ok := err.(APIError); ok && err.StatusCode == 404 {
return
}
acc.AddError(err)
return
}
d.addContainerMetrics(acc, cluster, m)
}(container.ID)
}
if d.appFilter.Match(container.ID) {
wg.Add(1)
go func(container string) {
defer wg.Done()
m, err := d.client.GetAppMetrics(ctx, node, container)
if err != nil {
if err, ok := err.(APIError); ok && err.StatusCode == 404 {
return
}
acc.AddError(err)
return
}
d.addAppMetrics(acc, cluster, m)
}(container.ID)
}
}
wg.Wait()
}
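// point accumulates the tags, labels, and fields that make up a single
// telegraf metric.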
type point struct {
tags map[string]string
labels map[string]string
fields map[string]interface{}
}
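// createPoints groups datapoints by their sorted tag set, so datapoints that
// share identical tags become fields of one point; dimension values are then
// copied onto every point as tags or labels.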
func (d *DCOS) createPoints(m *Metrics) []*point {
points := make(map[string]*point)
for _, dp := range m.Datapoints {
fieldKey := strings.Replace(dp.Name, ".", "_", -1)
tags := dp.Tags
if tags == nil {
tags = make(map[string]string)
}
if dp.Unit == "bytes" && !strings.HasSuffix(fieldKey, "_bytes") {
fieldKey = fieldKey + "_bytes"
}
fieldKey = strings.TrimPrefix(fieldKey, "dcos_metrics_module_")
tagset := make([]string, 0, len(tags))
for k, v := range tags {
tagset = append(tagset, k+"="+v)
}
sort.Strings(tagset)
seriesKey := strings.Join(tagset, ",")
p, ok := points[seriesKey]
if !ok {
p = &point{}
p.tags = tags
p.labels = make(map[string]string)
p.fields = make(map[string]interface{})
points[seriesKey] = p
}
if dp.Unit == "bytes" {
p.fields[fieldKey] = int64(dp.Value)
} else {
p.fields[fieldKey] = dp.Value
}
}
results := make([]*point, 0, len(points))
for _, p := range points {
for k, v := range m.Dimensions {
switch v := v.(type) {
case string:
p.tags[k] = v
case map[string]string:
if k == "labels" {
for k, v := range v {
p.labels[k] = v
}
}
}
}
results = append(results, p)
}
return results
}
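// addMetrics emits one metric per point, tagged with the cluster name, any
// requested dimension tags, and all labels.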
func (d *DCOS) addMetrics(acc telegraf.Accumulator, cluster, mname string, m *Metrics, tagDimensions []string) {
tm := time.Now()
points := d.createPoints(m)
for _, p := range points {
tags := make(map[string]string)
tags["cluster"] = cluster
for _, tagkey := range tagDimensions {
v, ok := p.tags[tagkey]
if ok {
tags[tagkey] = v
}
}
for k, v := range p.labels {
tags[k] = v
}
acc.AddFields(mname, p.fields, tags, tm)
}
}
func (d *DCOS) addNodeMetrics(acc telegraf.Accumulator, cluster string, m *Metrics) {
d.addMetrics(acc, cluster, "dcos_node", m, nodeDimensions)
}
func (d *DCOS) addContainerMetrics(acc telegraf.Accumulator, cluster string, m *Metrics) {
d.addMetrics(acc, cluster, "dcos_container", m, containerDimensions)
}
func (d *DCOS) addAppMetrics(acc telegraf.Accumulator, cluster string, m *Metrics) {
d.addMetrics(acc, cluster, "dcos_app", m, appDimensions)
}
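// init lazily builds the filters, client, and credentials on first use.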
func (d *DCOS) init() error {
if !d.initialized {
err := d.createFilters()
if err != nil {
return err
}
if d.client == nil {
client, err := d.createClient()
if err != nil {
return err
}
d.client = client
}
if d.creds == nil {
creds, err := d.createCredentials()
if err != nil {
return err
}
d.creds = creds
}
d.initialized = true
}
return nil
}
func (d *DCOS) createClient() (Client, error) {
tlsCfg, err := internal.GetTLSConfig(
d.SSLCert, d.SSLKey, d.SSLCA, d.InsecureSkipVerify)
if err != nil {
return nil, err
}
u, err := url.Parse(d.ClusterURL)
if err != nil {
return nil, err
}
client := NewClusterClient(
u,
d.ResponseTimeout.Duration,
d.MaxConnections,
tlsCfg,
)
return client, nil
}
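// createCredentials picks the auth strategy: a service account when both ID
// and private key are configured, a token file when one is set, and
// otherwise unauthenticated access.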
func (d *DCOS) createCredentials() (Credentials, error) {
if d.ServiceAccountID != "" && d.ServiceAccountPrivateKey != "" {
bs, err := ioutil.ReadFile(d.ServiceAccountPrivateKey)
if err != nil {
return nil, err
}
privateKey, err := jwt.ParseRSAPrivateKeyFromPEM(bs)
if err != nil {
return nil, err
}
creds := &ServiceAccount{
AccountID: d.ServiceAccountID,
PrivateKey: privateKey,
}
return creds, nil
} else if d.TokenFile != "" {
creds := &TokenCreds{
Path: d.TokenFile,
}
return creds, nil
} else {
creds := &NullCreds{}
return creds, nil
}
}
func (d *DCOS) createFilters() error {
var err error
d.nodeFilter, err = filter.NewIncludeExcludeFilter(
d.NodeInclude, d.NodeExclude)
if err != nil {
return err
}
d.containerFilter, err = filter.NewIncludeExcludeFilter(
d.ContainerInclude, d.ContainerExclude)
if err != nil {
return err
}
d.appFilter, err = filter.NewIncludeExcludeFilter(
d.AppInclude, d.AppExclude)
if err != nil {
return err
}
return nil
}
func init() {
inputs.Add("dcos", func() telegraf.Input {
return &DCOS{
MaxConnections: defaultMaxConnections,
ResponseTimeout: internal.Duration{
Duration: defaultResponseTimeout,
},
}
})
}

plugins/inputs/dcos/dcos_test.go

@@ -0,0 +1,441 @@
package dcos
import (
"context"
"fmt"
"testing"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/require"
)
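// mockClient satisfies the plugin's Client interface with function fields,
// letting each test stub exactly the calls it needs.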
type mockClient struct {
SetTokenF func(token string)
LoginF func(ctx context.Context, sa *ServiceAccount) (*AuthToken, error)
GetSummaryF func(ctx context.Context) (*Summary, error)
GetContainersF func(ctx context.Context, node string) ([]Container, error)
GetNodeMetricsF func(ctx context.Context, node string) (*Metrics, error)
GetContainerMetricsF func(ctx context.Context, node, container string) (*Metrics, error)
GetAppMetricsF func(ctx context.Context, node, container string) (*Metrics, error)
}
func (c *mockClient) SetToken(token string) {
c.SetTokenF(token)
}
func (c *mockClient) Login(ctx context.Context, sa *ServiceAccount) (*AuthToken, error) {
return c.LoginF(ctx, sa)
}
func (c *mockClient) GetSummary(ctx context.Context) (*Summary, error) {
return c.GetSummaryF(ctx)
}
func (c *mockClient) GetContainers(ctx context.Context, node string) ([]Container, error) {
return c.GetContainersF(ctx, node)
}
func (c *mockClient) GetNodeMetrics(ctx context.Context, node string) (*Metrics, error) {
return c.GetNodeMetricsF(ctx, node)
}
func (c *mockClient) GetContainerMetrics(ctx context.Context, node, container string) (*Metrics, error) {
return c.GetContainerMetricsF(ctx, node, container)
}
func (c *mockClient) GetAppMetrics(ctx context.Context, node, container string) (*Metrics, error) {
return c.GetAppMetricsF(ctx, node, container)
}
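// The tests below are table-driven: each case feeds a Metrics value through
// the plugin and checks the accumulator for the expected points.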
func TestAddNodeMetrics(t *testing.T) {
var tests = []struct {
name string
metrics *Metrics
check func(*testutil.Accumulator) []bool
}{
{
name: "basic datapoint conversion",
metrics: &Metrics{
Datapoints: []DataPoint{
{
Name: "process.count",
Unit: "count",
Value: 42.0,
},
},
},
check: func(acc *testutil.Accumulator) []bool {
return []bool{acc.HasPoint(
"dcos_node",
map[string]string{
"cluster": "a",
},
"process_count", 42.0,
)}
},
},
{
name: "path added as tag",
metrics: &Metrics{
Datapoints: []DataPoint{
{
Name: "filesystem.inode.free",
Tags: map[string]string{
"path": "/var/lib",
},
Unit: "count",
Value: 42.0,
},
},
},
check: func(acc *testutil.Accumulator) []bool {
return []bool{acc.HasPoint(
"dcos_node",
map[string]string{
"cluster": "a",
"path": "/var/lib",
},
"filesystem_inode_free", 42.0,
)}
},
},
{
name: "interface added as tag",
metrics: &Metrics{
Datapoints: []DataPoint{
{
Name: "network.out.dropped",
Tags: map[string]string{
"interface": "eth0",
},
Unit: "count",
Value: 42.0,
},
},
},
check: func(acc *testutil.Accumulator) []bool {
return []bool{acc.HasPoint(
"dcos_node",
map[string]string{
"cluster": "a",
"interface": "eth0",
},
"network_out_dropped", 42.0,
)}
},
},
{
name: "bytes unit appended to fieldkey",
metrics: &Metrics{
Datapoints: []DataPoint{
{
Name: "network.in",
Tags: map[string]string{
"interface": "eth0",
},
Unit: "bytes",
Value: 42.0,
},
},
},
check: func(acc *testutil.Accumulator) []bool {
return []bool{acc.HasPoint(
"dcos_node",
map[string]string{
"cluster": "a",
"interface": "eth0",
},
"network_in_bytes", int64(42),
)}
},
},
{
name: "dimensions added as tags",
metrics: &Metrics{
Datapoints: []DataPoint{
{
Name: "process.count",
Tags: map[string]string{},
Unit: "count",
Value: 42.0,
},
{
Name: "memory.total",
Tags: map[string]string{},
Unit: "bytes",
Value: 42,
},
},
Dimensions: map[string]interface{}{
"cluster_id": "c0760bbd-9e9d-434b-bd4a-39c7cdef8a63",
"hostname": "192.168.122.18",
"mesos_id": "2dfbbd28-29d2-411d-92c4-e2f84c38688e-S1",
},
},
check: func(acc *testutil.Accumulator) []bool {
return []bool{
acc.HasPoint(
"dcos_node",
map[string]string{
"cluster": "a",
"hostname": "192.168.122.18",
},
"process_count", 42.0),
acc.HasPoint(
"dcos_node",
map[string]string{
"cluster": "a",
"hostname": "192.168.122.18",
},
"memory_total_bytes", int64(42)),
}
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
var acc testutil.Accumulator
dcos := &DCOS{}
dcos.addNodeMetrics(&acc, "a", tt.metrics)
for i, ok := range tt.check(&acc) {
require.True(t, ok, fmt.Sprintf("check at index %d failed", i))
}
})
}
}
func TestAddContainerMetrics(t *testing.T) {
var tests = []struct {
name string
metrics *Metrics
check func(*testutil.Accumulator) []bool
}{
{
name: "container",
metrics: &Metrics{
Datapoints: []DataPoint{
{
Name: "net.rx.errors",
Tags: map[string]string{
"container_id": "f25c457b-fceb-44f0-8f5b-38be34cbb6fb",
"executor_id": "telegraf.192fb45f-cc0c-11e7-af48-ea183c0b541a",
"executor_name": "Command Executor (Task: telegraf.192fb45f-cc0c-11e7-af48-ea183c0b541a) (Command: NO EXECUTABLE)",
"framework_id": "ab2f3a8b-06db-4e8c-95b6-fb1940874a30-0001",
"source": "telegraf.192fb45f-cc0c-11e7-af48-ea183c0b541a",
},
Unit: "count",
Value: 42.0,
},
},
Dimensions: map[string]interface{}{
"cluster_id": "c0760bbd-9e9d-434b-bd4a-39c7cdef8a63",
"container_id": "f25c457b-fceb-44f0-8f5b-38be34cbb6fb",
"executor_id": "telegraf.192fb45f-cc0c-11e7-af48-ea183c0b541a",
"framework_id": "ab2f3a8b-06db-4e8c-95b6-fb1940874a30-0001",
"framework_name": "marathon",
"framework_principal": "dcos_marathon",
"framework_role": "slave_public",
"hostname": "192.168.122.18",
"labels": map[string]string{
"DCOS_SPACE": "/telegraf",
},
"mesos_id": "2dfbbd28-29d2-411d-92c4-e2f84c38688e-S1",
"task_id": "telegraf.192fb45f-cc0c-11e7-af48-ea183c0b541a",
"task_name": "telegraf",
},
},
check: func(acc *testutil.Accumulator) []bool {
return []bool{
acc.HasPoint(
"dcos_container",
map[string]string{
"cluster": "a",
"container_id": "f25c457b-fceb-44f0-8f5b-38be34cbb6fb",
"hostname": "192.168.122.18",
"task_name": "telegraf",
"DCOS_SPACE": "/telegraf",
},
"net_rx_errors",
42.0,
),
}
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
var acc testutil.Accumulator
dcos := &DCOS{}
dcos.addContainerMetrics(&acc, "a", tt.metrics)
for i, ok := range tt.check(&acc) {
require.True(t, ok, fmt.Sprintf("check at index %d failed", i))
}
})
}
}
func TestAddAppMetrics(t *testing.T) {
var tests = []struct {
name string
metrics *Metrics
check func(*testutil.Accumulator) []bool
}{
{
name: "tags are optional",
metrics: &Metrics{
Datapoints: []DataPoint{
{
Name: "dcos.metrics.module.container_throttled_bytes_per_sec",
Unit: "",
Value: 42.0,
},
},
},
check: func(acc *testutil.Accumulator) []bool {
return []bool{
acc.HasPoint(
"dcos_app",
map[string]string{
"cluster": "a",
},
"container_throttled_bytes_per_sec", 42.0,
),
}
},
},
{
name: "dimensions are tagged",
metrics: &Metrics{
Datapoints: []DataPoint{
{
Name: "dcos.metrics.module.container_throttled_bytes_per_sec",
Unit: "",
Value: 42.0,
},
},
Dimensions: map[string]interface{}{
"cluster_id": "c0760bbd-9e9d-434b-bd4a-39c7cdef8a63",
"container_id": "02d31175-1c01-4459-8520-ef8b1339bc52",
"hostname": "192.168.122.18",
"mesos_id": "2dfbbd28-29d2-411d-92c4-e2f84c38688e-S1",
},
},
check: func(acc *testutil.Accumulator) []bool {
return []bool{
acc.HasPoint(
"dcos_app",
map[string]string{
"cluster": "a",
"container_id": "02d31175-1c01-4459-8520-ef8b1339bc52",
"hostname": "192.168.122.18",
},
"container_throttled_bytes_per_sec", 42.0,
),
}
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
var acc testutil.Accumulator
dcos := &DCOS{}
dcos.addAppMetrics(&acc, "a", tt.metrics)
for i, ok := range tt.check(&acc) {
require.True(t, ok, fmt.Sprintf("check at index %d failed", i))
}
})
}
}
func TestGatherFilterNode(t *testing.T) {
var tests = []struct {
name string
nodeInclude []string
nodeExclude []string
client Client
check func(*testutil.Accumulator) []bool
}{
{
name: "cluster without nodes has no metrics",
client: &mockClient{
SetTokenF: func(token string) {},
GetSummaryF: func(ctx context.Context) (*Summary, error) {
return &Summary{
Cluster: "a",
Slaves: []Slave{},
}, nil
},
},
check: func(acc *testutil.Accumulator) []bool {
return []bool{
acc.NMetrics() == 0,
}
},
},
{
name: "node include",
nodeInclude: []string{"x"},
client: &mockClient{
SetTokenF: func(token string) {},
GetSummaryF: func(ctx context.Context) (*Summary, error) {
return &Summary{
Cluster: "a",
Slaves: []Slave{
Slave{ID: "x"},
Slave{ID: "y"},
},
}, nil
},
GetContainersF: func(ctx context.Context, node string) ([]Container, error) {
return []Container{}, nil
},
GetNodeMetricsF: func(ctx context.Context, node string) (*Metrics, error) {
return &Metrics{
Datapoints: []DataPoint{
{
Name: "value",
Value: 42.0,
},
},
Dimensions: map[string]interface{}{
"hostname": "x",
},
}, nil
},
},
check: func(acc *testutil.Accumulator) []bool {
return []bool{
acc.HasPoint(
"dcos_node",
map[string]string{
"cluster": "a",
"hostname": "x",
},
"value", 42.0,
),
acc.NMetrics() == 1,
}
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
var acc testutil.Accumulator
dcos := &DCOS{
NodeInclude: tt.nodeInclude,
NodeExclude: tt.nodeExclude,
client: tt.client,
}
err := dcos.Gather(&acc)
require.NoError(t, err)
for i, ok := range tt.check(&acc) {
require.True(t, ok, fmt.Sprintf("check at index %d failed", i))
}
})
}
}

plugins/inputs/disque/disque.go

@@ -75,12 +75,11 @@ func (g *Disque) Gather(acc telegraf.Accumulator) error {
var wg sync.WaitGroup
-var outerr error
for _, serv := range g.Servers {
u, err := url.Parse(serv)
if err != nil {
-return fmt.Errorf("Unable to parse to address '%s': %s", serv, err)
+acc.AddError(fmt.Errorf("Unable to parse to address '%s': %s", serv, err))
+continue
} else if u.Scheme == "" {
// fallback to simple string based address (i.e. "10.0.0.1:10000")
u.Scheme = "tcp"
@@ -90,13 +89,13 @@ func (g *Disque) Gather(acc telegraf.Accumulator) error {
wg.Add(1)
go func(serv string) {
defer wg.Done()
-outerr = g.gatherServer(u, acc)
+acc.AddError(g.gatherServer(u, acc))
}(serv)
}
wg.Wait()
-return outerr
+return nil
}
const defaultPort = "7711"

Some files were not shown because too many files have changed in this diff.