Compare commits


1532 Commits

Author SHA1 Message Date
Daniel Nelson
1e51969813 Set 1.5.3 release date
(cherry picked from commit 2160779126)
2018-03-14 16:34:56 -07:00
Daniel Nelson
51b097a7c6 Use previous image on appveyor
(cherry picked from commit f1b681cbdc)
2018-03-14 16:31:18 -07:00
Daniel Nelson
77dfb8c9c5 Update changelog
(cherry picked from commit 6e5e2f713d)
2018-03-14 14:25:53 -07:00
Jonas Hahnfeld
1398f8e678 Add output of stderr in case of error to exec log message (#3862)
If the command fails with a non-zero exit status there might be an error
message on stderr. Append the first line of stderr to the error message to
ease the search for its cause.

(cherry picked from commit 8e515688eb)
2018-03-14 14:25:53 -07:00
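As an aside, a minimal Go sketch of the approach this commit describes; `runCommand` is a hypothetical helper, not the exec plugin's actual code, and the shell command in `main` is only an example:

```go
package main

import (
	"bufio"
	"bytes"
	"fmt"
	"os/exec"
)

// runCommand is a hypothetical helper: on a non-zero exit status it
// appends the first line of stderr to the returned error to ease
// finding the cause of the failure.
func runCommand(name string, args ...string) ([]byte, error) {
	var stdout, stderr bytes.Buffer
	cmd := exec.Command(name, args...)
	cmd.Stdout = &stdout
	cmd.Stderr = &stderr
	if err := cmd.Run(); err != nil {
		firstLine, _, _ := bufio.NewReader(&stderr).ReadLine()
		if len(firstLine) > 0 {
			return nil, fmt.Errorf("%v: %s", err, firstLine)
		}
		return nil, err
	}
	return stdout.Bytes(), nil
}

func main() {
	_, err := runCommand("sh", "-c", "echo oops >&2; exit 1")
	fmt.Println(err) // exit status 1: oops
}
```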
Daniel Nelson
d96483bffb Use Go 1.9.4 in builds 2018-03-09 14:37:53 -08:00
Daniel Nelson
5e534676a0 Update changelog
(cherry picked from commit f7207f514e)
2018-03-08 10:55:02 -08:00
Dennis Schön
9329200afa Fix uptime metric in passenger input plugin (#3871)
(cherry picked from commit f1c8abd68c)
2018-03-08 10:55:02 -08:00
Daniel Nelson
645b8b905d Update changelog
(cherry picked from commit e4ce057885)
2018-03-07 14:17:36 -08:00
dilshatm
ea7d884c09 Fix collation difference in sqlserver input (#3786)
(cherry picked from commit a6d366fb84)
2018-03-07 14:17:33 -08:00
Daniel Nelson
7f94cb58e4 Update changelog
(cherry picked from commit 5928219454)
2018-02-25 01:07:16 -08:00
Daniel Nelson
d8f2d4af0f Disable keepalive in mqtt output. (#3779)
This functionality currently has race conditions that can result in the
output deadlocking.

(cherry picked from commit 8c932abff6)
2018-02-25 01:07:16 -08:00
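For context, a sketch of what disabling keepalive looks like with the Eclipse Paho Go client that MQTT plugins commonly build on; the broker address and client ID below are made-up examples, and this is illustrative rather than the output's actual code:

```go
package main

import (
	"log"

	mqtt "github.com/eclipse/paho.mqtt.golang"
)

func main() {
	opts := mqtt.NewClientOptions().
		AddBroker("tcp://localhost:1883"). // example broker address
		SetClientID("telegraf-example").   // example client id
		SetKeepAlive(0)                    // 0 disables the keep-alive ping loop

	client := mqtt.NewClient(opts)
	if token := client.Connect(); token.Wait() && token.Error() != nil {
		log.Fatal(token.Error())
	}
	defer client.Disconnect(250) // wait up to 250 ms for in-flight work
}
```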
Daniel Nelson
d8dae1b1ab Fix memory leak in postgresql_extensible 2018-02-20 11:58:43 -08:00
Daniel Nelson
770cf4e0b6 Update changelog
(cherry picked from commit a00d5b48f8)
2018-02-09 12:13:49 -08:00
efficks
8cb5391f4e Fix ping plugin not reporting zero durations (#3778)
(cherry picked from commit f5ea13a9ab)
2018-02-09 12:13:48 -08:00
Daniel Nelson
c5ddb65ad9 Update changelog
(cherry picked from commit 9a1d69a2ae)
2018-02-05 11:23:03 -08:00
Philipp Weber
d671299e96 Remove userinfo from url tag in prometheus input (#3743)
(cherry picked from commit b7a68eef56)
2018-02-05 11:23:02 -08:00
Daniel Nelson
f59231941f Update changelog
(cherry picked from commit 32732d42f8)
2018-01-30 18:09:24 -08:00
Daniel Nelson
100bdfba6c Set path to / if HOST_MOUNT_PREFIX matches full path (#3736)
(cherry picked from commit 10e51e4b49)
2018-01-30 18:08:50 -08:00
Daniel Nelson
67440c95bb Set release date for 1.5.2
(cherry picked from commit 3a85e7b1f0)
2018-01-30 14:02:50 -08:00
Daniel Nelson
39de63d03c Update changelog
(cherry picked from commit 5d87ad85a1)
2018-01-30 14:01:17 -08:00
Daniel Nelson
56edd339e7 Exclude master_replid fields from redis input (#3725)
(cherry picked from commit c28d0e1b16)
2018-01-30 14:01:13 -08:00
Daniel Nelson
df768f83af Update changelog
(cherry picked from commit f9c0aa1e23)
2018-01-25 13:47:39 -08:00
Pierre Tessier
8733d3826a Add timeout to wavefront output write (#3711)
(cherry picked from commit 3e4c91880a)
2018-01-25 13:47:39 -08:00
Daniel Nelson
2bb97154db Update changelog
(cherry picked from commit 899c3a2ae1)
2018-01-22 12:06:30 -08:00
Daniel Nelson
a8d9e458ab Remove graphite serializer replacement of dot with underscore in field key (#3705)
(cherry picked from commit 4558aeddeb)
2018-01-22 12:06:26 -08:00
Daniel Nelson
b464adb08c Update changelog
(cherry picked from commit 36c9113917)
2018-01-22 12:01:31 -08:00
Daniel Nelson
4bd67824ae Avoid loop creation in second processor pass (#3656)
(cherry picked from commit 5270aa451c)
2018-01-22 12:01:24 -08:00
Daniel Nelson
f5894a6a2f Limit wait time for writes in mqtt output (#3699)
(cherry picked from commit 91fc2765b1)
2018-01-22 12:01:20 -08:00
Daniel Nelson
1790b26651 Update changelog
(cherry picked from commit 5bac08662e)
2018-01-18 17:39:22 -08:00
Piotr Popieluch
bb3ee1fd39 Align aggregator period with internal ticker to avoid skipping metrics (#3693)
By the time aggregator.run() was called, about 600 ms had already passed since `now` was set, which skewed the aggregation intervals and caused metrics to be skipped.

(cherry picked from commit 601dc99606)
2018-01-18 17:39:17 -08:00
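A rough Go sketch of the timing issue described above, under the assumption that the window used to be anchored at a `time.Now()` captured during setup; deriving it from the tick the ticker actually delivers keeps the intervals aligned:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	period := time.Second

	// Old pattern (sketch): the window was anchored at a Now() taken at
	// setup time; any startup delay skews every subsequent interval.
	setup := time.Now()

	ticker := time.NewTicker(period)
	defer ticker.Stop()

	for i := 0; i < 3; i++ {
		tick := <-ticker.C
		// Fix (sketch): align the window to the tick itself.
		windowEnd := tick.Truncate(period)
		windowStart := windowEnd.Add(-period)
		fmt.Println("aggregate", windowStart.Format("15:04:05"),
			"to", windowEnd.Format("15:04:05"),
			"(setup was", setup.Format("15:04:05"), ")")
	}
}
```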
Daniel Nelson
82df5bf2d8 Update changelog
(cherry picked from commit 0f55d9eba2)
2018-01-17 15:28:52 -08:00
Piotr Popieluch
8b566b2b9f Reconnect before sending graphite metrics if disconnected (#3680)
(cherry picked from commit f374a295d9)
2018-01-17 15:28:52 -08:00
Daniel Nelson
059a751a71 Update changelog
(cherry picked from commit ad921a3840)
2018-01-17 14:39:10 -08:00
Michael Boudreau
dcaa0ca8db Fix index out of bounds error in solr input plugin (#3683)
(cherry picked from commit 9d559292a5)
2018-01-17 14:39:05 -08:00
Daniel Nelson
8777e32d9f Update changelog
(cherry picked from commit 6e24056757)
2018-01-16 13:47:23 -08:00
Noah Crowley
667940afac Ignore empty lines in Graphite plaintext (#3684)
(cherry picked from commit 87830a1c38)
2018-01-16 13:46:58 -08:00
Daniel Nelson
0605af7c99 Pin crate docker image for testing
(cherry picked from commit 3cf0ba1ccf)
2018-01-10 14:11:36 -08:00
Daniel Nelson
4e89c17c0f Set 1.5.1 release date 2018-01-10 13:28:28 -08:00
Daniel Nelson
45b7db7de1 Add link to docs for configuring the openldap monitoring backend
(cherry picked from commit 37757b7782)
2018-01-04 15:35:45 -08:00
Daniel Nelson
cc478f035e Update changelog
(cherry picked from commit 315fd1e987)
2018-01-04 15:30:23 -08:00
Daniel Nelson
fe6239cf9f Escape environment variables during config toml parsing (#3637)
(cherry picked from commit b0c2bb870e)
2018-01-04 15:30:17 -08:00
Daniel Nelson
865917f523 Update changelog
(cherry picked from commit 07cb749e04)
2018-01-03 13:45:00 -08:00
kerams
4aa8d72644 Fix deliver_get field in rabbitmq input (#3633)
(cherry picked from commit acea7109d4)
2018-01-03 13:44:55 -08:00
Daniel Nelson
384ef6af6b Update changelog
(cherry picked from commit 81f42e8b17)
2018-01-02 16:36:18 -08:00
Daniel Nelson
07985e6524 Add information about how to set permissions for postfix input (#3594)
(cherry picked from commit a440ed8d8c)
2018-01-02 14:10:00 -08:00
Daniel Nelson
f8597f78f4 Update changelog
(cherry picked from commit 06c21fb9f7)
2017-12-28 16:24:26 -08:00
Daniel Nelson
83faea7a31 Set content-type charset in influxdb output and allow it be overridden (#3593)
(cherry picked from commit 4f7afb8cb5)
2017-12-28 16:24:21 -08:00
Daniel Nelson
223bbf0df7 Update changelog
(cherry picked from commit ef6e5c5a85)
2017-12-28 16:19:34 -08:00
Daniel Nelson
55f35f291d Fix DC/OS login expiration time (#3625)
(cherry picked from commit 005face7c0)
2017-12-28 16:19:25 -08:00
Daniel Nelson
6852231c1b Update changelog
(cherry picked from commit 1011cd0c94)
2017-12-28 16:16:30 -08:00
Daniel Nelson
ce4ca43a5d Fix name error in jolokia2_agent sample config (#3624)
(cherry picked from commit 6c075c4346)
2017-12-28 16:16:30 -08:00
timhallinflux
5d6622eb44 Update README.md
added `network` for a tiny bit more context
2017-12-14 14:58:37 -08:00
Daniel Nelson
a1668bbf9a Set release date for 1.5.0 2017-12-14 10:59:13 -08:00
Daniel Nelson
fe91c779e9 Remove AWS credential check from cloudwatch output (#3583)
This method is reported to not work with IAM Instance Profiles, and we
do not want to make any calls that would require additional permissions.

(cherry picked from commit 5b40173bcb)
2017-12-13 17:52:45 -08:00
Daniel Nelson
425b6f7d63 Update changelog
(cherry picked from commit 15266bb7eb)
2017-12-13 11:18:34 -08:00
Ildar Svetlov
c322ddb4b0 Don't add system input uptime_format as a counter (#3578)
(cherry picked from commit d935dfa9ed)
2017-12-13 11:17:55 -08:00
Daniel Nelson
648d3bde33 Update changelog
(cherry picked from commit ff634c5056)
2017-12-13 10:58:16 -08:00
Daniel Nelson
d8da77cb42 Add idle state to processes test
(cherry picked from commit 14b31a2354)
2017-12-13 10:57:28 -08:00
Ted Zlatanov
fdb04702eb Support I (idle) process state on procfs+Linux (#3530)
(cherry picked from commit 663a5b1f50)
2017-12-13 10:56:47 -08:00
Steve Banik
ecf43f4986 Fixed typo in README.md (#3574)
(cherry picked from commit d7d224d511)
2017-12-12 11:22:33 -08:00
Daniel Nelson
e307e92e86 Update changelog
(cherry picked from commit abcad439eb)
2017-12-11 18:02:35 -08:00
Daniel Nelson
8d4a09c3ea Fix separation of multiple prometheus_client outputs (#3570)
(cherry picked from commit 8484de6c12)
2017-12-11 18:02:30 -08:00
Daniel Nelson
fd964bd4eb Use auto type detection for scanned devices in smart input (#3561)
(cherry picked from commit 93d16a4603)
2017-12-08 18:03:39 -08:00
Daniel Nelson
994e75f1f0 Update changelog
(cherry picked from commit 88746b01c3)
2017-12-08 18:02:17 -08:00
Daniel Nelson
2e2efafbfc Update sarama-cluster to latest release (#3560)
(cherry picked from commit 37095ef47d)
2017-12-08 18:02:17 -08:00
Daniel Nelson
39537ed86e Use device name instead of abs path for devices tag in smart input (#3550)
(cherry picked from commit 574034c301)
2017-12-08 13:26:15 -08:00
Daniel Nelson
558ce25c94 Log connect error only in wavefront output (#3549)
(cherry picked from commit 177e7e2c73)
2017-12-06 14:56:28 -08:00
Daniel Nelson
0438f412a9 Fix formatting in changelog 2017-12-04 13:17:23 -08:00
Daniel Nelson
ca8911fec0 Update example config 2017-12-01 11:49:07 -08:00
Daniel Nelson
2c5a5373f6 Update changelog 2017-12-01 11:42:00 -08:00
Daniel Nelson
cabe10b88a Update changelog 2017-12-01 11:23:18 -08:00
Daniel Nelson
7f66863b87 Fix HOST_MOUNT_PREFIX in docker with disk input (#3529) 2017-12-01 11:21:39 -08:00
Daniel Nelson
e400ec2b57 Update changelog 2017-11-30 18:42:14 -08:00
Daniel Nelson
44320a5421 Add option to amqp output to publish persistent messages (#3528) 2017-11-30 18:40:12 -08:00
Daniel Nelson
a9951710b3 Add time import 2017-11-29 17:05:13 -08:00
Daniel Nelson
6426bca1f8 Update changelog 2017-11-29 16:36:00 -08:00
Nathan Ferch
f92a4f528f Add input plugin for OpenBSD/FreeBSD pf (#3405) 2017-11-29 16:32:50 -08:00
Daniel Nelson
3ba5458220 Update changelog 2017-11-29 12:17:46 -08:00
Bob Shannon
beb9d7560d Add support for glob patterns in net input plugin (#3140) 2017-11-29 12:16:34 -08:00
Daniel Nelson
24d82aebe6 Update changelog 2017-11-29 12:10:56 -08:00
Daniel Nelson
7dc256e845 Update gopsutil version to include netstat fix (#3513) 2017-11-29 12:06:47 -08:00
Daniel Nelson
297897ae0a Add dcos plugin to changelog and readme 2017-11-29 11:54:33 -08:00
Daniel Nelson
414a7e34fb Add input plugin for DC/OS (#3519) 2017-11-29 11:50:32 -08:00
Patrick Hemmer
bf65e19486 Fix postfix plugin age to use ctime, not mtime (#3525) 2017-11-29 11:25:31 -08:00
Daniel Nelson
2c70958c24 Update changelog 2017-11-29 10:52:59 -08:00
Daniel Nelson
d727a6f85c Add slab to mem plugin (#3518) 2017-11-29 10:49:45 -08:00
Daniel Nelson
4e9b19f7a6 Add bond input to readme and update changelog 2017-11-28 15:19:30 -08:00
Ildar Svetlov
132fb50150 Add bond input plugin (#3424) 2017-11-28 15:16:19 -08:00
Daniel Nelson
d1ba75176d Update changelog 2017-11-28 10:10:36 -08:00
Patrick Hemmer
76240b9f18 Add postfix input plugin (#2553) 2017-11-28 10:08:41 -08:00
Daniel Nelson
06e22ee7ac Update changelog 2017-11-27 17:06:50 -08:00
Lukasz Jagiello
a18eedb970 Use deb-systemd-invoke to restart service (#3506)
From the man page:
```
deb-systemd-invoke is a Debian-specific helper script which asks
/usr/sbin/policy-rc.d before performing a systemctl call.

deb-systemd-invoke is intended to be used from maintscripts to start
systemd unit files. It is specifically NOT intended to be used
interactively by users. Instead, users should run systemd and use
systemctl, or not bother about the systemd enabled state in case they
are not running systemd.
```

This PR replaces the regular `systemctl` call with `deb-systemd-invoke`.
2017-11-27 17:05:32 -08:00
Lukasz Jagiello
6514399baf Add shadow-utils dependency to rpm package (#3505) 2017-11-27 17:02:16 -08:00
Dylan Meissner
27994abcb5 Jolokia2 handles unordered mbean object name properties (#3504) 2017-11-27 13:43:19 -08:00
Daniel Nelson
a9ada5f65b Update changelog 2017-11-27 12:32:36 -08:00
Laurent Gosselin
f758d0c6c3 Fix global variable collection when using interval_slow option in mysql input (#3500) 2017-11-27 12:29:51 -08:00
Daniel Nelson
7442b5645f Update changelog 2017-11-20 16:50:18 -08:00
Daniel Nelson
d5bd426e0c Fix snmp tools output parsing when they contain Windows eols (#3396) 2017-11-20 16:48:30 -08:00
Daniel Nelson
154b263f14 Update changelog 2017-11-20 16:27:18 -08:00
Leandro Piccilli
92ca661662 Add support for tags in the index name in elasticsearch output (#3470) 2017-11-20 16:25:36 -08:00
Daniel Nelson
54b0b9e727 Update changelog 2017-11-20 14:40:45 -08:00
aromeyer
dc2c8791d0 Add opensmtpd input plugin (#3449) 2017-11-20 14:39:13 -08:00
Daniel Nelson
367bbdeb7e Update changelog 2017-11-20 14:37:09 -08:00
aromeyer
e544d742f9 Add unbound input plugin (#3434) 2017-11-20 14:32:06 -08:00
Daniel Nelson
393c4c6c2d Update changelog 2017-11-20 14:23:16 -08:00
Leandro Piccilli
4d1bc620b2 Add index by week number to Elasticsearch output (#3490) 2017-11-20 14:22:29 -08:00
Daniel Nelson
db8e767f1f Update changelog 2017-11-20 14:20:05 -08:00
Chris Goller
afe05fcfef Use hexadecimal ids and lowercase names in zipkin input (#3488) 2017-11-20 14:19:32 -08:00
Daniel Nelson
9422cca2cc Update changelog 2017-11-16 16:51:02 -08:00
erayaslan
a06ee58785 Use MAX() instead of SUM() for latency measurements in sqlserver (#3471) 2017-11-16 16:49:51 -08:00
Daniel Nelson
b13eea89b1 Update changelog and add particle webhook to readme 2017-11-16 16:11:20 -08:00
David G. Simmons
b813e2ecae Add Particle Webhook Plugin (#3477) 2017-11-16 16:03:19 -08:00
Pierre Fersing
8364417009 Whitelist allowed char classes for graphite output (#3473) 2017-11-15 14:44:20 -08:00
Daniel Nelson
136c15ba33 Skip test requiring cratedb server in short test mode 2017-11-13 15:22:57 -08:00
Daniel Nelson
19839c0167 Update changelog 2017-11-13 15:09:05 -08:00
Daniel Nelson
72682973bd Fix typo in error message 2017-11-13 15:07:54 -08:00
faye-sama
a411306fba Fail metrics parsing on unescaped quotes (#3409)
Before this change, the Fields() method on a metric parsed from a line
with unescaped quotes could panic. This change makes such a line
unparseable.

Fixes #3326
2017-11-13 15:06:47 -08:00
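An illustrative check for the class of input this change rejects; `validateQuotedField` is a hypothetical helper, not Telegraf's actual parser:

```go
package main

import (
	"errors"
	"fmt"
)

// validateQuotedField walks a string field value in line-protocol style
// (`"..."`) and rejects any unescaped interior quote, so the value fails
// to parse instead of producing a metric that panics later.
func validateQuotedField(s string) error {
	if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' {
		return errors.New("field value is not quoted")
	}
	for i := 1; i < len(s)-1; i++ {
		switch s[i] {
		case '\\':
			i++ // skip the escaped byte
		case '"':
			return errors.New("unescaped quote in field value")
		}
	}
	return nil
}

func main() {
	fmt.Println(validateQuotedField(`"ok \"quoted\""`)) // <nil>
	fmt.Println(validateQuotedField(`"bad "quote""`))   // error
}
```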
Patrick Hemmer
cbd346117a Add tests for procstat systemd & cgroup matching (#3469) 2017-11-13 14:45:31 -08:00
Daniel Nelson
181a56018f Update changelog 2017-11-13 11:02:01 -08:00
Patrick Hemmer
6ee6d55751 Add systemd unit pid and cgroup matching to procstat (#3459) 2017-11-13 10:59:27 -08:00
Daniel Nelson
ebd73b7279 Update changelog 2017-11-10 14:39:11 -08:00
Trevor Pounds
6a57395731 Compile with Go 1.9.2 (#3458) 2017-11-10 14:39:00 -08:00
Daniel Nelson
be13f69305 Update changelog 2017-11-09 14:05:36 -08:00
Felix Geisendörfer
62ec3e50d9 Add CrateDB output plugin (#3210) 2017-11-09 14:03:16 -08:00
Daniel Nelson
07297e80a8 Set 1.4.4 release date 2017-11-08 15:21:20 -08:00
Daniel Nelson
f0578b8c83 Update changelog 2017-11-07 16:48:44 -08:00
Lukasz Jagiello
493af043d3 Add Solr input plugin (#2019) 2017-11-07 16:44:09 -08:00
Daniel Nelson
47d013132a Update changelog 2017-11-07 14:37:04 -08:00
Pierre Tessier
dcff769fed Add modification_time field to filestat input plugin (#3305) 2017-11-07 14:32:48 -08:00
Daniel Nelson
5141f8a2a0 Update contributing documentation 2017-11-07 13:59:06 -08:00
Daniel Nelson
bb14589469 Update changelog 2017-11-07 13:59:06 -08:00
Daniel Nelson
b81bea658f Always ignore autofs filesystems in disk input (#3440) 2017-11-07 11:45:09 -08:00
Daniel Nelson
2c2dc97702 Update changelog 2017-11-07 11:43:15 -08:00
Daniel Nelson
cbbdf1043b Use current time if container read time is zero value (#3437) 2017-11-07 11:41:53 -08:00
Daniel Nelson
c55f285de0 Update changelog 2017-11-07 11:36:29 -08:00
Daniel Nelson
e1295c41c8 Update gopsutil to v2.17.10 (#3441) 2017-11-07 11:26:11 -08:00
Daniel Nelson
e0df62c27b Update changelog 2017-11-06 17:42:42 -08:00
Bob Shannon
fdf12ce6b4 Redact datadog API key in log output (#3420) 2017-11-06 17:41:14 -08:00
Daniel Nelson
e5a265c8c7 Revert particle webhook changes on master 2017-11-06 10:47:10 -08:00
David G. Simmons
112955a9f5 Merge branch 'master' of https://github.com/influxdata/telegraf into dn-particle-plugin 2017-11-04 09:30:17 -04:00
David G. Simmons
da0ca8a870 Revert "Undo Revert "Revert changes since 9b0af4478""
This reverts commit 6e6aefe5da.
2017-11-04 09:19:37 -04:00
David G. Simmons
6e6aefe5da Undo Revert "Revert changes since 9b0af4478"
This reverts commit 2c31345c70.
2017-11-04 09:14:52 -04:00
David G. Simmons
ae2635b547 Readme update 2017-11-04 08:43:13 -04:00
Daniel Nelson
c14478f025 Update http_listener certs 2017-11-03 21:52:45 -07:00
Daniel Nelson
2c31345c70 Revert changes since 9b0af4478 2017-11-03 21:10:56 -07:00
David G. Simmons
4a9fa7ef4b Merge branch 'master' of https://github.com/influxdata/telegraf into dn-particle-plugin 2017-11-03 13:48:45 -04:00
David G. Simmons
7db06d2aa4 Revert "New Particle Plugin"
This reverts commit ba462f5c94.
2017-11-03 13:28:54 -04:00
David G. Simmons
871fae6eb3 Revert "bug fixes and refactoring"
This reverts commit 86961cc814.
2017-11-03 13:28:35 -04:00
David G. Simmons
8e587e74f5 Revert "Update README.md"
This reverts commit 8ed00af10a.
2017-11-03 13:28:00 -04:00
David G. Simmons
440918a03b Revert "Updated README.md"
This reverts commit a6ada03b91.
2017-11-03 13:27:06 -04:00
David G. Simmons
f64b23b724 Revert "Small fixes"
This reverts commit a987118b01.
2017-11-03 13:27:06 -04:00
David G. Simmons
c11739d143 Revert "Updated Test JSON"
This reverts commit 92caf33fff.
2017-11-03 13:27:06 -04:00
David G. Simmons
883696c224 Revert "Updated Test JSON"
This reverts commit 92caf33fff.
2017-11-03 13:16:09 -04:00
David G. Simmons
0ea0519e89 Merge branch 'master' into dn-particle-plugin 2017-11-03 12:13:49 -04:00
David G. Simmons
4596ae70a9 ignore mac-files 2017-11-03 12:07:03 -04:00
David G. Simmons
92caf33fff Updated Test JSON 2017-11-03 12:07:03 -04:00
David G. Simmons
a987118b01 Small fixes
Hoping to pass CircleCI this time
2017-11-03 12:07:03 -04:00
David G. Simmons
a6ada03b91 Updated README.md 2017-11-03 12:07:03 -04:00
David G. Simmons
8ed00af10a Update README.md 2017-11-03 12:07:03 -04:00
David Norton
86961cc814 bug fixes and refactoring 2017-11-03 12:07:03 -04:00
David G. Simmons
ba462f5c94 New Particle Plugin 2017-11-03 12:07:03 -04:00
David G. Simmons
1d1d5e6089 Updated Test JSON 2017-11-02 17:21:50 -04:00
David G. Simmons
8560c2f88d Fixed Readme 2017-11-02 17:19:37 -04:00
David G. Simmons
5d135cece3 test json update 2017-11-02 14:19:01 -04:00
Daniel Nelson
9b0af4478b Remove incorrect comment about linker options 2017-11-01 18:17:51 -07:00
Daniel Nelson
26ccc1f205 Add teamspeak to readme and update changelog 2017-11-01 13:30:43 -07:00
Patric Kanngießer
76ed70340b Add Teamspeak 3 input plugin (#3315) 2017-11-01 13:27:59 -07:00
Maximilien Richer
5f215c22fe Fix typos in comments (#3415) 2017-10-31 17:00:06 -07:00
Maximilien Richer
63842d48fd Add config to input-varnish README (#3414) 2017-10-31 16:58:45 -07:00
Daniel Nelson
777b84d1dc Clarify what it means to filter metrics from processors 2017-10-30 16:32:39 -07:00
Daniel Nelson
c116af35c7 Update changelog 2017-10-30 15:35:34 -07:00
Daniel Nelson
fcfcc803b1 Use explicit schemas in mqtt_consumer input (#3401) 2017-10-30 15:33:20 -07:00
Daniel Nelson
4d5de8698b Update changelog 2017-10-30 13:53:45 -07:00
Aditya C S
23ad959d71 Add support for SSL settings to ElasticSearch output plugin (#3406) 2017-10-30 13:52:40 -07:00
Aditya C S
d9fa916711 Update docker plugin README (#3404) 2017-10-30 12:26:39 -07:00
Daniel Nelson
53b13a20d0 Update changelog 2017-10-27 11:55:17 -07:00
Maximilien Richer
ffa8a4a716 Add instance name option to varnish plugin (#3398)
This change adds a new configuration option to allow probing of
namespaced varnish instances, usually reached using the '-n' switch on
the varnish CLI.
2017-10-27 11:53:59 -07:00
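A sketch of how such an option typically reaches the command line; `varnishStat` is a hypothetical wrapper, not the plugin's code:

```go
package main

import (
	"fmt"
	"os/exec"
)

// varnishStat builds a varnishstat invocation: when instanceName is
// non-empty it is passed through as the -n switch.
func varnishStat(instanceName string) *exec.Cmd {
	args := []string{"-1"} // one-shot, machine-readable output
	if instanceName != "" {
		args = append(args, "-n", instanceName)
	}
	return exec.Command("varnishstat", args...)
}

func main() {
	cmd := varnishStat("customer-a")
	fmt.Println(cmd.Args) // [varnishstat -1 -n customer-a]
}
```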
Daniel Nelson
8b4708c82a Update changelog 2017-10-26 13:37:54 -07:00
Vladimir S
88ec171293 Perform DNS lookup before ping (#3385) 2017-10-26 13:35:37 -07:00
Daniel Nelson
5885ef2c1c Update changelog 2017-10-25 15:29:56 -07:00
Daniel Nelson
a519abf13f Gather concurrently from snmp agents (#3365) 2017-10-25 15:28:55 -07:00
Daniel Nelson
6ea61b55d9 Set release date for 1.4.3 2017-10-25 14:15:10 -07:00
Daniel Nelson
206397d475 Update changelog 2017-10-24 16:31:22 -07:00
Jeremy Doupe
a6797a44d5 Add history and summary types to telegraf and prometheus plugins (#3337) 2017-10-24 16:28:52 -07:00
Daniel Nelson
13c1f1524a Update changelog 2017-10-24 16:25:49 -07:00
Daniel Nelson
9a062498e7 Use golang.org/x/sys/unix instead of syscall in diskio (#3384) 2017-10-24 16:22:31 -07:00
Daniel Nelson
f64cf89db1 Update changelog 2017-10-24 15:46:47 -07:00
Daniel Nelson
6d1777276c If the connector name cannot be unquoted, use the raw value (#3371) 2017-10-24 15:36:23 -07:00
Daniel Nelson
65580759fc Update changelog 2017-10-23 12:36:31 -07:00
Sergei Smolianinov
d2f9fc7d8c Fix ACL token usage in consul input plugin (#3376) 2017-10-23 12:31:27 -07:00
Daniel Nelson
77cc071796 Update changelog 2017-10-19 17:06:14 -07:00
Daniel Nelson
4deb6238a3 Add support for decimal timestamps to ts-epoch modifier (#3358) 2017-10-19 16:36:32 -07:00
Daniel Nelson
7088d98304 Update changelog 2017-10-19 16:27:29 -07:00
Daniel Nelson
4243403432 Remove warning when JSON contains null value (#3359) 2017-10-19 16:25:58 -07:00
Mamat Rahmat
3bbc2beeed Fix small typo in documentation (#3364) 2017-10-19 14:47:40 -07:00
Daniel Nelson
0e6a70b199 Update changelog 2017-10-18 17:43:01 -07:00
Daniel Nelson
ec4efe5b03 Use labels in prometheus output for string fields (#3350) 2017-10-18 17:42:30 -07:00
Daniel Nelson
adb1f5588c Update changelog 2017-10-18 14:53:34 -07:00
Daniel Nelson
6e5915c59f Fix prometheus passthrough for existing value types (#3351) 2017-10-18 14:51:08 -07:00
Daniel Nelson
9b59cdd10e Update changelog 2017-10-18 13:57:58 -07:00
clheikes
02baa696c3 Fix TELEGRAF_OPTS expansion in systemd service unit (#3354) 2017-10-18 13:57:32 -07:00
Daniel Nelson
a4fa19252f Update changelog 2017-10-18 12:47:58 -07:00
Daniel Nelson
7ba376964c Update changelog 2017-10-18 12:25:46 -07:00
Ayrdrie
a75ab3e190 Fix mongodb input panic when restarting mongodb (#3355) 2017-10-18 12:24:30 -07:00
Daniel Nelson
2208657d73 Add release date info to FAQ 2017-10-17 10:43:53 -07:00
Daniel Nelson
9d8e935734 Update changelog 2017-10-16 14:26:12 -07:00
Pierre Fersing
f5a9d1bc75 Fix CPU system plugin gets stuck after system suspend (#3342) 2017-10-16 14:25:00 -07:00
Daniel Nelson
4b05edea53 Update changelog 2017-10-16 14:19:16 -07:00
Craig Wickesser
246ffab3e0 Add UDP IPv6 support to statsd input (#3344) 2017-10-16 14:18:36 -07:00
Daniel Nelson
3ea41e885c Update changelog 2017-10-16 11:27:00 -07:00
Daniel Nelson
1f348037b7 Fix case sensitivity issue in sqlserver query (#3336) 2017-10-16 11:26:16 -07:00
Daniel Nelson
86f19dee2b Fix typo in ipmi_sensor readme 2017-10-16 11:10:06 -07:00
Daniel Nelson
a1796989f7 Add ipmi_sensor permission documentation 2017-10-13 13:53:18 -07:00
Daniel Nelson
6b67fedfdc Remove timing sensitive riemann test 2017-10-13 11:30:30 -07:00
Daniel Nelson
5cd3327d5f Update changelog 2017-10-13 11:12:27 -07:00
Adam Johnson
bf9f94eb9d Fix cloudwatch output requires unneeded permissions (#3335) 2017-10-13 11:04:40 -07:00
Daniel Nelson
0f9f757da7 Update changelog 2017-10-12 17:26:58 -07:00
Windkit Li
2f8d0f4d47 Fix snmpwalk address format in leofs input (#3328) 2017-10-12 17:26:14 -07:00
Daniel Nelson
024dea2ff9 Update changelog 2017-10-12 15:52:01 -07:00
Daniel Nelson
fa25e123d8 Fix container name filters in docker input (#3331) 2017-10-12 15:50:09 -07:00
Patrick Hemmer
bed14e5037 Fix documented equation for diskio average queue depth (#3334) 2017-10-12 15:08:51 -07:00
Daniel Nelson
c74c29b164 Remove suggested plugins from readme.
These are confusing since we don't support all of the examples.
2017-10-11 12:56:33 -07:00
Daniel Nelson
4e0c8e6026 Set 1.4.2 release date 2017-10-10 13:29:31 -07:00
Daniel Nelson
d7ea83f39b Update readme and changelog for basicstats aggregator 2017-10-10 12:04:41 -07:00
Toni Moreno
b641f06552 Add new basicstats aggregator (#2167) 2017-10-10 12:02:01 -07:00
Pierre Tessier
c7a6d4eaa4 Fix link for wavefront plugin in changelog (#3317) 2017-10-10 11:21:46 -07:00
Daniel Nelson
61b0336d97 Use 5 second timeout overhead when waiting for ping to complete 2017-10-09 15:09:07 -07:00
Daniel Nelson
761544f56d Add HasPoint method to testutil.Accumulator 2017-10-09 15:02:57 -07:00
Daniel Nelson
0f452ad0df Document /etc/default/telegraf file 2017-10-06 16:57:57 -07:00
Daniel Nelson
4093bc98b7 Update changelog 2017-10-06 16:17:09 -07:00
Christian Meilke
75567d5b51 Add ability to limit node stats in elasticsearch input (#3304) 2017-10-06 16:16:32 -07:00
Daniel Nelson
59bb31e765 Use golang 1.9.1 2017-10-05 16:19:53 -07:00
Daniel Nelson
13c7802b84 Update changelog 2017-10-05 16:15:43 -07:00
Daniel Nelson
cce40c515a Use chunked transfer encoding in InfluxDB output (#3307) 2017-10-05 16:14:21 -07:00
Daniel Nelson
6e1fa559a3 Update changelog 2017-10-05 16:05:51 -07:00
Daniel Nelson
f56dda0ac8 Fix panic in cpu input if number of cpus changes (#3306) 2017-10-05 16:02:21 -07:00
Daniel Nelson
4fab572b6b Release buffer back to pool earlier 2017-10-05 12:12:14 -07:00
Daniel Nelson
b9f319529f Update changelog 2017-10-04 15:30:11 -07:00
Christian Meilke
0bb32570ba Add cluster health level configuration to elasticsearch input (#3269) 2017-10-04 15:29:32 -07:00
Daniel Nelson
a4ea4c7a25 Add smart to changelog and readme 2017-10-04 15:18:15 -07:00
Rickard von Essen
e69c3f9d1c Add smart input plugin for collecting S.M.A.R.T. data (#2449) 2017-10-04 15:15:58 -07:00
Daniel Nelson
002ccf3295 Update changelog 2017-10-03 15:25:19 -07:00
Daniel Nelson
a163effa6d Add support for proxy environment variables to http_response (#3302) 2017-10-03 15:22:57 -07:00
Daniel Nelson
93ff811358 Update changelog 2017-10-03 14:37:02 -07:00
Aditya C S
dd4299e925 Collect Docker Swarm service metrics in docker input plugin (#3141) 2017-10-03 14:36:26 -07:00
Daniel Nelson
b610276485 Skip invalid urls in nginx input 2017-10-03 10:54:31 -07:00
David Norton
6aee40fac1 bug fixes and refactoring 2017-10-03 09:07:15 -04:00
Pierre Tessier
79f66dc5b3 Added newline to each metric line in wavefront output (#3290) 2017-10-02 17:42:21 -07:00
Daniel Nelson
0a55ab42b4 Update changelog 2017-10-02 17:39:32 -07:00
Jimena Cabrera Notari
aba269e94c Add extra wired tiger cache metrics to mongodb input (#3281) 2017-10-02 17:38:51 -07:00
Daniel Nelson
f67350107d Update changelog 2017-10-02 17:16:38 -07:00
Daniel Nelson
8e3ed96d6f Fix case sensitivity error in sqlserver input (#3287) 2017-10-02 17:15:34 -07:00
Daniel Nelson
771fbc311a Regenerate TLS certs due to expiration 2017-10-02 15:44:55 -07:00
David G. Simmons
d7b88b10ad New Particle Plugin 2017-10-02 16:50:23 -04:00
Daniel Nelson
cdca81c999 Fix mqtt_consumer connection_timeout test 2017-10-02 12:28:31 -07:00
Daniel Nelson
ed6f438c9d Add Wavefront output to changelog and readme 2017-09-29 16:15:48 -07:00
Pierre Tessier
366f3f560c Add Wavefront output plugin (#3160) 2017-09-29 16:13:08 -07:00
Daniel Nelson
e4f5547d37 Update example config 2017-09-29 16:09:31 -07:00
Daniel Nelson
e1bf655ef9 Add deprecation notice to jolokia sample config 2017-09-29 16:08:31 -07:00
Daniel Nelson
29b6f4168c Update changelog 2017-09-29 15:59:56 -07:00
Daniel Nelson
3d62e045af Fix format of connection_timeout in mqtt_consumer (#3286) 2017-09-29 15:58:38 -07:00
Daniel Nelson
ad4a5aa7a0 Document how to exclude kubernetes annotation 2017-09-29 14:07:19 -07:00
Daniel Nelson
f2cb1da7cf Update changelog 2017-09-29 11:50:15 -07:00
François de Metz
c3d15f0aff Add support for the rollbar occurrence webhook event. (#1692) 2017-09-29 11:49:22 -07:00
David G. Simmons
b2453e3ec3 Revert "New Particle.io Plugin for Telegraf"
This reverts commit c3b11f9cfb.
Accidentally pushed to master, instead of my fork. Backing it out.
2017-09-29 12:57:13 -04:00
David G. Simmons
c3b11f9cfb New Particle.io Plugin for Telegraf
Only the tests need to be fixed.
2017-09-29 12:45:06 -04:00
Daniel Nelson
cd1791494a Update changelog 2017-09-27 11:38:43 -07:00
Daniel Nelson
402460f038 Use underscore as default opentsdb separator
Preserves backwards compatibility
2017-09-27 11:36:41 -07:00
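A small sketch of the compatibility point, using a hypothetical `buildMetricName` helper: the separator is configurable, but defaulting it to an underscore keeps existing OpenTSDB metric names unchanged:

```go
package main

import (
	"fmt"
	"strings"
)

// buildMetricName joins the parts of an OpenTSDB metric name with a
// configurable separator, falling back to "_" for backwards
// compatibility with previously emitted names.
func buildMetricName(prefix, measurement, field, separator string) string {
	if separator == "" {
		separator = "_" // default preserves existing metric names
	}
	return strings.Join([]string{prefix, measurement, field}, separator)
}

func main() {
	fmt.Println(buildMetricName("telegraf", "cpu", "usage_idle", ""))  // telegraf_cpu_usage_idle
	fmt.Println(buildMetricName("telegraf", "cpu", "usage_idle", ".")) // telegraf.cpu.usage_idle
}
```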
owlet123
f85db90780 Add configurable separator for metrics and fields in opentsdb output (#3106) 2017-09-27 11:29:40 -07:00
Daniel Nelson
9bddd50a64 Add deprecation notice to jolokia plugin 2017-09-27 10:52:10 -07:00
Daniel Nelson
b8a0b8461a Update changelog and readme for jolokia2 plugin 2017-09-26 17:42:38 -07:00
Dylan Meissner
ee26191eb5 Add redesigned Jolokia input plugin (#2278) 2017-09-26 17:34:46 -07:00
Daniel Nelson
cadafa6405 Update changelog 2017-09-26 16:03:04 -07:00
Daniel Nelson
22a9ffbb9d Allow JSON data format to contain zero metrics (#3268) 2017-09-26 15:58:33 -07:00
Daniel Nelson
2e1457a496 Update changelog 2017-09-26 15:38:22 -07:00
Daniel Nelson
8614445235 Fix parsing of JSON with a UTF8 BOM in httpjson (#3267) 2017-09-26 15:36:00 -07:00
Daniel Nelson
f23d1eb078 Update changelog 2017-09-26 15:28:07 -07:00
Daniel Nelson
ef5c12bd86 Fix dmcache tests with 32bit int 2017-09-26 15:25:57 -07:00
Daniel Nelson
c013cc1497 Fix cgroup tests with 32bit int 2017-09-26 15:25:57 -07:00
Daniel Nelson
bb665cf013 Fix ceph tests with 32bit int 2017-09-26 15:25:57 -07:00
Daniel Nelson
5dff5932fd Fix nginx_plus tests with 32bit int 2017-09-26 15:25:57 -07:00
Daniel Nelson
f823fc73f6 Allow 64bit integers in kernel_vmstat 2017-09-26 15:25:57 -07:00
Daniel Nelson
fd702e6bb8 Set 1.4.1 release date in changelog 2017-09-26 14:19:02 -07:00
Daniel Nelson
50024c1860 Update changelog 2017-09-25 16:34:04 -07:00
Lukasz Jagiello
a4b8805f7f Add support for NSQLookupd to nsq_consumer (#3215) 2017-09-25 16:33:05 -07:00
James
837e6b1a32 Add additional numeric type handling tests for postgresql_extensible (#3066) 2017-09-25 10:58:10 -07:00
Agniva De Sarker
063f3f68df Improve statsd plugin perf by using a byte buffer pool (#3254) 2017-09-25 10:55:02 -07:00
Daniel Nelson
b24663b0bd Remove nightly versioning scheme 2017-09-22 18:07:08 -07:00
Daniel Nelson
366bda45c3 Remove out of date Vagrantfile 2017-09-22 17:35:58 -07:00
Daniel Nelson
c010fb1c3c Fix build versioning; add dev.docker file 2017-09-22 17:35:58 -07:00
Daniel Nelson
08c197f73a Fix golang version 2017-09-22 17:35:58 -07:00
Daniel Nelson
cafb22d145 Fix unittest for golang 1.9 2017-09-22 17:35:58 -07:00
Christian Meilke
73df179bd6 Tag original URL for k8s services in prometheus input (#3257) 2017-09-22 17:26:19 -07:00
Daniel Nelson
c3bea59f3b Update changelog 2017-09-22 11:46:47 -07:00
Daniel Nelson
52393582d2 Unlock Statsd when stopping to prevent deadlock (#3258) 2017-09-22 11:45:45 -07:00
Daniel Nelson
ce29ca78e3 Add nginx_plus to changelog and readme 2017-09-19 11:49:55 -07:00
Patrick O'Brien
6e6ed075dc Add new nginx_plus input plugin (#3214) 2017-09-19 11:46:01 -07:00
Daniel Nelson
c0a4bd99a1 Update changelog 2017-09-19 11:27:57 -07:00
Paulo Cabido
decb09e760 Add configurable metrics endpoint to prometheus output (#3245) 2017-09-19 11:27:11 -07:00
Daniel Nelson
a63f80e017 Build with go 1.9 on circleci 2017-09-18 16:30:09 -07:00
Daniel Nelson
daee48c861 Update prometheus input documentation 2017-09-18 16:21:45 -07:00
Daniel Nelson
dea8bf7ac0 Update changelog 2017-09-18 15:07:18 -07:00
Christian Meilke
292c5229bf Add support for k8s service DNS discovery to prometheus input (#3236) 2017-09-18 15:06:11 -07:00
Daniel Nelson
0048bf2120 Update changelog 2017-09-18 14:25:17 -07:00
Daniel Nelson
b8e134cd37 Fix arm64 packages contain 32-bit executable (#3246) 2017-09-18 14:22:54 -07:00
Patrick Hemmer
0339dc7faf Add process resource limits to procstat input (#3231) 2017-09-15 11:16:44 -07:00
Daniel Nelson
575a07c985 Update input plugin example readme. 2017-09-14 15:50:55 -07:00
Daniel Nelson
b94cda6b46 Update changelog 2017-09-14 15:28:47 -07:00
Trevor Pounds
73372872c2 Fix panic in statsd p100 calculation (#3230) 2017-09-14 15:27:42 -07:00
Daniel Nelson
103ae3b710 Update changelog 2017-09-14 15:22:46 -07:00
Trevor Pounds
171332c579 Add support for timing sums in statsd input (#3234) 2017-09-14 15:21:54 -07:00
Daniel Nelson
875ab3c4b7 Update changelog 2017-09-14 15:05:03 -07:00
Mark Wilkinson - m82labs
1c5ebd4be3 Fix duplicate keys in perf counters sqlserver query (#3175) 2017-09-14 15:04:13 -07:00
Daniel Nelson
103d24bfba Update changelog 2017-09-14 15:00:55 -07:00
Daniel Nelson
d5f48e3e96 Fix skipped line with empty target in iptables (#3235) 2017-09-14 14:59:28 -07:00
Daniel Nelson
7a41d2c586 Update changelog 2017-09-14 13:06:58 -07:00
Trevor Pounds
fa1982323a Fix counter and gauge metric types. (#3232) 2017-09-14 13:05:37 -07:00
Daniel Nelson
cdf63c5776 Update changelog 2017-09-13 17:31:39 -07:00
Daniel Nelson
0a8c2e0b3b Whitelist allowed char classes for opentsdb output. (#3227) 2017-09-13 17:30:52 -07:00
Daniel Nelson
9197a59cdb Update changelog 2017-09-13 17:28:33 -07:00
Dimitris Rozakis
9c8f4afa37 Respect path prefix in influx output uri (#3224) 2017-09-13 17:27:01 -07:00
Daniel Nelson
eebee9759f Fix fluentd test 2017-09-12 17:57:55 -07:00
Daniel Nelson
ee85f9275e Update changelog 2017-09-12 17:27:50 -07:00
Daniel Nelson
4e53464fe2 Remove unneeded error check 2017-09-12 17:24:57 -07:00
Adrián López
2163981872 Add timeout option for kubernetes (#3211) 2017-09-12 17:22:15 -07:00
Daniel Nelson
c5cfde667a Update changelog 2017-09-12 17:17:41 -07:00
Daniel Nelson
8a68e7424c Fix optional field types in fluentd input 2017-09-12 17:15:19 -07:00
Daniel Nelson
cc63b3b667 Update changelog 2017-09-11 12:27:39 -07:00
DanKans
5488f4b3ac Fix MQTT input exits if Broker is not available on startup (#3202) 2017-09-11 12:24:51 -07:00
Daniel Nelson
14a4b108b4 Update changelog 2017-09-11 11:57:18 -07:00
Daniel Nelson
32f313a6a6 Add polling method to logparser and tail inputs (#3213) 2017-09-11 11:56:04 -07:00
Daniel Nelson
c720200883 Update changelog 2017-09-11 11:54:18 -07:00
DanKans
f62e543003 Fix address already in use with webhooks input during reload (#3206) 2017-09-11 11:51:45 -07:00
Daniel Nelson
be83c8c8f0 Update changelog 2017-09-08 16:02:15 -07:00
Jeff Nickoloff
c809debfd4 TLS and MTLS enhancements to HTTPListener input plugin (#3191) 2017-09-08 16:01:16 -07:00
Daniel Nelson
247c2e71fd Update changelog 2017-09-08 15:36:26 -07:00
Daniel Nelson
7b08f9d099 Add support for standard proxy env vars in outputs. (#3212) 2017-09-08 15:35:20 -07:00
Daniel Nelson
d0b690f040 Fix short tests on darwin (#3099) 2017-09-08 13:03:37 -07:00
Daniel Nelson
98ca22597d Update changelog 2017-09-06 14:29:03 -07:00
Raúl Benencia
99dfc69fbb Include mount mode option in disk metrics (#3027) 2017-09-06 14:28:11 -07:00
Daniel Nelson
144862354a Update changelog 2017-09-06 14:20:38 -07:00
Daniel Nelson
402a0f16e1 Fix typo 2017-09-06 14:19:42 -07:00
Pavel Gurkov
5d4eec606f Add Kafka output plugin topic_suffix option (#3196) 2017-09-06 14:18:26 -07:00
Daniel Nelson
ab1c11b06d Add 1.4.0 release date 2017-09-05 17:14:11 -07:00
Daniel Nelson
864ea1efaf Improve question title in FAQ 2017-09-05 17:12:36 -07:00
Daniel Nelson
4fb1c3a2bc Add FAQ doc with dns resolver information 2017-09-05 13:12:11 -07:00
Daniel Nelson
9796d3c99d Use ip address for default InfluxDB ip in config
Helps with initial setup if localhost cannot be resolved due to the pure
Go resolver.
2017-09-05 12:55:21 -07:00
Daniel Nelson
98e784faf3 Sort metrics before comparing in graphite test 2017-09-05 12:50:30 -07:00
rdxmb
16d6011ca1 Fix docker image name in docs (#3193) 2017-09-05 11:44:51 -07:00
Daniel Nelson
f43af72785 Update changelog 2017-08-31 13:43:47 -07:00
Daniel Nelson
28d16188b3 Fix panic when handling string fields with escapes (#3188) 2017-08-30 21:16:37 -07:00
Daniel Nelson
19f3264073 Update changelog 2017-08-29 16:27:02 -07:00
Daniel Nelson
8225bd0173 Convert bool fields to int in graphite serializer 2017-08-29 16:22:03 -07:00
Seua Polyakov
3806424aab Skip non-numerical values in graphite format (#3179) 2017-08-29 15:59:38 -07:00
Daniel Nelson
ef8876b70b Move changelog item to 1.4 2017-08-28 17:17:03 -07:00
Daniel Nelson
5fd8ab36d3 Update changelog 2017-08-28 17:08:44 -07:00
Jeff Nickoloff
ac1fa05672 Added CloudWatch metric constraint validation (#3183) 2017-08-28 16:56:03 -07:00
Daniel Nelson
73d57c8a02 Update changelog 2017-08-28 16:30:51 -07:00
Nevins
95fe0e43f5 Add support for sharding based on metric name (#3170) 2017-08-28 16:24:38 -07:00
Daniel Nelson
02f7b0d030 Update changelog 2017-08-28 16:11:00 -07:00
Dylan Meissner
a9a40cbf87 HTTP headers can be added to InfluxDB output (#3182) 2017-08-28 16:08:50 -07:00
Daniel Nelson
a98496591a Update changelog 2017-08-25 18:08:33 -07:00
Ashton Kinslow
0a6541dfa8 Fix NSQ input plugin when used with version 1.0.0-compat 2017-08-25 18:06:48 -07:00
Daniel Nelson
8ecc58639a Close response bodies in http_listener test 2017-08-25 13:58:45 -07:00
Daniel Nelson
6abecd0ac7 Update changelog 2017-08-25 12:59:19 -07:00
Rickard von Essen
0502b65316 Don't fail parsing of zpool stats if pool health is UNAVAIL on FreeBSD (#3149) 2017-08-25 12:57:35 -07:00
Daniel Nelson
e400fcf5da Update changelog 2017-08-25 11:55:59 -07:00
Jan Willem Janssen
d449833de9 Fix parsing of SHM remotes in ntpq input (#3163) 2017-08-25 11:54:06 -07:00
Daniel Nelson
58751fa4df Update fail2ban documentation 2017-08-25 11:42:07 -07:00
Daniel Nelson
656ce31d98 Fix amqp_consumer data_format documentation
closes #3164
2017-08-24 13:17:29 -07:00
Daniel Nelson
485e273187 Add links to nightly builds 2017-08-23 15:42:25 -07:00
Daniel Nelson
f95c239a3f Update changelog 2017-08-23 15:21:48 -07:00
Daniel Nelson
ae24a0754b Escape backslash within string fields (#3161) 2017-08-23 15:17:26 -07:00
Daniel Nelson
f253623231 Update changelog 2017-08-23 15:16:04 -07:00
Rickard von Essen
f0db4fd901 Enable hddtemp on all platforms (#3153)
Also disables dmcache tests on non-linux.
2017-08-23 15:14:32 -07:00
Daniel Nelson
8c68bd9ddb Update changelog 2017-08-22 17:03:00 -07:00
Daniel Nelson
9fc7220c2e Don't start Telegraf on install in Amazon Linux (#3156) 2017-08-22 17:01:59 -07:00
Daniel Nelson
6597b55477 Update changelog 2017-08-22 16:55:15 -07:00
Daniel Nelson
1f4a997164 Don't retry points beyond retention policy (#3155) 2017-08-22 16:52:26 -07:00
Daniel Nelson
5224b526f4 Hide output of git describe 2017-08-22 13:32:52 -07:00
Rickard von Essen
371638ce56 Enable fail2ban on all platforms (#3151) 2017-08-22 12:58:00 -07:00
Rickard von Essen
53c5d3a290 Enable chrony for all platforms (#3152) 2017-08-22 11:49:51 -07:00
Daniel Nelson
b480022330 Update config directory documentation 2017-08-22 11:33:26 -07:00
Daniel Nelson
ccf17a9f93 Cache intermediate objects during build 2017-08-21 17:26:55 -07:00
Chris Goller
13a6b917c3 Add JSON input support to zipkin plugin (#3150) 2017-08-21 17:24:54 -07:00
Daniel Nelson
1f1e9cc49f Add win_services to the readme 2017-08-18 17:57:30 -07:00
Daniel Nelson
70c2b83f00 Update histogram aggregator documentation (#3133) 2017-08-18 13:24:05 -07:00
Daniel Nelson
4de264ffc8 Remove version test 2017-08-18 11:08:48 -07:00
Daniel Nelson
36c2c88fd2 Update example config 2017-08-17 18:54:06 -07:00
Daniel Nelson
e31d91f0f9 Add queues to rabbitmq documentation (#3135) 2017-08-17 18:52:27 -07:00
Daniel Nelson
3006ccbf2f Update master for 1.5 development 2017-08-16 16:54:15 -07:00
Daniel Nelson
8b588ea37f Update sample config 2017-08-16 16:46:40 -07:00
Daniel Nelson
7608251633 Add tomcat input to changelog and readme 2017-08-16 15:36:56 -07:00
Daniel Nelson
1e9d7cd6e9 Add error status handle to tomcat input 2017-08-16 15:33:47 -07:00
mlindes
a91457e001 Add tomcat input plugin (#3112) 2017-08-16 15:33:20 -07:00
Daniel Nelson
fd3a9bf46a Update changelog 2017-08-16 12:26:00 -07:00
Daniel Nelson
ca394fcfb2 Discard logging from tail library (#3128) 2017-08-16 12:06:07 -07:00
Daniel Nelson
3819607511 Allow using system plugin in Windows (#3127) 2017-08-16 12:05:46 -07:00
Daniel Nelson
eb0215c382 Remove log message on ping timeout (#3126) 2017-08-16 11:59:41 -07:00
Daniel Nelson
09153c815c Move http_response headers to end of configuration.
If the subtable comes before other options, they will be placed in the
subtable.
2017-08-15 11:50:08 -07:00
Daniel Nelson
9bc13f143e Test for nil metric before reading tags in logparser 2017-08-15 11:43:16 -07:00
Daniel Nelson
032348c7a5 Update changelog 2017-08-14 14:51:28 -07:00
Bob Shannon
5fbdd09aaf Add gzip content-encoding support to influxdb output (#2978) 2017-08-14 14:50:15 -07:00
Daniel Nelson
7d5dae5a08 Improve apache input docs (#3120) 2017-08-11 17:50:51 -07:00
Daniel Nelson
54be037911 Use double hyphen in cli examples 2017-08-11 16:26:54 -07:00
Daniel Nelson
5003809e97 Merge LDFLAGS from env into build 2017-08-11 16:26:54 -07:00
G-Research
1b50f14d55 Build NTPQ input on Windows (#3117) 2017-08-11 13:36:25 -07:00
Patrick Hemmer
b0109b3550 Add weighted_io_time to diskio input (#3119) 2017-08-11 11:49:42 -07:00
Daniel Nelson
257b460f61 Update changelog 2017-08-10 12:41:09 -07:00
Daniel Nelson
287a44de5e Skip compilation of logparser and tail on Solaris (#3113)
Allows compilation for Solaris
2017-08-10 12:36:11 -07:00
Daniel Nelson
73897d1f1c Update changelog 2017-08-10 10:22:11 -07:00
Daniel Nelson
1e2d594af0 Converge to typed value in prometheus output (#3104) 2017-08-10 10:19:28 -07:00
Daniel Nelson
83c003e594 Update changelog 2017-08-09 11:48:36 -07:00
Daniel Nelson
84ce9629a8 Tweak formatting of varnish README 2017-08-09 11:48:12 -07:00
Daniel Nelson
3c14b46f6f Fix ordering of all target 2017-08-09 11:47:55 -07:00
Benjamin Stromski
8a2373e8c8 Add option to run varnish under sudo (#3097) 2017-08-09 11:38:54 -07:00
Daniel Nelson
cb04fa1e9c Add diskio %util sample query 2017-08-09 11:28:27 -07:00
Seva Poliakov
92af42a847 Remove tag_env duplicate from docker README (#3109) 2017-08-09 10:21:22 -07:00
Daniel Nelson
bceb020d72 Update changelog and readme 2017-08-08 11:50:16 -07:00
Rodolphe Blancho
d9deb266df Add salesforce input plugin (#3075) 2017-08-08 11:48:01 -07:00
Slawomir Skowron
f3435f1c59 Add TCP listener for statsd input (#2293) 2017-08-08 11:41:26 -07:00
Daniel Nelson
f9573ad969 Remove Godeps_windows from build.py 2017-08-07 17:43:06 -07:00
Daniel Nelson
40aacd9046 Fix artifact redirection 2017-08-07 17:41:52 -07:00
Daniel Nelson
5e73f3e816 Only upload nightly if on master branch 2017-08-07 17:24:35 -07:00
Daniel Nelson
a1e7a5f474 Upload as nightly builds if PACKAGE set 2017-08-07 17:16:34 -07:00
Daniel Nelson
828c5817f9 Update changelog 2017-08-07 16:18:01 -07:00
Daniel Nelson
3e27134872 Add path tag to logparser containing path of logfile (#3098) 2017-08-07 16:16:31 -07:00
Daniel Nelson
1fb5373962 Build releases with -w -s ldflags 2017-08-07 15:47:20 -07:00
Daniel Nelson
75e6ebcf93 Update changelog 2017-08-07 14:39:22 -07:00
Vlasta Hajek
e21f2de8b8 Add Windows Services input plugin (#3023) 2017-08-07 14:36:15 -07:00
Daniel Nelson
795f02ab88 Cleanup Makefile (#3089) 2017-08-03 11:54:05 -07:00
Daniel Nelson
360d03e301 Update changelog and readme 2017-08-02 18:02:41 -07:00
Daniel Nelson
137b312fa9 Add Zipkin input plugin (#3080) 2017-08-02 17:58:26 -07:00
Daniel Nelson
ce12913bc2 Update precision documentation and examples
Precision is no longer used by the InfluxDB output.

closes #3079
2017-08-01 15:02:36 -07:00
Daniel Nelson
d82c5062b8 Add Appveyor continuous integration (#3074) 2017-07-31 16:12:09 -07:00
Daniel Nelson
6666e6a5a7 Update changelog 2017-07-31 11:37:32 -07:00
Vladislav Mugultyanov
9c0aadf445 Add histogram aggregator plugin (#2387) 2017-07-31 11:33:51 -07:00
Daniel Nelson
3bd14ed229 Update changelog 2017-07-31 11:30:27 -07:00
DanKans
5e95367f6c Sanitize password from couchbase metric (#3033) 2017-07-31 11:29:14 -07:00
Jeff Ashton
c31e7d0b91 Fix win_perf_counters tests (#3068) 2017-07-31 11:03:26 -07:00
Oscar Sironi
f8c84302a4 Add config file path troubleshooting advice for Windows (#3071) 2017-07-31 10:58:12 -07:00
Daniel Nelson
9143670d6e Update changelog 2017-07-27 17:19:33 -07:00
Daniel Nelson
f0bd69d904 Add tls options to docker input (#3063) 2017-07-27 17:18:44 -07:00
Daniel Nelson
7179290dea Update changelog 2017-07-27 15:21:52 -07:00
Daniel Nelson
c4297f40ad Allow iptable entries with trailing text (#3060) 2017-07-27 15:21:06 -07:00
Daniel Nelson
0d4c954e01 Update changelog 2017-07-27 15:15:11 -07:00
Daniel Nelson
d6cf9f4f30 Fix docker memory and cpu reporting in Windows (#3043) 2017-07-27 15:12:29 -07:00
Daniel Nelson
5f88be022c Add circleci parameter to build packages 2017-07-26 17:13:50 -07:00
Daniel Nelson
284ab79a37 Set 1.3.5 release date 2017-07-26 15:53:49 -07:00
Daniel Nelson
2bd6c80506 Update changelog 2017-07-25 17:12:45 -07:00
Daniel Nelson
0ca936a12e Default to localhost if zookeeper has no servers set (#3056) 2017-07-25 17:08:32 -07:00
Daniel Nelson
a26fc52181 Fix panic in logparser if file cannot be opened (#3055) 2017-07-25 17:08:03 -07:00
Daniel Nelson
83f575fcea Add redis_version field to redis input (#3054) 2017-07-25 17:07:43 -07:00
Daniel Nelson
ffd1f25b75 Update changelog 2017-07-25 16:09:48 -07:00
Daniel Nelson
1658404cea Update changelog 2017-07-25 15:43:13 -07:00
Daniel Nelson
82ea04f188 Fix prometheus output cannot be reloaded (#3053) 2017-07-25 15:41:18 -07:00
xin053
273d0b85b0 Correct spelling of toml field in mysql input (#3051) 2017-07-25 10:57:27 -07:00
Théophile Helleboid - chtitux
f3917ec5ff Fix typo in postgresql_extensible/README.md (#3052) 2017-07-25 10:39:14 -07:00
Daniel Nelson
428455e032 Update changelog 2017-07-24 18:26:29 -07:00
Daniel Nelson
573bd4aa32 Start first aggregator period at startup time (#3050)
Fixes issue where metrics collected immediately after startup would not
be aggregated.
2017-07-24 18:25:05 -07:00
Oskar
ab5205f8c3 Fix go vet under windows (#3046) 2017-07-24 12:36:33 -07:00
Daniel Nelson
85aa212467 Update changelog 2017-07-21 16:57:28 -07:00
Daniel Nelson
840d19db35 Add network option to dns_query (#3042) 2017-07-21 16:56:08 -07:00
Daniel Nelson
1c267e9b16 Update changelog 2017-07-21 15:46:22 -07:00
Andy Cobaugh
1ff6e92193 Add input plugin for OpenLDAP (#2612) 2017-07-21 15:44:20 -07:00
Daniel Nelson
c82c0e596b Update changelog 2017-07-21 14:31:25 -07:00
Daniel Nelson
31ce98fa91 Don't match pattern on any error (#3040)
This prevents a pattern with no wildcards from matching when
permission is denied.
2017-07-21 14:28:14 -07:00
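A sketch of the rule in Go, with a hypothetical `pathMatches` helper: a pattern without glob metacharacters should count as a match only when the file can be shown to exist, so errors such as permission denied must not match:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// pathMatches is a hypothetical helper: for a literal pattern (no glob
// metacharacters) the path matches only if Stat succeeds; any error,
// including permission denied, means no match.
func pathMatches(pattern string) bool {
	if !strings.ContainsAny(pattern, "*?[") {
		_, err := os.Stat(pattern)
		return err == nil
	}
	matches, err := filepath.Glob(pattern)
	return err == nil && len(matches) > 0
}

func main() {
	fmt.Println(pathMatches("/etc/hostname")) // true on most Linux systems
	fmt.Println(pathMatches("/no/such/file")) // false, same as permission denied
	fmt.Println(pathMatches("/etc/host*"))    // wildcard patterns go through Glob
}
```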
Daniel Nelson
4d66db1603 Update changelog 2017-07-21 14:26:39 -07:00
Yann Cézard
681d20083a Only report cpu usage for online cpus in docker input (#3035) 2017-07-21 14:25:17 -07:00
Daniel Nelson
4ee74ff54b Document GNU make requirement 2017-07-21 11:15:00 -07:00
Daniel Nelson
16073e4172 Update changelog 2017-07-21 10:57:39 -07:00
Daniel Nelson
3c204d409d Line wrap documentation 2017-07-21 10:57:12 -07:00
DanKans
d903a9142d Fix filtering when both pass and drop match an item (#3036)
Adjust the logic in the functions responsible for passing metrics so
that they are processed correctly when pass and drop are defined together.
2017-07-21 10:53:57 -07:00
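An illustrative version of the combined rule (not the plugin's code): when both lists are set, an item must match the pass list and must not match the drop list.

```go
package main

import "fmt"

// shouldPass sketches the combined filter: pass acts as a whitelist,
// drop as a blacklist, and both apply when both are configured.
func shouldPass(item string, pass, drop []string) bool {
	if len(pass) > 0 {
		ok := false
		for _, p := range pass {
			if p == item {
				ok = true
				break
			}
		}
		if !ok {
			return false
		}
	}
	for _, d := range drop {
		if d == item {
			return false
		}
	}
	return true
}

func main() {
	pass := []string{"cpu", "mem"}
	drop := []string{"mem"}
	fmt.Println(shouldPass("cpu", pass, drop)) // true
	fmt.Println(shouldPass("mem", pass, drop)) // false: dropped despite passing
}
```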
Daniel Nelson
a2d4453269 Update changelog 2017-07-19 13:09:49 -07:00
DanKans
34c042c7dc Fix combined tagdrop/tagpass filtering (#3031) 2017-07-19 13:08:40 -07:00
Daniel Nelson
4dfe2312d0 Switch skipped kafka test 2017-07-18 18:18:57 -07:00
Daniel Nelson
c740dce36d Update download information in readme 2017-07-18 13:54:38 -07:00
Daniel Nelson
475a926d43 Update changelog 2017-07-18 11:03:07 -07:00
DanKans
d2626f1da6 Fix ntpq parse issue when using dns_lookup (#3026) 2017-07-18 11:01:08 -07:00
soldierkam
f5a8415c78 Add read timeout to socket_listener 2017-07-17 18:34:36 -07:00
Daniel Nelson
1d416a4213 Remove command in example output 2017-07-17 15:08:17 -07:00
Daniel Nelson
731ab9773d Update changelog 2017-07-17 12:01:35 -07:00
Daniel Nelson
d8f7b76253 Prevent startup if intervals are 0 2017-07-17 11:58:47 -07:00
Daniel Nelson
dbe2f79019 Update changelog 2017-07-14 10:45:32 -07:00
Bob Shannon
ef63908541 Add result_type field to net_response input plugin (#2990) 2017-07-14 10:43:36 -07:00
Daniel Nelson
27e47614c6 Add credits for new plugins to changelog 2017-07-13 16:14:18 -07:00
Daniel Nelson
dc4a133b11 Update changelog 2017-07-13 16:00:09 -07:00
DanKans
f4d67d8c3c Add fluentd input plugin (#2661) 2017-07-13 15:58:20 -07:00
Daniel Nelson
785798611e Update changelog 2017-07-13 15:39:45 -07:00
Daniel Nelson
b165ce4cd5 Prevent possible deadlock when using aggregators (#3016)
Looping the metrics back through the same channel could result in a
deadlock. By using a new channel and locking the processor we can ensure
that all stages can make continual progress.
2017-07-13 15:34:21 -07:00
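A toy illustration of the deadlock shape and the fix described above, using plain channels rather than Telegraf's pipeline types: feeding results back into the input channel can block once it is full, while forwarding them on a dedicated output channel keeps every stage draining.

```go
package main

import "fmt"

func main() {
	in := make(chan int, 1)
	out := make(chan int, 1) // dedicated channel: a stage never feeds itself

	done := make(chan struct{})
	go func() { // downstream consumer keeps draining out
		for v := range out {
			fmt.Println("forwarded", v)
		}
		close(done)
	}()

	// Aggregator stage: reads from in, writes to out. Writing back into
	// `in` here instead could block forever once `in` is full.
	go func() {
		for v := range in {
			out <- v * 10
		}
		close(out)
	}()

	for i := 0; i < 5; i++ {
		in <- i
	}
	close(in)
	<-done
}
```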
Daniel Nelson
d9d1ca5a46 Add release date for 1.3.4 2017-07-12 17:15:38 -07:00
Daniel Nelson
2c10806fef Update changelog 2017-07-12 12:04:43 -07:00
Daniel Nelson
5d2c093105 Prevent Write from being called concurrently (#3011) 2017-07-12 12:03:23 -07:00
Daniel Nelson
f68bab1667 Update changelog 2017-07-11 15:55:44 -07:00
Daniel Nelson
1388e2cf92 Do not allow metrics with trailing slashes (#3007)
It is not possible to encode a measurement, tag, or field whose last
character is a backslash, because backslash is not an escapable
character. Because of the tight coupling between line protocol and the
internal metric model, prevent metrics like this from being created.

Measurements with a trailing backslash are not allowed and the point
will be dropped. Tags and fields with a trailing backslash will be
dropped from the point.
2017-07-11 15:54:38 -07:00
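A sketch of the validation rule with a hypothetical `sanitize` helper: a trailing backslash cannot be encoded, so the measurement case rejects the whole point while the field case drops only the offending key:

```go
package main

import (
	"fmt"
	"strings"
)

// sanitize is a hypothetical helper: it rejects the point when the
// measurement ends in a backslash, and drops individual fields whose
// key or value does.
func sanitize(measurement string, fields map[string]string) (map[string]string, error) {
	if strings.HasSuffix(measurement, `\`) {
		return nil, fmt.Errorf("dropping point: measurement %q ends in a backslash", measurement)
	}
	out := make(map[string]string, len(fields))
	for k, v := range fields {
		if strings.HasSuffix(k, `\`) || strings.HasSuffix(v, `\`) {
			continue // drop just this field
		}
		out[k] = v
	}
	return out, nil
}

func main() {
	fields := map[string]string{"path": `C:\dir\`, "ok": "1"}
	fmt.Println(sanitize("disk", fields)) // map[ok:1] <nil>
	fmt.Println(sanitize(`disk\`, fields))
}
```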
Daniel Nelson
af318f4959 Update changelog 2017-07-11 14:10:09 -07:00
JSH
9f244cf1ac Fix chrony plugin does not track system time offset (#2989) 2017-07-11 14:08:40 -07:00
Daniel Nelson
885aa8e6e1 Update changelog 2017-07-10 19:07:28 -07:00
Daniel Nelson
945446b36f Fix handling of escapes within fieldset (#3003)
Line protocol does not require or allow escaping of backslash; the only
requirement for a byte to be escaped is that it is an escapable char
immediately preceded by a backslash.
2017-07-10 19:05:18 -07:00
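A sketch of the stated rule for unescaping (hypothetical helper, not the actual parser): a backslash only consumes the next byte when that byte is in the escapable set; otherwise both bytes pass through literally.

```go
package main

import (
	"fmt"
	"strings"
)

// unescape applies the rule sketched above: a backslash is an escape
// only when immediately followed by an escapable byte.
func unescape(s string, escapable string) string {
	var b strings.Builder
	for i := 0; i < len(s); i++ {
		if s[i] == '\\' && i+1 < len(s) && strings.IndexByte(escapable, s[i+1]) >= 0 {
			i++ // drop the backslash, keep the escaped byte
		}
		b.WriteByte(s[i])
	}
	return b.String()
}

func main() {
	// For string field values the escapable set is `"`; a lone backslash
	// passes through untouched.
	fmt.Println(unescape(`say \"hi\"`, `"`)) // say "hi"
	fmt.Println(unescape(`C:\path`, `"`))    // C:\path (backslash kept literally)
}
```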
Daniel Nelson
4209ebfa6e Update changelog 2017-07-10 12:23:16 -07:00
Daniel Nelson
79f8ed874a Update elastic version to 5.0.41 (#2999) 2017-07-10 12:18:56 -07:00
Daniel Nelson
739d97639a Update dependencies 2017-07-10 12:01:22 -07:00
Wesley Merkel
ac8e28f436 Add link to Graylog input to README.md (#2995) 2017-07-10 11:22:37 -07:00
Daniel Nelson
2740a3ba44 Update changelog 2017-07-05 14:29:59 -07:00
Song Wenhao
0f850400f2 Display error message if prometheus output fails to listen (#2984) 2017-07-05 14:28:44 -07:00
Daniel Nelson
74a764d549 Update changelog 2017-06-29 16:17:08 -07:00
Aleksey Shirokih
a8a637809e Change default prometheus_client port (#2973) 2017-06-29 14:03:42 -07:00
Daniel Nelson
75dbf2b0f8 Set release date for 1.3.3 2017-06-28 13:05:06 -07:00
Daniel Nelson
90909ae708 Fix build on Windows (#2972) 2017-06-27 16:31:28 -07:00
Daniel Nelson
d40e441240 Use git sha1 as version if not tagged (#2969) 2017-06-27 13:24:06 -07:00
Adam Perlin
cc3d420551 Fix several bugs in minecraft input (#2970) 2017-06-27 13:14:07 -07:00
Daniel Nelson
f2bb4acd4a Update changelog 2017-06-26 15:25:06 -07:00
Bob Shannon
a7595c918a Fix panic in elasticsearch input if cannot determine master (#2954) 2017-06-26 15:23:53 -07:00
Daniel Nelson
a52f90122b Update changelog 2017-06-26 15:15:31 -07:00
Bob Shannon
d217cdc1a6 Add optional usage_active and time_active CPU metrics (#2943) 2017-06-26 15:13:38 -07:00
Daniel Nelson
d5b6f92f3f Log aerospike field value on error 2017-06-26 14:48:22 -07:00
Daniel Nelson
1a636abaaf Update changelog 2017-06-26 14:31:17 -07:00
vodolaz095
1fdbfa4719 Add support for RethinkDB 1.0 handshake protocol (#2963)
Allow the rethinkdb input plugin to work with RethinkDB 2.3.5+ databases that require username/password authorization and handshake protocol v1.0.

* remove top level header not required in sample config
2017-06-26 14:29:48 -07:00
Daniel Nelson
22fc130e97 Update changelog 2017-06-23 16:56:36 -07:00
Ayrdrie
a726579d50 Add Minecraft input plugin (#2960) 2017-06-23 16:54:12 -07:00
Daniel Nelson
d774c2a170 Update changelog 2017-06-23 11:13:00 -07:00
MatthewCh
6d5bb35f84 Support HOST_PROC in processes and linux_sysctl_fs inputs (#2924) 2017-06-23 11:11:33 -07:00
Daniel Nelson
e028f10586 Update changelog 2017-06-23 11:04:13 -07:00
Daniel Nelson
9276318faf Fix bug parsing default timestamps with modified precision (#2949) 2017-06-23 10:59:04 -07:00
Daniel Nelson
82a04d904d Use strings.Join in statsd input (#2947) 2017-06-21 16:24:23 -07:00
Daniel Nelson
364da9a83d Update changelog 2017-06-21 12:46:57 -07:00
grugrut
ca9cec2c84 Add input plugin for Fail2ban (#2875) 2017-06-21 12:42:13 -07:00
Daniel Nelson
9211985c63 Update changelog 2017-06-21 12:39:09 -07:00
Daniel Nelson
929ba0a637 Remove label value sanitization in prometheus output (#2939) 2017-06-21 12:36:29 -07:00
Daniel Nelson
dcdcb70cb1 Update changelog 2017-06-19 11:52:53 -07:00
Eugene Shilin
cb5a12de3d Add standard SSL options to mysql input (#2933) 2017-06-19 11:42:43 -07:00
Artem Kovardin
193e8fa5ad More explicit 404 error in cassandra input (#2936) 2017-06-19 11:06:49 -07:00
trastle
00b37a7c0d Update README for Prometheus Client Output (#2452) 2017-06-19 11:04:08 -07:00
Daniel Nelson
736322dfc9 Set default ping count in Windows
fixes #2934
2017-06-16 13:39:55 -07:00
Daniel Nelson
ba364988de Document that ping_interval is non-linux only 2017-06-16 13:32:04 -07:00
Daniel Nelson
a729a44284 Update changelog 2017-06-16 13:18:27 -07:00
Daniel Nelson
3ecfd32df5 Allow dos line endings in tail and logparser (#2920)
Parsing DOS line-ending delimited line protocol is still illegal in most
cases.
2017-06-16 13:16:48 -07:00
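A minimal sketch of tolerating DOS line endings before parsing: strip a trailing carriage return from each line and parse the remainder as usual.

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	input := "cpu usage=1\r\ncpu usage=2\r\n"
	for _, raw := range strings.Split(input, "\n") {
		line := strings.TrimRight(raw, "\r") // tolerate \r\n endings
		if line == "" {
			continue
		}
		fmt.Printf("parse %q\n", line)
	}
}
```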
Daniel Nelson
ea1888bd26 Update changelog 2017-06-16 12:06:40 -07:00
Simone Rotondo
674c24f987 Add HTTP Proxy support to influxdb output (#2929) 2017-06-16 12:05:08 -07:00
Daniel Nelson
ca72df5868 Update 1.3.2 release date 2017-06-14 12:16:47 -07:00
Daniel Nelson
ea787b83bf Update changelog 2017-06-13 18:07:12 -07:00
Daniel Nelson
949072e8dc Ensure prometheus metrics have same set of labels (#2857) 2017-06-13 18:04:26 -07:00
Daniel Nelson
246f342e6a Update changelog 2017-06-13 17:19:33 -07:00
Daniel Nelson
619b5d4c14 Change node_name to be a tag in aerospike input (#2918) 2017-06-13 17:09:38 -07:00
Daniel Nelson
b0efc22140 Update changelog 2017-06-13 14:10:33 -07:00
Heston Kan
5d1efdbfda Add min/max response time on linux/darwin to ping (#2908) 2017-06-13 14:09:17 -07:00
Daniel Nelson
e3ccd473d2 Update changelog 2017-06-13 13:44:07 -07:00
Dheeraj Dwivedi
f0cbfe4d67 Add secure connection support to graphite output (#2602) 2017-06-13 13:42:11 -07:00
Daniel Nelson
40d8e582ee Update changelog 2017-06-12 18:32:50 -07:00
Daniel Nelson
02b55fe77f Update aws-sdk-go dependency to latest release. (#2912) 2017-06-12 18:31:27 -07:00
Daniel Nelson
0c53de6700 Update changelog 2017-06-08 16:55:27 -07:00
Daniel Nelson
b277e6e2d7 Fix support for mongodb/leofs urls without scheme (#2900)
This was broken by changes to url.Parse in Go 1.8. This change allows
the string but prompts the user to move to the correct URL string.
2017-06-08 16:52:01 -07:00
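A sketch of the accommodation, assuming a hypothetical `normalizeServer` helper: host:port strings without a scheme are still accepted, with a scheme prepended and a prompt to update the config.

```go
package main

import (
	"fmt"
	"log"
	"net/url"
	"strings"
)

// normalizeServer is a hypothetical helper: url.Parse can interpret
// "localhost:27017" as having scheme "localhost", so detect the missing
// scheme up front, fix it, and nudge the user toward the full URL.
func normalizeServer(s string) (*url.URL, error) {
	if !strings.Contains(s, "://") {
		log.Printf("deprecated server format %q; use mongodb://%s", s, s)
		s = "mongodb://" + s
	}
	return url.Parse(s)
}

func main() {
	u, err := normalizeServer("localhost:27017")
	fmt.Println(u, err) // mongodb://localhost:27017 <nil>
}
```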
Daniel Nelson
de4a312eba Update changelog 2017-06-08 13:20:44 -07:00
Matteo Cerutti
4b3b16ef1a Add wildcard support for container inclusion/exclusion (#2793) 2017-06-08 13:17:31 -07:00
Daniel Nelson
4c534433aa Skip kafka_consumer_integration_test due to issue on CircleCI 2017-06-07 18:31:52 -07:00
Daniel Nelson
f9447d01d4 Add release note to changelog regarding kafka_consumer 2017-06-07 18:27:12 -07:00
Seuf
2092443cd7 Add Kafka 0.9+ consumer support (#2487) 2017-06-07 18:22:28 -07:00
Bob Shannon
1c73caba04 Add SSL/TLS support to nginx input plugin (#2883) 2017-06-07 17:52:10 -07:00
Daniel Nelson
84dbf8bb25 Update changelog 2017-06-07 13:46:06 -07:00
Daniel Nelson
a275e6792a Fix metric splitting edge cases (#2896)
Metrics needing one extra byte to fit the output buffer would not be split, so we would emit lines without a line ending. Metrics which overflowed by exactly one field length would be split one field too late, causing truncated fields.
2017-06-07 13:37:54 -07:00
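A sketch of the boundary condition (illustrative, not the serializer itself): when checking whether a serialized metric fits the output buffer, the trailing newline has to be counted too, otherwise a metric that fits by exactly one byte is emitted without its line ending.

```go
package main

import "fmt"

// fitsPayload sketches the corrected check: the +1 accounts for the
// newline that terminates every line-protocol metric.
func fitsPayload(serialized string, payloadSize int) bool {
	return len(serialized)+1 <= payloadSize
}

func main() {
	m := "cpu usage=1 1490000000000000000" // 31 bytes
	fmt.Println(fitsPayload(m, len(m)))   // false: no room for '\n'
	fmt.Println(fitsPayload(m, len(m)+1)) // true
}
```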
Daniel Nelson
de7fb2acfe Update changelog 2017-06-06 13:55:11 -07:00
Frederick Roth
91f2764cd5 Add result_type field for http_response input (#2814) 2017-06-06 13:39:07 -07:00
Daniel Nelson
4e91b18bbe Update changelog 2017-06-06 11:56:19 -07:00
Mariusz Brzeski
56a7ffe0e4 Fix timeout option in Windows ping input sample configuration (#2885) 2017-06-06 11:55:01 -07:00
Daniel Nelson
f9462d4fff Update changelog 2017-06-05 14:47:34 -07:00
Sebastian Borza
035905d65e Add timezone support to logparser timestamps (#2882) 2017-06-05 14:45:11 -07:00
Daniel Nelson
a47e6e6efe Update changelog 2017-06-05 12:46:50 -07:00
Daniel Nelson
5bab4616ff Fix udp metric splitting (#2880) 2017-06-05 12:44:29 -07:00
Daniel Nelson
37e01808b5 Set 1.3.1 release date 2017-05-31 15:00:31 -07:00
Daniel Nelson
0b6db905ff Generate sha256 hashes when packaging 2017-05-31 12:29:39 -07:00
Daniel Nelson
9529199a44 Update changelog 2017-05-30 17:40:37 -07:00
Daniel Nelson
be03abd464 Fix length calculation of split metric buffer (#2869) 2017-05-30 17:38:32 -07:00
Daniel Nelson
04aa732e94 Update changelog 2017-05-30 11:04:39 -07:00
Steve Nardone
e7f9db297e Fix panic in mongo input (#2848) 2017-05-30 11:02:26 -07:00
Daniel Nelson
24ea9fdc4d Update changelog 2017-05-26 12:12:18 -07:00
Matteo Cerutti
02d168705c MySQL input: log and continue on field parse error (#2855) 2017-05-26 12:09:43 -07:00
Daniel Nelson
7d7206b3e2 Update changelog 2017-05-25 16:20:29 -07:00
Daniel Nelson
03ca3975b5 Update gopsutil version
fixes #2856
2017-05-25 16:11:49 -07:00
Daniel Nelson
e1088b9eee Update changelog 2017-05-25 13:39:16 -07:00
Daniel Nelson
f47924ffc5 Fix influxdb output database quoting (#2851) 2017-05-25 13:25:52 -07:00
Olivier Lambert
a96f85c847 Add documentation for fetching metrics on Caddy HTTP and Prometheus (#2853) 2017-05-25 13:07:49 -07:00
Sylvain Boily
9148871608 Documentation privilege requirements for specific procstat metrics (#2787) 2017-05-25 13:06:27 -07:00
Matteo Cerutti
7d198f0a68 Add timeout option to ipmi_sensor plugin - solves #2817 (#2818) 2017-05-22 13:41:34 -07:00
Daniel Nelson
1459fab4d6 Remove changelog item from pull request template
The person who merges a PR is now expected to update the CHANGELOG.
2017-05-22 12:06:48 -07:00
Daniel Nelson
b0bd4d55f5 Update CHANGELOG with fixed issue #1137 2017-05-22 12:01:22 -07:00
Steven Burgart
9ab688d62c Fix multiple plugin loading in win_perf_counters (#2800) 2017-05-22 11:58:00 -07:00
Daniel Nelson
8fdc2aec80 Update dependency license file 2017-05-19 18:03:49 -07:00
Lukasz Jagiello
91690b1d3e Consul plugin README typo (#2829) 2017-05-19 11:37:31 -07:00
Daniel Nelson
c61cd73eff Update changelog 2017-05-18 18:11:49 -07:00
rsingh2411
93e638d63e Add Docker container environment variables as tags. Only whitelisted #2580 (#2581) 2017-05-18 16:58:34 -07:00
mced
501c22478e [enh] set db_version to 0 if the version query fails (#2819) 2017-05-18 13:52:56 -07:00
Daniel Nelson
7155e90f66 Update changelog for #2815 2017-05-16 17:37:51 -07:00
Timo Mihaljov
c53d9fa9b7 Handle process termination during read from /proc (#2816)
Fixes #2815.
2017-05-16 17:33:35 -07:00
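A minimal sketch of tolerating this race, assuming per-PID files under /proc are read after the process list was gathered; the helper name is hypothetical:

```
package sketch

import (
	"fmt"
	"io/ioutil"
	"os"
)

// readProcFile treats a vanished process as "no data" rather than an error,
// since a process may exit between listing PIDs and reading its files.
func readProcFile(pid int) ([]byte, error) {
	data, err := ioutil.ReadFile(fmt.Sprintf("/proc/%d/stat", pid))
	if os.IsNotExist(err) {
		return nil, nil // process terminated mid-collection; skip it
	}
	return data, err
}
```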
Frederick Roth
ac5ac3161f Fixed inconsistency between HasIntField and IntField (#2813) 2017-05-16 15:25:30 -07:00
Daniel Nelson
bfeb3020a3 Add release date for 1.3.0 2017-05-15 19:52:35 -07:00
Daniel Nelson
b01ecdccff Add back the changelog entry for 2141 2017-05-15 12:54:03 -07:00
Daniel Nelson
da99777f6f Only split metrics if there is an udp output (#2799) 2017-05-12 15:34:05 -07:00
Zack Zatkin-Gold
dd537b3382 Fix telegraf example arguments (#2788)
Many of the examples provided within documentation are using a single
dash for the command line arguments, but the telegraf executable
explicitly has two dashes.

There are also some inconsistencies with the ordering of the command
line argument examples.  I've ordered them so that the examples will
show: config, config-directory, input-filter, test
2017-05-12 15:22:29 -07:00
Sebastian Borza
f74687dcc0 split metrics based on UDPPayload size (#2795) 2017-05-12 14:45:50 -07:00
Daniel Nelson
a47aa0dcc2 Merge branch 'reuse-transport' 2017-05-10 18:19:21 -07:00
Daniel Nelson
17d883c602 Ensure keep-alive is not used in http_response input.
Using Keep-Alive would change the timing for already established
connections.  Previous to this commit, Keep-Alive worked only when using
a response_string_match due to failure to close the request body.
2017-05-10 14:40:55 -07:00
Daniel Nelson
a1446a60f7 Update changelog 2017-05-10 13:11:33 -07:00
Daniel Nelson
1931aac284 Fix http_response input creation of transport on every gather 2017-05-09 16:23:38 -07:00
Daniel Nelson
b88eb0f59d Fix prometheus input creation of transport on every gather 2017-05-09 16:21:49 -07:00
Daniel Nelson
e7ad2d0463 Fix apache input creation of transport on every gather. 2017-05-09 16:19:56 -07:00
Daniel Nelson
c28ffb11cb Merge branch 'update-readme' 2017-05-09 13:50:19 -07:00
Daniel Nelson
018fd5ce5b Add missing plugins to README 2017-05-09 13:50:12 -07:00
Daniel Nelson
cd0ec0185a Update contributing section
Hoping this will encourage more non-plugin contributions.
2017-05-09 13:50:12 -07:00
Adrian Sadłocha
8124cfa3ed Improve PostgreSQL plugin documentation (#2777) 2017-05-09 12:58:43 -07:00
Lukasz Jagiello
5af985ef5f Add support for self-signed certs to InfluxDB input plugin (#2773) 2017-05-08 15:20:24 -07:00
Sylvain Boily
1ebd1aaa41 Systemd does not see all shutdowns as failures (#2716) 2017-05-08 11:48:29 -07:00
Daniel Nelson
de3f52b990 Update cloudwatch documentation
Mention that some metrics are available only at larger intervals than 5
minutes.  Update dead links to new locations and example config.

closes #1907
2017-05-08 11:31:20 -07:00
Daniel Nelson
4200018a0b Enable s390x builds
closes #2766
2017-05-05 14:39:56 -07:00
Daniel Nelson
67cd1669cc Add SLES11 support to rpm package (#2768) 2017-05-05 14:29:40 -07:00
Sébastien
a8cfe03ba8 fix systemd path in order to add compatibility with SuSe (#2499) 2017-05-05 14:04:33 -07:00
ceseuron
e2983383e4 Fixed sqlserver input to work with case sensitive server collation. (#2749)
Fixed a problem with sqlserver input where database properties are not returned by Telegraf when SQL Server has been set up with a case sensitive server-level collation.

* Added bugfix entry to CHANGELOG.md for sqlserver collation input fix.
2017-05-04 10:47:03 -07:00
Daniel Nelson
8cf0dc769b Add 1.4 section to changelog 2017-05-03 17:29:34 -07:00
Daniel Nelson
613de8a80d Remove documentation in kafka_consumer for metric_buffer 2017-05-03 11:51:49 -07:00
Damien Krotkine
f5c890cc1d reflect zookeeper chroot config in readme (#2759) 2017-05-03 11:50:08 -07:00
Daniel Nelson
f7f1eaef65 Return an error if no valid patterns. (#2753) 2017-05-02 14:54:38 -07:00
Alexander Blagoev
188703e204 Improve redis input documentation (#2708) 2017-05-02 11:43:07 -07:00
Patrick Hemmer
52c19af0ba fix close on closed socket_writer (#2748) 2017-05-02 11:06:49 -07:00
Daniel Nelson
5c88965084 Add initial documentation for rabbitmq input. (#2745) 2017-05-01 18:55:48 -07:00
Daniel Nelson
6e76731b7e Don't log error creating database on connect (#2740)
closes #2739
2017-04-28 15:58:46 -07:00
Daniel Nelson
c7a0e40c87 Update telegraf.conf 2017-04-28 13:47:32 -07:00
Daniel Nelson
086a2f5f12 Clarify retention policy option for influxdb output
closes #2696
2017-04-28 13:46:23 -07:00
Daniel Nelson
1da1c4753e Clarify retention policy option for influxdb output
closes #2696
2017-04-28 13:40:58 -07:00
Daniel Nelson
a083e1af7d Use go 1.8.1 for CI and Release builds (#2732) 2017-04-27 16:18:11 -07:00
Daniel Nelson
052e88ad5e Fix grammar 2017-04-27 14:59:18 -07:00
Daniel Nelson
b9ce455bba Update telegraf.conf 2017-04-27 11:53:32 -07:00
Seuf
cd103c85db Added SASL options for output kafka plugin (#2721) 2017-04-27 11:50:25 -07:00
Ross McDonald
a3feacbd2f Kapacitor input plugin (#2031) 2017-04-27 11:47:22 -07:00
Daniel Nelson
e1a734c525 Fix logfile documentation 2017-04-27 11:38:49 -07:00
Daniel Nelson
53ab56de72 Update haproxy README 2017-04-27 11:23:37 -07:00
Seuf
4e2fe598ac Added SSL configuration for input haproxy (#2723) 2017-04-27 11:20:41 -07:00
Daniel Nelson
5fe5c46c6d Fix amqp output block on write if disconnected (#2727)
fixes #2603
2017-04-27 11:10:30 -07:00
Damien Krotkine
153304d92b it's -> its (#2728) 2017-04-27 11:10:00 -07:00
Damien Krotkine
cb9aecbf04 it's -> its (#2729) 2017-04-27 11:06:40 -07:00
Nevins
c66e2896c6 add option to randomize Kinesis partition key (#2705) 2017-04-26 10:54:24 -07:00
Jeff Zellner
9b874dff8d Update README.md (#2719) 2017-04-25 13:17:15 -07:00
Daniel Nelson
b243faa22b Don't close stdout on config reload. (#2707)
fixes #2528
2017-04-24 16:18:58 -07:00
Patrick Hemmer
8f5cd6c2ae add keep-alive support to socket_listener & socket_writer (#2697)
closes #2635
2017-04-24 13:14:42 -07:00
Alexander Blagoev
3c28b93514 Improve procstat input documentation (#2699)
closes #1895
2017-04-24 11:18:55 -07:00
Patrick Hemmer
06baf7cf78 use AddError everywhere (#2372) 2017-04-24 11:13:26 -07:00
Alexander Blagoev
801f6cb8a0 System net input documentation (#2698)
closes #2166
2017-04-24 11:03:53 -07:00
Daniel Nelson
3684ec6315 Update EXAMPLE_README.md 2017-04-21 14:27:36 -07:00
Daniel Nelson
da0773151b Use C locale when running sadf (#2690)
fixes #1911
2017-04-21 10:55:54 -07:00
Daniel Nelson
38e1c1de77 Update commit hash of tail fork 2017-04-20 16:29:39 -07:00
Daniel Nelson
799c8bed29 Add fix for network aliases to changelog
Change was made in gopsutil
2017-04-20 15:34:30 -07:00
Alexander Blagoev
a237301932 Memcached input documentation (#2685)
Closes #2615
2017-04-20 11:25:22 -07:00
Oleg Grytsynevych
b03d78d00f win_perf_counters: Format errors reported by pdh.dll in human-readable format (#2338) 2017-04-20 11:22:44 -07:00
Martin
748ca7d503 Fixed install/remove of telegraf on non-systemd Debian/Ubuntu systems (#2360) 2017-04-20 11:19:33 -07:00
Daniel Nelson
bf30ef89ee Fix ipmi_sensor config is shared between all plugin instances (#2684) 2017-04-19 17:02:44 -07:00
Daniel Nelson
3690e1b9bf Add diskio for darwin to changelog 2017-04-19 13:42:24 -07:00
Patrick Hemmer
2542ef6d62 change jolokia input to use bulk requests (#2253) 2017-04-18 13:00:41 -07:00
Nikolay Denev
eb7ef5392e Simplify system.DiskUsage() (#2630) 2017-04-18 11:42:58 -07:00
Ross McDonald
70b3e763e7 Add input for receiving papertrail webhooks (#2038) 2017-04-17 13:49:36 -07:00
François de Metz
58ee962679 GitHub webhooks: check signature (#2493) 2017-04-17 11:42:03 -07:00
Daniel Nelson
dc5779e2a7 Rename heap_objects_bytes to heap_objects in internal plugin. (#2674)
* Rename heap_objects_bytes to heap_objects in internal plugin.

This field does not contain bytes

fixes #2671
2017-04-14 17:32:14 -07:00
Daniel Nelson
b968759d10 Use variadic disk.IOCounters() function 2017-04-14 13:48:02 -07:00
Daniel Nelson
b90a5b48a1 Improve logparser README (#2664) 2017-04-14 13:47:43 -07:00
calerogers
a12e082dbe Refactor interrupts plugin code (#2670) 2017-04-14 13:40:36 -07:00
calerogers
cadd845b36 Irqstat input plugin (#2494)
closes #2469
2017-04-13 15:53:02 -07:00
ingosus
dff216c44d Feature #1820: add testing without outputs (#2446) 2017-04-13 12:59:28 -07:00
Gregory Kman
45c9b867f6 Update ping-input-plugin Readme (#2651) 2017-04-12 17:46:48 -07:00
Chris Goffinet
9388fff1f7 Fixed content-type header in output plugin OpenTSDB (#2663) 2017-04-12 17:40:10 -07:00
Daniel Nelson
3e0c55bff9 Update grok version (#2662) 2017-04-12 17:10:17 -07:00
Jesús Roncero
49ab4e26f8 Nagios plugin documentation fix (#2659) 2017-04-12 12:04:44 -07:00
Daniel Nelson
360b10c4de Clarify precision documentation (#2655) 2017-04-12 10:42:11 -07:00
Daniel Nelson
2c98e5ae66 Add collectd parser (#2654) 2017-04-12 10:41:26 -07:00
Nick Irvine
0193cbee51 Add max_message_len in kafka_consumer input (#2636) 2017-04-11 12:05:39 -07:00
Daniel Nelson
f55af7d21f Use name filter for IOCounters in diskio (#2649)
Use IOCountersForNames for disk counters.
2017-04-11 11:41:09 -07:00
Patrick Hemmer
516dffa4c4 set default measurement name on snmp input (#2639) 2017-04-10 16:45:02 -07:00
Daniel Nelson
62b5c1f7e7 Add support for precision in http_listener (#2644) 2017-04-10 16:39:40 -07:00
Daniel Nelson
07c428ef89 Use random port in http_listener tests 2017-04-10 14:39:39 -07:00
Vladimir S
aa722fac9b Add dmcache input plugin (#1667) 2017-04-07 15:39:43 -07:00
Rajaseelan Ganeswaran
7cc4ca2341 Add sample config stanza for CPU (#2620) 2017-04-06 14:44:02 -07:00
Victor Yunevich
92fa20cef2 ipmi_sensor: allow @ symbol in password (#2633) 2017-04-06 14:40:34 -07:00
Daniel Nelson
c9f8308f27 Update filtering documentation (#2631) 2017-04-06 12:06:08 -07:00
James
5ffc9fd379 fix postgresql connection leak (#2611) 2017-04-04 17:37:44 -07:00
Daniel Nelson
8bf193dc06 Update httpjson documentation (#2619)
closes  #2536
2017-04-03 18:34:04 -07:00
Patrick Hemmer
f2805fd4aa socket_listener: clean up unix socket file on start & stop (#2618) 2017-04-03 18:06:51 -07:00
Shakeel Sorathia
35e4390168 Docker: optionally add labels as tags (#2425) 2017-04-03 13:43:15 -07:00
Patrick Hemmer
51c99d5b67 add support for linux sysctl fs metrics (#2609) 2017-03-31 14:01:02 -07:00
Daniel Nelson
540f98e228 Fix possible deadlock when output cannot write. (#2610) 2017-03-31 12:45:28 -07:00
Dmitry Ulyanov
c980c92cd5 Added pprof tool (#2512) 2017-03-29 18:28:43 -07:00
Daniel Nelson
9495b615f5 Update changelog for #2587 2017-03-29 17:15:11 -07:00
tjmcs
fb1c7d0154 Adds a new json_timestamp_units configuration parameter (#2587) 2017-03-29 17:12:29 -07:00
Patrick Hemmer
03ee6022f3 fix race in testutil Accumulator.Wait() (#2598) 2017-03-29 17:03:06 -07:00
djjorjinho
cc5b2f68b6 fix timestamp parsing on prometheus plugin (#2596) 2017-03-29 15:04:29 -07:00
Daniel Nelson
2d7f612bd7 Use fork of hpcloud/tail (#2595) 2017-03-29 14:25:33 -07:00
Daniel Nelson
9e036b2d65 Remove wait loop in riemann tests
This test case still has a race condition, but I believe it occurs when the
test does not complete quickly enough.
2017-03-28 13:05:10 -07:00
mgresser
1100a98f11 Removed duplicate evictions metric (#2577) 2017-03-28 10:47:00 -07:00
Daniel Nelson
37689f4df6 Add elasticsearch output to changelog 2017-03-28 10:22:28 -07:00
Daniel Nelson
78c7f4e4af Add write timeout to Riemann output (#2576) 2017-03-27 15:49:45 -07:00
Daniel Nelson
84a9f91f5c Skip elasticsearch output integration test in short mode 2017-03-27 15:05:06 -07:00
Daniel Nelson
5612df48f9 Update telegraf.conf 2017-03-27 14:49:04 -07:00
Daniel Nelson
0fa9001453 Clarify influxdb output url format
closes #2568
2017-03-24 16:04:18 -07:00
Patrick Hemmer
995546e7c6 snmp: support table indexes as tags (#2366) 2017-03-24 12:06:52 -07:00
Patrick Hemmer
1402c158b7 remove sleep from tests (#2555) 2017-03-24 12:03:36 -07:00
Oskar
616b66f5cb Multi instances in win_perf_counters (#2352) 2017-03-22 12:04:58 -07:00
Daniel Nelson
70a0a84882 Really fix procstat initialization 2017-03-21 11:40:51 -07:00
Daniel Nelson
5c33c760c7 Fix procstat initialization 2017-03-21 10:59:41 -07:00
Leandro Piccilli
bb28fb256b Add Elasticsearch 5.x output (#2332) 2017-03-20 17:47:57 -07:00
Daniel Nelson
a962e958eb Refactor procstat input (#2540)
fixes #1636 
fixes #2315
2017-03-17 16:49:11 -07:00
Patrick Hemmer
8514acdc3c return error on unsupported serializer data format (#2542) 2017-03-17 10:14:03 -07:00
Antoine Augusti
426182b81a Update default value for Cloudwatch rate limit (#2520) 2017-03-15 15:20:18 -07:00
Daniel Nelson
7a5d857846 Add support for new SSL configuration to mongodb (#2522)
closes #2519
2017-03-10 11:27:55 -08:00
jeremydenoun
13f314a507 Report DEAD (X) State Process (#2501)
Report count of processes in dead (X) process state from the processes input.  This process state is only valid on Linux.
2017-03-09 11:28:54 -08:00
Daniel Nelson
ea6e0b8259 Fix typo in postgresql README 2017-03-09 10:13:31 -08:00
Cameron Sparr
e811e2600d create telegraf.d directory in tarball
closes #2513
2017-03-09 11:41:08 +00:00
Timothy
49c212337f Update CONFIGURATION.md (#2516)
Add information about default configuration file locations.  Also mention that the config directory option is available.
2017-03-09 11:21:03 +00:00
Dennis Dryden
d243d69a09 Add configuration docs to Postgresql input plugin (#2515)
* Add configuration docs to Postgresql input plugin

Add configuration docs to the PostgreSQL input plugin README (mostly from the source code), though I've not included the configuration example that seems to use all the connections on the database[1].

[1] https://github.com/influxdata/telegraf/issues/2410

* Fix typo in readme and sampleConfig string.
2017-03-09 11:19:03 +00:00
jeremydenoun
ae6a5d2255 Remove warning if parse empty content (#2500)
closes #2448
2017-03-08 14:08:55 -08:00
Robpol86
56aa89e5c8 Exporting Ipmi.Path to be set by config. (#2498)
* Exporting Ipmi.Path to be set by config.

Currently "path" is not exported, giving this error when users try to
override the variable via telegraf.conf as per the sample config:

`field corresponding to `path' is not defined in `*ipmi_sensor.Ipmi'`

Exporting the variable solves the problem.

* Updating changelog.
2017-03-08 16:38:36 +00:00
vvvkamper
7513fcac4e Fix part 2 of #1291
added PDH_FMT_NOCAP100 format option

closes #2483
2017-03-08 13:39:03 +00:00
Cameron Sparr
9df2974a0f update gopsutil for file close fixes
hopefully this will fix #2472
2017-03-08 12:54:17 +00:00
Daniel Nelson
ceb36adac7 Update issue template 2017-03-06 11:20:53 -08:00
Cameron Sparr
7a8e821731 Revert "Procstat: don't cache PIDs" (#2479) 2017-03-06 15:59:36 +00:00
François de Metz
76bcdecd21 Respond 200 when receiving a ping event. (#2492) 2017-03-06 12:34:41 +00:00
Jack Zampolin
10744646db AMQP Consumer plugin (#1678) 2017-03-03 10:24:50 -08:00
Charles-Henri
1873abd248 Iptables input: document better the ignored rules behavior (#2482)
During issue #2215 it was highlighted that the current behavior where
rules without a comment are ignored is confusing for several users.

This commit improves the documentation and adds a NOTE to the sample
config to clarify the behavior for new users.
2017-03-02 09:58:26 +00:00
Chris Koehnke
9618515926 Disk counter array newline (#2481)
Tweak formatting of `LogicalDisk` counter array to have one entry per
line.
2017-03-02 08:43:33 +00:00
Cameron Sparr
a251adb838 Fix type conflict on windows ping plugin (#2462)
closes #1433
2017-03-01 11:22:42 +00:00
Cameron Sparr
9e810ac463 Handle nil os.FileInfo in filepath.Walk
closes #2466
2017-02-28 17:51:03 +00:00
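A minimal sketch of the defensive check, assuming a directory walk like the config loader's: filepath.Walk hands the callback a nil os.FileInfo together with a non-nil err when an entry cannot be stat'd, so err must be inspected before info is touched.

```
package sketch

import (
	"os"
	"path/filepath"
)

func walkConfigs(dir string, visit func(path string)) error {
	return filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return nil // info is nil here; skip the entry instead of panicking
		}
		if info.IsDir() {
			return nil
		}
		visit(path)
		return nil
	})
}
```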
Cameron Sparr
b9457a1092 log error message when invalid regex is used
closes #2178
2017-02-28 12:48:14 +00:00
Cameron Sparr
6f2eeae498 Remove sleep from riemann test 2017-02-28 12:46:27 +00:00
Cameron Sparr
42a41d33cc add cgroup plugin to README 2017-02-24 09:43:22 +00:00
Cameron Sparr
81408f9da7 switch out deprecated docker client library
closes #2071
2017-02-22 10:55:00 +00:00
Rickard von Essen
c4212d69c9 Updated readme, now requires Go 1.8 (#2455) 2017-02-21 22:13:22 +01:00
Carlos
e17164d3f0 Added default config to file output pugin's README (#2426) 2017-02-20 11:50:39 +01:00
Cameron Sparr
e5349393f8 Check for errors in user stats & process list
closes #2414
2017-02-17 15:38:33 +00:00
Cameron Sparr
06176ef410 Only set the buffer size once
fixes #2380
2017-02-17 14:11:15 +00:00
Cameron Sparr
2a3448c8f3 socket_writer output plugin README 2017-02-16 23:13:14 +00:00
Leandro Piccilli
5da40d56ad Check if tag value is empty before allocation
closes #2390
closes #2404
2017-02-16 23:07:27 +00:00
Cameron Sparr
54c9a385d5 Fix prometheus_client reload behavior
fixes #2282
2017-02-16 21:57:13 +00:00
Priyank Trivedi
25c55419df Fix typo - Default from Defalt (#2417) 2017-02-16 19:03:17 +00:00
Yaron de Leeuw
c19fb1535e README: update golang requirement to 1.7 (#2412)
The docker engine-api package we use needs golang 1.7+, see:
https://github.com/docker/engine-api/pull/382#issuecomment-244512952

So telegraf won't compile without 1.7
2017-02-15 17:17:26 +00:00
François de Metz
45a168e425 Fix setting the username and the password to the influxdb output. (#2401) 2017-02-13 15:30:30 +00:00
Cameron Sparr
22243a8354 Skip service input plugins in test mode 2017-02-13 10:40:38 +00:00
Cameron Sparr
ff9369f1a1 prepend 'inputs.' to --test output check 2017-02-13 10:33:51 +00:00
Cameron Sparr
21cf79163c don't use influxdata/config, just use influxdata/toml 2017-02-10 17:27:18 +00:00
Cameron Sparr
f05fac74cb update naoina/toml to do config validation 2017-02-10 17:05:13 +00:00
Cameron Sparr
c8cc01ba6a deprecate udp_listener & tcp_listener 2017-02-06 10:41:44 +00:00
Cameron Sparr
694955c87b Remove metric.Point from metric interface 2017-02-03 16:53:07 +00:00
Cosmo Petrich
b1945c0493 Increment gather_errors for all input errors
closes #2339
2017-02-03 11:22:31 +00:00
Cameron Sparr
1c4673e900 changelog update 2017-02-03 10:04:50 +00:00
Nick Irvine
dfb4038654 Remove pidfile if pidfile was created (#2358)
Also, ensure pidfile perms are 644
2017-02-03 10:02:19 +00:00
Patrick Hemmer
b3537ef2a8 add socket listener & writer (#2094)
closes #1516 
closes #1711 
closes #1721 
closes #1526
2017-02-02 16:24:03 +00:00
Yaron de Leeuw
0ce44648cf Procstat: don't cache PIDs (#2206)
* Procstat: don't cache PIDs

Changed the procstat input plugin to not cache PIDs. Solves #1636.
The logic of creating a process by pid was moved from `procstat.go` to
`spec_processor.go`.

* Procstat: go fmt

* procstat: modify changelog for #2206
2017-02-02 14:12:22 +00:00
Patrick Hemmer
55d3f70771 add missing fields to haproxy input (#2323) 2017-02-02 13:46:53 +00:00
Matteo Cerutti
a610f8bd03 allow querying sensors via the open interface
closes #2244
closes #1547
2017-02-02 13:31:04 +00:00
Cameron Sparr
dfba3ff37a fix telegraf swallowing panics in --test mode
this defer function was causing telegraf to call os.Exit(0) instead of
panicking when it was supposed to.

closes #2341
2017-02-02 12:14:35 +00:00
Cameron Sparr
285be648c4 Godeps update
closes #2356
2017-02-02 09:52:06 +00:00
Cameron Sparr
f7d551a807 Add more nested globpath tests 2017-02-01 23:44:35 +00:00
Nathan Haugo
3f224a15d5 Update readme to link to k8s plugin (#2355) 2017-02-01 21:23:45 +00:00
Jérôme Vizcaino
c0bbde03ea Ceph: represent pgmap states using tags (#2229)
* ceph: maps are already refs, no need to use a pointer

* ceph: pgmap_states are represented in a single metric "count", differentiated by tag

* Update CHANGELOG
2017-02-01 14:47:23 +00:00
Cameron Sparr
97050e9669 changelog update 2017-02-01 14:41:58 +00:00
James Gregory
eafd1dcc7c Kubernetes input: Handle null startTime for stopped pods (#2335) 2017-02-01 14:41:04 +00:00
Cameron Sparr
c528c53e5b iptables changelog update 2017-02-01 14:39:16 +00:00
ldep30
07a6223932 Add lock option to the IPtables input plugin (#2201)
* Update README.md

* Add lock support to the IPtables input plugin

* Update iptables.go

Doc cleaning
2017-02-01 14:37:18 +00:00
Cameron Sparr
aeb849d744 changelog fix 2017-02-01 14:22:31 +00:00
Len Smith
9003efc3fa http_response : Add in support for looking for substring in response (#2204)
* Add in support for looking for substring in response

* Add note to CHANGELOG.md

* Switch from substring match to regex match

* Requested code changes

* Make requested changes and refactor to avoid nested if-else.

* Convert tabs to space and compile regex once
2017-02-01 14:21:08 +00:00
Pierre Fersing
32e06a489d Keep -config-directory when running as Windows service (#2330)
* Keep -config-directory when running as Windows service

* Update changelog
2017-02-01 14:12:35 +00:00
njwhite
2932db8480 Make Logparser Plugin Check For New Files (#2141)
* Make Logparser Plugin Check For New Files

Check in the Gather metric to see if any new files matching the glob
have appeared. If so, start tailing them from the beginning.

* changelog update for #2141
2017-02-01 14:11:39 +00:00
Cameron Sparr
19dee32287 Go 1.7.5 update cherry-picked to 1.2.1 release 2017-02-01 10:11:16 +00:00
Cameron Sparr
4dad723088 Changelog update 2017-02-01 10:07:31 +00:00
Cameron Sparr
54cfbb5b87 metric: Fix negative number handling
closes #2324
2017-02-01 10:07:31 +00:00
Martin
3e37dda7b0 Go version 1.7.4 -> 1.7.5 (#2348) 2017-02-01 10:07:02 +00:00
Cameron Sparr
fb7931591d Changelog update 2017-02-01 08:59:54 +00:00
Cameron Sparr
e87ce22af9 running output: Drop nil metrics
fixes #2317
2017-02-01 08:55:22 +00:00
John Engelman
738cbbdbb6 Add numerical representation of Consul health check state. (#2277) 2017-01-28 16:47:25 -08:00
Patrick Hemmer
074e6d177c add support for diskio name templates & udev tags
closes #1453
closes #1386
closes #1428
2017-01-27 16:15:42 -08:00
Cameron Sparr
1d864ebd40 Fix riemann output unit tests 2017-01-27 15:08:21 -08:00
Cameron Sparr
e9decadf75 Riemann rewrite changelog update 2017-01-27 14:59:35 -08:00
Fabio Berchtold
3fa37a9212 Rewriting Riemann output plugin (#1900)
* rename to riemann_legacy

Signed-off-by: Fabio Berchtold <fabio.berchtold@swisscom.com>

* initial draft for Riemann output plugin rewrite

Signed-off-by: Fabio Berchtold <fabio.berchtold@swisscom.com>

* add unit tests

Signed-off-by: Fabio Berchtold <fabio.berchtold@swisscom.com>

* add option to send string metrics as states

Signed-off-by: Fabio Berchtold <fabio.berchtold@swisscom.com>

* add integration tests

Signed-off-by: Fabio Berchtold <fabio.berchtold@swisscom.com>

* add plugin README.md

Signed-off-by: Fabio Berchtold <fabio.berchtold@swisscom.com>

* bump riemann library

* clarify settings description

Signed-off-by: Fabio Berchtold <fabio.berchtold@swisscom.com>

* update Readme.md with updated description

Signed-off-by: Fabio Berchtold <fabio.berchtold@swisscom.com>

* add Riemann event examples

Signed-off-by: Fabio Berchtold <fabio.berchtold@swisscom.com>

* use full URL for Riemann server address

Signed-off-by: Fabio Berchtold <fabio.berchtold@swisscom.com>

closes #1878
2017-01-27 14:54:59 -08:00
Cameron Sparr
c9e87a39f8 Revert using fasthttp library to net/http 2017-01-25 16:42:23 -08:00
Cameron Sparr
4a5d313693 Improve the InfluxDB through-put performance
This changes the current use of the InfluxDB client to instead use a
baked-in client that uses the fasthttp library.

This allows for significantly smaller allocations, the re-use of http
body buffers, and the re-use of the actual bytes of the line-protocol
metric representations.
2017-01-25 11:54:16 -08:00
Cameron Sparr
168270ea5f ntpq: correct number of seconds in an hour
closes #2256
2017-01-24 15:27:44 -08:00
Jonas Hahnfeld
c4d4185fb5 snmp: Allow lines with empty or missing tags (#2172)
The changes in #1848 resulted in lines being dropped if they had an empty
tag. Let's allow all lines that have empty or missing tags!
2017-01-24 14:57:43 -08:00
Kali Hernandez
822333690f Debian package: check for group before useradd (#2107)
Fixes #2106
2017-01-24 14:54:19 -08:00
Will Pearson
d7a8bb2214 Fix problem with graphite talking to closed connections (#2171)
We were having problems with telegraf talking to
carbon-relay-ng using the graphite output. When
the carbon-relay-ng server restarted the connection
the telegraf side would go into CLOSE_WAIT but telegraf
would continue to send statistics through the connection.

Reading around, it seems you need to do a read from the connection
and see an EOF error. We've implemented this (see the sketch below) and added a test
that replicates roughly the error we were having.

Pair: @whpearson @joshmyers
2017-01-24 12:50:29 -08:00
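A minimal sketch of the liveness check described above (not the plugin's actual code): a one-byte read under a tight deadline returns io.EOF on a connection the peer has closed, while a healthy idle connection merely times out.

```
package sketch

import (
	"io"
	"net"
	"time"
)

func connClosed(conn net.Conn) bool {
	conn.SetReadDeadline(time.Now().Add(10 * time.Millisecond))
	defer conn.SetReadDeadline(time.Time{}) // clear the deadline afterwards
	var one [1]byte
	_, err := conn.Read(one[:])
	return err == io.EOF
}
```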
Pierre Fersing
a505123e60 Improve win_perf_counters on non English systems (#2261) 2017-01-24 12:46:06 -08:00
Pierre Fersing
be10b19760 Added more Windows metrics (#2290)
Signed-off-by: Pierre Fersing <pierre.fersing@bleemeo.com>
2017-01-24 12:38:10 -08:00
James
b9ae3d6a57 fix postgresql 'name', and 'oid' data types by switching to a driver (#1750)
that handles them properly
2017-01-24 12:36:36 -08:00
Cameron Sparr
c882570983 32-bit binary for windows and freebsd
closes #1346
closes #2218
2017-01-23 20:28:13 -08:00
Cameron Sparr
80411f99f0 influxdb output: treat field type conflicts as a successful write
If we write a batch of points and get a "field type conflict" error
message in return, we should drop the entire batch of points because
this indicates that one or more points have a type that doesn't match the
database.

These errors will never go away on their own, and InfluxDB will
successfully write the points that don't have a conflict.

closes #2245
2017-01-23 16:41:29 -08:00
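A minimal sketch of the retry decision; the status codes and the "field type conflict" substring match are assumptions of this sketch, mirroring the behavior described above where such a batch is treated as written and dropped:

```
package sketch

import (
	"net/http"
	"strings"
)

func keepRetrying(status int, body string) bool {
	if status == http.StatusBadRequest && strings.Contains(body, "field type conflict") {
		return false // this batch can never succeed; count it as written and drop it
	}
	return status >= 500 // transient server-side trouble is worth retrying
}
```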
Cameron Sparr
6df3f0fdae Run scheduled flushes in background
doing this unblocks incoming metrics while waiting for a flush to take
place.

we have to create a semaphore so that we can
'skip' flushes that try to run while a flush is already running.

closes #2262
2017-01-23 14:41:40 -08:00
Cameron Sparr
22340ad984 Add newline to influx line-protocol if not present
closes #2297
2017-01-23 13:52:20 -08:00
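A minimal sketch of the fix: line protocol requires a trailing newline, so append one when the buffer lacks it.

```
package sketch

func ensureTrailingNewline(buf []byte) []byte {
	if len(buf) == 0 || buf[len(buf)-1] != '\n' {
		buf = append(buf, '\n')
	}
	return buf
}
```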
Cameron Sparr
c15504c509 opentsdb: add tcp:// prefix if not present
closes #2299
2017-01-23 13:45:16 -08:00
Claudius Zingerli
20bf90ee52 Add minimal documentation to the diskio plugin (#2296)
* Add documentation to diskio plugin

* Update spelling, fix iops_in_progress unit
2017-01-21 15:08:17 -08:00
Cameron Sparr
3de6bfbcb8 Direct people to downloads page for installation 2017-01-13 17:02:10 +00:00
Cameron Sparr
e0c6262e0b mysql build fixup and changelog update 2017-01-13 14:44:28 +00:00
Pierre Fersing
9b2f6499e7 Added more InnoDB metric to MySQL plugin (#2179) 2017-01-13 14:28:56 +00:00
Cameron Sparr
9262712f0a Changelog update and go fmt 2017-01-13 14:27:20 +00:00
acezellponce
0c9da0985a Added userstats to mysql input plugin (#2137)
* Added GatherUserStatistics, row Uptime in gatherGlobalStatuses, and version fields & tags

* Updated README file

* pulling in latest from master

* ran go fmt to fix formatting

* fix unreachable code

* few fixes

* cleaning up and applying suggestions from sparrc
2017-01-13 14:25:25 +00:00
Viet Hung Nguyen
b89c45b858 Ignore devfs on OSX (#2232) 2017-01-13 14:19:57 +00:00
Cameron Sparr
b60b360f13 Changelog update 2017-01-13 13:50:07 +00:00
Kebus1
734988d732 Fixed Bug 2077 SQL Server (#2212) 2017-01-13 13:47:47 +00:00
Cameron Sparr
95bad9e55b OpenTSDB filter types for HTTP AND telnet 2017-01-13 11:44:28 +00:00
Cameron Sparr
e812a2efc6 Accept an HTTP request body without newline at end (#2266)
I don't like this behavior, but it's what InfluxDB accepts, so the
telegraf listener should be consistent with that.

I accidentally reverted this behavior when I refactored the telegraf
metric representation earlier in this release cycle.
2017-01-13 11:43:50 +00:00
Cameron Sparr
411853fc74 update etc/telegraf.conf 2017-01-12 11:14:12 +00:00
Patrick Hemmer
b7d29ca0e9 allow changing jolokia delimiter (#2255) 2017-01-12 11:08:22 +00:00
Mohammad Ali Alfarra
947e1909ff Document basic auth for haproxy (#2258)
* Document basic auth for haproxy

* Typo in haproxy readme
2017-01-12 08:47:01 +00:00
Cameron Sparr
31a4f03031 mongodb: Remove superfluous ReplSet log message
closes #2248
2017-01-11 17:50:01 +00:00
Emil Haugbergsmyr
81f95e7a29 Fixes change in Kafka consumer input plugin (#2222)
* Fixes change to the error api in the kafka project.

* Updated test to reflect the change.

* Update kafka to match master branch.
2017-01-11 16:24:09 +00:00
Kurt Mackey
2aa2c796e5 Fix for broken librato output (#2225)
* Fix for broken librato output

These errors are delightful, but I'd rather avoid them:

```
Error parsing /etc/telegraf/telegraf.conf, line 2: field corresponding to `api_user' is not defined in `*librato.Librato'
```

* Fixed bad format from last commit
2017-01-09 14:48:32 +00:00
Patrick Hemmer
a658e6c509 ensure proper context on snmp error messages (#2220) 2017-01-09 13:03:33 +00:00
Jérôme Vizcaino
5f6766f6e1 ceph: sample config should reflect actual defaults (#2228) 2017-01-09 12:51:15 +00:00
Cameron Sparr
7279018cfe readme fixup & test output fixup 2017-01-09 12:28:13 +00:00
Cameron Sparr
4b08d127e0 mongodb: don't print unnecessary & inaccurate auth failure
closes #2209
2017-01-06 13:11:24 +01:00
YKlausz
fd1feff7b4 Remove print call in cassandra plugin (#2192) 2016-12-21 17:23:54 +00:00
Dominik Labuda
37bc9cf795 [plugins] jolokia input plugin: configurable http timeouts (#2098) 2016-12-21 12:41:58 +00:00
Cameron Sparr
b762546fa7 docker: check type when totalling blkio & net metrics
closes #2027
2016-12-21 12:18:38 +00:00
Cameron Sparr
bf5f2659a1 Do not try Uint parsing in redis plugin
this is just a waste of CPU cycles, since telegraf converts all uints to
int64 anyway.
2016-12-20 23:42:14 +00:00
Mark Wolfe
d2787e8ef5 Fix for loop over value array range issue. (#2187) 2016-12-20 22:56:02 +00:00
Cameron Sparr
a9f03a72f5 Mask username/password from error messages
closes #1980
2016-12-20 19:35:45 +00:00
Cameron Sparr
7fc57812a7 changelog update 2016-12-20 18:50:32 +00:00
Mark Wolfe
8a982ca68f Moved to using the inbuilt serializer. (#1942)
* Moved to using the inbuilt serializer.

* Remove Atomic variable as it is not required.

* Adjusted metric type in line with latest changes.
2016-12-20 18:49:28 +00:00
Cameron Sparr
200237a515 Do not create a global statsd "previous instance"
this basically reverts #887

at some point we might want to do some special handling of reloading
plugins and keeping their state intact, but that will need to be done at
a higher level, and in a way that is thread-safe for multiple input
plugins of the same type.

Unfortunately this is a rather large feature that will not have a quick
fix available for it.

fixes #1975
fixes #2102
2016-12-20 17:55:04 +00:00
Cameron Sparr
0ae1e0611c changelog update 2016-12-20 16:30:49 +00:00
Matt O'Hara
1392e73125 Add clusterstats to elasticsearch plugin (#1979)
* add clusterstats to elasticsearch input plugin

* add clusterstats to elasticsearch input plugin

* add clusterstats to elasticsearch input plugin

* add clusterstats to elasticsearch input plugin

* add clusterstats to elasticsearch input plugin

* responses to requested changes

* remove unnecessary recommendation
2016-12-20 16:30:03 +00:00
Cameron Sparr
a90afd95c6 Fix & unit test logparser CLF pattern with IPv6
deals partially with #1973

see also https://github.com/vjeantet/grok/issues/17
2016-12-20 15:57:32 +00:00
Cameron Sparr
9866146545 Support negative statsd counters
closes #1898
2016-12-20 13:21:51 +00:00
Cameron Sparr
8df325a68c changelog update 2016-12-20 13:04:51 +00:00
Łukasz Harasimowicz
48ae105a11 Fixing consul with multiple health checks per service (#1994)
* plugins/input/consul: moved check_id from regular fields to tags.

When a service has more than one check, sending data for both would overwrite each other,
resulting in only one check being written (the last one). Adding check_id as a tag
ensures we will get info for all unique checks per service.

* plugins/inputs/consul: updated tests
2016-12-20 13:03:31 +00:00
Jeff Ashton
4e808c5c20 Importing pdh from github.com/lxn/win
closes #1763
closes #2017
2016-12-20 12:06:40 +00:00
Ken Dilley
eb96443a34 Update MySQL Readme to clarify connection string examples. (#2175)
* Update MySQL Readme to clarify connection string examples.

* Update mysql sample config to clarify connection string examples
2016-12-20 10:17:00 +00:00
Cameron Sparr
e36c354ff5 internal.Duration build fixup 2016-12-17 13:10:33 +00:00
Pierre Tessier
f09c08d1f3 Added response_timeout property
closes #2006
2016-12-17 13:06:04 +00:00
Steven Pall
0e8122a2fc Add trailing slash to jolokia context (#2105) 2016-12-17 12:51:46 +00:00
Cameron Sparr
6723ea5fe6 changelog update 2016-12-16 17:30:13 +00:00
Vincent
e8bf968c78 fix mongodb replica set lag awalys 0 #1449 (#2125) 2016-12-16 17:29:04 +00:00
Cameron Sparr
9c8f24601f rabbitmq, decrease timeout verbosity in config 2016-12-16 14:12:50 +00:00
Tevin Jeffrey
4957717df5 Add field for last GC pause time (#2121) 2016-12-16 14:03:53 +00:00
Cameron Sparr
21fac3ebec changelog update 2016-12-16 14:02:11 +00:00
Patrick Hemmer
ecbc634221 fix tail input seeking when used with pipe (#2090) 2016-12-16 14:01:49 +00:00
alekseyp
90cec20d1d Standard deviation (jitter) for Input plugin Ping (#2078) 2016-12-16 13:58:27 +00:00
Cameron Sparr
bcbf82f8e8 changelog update 2016-12-16 13:54:51 +00:00
Alex Sherwin
3a45d8851d fixes #1987 custom docker repos with non-standard port (#2018)
* fixed parsing of docker image name/version

now accounts for custom docker repos which contain a colon for a non-default port

* 1978: modifying docker test case to have a custom repo with non-standard port

* using a temp var to store index, ran gofmt

* fixes #1987, renaming iterator to 'i'
2016-12-16 13:53:16 +00:00
Pierre Tessier
4a83c8c518 Add Questions status variable for issue: #1988 (#2004) 2016-12-16 13:47:47 +00:00
Doug Reese
bc13d32d53 MongoDB input plugin: Improve state data (#2001)
* MongoDB input plugin: Improve state data

Adds ARB as a "member_status" (replica set arbiter).
Uses MongoDB replica set state string for "state" value.

* MongoDB input plugin: Improve state data - changelog update
2016-12-16 13:46:32 +00:00
Frank Stutz
e6fc32bdf0 fix for puppetagent config - test 1
put Makefile back to normal

removed comment from puppetagent.go

changed config_version to config_version_string and fixed yaml for build

changed wording from branch to environment for config_string

fixed casing and Changelog

fixed test case

closes #1917
2016-12-16 13:36:06 +00:00
Cameron Sparr
a970b9c62c Revert "Rabbitmq plugin: connection-related metrics." (#2169) 2016-12-15 19:31:40 +00:00
Florian Klink
17b307a7bc ping: fix typo in README (#2163) 2016-12-14 19:47:48 +00:00
Jose Luis Navarro
393f5044bb Collect JSON values recursively
closes #1993
closes #1693
2016-12-13 21:06:05 +00:00
Pieter Slabbert
c630212dde Enable setting a clientID for MQTT Output
closes #2079
closes #1910
2016-12-13 20:03:09 +00:00
Cameron Sparr
f39db08c6d Set default values for delete_ configuration options
closes #1893
2016-12-13 20:00:52 +00:00
Jonas Falck
b4f9bc8745 Change hddtemp to always put temperature in temperature field (#1905)
Added unit tests for the changes

Fixes #1904
2016-12-13 19:40:55 +00:00
Cameron Sparr
5f06bd2566 Graylog output should set short_message field
closes #2045
2016-12-13 16:10:59 +00:00
Cameron Sparr
8a4ab3654d Fix documentation for net_response plugin
closes #2103
2016-12-13 16:02:03 +00:00
Cameron Sparr
e2f9617228 Support strings in statsd set measurements
closes #2068
2016-12-13 15:42:22 +00:00
Cameron Sparr
e097ae9632 Fix possible panic when file info cannot be gotten
closes #2061
2016-12-13 14:54:07 +00:00
Cameron Sparr
07684fb030 Update changelog 2016-12-13 14:28:28 +00:00
Da1den
17fa6f9b17 Fixed bug that prevented gathering data on non-English systems (#1944) 2016-12-13 14:24:41 +00:00
krise3k
8e3fbaa9dd Add missing slim (#1937) 2016-12-13 14:23:18 +00:00
Kishore Nallan
dede3e70ad Rabbitmq plugin: connection-related metrics. (#1908)
* Rabbitmq plugin: connection-related metrics.

* Run go fmt.
2016-12-13 14:17:20 +00:00
Anthony Arnaud
7558081873 Output openTSDB HTTPS with basic auth (#1913) 2016-12-13 14:15:51 +00:00
Leon Barrett
6e241611be Fix bug: too many cloudwatch metrics (#1885)
* Fix bug: too many cloudwatch metrics

Cloudwatch metrics were being added incorrectly. The most obvious
symptom of this was that too many metrics were being added. A simple
check against the name of the metric proved to be a sufficient fix. In
order to test the fix, a metric selection function was factored out.

* Go fmt cloudwatch

* Cloudwatch isSelected checks metric name

* Move cloudwatch line in changelog to 1.2 features
2016-12-13 14:13:53 +00:00
Rikaard Hosein
fc9f921b62 Can turn pid into tag instead of field
closes #1843
fixes  #1668
2016-12-13 13:21:39 +00:00
Cameron Sparr
12db3b9120 Check if metric is nil before calling SetAggregate
fixes #2146
2016-12-13 12:27:10 +00:00
Patrick Hemmer
b58926dd26 snmp: use a shared global translation cache
Prevents the same data from being looked up multiple times. Also prevents multiple simultaneous lookups.

closes #2115
closes #2104
2016-12-12 13:32:42 +00:00
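A minimal sketch of a shared, mutex-guarded translation cache, with a hypothetical execTranslate helper standing in for the snmptranslate invocation; holding the lock across the lookup also serializes concurrent requests, matching the commit's second goal:

```
package sketch

import "sync"

var (
	transMu    sync.Mutex
	transCache = map[string]string{}
)

func translate(oid string) string {
	transMu.Lock()
	defer transMu.Unlock()
	if v, ok := transCache[oid]; ok {
		return v
	}
	v := execTranslate(oid) // expensive external command, run at most once per OID
	transCache[oid] = v
	return v
}

func execTranslate(oid string) string { return oid } // stub for the sketch
```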
Patrick Hemmer
91143dda1a snmp: make snmptranslate not required (#2008) 2016-12-12 13:30:07 +00:00
Christian Eichelmann
efb64a049f add a hint to possible basic authentication settings 2016-12-09 12:58:54 +00:00
Ross McDonald
4f6087a99d Update readme links for 1.1.1. (#2134) 2016-12-07 17:10:05 +00:00
Cameron Sparr
6b0e863556 Support a telegraf.Metric.Split function 2016-12-07 15:18:47 +00:00
Cameron Sparr
11bc82379c Go version 1.7.3 -> 1.7.4 2016-12-06 15:42:50 +00:00
Cameron Sparr
a093ec1eaa Kafka output fixup 2016-12-06 15:38:59 +00:00
Cameron Sparr
d71a42cd1b Implement telegraf collecting stats on itself
closes #1348
2016-12-05 18:56:54 +00:00
Nathan D Acuff
d518d7d806 Add device name as a tag in disk stats (#1807)
* return partition stat alongside disk stat from disk usage method, and report device name (minus /dev/) as a tag in disk stats

* update system/disk tests to include new partition stat return value from disk usage method calls

* update changelog for #1807 (use device name instead of path to report disk stats)
2016-12-05 17:42:36 +00:00
Pierre Fersing
1d1afe6481 Fix RPM architecture for armhf (#2003)
Signed-off-by: Pierre Fersing <pierre.fersing@bleemeo.com>
2016-12-05 16:45:02 +00:00
Foxlik
5a3f2e61f3 Fix improper total of CPU times (#2123)
On Linux, the CPU time counters for user and nice include the respective guest and guest_nice counters. This results in improper calculation of percentages (see the sketch after this entry).

Please see:
https://github.com/torvalds/linux/blob/447976e/kernel/sched/cputime.c#L169
https://lists.linuxfoundation.org/pipermail/virtualization/2009-August/013459.html
https://github.com/giampaolo/psutil/pull/940
2016-12-05 08:35:59 +00:00
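A minimal sketch of the corrected totalling, using the field names from /proc/stat: user already contains guest, and nice already contains guest_nice, so both must be subtracted before any percentage is computed.

```
package sketch

type cpuTimes struct {
	User, Nice, System, Idle, Iowait, Irq, Softirq, Steal, Guest, GuestNice float64
}

func (t cpuTimes) busy() float64 {
	// Subtract guest time so it is not counted twice.
	return (t.User - t.Guest) + (t.Nice - t.GuestNice) +
		t.System + t.Irq + t.Softirq + t.Steal
}

func (t cpuTimes) total() float64 {
	return t.busy() + t.Idle + t.Iowait
}
```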
Cameron Sparr
504f4e69db file output plugin fixup 2016-12-02 11:36:22 +00:00
Cameron Sparr
9f6666beb3 unit test fixup 2016-12-01 19:17:44 +00:00
Cameron Sparr
af6e7b9531 More unit tests for new metric 2016-12-01 19:07:14 +00:00
Cameron Sparr
6fd7361364 allocation & perf improvements 2016-12-01 18:17:02 +00:00
Cameron Sparr
e5c7a71d8e Fix unit tests for new metric implementation 2016-12-01 18:17:02 +00:00
Cameron Sparr
db7a4b24b6 Implement telegraf's own full metric type
main reasons behind this:
- make adding/removing tags cheap
- make adding/removing fields cheap
- make parsing cheaper
- make parse -> decorate -> write out bytes metric flow much faster

Refactor serializer to use byte buffer
2016-12-01 18:17:02 +00:00
Cameron Sparr
332f678afb JSON serializer: include unit test with escapes 2016-12-01 18:16:52 +00:00
John Engelman
04a2b36a52 Fix changelog for json parser. (#2100) 2016-11-28 18:20:32 +00:00
Cameron Sparr
f862c6585d amqp precision is not used anymore 2016-11-24 10:17:24 +00:00
Cameron Sparr
5c32521a07 Add benchmarks for metric parsing and creating 2016-11-23 17:23:08 +00:00
Cameron Sparr
9db30250c3 'discard' output plugin 2016-11-23 14:03:30 +00:00
Cameron Sparr
2b0cd2037b Add Copy() function to Metric interface 2016-11-23 12:30:31 +00:00
Guillem Jover
536dbfb724 Switch to github.com/kballard/go-shellquote (#1950)
The old gonuts fork has no License and has not seen any commits
differing from the original project, while the original has seen some
activity, even if low.

Having no license is a problem for distributors, as by default, such
projects are undistributable.
2016-11-16 11:24:11 -05:00
karech
b77398c4d3 Configurable RabbitMQ HTTP timeouts #1997 (#1998)
* [plugins] rabbitmq input plugin: add non default http timeouts

* update CHANGELOG.md
2016-11-16 16:18:56 +00:00
Chris Goller
fbf5bee051 Update win_pref_counter to include Processor Queue Length in examples. (#2029) 2016-11-16 13:16:44 +00:00
leplan73
81004c808f Added IopsInProgress to diskio stats (#2037)
* Export IopsInProgress

* Export IopsInProgress

* Export IopsInProgress
2016-11-16 13:16:16 +00:00
Pieter Slabbert
196509cc53 Trim null characters in Value data format (#2049)
* Trim null characters in Value data format

Some producers (such as the paho embedded c mqtt client) add a null
character "\x00" to the end of a message.  The Value parser would fail on
any message from such a producer.

* Trim whitespace and null in all Value data formats

* No unnecessary reassignments in Value data format parser

* Update change log for Value data format fix
2016-11-16 13:13:31 +00:00
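A minimal, integers-only sketch of the sanitizing step: strip trailing NUL bytes (such as the one appended by the paho embedded C client) along with surrounding whitespace before parsing.

```
package sketch

import (
	"bytes"
	"strconv"
)

func parseValue(raw []byte) (int64, error) {
	clean := bytes.TrimSpace(bytes.Trim(raw, "\x00"))
	return strconv.ParseInt(string(clean), 10, 64)
}
```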
John Engelman
94ce67cc67 Add support to parse JSON array. (#1965) 2016-11-15 13:02:55 -05:00
Toni Moreno
33ed528afe Apache input enhancements ( added Basic Auth and SSL skipverify ) (#1964)
* added connection Timeout parameter, basic HTTP authentication, and HTTPS support with the Sslskipverify option

* updated README.md

* added optional SSL config , changed timeout name and type , and other minor fixes

* added some code style improvements

* Update README.md
2016-11-15 10:52:24 -05:00
Cameron Sparr
2435e47926 changelog update 2016-11-15 11:36:26 +00:00
Mike Ragalie
ff67a4b96c Cache and expire metrics for prometheus output (#2016)
* Cache and expire metrics for prometheus output

* Fix test

* Use interval.Duration

* Default prometheus expiration interval to 60s

* Update changelog
2016-11-15 11:33:39 +00:00
Sebastian Borza
f816b952cf Add udp_buffer_size option to udp_listener (#1883)
* patching udp_listener for fun

updating with errcode

adding debug flags to temp msgs

moving from debug to info

* updating PR 1883 based on feedback
2016-11-15 09:49:48 +00:00
Todd Persen
b905bc1b5d Merge pull request #2024 from influxdata/cs2023-single-quote-duration
Fix single quote parsing of TOML durations
2016-11-11 09:40:32 -08:00
Cameron Sparr
0ecbf9e349 Fix single quote parsing of TOML durations
closes #2023
2016-11-10 09:47:46 +00:00
Cameron Sparr
1c7715780e Documentation improvements
- fully document aggregator and processor plugins
- improve readme.md

closes #1989
2016-11-08 13:55:37 +00:00
John Engelman
5d3850c44e Update docs on Cloudwatch. Set default period to 5m. (#2000) 2016-11-07 12:14:04 +00:00
Cameron Sparr
e84b356a12 Update etc/telegraf.conf 2016-11-04 13:18:44 +00:00
John Engelman
b349800f7a Fix up AWS plugin docs so they don't use single quotes. (#1991)
Also don't use named returns in fetchNamespaceMetrics since it's
non-standard for the rest of the codebase.
2016-11-04 13:16:41 +00:00
Cameron Sparr
47de43abf3 Use rfc3339 timestamps in telegraf log output
closes #1564

also add unit and benchmark tests
2016-11-03 18:39:02 +00:00
Johannes Rudolph
7a9fef80f5 Update README.md (#1868)
I think this is a copy paste bug? ;-)
2016-11-03 18:28:24 +00:00
Cameron Sparr
dc28875437 Update gopsutil dependency
primarily for a fix in the Windows network counter code

closes #1949
2016-11-03 18:06:10 +00:00
Cameron Sparr
a6ed4d4c3a CircleCI script, do not explicitly set version tag 2016-11-03 17:21:06 +00:00
Cameron Sparr
fe6162b2a1 Use short commit in Makefile build 2016-11-03 16:37:52 +00:00
Cameron Sparr
34182d9c9f Add release 1.2 section to changelog 2016-11-03 14:34:09 +00:00
Cameron Sparr
16081b2d1a Update etc/telegraf.conf 2016-11-03 14:31:55 +00:00
Matteo Cerutti
e43cfc2fce fix leap_status value in chrony input plugin (#1983) 2016-11-03 10:46:54 +00:00
Prunar
137272afea Update README.md (#1963)
Typo
2016-11-02 14:25:09 +00:00
Cameron Sparr
2150510bd4 nats_consumer: buffer incoming messages
fixes #1956
2016-10-27 13:39:27 +01:00
albundy83
fc59757a1a Just fix typo (#1962) 2016-10-27 11:45:17 +01:00
Cameron Sparr
0cfa0d419a udp_listener & tcp_listener set default values
closes #1936
2016-10-27 10:25:24 +01:00
Paulo Pires
522658bd07 Fix NATS plug-ins reconnection logic (#1955)
* NATS output plug-in now retries to reconnect forever after a lost connection.

* NATS input plug-in now retries to reconnect forever after a lost connection.

* Fixes #1953
2016-10-26 15:45:33 +01:00
Jonathan Chauncey
b1a97e35b9 fix(kubernetes): Only initialize RoundTripper once (#1951)
fixes #1933
2016-10-26 13:47:35 +01:00
Cameron Sparr
c66363cba5 Update Go version: 1.7.1->1.7.3 2016-10-25 14:49:21 +01:00
Cameron Sparr
61269c3500 Update config generation docs
closes #1925
2016-10-25 14:46:50 +01:00
Priyank Trivedi
393d129982 Fix typo from 'Proctstas' to 'Procstat' in procstat plugin's README (#1945) 2016-10-25 13:57:55 +01:00
Cameron Sparr
80d4864844 Only install fpm,rpm,boto if we need them 2016-10-25 13:31:48 +01:00
Cameron Sparr
f729fa990d Unit testing for internal.Duration Unmarshal
closes #1926
2016-10-25 13:11:32 +01:00
Alex Zorin
662db7a944 Fix panic in internal.Duration UnmarshalTOML 2016-10-25 18:30:01 +11:00
Cameron Sparr
c849b58de9 http_listener input unit tests 2016-10-24 18:17:49 +01:00
Cameron Sparr
097b1e09db http listener refactor
in this commit:

- chunks out the http request body to avoid making very large
  allocations.
- establishes a limit for the maximum http request body size that the
  listener will accept.
- utilizes a pool of byte buffers to reduce GC pressure.
2016-10-24 18:17:49 +01:00
John Hu
babd37bf35 Typo (#1924) 2016-10-21 14:11:03 +01:00
David Norton
91f48e7ad5 Merge pull request #1847 from jchauncey/kubernetes-plugin
feat(kubernetes): Add kubernetes input plugin
2016-10-17 15:58:47 -04:00
Jonathan Chauncey
a12bd878e0 feat(kubernetes): Add kubernetes input plugin
closes #1774
2016-10-17 15:40:55 -04:00
Cameron Sparr
a4e8f24b16 Set reasonable defaults in ping plugin
closes #1742
2016-10-17 15:21:09 +01:00
Cameron Sparr
a65447d22e Use mysql.ParseDSN func instead of url.Parse
The MySQL DB driver has its own DSN parsing function. Previously we
were using the url.Parse function, but this causes problems because a
valid MySQL DSN can be an invalid http URL, namely when using some
special characters in the password.

This change uses the MySQL DB driver's builtin ParseDSN function and
applies a timeout parameter natively via that.

Another benefit of this change is that we fail earlier if given an
invalid MySQL DSN.

closes #870
closes #1842
2016-10-12 17:10:28 +01:00
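A minimal sketch using the driver's own parser; note the password may legally contain characters that would make the string an invalid http URL:

```
package sketch

import (
	"time"

	"github.com/go-sql-driver/mysql"
)

func dsnWithTimeout(dsn string, timeout time.Duration) (string, error) {
	cfg, err := mysql.ParseDSN(dsn) // fails early on an invalid DSN
	if err != nil {
		return "", err
	}
	cfg.Timeout = timeout // applied natively by the driver
	return cfg.FormatDSN(), nil
}
```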
Cameron Sparr
b00ad65b08 Log config file parsing errors properly
closes #1344
2016-10-12 16:50:22 +01:00
Cameron Sparr
a84ce5d5cb drop metrics outside of the aggregator's period 2016-10-12 14:56:03 +01:00
Cameron Sparr
8ca4a50c18 delete nil fields in the metric maker.
closes #1771
2016-10-12 14:50:19 +01:00
Cameron Sparr
03b2984ac2 Fixup some code based on feedback from @dgnorton 2016-10-12 14:50:19 +01:00
Cameron Sparr
9540a6532f Update influxdb dependency for new models.Tags 2016-10-12 14:50:19 +01:00
Cameron Sparr
cace663bbf Processor & Aggregator Contrib doc 2016-10-12 14:50:19 +01:00
Cameron Sparr
acfdd15aa9 Processor & Aggregator configuration doccing 2016-10-12 14:50:19 +01:00
Cameron Sparr
78f544c0aa Support --aggregator-filter & --processor-filter 2016-10-12 14:50:19 +01:00
Cameron Sparr
2175a72fcc Rebase fixup 2016-10-12 14:50:19 +01:00
Cameron Sparr
b03c1d9691 Support ordering of processor plugins 2016-10-12 14:50:19 +01:00
Cameron Sparr
fead80844e Refactor handling of MinMax functionality into RunningAggregator
allows for easier addition of a sliding window at a later time.

Also makes `period` be a generic argument for all aggregator plugins.
2016-10-12 14:50:19 +01:00
Cameron Sparr
ef885eda62 Change minmax aggregator to store float64 2016-10-12 14:50:19 +01:00
Cameron Sparr
64a71263a1 Support Processor & Aggregator Plugins
closes #1726
2016-10-12 14:50:19 +01:00
Cameron Sparr
974221f0cf Fix phpfpm fcgi client panic when URL doesn't exist
closes #1886
2016-10-12 11:58:38 +01:00
Ririsoft
bccef2856d Revert "Moving cgroup path name to field from tag to reduce cardinality (#1457)"
This was introducing a regression with influxdb output, leading to
collision an points missing.
This reverts commit 53f40063b3.

closes #1724
closes #1796
2016-10-12 11:04:28 +01:00
Patrick Hemmer
80df3f7634 snmp: fix initialization of table fields in manual tables (#1836) 2016-10-12 11:00:39 +01:00
Cameron Sparr
e96f7a9b12 graphite parser, handle multiple templates empty filter
Previously, the graphite parser would simply overwrite any template that
had an identical filter to a previous template. This included the empty
filter.

Now we will still overwrite, but first we will sort to make sure that
the most "specific" template always matches.

closes #1731
2016-10-11 15:22:51 +01:00
Cameron Sparr
2bbb6aa6f2 Add doc for SNMP debug tips (#1831) 2016-10-11 14:48:08 +01:00
Cameron Sparr
1ff721ad84 Add riemann output plugin deprecation message 2016-10-11 12:28:20 +01:00
Eric
3e3b094270 Only log warning on type when in debug mode.
closes #1793
2016-10-11 11:35:43 +01:00
Eric
1f7a8fceef Fixed JSON serialization to make sure only value types supported by OpenTSDB are sent, and made sure we send numbers un-quoted even though the OpenTSDB API accepts them quoted, as that is not clean JSON. 2016-10-11 11:32:24 +01:00
Marko Crnic
b702a9758b haproxy/README: make quotes consistent
closes #1700
2016-10-11 11:30:22 +01:00
Marko Crnic
3b607aa8ae haproxy: add README covering basics of the plugin 2016-10-11 11:29:04 +01:00
Marko Crnic
4a4a6892f9 haproxy: update HAproxy docs URL 2016-10-11 11:29:04 +01:00
Marko Crnic
56b627dfe2 haproxy_test: extend tests to cover name globbing 2016-10-11 11:29:04 +01:00
Marko Crnic
5c87b92976 haproxy_test: define expected results in one place
Map holding expected results was defined in multiple places, making test
cases a bit hard to read. This way we can change our expectations of
good results in one place and have them affect multiple test cases.
2016-10-11 11:29:04 +01:00
Marko Crnic
dbcc312b0e haproxy: clarify handling of http and socket addresses
This behaviour was introduced along with socket support, but never got
documented properly.
2016-10-11 11:29:04 +01:00
Marko Crnic
2d842fefb8 haproxy: add support for socket name globbing 2016-10-11 11:29:04 +01:00
Marko Crnic
d63e3c8cc4 haproxy: move socket address detection to own function 2016-10-11 11:29:04 +01:00
Stian Øvrevåge
187a894fe9 Create CONFIG-EXAMPLES.md with a switch interface example
Added a standard example for collecting interface metrics from switches or routers and tagging them properly.

closes #1666
2016-10-11 11:00:25 +01:00
Cameron Sparr
ca55c4a55d Remove COMING SOON: multiple statsd fields 2016-10-11 10:57:34 +01:00
Cameron Sparr
d627bdbbdb logparser: allow numbers in ident & auth parameters
fixes #1810
2016-10-10 11:27:35 +01:00
Edie Zhang
4f06f6b3d8 adding the tags in the graylog output plugin
closes #1861
2016-10-07 12:24:21 +01:00
Cameron Sparr
7f0fe78615 Changelog update for systemd log change 2016-10-06 17:48:23 +01:00
Ririsoft
5913f7cb36 Log to systemd journal
Let's align to InfluxDB 1.0 logging policy and log to systemd journal by
default.

closes #1732
2016-10-06 17:48:22 +01:00
James Carr
8dc42ad9f2 Add idle_since to emitted metrics (#1844) 2016-10-06 14:26:53 +01:00
Cameron Sparr
886bdd2ef2 changelog update 2016-10-06 14:25:28 +01:00
Patrick Hemmer
5a86a2ff26 snmp: return error on unknown conversion type (#1853) 2016-10-06 14:23:51 +01:00
zensqlmonitor
817d696628 SQL Server plugin: Fix WaitStats issue (#1859)
Issue #1854
2016-10-06 14:21:14 +01:00
Cameron Sparr
4ab0344ebf Update changelog & readme for 1.0.1 2016-10-05 08:41:58 +01:00
Patrick Hemmer
7b05170145 update to latest gosnmp (#1850) 2016-10-05 08:40:56 +01:00
Patrick Hemmer
b48ad4b737 fix snmp emitting empty fields
closes #1848
closes #1835
2016-10-04 16:25:16 +01:00
Patrick Hemmer
9feb639bbd fix translating snmp fields not in MIB (#1846) 2016-10-04 16:22:15 +01:00
Cameron Sparr
ce5054c850 Changelog update 2016-10-03 18:20:10 +01:00
Cameron Sparr
c7834209d2 Major Logging Overhaul
in this commit:

- centralize logging output handler.
- set global Info/Debug/Error log levels based on config file or flags.
- remove per-plugin debug arg handling.
- add a I!, D!, or E! to every log message.
- add configuration option to specify where to send logs.

closes #1786
2016-10-03 17:13:03 +01:00
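A minimal sketch of the level-prefix convention described above; the writer drops D!-prefixed lines unless debug is enabled. Telegraf's real handler is more involved; this only illustrates the idea.

```
package main

import (
	"bytes"
	"io"
	"log"
	"os"
)

type levelWriter struct {
	out   io.Writer
	debug bool
}

func (lw levelWriter) Write(p []byte) (int, error) {
	if !lw.debug && bytes.HasPrefix(p, []byte("D!")) {
		return len(p), nil // swallow debug lines
	}
	return lw.out.Write(p)
}

func main() {
	log.SetFlags(0) // keep the level marker at the start of each line
	log.SetOutput(levelWriter{out: os.Stderr})
	log.Print("I! agent starting")
	log.Print("D! this line is dropped")
}
```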
Cameron Sparr
78ced6bc30 Use a bufio.Scanner in http listener
this will prevent potentially very large allocations due to a very large
chunk size sent by a client.

fixes #1823
2016-09-29 16:07:51 +01:00
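A minimal sketch of bounded line scanning; the 64 KiB cap is an assumed figure, not the listener's actual limit:

```
package sketch

import (
	"bufio"
	"io"
)

func readLines(body io.Reader, handle func(line []byte)) error {
	scanner := bufio.NewScanner(body)
	scanner.Buffer(make([]byte, 0, 64*1024), 64*1024) // cap per-line allocation
	for scanner.Scan() {
		handle(scanner.Bytes())
	}
	return scanner.Err()
}
```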
Cameron Sparr
ca8e512e5b Update changelog 2016-09-28 16:12:32 +01:00
zensqlmonitor
573628dbdd Fix collation issue 2016-09-28 16:11:00 +01:00
Peter Murray
e477620dc5 Making '-service' flags work from a non-interactive session, i.e. Ansible, related to #1760 2016-09-28 16:09:43 +01:00
Łukasz Harasimowicz
32268fb25b Disable mesos task statistics until we find a better way to deal with them.
Due to the very real problem of generating a vast number of data series through
mesos task metrics, this feature is disabled until a better solution is found.
2016-09-28 16:07:35 +01:00
Łukasz Harasimowicz
80391bfe1f Fixed tags on mesos_task metrics.
Tagging values by executor_id can create quite a lot data series
in InfluxDB so we should stick to framework_id and server.
2016-09-28 16:07:35 +01:00
Cameron Sparr
e19845c202 Load config directory using filepath.Walk
closes #1137
2016-09-28 16:01:52 +01:00
Cameron Sparr
52134555d6 globpath: only walk tree if ** is defined
closes #1517
2016-09-28 15:44:29 +01:00
Cameron Sparr
e7e39df6a0 Default SNMP parameter changes
max-repetitions = 10 is the default of net-snmp utils according to
http://net-snmp.sourceforge.net/docs/man/snmpbulkwalk.html

retries = 3 is the default of gosnmp:
https://godoc.org/github.com/soniah/gosnmp#pkg-variables

Could deal with some parts of the performance issues reported
by #1665
2016-09-28 14:34:20 +01:00
Patrick Hemmer
055ef168ae add oid_index_suffix to snmp plugin 2016-09-27 11:30:25 +01:00
Patrick Hemmer
2778b7be30 add snmp conversions for MAC addresses & IPs 2016-09-27 11:30:25 +01:00
Patrick Hemmer
953db51b2c Adjust snmp translation to return conversion info.
Also consolidated the translation code to obtain all info with just 1 command execution.

Also split test command mocks out to their own file for cleanliness.
2016-09-27 11:30:25 +01:00
Cameron Sparr
c043461f6c Fix varnish plugin to use default values
closes #1752
2016-09-23 16:06:33 +01:00
Cameron Sparr
ddc07f9ef8 Fix powerdns integer parse error handling
closes #1751
2016-09-23 16:05:15 +01:00
lost_z
2cf1db0837 add mysql uptime (#1735) 2016-09-23 15:59:22 +01:00
Cameron Sparr
17e6496830 update changelog 2016-09-23 11:38:52 +01:00
Vinh Quốc Nguyễn
1d10eda84e Fix crash when allowed pending messages wasn't set (#1785)
The default is 0, so we hit a division-by-zero error and crash. This check
ensures we will not crash, and instead `log` and continue to let telegraf run

Also, we set the default allowed pending message count to 10000
2016-09-23 11:37:47 +01:00
Daniele Gozzi
9ea3dbeee8 Allow numeric and non-string values for tag_keys. (#1782)
* Allow numeric and non-string values for tag_keys.

According to the go documentation the JSON deserializer only produces these
base types in output:
- string
- bool
- float64
- nil
With this patch bool, float64 and nil values get converted to a string when
their field key is specified in tag_keys. Previously the field was simply
discarded.

* Updated handling of nil for passing tests.

The automated tests are less than trivial to reproduce locally for me,
so I hope CircleCI won't mind...

* Updated changelog entries with PR and issue links.
2016-09-21 18:07:35 +01:00
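A sketch of the conversion the patch describes, switching on the four base types the JSON deserializer can produce (the function name and the representation chosen for nil are illustrative):

```go
import "strconv"

// tagValue converts a decoded JSON value to a string so it can be used as a
// tag value when its field key is listed in tag_keys.
func tagValue(v interface{}) string {
	switch t := v.(type) {
	case string:
		return t
	case bool:
		return strconv.FormatBool(t)
	case float64:
		return strconv.FormatFloat(t, 'f', -1, 64)
	case nil:
		return "null" // the exact representation for nil is a detail of the patch
	}
	return ""
}
```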
Rikaard Hosein
100501ba72 statsd input plugin correctly handles colons in data-dog tag values now (#1794)
* Code correctly handles colons in tag values now

* Modified existing datadog tag test to include a tag value containing a colon
2016-09-21 14:37:42 +01:00
Cameron Sparr
f12368698b Update etc/telegraf.conf
closes #1789
2016-09-21 11:53:06 +01:00
Ross McDonald
6b25a73629 Add container state metrics to docker plugin (#1791)
* Add container state metrics to docker plugin.

* Update changelog.
2016-09-21 10:37:49 +01:00
David Moravek
90c7475c68 Fix sysstat resource leak (#1792) 2016-09-21 10:19:59 +01:00
Cameron Sparr
6648c101dd Add configurable timeout to influxdb input
closes #1773
2016-09-16 16:50:39 +01:00
Cameron Sparr
8d3285522c Prometheus output: do not remake metrics map each write
closes #1775
2016-09-16 16:50:39 +01:00
David Norton
b613405f42 Merge pull request #1768 from influxdata/dgn-speedup-statsd-parser
speed up statsd parser
2016-09-15 10:46:56 -04:00
David Norton
e999298078 speed up statsd parser 2016-09-15 08:11:06 -04:00
David Norton
0f0ab953f6 Merge pull request #1766 from influxdata/dgn-statsd-parsing-benchmarks
add statsd parsing benchmarks
2016-09-15 07:10:18 -04:00
David Norton
aaddbd153e add statsd parsing benchmarks 2016-09-14 11:12:02 -04:00
Cameron Sparr
9b2e2cc41f kafka panic: Check that error is non-nil before
fixes #1764
2016-09-14 08:54:22 +01:00
Cameron Sparr
bc22309459 Add commit & branch to Makefile 2016-09-13 09:31:30 +01:00
Gunnar
b6f81b538a Add commit to Telegraf version string (#1756) 2016-09-13 08:41:02 +01:00
Cameron Sparr
c3aa43a6bd Fix prometheus output panic on reload
closes #1530
2016-09-12 10:46:37 +01:00
Rene Zbinden
b2ea39077e fix issue #1716 (#1749) 2016-09-12 10:30:35 +01:00
Cameron Sparr
811567a2f4 Update go version to 1.7, fix vet errors
closes #1728
2016-09-09 16:11:17 +01:00
Cameron Sparr
ca8fb440cc Fix statsd scientific notation parsing
closes #1733
2016-09-09 15:13:11 +01:00
Cameron Sparr
ac58a6bb3c Fix unmarshal of influxdb metrics with null tags
closes #1738
2016-09-09 14:49:21 +01:00
Sean Beckett
9757d39240 Update CHANGELOG.md 2016-09-08 09:11:24 -06:00
Cameron Sparr
5a9e7d77b8 Update readme & chglog for 1.0 2016-09-08 15:26:10 +01:00
Cameron Sparr
e963b7f01b alphabetize service inputs, add logparser 2016-09-07 15:55:21 +01:00
Nathan D Acuff
e7899d4dc5 Postgresql database blacklist configuration option (#1699)
* separate hello and authenticate functions, force connection close at end of write cycle so we don't hold open idle connections, which has the benefit of mostly removing the chance of getting hopelessly disconnected

* update changelog, though this will need to be updated again to merge into telegraf master

* bump instrumental agent version

* fix test to deal with better connect/reconnect logic and changed ident & auth handshake

* Update CHANGELOG.md

correct URL from instrumental fork to origin and put the change in the correct part of the file

* go fmt

* Split out Instrumental tests for invalid metric and value.

* Ensure nothing remains on the wire after final test.

* Force valid metric names by replacing invalid parts with underscores.

* Multiple invalid characters being joined into a single underscore.

* Adjust comment to what happens.

* undo split hello and auth commands, to reduce roundtrips

* Add ignored_databases option to postgresql configuration files, to enable easy filtering of system databases without needing to whitelist all the databases on the server.  Add tests for database whitelist and blacklist.

* run go fmt on new postgresql database whitelist/blacklist code

* add postgresql database blacklist option to changelog

* remove a bad merge from the changelog
2016-09-07 09:39:55 +01:00
Cameron Sparr
301c79e57c Add a 404 and high-traffic test to http listener
also remove locking around adding metrics. Instead, keep a waitgroup on
the ServeHTTP function and wait for that to finish before returning from
the Stop() function

closes #1407
2016-09-06 17:21:01 +01:00
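A sketch of the waitgroup-instead-of-lock pattern described above; the struct and its fields are illustrative:

```go
import (
	"net/http"
	"sync"
)

type listener struct {
	wg sync.WaitGroup
}

func (l *listener) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	l.wg.Add(1)
	defer l.wg.Done()
	// ... parse the request body and add metrics, no lock needed ...
}

func (l *listener) Stop() {
	// ... close the underlying net.Listener so no new requests arrive ...
	l.wg.Wait() // then wait for in-flight ServeHTTP calls to finish
}
```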
ncohensm
67c288abda initial http_listener implementation
fix incredibly stupid bugs

populate README

support query endpoint and change default listen port

set response headers for query endpoint

add unit tests

revert erroneous Godeps change

add plugin ref to top-level README

remove debug output and add empty post body test

fix linter errors

move stoppableListener into repo

use constants for http status codes

add CHANGELOG entry

address code review comments re. style/structure

address further code review comments

add note to README re. database creation calls per PR comments
2016-09-06 17:21:01 +01:00
Cameron Sparr
8dd2a8527a Refactor NATS ssl config 2016-09-06 13:52:29 +01:00
Cameron Sparr
2fe427b3b3 mongodb input: fix version 2.2 panic
closes #1628
2016-09-06 11:58:06 +01:00
Paulo Pires
6b1cc67664 Add NATS output plugin.
Added NATS server container needed for tests.

Added NATS output plug-in. Fixes #1487

NATS output plug-in uses internal.GetTLSConfig to set up its TLS configuration.

Added NATS output plug-in to changelog.

closes #1487
closes #1697
2016-09-06 11:39:57 +01:00
Cameron Sparr
1271f9d71a jolokia input: add note about POST permissions
closes #1628
2016-09-06 11:11:27 +01:00
aaron jheng
49ea4e9f39 [Docker Plugin] add server hostname for each docker measurement (#1599)
* add server hostname for each docker measurement

* update CHANGELOG

* move feature to v1.1

* tweak docker_engine_host tag
2016-09-06 08:37:46 +01:00
Cameron Sparr
50ef3282b6 Refactor and code cleanup of filtering
started working on this with the idea of fixing #1623, although I
realized that this was actually just a documentation issue around
a toml eccentricity.

closes #1623
2016-09-05 16:30:18 +01:00
Phil
b63dedb74d sanitize parenthesis (#1701) 2016-09-05 14:30:40 +01:00
Denis Orlikhin
5628049440 Handle negative integers coming as unsigned integers from Aerospike (#1679)
* Handle negative integers coming as unsigned integers from Aerospike stats

* skip values with overflow

* aerospike stat values parsing tests

* better tests
2016-09-05 14:29:14 +01:00
Cameron Sparr
54c9ba7639 Update documentation for Gauge & Counters 2016-09-05 12:58:07 +01:00
Cameron Sparr
b18d375d6c Implement AddGauge & AddCounter functions
and utilize them in the in the 'system' input plugins.
2016-09-02 16:51:26 +01:00
Cameron Sparr
6dbbe65897 Remove Add() function from accumulator 2016-09-02 16:35:27 +01:00
Cameron Sparr
03d8abccdd Implement telegraf metric types
And use them in the prometheus output plugin.

Still need to test the prometheus output plugin.

Also need to actually create typed metrics in the system plugins.

closes #1683
2016-09-02 16:35:27 +01:00
David Caldwell
0f6d317a8e Fix MySQL plugin not sending 0 value fields (#1695)
closes #1695
2016-09-02 15:22:30 +01:00
Cameron Sparr
792682590c Remove snmp_legacy unit tests and docker image 2016-08-31 12:17:06 +01:00
François de Metz
2d3da343b3 Add basic filestack webhook.
closes #1542

Generalize event.

Add doc.

Update default config.

Add filestack to the list of plugins.

Check that video conversion event returns 400.

Update the readme.

Update the changelog.
2016-08-31 10:48:27 +01:00
Charles-Henri
094eda22c0 Add new iptables plugin
The iptables plugin aims at monitoring bytes and packet counters
matching a given set of iptables rules.

Typically the user would set a dedicated monitoring chain into a given
iptables table, and add the rules to monitor to this chain. The plugin
will allow to focus on the counters for this particular table/chain.

closes #1471
2016-08-31 10:42:44 +01:00
Butitsnotme
4886109d9c Added option to remove all CRs from input stream
Added the option removecr to inputs.exec to remove all carriage returns
(CR, ASCII 0x0D, Unicode codepoint U+000D, ^M). The option is boolean and
not enabled if not present in the config file.

closes #1606

Updated CHANGELOG.md with information about removecr

Ran go fmt ./...

Moved removal of CRs to internal/internal.go

Moved the code to remove carriage returns from
plugins/inputs/exec/exec.go to internal/internal.go. Additionally
changed the conditional on which it gets applied from using a
configuration file option to checking if it is running on Windows.

Moved Carriage Return check to correct place

Moved the carriage return removal back to the exec plugin. Added unit
testing for it. Fixed a bug (removing too many characters).

Ran go fmt ./...

Reverted CHANGELOG to master

Updated Changelog
2016-08-31 10:32:33 +01:00
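A minimal sketch of the carriage-return stripping this option enables; the helper name is illustrative, and per the commit the final version applies it only on Windows and only within the exec plugin:

```go
import "bytes"

// removeCarriageReturns strips every CR byte (0x0D) from command output so
// Windows CRLF line endings don't leak into parsed values. The count of -1
// tells bytes.Replace to replace all occurrences.
func removeCarriageReturns(b []byte) []byte {
	return bytes.Replace(b, []byte{'\r'}, nil, -1)
}
```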
Cameron Sparr
2dc47285bd Move CloudWatch rate limit to config (#1673)
* Move CloudWatch rate limit to config
Reference #1670

* make that variable a string

* ahem, apparently limiter wants an int

* add the ratelimit to the sample config

* update the test to include the rate

* set a default value of 10 for ratelimit

* Move default ratelimit to init
2016-08-31 10:29:24 +01:00
Eric
6e33a6d62f OpenTSDB HTTP output
closes #1539

First version of http put working

Refactored code to separate http handling from opentsdb module. Added batching support.

Fixed tag cleaning in http output and refactored telnet output.

Removed useless struct.

Fixed current unittest and added a new one.

Added benchmark test to test json serialization. Made sure http client would reuse connection.

Ran go fmt on opentsdb sources.

Updated README file

Removed useHttp in favor of parsing host string to determine the right API to use for sending metrics. Also renamed BatchSize to HttpBatchSize to better convey that it is only used when using Http API.

Updated changelog

Fixed format issues.

Removed TagSet type to make it more explicit.

Fixed unittest after removing TagSet type.

Revert "Updated changelog"

This reverts commit 24dba5520008d876b5a8d266c34a53e8805cc5f5.

Added PR under 1.1 release.

add missing redis metrics

This makes sure that all redis metrics are present without having to use a hard-coded list of what metrics to pull in.
2016-08-31 10:27:08 +01:00
Cameron Sparr
a8f9eb23cc add missing redis metrics (#1689)
This makes sure that all redis metrics are present without having to use a hard-coded list of what metrics to pull in.
2016-08-31 08:44:47 +01:00
Patrick Hemmer
41a5ee6571 add missing redis metrics
This makes sure that all redis metrics are present without having to use a hard-coded list of what metrics to pull in.
2016-08-31 01:05:11 -04:00
Nathan Haneysmith
7d8de4b8e1 Move default ratelimit to init 2016-08-30 14:33:51 -07:00
Cameron Sparr
cc2b53abf4 Fix changelog for #1650 2016-08-30 16:24:07 +01:00
Yaser Alraddadi
32aa1cc814 httpjson: support configurable response_timeout (#1651)
* httpjson: support configurable response_timeout

* make default ResponseTimeout in init

* Update CHANGELOG.md
2016-08-30 16:23:15 +01:00
Simon Murray
38d877165a Ceph Cluster Performance Input Plugin
The existing ceph input plugin only has access to the local admin daemon socket
on the local host, and as such has access to a limited subset of data.  This
extends the plugin to use CLI commands to get access to the full spread of Ceph
data.  This patch collects global OSD map and IO statistics, PG state and per pool
IO and utilization statistics.

closes #1513
2016-08-30 15:43:07 +01:00
Cameron Sparr
5c5984bfe1 Changelog update 2016-08-30 15:26:27 +01:00
tuier
30cdc31a27 Some improvements in mesos input plugin (#1572)
* Some improvements in mesos input plugin:
     Removing unneeded statistics prefix for task's metrics,
     Adding framework id tags into each task's metric,
     Adding state (leader/follower) tags to master's metric,
     Making sure the slave's metrics are tagged with slave

* typo, replacing cpus_total with elected to determine leader

* Remove remaining statistics_ from sample

* using timestamp from mesos as metric timestamp

* change mesos-tasks to mesos_tasks, measurement

* change measurement name in test

* Replace follower by standby
2016-08-30 15:25:29 +01:00
Cameron Sparr
602a36e241 fix changelog #1607 2016-08-30 07:04:10 +01:00
Joel "The Merciless" Meador
b863ee1d65 [Instrumental] Underscore metric name output (#1607)
* separate hello and authenticate functions, force connection close at end of write cycle so we don't hold open idle connections, which has the benefit of mostly removing the chance of getting hopelessly disconnected

* update changelog, though this will need to be updated again to merge into telegraf master

* bump instrumental agent version

* fix test to deal with better connect/reconnect logic and changed ident & auth handshake

* Update CHANGELOG.md

correct URL from instrumental fork to origin and put the change in the correct part of the file

* go fmt

* Split out Instrumental tests for invalid metric and value.

* Ensure nothing remains on the wire after final test.

* Force valid metric names by replacing invalid parts with underscores.

* Multiple invalid characters being joined into a single underscore.

* Adjust comment to what happens.

* undo split hello and auth commands, to reduce roundtrips

* Split out Instrumental tests for invalid metric and value.

* Ensure nothing remains on the wire after final test.

* Force valid metric names by replacing invalid parts with underscores.

* Multiple invalid characters being joined into a single underscore.

* add an entry to CHANGELOG for easy merging upstream

* go fmt variable alignment

* remove some bugfixes from changelog which now more properly are in a different section.

* remove headers and whitespace that should have been removed with the last commit
2016-08-30 07:03:32 +01:00
Nathan Haneysmith
ca49babf3a set a default value of 10 for ratelimit 2016-08-29 11:41:43 -07:00
SoleAngelus
cf37b5cdcf Update WINDOWS_SERVICE.md (#1669)
1. Added further clarification on running commands in PowerShell.
2. Added double quotes to file paths.
2016-08-29 17:36:05 +01:00
Cameron Sparr
969f388ef2 Make elasticsearch timeout configurable
closes #1674
2016-08-29 11:06:30 +01:00
Nathan Haneysmith
0589a1d0a5 update the test to include the rate 2016-08-25 18:17:33 -07:00
Nathan Haneysmith
4e019a176d add the ratelimit to the sample config 2016-08-25 18:04:29 -07:00
Nathan Haneysmith
a0e23d30fe ahem, apparently limiter wants an int 2016-08-25 17:56:33 -07:00
Nathan Haneysmith
e931706249 make that variable a string 2016-08-25 17:53:46 -07:00
Nathan Haneysmith
2457d95262 Move CloudWatch rate limit to config
Reference #1670
2016-08-25 17:46:38 -07:00
Cameron Sparr
e9d33726a9 start aerospike container 1st for unit tests
because it requires some time to initialize before it can respond to
metric requests.
2016-08-24 09:16:55 +01:00
Cameron Sparr
2462e04bf2 Rdme upd (#1660)
* Update README and CHANGELOG with 1.0 RC 1

* Increase circleci test docker sleep

* update aerospike dependency
2016-08-24 08:41:12 +01:00
Patrick Hemmer
7fac74919c Alternate SNMP plugin (#1389)
* Add a new and improved snmp plugin

* update gosnmp for duplicate packet fix

https://github.com/soniah/gosnmp/issues/68
https://github.com/soniah/gosnmp/pull/69
2016-08-22 16:37:53 +01:00
Robert Kánia
b022b5567d Added missing column, refs #1646 (#1647) 2016-08-22 15:35:39 +01:00
Cameron Sparr
dbf6380e4b update PR template with changelog note 2016-08-17 18:24:06 +01:00
Cameron Sparr
a0e42f8a61 Sanitize graphite characters in field names
also sanitize the names at a higher scope for better clarity

closes #1637
2016-08-17 16:56:31 +01:00
Cameron Sparr
94e673fe85 Revert "add pgbouncer plugin"
This reverts commit fec9760f72.
2016-08-17 16:50:11 +01:00
Cameron Sparr
7600757f16 ntpq: don't index ntp fields that don't exist
closes #1634
2016-08-16 15:16:42 +01:00
Cameron Sparr
4ce8dd5f9a Rename snmp plugin to snmp_legacy 2016-08-11 16:11:35 +01:00
politician
26315bfbea Defines GOOS and GOARCH for windows builds (#1621)
* defines GOOS and GOARCH for windows builds

* default to amd64 on windows

* windows: use latest versions of missing packages
2016-08-11 15:35:00 +01:00
David Bayendor
a282fb8524 Update README.md (#1622)
* Update README.md

Clean up minor typos and syntax.

* Update README.md

Fix typo in 'default'
2016-08-11 09:14:56 +01:00
Jack Zampolin
dee98612e2 Modernize zookeeper readme (#1615)
* Modernize zookeeper readme

* Add configuration
2016-08-10 22:58:47 +01:00
Ross McDonald
69e4e862a3 Fix typo of 'quorom' to 'quorum' when specifying write consistency. (#1618) 2016-08-10 17:51:21 +01:00
Cameron Sparr
c0e895c3a7 etc/telegraf.conf update 2016-08-10 15:16:01 +01:00
jsvisa
fec9760f72 add pgbouncer plugin
add pgbouncer docker for testing

add pgbouncer testcase

update changelog

closes #1400
2016-08-10 15:14:15 +01:00
Rene Zbinden
1989a5855d remove cgo dependency by forking the sensors command
closes #1414
closes #649
2016-08-09 08:38:05 +01:00
Cameron Sparr
abcd19493e If win stat buffer is empty, do not try to index
closes #1425
2016-08-09 08:29:37 +01:00
tuier
e457b7a8df Source improvement for librato output (#1416)
* Source improvement for librato output

Build the source from the list of tags instead of a single
configuration-specified tag

Graphite Serializer:
* make buildTags public
* make sure not to use empty tags

Librato output:
* Improve Error handling for librato API base on error or debug flag
* Send Metric per Batch (max 300)
* use Graphite BuildTag function to generate source

The change is made so that it should be backward compatible

Metric sample:
server=127.0.0.1 port=80 state=leader env=test
measurement.metric_name value
service_n.metric_x

Metric before with source tags set as "server":
source=127.0.0.1
test.80.127_0_0_1.leader.measurement.metric_name
test.80.127_0_0_1.leader.service_n.metric_x

Metric now:
source=test.80.127.0.0.1.leader
measurement.metric_name
service_n.metric_x

As you can see, the source in the "new" version is much more precise.
That way, when filtering (only by source), you can filter by env or any other tag

* Use a template to specify which tags to use for the source; by default,
concat all tags

* revert change in graphite serializer

* better documentation, change default for template

* fmt

* test passing with new host as default tags

* use host tag in api integration test

* Limit 80 chars per line, change resolution to be an int in the sample

* fmt

* remove resolution, doc for template

* fmt
2016-08-09 08:29:15 +01:00
Mariusz Brzeski
3853d0d065 Fix problem with metrics when ping returns Destination net unreachable (windows) (#1561)
* Fix problem with metrics when ping returns Destination net unreachable
Add test case TestUnreachablePingGather
Add percent_reply_loss
Fix some other tests

* Add errors measurement

* fix problem with ping reply "TTL expired in transit" (use regex for a more specific condition - TTL is in the line but it's not a valid reply)
add test case for "TTL expired in transit" - TestTTLExpiredPingGather
2016-08-09 08:27:30 +01:00
Patrick Hemmer
53e31cf1b5 Fix postgres extensible text (#1601)
* convert postgresql_extensible byte slice values to strings

* code cleanup in postgresql_extensible
2016-08-09 08:25:59 +01:00
Cameron Sparr
c99c22534b influxdb output: config doc update 2016-08-09 07:50:35 +01:00
Cameron Sparr
8e22526756 Adding c:\program files\telegraf\telegraf.conf
this will now be the default config file location on windows, basically
it is the windows equivalent of /etc/telegraf/telegraf.conf

also updating the changelog

closes #1543
2016-08-08 23:17:27 +01:00
Dennis Bellinger
7b6713b094 Telegraf support for built-in windows service.
Updated windows dependencies

Updated the windows dependencies so that the versions matched the
dependencies for Mac OS and Linux. Additionally added some that the
compiler complained were missing.

Incorporated kardianos/service for management

Incorporated the library github.com/kardianos/service to manage the
service on the various platforms (including Windows). This required an
alternate main function.

The original main function was renamed to reloadLoop (as that is what
the main loop in it does) (it also got a couple of parameters). The
service management library calls it as the main body of the program.

Merged service.go into telegraf.go

Due to compilation issues on Windows, moved the code from service.go
into telegraf.go and removed service.go entirely.

Updated dependencies and fixed Windows service

Updated the dependencies so that it builds properly on Windows,
additionally, fixed the registered command for starting it as
a service (needed to add the config file option). This currently
standardizes it as C:\telegraf\telegraf.conf on Windows.

Added dependency for github.com/kardianos/service

Removed the common dependencies from _windows file

Removed all the common dependencies from the Godeps_windows file and
modified Makefile to load Godeps and then Godeps_windows when building
for Windows. This should reduce problems caused by the Godeps_windows
file being forgotten when updating dependencies.

Updated CHANGELOG.md with changes

Ran `go fmt ./...` to format code

Removed service library on all but Windows

The service library [kardianos/service](github.com/kardianos/service)
has been disabled on all platforms but windows, as there is already
existing infrastructure for other platforms.

Removed the dependency line for itself

It appears that gdm accidentally added the project itself to the
dependency list. This caused the dependency restoration to select an
earlier version of the project during build.

This only affected windows.
This only affected builds after 020b2c70

Updated documentation for Windows Service

Removed the documentation about using NSSM and added documentation on
installing telegraf directly as a Windows Service.

Added license info for kardianos/service

Added the license information for github.com/kardianos/service, which is
licensed under the ZLib license; although that name is never mentioned,
the license text matches word for word.

Changed the Windows Config file default location

Updated the default location of the configuration file on Windows from
C:\telegraf\telegraf.conf to C:\Program Files\Telegraf\telegraf.conf.
With this change includes updating the directions, including directing
that the executable be put into that same directory. Additionally, as
noted in the instructions, the location of the config file for the
service may be changed by specifying the location with the `-config`
flag at install time.

Fixed bug - Wrong data type: svcConfig

svcConfig service.Config => svcConfig *service.Config
(It needed to be a pointer)
2016-08-08 23:10:39 +01:00
Jack Zampolin
b0ef506a88 Add Kafka output readme (#1609) 2016-08-08 23:10:07 +01:00
Jack Zampolin
22c293de62 Add request for sample queries (#1608) 2016-08-08 23:06:03 +01:00
Cameron Sparr
d3bb1e7010 Rename internal_models package to models 2016-08-08 14:41:40 +01:00
Cameron Sparr
49988b15a3 Default config typo fix 2016-08-06 07:40:28 +01:00
Cameron Sparr
f0357b7a12 CHANGELOG formatting update
put all 1.0 beta releases into a single 1.0 release manifest

also add #1586 change
2016-08-05 14:51:19 +01:00
Cameron Sparr
9d3ad6309e Remove IF NOT EXISTS from influxdb output 2016-08-05 13:55:02 +01:00
Cameron Sparr
b55e9e78e3 gopsutil, fix /proc/pid/io naming issue
closes #1584
2016-08-05 09:53:14 +01:00
Cameron Sparr
4bc6fdb09e Removing INFLUXDB_HTTP_LOG from logparser usage/docs
this log format is likely soon going to be removed from a future
influxdb release, so we should not be recommending that users base any
of their log parsing infra on this.
2016-08-04 16:42:59 +01:00
Cameron Sparr
2b43b385de Begin implementing generic timestamp logparser capability 2016-08-04 16:08:55 +01:00
Cameron Sparr
13865f9e04 Disable darwin builds (#1571)
telegraf can't be cross-compiled for darwin, it has C dependencies and
thus many of the system plugins won't work.
2016-08-04 14:27:33 +01:00
Jack Zampolin
497353e586 add call to action for plugin contribuitors to write tickscripts (#1580) 2016-08-04 14:27:06 +01:00
Cameron Sparr
2d86dfba8b Removing deprecated flags
they are:
  -configdirectory
  -outputfilter
  -filter
2016-08-03 13:08:06 +01:00
Cameron Sparr
30dbfd9af8 Fix racy tail from beginning test 2016-07-28 14:08:12 +01:00
Cameron Sparr
c991b579d2 tcp/udp listeners, remove locks & improve test coverage 2016-07-28 13:42:34 +01:00
Srini Chebrolu
841729c0f9 RPM post remove script update for proper handle on all Linux distributions (#1381) 2016-07-28 08:34:57 +01:00
Victor Garcia
412f5b5acb Fixing changelog, MongoDB stats per db feature not release in 1.0beta3 (#1548) 2016-07-26 19:15:40 +01:00
Mariusz Brzeski
0b3958d3cd Ping windows (#1532)
* Ping for windows

* En ping output

* Code format

* Code review

* Default timeout

* Fix problem with std error when no data received ( exit status = 1 )
2016-07-25 13:17:41 +01:00
Patrick Hemmer
e68f251df7 add AddError method to accumulator (#1536) 2016-07-25 13:09:49 +01:00
Jason Gardner
986735234b Fix output config typo. (#1527) 2016-07-22 16:05:53 +01:00
Patrick Hemmer
4363eebc1b update gopsutil for FreeBSD disk time metrics (#1534)
Results in adding the io_time metric to FreeBSD, and adjusts the read_time and write_time metrics to be in milliseconds like linux.
2016-07-22 09:23:45 +01:00
Patrick Hemmer
1be6ea5696 remove unused accumulator.prefix (#1535) 2016-07-22 09:22:52 +01:00
Cameron Sparr
8acda0da8f Update etc/telegraf.conf 2016-07-21 17:53:41 +01:00
Łukasz Harasimowicz
ee240a5599 Added metrics for Mesos slaves and tasks running on them.
closes #1356
2016-07-21 17:13:00 +01:00
Mendelson Gusmão
29ea433763 Implement support for fetching hddtemp data (#1411) 2016-07-21 17:00:54 +01:00
Pierre Fersing
0462af164e Added option "total/perdevice" to Docker input (#1525)
Like the cpu plugin, add two options, "total" and "perdevice", to send network
and diskio metrics either per device and/or as the sum of all devices.
2016-07-21 16:50:12 +01:00
Cameron Sparr
1c24665b29 Prometheus client & win_perf_counters char changes
1. in prometheus client, do not check for invalid characters anymore,
because we are already replacing all invalid characters with regex
anyway.
2. in win_perf_counters, sanitize field name _and_ measurement name.
Also add '%' to the list of sanitized characters, because this character
is invalid for most output plugins, and can also easily cause string
formatting issues throughout the stack.
3. All '%' will now be translated to 'Percent'

closes #1430
2016-07-21 16:24:19 +01:00
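A sketch of this sanitization using a string replacer; beyond '%' -> 'Percent', the exact replacement set is an illustrative assumption:

```go
import "strings"

// sanitizer rewrites characters that are invalid for most output plugins.
// '%' is spelled out as 'Percent' rather than dropped so the meaning survives.
var sanitizer = strings.NewReplacer(
	"%", "Percent",
	" ", "_",
	".", "_",
	",", "_",
)

func sanitizeName(name string) string {
	return sanitizer.Replace(name)
}
```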
Torsten Rehn
0af0fa7c2e jolokia: handle multiple multi-dimensional attributes (#1524)
fixes #1481
2016-07-20 14:47:04 +01:00
Cameron Sparr
191608041f Strip container_version from container_image tag
closes #1413
2016-07-19 17:57:40 +01:00
Pierre Fersing
42d9d5d237 Fix Redis url, an extra "tcp://" was added (#1521) 2016-07-19 15:24:10 +01:00
Cameron Sparr
d54b169d67 nstat: fix nstat setting path for snmp6
closes #1477
2016-07-19 14:51:36 +01:00
Cameron Sparr
82166a36d0 Fix err race condition and partial failure issues
closes #1439
closes #1440
closes #1441
closes #1442
closes #1443
closes #1444
closes #1445
2016-07-19 14:45:55 +01:00
Victor Garcia
cbf5a55c7d MongoDB input plugin: Adding per DB stats (#1466) 2016-07-19 12:47:12 +01:00
Cameron Sparr
5f14ad9fa1 clean up and finish aerospike refactor & readme 2016-07-19 11:36:41 +01:00
Timothée GERMAIN
0be69b8a44 Make the user able to specify the full path for HAproxy stats
closes #1499
closes #1019

Do not try to guess the HAproxy stats url; just add ";csv" at the end of the
url if not present.

Signed-off-by: tgermain <timothee.germain@corp.ovh.com>
2016-07-19 11:35:15 +01:00
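A sketch of the suffix handling described above:

```go
import "strings"

// csvURL appends the ";csv" suffix HAproxy requires for CSV stats output,
// but only when the user-supplied URL doesn't already end with it.
func csvURL(addr string) string {
	if !strings.HasSuffix(addr, ";csv") {
		addr += ";csv"
	}
	return addr
}
```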
Matt Jones
375710488d Add support for self-signed certs to RabbitMQ input plugin (#1503)
* add initial support to allow self-signed certs

When using self-signed certs the metrics collection will fail; this allows
the user to specify in the input configuration file whether they want to skip
certificate verification. This is functionally identical to `curl -k`

At some point this functionality should be moved to the agent as it is
already implemented identically in several different input plugins.

* Add initial comment strings to remove noise

These should be properly fleshed out at some point to ensure
code completeness

* refactor to use generic helper function

* fix import statement against fork

* update changelog
2016-07-19 10:24:06 +01:00
Cameron Sparr
03d02fa67a Telegraf v1.0 beta 3 2016-07-18 18:20:41 +01:00
Cameron Sparr
b58cd78c79 Use errchan in redis input plugin
this may address, or at least log issue #1462
2016-07-18 17:26:44 +01:00
Cameron Sparr
dabb6f5466 Internally name all patterns for log parsing flexibility
closes #1436

This also fixes the bad behavior of waiting until runtime to return log
parsing pattern compile errors when a pattern simply could not be found.

closes #1418

Also protect against user error when the telegraf user does not have
permission to open the provided file. We will now error and exit in this
case, rather than silently waiting to get permission to open it.
2016-07-18 15:44:58 +01:00
Cameron Sparr
281a4d5500 Change resp_code from field to tag in logparser
closes #1479
2016-07-18 13:33:11 +01:00
François de Metz
1c2965703d Webhooks plugin: add mandrill (#1408)
* Add mandrill webhook.

* Store the id of the msg as part of event.

Signed-off-by: Cyril Duez <cyril@stormz.me>
Signed-off-by: François de Metz <francois@stormz.me>

* Decode body to get the mandrill_events.

Signed-off-by: Cyril Duez <cyril@stormz.me>
Signed-off-by: François de Metz <francois@stormz.me>

* Handle HEAD request.

Signed-off-by: Cyril Duez <cyril@stormz.me>
Signed-off-by: François de Metz <francois@stormz.me>

* Add the README.

Signed-off-by: Cyril Duez <cyril@stormz.me>
Signed-off-by: François de Metz <francois@stormz.me>

* Add mandrill_webhooks to the README.

Signed-off-by: Cyril Duez <cyril@stormz.me>
Signed-off-by: François de Metz <francois@stormz.me>

* Update changelog.

Signed-off-by: Cyril Duez <cyril@stormz.me>
Signed-off-by: François de Metz <francois@stormz.me>

* Run gofmt.

Signed-off-by: Cyril Duez <cyril@stormz.me>
Signed-off-by: François de Metz <francois@stormz.me>
2016-07-18 12:41:13 +01:00
Cameron Sparr
5dc4cce157 Fixup adding 'measurement' to logparser grok
closes #1434
2016-07-18 12:28:55 +01:00
Nathaniel Cook
8c7edeb53b allow measurement to be defined for logparser_grok plugin 2016-07-18 12:20:24 +01:00
Tim Allen
1d9745ee98 Move exec WaitGroup from Exec instance level to Gather.
If Gather is run concurrently, the shared WaitGroup variable never finishes.

closes #1463
closes #1464
2016-07-18 12:18:14 +01:00
Mark McKinstry
2d6c8767f7 add ability to read redis from a socket (#1480)
* add ability to read redis from a socket

* update CHANGELOG
2016-07-18 12:03:39 +01:00
Cameron Sparr
b4a6d9c647 Change prometheus replacer to reverse regex replacer
closes #1474
2016-07-18 11:50:22 +01:00
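A sketch of a "reverse" replacer: rather than enumerating bad characters, replace everything outside the character set the Prometheus data model allows (the exact character class used by the plugin may differ):

```go
import "regexp"

// invalidNameChars matches any character that is not legal in a Prometheus
// metric name, so future odd inputs are handled by default.
var invalidNameChars = regexp.MustCompile(`[^a-zA-Z0-9_:]`)

func sanitizeMetricName(name string) string {
	return invalidNameChars.ReplaceAllString(name, "_")
}
```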
ashish
6afe9ceef1 cassandra plugin lower version support added
closes #1427
closes #1508
2016-07-18 09:22:20 +01:00
Cameron Sparr
704d9ad76c Refactor aerospike plugin to use client lib 2016-07-16 22:15:37 +01:00
tuier
300d9adbd0 Consider zookeeper's state as a tag (#1417)
This change will send the state of zookeeper (leader|follower) as a tag
and not a metric.
That way it will be easier to search and filter per state
2016-07-16 19:19:21 +01:00
Pierre Fersing
207c5498e7 Remove systemd Install alias (#1470)
Alias is a list of additional names. Adding its canonical name
causes `systemctl enable telegraf` to show a warning "Too many levels of
symbolic links"
2016-07-14 15:53:05 -06:00
Cameron Sparr
d5e7439343 procstat plugin: store PID as a field
closes #1460
2016-07-14 15:52:02 -06:00
Joel Meador
21add2c799 instrumental plugin, rewrite connection retries
closes #1412

separate hello and authenticate functions,
force connection close at end of write cycle so we don't
hold open idle connections,
which has the benefit of mostly removing
the chance of getting hopelessly disconnected

bump instrumental agent version

fix test to deal with better connect/reconnect logic and changed ident & auth handshake

Update CHANGELOG.md

correct URL from instrumental fork to origin and put the change in the correct part of the file

go fmt

undo split hello and auth commands, to reduce roundtrips
2016-07-14 15:18:31 -06:00
Shashank Sahni
4651ab88ad Fetching galera status metrics in MySQL
These are useful for Percona Xtradb cluster.

closes #1437
2016-07-14 15:02:45 -06:00
Sebastian Borza
53f40063b3 Moving cgroup path name to field from tag to reduce cardinality (#1457)
adding assertContainsFields function to cgroup_test for custom validation
2016-07-14 14:18:55 -06:00
Andrei Burd
97d92bba67 Redis input enhancement (#1387)
master_last_io_seconds_ago added
role tag renamed to replication_role
2016-07-14 13:28:36 -06:00
Cameron Sparr
bfdd665435 Copy metrics for each configured output
This is for better thread-safety when running with multiple outputs,
which can cause very odd panics at very high loads

primarily this is to address #1432

closes #1432
2016-07-14 09:16:29 -06:00
Cameron Sparr
821d3fafa6 Refactor SerializeBucketName to be read-only for struct fields 2016-07-14 09:16:29 -06:00
Cameron Sparr
7c9b312cee Make race detector build in CI 2016-07-14 09:16:29 -06:00
Cameron Sparr
69ab8a645c graphite output: set write deadline on TCP connection 2016-07-14 09:16:29 -06:00
Kostas Botsas
7b550c11cb Documentation for load balancing on graphite output servers (#1469)
* Added documentation for load balancing on graphite output servers

* clarifications

* updates1

* updates2

* updates3
2016-07-14 09:06:00 -06:00
Cameron Sparr
bb4f18ca88 temp ci fix, aerospike changed their metrics
see http://www.aerospike.com/docs/operations/upgrade/stats_to_3_9

TODO change aerospike input plugin to use official go client library.
2016-07-14 08:52:37 -06:00
Cameron Sparr
6efe91ea9c prometheus_client, implement Collector interface
closes #1334
2016-07-13 06:52:18 -06:00
Vladimir S
5f0a63f554 fixes #1450 (#1472) 2016-07-10 13:17:53 +01:00
François de Metz
d14e7536ab Cleanup the list of plugins. (#1423)
Github and Rollbar are now part of the webhooks plugin.
2016-07-10 12:12:33 +02:00
Jack Zampolin
c873937356 Add note about influxdb compatability (#1465) 2016-07-10 12:11:43 +02:00
Cameron Sparr
e1c3800cd9 Prometheus parser fix, parse headers properly
closes #1458
2016-07-09 22:34:59 +02:00
Kostas Botsas
c046232425 Merge pull request #1426 from influxdata/metrics-panic
nil metric list panic fix
2016-06-29 13:50:11 +03:00
Cameron Sparr
2d4864e126 nil metric list panic fix 2016-06-29 12:08:36 +02:00
Rene Zbinden
048448aa93 add build directory to git ignore (#1415) 2016-06-25 11:17:51 +01:00
Cameron Sparr
755b2ec953 fixup: BOM Trim -> TrimPrefix 2016-06-24 08:47:31 +01:00
Cameron Sparr
f62c493c77 Recover from prometheus multiple handler panic
closes #1339
2016-06-23 14:29:35 +01:00
Jonathan Chauncey
a6365a6086 feat(nsq_consumer): Add input plugin
to consume metrics from an nsqd topic

closes #1347
closes #1369
2016-06-23 14:06:36 +01:00
Cameron Sparr
f7e057ec55 refactor cgroup build so non-linux systems see plugin
also updated the README for the fields->files change.
2016-06-23 11:47:25 +01:00
Cameron Sparr
30cc00d11b Update changelog, etc/telegraf.conf 2016-06-23 10:28:38 +01:00
Cameron Sparr
d641c42029 cgroup: change fields -> files
closes #1103
closes #1350
2016-06-23 10:23:59 +01:00
Vladimir Sagan
9c2ca805da Remove flush_scope logic 2016-06-23 10:13:31 +01:00
Vladimir Sagan
b0484d8a0c add cgroup plugin 2016-06-23 10:13:31 +01:00
Cameron Sparr
5ddd61d2e2 Trim BOM from config file for windows support
closes #1378
2016-06-23 09:00:51 +01:00
Victor Garcia
50ea7f4a9d x509 certs authentication now supported for Prometheus input plugin (#1396) 2016-06-23 08:59:44 +01:00
Thibault Cohen
b18134a4e3 Fix #1405 (#1406) 2016-06-23 08:59:14 +01:00
Cameron Sparr
7825df4771 Fix darwin ping tests 2016-06-22 18:21:07 +01:00
Cameron Sparr
d6951dacdc Remove docker-machine/boot2docker dependencies & references 2016-06-22 17:25:01 +01:00
François de Metz
e603825e37 Add new webhooks plugin that supersedes the github and rollbar plugins.
closes #1289

Signed-off-by: François de Metz <francois@stormz.me>
Signed-off-by: Cyril Duez <cyril@stormz.me>

Rename internals struct.

Signed-off-by: François de Metz <francois@stormz.me>
Signed-off-by: Cyril Duez <cyril@stormz.me>

Update changelog.

Signed-off-by: François de Metz <francois@stormz.me>
Signed-off-by: Cyril Duez <cyril@stormz.me>

Update READMEs and CHANGELOG.

Signed-off-by: François de Metz <francois@stormz.me>
Signed-off-by: Cyril Duez <cyril@stormz.me>

Update SampleConfig.

Update the config format.

Update telegraf config.

Update the webhooks README.

Update changelog.

Update the changelog with an upgrade path.

Update default ports.

Fix indent.

Check for nil value on AvailableWebhooks.

Check for CanInterface.
2016-06-22 17:18:14 +01:00
Mike Glazer
e3448153e1 Allow for TLS connections to ElasticSearch (#1398)
* Allow for TLS connections to ElasticSearch

Extremely similar to the HTTP JSON module's implementation of the
same code.

* Changelog update
2016-06-22 16:23:49 +01:00
jsvisa
25848c545a Fix: riak with read_repairs available
closes #1399
2016-06-22 14:56:44 +01:00
Konstantin Kulikov
3098564896 fix datarace in input apache plugin
closes #1384
2016-06-22 14:42:47 +01:00
Stian Øvrevåge
4b6f9b93dd Updated sqlserver.go - Added Rows/Logs max size (#1380)
I added Rows/Logs max size counters for tracking databases that do not have autogrowth enabled. The counters return numbers in 8KB pages since there are a few special values (such as -1 for no max size) that can't directly be multiplied by 8192 to get size in bytes.

Also added Rows/Logs size in 8KB pages for comparison from the same system table, even though it returns the same sizes as sys.dm_io_virtual_file_stats, which are already collected.
2016-06-22 14:39:35 +01:00
Cameron Sparr
2beef21231 Beta 2 Release 1.0 2016-06-21 14:35:26 +01:00
Cameron Sparr
cb3c54a1ae logparser input plugin
closes #102
closes #328
2016-06-21 14:23:01 +01:00
Iiro Uusitalo
d50a1e83ac Added support for Tengine (#1390)
* Adds support for Tengine

* Added #1390 Tengine PR to changelog
2016-06-21 14:22:51 +01:00
Cameron Sparr
1f10639222 Fix Graphite output mangling '%' character.
closes #1377
2016-06-21 11:52:49 +01:00
Cameron Sparr
af0979cce5 change "default" retention policy to ""
closes #1374
2016-06-16 12:22:27 +01:00
Cameron Sparr
5b43901bd8 update issue_template.md 2016-06-14 18:17:11 +01:00
Cameron Sparr
d7efb7a71d Add precision rounding to accumulator
Adding precision rounding to the accumulator. This means that now every
input metric will get rounded at collection, rather than at write (and
only for the influxdb output).

This feature is disabled for service inputs, because service inputs
should be in control of their own timestamps & precisions.
2016-06-14 00:36:39 +01:00
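A sketch of rounding at collection time, assuming the configured precision arrives as a time.Duration:

```go
import "time"

// roundTime rounds a metric timestamp to the configured precision, e.g. a
// precision of time.Second rounds to the nearest whole second.
func roundTime(t time.Time, precision time.Duration) time.Time {
	if precision <= 0 {
		return t // precision unset: leave the timestamp untouched
	}
	return t.Round(precision)
}
```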
Adrian Moisey
4d242836ee Fix typo (#1367)
* Fix typo

* Fix another typo
2016-06-13 10:38:58 +01:00
Cameron Sparr
06cb5a041e statsd, udp, tcp: do not log every dropped metric.
also applying this change to the udp_listener and tcp_listener input
plugins

closes #1340
2016-06-10 13:47:33 +01:00
Cameron Sparr
ea2521bf27 Fixup ping change
fixes #1335
2016-06-10 13:05:28 +01:00
kodek
4cd1f7a104 Increase ping timeout based on ping count and interval 2016-06-10 12:49:37 +01:00
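Illustrative arithmetic only, not necessarily the plugin's exact formula: a run of count pings spaced interval seconds apart needs at least count*interval seconds, plus some slack for the final reply:

```go
// pingTimeout derives a timeout from the ping count and interval instead of
// using a fixed value; the 5-second slack is an illustrative assumption.
func pingTimeout(count int, intervalSeconds float64) float64 {
	return float64(count)*intervalSeconds + 5.0
}
```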
Cameron Sparr
137843b2f6 Change default zookeeper chroot to empty string
closes #1112
2016-06-10 12:07:36 +01:00
Cameron Sparr
008ed17a79 Fix exec plugin panic with single binary
fixes #1330
2016-06-10 11:27:46 +01:00
Tobias Schoknecht
75e6cb9064 Fixed incorrect prometheus metrics source selection (#1337)
Metrics of type summary should retrieve values via GetSummary,
while histogram should retrieve values via GetHistogram, for
both count and sum
2016-06-09 22:50:00 +01:00
Cameron Sparr
ad88a9421a Beta 1 Release 1.0 2016-06-07 10:48:17 +01:00
Cameron Sparr
346deb30a3 OpenTSDB test problems, disabling output integration tests 2016-06-07 10:39:25 +01:00
Cameron Sparr
8c3d7cd145 Fix rare panic in RHEL 5.2 diskio plugin (#1327)
closes #1322
2016-06-03 14:28:47 +01:00
Cameron Sparr
821b30eb92 Add timeout param to exec readme (#1325) 2016-06-03 13:32:16 +01:00
Cameron Sparr
a362352587 Use glob match for finding /proc/<pid>/stat files
closes #1323
2016-06-03 13:31:31 +01:00
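A sketch of the glob approach; the pattern matches /proc entries whose names start with a digit, i.e. PID directories:

```go
import "path/filepath"

// procStatFiles finds per-process stat files with a single glob instead of
// listing /proc and filtering entries by hand.
func procStatFiles() ([]string, error) {
	return filepath.Glob("/proc/[0-9]*/stat")
}
```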
Ross McDonald
94f952787f Add statically-linked amd64 builds to default build targets.
Remove version and iteration from root packaging folder. (#1318)

closes #1201
2016-06-02 16:14:18 +01:00
Pierre Fersing
3ff184c061 Removed leaked "database" tag on redis metrics (#1316) 2016-06-02 14:25:23 +01:00
Meng Ye
80368e3936 fix used_percent Calculation formula (#1313) 2016-06-02 14:24:48 +01:00
Cameron Sparr
2c448e22e1 New object: ErrChan for concurrent err handling 2016-06-02 13:29:37 +01:00
Ali Alrahaleh
1aabd38eb2 Add graylog input pluging change log (#1309) 2016-06-02 13:13:17 +01:00
Cameron Sparr
675457873a haproxy input: fix potential race condition 2016-06-02 11:22:07 +01:00
Cameron Sparr
8173338f8a fix build flags 2016-06-01 18:58:54 +01:00
Cameron Sparr
c4841843a9 Create dummy zfs plugin file 2016-06-01 18:53:29 +01:00
Cameron Sparr
f08a27be5d graylog input doc tweaks
closes #1261
2016-06-01 18:44:18 +01:00
Ali Alrahahleh
a4b36d12dd add graylog plugin
add unit test for graylog
2016-06-01 18:21:09 +01:00
Cameron Sparr
c842724b61 Fix graylog test race 2016-06-01 16:32:38 +01:00
Cameron Sparr
fb5f40319e update gitattributes for easier fork mngmnt 2016-06-01 16:18:17 +01:00
Cameron Sparr
52b9fc837c Adding active & inactive memory to mem plugin
closes #1213
2016-06-01 16:04:20 +01:00
Cameron Sparr
6f991ec78a Sleep longer in tail test 2016-06-01 15:49:32 +01:00
Łukasz Harasimowicz
7921d87a45 Added Consul health checks state monitoring. (#1294) 2016-06-01 11:02:28 +01:00
Rickard von Essen
9f7a758bf9 RFR: Initial support for ZFS on FreeBSD (#1224)
* WIP: Initial support for ZFS on FreeBSD

* Added build directives

* Ignore 'kstatPath' config option on FreeBSD

* Added tests for ZFS FreeBSD input plugin.

* Updated the README to confrom with the guidelines and added FreeBSD info

* Fixed indents

* Spell check
2016-05-31 17:49:56 +01:00
Cameron Sparr
0aff7a0bc1 Disk plugin: return immediately if usage fails
closes #1297
2016-05-31 17:17:06 +01:00
Cameron Sparr
c4cfdb8a25 Revert "Revert graylog output"
This reverts commit 4f27315720.
2016-05-31 16:45:14 +01:00
Cameron Sparr
342cfc4087 ReAdd gelf serializer & graylog output filter. (#1299)
This reverts commit 958ef2f872.
2016-05-31 16:41:27 +01:00
Cameron Sparr
bd1282eddf Don't print config with trailing whitespace 2016-05-31 16:25:02 +01:00
Cameron Sparr
892abec025 Refactor collection_jitter and flush_jitter
use a common function between collection_jitter and flush_jitter, which
creates the same behavior between the two options.

going forward, both jitters will be random sleeps that get re-evaluated
at runtime for every interval (previously only collection_jitter did
this)

also fixes behavior so that both jitters will exit in the event of a
process exit.

closes #1296
2016-05-31 14:24:32 +01:00
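A sketch of the shared jitter behavior: a random sleep drawn fresh on each call that also aborts promptly on shutdown (the shutdown channel is illustrative):

```go
import (
	"math/rand"
	"time"
)

// sleepJitter sleeps for a random duration in [0, max), re-evaluated on every
// call, and returns early if the process is shutting down.
func sleepJitter(max time.Duration, shutdown <-chan struct{}) {
	if max <= 0 {
		return
	}
	jitter := time.Duration(rand.Int63n(int64(max)))
	select {
	case <-time.After(jitter):
	case <-shutdown:
	}
}
```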
Martin Seener
e809c4e445 Also added reasonable default for influxdb input plugin
to simplify configuration for most users

closes #1295
2016-05-31 13:41:02 +01:00
Cameron Sparr
9ff536d94d Limit GetMetricStatistics to 10 per second
closes #1197
2016-05-31 11:26:52 +01:00
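A sketch of ticker-based rate limiting; the request closures stand in for the actual GetMetricStatistics calls, and perSecond is assumed to be positive:

```go
import "time"

// rateLimit issues calls no faster than perSecond by waiting for a ticker
// slot before each one.
func rateLimit(calls []func(), perSecond int) {
	ticker := time.NewTicker(time.Second / time.Duration(perSecond))
	defer ticker.Stop()
	for _, call := range calls {
		<-ticker.C // wait for the next slot
		call()
	}
}
```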
Cameron Sparr
4f27315720 Revert graylog output 2016-05-31 11:23:01 +01:00
Cameron Sparr
958ef2f872 Revert "Add gelf serializer & graylog output filter." (#1299) 2016-05-31 11:21:20 +01:00
Cameron Sparr
069764f05e Update README & etc/telegraf.conf 2016-05-31 11:02:10 +01:00
vanillahsu
eeeab5192b Add gelf serializer & graylog output filter. (#1167)
* add gelf serializer.

* change url.

* handle fields in correct format.

* add graylog.

* handle host field of graylog.

* 1: Add go-gelf entry to Godeps to fix ci.
2: switch to github.com/Graylog2/go-gelf.

* implement Close().

* Deprecated gelf serializer, and back to graylog-golang.

* Update graylog-golang's hash.

* move gelf related function to graylog.go.

* 1: remove uneeded deps on Godeps_windows.
2: add README.md
3: add unittest.

* Fix unittest on 'go test -race'
2016-05-31 10:58:35 +01:00
robinpercy-xm
a7dfbce3d3 Addressing PR feedback
- Updated README/CHANGELOG
- Added links to further info to input README
- Reduced lines to 80 chars

Removing input declaration from SampleConfig

Moved PR to unreleased section of changelog

closes #1165
2016-05-31 10:47:26 +01:00
Jan Shim
ed2d1d9bb7 Add kernel_vmstat input plugins 2016-05-31 10:46:34 +01:00
Robin Percy
0fb2d2ffae Adding a conntrack input plugin
- Collects conntrack stats from the configured directories and files.

Applying PR feedback:

- Rebased onto master
- Updated README/CHANGELOG
- Limited lines to 80 chars
- Improved plugin docs and README
- added a dummy notlinux build file

Fixed up CHANGELOG and README after rebase

closes #1164
2016-05-31 10:42:19 +01:00
Ranjib Dey
3af65e7abb Fix typo in output plugin example (#1290) 2016-05-27 17:44:41 +01:00
Martin
984b6cb0fb Made the apache input’s urls parameter optional by using a reasonable default for most users (#1288) 2016-05-27 16:12:36 +01:00
Björn Lichtblau
ca504a19ec Use optimeDate to get MongoTimestamp (mongo input plugin). (#1281) 2016-05-27 11:57:17 +01:00
Lukasz Jagiello
c2797c85d1 Updated documentation. (#1284) 2016-05-26 19:31:51 +01:00
Pierre Fersing
d5add07c0b processes: Don't return error if process exited (#1283) 2016-05-26 17:58:20 +01:00
Kostas Botsas
0ebf1c1ad7 write_consistency documentation (#1282)
Added write_consistency to InfluxDB output documentation
2016-05-26 17:23:01 +01:00
Martin Seener
42d7fc5e16 Use the DefaultURL parameter if no url is explicitly set by the user
closes #1278
closes #1277
2016-05-26 12:14:43 +01:00
Jari Sukanen
6828fc48e1 exec plugin: allow using glob pattern in command list
Allow using glob pattern in the command list in configuration. This enables, for
example, placing all commands in a single directory and using /path/to/dir/*.sh
as one of the commands to run all shell scripts in that directory.

Glob patterns are applied on every run of the commands, so matching commands can
be added without restarting telegraf.

closes #1142
2016-05-26 11:38:15 +01:00
Pierre Fersing
98d91b1c89 Fix reloading Telegraf under systemd (#1279) 2016-05-26 11:32:05 +01:00
Cameron Sparr
9bbdb2d562 Allow wildcard filtering of varnish stats
closes #1275
2016-05-26 10:42:34 +01:00
Rene Zbinden
a8334c3261 add option to disable dns lookup for chronyc
closes #1265
2016-05-25 18:58:56 +01:00
Cameron Sparr
9144f9630b graphite parser: support multiple tag keys
closes #1272
2016-05-25 17:11:28 +01:00
Cameron Sparr
3e4a19539a http_response plugin: Add SSL config options
closes #1264
2016-05-25 13:44:36 +01:00
Cameron Sparr
5fe7e6e40e influxdb input: Use non-panicking type assertion
closes #1268
2016-05-25 13:32:10 +01:00
Cameron Sparr
58f2ba1247 kernel: use strconv.ParseInt instead of strconv.Atoi
closes #1258
2016-05-25 12:31:10 +01:00
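The distinction, in brief: strconv.Atoi returns a platform-sized int (32-bit on some systems), while ParseInt targets an explicit width:

```go
import "strconv"

// parseCounter parses a kernel counter into an explicit int64, so values
// beyond 2^31-1 don't overflow on 32-bit platforms the way Atoi's int can.
func parseCounter(s string) (int64, error) {
	return strconv.ParseInt(s, 10, 64)
}
```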
John Engelman
5f3a91bffd Consolidate AWS credentials (#1208)
* Use shared AWS credential configuration.

* Cloudwatch dimension wildcards

* Allow configuring cache_ttl for cloudwatch metrics.

* Allow for wildcard in dimension values to select all available metrics.

* Use internal.Duration for CacheTTL and go fmt

* Refactor to not use embedded structs for config.

* Update AWS plugin READMEs with credentials details, update Changelog.

* Fix changelog after rebasing to master and 0.13.1 release.

* Fix changelog after rebase.
2016-05-25 12:30:39 +01:00
Cameron Sparr
6351aa5167 only count shard if it's non-empty
closes #1221
2016-05-25 12:05:14 +01:00
Nick
9966099d1a Replace ":" with "_" in tags. This should make the mysql plugin work with the opentsdb output (it uses a "server" tag like "127.0.0.1:3306") (#1256) 2016-05-25 11:37:48 +01:00
Cameron Sparr
1ef5599361 update changelog & etc/telegraf.conf 2016-05-24 15:34:56 +01:00
Cyril Duez
c78b6cdb4e Add input plugin for rollbar service. (#1247)
* Report rollbar events.

Signed-off-by: Cyril Duez <cyril@stormz.me>
Signed-off-by: François de Metz <francois@stormz.me>

* Fix indent with go fmt.

* Add test for rollbar webhooks.

* Report more data from new_item event.

* Handle new deploy webhook.

Signed-off-by: Cyril Duez <cyril@stormz.me>
Signed-off-by: François de Metz <francois@stormz.me>

* Update default port.

* Add readme.

* Add rollbar_webhooks to the readme.

* Add rollbar_webhooks to plugins list.

* Add tag level for new_item event.

* Update readme.

* Update changelog.
2016-05-24 15:32:42 +01:00
Cameron Sparr
d736c7235a prevent potential tail datarace (#1254) 2016-05-24 15:16:33 +01:00
Rene Zbinden
475252d873 fix telegraf service (#1252) 2016-05-24 15:14:58 +01:00
Cameron Sparr
e103923430 Release 0.13.1 2016-05-24 12:04:48 +01:00
Cameron Sparr
cb59517ceb Update etc/telegraf.conf 2016-05-24 11:16:21 +01:00
robinpercy-xm
1248934f3e Adding Varnish HTTP Cache input plugin (#1173)
* Adding Varnish HTTP Cache input plugin

* Applying PR feedback

- Linked to varnish in input README
- Updated README/CHANGELOG
- Cleaned up sampleConfig to remove formatting
- Shorted lines to 80 chars (except where test input requires long strings)
- Using internal.RunTimeout to wrap call to varnishtat
- Added dummy file for windows
2016-05-24 11:06:25 +01:00
Cameron Sparr
204ebf6bf6 influxdb output: write consistency parameter
closes #1249
2016-05-24 10:50:27 +01:00
Rene Zbinden
52d5b19219 add chrony support (#1238)
* add chrony support

* remove path definition

* add changelog
2016-05-24 09:55:25 +01:00
Cameron Sparr
8e92d3a4a0 Log to /var/log/telegraf/telegraf.log on systemd
closes #1243
2016-05-23 18:00:59 +01:00
Cameron Sparr
c44ecf54a5 Utilize timeout in net_response plugin.
Also changing the net_response and http_response plugins to only accept
duration strings for their timeout parameters. This is a breaking config
file change.

closes #1214
2016-05-23 15:59:23 +01:00
Klaudiusz Staniek
c6699c36d3 Add the OctetString OID value support (#1242)
This update adds support for string values. Not sure why this was missed.
2016-05-23 11:21:53 +01:00
François de Metz
d6ceae7005 Refactor GitHub webhooks (#1240)
* Fix a typo.

* Extract similar code to generateEvent function.

* Remove functions to use generateEvent in the switch.

* Refactor tests.
2016-05-23 11:21:34 +01:00
Rene Zbinden
4dcb82bf08 fix interval rounding error
closes #1190
2016-05-23 11:20:01 +01:00
Cameron Sparr
4f5d5926d9 Set a timeout for calls to input.Gather
Changing the internal behavior around running plugins. Each plugin
will now have its own goroutine with its own ticker. This means that a
hung plugin will not block any other plugins. When a plugin is hung, we
will log an error message every interval, letting users know which
plugin is hung.

Currently the input interface does not have any methods for killing a
running Gather call, so there is nothing we can do but log an "ERROR"
and move on. This will give some visibility into the plugin that is
acting up.

closes #1230
fixes #479
2016-05-21 21:39:01 +01:00
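A sketch of the per-plugin watchdog described above: Gather runs in its own goroutine, and since it cannot be killed, a hung call is logged once per interval so the offending plugin is visible (names are illustrative):

```go
import (
	"log"
	"time"
)

// gatherWithTimeout waits for gather to finish, logging an error every
// interval while it is hung instead of blocking other plugins.
func gatherWithTimeout(name string, gather func() error, interval time.Duration) {
	done := make(chan error, 1)
	go func() { done <- gather() }()

	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case err := <-done:
			if err != nil {
				log.Printf("E! input [%s] gather error: %s", name, err)
			}
			return
		case <-ticker.C:
			log.Printf("E! input [%s] did not complete within its interval %s",
				name, interval)
		}
	}
}
```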
Cameron Sparr
3c5c3b98df update gopsutil to commit with timeout support
closes #1215
2016-05-21 21:00:51 +01:00
Cameron Sparr
56aee1ceee Update gopsutil dependency
closes #1233
2016-05-20 15:30:13 +01:00
Cameron Sparr
f176c28a56 http_response: override req.Host header properly
closes #1198
2016-05-19 13:19:51 +01:00
Cameron Sparr
2e68bd1412 don't overwrite host tags in plugins
closes #1227
closes #1210
2016-05-19 13:19:28 +01:00
Cameron Sparr
35eb65460d github issue and pr templates 2016-05-19 12:11:19 +01:00
Jared Biel
ab54064689 Procstat input plugin - functionality for overriding of process_name (#1192)
Being able to override the process_name in the procstat module
is useful for daemonized perl, ruby, erlang etc. processes. This
allows for manually setting process_name rather than it being set to
the interpreter/VM of the process.
2016-05-19 11:34:25 +01:00
Cameron Sparr
debf7bf149 ntpq input: ignore lines that are '-'
closes #1223
2016-05-18 22:20:47 +01:00
Kane Dou
1dbe3b8231 fix mongodb input concurrent map read/write
closes #1211
2016-05-18 21:23:39 +01:00
Cameron Sparr
b065573e23 influxdb input: Add shard counter
closes #1221
2016-05-18 19:31:36 +01:00
Cameron Sparr
e94e50181c update changelog, readme, telegraf.conf 2016-05-18 16:07:17 +01:00
robinpercy-xm
69dfe63809 Implemented ceph collector (#1172)
- records metrics from the output of mon and osd socket perf
  dumps.
2016-05-18 15:48:44 +01:00
Jason Roelofs
f32916a5bd Output stats to the Instrumental TCP Collector
closes #1139
2016-05-18 15:03:28 +01:00
Cameron Sparr
be7ca56872 Update README w/ tail plugin 2016-05-18 15:00:30 +01:00
Anthony Brodard
33cacc71b8 Add role tag to redis plugins (#1207)
fixes #1203

- Format code
2016-05-18 14:17:14 +01:00
Cameron Sparr
c292e3931a Remove ntpq state prefixes and make their own tag
closes #1161
2016-05-18 14:16:01 +01:00
François de Metz
a87d6f0545 Fix typo. (#1220) 2016-05-18 14:14:50 +01:00
Cameron Sparr
3a01b6d5b7 Update elasticsearch readme
closes #1145
2016-05-18 12:22:34 +01:00
Zdenek Styblik
39df2635bd Fix crash in Docker input plugin - Fixes #1195
The commit fixes a crash in the Docker input plugin caused by the fact that the return value
might be nil when an error occurs.

closes #1195
2016-05-18 11:27:06 +01:00
Jack Tench
08ecfb8a67 Replace sudo with su in init script
To avoid issues starting service when 'Defaults requiretty' is enabled in the sudoers file.

Fixes #1204
closes #1205
2016-05-17 18:31:49 +01:00
Baptiste Mille-Mathias
a59bf7246a Don't use root user as example
Using root as the user is a bad habit.

closes #1177
2016-05-17 18:30:15 +01:00
Cameron Sparr
281296cd3f changelog update 2016-05-17 17:05:27 +01:00
Jonathan A. Sternberg
61d190b1ae Add docker pull badge to the README 2016-05-17 17:02:42 +01:00
Cameron Sparr
dc89f029ad nstat plugin cleanups and formatting
closes #1104
closes #1138
2016-05-17 17:00:46 +01:00
Maksadbek
7557056a31 updated readme for nstat 2016-05-17 16:51:42 +01:00
Maksadbek
20c45a150c nstat plugin: reading files paths from env 2016-05-17 16:51:42 +01:00
Maksadbek
46bf0ef271 nstat input plugin 2016-05-17 16:51:42 +01:00
Brano Zarnovican
a7b632eb5e fix 0.13 download urls 2016-05-13 12:57:22 +02:00
Cameron Sparr
90a98c76a0 Finalize 0.13 release 2016-05-11 13:41:32 -07:00
Cameron Sparr
12357ee8c5 processes: add 'unknown' procs (?) 2016-05-11 11:52:29 -07:00
Cameron Sparr
bb254fc2b9 Default docker timeout in case one is not defined in config 2016-05-10 14:18:55 -07:00
Cameron Sparr
aeadc2c43a Update etc/telegraf.conf, mqtt_cons readme 2016-05-10 14:18:55 -07:00
Cameron Sparr
ed492fe950 update influxdb & gopsutil deps 2016-05-10 14:18:55 -07:00
Cameron Sparr
775daba8f5 Change Version->version for consistency w/ influxdb 2016-05-10 14:18:55 -07:00
Cameron Sparr
677dd7ad53 Release 0.13 2016-05-10 14:18:55 -07:00
Cameron Sparr
85dee02a3b snmp plugin: change host -> snmp_host
closes #1156
2016-05-10 14:18:00 -07:00
Cameron Sparr
afdebbc3a2 Make OidInstanceMapping a field of the snmp host
fixes #1171
2016-05-10 10:15:01 -07:00
Jörg Thalheim
5deb22a539 docker: add container_id also to per cpu stats
currently this field exists only for total cpu usage

closes #1168
2016-05-09 16:43:27 -07:00
Ross McDonald
36b9e2e077 Merge pull request #1157 from influxdata/ross-build-updates
Minor fixes to build script
2016-05-06 11:28:48 -05:00
Ross McDonald
5348937c3d Choose correct configuration when building for windows. 2016-05-06 10:46:29 -05:00
Ross McDonald
72fcacbbc7 Minor fixes to build script:
- Fix for --name build parameter
- Remove rc parameter from build script
- Fix regression on first-level tarball directory structure
- Convert any dashes/underscores in version tag to tilde
2016-05-05 14:02:34 -05:00
Lukasz Jagiello
4c28f15b35 Fix #1148 - chatty MySQL
Two additional config options to reduce MySQL metrics

With:
 - gather_table_lock_waits = false
 - gather_event_waits = false

```
| wc -l
34
```

With:
 - gather_table_lock_waits = true
 - gather_event_waits = true

```
| wc -l
50040
```

closes #1148
closes #1149
2016-05-04 10:23:54 -06:00
Marko Crnic
095ef04c04 Fix formatting for haproxy tests
closes #1146
2016-05-04 09:47:39 -06:00
Marko Crnic
7d49979658 Update haproxy input plugin sample configuration 2016-05-04 09:46:44 -06:00
Marko Crnic
7a36695a21 Add tests for haproxy stats socket 2016-05-04 09:46:44 -06:00
Marko Crnic
5865587bd0 Add stats socket support to haproxy plugin 2016-05-04 09:46:44 -06:00
Jörg Thalheim
219bf93566 docker: add container_id to blkio metrics
all container metrics except blkio have this field

closes #1150
2016-05-04 09:28:13 -06:00
Ross McDonald
8371546a66 Disable circle uploads to S3 until more testing can be done for external PR's. 2016-05-03 11:26:52 -05:00
John Engelman
0b9b7bddd7 Specify host mount prefix with envvar.
closes #1120
2016-05-02 15:53:30 -06:00
Rene Zbinden
4c8449f4bc fix sysstat timeout error
closes #1134
2016-05-02 15:17:11 -06:00
Ross McDonald
36d7b5c9ab Improvements to build.py:
- Now uses Python argparse for cleaner handling of arguments
- Added function documentation
- Removed a few unneeded functions
- Updated nightly logic to increment minor version
- Added support for building from specific branch or commit
- Changed --no-stash option to --no-uncommitted for clarity
- Added a --release flag, default package output will now contain the branch and commit hash in the version number
- Static builds are now listed as an architecture
- Changed default upload bucket to dl.influxdata.com/telegraf
- Don't include iteration in package name

closes #1040
2016-05-02 14:37:29 -06:00
Cameron Sparr
f2b0ea6722 value parser: doc & string handling 2016-05-02 12:17:20 -06:00
Cameron Sparr
46f4be88a6 Revert "exec plugin: allow using glob pattern in command list"
This reverts commit 6381efa7ce.
2016-05-02 12:07:17 -06:00
Jari Sukanen
6381efa7ce exec plugin: allow using glob pattern in command list
Allow using glob patterns in the command list in the configuration. This enables, for
example, placing all commands in a single directory and using /path/to/dir/*.sh
as one of the commands to run all shell scripts in that directory.

Glob patterns are applied on every run of the commands, so matching commands can
be added without restarting telegraf.

closes #1127
2016-05-02 11:36:15 -06:00
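A minimal Go sketch of the glob expansion this commit describes; expandCommands is a hypothetical helper, not the plugin's actual code:

```go
package main

import (
	"fmt"
	"path/filepath"
)

// expandCommands re-evaluates each configured command as a glob pattern on
// every run, so matching commands can be added without restarting telegraf.
// Entries that match nothing are kept as literal commands.
func expandCommands(patterns []string) []string {
	var cmds []string
	for _, p := range patterns {
		matches, err := filepath.Glob(p)
		if err != nil || len(matches) == 0 {
			cmds = append(cmds, p) // keep the literal command as-is
			continue
		}
		cmds = append(cmds, matches...)
	}
	return cmds
}

func main() {
	fmt.Println(expandCommands([]string{"/path/to/dir/*.sh", "/usr/bin/uptime"}))
}
```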
Pierre Fersing
85ee66efb9 "DELAYED" Inserts were deprecated in MySQL 5.6.6
closes #1136
2016-05-02 11:23:28 -06:00
Victor Garcia
40dccf5b29 Metric for MongoDB jumbo chunks
closes #1128
2016-05-01 14:27:27 -06:00
Cameron Sparr
c114849a31 Use a timeout for docker list & stat cmds
closes #1133
2016-05-01 10:26:46 -06:00
Cameron Sparr
4e9798d0e6 agent and tags configs sometimes not applied
closes #1090
2016-04-29 19:44:01 -06:00
Cameron Sparr
a30b1a394f Kafka output: set max_retry=3 & required_acks=-1 as defaults
closes #1113
2016-04-29 18:51:45 -06:00
Cameron Sparr
91460436cf Changelog update 2016-04-29 12:32:04 -06:00
Cameron Sparr
3f807a9432 Implement timeouts for all exec command runners
The first step is to write internal CombinedOutput and Run functions with a
timeout.

Second, the following instances of command runners need to have timeouts:

    plugins/inputs/ping/ping.go
    125:	out, err := c.CombinedOutput()

    plugins/inputs/exec/exec.go
    91:	if err := cmd.Run(); err != nil {

    plugins/inputs/ipmi_sensor/command.go
    31:	err := cmd.Run()

    plugins/inputs/sysstat/sysstat.go
    194:	out, err := cmd.CombinedOutput()

    plugins/inputs/leofs/leofs.go
    185:	defer cmd.Wait()

    plugins/inputs/sysstat/sysstat.go
    282:	if err := cmd.Wait(); err != nil {

closes #1067
2016-04-29 12:06:22 -06:00
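A hedged Go sketch of such a combined-output-with-timeout helper; CombinedOutputTimeout is a hypothetical name, not necessarily what the internal package implements:

```go
package main

import (
	"bytes"
	"fmt"
	"os/exec"
	"time"
)

// CombinedOutputTimeout runs cmd, capturing stdout and stderr, and kills the
// process if it runs past the given timeout.
func CombinedOutputTimeout(cmd *exec.Cmd, timeout time.Duration) ([]byte, error) {
	var buf bytes.Buffer
	cmd.Stdout = &buf
	cmd.Stderr = &buf
	if err := cmd.Start(); err != nil {
		return nil, err
	}
	timer := time.AfterFunc(timeout, func() { cmd.Process.Kill() })
	err := cmd.Wait()
	timer.Stop()
	return buf.Bytes(), err
}

func main() {
	out, err := CombinedOutputTimeout(exec.Command("sleep", "5"), time.Second)
	fmt.Printf("out=%q err=%v\n", out, err)
}
```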
Cameron Sparr
cbe32c7482 Support default config paths
precedence will be:

 1. --config command-line option
 2. $TELEGRAF_CONFIG_PATH
 3. $HOME/.telegraf/telegraf.conf
 4. /etc/telegraf/telegraf.conf
2016-04-28 17:48:24 -06:00
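A hedged Go sketch of that precedence; getDefaultConfigPath is a hypothetical helper, not Telegraf's exact implementation:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func getDefaultConfigPath(flagValue string) (string, error) {
	candidates := []string{
		flagValue,                         // 1. --config command-line option
		os.Getenv("TELEGRAF_CONFIG_PATH"), // 2. environment variable
		filepath.Join(os.Getenv("HOME"), ".telegraf", "telegraf.conf"), // 3.
		"/etc/telegraf/telegraf.conf",                                  // 4.
	}
	for _, p := range candidates {
		if p == "" {
			continue
		}
		if _, err := os.Stat(p); err == nil {
			return p, nil // first existing candidate wins
		}
	}
	return "", fmt.Errorf("no telegraf.conf found")
}

func main() {
	fmt.Println(getDefaultConfigPath(""))
}
```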
Cameron Sparr
5d3c582ecf changelog update 2016-04-28 17:34:37 -06:00
Cameron Sparr
3ed006d216 Sanitize invalid opentsdb characters
closes #1098
2016-04-28 17:01:50 -06:00
Cameron Sparr
3e1026286b skip network-dependent unit tests in short mode 2016-04-28 14:44:08 -06:00
Cameron Sparr
b59266249d README fixups for udp_listener, statsd inputs
closes #1119
2016-04-28 13:11:41 -06:00
G-regL
015261a524 Sanitize Field name
Replace '/[sS]ec' with '_persec' and spaces with underscores.

closes #1118
2016-04-28 12:21:28 -06:00
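A minimal Go sketch of the two sanitization rules named above; sanitizeFieldName is illustrative, not the plugin's actual code:

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

var perSecRe = regexp.MustCompile(`/[sS]ec`)

// sanitizeFieldName replaces '/[sS]ec' with '_persec' and spaces with
// underscores, in that order.
func sanitizeFieldName(name string) string {
	name = perSecRe.ReplaceAllString(name, "_persec")
	return strings.Replace(name, " ", "_", -1)
}

func main() {
	fmt.Println(sanitizeFieldName("Bytes Received/sec")) // Bytes_Received_persec
}
```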
Adithya B Cherunilam
024e1088eb Ensure that the post install script is compatible with RHEL 5
closes #1091
closes #1094
2016-04-28 11:58:06 -06:00
Cameron Sparr
08f4b1ae8a Update build to go 1.6.2 2016-04-28 11:41:16 -06:00
Bob Zoller
1390c22004 sanitize * to - in graphite serializer
closes #1110
2016-04-27 18:05:44 -06:00
Cameron Sparr
8742ead585 Change server_ -> jolokia_ in tags and other formatting 2016-04-27 16:00:58 -06:00
Cameron Sparr
59a297abe6 etc/telegraf.conf update 2016-04-27 15:46:21 -06:00
Simone Aiello
18636ea628 jolokia: use always POST
code refactor to use same prepareRequest method
for both 'agent' and 'proxy' mode

closes #1031
closes #1050
closes #473
2016-04-27 15:45:37 -06:00
Simone Aiello
cf5980ace2 jolokia: add proxy mode 2016-04-27 15:39:55 -06:00
Jesse Hanley
a7b0861436 Adding Jobstats support to Lustre2 input plugin
Lustre Jobstats allows RPCs to be tagged with a value, such
as a job's ID. This allows for per-job statistics. This plugin
collects statistics and tags the data with the jobid.

closes #1107
2016-04-27 15:35:24 -06:00
Cameron Sparr
89f2c0b0a4 Cassandra: update plugin supported prefix & fix panic
fixes #1102
2016-04-27 15:23:05 -06:00
Cameron Sparr
ee4f4d7800 ping plugin: Set default timeout 2016-04-27 15:08:38 -06:00
Cameron Sparr
4de75ce621 Performance refactor of running_output buffers
closes #914
closes #967
2016-04-27 13:40:05 -06:00
John Engelman
1c4043ab39 Closes #1085 - allow for specifying AWS credentials in config.
closes #1085
closes #1086
2016-04-26 17:24:05 -06:00
Cameron Sparr
44c945b9f5 Tail unit tests and README tweaks 2016-04-26 10:43:41 -06:00
Cameron Sparr
c7719ac365 buffers: fix bug when Write called before AddMetric 2016-04-26 10:25:04 -06:00
Cameron Sparr
b9c24189e4 Tail input plugin 2016-04-26 09:42:06 -06:00
Cameron Sparr
411d8d7439 Fix leaky tcp connections in phpfpm plugin
closes #1089
2016-04-26 09:24:32 -06:00
Cameron Sparr
671b40df2a procstat: field prefix fixup 2016-04-25 20:10:34 -06:00
Cameron Sparr
249a860c6f procstat: fix newlines in tags 2016-04-25 19:57:38 -06:00
Mika Eloranta
0367a39e1f postgresql_extensible: custom address in metrics output
Allow overriding the metrics "server" tag with the specified
value. Can be used to give a more user-friendly value for the server
name.

closes #1093
2016-04-25 16:33:35 -06:00
Mika Eloranta
1a7340bb02 postgresql_extensible: fix nil field values
nil field values would break the output influxdb line protocol.
Skip them from the output.
2016-04-25 16:33:33 -06:00
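A hedged Go sketch of the fix's idea: drop nil field values before serialization, since nil cannot be represented in influxdb line protocol (dropNilFields is a hypothetical helper):

```go
package main

import "fmt"

func dropNilFields(fields map[string]interface{}) map[string]interface{} {
	out := make(map[string]interface{}, len(fields))
	for k, v := range fields {
		if v == nil {
			continue // skip nil values instead of breaking the output
		}
		out[k] = v
	}
	return out
}

func main() {
	fmt.Println(dropNilFields(map[string]interface{}{"a": int64(1), "b": nil}))
}
```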
Mika Eloranta
ce7d852d22 postgresql_extensible: configurable measurement name
The output measurement name can be configured per query.
2016-04-25 16:33:33 -06:00
Hannu Valtonen
01b01c5969 postgresql_extensible: Censor also other security related conn params
While these aren't quite as sensitive as passwords, they do tend to be
long filesystem paths that shouldn't be reported along with
every measurement.
2016-04-25 16:33:33 -06:00
Pierre Fersing
c159460b2c Refactor running_output buffering
closes #1087
2016-04-25 16:32:41 -06:00
Cameron Sparr
07728d7425 Refactor globpath pkg to return a map
this is so that we don't call os.Stat twice for every file matched
by Match(). Also changing the behavior to _not_ return the name of a
file that doesn't exist if it's not a glob.
2016-04-24 14:37:44 -06:00
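A minimal Go sketch of the refactored shape, under the assumption that Match returns a path-to-FileInfo map so callers never stat twice; this simplifies the real globpath API:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// Match stats each glob match exactly once and hands the results to the
// caller. Nonexistent literal (non-glob) paths simply don't appear.
func Match(pattern string) map[string]os.FileInfo {
	out := map[string]os.FileInfo{}
	matches, _ := filepath.Glob(pattern)
	for _, path := range matches {
		if info, err := os.Stat(path); err == nil {
			out[path] = info
		}
	}
	return out
}

func main() {
	for path, info := range Match("/etc/*.conf") {
		fmt.Println(path, info.Size())
	}
}
```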
Cameron Sparr
d3a25e4dc1 globpath refactor into pkg separate from filestat 2016-04-23 11:56:33 -06:00
zensqlmonitor
1751c35f69 SQL Server input. Fix datatype conversion. 2016-04-23 09:18:08 +02:00
zensqlmonitor
93f5b8cc4a Fix datatype conversion 2016-04-23 09:14:04 +02:00
Cameron Sparr
5b1e59a48c filestat plugin config fixup 2016-04-22 19:15:07 -06:00
Cameron Sparr
7b27cad1ba Don't specify AWS credential chain, use default
closes #1078
2016-04-22 11:43:20 -06:00
Cameron Sparr
1b083d63ab add gitattributes file 2016-04-22 11:42:22 -06:00
Cameron Sparr
23f2b47531 Ignore errors in systemd
closes #1022
2016-04-22 11:23:24 -06:00
Victor Garcia
194288c00e Adding replication lag metric
closes #1066
2016-04-22 11:07:32 -06:00
Cameron Sparr
f9c8ed0dc3 Add filestat plugin to README 2016-04-21 19:47:23 -06:00
Cameron Sparr
88def9b71b filestat input plugin
closes #929
2016-04-21 16:53:02 -06:00
Martin Gehrke
f818f44693 Added Network Interface Object block to Generic Queries examples in win_perf_counters/README.md
Network metrics are important; this adds a block with a few Network Interface counters, plus a link to the full counter names, to the Generic Queries examples in plugins/inputs/win_perf_counters/README.md
2016-04-21 09:26:46 -04:00
Cameron Sparr
8a395fdb4a changelog update feature 1041 2016-04-20 18:36:14 -06:00
Cameron Sparr
c0588926b8 Add n_cpu field to system plugin
closes #1041
2016-04-20 18:22:04 -06:00
Cameron Sparr
f1b7ecb2a2 procstat: Add user, pidfile, pattern & exe tags
closes #1035
2016-04-20 13:18:07 -06:00
Cameron Sparr
4bcf157d88 Don't replace _ with . in datadog names
closes #1024
2016-04-20 09:06:13 -06:00
Cameron Sparr
2f7da03cce Do not log every tcp connect/disconnect
leaving as comments for whenever I rig up global debug logging.

closes #1062
2016-04-19 22:58:24 -06:00
Cameron Sparr
f1c995dcb8 Update etc/telegraf.conf 2016-04-19 18:01:41 -06:00
Cameron Sparr
9aec58c6b8 Don't allow inputs to overwrite host tag
closes #1054

This affects tags in the following plugins:

- cassandra
- disque
- rethinkdb
2016-04-19 17:44:33 -06:00
Victor Garcia
46aaaa9b70 Adding TTL metrics data
closes #1060
2016-04-19 17:02:25 -06:00
Larry Kim
46543d6323 Possible bug fix for oid_key collision
closes #1044
2016-04-19 17:00:04 -06:00
Cameron Sparr
a585119a67 Change prometheus doc to glob match 2016-04-19 14:46:37 -06:00
Cameron Sparr
8cc72368ca influxdb output: close connections & don't always overwrite
closes #1058
closes #1059

also see https://github.com/influxdata/influxdb/pull/6425
2016-04-19 13:40:08 -06:00
Cameron Sparr
92e57ee06c Set default tags in test accumulator
closes #1012
2016-04-18 19:24:17 -06:00
Eugene Chupriyanov
c737a19d9f Just close Riemann client on send metrics failure
Signed-off-by: Eugene Chupriyanov <e.chupriyanov@cpm.ru>

closes #1013
2016-04-18 17:25:36 -06:00
Eugene Chupriyanov
708a97d773 Try to reconnect to Riemann if metrics upload failed.
Signed-off-by: Eugene Chupriyanov <tchu@tchu.ru>

Error checks added

Don't Close() nil client

Signed-off-by: Eugene Chupriyanov <e.chupriyanov@cpm.ru>
2016-04-18 17:25:19 -06:00
Maksadbek
b95a90dbd6 updated README for mysql input plugin
closes #889
closes #403
2016-04-18 17:21:25 -06:00
Maksadbek
a2d1ee08d4 transposed the matrix of tags/fields for Lock Waits stats gathering 2016-04-18 17:11:26 -06:00
Maksadbek
7e64dc380f preventing tags from mutation by creating new tag for each metric 2016-04-18 17:11:26 -06:00
Maksadbek
046cb6a564 changed types to decrease needless uint64 to float64 casts 2016-04-18 17:11:26 -06:00
Maksadbek
644ce9edab fixed code regarding needless type casting; single creation of map 2016-04-18 17:11:26 -06:00
Maksadbek
059b601b13 mysql plugin conf field names are lowercase-underscored 2016-04-18 17:11:26 -06:00
Maksadbek
d59999f510 improvements on queries and additional comments 2016-04-18 17:11:26 -06:00
Maksadbek
c5d31e7527 statistics that are missing on MySQL 5.5 are turned off by default 2016-04-18 17:11:26 -06:00
Maksadbek
c121e38da6 mysql plugin, check for existence of table before scanning 2016-04-18 17:11:26 -06:00
Maksadbek
b16bc3d2e3 remove duplicate function; Mysql plugin GatherTableSchema is configurable 2016-04-18 17:11:26 -06:00
maksadbek
c732abbda2 Improved mysql plugin
shows global variables
	shows slave statuses
	shows size and count of binary log files
	shows information_schema.processlist stats
	shows perf table stats
	shows auto increments stats from information schema
	shows perf index stats
	shows table lock waits summary by table
	shows time and operations of event waits
	shows file event statuses
	shows events statements stats from perf_schema
	shows schema statistics
	refactored plugin, provided multiple fields per insert
2016-04-18 17:11:26 -06:00
Cameron Sparr
61d681a7c8 docker changelog update 2016-04-18 16:24:32 -06:00
Cameron Sparr
7828bc09cf Fixup docker blkio container name & docker doc 2016-04-18 16:20:46 -06:00
Cameron Sparr
36d330fea0 docker plugin schema refactor
- renaming cont_name and cont_image to container_name and
container_image.
- cont_id is now a field, called container_id
- docker_cpu, docker_mem, docker_net measurements have been renamed to
  docker_container_cpu, docker_container_mem, and docker_container_net

closes #1014
closes #1052
2016-04-18 15:16:59 -06:00
Cameron Sparr
4d46589d39 JSON input: make string ignores clear 2016-04-18 13:20:06 -06:00
chaton78
93f57edd3a Better logging for MQTT consumer
closes #1023
closes #921
2016-04-18 11:27:50 -06:00
chaton78
8ec8ae0587 Added onConnection and connectionLost Handlers 2016-04-18 11:25:03 -06:00
Pascal Larin
ce94e636bb Resubscribe if not using persistent sessions 2016-04-18 11:25:03 -06:00
Pascal Larin
21c7378b61 Handle onConnect 2016-04-18 11:25:03 -06:00
Thibault Cohen
75a9845d20 SNMP Fix #995
closes #995
2016-04-18 11:19:16 -06:00
Shahzheeb Khan
d638f6e411 mongodb readme and examples

closes #1039
2016-04-16 15:53:04 -06:00
Cameron Sparr
81d0a64d46 Adds support for removing/keeping tags from metrics
closes #706
2016-04-16 15:13:38 -06:00
Cameron Sparr
f76739cb1b Release 0.12.1 2016-04-14 16:16:26 -06:00
Ross McDonald
7f992fd321 Changed nohup fallback command to use 'sudo -u' so that Telegraf doesn't run as the root user. 2016-04-14 13:56:20 -06:00
Cameron Sparr
e428d11add Update etc/telegraf.conf, README 2016-04-12 14:50:56 -06:00
Subhachandra Chandra
0ed5b75a14 Fixed CHANGELOG with the correct pull request id.
closes #681
closes #1009
2016-04-12 10:57:34 -06:00
subhachandrachandra
b1b4adec74 Added cassandra plugin to access metrics using jolokia and push them to influxdb. 2016-04-12 10:55:59 -06:00
Cameron Sparr
1934cc2e62 Add memstats to the influxdb input plugin
closes #958
2016-04-12 10:13:11 -06:00
Lukasz Jagiello
ae8cf8c35e Fix plugin name in README 2016-04-11 19:43:30 -06:00
Cameron Sparr
f5878eafb9 Create a template system for the graphite serializer
closes #925
closes #879
2016-04-11 16:30:18 -06:00
Cameron Sparr
27fe4f7062 Update changelog, etc/telegraf.conf
closes #998
2016-04-08 12:00:41 -06:00
Michele Fadda
7ad8b26297 dovecot: enabled global, user and ip queries 2016-04-08 11:56:08 -06:00
Cameron Sparr
1a383b7d90 Telegraf no longer depends on lsof
so remove it as a dependency from the linux packages.

closes #974
2016-04-08 11:27:33 -06:00
Shahzheeb Khan
445946792e Adding a few metric examples in the jolokia plugin

closes #993
2016-04-08 11:20:47 -06:00
Shahzheeb Khan
de82c7d5ac Adding a few metric examples
Adding more metrics to the example to make the plugin easier to understand
2016-04-08 11:20:42 -06:00
Cameron Sparr
07f0d561dc Eliminate byte buffer, copy scanner.Bytes directly 2016-04-08 09:50:03 -06:00
Cameron Sparr
be379f3dac Refactor UDP & TCP input buffers
closes #991
2016-04-08 09:50:03 -06:00
Cameron Sparr
1bf904fe60 Godeps: update paho mqtt client dep
this might fix #921

see https://github.com/eclipse/paho.mqtt.golang/issues/32
2016-04-07 17:32:28 -06:00
Cameron Sparr
c6faf005cb Add sysstat dummy file for non-linux builds 2016-04-07 12:08:26 -06:00
Rene Zbinden
b534b58542 fix tests
closes #939
2016-04-07 11:54:41 -06:00
Rene Zbinden
920711533e move path lookup for sadf to init() 2016-04-07 10:45:20 -06:00
Rene Zbinden
194110433e change sadf options so that it also works on older linux distributions 2016-04-07 10:45:20 -06:00
Rene Zbinden
7926396d2a cleanup code, set dfltActivities in init() function; this saves an if in the collect() method 2016-04-07 10:45:20 -06:00
Rene Zbinden
797522e8ca change group=true by default 2016-04-07 10:45:20 -06:00
Rene Zbinden
64b5d1a269 add documentation about sadc path on different linux distributions 2016-04-07 10:45:20 -06:00
Rene Zbinden
d00d3802c9 fix build tags 2016-04-07 10:45:20 -06:00
Rene Zbinden
46fff13341 disable TestInterval with -race test option 2016-04-07 10:45:20 -06:00
Rene Zbinden
be3374a3ef remove interval configuration 2016-04-07 10:45:20 -06:00
Rene Zbinden
264ac0b017 fix race condition 2016-04-07 10:45:20 -06:00
Rene Zbinden
17033b3c6c change readme 2016-04-07 10:45:20 -06:00
Rene Zbinden
c4ea122d66 add sysstat plugin 2016-04-07 10:45:20 -06:00
Cameron Sparr
90185dc6b3 cleanup & comment http_response def config
closes #332
2016-04-07 10:37:52 -06:00
Luke Swithenbank
377b030d88 update to 5 second default and string map for headers 2016-04-07 10:28:39 -06:00
Luke Swithenbank
437bd87d7c added tests and did some refactoring 2016-04-07 10:28:39 -06:00
Luke Swithenbank
73a7916ce3 take a request body as a param 2016-04-07 10:28:39 -06:00
Luke Swithenbank
f947fa86e3 update to allow for following redirects 2016-04-07 10:28:39 -06:00
Luke Swithenbank
7219efbdb7 add the ability to parse http headers 2016-04-07 10:28:39 -06:00
Luke Swithenbank
b7435b9cd1 fmt 2016-04-07 10:28:39 -06:00
Luke Swithenbank
207ab5a0d1 update to make a working sample_config 2016-04-07 10:28:39 -06:00
Luke Swithenbank
70aa0ef85d add plugin to all 2016-04-07 10:28:39 -06:00
Luke Swithenbank
dfbe231a51 add http_response plugin 2016-04-07 10:28:39 -06:00
Cameron Sparr
cce35da366 Godeps_windows: update file 2016-04-07 10:00:10 -06:00
Cameron Sparr
1a612bcae9 Update README and etc/telegraf.conf 2016-04-06 13:49:50 -06:00
Josh Hardy
d5b9e003fe Add CloudWatch input plugin
Rebased commit of previously reviewed branch.
Added a cloudwatch client mock and richer unit tests.

closes #935
closes #936
2016-04-06 13:45:06 -06:00
Sergio Jimenez
e19c474a92 input(docker): Cleanup
* Removed leftovers, unused code

closes #957
fixes #645
2016-04-06 12:01:36 -06:00
Sergio Jimenez
8274798499 fix(Godeps): Added github.com/opencontainers/runc 2016-04-06 11:55:30 -06:00
Sergio Jimenez
9f68a32934 fix(): Last link on README 2016-04-06 11:55:30 -06:00
Sergio Jimenez
5c688daff1 input(docker): Updated README
* Replaced links to fsouza/go-dockerclient by docker/engine-api
2016-04-06 11:55:30 -06:00
Sergio Jimenez
741fb1181f godeps(): Updated Godeps file for engine-api
* Added required deps for engine-api
* Removed fsouza/go-dockerclient
2016-04-06 11:55:30 -06:00
Sergio Jimenez
fd1f05c8e0 input(docker): Fixed io sectors/io_time recursive
* On engine-api sectors_recursive and io_time_recursive have no Op
2016-04-06 11:55:30 -06:00
Sergio Jimenez
708cbf937f input(docker): Fixed tests to work with engine-api
* Modified tests to work with engine-api
2016-04-06 11:55:30 -06:00
Sergio Jimenez
32213cad01 input(docker): docker/engine-api
* Made required changes to get it to compile
* First manual tests looking good, still unit tests need fixing
* Made go linter happier
2016-04-06 11:55:30 -06:00
Ricard Clau
9320a6e115 windows service docs
closes #954
2016-04-06 11:50:36 -06:00
Cameron Sparr
0f16c0f4cf Reduce TCP listener allocations 2016-04-05 17:37:09 -06:00
Cameron Sparr
30464396d9 Make the UDP input buffer only once 2016-04-05 17:35:43 -06:00
Cameron Sparr
64066c4ea8 Update input data format readme 2016-04-05 17:27:04 -06:00
Cameron Sparr
7e97787d9d More readme fixups 2016-04-05 16:17:45 -06:00
Cameron Sparr
40f2dd8c6c Readme fixup for exec plugin 2016-04-05 15:22:58 -06:00
Cameron Sparr
4dd364e1c3 Update all readme instances of data formats 2016-04-05 14:42:20 -06:00
Cameron Sparr
03f2a35b31 Update jolokia plugin readme 2016-04-05 13:54:02 -06:00
Martti Rannanjärvi
73bd98df57 dovecot: remove extra newline from stats query
An extra newline in the stats query is interpreted as an empty query,
which is an error for dovecot.

closes #972
2016-04-05 10:54:27 -06:00
Armin Wolfermann
bcf1fc658d ipmi_sensors: Allow : in password
closes #969
2016-04-05 10:52:41 -06:00
Cameron Sparr
863cbe512d processes plugin: fix case where there are spaces in cmd name
fixes #968
2016-04-05 10:27:30 -06:00
Cameron Sparr
d871e9aee7 Dummy kernel plugin added for consistent config generation 2016-04-04 17:43:53 -06:00
Cameron Sparr
70ef61ac6d Release 0.12 2016-04-04 16:34:41 -06:00
Cameron Sparr
a4a140bfad etc/telegraf.conf update for procstat change 2016-04-04 16:30:24 -06:00
Florent Ramière
d2d91e713a Add plugin links
closes #964
2016-04-04 16:28:36 -06:00
Cameron Sparr
d9bb1ceaec Changelog update 2016-04-04 16:12:50 -06:00
Pierre Fersing
5fe8903fd2 Use timeout smaller than 10 seconds
closes #959
2016-04-04 16:10:23 -06:00
Cameron Sparr
8509101b83 drop cpu_time_* from procstat by default
closes #963
2016-04-04 16:10:09 -06:00
Cameron Sparr
357849c348 Godeps: update wvanbergen/kafka dependency
see https://github.com/wvanbergen/kafka/pull/87

fixes #805
2016-04-02 13:45:24 -06:00
Nikhil Bafna
0f1b4e06f5 Update README.md
Fix redis input plugin name in configuration example
2016-04-02 10:13:21 +05:30
Cameron Sparr
8e041420cd config: parse environment variables in the config file
closes #663
2016-04-01 16:06:19 -06:00
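A hedged Go sketch of environment-variable substitution in a config file using the standard library's os.Expand; the real implementation may differ:

```go
package main

import (
	"fmt"
	"os"
)

// expandEnvConfig replaces $VAR and ${VAR} references in the raw config
// contents with their environment values before parsing.
func expandEnvConfig(contents []byte) []byte {
	return []byte(os.Expand(string(contents), os.Getenv))
}

func main() {
	os.Setenv("INFLUX_URL", "http://localhost:8086")
	fmt.Println(string(expandEnvConfig([]byte(`urls = ["$INFLUX_URL"]`))))
}
```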
Rubycut
9211d22b2b Add writing in documentation.
closes #950
2016-04-01 11:46:32 -06:00
Cameron Sparr
f5246eb167 Update changelog with config file PR 2016-04-01 11:45:09 -06:00
Cameron Sparr
e436b2d720 Cleanup & standardize config file
changes:

- -sample-config will now comment out all but a few default plugins.
- config file parse errors will output the path to the bad conf file.
- cleanup 80-char line-length and some other style issues.
- default package conf file will now have all plugins, but commented
  out.

closes #199
closes #944
2016-04-01 10:59:53 -06:00
Florent Ramière
51f4e9c0d3 Update changelog
closes #945
2016-04-01 10:09:04 -06:00
Florent Ramière
8c3371c4ac Use numerical codes instead of symbolic ones 2016-04-01 10:08:55 -06:00
Florent Ramière
6ff0fc6d83 Add compression/acks/retry conf to Kafka output plugin
The following configuration is now possible

	## CompressionCodec represents the various compression codecs
	## recognized by Kafka in messages.
	##  "none" : No compression
	##  "gzip" : Gzip compression
	##  "snappy" : Snappy compression
	# compression_codec = "none"

	## RequiredAcks is used in Produce Requests to tell the broker how
	## many replica acknowledgements it must see before responding.
	##  "none" : the producer never waits for an acknowledgement from the
	##   broker. This option provides the lowest latency but the weakest
	##   durability guarantees (some data will be lost when a server fails).
	##  "leader" : the producer gets an acknowledgement after the leader
	##   replica has received the data. This option provides better
	##   durability as the client waits until the server acknowledges the
	##   request as successful (only messages that were written to the
	##   now-dead leader but not yet replicated will be lost).
	##  "leader_and_replicas" : the producer gets an acknowledgement after
	##   all in-sync replicas have received the data. This option provides
	##   the best durability; we guarantee that no messages will be lost as
	##   long as at least one in-sync replica remains.
	# required_acks = "leader_and_replicas"

	## The total number of times to retry sending a message
	# max_retry = "3"
2016-04-01 10:08:55 -06:00
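A hedged Go sketch of how these symbolic names map onto Kafka's numeric protocol codes (the commit above switches the config to numerical codes); the tables are illustrative, not the plugin's actual code:

```go
package main

import "fmt"

// Kafka protocol values: required acks 0/1/-1, compression codecs 0/1/2.
var compressionCodecs = map[string]int{
	"none":   0,
	"gzip":   1,
	"snappy": 2,
}

var requiredAcks = map[string]int{
	"none":                0,  // fire and forget
	"leader":              1,  // ack from the leader replica only
	"leader_and_replicas": -1, // ack from all in-sync replicas
}

func main() {
	fmt.Println(compressionCodecs["snappy"], requiredAcks["leader_and_replicas"])
}
```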
Cameron Sparr
9347a70425 Fix httpjson README
closes #947
2016-03-31 20:37:04 -06:00
Cameron Sparr
91957f0848 Update Godeps_windows file to HEAD 2016-03-30 14:43:05 -06:00
Cameron Sparr
62105bb353 Use github paho mqtt client instead of gerrit
this might fix #921
2016-03-30 11:54:01 -06:00
Rudenkovk Konstantin
e03f684508 Fix parsing of the fcgi URI path in the php-fpm input module
closes #934
2016-03-30 10:34:48 -06:00
Ross McDonald
2f41ae24f8 Swap systemd command, as it was causing issues on Debian. 2016-03-30 10:17:31 -06:00
Cameron Sparr
4ad551be9a add '*' to metric prefixes for consistency 2016-03-29 17:00:51 -06:00
Cameron Sparr
bd640ae2c5 changelog fixup 2016-03-29 12:19:07 -06:00
Cameron Sparr
2cfc882c62 changelog & readme update 2016-03-29 12:18:23 -06:00
Cameron Sparr
21ece2d76d Convert ipmi stats/tags to underscore and lowercase
closes #888
2016-03-29 11:39:57 -06:00
张光权
d055d7f496 Add the ipmi plugin 2016-03-29 11:39:57 -06:00
Cameron Sparr
b1cfb1afe4 Deprecate statsd convert_names option, expose separator
closes #876
2016-03-28 12:13:15 -06:00
Cameron Sparr
2f215356d6 Update statsd graphite parser link to telegraf version 2016-03-28 11:57:51 -06:00
Adam Argo
e07c79259b PR feedback changes
closes #927
2016-03-28 10:43:34 -06:00
Adam Argo
59085f072a adds ability to parse datadog-formatted tags in the statsd input 2016-03-28 10:43:26 -06:00
Cameron Sparr
474d6db42f Don't log every string metric that prometheus doesn't support 2016-03-23 16:01:06 -06:00
Thibault Cohen
a95710ed0c SNMP plugin fixes
fixes #873
2016-03-22 22:58:02 -06:00
JP
51d7724255 add verifyValue func for datadog and librato, bail if no good
closes #906
2016-03-22 15:22:57 -06:00
Cameron Sparr
276e7629bd memcached unix socket: fix panic. Do not recreate conn inside if
closes #841
2016-03-22 15:12:35 -06:00
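A hedged Go illustration of the bug class behind this fix: declaring conn with := inside an if block shadows the outer variable, leaving it nil and panicking later. Names are illustrative, not the plugin's actual code:

```go
package main

import (
	"fmt"
	"net"
)

func dial(unixSocket string) (net.Conn, error) {
	var conn net.Conn
	var err error
	if unixSocket != "" {
		// Bug: `conn, err := net.Dial(...)` here would declare new variables
		// that shadow the outer conn/err, so the outer conn stays nil.
		conn, err = net.Dial("unix", unixSocket) // correct: plain assignment
	} else {
		conn, err = net.Dial("tcp", "localhost:11211")
	}
	return conn, err
}

func main() {
	conn, err := dial("")
	fmt.Println(conn, err)
}
```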
Cameron Sparr
69606a45e0 Fix prometheus label names, and don't panic if invalid
fixes #907
2016-03-22 12:29:55 -06:00
Chris Goller
7f65ffcb15 Add optional parameters to influxdb output README 2016-03-22 09:14:25 -06:00
Cameron Sparr
4f5f6761f3 Update gopsutil dependency
closes #656
2016-03-22 09:13:31 -06:00
Cameron Sparr
f543dbb42f Allow users to tell telegraf Agent not to include host tag
closes #848
2016-03-21 15:51:10 -06:00
Cameron Sparr
5917a42997 influxdb output: quote the database name
closes #898
2016-03-21 14:37:33 -06:00
Cameron Sparr
fbe1664214 README cleanup and update 2016-03-21 14:30:59 -06:00
david birdsong
d09bb13cb6 special case 'value'
it usually connotes a single-value metric; appending the field name just clutters

closes #793
2016-03-21 13:49:34 -06:00
david birdsong
31c323c097 fix prometheus output
If I understand the prometheus data model correctly, the current output
for this plugin is unusable.

prometheus only accepts a single value per measurement. Prior to this change, the range loop
caused a measurement to end up with a random value.

for instance:

net,dc=sjc1,grp_dashboard=1,grp_home=1,grp_hwy_fetcher=1,grp_web_admin=1,host=sjc1-b4-8,hw=app,interface=docker0,state=live
bytes_recv=477596i,bytes_sent=152963303i,drop_in=0i,drop_out=0i,err_in=0i,err_out=0i,packets_recv=7231i,packets_sent=11460i
1457121990003778992

this 'net' measurement would have all its tags copied to prometheus
labels, but any of 152963303, 0, or 7231 as the value for
'net', depending on which field is last in the map iteration

this change expands the fields into new measurements by appending
the field name to the influxdb measurement name.

i.e., the above example results in 'net' being dropped and new measurements
taking its place:
	net_bytes_recv
	net_bytes_sent
	net_drop_in
	net_err_in
	net_packets_recv
	net_packets_sent

I hope this can be merged; I love telegraf's composability of tags and
filtering.
2016-03-21 13:49:09 -06:00
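A minimal Go sketch of the expansion described in this message: one sample per field, named measurement_fieldname (the types are simplified, not the plugin's actual ones):

```go
package main

import "fmt"

func expandFields(measurement string, fields map[string]float64) map[string]float64 {
	out := make(map[string]float64, len(fields))
	for name, value := range fields {
		out[measurement+"_"+name] = value // e.g. net_bytes_recv
	}
	return out
}

func main() {
	fmt.Println(expandFields("net", map[string]float64{
		"bytes_recv": 477596,
		"bytes_sent": 152963303,
	}))
}
```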
Thibault Cohen
8f09aadfdf Add nagios parser for exec input plugin
closes #762
2016-03-21 13:34:47 -06:00
Chris H (CruftMaster)
20b4e8c779 GREEDY field templates for the graphite parser, and support for multiple specific field names
closes #789
2016-03-21 13:32:51 -06:00
Cameron Sparr
402a0108ae Merge pull request #896 from jipperinbham/graphite-tag-sanitizer
sanitize known issue characters from graphite tag name
2016-03-21 12:29:05 -06:00
Cameron Sparr
9de4a8efcf Update readme, changelog for couchbase plugin
closes #866
closes #482
2016-03-21 12:12:23 -06:00
Vebjorn Ljosa
077fa2e6b9 Improve README for couchbase input plugin
Proper terminology and case. Examples for tags. Example output.
2016-03-21 12:09:32 -06:00
Vebjorn Ljosa
2ae9316f48 Add examples in documentation for couchbase input plugin 2016-03-21 12:09:32 -06:00
Vebjorn Ljosa
9b5a90e3b9 Unit test couchbase input plugin 2016-03-21 12:09:32 -06:00
Vebjorn Ljosa
483942dc41 Comment on default pool name 2016-03-21 12:09:32 -06:00
Vebjorn Ljosa
2ddda6457f Convert measurement names to snake_case 2016-03-21 12:09:32 -06:00
Vebjorn Ljosa
681e695170 Don't copy lock when ranging over map
Make `go vet` happy.
2016-03-21 12:09:32 -06:00
Vebjorn Ljosa
a043664dc4 Couchbase input plugin 2016-03-21 12:09:32 -06:00
JP
e940f99646 sanitize known issue characters from graphite tag name 2016-03-21 10:01:51 -05:00
Cameron Sparr
22073042a9 Merge pull request #891 from jipperinbham/librato-serialize-fix
replace @ character with - for librato
2016-03-18 17:00:00 -06:00
Cameron Sparr
2634cc408a Update README 2016-03-18 11:26:05 -06:00
Thomas Menard
36446bcbc2 Remove the columns used as tags
closes #844
2016-03-18 11:25:04 -06:00
Thomas Menard
b371ec5cf6 Add the postgresql_extensible plugin
This plugin is intended to add extended support for Postgresql
compared to the legacy postgres plugin.

Basically, the plugin doesn't have any metrics defined, and it's up to the
user to define them in telegraf.conf (as a toml structure).

Each query can have its own specific tags, and can be written
using a where clause in order to filter per database name.

To be more generic, a minimum postgresql version has been defined per
query in case you have 2 different versions of Postgresql running on the
same host.
2016-03-18 11:23:02 -06:00
HUANG Wei
18f4afb388 Inherit previous instance's stats in statsd plugin.
This way, after a reload, the stats won't restart from zero, at least for the
counter type.

closes #887
2016-03-18 11:20:35 -06:00
Cameron Sparr
77dcbe95c0 Do not write metrics if there are 0 to write
closes #884
2016-03-18 10:54:51 -06:00
Cameron Sparr
061b749041 TLS config: if only given ssl_ca, create tls config anyways
fixes #890
2016-03-18 10:53:55 -06:00
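A hedged Go sketch of building a *tls.Config from only a CA file, as the fix implies should be possible; tlsConfigFromCA is a hypothetical helper:

```go
package main

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"io/ioutil"
)

func tlsConfigFromCA(caFile string) (*tls.Config, error) {
	pem, err := ioutil.ReadFile(caFile)
	if err != nil {
		return nil, err
	}
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(pem) {
		return nil, fmt.Errorf("could not parse CA certificate %s", caFile)
	}
	// No client cert/key required: a CA alone suffices to verify the server.
	return &tls.Config{RootCAs: pool}, nil
}

func main() {
	cfg, err := tlsConfigFromCA("/etc/telegraf/ca.pem")
	fmt.Println(cfg != nil, err)
}
```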
JP
5b0c3951f6 replace @ character with - for librato 2016-03-18 11:25:51 -05:00
Cameron Sparr
f2394b5a8d Merge pull request #886 from entertainyou/typo
Fix typo, should be input instead of output.
2016-03-17 21:36:26 -06:00
Cameron Sparr
fe7b884cc9 Update changelog 2016-03-17 20:40:22 -06:00
Cameron Sparr
5c1b635229 Value parser, for parsing a single value into a metric
closes #849
2016-03-17 20:08:21 -06:00
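A hedged Go sketch of a value parser: turn one raw value into a single metric field, probing int, float, then bool (the probing order here is an assumption):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func parseValue(raw string) interface{} {
	s := strings.TrimSpace(raw)
	if i, err := strconv.ParseInt(s, 10, 64); err == nil {
		return i
	}
	if f, err := strconv.ParseFloat(s, 64); err == nil {
		return f
	}
	if b, err := strconv.ParseBool(s); err == nil {
		return b
	}
	return s // fall back to a string field
}

func main() {
	fmt.Println(parseValue("42"), parseValue("3.14"), parseValue("true"), parseValue("ok"))
}
```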
HUANG Wei
63410491b7 Fix typo, should be input instead of output. 2016-03-18 10:06:44 +08:00
Cameron Sparr
26e0a4bbde Merge pull request #882 from VasuBalakrishnan/master
Fixed SQL Server Plugin issues #881
2016-03-17 19:41:42 -06:00
Balakrishnan
c356e56522 Updated Change log #881 2016-03-17 19:56:39 -04:00
Cameron Sparr
fd26bbbd0b Merge pull request #883 from ljagiello/minor-changelog-fix
Duplicated line
2016-03-17 17:09:08 -06:00
Lukasz Jagiello
7aa55371b5 Duplicate line 2016-03-17 15:54:22 -07:00
Balakrishnan
ba06533c3e Fixed SQL Server Plugin issues #881 2016-03-17 18:01:19 -04:00
Marcelo Salazar
d66d66e74b added json serializer
closes #878
2016-03-17 13:51:16 -06:00
Jonathan Chauncey
d6b5f3efe6 fix(prometheus): Add support for bearer token to prometheus input plugin
closes #864
merges #880
2016-03-17 13:47:22 -06:00
Cameron Sparr
b5a431624b Close UDP listener in udp_listener plugin
also adding waitgroups to udp_listener and statsd plugins to verify that
all goroutines have been cleaned up before Stop() exits.

closes #869
2016-03-17 10:51:35 -06:00
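A hedged Go sketch of the shutdown pattern described above: closing the listener unblocks reads, and a waitgroup guarantees every goroutine has exited before Stop() returns. Names are illustrative, not the plugin's actual code:

```go
package main

import (
	"net"
	"sync"
)

type listener struct {
	conn net.PacketConn
	wg   sync.WaitGroup
}

func (l *listener) Start() error {
	conn, err := net.ListenPacket("udp", ":8125")
	if err != nil {
		return err
	}
	l.conn = conn
	l.wg.Add(1)
	go func() {
		defer l.wg.Done()
		buf := make([]byte, 64*1024)
		for {
			if _, _, err := l.conn.ReadFrom(buf); err != nil {
				return // Close() below fails the read and ends the loop
			}
		}
	}()
	return nil
}

func (l *listener) Stop() {
	l.conn.Close() // unblocks ReadFrom and frees the port for a reload
	l.wg.Wait()    // all goroutines cleaned up before Stop() exits
}

func main() {
	l := &listener{}
	if err := l.Start(); err == nil {
		l.Stop()
	}
}
```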
HUANG Wei
8e7284de5a fixup! Close the UDP connection in Stop() of statsd input plugin. 2016-03-17 10:51:35 -06:00
HUANG Wei
b2d38cd31c Close the UDP connection in Stop() of statsd input plugin.
If not, when reloading, we may listen on the same port and get an
error about listening on an already-used address.
2016-03-17 10:51:35 -06:00
Cameron Sparr
a15fed35b7 Merge pull request #875 from Onefootball/feature/link-freebsd-package
Add FreeBSD tarball location to README
2016-03-17 10:51:21 -06:00
Dirk Pahl
eee6b0059c Add FreeBSD tarball location to README 2016-03-17 16:53:55 +01:00
Eugene Dementiev
530b4f3bee [amqp output] Allow external auth (cert-based tls auth)
closes #863
2016-03-16 19:03:41 -06:00
Thibault Cohen
bac1c223de Improve prometheus plugin
closes #707
2016-03-16 19:00:06 -06:00
marknmel
57f7582b4d Cleanup of Exec Inputs documentation - redux
Hi @sparrc

(Sorry for the noise - new pr)

closes #853

Please find some improvements to readability including the \n for the exec/telegraf line-protocol input.

I hope you (and others) find it easier to read.

/Mark

This is an amend
2016-03-16 18:55:48 -06:00
Cameron Sparr
5afe819ebd Changelog update for 0.11.1 2016-03-16 14:55:40 -06:00
Cameron Sparr
59568f5311 Release 0.11.1 2016-03-16 14:45:35 -06:00
Cameron Sparr
822706367b provide args for telegraf for consistency with influxd:
- telegraf version
- telegraf config

closes #857
2016-03-16 14:22:01 -06:00
HUANG Wei
f8e9fafda3 Add reload configuration for telegraf service scripts.
closes #794
2016-03-16 11:20:46 -06:00
Cameron Sparr
c2bb9db012 Changelog update 2016-03-16 11:20:28 -06:00
Pierre Fersing
e4e7d7fbfc Improved install script for packaged telegraf:
* Start/stop service on Debian/Ubuntu
* Disable init-script/Systemd-unit on package removal

closes #747
2016-03-16 11:17:28 -06:00
Cameron Sparr
035e4cf90a Fix bug with httpjson client pointer receiver
fixes #859
2016-03-16 10:57:15 -06:00
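A hedged Go illustration of the bug class named in this fix: a method with a value receiver mutates a copy, so state set on it is silently lost. The types are illustrative:

```go
package main

import "fmt"

type client struct{ timeout int }

// Broken: value receiver, the assignment changes only a copy of c.
func (c client) setTimeoutBroken(t int) { c.timeout = t }

// Fixed: pointer receiver, the assignment is visible to the caller.
func (c *client) setTimeout(t int) { c.timeout = t }

func main() {
	c := &client{}
	c.setTimeoutBroken(5)
	fmt.Println(c.timeout) // prints 0: the mutation was lost
	c.setTimeout(5)
	fmt.Println(c.timeout) // prints 5
}
```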
Cameron Sparr
18fff4a3f5 Merge pull request #858 from LordFPL/patch-1
(very) Little error in changelog
2016-03-16 09:28:30 -06:00
Ross McDonald
675b6dc305 Corrected issue with windows builds where the correct configuration
and filesystem would not be used.

closes #852
closes #854
2016-03-16 09:27:09 -06:00
LordFPL
4071c78b2b (very) Little error in changelog
We are not going back in time, no? ;)
2016-03-16 08:57:33 +01:00
Cameron Sparr
2cb32a683e README fixes for 0.11.0 2016-03-15 11:19:40 +00:00
690 changed files with 93269 additions and 11934 deletions

.gitattributes (new file, +4)

@@ -0,0 +1,4 @@
CHANGELOG.md merge=union
README.md merge=union
plugins/inputs/all/all.go merge=union
plugins/outputs/all/all.go merge=union

.github/ISSUE_TEMPLATE.md (new file, +44)

@@ -0,0 +1,44 @@
## Directions
GitHub Issues are reserved for actionable bug reports and feature requests.
General questions should be asked at the [InfluxData Community](https://community.influxdata.com) site.
Before opening an issue, search for similar bug reports or feature requests on GitHub Issues.
If no similar issue can be found, fill out either the "Bug Report" or the "Feature Request" section below.
Erase the other section and everything on and above this line.
*Please note, the quickest way to fix a bug is to open a Pull Request.*
## Bug report
### Relevant telegraf.conf:
### System info:
[Include Telegraf version, operating system name, and other relevant details]
### Steps to reproduce:
1. ...
2. ...
### Expected behavior:
### Actual behavior:
### Additional info:
[Include gist of relevant config, logs, etc.]
## Feature Request
Opening a feature request kicks off a discussion.
### Proposal:
### Current behavior:
### Desired behavior:
### Use case: [Why is this important (helps with prioritizing requests)]

.github/PULL_REQUEST_TEMPLATE.md (new file, +5)

@@ -0,0 +1,5 @@
### Required for all PRs:
- [ ] Signed [CLA](https://influxdata.com/community/cla/).
- [ ] Associated README.md updated.
- [ ] Has appropriate unit tests.

.gitignore (+1)

@@ -1,3 +1,4 @@
build
tivan
.vagrant
/telegraf

File diff suppressed because it is too large

CONTRIBUTING.md

@@ -2,7 +2,7 @@
1. [Sign the CLA](http://influxdb.com/community/cla.html)
1. Make changes or write plugin (see below for details)
1. Add your plugin to `plugins/inputs/all/all.go` or `plugins/outputs/all/all.go`
1. Add your plugin to one of: `plugins/{inputs,outputs,aggregators,processors}/all/all.go`
1. If your plugin requires a new Go package,
[add it](https://github.com/influxdata/telegraf/blob/master/CONTRIBUTING.md#adding-a-dependency)
1. Write a README for your plugin, if it's an input plugin, it should be structured
@@ -11,11 +11,13 @@ Output plugins READMEs are less structured,
but any information you can provide on how the data will look is appreciated.
See the [OpenTSDB output](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/opentsdb)
for a good example.
1. **Optional:** Help users of your plugin by including example queries for populating dashboards. Include these sample queries in the `README.md` for the plugin.
1. **Optional:** Write a [tickscript](https://docs.influxdata.com/kapacitor/v1.0/tick/syntax/) for your plugin and add it to [Kapacitor](https://github.com/influxdata/kapacitor/tree/master/examples/telegraf).
## GoDoc
Public interfaces for inputs, outputs, metrics, and the accumulator can be found
on the GoDoc
Public interfaces for inputs, outputs, processors, aggregators, metrics,
and the accumulator can be found on the GoDoc
[![GoDoc](https://godoc.org/github.com/influxdata/telegraf?status.svg)](https://godoc.org/github.com/influxdata/telegraf)
@@ -30,7 +32,7 @@ Assuming you can already build the project, run these in the telegraf directory:
1. `go get github.com/sparrc/gdm`
1. `gdm restore`
1. `gdm save`
1. `GOOS=linux gdm save`
## Input Plugins
@@ -44,13 +46,13 @@ and submit new inputs.
### Input Plugin Guidelines
* A plugin must conform to the `telegraf.Input` interface.
* A plugin must conform to the [`telegraf.Input`](https://godoc.org/github.com/influxdata/telegraf#Input) interface.
* Input Plugins should call `inputs.Add` in their `init` function to register themselves.
See below for a quick example.
* Input Plugins must be added to the
`github.com/influxdata/telegraf/plugins/inputs/all/all.go` file.
* The `SampleConfig` function should return valid toml that describes how the
plugin can be configured. This is included in `telegraf -sample-config`.
plugin can be configured. This is included in `telegraf config`.
* The `Description` function should say in one line what this plugin does.
Let's say you've written a plugin that emits metrics about processes on the
@@ -82,9 +84,9 @@ func (s *Simple) SampleConfig() string {
func (s *Simple) Gather(acc telegraf.Accumulator) error {
if s.Ok {
acc.Add("state", "pretty good", nil)
acc.AddFields("state", map[string]interface{}{"value": "pretty good"}, nil)
} else {
acc.Add("state", "not great", nil)
acc.AddFields("state", map[string]interface{}{"value": "not great"}, nil)
}
return nil
@@ -95,6 +97,13 @@ func init() {
}
```
## Adding Typed Metrics
In addition to the `AddFields` function, the accumulator also supports an
`AddGauge` and `AddCounter` function. These functions are for adding _typed_
metrics. Metric types are ignored for the InfluxDB output, but can be used
for other outputs, such as [prometheus](https://prometheus.io/docs/concepts/metric_types/).
## Input Plugins Accepting Arbitrary Data Formats
Some input plugins (such as
@@ -114,8 +123,8 @@ creating the `Parser` object.
You should also add the following to your SampleConfig() return:
```toml
## Data format to consume. This can be "json", "influx" or "graphite"
## Each data format has it's own unique set of configuration options, read
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "influx"
@@ -168,13 +177,13 @@ similar constructs.
### Output Plugin Guidelines
* An output must conform to the `outputs.Output` interface.
* An output must conform to the [`telegraf.Output`](https://godoc.org/github.com/influxdata/telegraf#Output) interface.
* Outputs should call `outputs.Add` in their `init` function to register themselves.
See below for a quick example.
* To be available within Telegraf itself, plugins must add themselves to the
`github.com/influxdata/telegraf/plugins/outputs/all/all.go` file.
* The `SampleConfig` function should return valid toml that describes how the
output can be configured. This is included in `telegraf -sample-config`.
output can be configured. This is included in `telegraf config`.
* The `Description` function should say in one line what this output does.
### Output Example
@@ -212,8 +221,8 @@ func (s *Simple) Close() error {
}
func (s *Simple) Write(metrics []telegraf.Metric) error {
for _, pt := range points {
// write `pt` to the output sink here
for _, metric := range metrics {
// write `metric` to the output sink here
}
return nil
}
@@ -244,8 +253,8 @@ instantiating and creating the `Serializer` object.
You should also add the following to your SampleConfig() return:
```toml
## Data format to output. This can be "influx" or "graphite"
## Each data format has it's own unique set of configuration options, read
## Data format to output.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "influx"
@@ -266,35 +275,210 @@ and `Stop()` methods.
* Same as the `Output` guidelines, except that they must conform to the
`output.ServiceOutput` interface.
## Processor Plugins
This section is for developers who want to create a new processor plugin.
### Processor Plugin Guidelines
* A processor must conform to the [`telegraf.Processor`](https://godoc.org/github.com/influxdata/telegraf#Processor) interface.
* Processors should call `processors.Add` in their `init` function to register themselves.
See below for a quick example.
* To be available within Telegraf itself, plugins must add themselves to the
`github.com/influxdata/telegraf/plugins/processors/all/all.go` file.
* The `SampleConfig` function should return valid toml that describes how the
processor can be configured. This is included in the output of `telegraf config`.
* The `Description` function should say in one line what this processor does.
### Processor Example
```go
package printer
// printer.go
import (
"fmt"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/processors"
)
type Printer struct {
}
var sampleConfig = `
`
func (p *Printer) SampleConfig() string {
return sampleConfig
}
func (p *Printer) Description() string {
return "Print all metrics that pass through this filter."
}
func (p *Printer) Apply(in ...telegraf.Metric) []telegraf.Metric {
for _, metric := range in {
fmt.Println(metric.String())
}
return in
}
func init() {
processors.Add("printer", func() telegraf.Processor {
return &Printer{}
})
}
```
## Aggregator Plugins
This section is for developers who want to create a new aggregator plugin.
### Aggregator Plugin Guidelines
* An aggregator must conform to the [`telegraf.Aggregator`](https://godoc.org/github.com/influxdata/telegraf#Aggregator) interface.
* Aggregators should call `aggregators.Add` in their `init` function to register themselves.
See below for a quick example.
* To be available within Telegraf itself, plugins must add themselves to the
`github.com/influxdata/telegraf/plugins/aggregators/all/all.go` file.
* The `SampleConfig` function should return valid toml that describes how the
aggregator can be configured. This is included in `telegraf config`.
* The `Description` function should say in one line what this aggregator does.
* The Aggregator plugin will need to keep caches of metrics that have passed
through it. This should be done using the builtin `HashID()` function of each
metric.
* When the `Reset()` function is called, all caches should be cleared.
### Aggregator Example
```go
package min
// min.go
import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/aggregators"
)
type Min struct {
// caches for metric fields, names, and tags
fieldCache map[uint64]map[string]float64
nameCache map[uint64]string
tagCache map[uint64]map[string]string
}
func NewMin() telegraf.Aggregator {
m := &Min{}
m.Reset()
return m
}
var sampleConfig = `
## period is the flush & clear interval of the aggregator.
period = "30s"
## If true drop_original will drop the original metrics and
## only send aggregates.
drop_original = false
`
func (m *Min) SampleConfig() string {
return sampleConfig
}
func (m *Min) Description() string {
return "Keep the aggregate min of each metric passing through."
}
func (m *Min) Add(in telegraf.Metric) {
id := in.HashID()
if _, ok := m.nameCache[id]; !ok {
// hit an uncached metric, create caches for first time:
m.nameCache[id] = in.Name()
m.tagCache[id] = in.Tags()
m.fieldCache[id] = make(map[string]float64)
for k, v := range in.Fields() {
if fv, ok := convert(v); ok {
m.fieldCache[id][k] = fv
}
}
} else {
for k, v := range in.Fields() {
if fv, ok := convert(v); ok {
if _, ok := m.fieldCache[id][k]; !ok {
// hit an uncached field of a cached metric
m.fieldCache[id][k] = fv
continue
}
if fv < m.fieldCache[id][k] {
// set new minimum
m.fieldCache[id][k] = fv
}
}
}
}
}
func (m *Min) Push(acc telegraf.Accumulator) {
for id := range m.nameCache {
fields := map[string]interface{}{}
for k, v := range m.fieldCache[id] {
fields[k+"_min"] = v
}
acc.AddFields(m.nameCache[id], fields, m.tagCache[id])
}
}
func (m *Min) Reset() {
m.fieldCache = make(map[uint64]map[string]float64)
m.nameCache = make(map[uint64]string)
m.tagCache = make(map[uint64]map[string]string)
}
func convert(in interface{}) (float64, bool) {
switch v := in.(type) {
case float64:
return v, true
case int64:
return float64(v), true
default:
return 0, false
}
}
func init() {
aggregators.Add("min", func() telegraf.Aggregator {
return NewMin()
})
}
```
## Unit Tests
Before opening a pull request you should run the linter checks and
the short tests.
### Execute linter
execute `make lint`
### Execute short tests
execute `make test-short`
execute `make test`
### Execute long tests
### Execute integration tests
As Telegraf collects metrics from several third-party services it becomes a
difficult task to mock each service as some of them have complicated protocols
which would take some time to replicate.
Running the integration tests requires several docker containers to be
running. You can start the containers with:
```
make docker-run
```
To overcome this situation we've decided to use docker containers to provide a
fast and reproducible environment to test those services which require it.
For other situations
(i.e: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/redis/redis_test.go)
a simple mock will suffice.
And run the full test suite with:
```
make test-all
```
To execute Telegraf tests follow these simple steps:
- Install docker following [these](https://docs.docker.com/installation/)
instructions
- execute `make test`
**OSX users**: you will need to install `boot2docker` or `docker-machine`.
The Makefile will assume that you have a `docker-machine` box called `default` to
get the IP address.
### Unit test troubleshooting
Try cleaning up your test environment by executing `make docker-kill` and
re-running
Use `make docker-kill` to stop the containers.

Godeps (134 changed lines)

@@ -1,54 +1,92 @@
git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git 617c801af238c3af2d9e72c5d4a0f02edad03ce5
github.com/Shopify/sarama 8aadb476e66ca998f2f6bb3c993e9a2daa3666b9
github.com/Sirupsen/logrus 219c8cb75c258c552e999735be6df753ffc7afdc
github.com/amir/raidman 53c1b967405155bfc8758557863bf2e14f814687
github.com/aws/aws-sdk-go 13a12060f716145019378a10e2806c174356b857
github.com/beorn7/perks 3ac7bf7a47d159a033b107610db8a1b6575507a4
github.com/cenkalti/backoff 4dc77674aceaabba2c7e3da25d4c823edfb73f99
github.com/dancannon/gorethink e7cac92ea2bc52638791a021f212145acfedb1fc
github.com/davecgh/go-spew 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d
collectd.org 2ce144541b8903101fb8f1483cc0497a68798122
github.com/aerospike/aerospike-client-go 95e1ad7791bdbca44707fedbb29be42024900d9c
github.com/amir/raidman c74861fe6a7bb8ede0a010ce4485bdbb4fc4c985
github.com/apache/thrift 4aaa92ece8503a6da9bc6701604f69acf2b99d07
github.com/aws/aws-sdk-go c861d27d0304a79f727e9a8a4e2ac1e74602fdc0
github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
github.com/bsm/sarama-cluster abf039439f66c1ce78017f560b490612552f6472
github.com/cenkalti/backoff b02f2bbce11d7ea6b97f282ef1771b0fe2f65ef3
github.com/couchbase/go-couchbase bfe555a140d53dc1adf390f1a1d4b0fd4ceadb28
github.com/couchbase/gomemcached 4a25d2f4e1dea9ea7dd76dfd943407abf9b07d29
github.com/couchbase/goutils 5823a0cbaaa9008406021dc5daf80125ea30bba6
github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
github.com/dgrijalva/jwt-go dbeaa9332f19a944acb5736b4456cfcc02140e29
github.com/docker/docker f5ec1e2936dcbe7b5001c2b817188b095c700c27
github.com/docker/go-connections 990a1a1a70b0da4c4cb70e117971a4f0babfbf1a
github.com/eapache/go-resiliency b86b1ec0dd4209a588dc1285cdd471e73525c0b3
github.com/eapache/queue ded5959c0d4e360646dc9e9908cff48666781367
github.com/fsouza/go-dockerclient a49c8269a6899cae30da1f8a4b82e0ce945f9967
github.com/go-ini/ini 776aa739ce9373377cd16f526cdf06cb4c89b40f
github.com/go-sql-driver/mysql 1fca743146605a172a266e1654e01e5cd5669bee
github.com/golang/protobuf 552c7b9542c194800fd493123b3798ef0a832032
github.com/golang/snappy 427fb6fc07997f43afa32f35e850833760e489a7
github.com/gonuts/go-shellquote e842a11b24c6abfb3dd27af69a17f482e4b483c2
github.com/gorilla/context 1ea25387ff6f684839d82767c1733ff4d4d15d0a
github.com/gorilla/mux c9e326e2bdec29039a3761c07bece13133863e1e
github.com/eapache/go-xerial-snappy bb955e01b9346ac19dc29eb16586c90ded99a98c
github.com/eapache/queue 44cc805cf13205b55f69e14bcb69867d1ae92f98
github.com/eclipse/paho.mqtt.golang d4f545eb108a2d19f9b1a735689dbfb719bc21fb
github.com/go-logfmt/logfmt 390ab7935ee28ec6b286364bba9b4dd6410cb3d5
github.com/go-sql-driver/mysql 2e00b5cd70399450106cec6431c2e2ce3cae5034
github.com/gobwas/glob bea32b9cd2d6f55753d94a28e959b13f0244797a
github.com/go-ini/ini 9144852efba7c4daf409943ee90767da62d55438
github.com/gogo/protobuf 7b6c6391c4ff245962047fc1e2c6e08b1cdfa0e8
github.com/golang/protobuf 8ee79997227bf9b34611aee7946ae64735e6fd93
github.com/golang/snappy 7db9049039a047d955fe8c19b83c8ff5abd765c7
github.com/go-ole/go-ole be49f7c07711fcb603cff39e1de7c67926dc0ba7
github.com/google/go-cmp f94e52cad91c65a63acc1e75d4be223ea22e99bc
github.com/gorilla/mux 392c28fe23e1c45ddba891b0320b3b5df220beea
github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478
github.com/influxdata/config b79f6829346b8d6e78ba73544b1e1038f1f1c9da
github.com/influxdata/influxdb e3fef5593c21644f2b43af55d6e17e70910b0e48
github.com/influxdata/toml af4df43894b16e3fd2b788d01bd27ad0776ef2d0
github.com/jmespath/go-jmespath 0b12d6b521d83fc7f755e7cfc1b1fbdd35a01a74
github.com/klauspost/crc32 19b0b332c9e4516a6370a0456e6182c3b5036720
github.com/lib/pq e182dc4027e2ded4b19396d638610f2653295f36
github.com/matttproud/golang_protobuf_extensions d0c3fe89de86839aecf2e0579c40ba3bb336a453
github.com/miekg/dns cce6c130cdb92c752850880fd285bea1d64439dd
github.com/mreiferson/go-snappystream 028eae7ab5c4c9e2d1cb4c4ca1e53259bbe7e504
github.com/hashicorp/consul 63d2fc68239b996096a1c55a0d4b400ea4c2583f
github.com/influxdata/tail a395bf99fe07c233f41fba0735fa2b13b58588ea
github.com/influxdata/toml 5d1d907f22ead1cd47adde17ceec5bda9cacaf8f
github.com/influxdata/wlog 7c63b0a71ef8300adc255344d275e10e5c3a71ec
github.com/jackc/pgx 63f58fd32edb5684b9e9f4cfaac847c6b42b3917
github.com/jmespath/go-jmespath bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d
github.com/kardianos/osext c2c54e542fb797ad986b31721e1baedf214ca413
github.com/kardianos/service 6d3a0ee7d3425d9d835debc51a0ca1ffa28f4893
github.com/kballard/go-shellquote d8ec1a69a250a17bb0e419c386eac1f3711dc142
github.com/matttproud/golang_protobuf_extensions c12348ce28de40eed0136aa2b644d0ee0650e56c
github.com/Microsoft/go-winio ce2922f643c8fd76b46cadc7f404a06282678b34
github.com/miekg/dns 99f84ae56e75126dd77e5de4fae2ea034a468ca1
github.com/mitchellh/mapstructure d0303fe809921458f417bcf828397a65db30a7e4
github.com/multiplay/go-ts3 07477f49b8dfa3ada231afc7b7b17617d42afe8e
github.com/naoina/go-stringutil 6b638e95a32d0c1131db0e7fe83775cbea4a0d0b
github.com/nats-io/nats b13fc9d12b0b123ebc374e6b808c6228ae4234a3
github.com/nats-io/nuid 4f84f5f3b2786224e336af2e13dba0a0a80b76fa
github.com/nsqio/go-nsq 0b80d6f05e15ca1930e0c5e1d540ed627e299980
github.com/pmezard/go-difflib 792786c7400a136282c1664665ae0a8db921c6c2
github.com/prometheus/client_golang 18acf9993a863f4c4b40612e19cdd243e7c86831
github.com/nats-io/go-nats ea9585611a4ab58a205b9b125ebd74c389a6b898
github.com/nats-io/nats ea9585611a4ab58a205b9b125ebd74c389a6b898
github.com/nats-io/nuid 289cccf02c178dc782430d534e3c1f5b72af807f
github.com/nsqio/go-nsq eee57a3ac4174c55924125bb15eeeda8cffb6e6f
github.com/opencontainers/runc 89ab7f2ccc1e45ddf6485eaa802c35dcf321dfc8
github.com/opentracing-contrib/go-observer a52f2342449246d5bcc273e65cbdcfa5f7d6c63c
github.com/opentracing/opentracing-go 06f47b42c792fef2796e9681353e1d908c417827
github.com/openzipkin/zipkin-go-opentracing 1cafbdfde94fbf2b373534764e0863aa3bd0bf7b
github.com/pierrec/lz4 5c9560bfa9ace2bf86080bf40d46b34ae44604df
github.com/pierrec/xxHash 5a004441f897722c627870a981d02b29924215fa
github.com/pkg/errors 645ef00459ed84a119197bfb8d8205042c6df63d
github.com/pmezard/go-difflib/difflib 792786c7400a136282c1664665ae0a8db921c6c2
github.com/prometheus/client_golang c317fb74746eac4fc65fe3909195f4cf67c5562a
github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
github.com/prometheus/common e8eabff8812b05acf522b45fdcd725a785188e37
github.com/prometheus/procfs 406e5b7bfd8201a36e2bb5f7bdae0b03380c2ce8
github.com/samuel/go-zookeeper 218e9c81c0dd8b3b18172b2bbfad92cc7d6db55f
github.com/shirou/gopsutil 1de1357e7737a536c7f4ff6be7bd27977db4d2cb
github.com/soniah/gosnmp b1b4f885b12c5dcbd021c5cee1c904110de6db7d
github.com/streadway/amqp b4f3ceab0337f013208d31348b578d83c0064744
github.com/prometheus/common dd2f054febf4a6c00f2343686efb775948a8bff4
github.com/prometheus/procfs 1878d9fbb537119d24b21ca07effd591627cd160
github.com/rcrowley/go-metrics 1f30fe9094a513ce4c700b9a54458bbb0c96996c
github.com/samuel/go-zookeeper 1d7be4effb13d2d908342d349d71a284a7542693
github.com/satori/go.uuid 5bf94b69c6b68ee1b541973bb8e1144db23a194b
github.com/shirou/gopsutil 384a55110aa5ae052eb93ea94940548c1e305a99
github.com/shirou/w32 3c9377fc6748f222729a8270fe2775d149a249ad
github.com/Shopify/sarama 3b1b38866a79f06deddf0487d5c27ba0697ccd65
github.com/Sirupsen/logrus 61e43dc76f7ee59a82bdf3d71033dc12bea4c77d
github.com/soniah/gosnmp 5ad50dc75ab389f8a1c9f8a67d3a1cd85f67ed15
github.com/StackExchange/wmi f3e2bae1e0cb5aef83e319133eabfee30013a4a5
github.com/streadway/amqp 63795daa9a446c920826655f26ba31c81c860fd6
github.com/stretchr/objx 1a9d0bb9f541897e62256577b352fdbc1fb4fd94
github.com/stretchr/testify 1f4a1643a57e798696635ea4c126e9127adb7d3c
github.com/wvanbergen/kafka 1a8639a45164fcc245d5c7b4bd3ccfbd1a0ffbf3
github.com/wvanbergen/kazoo-go 0f768712ae6f76454f987c3356177e138df258f8
github.com/stretchr/testify 4d4bfba8f1d1027c4fdbe371823030df51419987
github.com/vjeantet/grok d73e972b60935c7fec0b4ffbc904ed39ecaf7efe
github.com/wvanbergen/kafka bc265fedb9ff5b5c5d3c0fdcef4a819b3523d3ee
github.com/wvanbergen/kazoo-go 968957352185472eacb69215fa3dbfcfdbac1096
github.com/yuin/gopher-lua 66c871e454fcf10251c61bf8eff02d0978cae75a
github.com/zensqlmonitor/go-mssqldb ffe5510c6fa5e15e6d983210ab501c815b56b363
golang.org/x/crypto 5dc8cb4b8a8eb076cbb5a06bc3b8682c15bdbbd3
golang.org/x/net 6acef71eb69611914f7a30939ea9f6e194c78172
golang.org/x/text a71fd10341b064c10f4a81ceac72bcf70f26ea34
gopkg.in/dancannon/gorethink.v1 7d1af5be49cb5ecc7b177bf387d232050299d6ef
gopkg.in/fatih/pool.v2 cba550ebf9bce999a02e963296d4bc7a486cb715
gopkg.in/mgo.v2 d90005c5262a3463800497ea5a89aed5fe22c886
gopkg.in/yaml.v2 a83829b6f1293c91addabc89d0571c246397bbf4
golang.org/x/crypto dc137beb6cce2043eb6b5f223ab8bf51c32459f4
golang.org/x/net f2499483f923065a842d38eb4c7f1927e6fc6e6d
golang.org/x/sys 739734461d1c916b6c72a63d7efda2b27edb369f
golang.org/x/text 506f9d5c962f284575e88337e7d9296d27e729d3
gopkg.in/asn1-ber.v1 4e86f4367175e39f69d9358a5f17b4dda270378d
gopkg.in/fatih/pool.v2 6e328e67893eb46323ad06f0e92cb9536babbabc
gopkg.in/fsnotify.v1 a8a77c9133d2d6fd8334f3260d06f60e8d80a5fb
gopkg.in/gorethink/gorethink.v3 7ab832f7b65573104a555d84a27992ae9ea1f659
gopkg.in/ldap.v2 8168ee085ee43257585e50c6441aadf54ecb2c9f
gopkg.in/mgo.v2 3f83fa5005286a7fe593b055f0d7771a7dce4655
gopkg.in/olivere/elastic.v5 3113f9b9ad37509fe5f8a0e5e91c96fdc4435e26
gopkg.in/tomb.v1 dd632973f1e7218eb1089048e0798ec9ae7dceb8
gopkg.in/yaml.v2 4c78c975fe7c825c6d1466c42be594d1d6f3aba6


@@ -1,53 +0,0 @@
git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git 617c801af238c3af2d9e72c5d4a0f02edad03ce5
github.com/Shopify/sarama 8aadb476e66ca998f2f6bb3c993e9a2daa3666b9
github.com/Sirupsen/logrus 219c8cb75c258c552e999735be6df753ffc7afdc
github.com/StackExchange/wmi f3e2bae1e0cb5aef83e319133eabfee30013a4a5
github.com/amir/raidman 53c1b967405155bfc8758557863bf2e14f814687
github.com/aws/aws-sdk-go 13a12060f716145019378a10e2806c174356b857
github.com/beorn7/perks 3ac7bf7a47d159a033b107610db8a1b6575507a4
github.com/cenkalti/backoff 4dc77674aceaabba2c7e3da25d4c823edfb73f99
github.com/dancannon/gorethink e7cac92ea2bc52638791a021f212145acfedb1fc
github.com/davecgh/go-spew 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d
github.com/eapache/go-resiliency b86b1ec0dd4209a588dc1285cdd471e73525c0b3
github.com/eapache/queue ded5959c0d4e360646dc9e9908cff48666781367
github.com/fsouza/go-dockerclient a49c8269a6899cae30da1f8a4b82e0ce945f9967
github.com/go-ole/go-ole 50055884d646dd9434f16bbb5c9801749b9bafe4
github.com/go-sql-driver/mysql 1fca743146605a172a266e1654e01e5cd5669bee
github.com/golang/protobuf 552c7b9542c194800fd493123b3798ef0a832032
github.com/golang/snappy 427fb6fc07997f43afa32f35e850833760e489a7
github.com/gonuts/go-shellquote e842a11b24c6abfb3dd27af69a17f482e4b483c2
github.com/gorilla/context 1ea25387ff6f684839d82767c1733ff4d4d15d0a
github.com/gorilla/mux c9e326e2bdec29039a3761c07bece13133863e1e
github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478
github.com/influxdata/config b79f6829346b8d6e78ba73544b1e1038f1f1c9da
github.com/influxdata/influxdb e3fef5593c21644f2b43af55d6e17e70910b0e48
github.com/influxdata/toml af4df43894b16e3fd2b788d01bd27ad0776ef2d0
github.com/klauspost/crc32 19b0b332c9e4516a6370a0456e6182c3b5036720
github.com/lib/pq e182dc4027e2ded4b19396d638610f2653295f36
github.com/lxn/win 9a7734ea4db26bc593d52f6a8a957afdad39c5c1
github.com/matttproud/golang_protobuf_extensions d0c3fe89de86839aecf2e0579c40ba3bb336a453
github.com/miekg/dns cce6c130cdb92c752850880fd285bea1d64439dd
github.com/mreiferson/go-snappystream 028eae7ab5c4c9e2d1cb4c4ca1e53259bbe7e504
github.com/naoina/go-stringutil 6b638e95a32d0c1131db0e7fe83775cbea4a0d0b
github.com/nats-io/nats b13fc9d12b0b123ebc374e6b808c6228ae4234a3
github.com/nats-io/nuid 4f84f5f3b2786224e336af2e13dba0a0a80b76fa
github.com/nsqio/go-nsq 0b80d6f05e15ca1930e0c5e1d540ed627e299980
github.com/prometheus/client_golang 18acf9993a863f4c4b40612e19cdd243e7c86831
github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
github.com/prometheus/common e8eabff8812b05acf522b45fdcd725a785188e37
github.com/prometheus/procfs 406e5b7bfd8201a36e2bb5f7bdae0b03380c2ce8
github.com/samuel/go-zookeeper 218e9c81c0dd8b3b18172b2bbfad92cc7d6db55f
github.com/shirou/gopsutil 1de1357e7737a536c7f4ff6be7bd27977db4d2cb
github.com/shirou/w32 ada3ba68f000aa1b58580e45c9d308fe0b7fc5c5
github.com/soniah/gosnmp b1b4f885b12c5dcbd021c5cee1c904110de6db7d
github.com/streadway/amqp b4f3ceab0337f013208d31348b578d83c0064744
github.com/stretchr/testify 1f4a1643a57e798696635ea4c126e9127adb7d3c
github.com/wvanbergen/kafka 1a8639a45164fcc245d5c7b4bd3ccfbd1a0ffbf3
github.com/wvanbergen/kazoo-go 0f768712ae6f76454f987c3356177e138df258f8
github.com/zensqlmonitor/go-mssqldb ffe5510c6fa5e15e6d983210ab501c815b56b363
golang.org/x/net 6acef71eb69611914f7a30939ea9f6e194c78172
golang.org/x/text a71fd10341b064c10f4a81ceac72bcf70f26ea34
gopkg.in/dancannon/gorethink.v1 7d1af5be49cb5ecc7b177bf387d232050299d6ef
gopkg.in/fatih/pool.v2 cba550ebf9bce999a02e963296d4bc7a486cb715
gopkg.in/mgo.v2 d90005c5262a3463800497ea5a89aed5fe22c886
gopkg.in/yaml.v2 a83829b6f1293c91addabc89d0571c246397bbf4

Makefile

@@ -1,108 +1,127 @@
UNAME := $(shell sh -c 'uname')
VERSION := $(shell sh -c 'git describe --always --tags')
PREFIX := /usr/local
VERSION := $(shell git describe --exact-match --tags 2>/dev/null)
BRANCH := $(shell git rev-parse --abbrev-ref HEAD)
COMMIT := $(shell git rev-parse --short HEAD)
ifdef GOBIN
PATH := $(GOBIN):$(PATH)
else
PATH := $(subst :,/bin:,$(GOPATH))/bin:$(PATH)
endif
# Standard Telegraf build
default: prepare build
TELEGRAF := telegraf$(shell go tool dist env | grep -q 'GOOS=.windows.' && echo .exe)
# Windows build
windows: prepare-windows build-windows
LDFLAGS := $(LDFLAGS) -X main.commit=$(COMMIT) -X main.branch=$(BRANCH)
ifdef VERSION
LDFLAGS += -X main.version=$(VERSION)
endif
# Only run the build (no dependency grabbing)
build:
go install -ldflags "-X main.Version=$(VERSION)" ./...
all:
$(MAKE) deps
$(MAKE) telegraf
build-windows:
go build -o telegraf.exe -ldflags \
"-X main.Version=$(VERSION)" \
./cmd/telegraf/telegraf.go
build-for-docker:
CGO_ENABLED=0 GOOS=linux go build -installsuffix cgo -o telegraf -ldflags \
"-s -X main.Version=$(VERSION)" \
./cmd/telegraf/telegraf.go
# Build with race detector
dev: prepare
go build -race -ldflags "-X main.Version=$(VERSION)" ./...
# run package script
package:
./scripts/build.py --package --version="$(VERSION)" --platform=linux --arch=all --upload
# Get dependencies and use gdm to checkout changesets
prepare:
deps:
go get github.com/sparrc/gdm
gdm restore
# Use the windows godeps file to prepare dependencies
prepare-windows:
go get github.com/sparrc/gdm
gdm restore -f Godeps_windows
telegraf:
go build -i -o $(TELEGRAF) -ldflags "$(LDFLAGS)" ./cmd/telegraf/telegraf.go
# Run all docker containers necessary for unit tests
go-install:
go install -ldflags "-w -s $(LDFLAGS)" ./cmd/telegraf
install: telegraf
mkdir -p $(DESTDIR)$(PREFIX)/bin/
cp $(TELEGRAF) $(DESTDIR)$(PREFIX)/bin/
test:
go test -short ./...
test-windows:
go test ./plugins/inputs/ping/...
go test ./plugins/inputs/win_perf_counters/...
go test ./plugins/inputs/win_services/...
lint:
go vet ./...
test-all: lint
go test ./...
package:
./scripts/build.py --package --platform=all --arch=all
clean:
-rm -f telegraf
-rm -f telegraf.exe
docker-image:
./scripts/build.py --package --platform=linux --arch=amd64
cp build/telegraf*$(COMMIT)*.deb .
docker build -f scripts/dev.docker --build-arg "package=telegraf*$(COMMIT)*.deb" -t "telegraf-dev:$(COMMIT)" .
# Run all docker containers necessary for integration tests
docker-run:
ifeq ($(UNAME), Darwin)
docker run --name aerospike -p "3000:3000" -d aerospike/aerospike-server:3.9.0
docker run --name zookeeper -p "2181:2181" -d wurstmeister/zookeeper
docker run --name kafka \
-e ADVERTISED_HOST=$(shell sh -c 'boot2docker ip || docker-machine ip default') \
-e ADVERTISED_PORT=9092 \
-p "2181:2181" -p "9092:9092" \
-d spotify/kafka
endif
ifeq ($(UNAME), Linux)
docker run --name kafka \
-e ADVERTISED_HOST=localhost \
-e ADVERTISED_PORT=9092 \
-p "2181:2181" -p "9092:9092" \
-d spotify/kafka
endif
--link zookeeper:zookeeper \
-e KAFKA_ADVERTISED_HOST_NAME=localhost \
-e KAFKA_ADVERTISED_PORT=9092 \
-e KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181 \
-e KAFKA_CREATE_TOPICS="test:1:1" \
-p "9092:9092" \
-d wurstmeister/kafka
docker run --name elasticsearch -p "9200:9200" -p "9300:9300" -d elasticsearch:5
docker run --name mysql -p "3306:3306" -e MYSQL_ALLOW_EMPTY_PASSWORD=yes -d mysql
docker run --name memcached -p "11211:11211" -d memcached
docker run --name postgres -p "5432:5432" -d postgres
docker run --name rabbitmq -p "15672:15672" -p "5672:5672" -d rabbitmq:3-management
docker run --name opentsdb -p "4242:4242" -d petergrace/opentsdb-docker
docker run --name redis -p "6379:6379" -d redis
docker run --name aerospike -p "3000:3000" -d aerospike
docker run --name nsq -p "4150:4150" -d nsqio/nsq /nsqd
docker run --name mqtt -p "1883:1883" -d ncarlier/mqtt
docker run --name riemann -p "5555:5555" -d blalor/riemann
docker run --name snmp -p "31161:31161/udp" -d titilambert/snmpsim
docker run --name riemann -p "5555:5555" -d stealthly/docker-riemann
docker run --name nats -p "4222:4222" -d nats
docker run --name openldap \
-e SLAPD_CONFIG_ROOTDN="cn=manager,cn=config" \
-e SLAPD_CONFIG_ROOTPW="secret" \
-p "389:389" -p "636:636" \
-d cobaugh/openldap-alpine
docker run --name cratedb \
-p "6543:5432" \
-d crate:2.2 \
-Cnetwork.host=0.0.0.0 \
-Ctransport.host=localhost \
-Clicense.enterprise=false
# Run docker containers necessary for CircleCI unit tests
# Run docker containers necessary for integration tests; skipping services provided
# by CircleCI
docker-run-circle:
docker run --name aerospike -p "3000:3000" -d aerospike/aerospike-server:3.9.0
docker run --name zookeeper -p "2181:2181" -d wurstmeister/zookeeper
docker run --name kafka \
-e ADVERTISED_HOST=localhost \
-e ADVERTISED_PORT=9092 \
-p "2181:2181" -p "9092:9092" \
-d spotify/kafka
docker run --name opentsdb -p "4242:4242" -d petergrace/opentsdb-docker
docker run --name aerospike -p "3000:3000" -d aerospike
--link zookeeper:zookeeper \
-e KAFKA_ADVERTISED_HOST_NAME=localhost \
-e KAFKA_ADVERTISED_PORT=9092 \
-e KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181 \
-e KAFKA_CREATE_TOPICS="test:1:1" \
-p "9092:9092" \
-d wurstmeister/kafka
docker run --name elasticsearch -p "9200:9200" -p "9300:9300" -d elasticsearch:5
docker run --name nsq -p "4150:4150" -d nsqio/nsq /nsqd
docker run --name mqtt -p "1883:1883" -d ncarlier/mqtt
docker run --name riemann -p "5555:5555" -d blalor/riemann
docker run --name snmp -p "31161:31161/udp" -d titilambert/snmpsim
docker run --name riemann -p "5555:5555" -d stealthly/docker-riemann
docker run --name nats -p "4222:4222" -d nats
docker run --name openldap \
-e SLAPD_CONFIG_ROOTDN="cn=manager,cn=config" \
-e SLAPD_CONFIG_ROOTPW="secret" \
-p "389:389" -p "636:636" \
-d cobaugh/openldap-alpine
# Kill all docker containers, ignore errors
docker-kill:
-docker kill nsq aerospike redis opentsdb rabbitmq postgres memcached mysql kafka mqtt riemann snmp
-docker rm nsq aerospike redis opentsdb rabbitmq postgres memcached mysql kafka mqtt riemann snmp
-docker kill aerospike elasticsearch kafka memcached mqtt mysql nats nsq \
openldap postgres rabbitmq redis riemann zookeeper cratedb
-docker rm aerospike elasticsearch kafka memcached mqtt mysql nats nsq \
openldap postgres rabbitmq redis riemann zookeeper cratedb
# Run full unit tests using docker containers (includes setup and teardown)
test: vet docker-kill docker-run
# Sleeping for kafka leadership election, TSDB setup, etc.
sleep 60
# SUCCESS, running tests
go test -race ./...
# Run "short" unit tests
test-short: vet
go test -short ./...
vet:
go vet ./...
.PHONY: test test-short vet build default
.PHONY: deps telegraf telegraf.exe install test test-windows lint test-all \
package clean docker-run docker-run-circle docker-kill docker-image
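For reference, a from-source build with the updated targets might look like the following (a sketch assuming Go and GNU make are installed; target names are taken from the Makefile above):
```sh
make deps      # install gdm and restore the pinned dependencies
make telegraf  # build the binary, embedding version/branch/commit via LDFLAGS
make test      # run the short unit tests
```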

README.md

@@ -1,212 +1,218 @@
# Telegraf [![Circle CI](https://circleci.com/gh/influxdata/telegraf.svg?style=svg)](https://circleci.com/gh/influxdata/telegraf)
# Telegraf [![Circle CI](https://circleci.com/gh/influxdata/telegraf.svg?style=svg)](https://circleci.com/gh/influxdata/telegraf) [![Docker pulls](https://img.shields.io/docker/pulls/library/telegraf.svg)](https://hub.docker.com/_/telegraf/)
Telegraf is an agent written in Go for collecting metrics from the system it's
running on, or from other services, and writing them into InfluxDB or other
[outputs](https://github.com/influxdata/telegraf#supported-output-plugins).
Telegraf is an agent written in Go for collecting, processing, aggregating,
and writing metrics.
Design goals are to have a minimal memory footprint with a plugin system so
that developers in the community can easily add support for collecting metrics
from well known services (like Hadoop, Postgres, or Redis) and third party
APIs (like Mailchimp, AWS CloudWatch, or Google Analytics).
from local or remote services.
New input and output plugins are designed to be easy to contribute,
Telegraf is plugin-driven and has the concept of 4 distinct plugin types:
1. [Input Plugins](#input-plugins) collect metrics from the system, services, or 3rd party APIs
2. [Processor Plugins](#processor-plugins) transform, decorate, and/or filter metrics
3. [Aggregator Plugins](#aggregator-plugins) create aggregate metrics (e.g. mean, min, max, quantiles, etc.)
4. [Output Plugins](#output-plugins) write metrics to various destinations
For more information on Processor and Aggregator plugins please [read this](./docs/AGGREGATORS_AND_PROCESSORS.md).
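To make the four plugin types concrete, here is a minimal, hypothetical `telegraf.conf` sketch; the plugin names appear in the lists below, but the option values are illustrative only:
```toml
# Input: collect CPU metrics.
[[inputs.cpu]]
  percpu = true

# Processor: print metrics as they pass through the pipeline.
[[processors.printer]]

# Aggregator: emit min/max of each field over the period.
[[aggregators.minmax]]
  period = "30s"

# Output: write metrics to InfluxDB.
[[outputs.influxdb]]
  urls = ["http://127.0.0.1:8086"]
```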
New plugins are designed to be easy to contribute;
we'll eagerly accept pull
requests and will manage the set of plugins that Telegraf supports.
See the [contributing guide](CONTRIBUTING.md) for instructions on writing
new plugins.
## Contributing
There are many ways to contribute:
- Fix and [report bugs](https://github.com/influxdata/telegraf/issues/new)
- [Improve documentation](https://github.com/influxdata/telegraf/issues?q=is%3Aopen+label%3Adocumentation)
- [Review code and feature proposals](https://github.com/influxdata/telegraf/pulls)
- Answer questions on github and on the [Community Site](https://community.influxdata.com/)
- [Contribute plugins](CONTRIBUTING.md)
## Installation:
NOTE: Telegraf 0.10.x is **not** backwards-compatible with previous versions
of telegraf, both in the database layout and the configuration file. 0.2.x
will continue to be supported; see below for download links.
For more details on the differences between Telegraf 0.2.x and 0.10.x, see
the [release blog post](https://influxdata.com/blog/announcing-telegraf-0-10-0/).
### Linux deb and rpm Packages:
Latest:
* http://get.influxdb.org/telegraf/telegraf_0.11.0-1_amd64.deb
* http://get.influxdb.org/telegraf/telegraf-0.11.0-1.x86_64.rpm
Latest (arm):
* http://get.influxdb.org/telegraf/telegraf_0.11.0-1_arm.deb
* http://get.influxdb.org/telegraf/telegraf-0.11.0-1.arm.rpm
0.2.x:
* http://get.influxdb.org/telegraf/telegraf_0.2.4_amd64.deb
* http://get.influxdb.org/telegraf/telegraf-0.2.4-1.x86_64.rpm
##### Package Instructions:
* Telegraf binary is installed in `/usr/bin/telegraf`
* Telegraf daemon configuration file is in `/etc/telegraf/telegraf.conf`
* On sysv systems, the telegraf daemon can be controlled via
`service telegraf [action]`
* On systemd systems (such as Ubuntu 15+), the telegraf daemon can be
controlled via `systemctl [action] telegraf`
### yum/apt Repositories:
There is a yum/apt repo available for the whole InfluxData stack; see
[here](https://docs.influxdata.com/influxdb/v0.9/introduction/installation/#installation)
for instructions, replacing the `influxdb` package name with `telegraf`.
### Linux tarballs:
Latest:
* http://get.influxdb.org/telegraf/telegraf-0.11.0-1_linux_amd64.tar.gz
* http://get.influxdb.org/telegraf/telegraf-0.11.0-1_linux_i386.tar.gz
* http://get.influxdb.org/telegraf/telegraf-0.11.0-1_linux_arm.tar.gz
0.2.x:
* http://get.influxdb.org/telegraf/telegraf_linux_amd64_0.2.4.tar.gz
* http://get.influxdb.org/telegraf/telegraf_linux_386_0.2.4.tar.gz
* http://get.influxdb.org/telegraf/telegraf_linux_arm_0.2.4.tar.gz
##### tarball Instructions:
To install the full directory structure with config file, run:
```
sudo tar -C / -zxvf ./telegraf-0.11.0-1_linux_amd64.tar.gz
```
To extract only the binary, run:
```
tar -zxvf telegraf-0.11.0-1_linux_amd64.tar.gz --strip-components=3 ./usr/bin/telegraf
```
You can download the binaries directly from the [downloads](https://www.influxdata.com/downloads) page
or from the [releases](https://github.com/influxdata/telegraf/releases) section.
### Ansible Role:
Ansible role: https://github.com/rossmcdonald/telegraf
### OSX via Homebrew:
```
brew update
brew install telegraf
```
### Windows Binaries (EXPERIMENTAL)
Latest:
* http://get.influxdb.org/telegraf/telegraf-0.11.0-1_windows_amd64.zip
* http://get.influxdb.org/telegraf/telegraf-0.11.0-1_windows_i386.zip
### From Source:
Telegraf manages dependencies via [gdm](https://github.com/sparrc/gdm),
which gets installed via the Makefile
if you don't have it already. You also must build with golang version 1.5+.
Telegraf requires golang version 1.8+, and the Makefile requires GNU make.
Dependencies are managed with [gdm](https://github.com/sparrc/gdm),
which is installed by the Makefile if you don't have it already.
1. [Install Go](https://golang.org/doc/install)
2. [Setup your GOPATH](https://golang.org/doc/code.html#GOPATH)
3. Run `go get github.com/influxdata/telegraf`
3. Run `go get -d github.com/influxdata/telegraf`
4. Run `cd $GOPATH/src/github.com/influxdata/telegraf`
5. Run `make`
### Nightly Builds
These builds are generated from the master branch:
- [telegraf_nightly_amd64.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_amd64.deb)
- [telegraf_nightly_arm64.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_arm64.deb)
- [telegraf-nightly.arm64.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.arm64.rpm)
- [telegraf_nightly_armel.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_armel.deb)
- [telegraf-nightly.armel.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.armel.rpm)
- [telegraf_nightly_armhf.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_armhf.deb)
- [telegraf-nightly.armv6hl.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.armv6hl.rpm)
- [telegraf-nightly_freebsd_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_freebsd_amd64.tar.gz)
- [telegraf-nightly_freebsd_i386.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_freebsd_i386.tar.gz)
- [telegraf_nightly_i386.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_i386.deb)
- [telegraf-nightly.i386.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.i386.rpm)
- [telegraf-nightly_linux_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_amd64.tar.gz)
- [telegraf-nightly_linux_arm64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_arm64.tar.gz)
- [telegraf-nightly_linux_armel.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_armel.tar.gz)
- [telegraf-nightly_linux_armhf.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_armhf.tar.gz)
- [telegraf-nightly_linux_i386.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_i386.tar.gz)
- [telegraf-nightly_linux_s390x.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_s390x.tar.gz)
- [telegraf_nightly_s390x.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_s390x.deb)
- [telegraf-nightly.s390x.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.s390x.rpm)
- [telegraf-nightly_windows_amd64.zip](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_windows_amd64.zip)
- [telegraf-nightly_windows_i386.zip](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_windows_i386.zip)
- [telegraf-nightly.x86_64.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.x86_64.rpm)
- [telegraf-static-nightly_linux_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-static-nightly_linux_amd64.tar.gz)
## How to use it:
```console
$ telegraf -help
Telegraf, The plugin-driven server agent for collecting and reporting metrics.
See usage with:
Usage:
telegraf <flags>
The flags are:
-config <file> configuration file to load
-test gather metrics once, print them to stdout, and exit
-sample-config print out full sample configuration to stdout
-config-directory directory containing additional *.conf files
-input-filter filter the input plugins to enable, separator is :
-output-filter filter the output plugins to enable, separator is :
-usage print usage for a plugin, ie, 'telegraf -usage mysql'
-debug print metrics as they're generated to stdout
-quiet run in quiet mode
-version print the version to stdout
Examples:
# generate a telegraf config file:
telegraf -sample-config > telegraf.conf
# generate config with only cpu input & influxdb output plugins defined
telegraf -sample-config -input-filter cpu -output-filter influxdb
# run a single telegraf collection, outputting metrics to stdout
telegraf -config telegraf.conf -test
# run telegraf with all plugins defined in config file
telegraf -config telegraf.conf
# run telegraf, enabling the cpu & memory input, and influxdb output plugins
telegraf -config telegraf.conf -input-filter cpu:mem -output-filter influxdb
```
./telegraf --help
```
#### Generate a telegraf config file:
```
./telegraf config > telegraf.conf
```
#### Generate config with only cpu input & influxdb output plugins defined:
```
./telegraf --input-filter cpu --output-filter influxdb config
```
#### Run a single telegraf collection, outputting metrics to stdout:
```
./telegraf --config telegraf.conf --test
```
#### Run telegraf with all plugins defined in config file:
```
./telegraf --config telegraf.conf
```
#### Run telegraf, enabling the cpu & memory input, and influxdb output plugins:
```
./telegraf --config telegraf.conf --input-filter cpu:mem --output-filter influxdb
```
## Configuration
See the [configuration guide](docs/CONFIGURATION.md) for a rundown of the more advanced
configuration options.
## Supported Input Plugins
## Input Plugins
Telegraf currently has support for collecting metrics from many sources. For
more information on each, please look at the directory of the same name in
`plugins/inputs`.
Currently implemented sources:
* aerospike
* apache
* bcache
* couchdb
* disque
* dns query time
* docker
* dovecot
* elasticsearch
* exec (generic executable plugin, supports JSON, influx and graphite)
* haproxy
* httpjson (generic JSON-emitting http service plugin)
* influxdb
* jolokia
* leofs
* lustre2
* mailchimp
* memcached
* mesos
* mongodb
* mysql
* net_response
* nginx
* nsq
* phpfpm
* phusion passenger
* ping
* postgresql
* powerdns
* procstat
* prometheus
* puppetagent
* rabbitmq
* raindrops
* redis
* rethinkdb
* riak
* sensors (only available if built from source)
* snmp
* sql server (microsoft)
* twemproxy
* zfs
* zookeeper
* win_perf_counters (windows performance counters)
* system
* [aerospike](./plugins/inputs/aerospike)
* [amqp_consumer](./plugins/inputs/amqp_consumer) (rabbitmq)
* [apache](./plugins/inputs/apache)
* [aws cloudwatch](./plugins/inputs/cloudwatch)
* [bcache](./plugins/inputs/bcache)
* [bond](./plugins/inputs/bond)
* [cassandra](./plugins/inputs/cassandra)
* [ceph](./plugins/inputs/ceph)
* [cgroup](./plugins/inputs/cgroup)
* [chrony](./plugins/inputs/chrony)
* [consul](./plugins/inputs/consul)
* [conntrack](./plugins/inputs/conntrack)
* [couchbase](./plugins/inputs/couchbase)
* [couchdb](./plugins/inputs/couchdb)
* [DC/OS](./plugins/inputs/dcos)
* [disque](./plugins/inputs/disque)
* [dmcache](./plugins/inputs/dmcache)
* [dns query time](./plugins/inputs/dns_query)
* [docker](./plugins/inputs/docker)
* [dovecot](./plugins/inputs/dovecot)
* [elasticsearch](./plugins/inputs/elasticsearch)
* [exec](./plugins/inputs/exec) (generic executable plugin, supports JSON, influx, graphite and nagios)
* [fail2ban](./plugins/inputs/fail2ban)
* [filestat](./plugins/inputs/filestat)
* [fluentd](./plugins/inputs/fluentd)
* [graylog](./plugins/inputs/graylog)
* [haproxy](./plugins/inputs/haproxy)
* [hddtemp](./plugins/inputs/hddtemp)
* [http_response](./plugins/inputs/http_response)
* [httpjson](./plugins/inputs/httpjson) (generic JSON-emitting http service plugin)
* [internal](./plugins/inputs/internal)
* [influxdb](./plugins/inputs/influxdb)
* [interrupts](./plugins/inputs/interrupts)
* [ipmi_sensor](./plugins/inputs/ipmi_sensor)
* [iptables](./plugins/inputs/iptables)
* [jolokia](./plugins/inputs/jolokia) (deprecated, use [jolokia2](./plugins/inputs/jolokia2))
* [jolokia2](./plugins/inputs/jolokia2)
* [kapacitor](./plugins/inputs/kapacitor)
* [kubernetes](./plugins/inputs/kubernetes)
* [leofs](./plugins/inputs/leofs)
* [lustre2](./plugins/inputs/lustre2)
* [mailchimp](./plugins/inputs/mailchimp)
* [memcached](./plugins/inputs/memcached)
* [mesos](./plugins/inputs/mesos)
* [minecraft](./plugins/inputs/minecraft)
* [mongodb](./plugins/inputs/mongodb)
* [mysql](./plugins/inputs/mysql)
* [net_response](./plugins/inputs/net_response)
* [nginx](./plugins/inputs/nginx)
* [nginx_plus](./plugins/inputs/nginx_plus)
* [nsq](./plugins/inputs/nsq)
* [nstat](./plugins/inputs/nstat)
* [ntpq](./plugins/inputs/ntpq)
* [openldap](./plugins/inputs/openldap)
* [opensmtpd](./plugins/inputs/opensmtpd)
* [pf](./plugins/inputs/pf)
* [phpfpm](./plugins/inputs/phpfpm)
* [phusion passenger](./plugins/inputs/passenger)
* [ping](./plugins/inputs/ping)
* [postfix](./plugins/inputs/postfix)
* [postgresql_extensible](./plugins/inputs/postgresql_extensible)
* [postgresql](./plugins/inputs/postgresql)
* [powerdns](./plugins/inputs/powerdns)
* [procstat](./plugins/inputs/procstat)
* [prometheus](./plugins/inputs/prometheus) (can be used for [Caddy server](./plugins/inputs/prometheus/README.md#usage-for-caddy-http-server))
* [puppetagent](./plugins/inputs/puppetagent)
* [rabbitmq](./plugins/inputs/rabbitmq)
* [raindrops](./plugins/inputs/raindrops)
* [redis](./plugins/inputs/redis)
* [rethinkdb](./plugins/inputs/rethinkdb)
* [riak](./plugins/inputs/riak)
* [salesforce](./plugins/inputs/salesforce)
* [sensors](./plugins/inputs/sensors)
* [smart](./plugins/inputs/smart)
* [snmp](./plugins/inputs/snmp)
* [snmp_legacy](./plugins/inputs/snmp_legacy)
* [solr](./plugins/inputs/solr)
* [sql server](./plugins/inputs/sqlserver) (microsoft)
* [teamspeak](./plugins/inputs/teamspeak)
* [tomcat](./plugins/inputs/tomcat)
* [twemproxy](./plugins/inputs/twemproxy)
* [unbound](./plugins/inputs/unbound)
* [varnish](./plugins/inputs/varnish)
* [zfs](./plugins/inputs/zfs)
* [zookeeper](./plugins/inputs/zookeeper)
* [win_perf_counters](./plugins/inputs/win_perf_counters) (windows performance counters)
* [win_services](./plugins/inputs/win_services)
* [sysstat](./plugins/inputs/sysstat)
* [system](./plugins/inputs/system)
* cpu
* mem
* net
@@ -216,39 +222,76 @@ Currently implemented sources:
* swap
* processes
* kernel (/proc/stat)
* kernel (/proc/vmstat)
* linux_sysctl_fs (/proc/sys/fs)
Telegraf can also collect metrics via the following service plugins:
* statsd
* udp_listener
* tcp_listener
* mqtt_consumer
* kafka_consumer
* nats_consumer
* github_webhooks
* [http_listener](./plugins/inputs/http_listener)
* [kafka_consumer](./plugins/inputs/kafka_consumer)
* [mqtt_consumer](./plugins/inputs/mqtt_consumer)
* [nats_consumer](./plugins/inputs/nats_consumer)
* [nsq_consumer](./plugins/inputs/nsq_consumer)
* [logparser](./plugins/inputs/logparser)
* [statsd](./plugins/inputs/statsd)
* [socket_listener](./plugins/inputs/socket_listener)
* [tail](./plugins/inputs/tail)
* [tcp_listener](./plugins/inputs/socket_listener)
* [udp_listener](./plugins/inputs/socket_listener)
* [webhooks](./plugins/inputs/webhooks)
* [filestack](./plugins/inputs/webhooks/filestack)
* [github](./plugins/inputs/webhooks/github)
* [mandrill](./plugins/inputs/webhooks/mandrill)
* [papertrail](./plugins/inputs/webhooks/papertrail)
* [particle](./plugins/inputs/webhooks/particle)
* [rollbar](./plugins/inputs/webhooks/rollbar)
* [zipkin](./plugins/inputs/zipkin)
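As a sketch, enabling a service plugin such as statsd is a matter of adding its section to the config (the listen address shown is an assumption for illustration):
```toml
[[inputs.statsd]]
  service_address = ":8125"
```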
We'll be adding support for many more over the coming months. Read on if you
want to add support for another service or third-party API.
Telegraf is able to parse the following input data formats into metrics; these
formats may be used with input plugins supporting the `data_format` option:
## Supported Output Plugins
* [InfluxDB Line Protocol](./docs/DATA_FORMATS_INPUT.md#influx)
* [JSON](./docs/DATA_FORMATS_INPUT.md#json)
* [Graphite](./docs/DATA_FORMATS_INPUT.md#graphite)
* [Value](./docs/DATA_FORMATS_INPUT.md#value)
* [Nagios](./docs/DATA_FORMATS_INPUT.md#nagios)
* [Collectd](./docs/DATA_FORMATS_INPUT.md#collectd)
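For example, a hypothetical exec-based collector emitting JSON could select its parser like this (the script path is illustrative only):
```toml
[[inputs.exec]]
  commands = ["/usr/local/bin/sample_metrics.sh"]
  data_format = "json"
```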
* influxdb
* amon
* amqp
* aws kinesis
* aws cloudwatch
* datadog
* graphite
* kafka
* librato
* mqtt
* nsq
* opentsdb
* prometheus
* riemann
## Processor Plugins
## Contributing
* [printer](./plugins/processors/printer)
Please see the
[contributing guide](CONTRIBUTING.md)
for details on contributing a plugin to Telegraf.
## Aggregator Plugins
* [basicstats](./plugins/aggregators/basicstats)
* [minmax](./plugins/aggregators/minmax)
* [histogram](./plugins/aggregators/histogram)
## Output Plugins
* [influxdb](./plugins/outputs/influxdb)
* [amon](./plugins/outputs/amon)
* [amqp](./plugins/outputs/amqp) (rabbitmq)
* [aws kinesis](./plugins/outputs/kinesis)
* [aws cloudwatch](./plugins/outputs/cloudwatch)
* [cratedb](./plugins/outputs/cratedb)
* [datadog](./plugins/outputs/datadog)
* [discard](./plugins/outputs/discard)
* [elasticsearch](./plugins/outputs/elasticsearch)
* [file](./plugins/outputs/file)
* [graphite](./plugins/outputs/graphite)
* [graylog](./plugins/outputs/graylog)
* [instrumental](./plugins/outputs/instrumental)
* [kafka](./plugins/outputs/kafka)
* [librato](./plugins/outputs/librato)
* [mqtt](./plugins/outputs/mqtt)
* [nats](./plugins/outputs/nats)
* [nsq](./plugins/outputs/nsq)
* [opentsdb](./plugins/outputs/opentsdb)
* [prometheus](./plugins/outputs/prometheus_client)
* [riemann](./plugins/outputs/riemann)
* [riemann_legacy](./plugins/outputs/riemann_legacy)
* [socket_writer](./plugins/outputs/socket_writer)
* [tcp](./plugins/outputs/socket_writer)
* [udp](./plugins/outputs/socket_writer)
* [wavefront](./plugins/outputs/wavefront)


@@ -2,20 +2,45 @@ package telegraf
import "time"
// Accumulator is an interface for "accumulating" metrics from plugin(s).
// The metrics are sent down a channel shared between all plugins.
type Accumulator interface {
// AddFields adds a metric to the accumulator with the given measurement
// name, fields, and tags (and timestamp). If a timestamp is not provided,
// then the accumulator sets it to "now".
// Create a point with a value, decorating it with tags
// NOTE: tags is expected to be owned by the caller, don't mutate
// it after passing to Add.
Add(measurement string,
value interface{},
tags map[string]string,
t ...time.Time)
AddFields(measurement string,
fields map[string]interface{},
tags map[string]string,
t ...time.Time)
Debug() bool
SetDebug(enabled bool)
// AddGauge is the same as AddFields, but will add the metric as a "Gauge" type
AddGauge(measurement string,
fields map[string]interface{},
tags map[string]string,
t ...time.Time)
// AddCounter is the same as AddFields, but will add the metric as a "Counter" type
AddCounter(measurement string,
fields map[string]interface{},
tags map[string]string,
t ...time.Time)
// AddSummary is the same as AddFields, but will add the metric as a "Summary" type
AddSummary(measurement string,
fields map[string]interface{},
tags map[string]string,
t ...time.Time)
// AddHistogram is the same as AddFields, but will add the metric as a "Histogram" type
AddHistogram(measurement string,
fields map[string]interface{},
tags map[string]string,
t ...time.Time)
SetPrecision(precision, interval time.Duration)
AddError(err error)
}
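As a rough sketch of how an input plugin might drive the interface above (a hypothetical plugin; only methods shown in the interface are used):
```go
package example

import (
	"time"

	"github.com/influxdata/telegraf"
)

// Gather is a hypothetical input's collection pass: it reports one
// untyped field set, letting the accumulator pick the timestamp, and
// one gauge with an explicit timestamp.
func Gather(acc telegraf.Accumulator) error {
	tags := map[string]string{"host": "example"}

	acc.AddFields("example_load", map[string]interface{}{"value": 0.42}, tags)
	acc.AddGauge("example_temp", map[string]interface{}{"celsius": 21.5}, tags, time.Now())
	return nil
}
```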


@@ -1,54 +1,46 @@
package agent
import (
"fmt"
"log"
"math"
"sync"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal/models"
"github.com/influxdata/telegraf/selfstat"
)
var (
NErrors = selfstat.Register("agent", "gather_errors", map[string]string{})
)
type MetricMaker interface {
Name() string
MakeMetric(
measurement string,
fields map[string]interface{},
tags map[string]string,
mType telegraf.ValueType,
t time.Time,
) telegraf.Metric
}
func NewAccumulator(
inputConfig *internal_models.InputConfig,
maker MetricMaker,
metrics chan telegraf.Metric,
) *accumulator {
acc := accumulator{}
acc.metrics = metrics
acc.inputConfig = inputConfig
acc := accumulator{
maker: maker,
metrics: metrics,
precision: time.Nanosecond,
}
return &acc
}
type accumulator struct {
sync.Mutex
metrics chan telegraf.Metric
defaultTags map[string]string
maker MetricMaker
debug bool
inputConfig *internal_models.InputConfig
prefix string
}
func (ac *accumulator) Add(
measurement string,
value interface{},
tags map[string]string,
t ...time.Time,
) {
fields := make(map[string]interface{})
fields["value"] = value
if !ac.inputConfig.Filter.ShouldNamePass(measurement) {
return
}
ac.AddFields(measurement, fields, tags, t...)
precision time.Duration
}
func (ac *accumulator) AddFields(
@@ -57,121 +49,93 @@ func (ac *accumulator) AddFields(
tags map[string]string,
t ...time.Time,
) {
if len(fields) == 0 || len(measurement) == 0 {
if m := ac.maker.MakeMetric(measurement, fields, tags, telegraf.Untyped, ac.getTime(t)); m != nil {
ac.metrics <- m
}
}
func (ac *accumulator) AddGauge(
measurement string,
fields map[string]interface{},
tags map[string]string,
t ...time.Time,
) {
if m := ac.maker.MakeMetric(measurement, fields, tags, telegraf.Gauge, ac.getTime(t)); m != nil {
ac.metrics <- m
}
}
func (ac *accumulator) AddCounter(
measurement string,
fields map[string]interface{},
tags map[string]string,
t ...time.Time,
) {
if m := ac.maker.MakeMetric(measurement, fields, tags, telegraf.Counter, ac.getTime(t)); m != nil {
ac.metrics <- m
}
}
func (ac *accumulator) AddSummary(
measurement string,
fields map[string]interface{},
tags map[string]string,
t ...time.Time,
) {
if m := ac.maker.MakeMetric(measurement, fields, tags, telegraf.Summary, ac.getTime(t)); m != nil {
ac.metrics <- m
}
}
func (ac *accumulator) AddHistogram(
measurement string,
fields map[string]interface{},
tags map[string]string,
t ...time.Time,
) {
if m := ac.maker.MakeMetric(measurement, fields, tags, telegraf.Histogram, ac.getTime(t)); m != nil {
ac.metrics <- m
}
}
// AddError passes a runtime error to the accumulator.
// The error will be tagged with the plugin name and written to the log.
func (ac *accumulator) AddError(err error) {
if err == nil {
return
}
NErrors.Incr(1)
//TODO suppress/throttle consecutive duplicate errors?
log.Printf("E! Error in plugin [%s]: %s", ac.maker.Name(), err)
}
if !ac.inputConfig.Filter.ShouldNamePass(measurement) {
return
}
if !ac.inputConfig.Filter.ShouldTagsPass(tags) {
return
}
// Override measurement name if set
if len(ac.inputConfig.NameOverride) != 0 {
measurement = ac.inputConfig.NameOverride
}
// Apply measurement prefix and suffix if set
if len(ac.inputConfig.MeasurementPrefix) != 0 {
measurement = ac.inputConfig.MeasurementPrefix + measurement
}
if len(ac.inputConfig.MeasurementSuffix) != 0 {
measurement = measurement + ac.inputConfig.MeasurementSuffix
}
if tags == nil {
tags = make(map[string]string)
}
// Apply plugin-wide tags if set
for k, v := range ac.inputConfig.Tags {
if _, ok := tags[k]; !ok {
tags[k] = v
}
}
// Apply daemon-wide tags if set
for k, v := range ac.defaultTags {
if _, ok := tags[k]; !ok {
tags[k] = v
}
}
result := make(map[string]interface{})
for k, v := range fields {
// Filter out any filtered fields
if ac.inputConfig != nil {
if !ac.inputConfig.Filter.ShouldFieldsPass(k) {
continue
}
}
// Validate uint64 and float64 fields
switch val := v.(type) {
case uint64:
// InfluxDB does not support writing uint64
if val < uint64(9223372036854775808) {
result[k] = int64(val)
} else {
result[k] = int64(9223372036854775807)
}
continue
case float64:
// NaNs are invalid values in influxdb, skip measurement
if math.IsNaN(val) || math.IsInf(val, 0) {
if ac.debug {
log.Printf("Measurement [%s] field [%s] has a NaN or Inf "+
"field, skipping",
measurement, k)
}
continue
}
}
result[k] = v
}
fields = nil
if len(result) == 0 {
// SetPrecision takes two time.Duration objects. If the first is non-zero,
// it sets that as the precision. Otherwise, it takes the second argument
// as the order of time that the metrics should be rounded to, with the
// maximum being 1s.
func (ac *accumulator) SetPrecision(precision, interval time.Duration) {
if precision > 0 {
ac.precision = precision
return
}
switch {
case interval >= time.Second:
ac.precision = time.Second
case interval >= time.Millisecond:
ac.precision = time.Millisecond
case interval >= time.Microsecond:
ac.precision = time.Microsecond
default:
ac.precision = time.Nanosecond
}
}
func (ac accumulator) getTime(t []time.Time) time.Time {
var timestamp time.Time
if len(t) > 0 {
timestamp = t[0]
} else {
timestamp = time.Now()
}
if ac.prefix != "" {
measurement = ac.prefix + measurement
}
m, err := telegraf.NewMetric(measurement, tags, result, timestamp)
if err != nil {
log.Printf("Error adding point [%s]: %s\n", measurement, err.Error())
return
}
if ac.debug {
fmt.Println("> " + m.String())
}
ac.metrics <- m
}
func (ac *accumulator) Debug() bool {
return ac.debug
}
func (ac *accumulator) SetDebug(debug bool) {
ac.debug = debug
}
func (ac *accumulator) setDefaultTags(tags map[string]string) {
ac.defaultTags = tags
}
func (ac *accumulator) addDefaultTag(key, value string) {
if ac.defaultTags == nil {
ac.defaultTags = make(map[string]string)
}
ac.defaultTags[key] = value
return timestamp.Round(ac.precision)
}
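A small, self-contained illustration of the rounding semantics described above; the expected values match the precision tests later in this diff:
```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// The same instant the accumulator tests use: 12:00:00.082912748 UTC.
	t := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)

	fmt.Println(t.Round(time.Second).UnixNano())      // 1139572800000000000 (interval >= 1s)
	fmt.Println(t.Round(time.Millisecond).UnixNano()) // 1139572800083000000 (interval >= 1ms)
	fmt.Println(t.Round(time.Nanosecond).UnixNano())  // 1139572800082912748 (explicit ns precision)
}
```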


@@ -1,27 +1,119 @@
package agent
import (
"bytes"
"fmt"
"math"
"log"
"os"
"testing"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal/models"
"github.com/influxdata/telegraf/metric"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestAdd(t *testing.T) {
a := accumulator{}
now := time.Now()
a.metrics = make(chan telegraf.Metric, 10)
defer close(a.metrics)
a.inputConfig = &internal_models.InputConfig{}
metrics := make(chan telegraf.Metric, 10)
defer close(metrics)
a := NewAccumulator(&TestMetricMaker{}, metrics)
a.Add("acctest", float64(101), map[string]string{})
a.Add("acctest", float64(101), map[string]string{"acc": "test"})
a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)
a.AddFields("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{})
a.AddFields("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{"acc": "test"})
a.AddFields("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{"acc": "test"}, now)
testm := <-metrics
actual := testm.String()
assert.Contains(t, actual, "acctest value=101")
testm = <-metrics
actual = testm.String()
assert.Contains(t, actual, "acctest,acc=test value=101")
testm = <-metrics
actual = testm.String()
assert.Equal(t,
fmt.Sprintf("acctest,acc=test value=101 %d\n", now.UnixNano()),
actual)
}
func TestAddFields(t *testing.T) {
now := time.Now()
metrics := make(chan telegraf.Metric, 10)
defer close(metrics)
a := NewAccumulator(&TestMetricMaker{}, metrics)
fields := map[string]interface{}{
"usage": float64(99),
}
a.AddFields("acctest", fields, map[string]string{})
a.AddGauge("acctest", fields, map[string]string{"acc": "test"})
a.AddCounter("acctest", fields, map[string]string{"acc": "test"}, now)
testm := <-metrics
actual := testm.String()
assert.Contains(t, actual, "acctest usage=99")
testm = <-metrics
actual = testm.String()
assert.Contains(t, actual, "acctest,acc=test usage=99")
testm = <-metrics
actual = testm.String()
assert.Equal(t,
fmt.Sprintf("acctest,acc=test usage=99 %d\n", now.UnixNano()),
actual)
}
func TestAccAddError(t *testing.T) {
errBuf := bytes.NewBuffer(nil)
log.SetOutput(errBuf)
defer log.SetOutput(os.Stderr)
metrics := make(chan telegraf.Metric, 10)
defer close(metrics)
a := NewAccumulator(&TestMetricMaker{}, metrics)
a.AddError(fmt.Errorf("foo"))
a.AddError(fmt.Errorf("bar"))
a.AddError(fmt.Errorf("baz"))
errs := bytes.Split(errBuf.Bytes(), []byte{'\n'})
assert.EqualValues(t, int64(3), NErrors.Get())
require.Len(t, errs, 4) // 4 because of trailing newline
assert.Contains(t, string(errs[0]), "TestPlugin")
assert.Contains(t, string(errs[0]), "foo")
assert.Contains(t, string(errs[1]), "TestPlugin")
assert.Contains(t, string(errs[1]), "bar")
assert.Contains(t, string(errs[2]), "TestPlugin")
assert.Contains(t, string(errs[2]), "baz")
}
func TestAddNoIntervalWithPrecision(t *testing.T) {
now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
metrics := make(chan telegraf.Metric, 10)
defer close(metrics)
a := NewAccumulator(&TestMetricMaker{}, metrics)
a.SetPrecision(0, time.Second)
a.AddFields("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{})
a.AddFields("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{"acc": "test"})
a.AddFields("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{"acc": "test"}, now)
testm := <-a.metrics
actual := testm.String()
@@ -34,269 +126,215 @@ func TestAdd(t *testing.T) {
testm = <-a.metrics
actual = testm.String()
assert.Equal(t,
fmt.Sprintf("acctest,acc=test value=101 %d", now.UnixNano()),
fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800000000000)),
actual)
}
func TestAddDefaultTags(t *testing.T) {
a := accumulator{}
a.addDefaultTag("default", "tag")
now := time.Now()
a.metrics = make(chan telegraf.Metric, 10)
defer close(a.metrics)
a.inputConfig = &internal_models.InputConfig{}
func TestAddDisablePrecision(t *testing.T) {
now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
metrics := make(chan telegraf.Metric, 10)
defer close(metrics)
a := NewAccumulator(&TestMetricMaker{}, metrics)
a.Add("acctest", float64(101), map[string]string{})
a.Add("acctest", float64(101), map[string]string{"acc": "test"})
a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)
a.SetPrecision(time.Nanosecond, 0)
a.AddFields("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{})
a.AddFields("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{"acc": "test"})
a.AddFields("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{"acc": "test"}, now)
testm := <-a.metrics
actual := testm.String()
assert.Contains(t, actual, "acctest,default=tag value=101")
assert.Contains(t, actual, "acctest value=101")
testm = <-a.metrics
actual = testm.String()
assert.Contains(t, actual, "acctest,acc=test,default=tag value=101")
assert.Contains(t, actual, "acctest,acc=test value=101")
testm = <-a.metrics
actual = testm.String()
assert.Equal(t,
fmt.Sprintf("acctest,acc=test,default=tag value=101 %d", now.UnixNano()),
fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800082912748)),
actual)
}
func TestAddFields(t *testing.T) {
a := accumulator{}
now := time.Now()
a.metrics = make(chan telegraf.Metric, 10)
defer close(a.metrics)
a.inputConfig = &internal_models.InputConfig{}
func TestAddNoPrecisionWithInterval(t *testing.T) {
now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
metrics := make(chan telegraf.Metric, 10)
defer close(metrics)
a := NewAccumulator(&TestMetricMaker{}, metrics)
fields := map[string]interface{}{
"usage": float64(99),
a.SetPrecision(0, time.Second)
a.AddFields("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{})
a.AddFields("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{"acc": "test"})
a.AddFields("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{"acc": "test"}, now)
testm := <-a.metrics
actual := testm.String()
assert.Contains(t, actual, "acctest value=101")
testm = <-a.metrics
actual = testm.String()
assert.Contains(t, actual, "acctest,acc=test value=101")
testm = <-a.metrics
actual = testm.String()
assert.Equal(t,
fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800000000000)),
actual)
}
func TestDifferentPrecisions(t *testing.T) {
now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
metrics := make(chan telegraf.Metric, 10)
defer close(metrics)
a := NewAccumulator(&TestMetricMaker{}, metrics)
a.SetPrecision(0, time.Second)
a.AddFields("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{"acc": "test"}, now)
testm := <-a.metrics
actual := testm.String()
assert.Equal(t,
fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800000000000)),
actual)
a.SetPrecision(0, time.Millisecond)
a.AddFields("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{"acc": "test"}, now)
testm = <-a.metrics
actual = testm.String()
assert.Equal(t,
fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800083000000)),
actual)
a.SetPrecision(0, time.Microsecond)
a.AddFields("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{"acc": "test"}, now)
testm = <-a.metrics
actual = testm.String()
assert.Equal(t,
fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800082913000)),
actual)
a.SetPrecision(0, time.Nanosecond)
a.AddFields("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{"acc": "test"}, now)
testm = <-a.metrics
actual = testm.String()
assert.Equal(t,
fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800082912748)),
actual)
}
func TestAddGauge(t *testing.T) {
now := time.Now()
metrics := make(chan telegraf.Metric, 10)
defer close(metrics)
a := NewAccumulator(&TestMetricMaker{}, metrics)
a.AddGauge("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{})
a.AddGauge("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{"acc": "test"})
a.AddGauge("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{"acc": "test"}, now)
testm := <-metrics
actual := testm.String()
assert.Contains(t, actual, "acctest value=101")
assert.Equal(t, testm.Type(), telegraf.Gauge)
testm = <-metrics
actual = testm.String()
assert.Contains(t, actual, "acctest,acc=test value=101")
assert.Equal(t, testm.Type(), telegraf.Gauge)
testm = <-metrics
actual = testm.String()
assert.Equal(t,
fmt.Sprintf("acctest,acc=test value=101 %d\n", now.UnixNano()),
actual)
assert.Equal(t, testm.Type(), telegraf.Gauge)
}
func TestAddCounter(t *testing.T) {
now := time.Now()
metrics := make(chan telegraf.Metric, 10)
defer close(metrics)
a := NewAccumulator(&TestMetricMaker{}, metrics)
a.AddCounter("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{})
a.AddCounter("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{"acc": "test"})
a.AddCounter("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{"acc": "test"}, now)
testm := <-metrics
actual := testm.String()
assert.Contains(t, actual, "acctest value=101")
assert.Equal(t, testm.Type(), telegraf.Counter)
testm = <-metrics
actual = testm.String()
assert.Contains(t, actual, "acctest,acc=test value=101")
assert.Equal(t, testm.Type(), telegraf.Counter)
testm = <-metrics
actual = testm.String()
assert.Equal(t,
fmt.Sprintf("acctest,acc=test value=101 %d\n", now.UnixNano()),
actual)
assert.Equal(t, testm.Type(), telegraf.Counter)
}
type TestMetricMaker struct {
}
func (tm *TestMetricMaker) Name() string {
return "TestPlugin"
}
func (tm *TestMetricMaker) MakeMetric(
measurement string,
fields map[string]interface{},
tags map[string]string,
mType telegraf.ValueType,
t time.Time,
) telegraf.Metric {
switch mType {
case telegraf.Untyped:
if m, err := metric.New(measurement, tags, fields, t); err == nil {
return m
}
case telegraf.Counter:
if m, err := metric.New(measurement, tags, fields, t, telegraf.Counter); err == nil {
return m
}
case telegraf.Gauge:
if m, err := metric.New(measurement, tags, fields, t, telegraf.Gauge); err == nil {
return m
}
}
a.AddFields("acctest", fields, map[string]string{})
a.AddFields("acctest", fields, map[string]string{"acc": "test"})
a.AddFields("acctest", fields, map[string]string{"acc": "test"}, now)
testm := <-a.metrics
actual := testm.String()
assert.Contains(t, actual, "acctest usage=99")
testm = <-a.metrics
actual = testm.String()
assert.Contains(t, actual, "acctest,acc=test usage=99")
testm = <-a.metrics
actual = testm.String()
assert.Equal(t,
fmt.Sprintf("acctest,acc=test usage=99 %d", now.UnixNano()),
actual)
}
// Test that all Inf fields get dropped, and not added to metrics channel
func TestAddInfFields(t *testing.T) {
inf := math.Inf(1)
ninf := math.Inf(-1)
a := accumulator{}
now := time.Now()
a.metrics = make(chan telegraf.Metric, 10)
defer close(a.metrics)
a.inputConfig = &internal_models.InputConfig{}
fields := map[string]interface{}{
"usage": inf,
"nusage": ninf,
}
a.AddFields("acctest", fields, map[string]string{})
a.AddFields("acctest", fields, map[string]string{"acc": "test"})
a.AddFields("acctest", fields, map[string]string{"acc": "test"}, now)
assert.Len(t, a.metrics, 0)
// test that non-inf fields are kept and not dropped
fields["notinf"] = float64(100)
a.AddFields("acctest", fields, map[string]string{})
testm := <-a.metrics
actual := testm.String()
assert.Contains(t, actual, "acctest notinf=100")
}
// Test that nan fields are dropped and not added
func TestAddNaNFields(t *testing.T) {
nan := math.NaN()
a := accumulator{}
now := time.Now()
a.metrics = make(chan telegraf.Metric, 10)
defer close(a.metrics)
a.inputConfig = &internal_models.InputConfig{}
fields := map[string]interface{}{
"usage": nan,
}
a.AddFields("acctest", fields, map[string]string{})
a.AddFields("acctest", fields, map[string]string{"acc": "test"})
a.AddFields("acctest", fields, map[string]string{"acc": "test"}, now)
assert.Len(t, a.metrics, 0)
// test that non-nan fields are kept and not dropped
fields["notnan"] = float64(100)
a.AddFields("acctest", fields, map[string]string{})
testm := <-a.metrics
actual := testm.String()
assert.Contains(t, actual, "acctest notnan=100")
}
func TestAddUint64Fields(t *testing.T) {
a := accumulator{}
now := time.Now()
a.metrics = make(chan telegraf.Metric, 10)
defer close(a.metrics)
a.inputConfig = &internal_models.InputConfig{}
fields := map[string]interface{}{
"usage": uint64(99),
}
a.AddFields("acctest", fields, map[string]string{})
a.AddFields("acctest", fields, map[string]string{"acc": "test"})
a.AddFields("acctest", fields, map[string]string{"acc": "test"}, now)
testm := <-a.metrics
actual := testm.String()
assert.Contains(t, actual, "acctest usage=99i")
testm = <-a.metrics
actual = testm.String()
assert.Contains(t, actual, "acctest,acc=test usage=99i")
testm = <-a.metrics
actual = testm.String()
assert.Equal(t,
fmt.Sprintf("acctest,acc=test usage=99i %d", now.UnixNano()),
actual)
}
func TestAddUint64Overflow(t *testing.T) {
a := accumulator{}
now := time.Now()
a.metrics = make(chan telegraf.Metric, 10)
defer close(a.metrics)
a.inputConfig = &internal_models.InputConfig{}
fields := map[string]interface{}{
"usage": uint64(9223372036854775808),
}
a.AddFields("acctest", fields, map[string]string{})
a.AddFields("acctest", fields, map[string]string{"acc": "test"})
a.AddFields("acctest", fields, map[string]string{"acc": "test"}, now)
testm := <-a.metrics
actual := testm.String()
assert.Contains(t, actual, "acctest usage=9223372036854775807i")
testm = <-a.metrics
actual = testm.String()
assert.Contains(t, actual, "acctest,acc=test usage=9223372036854775807i")
testm = <-a.metrics
actual = testm.String()
assert.Equal(t,
fmt.Sprintf("acctest,acc=test usage=9223372036854775807i %d", now.UnixNano()),
actual)
}
func TestAddInts(t *testing.T) {
a := accumulator{}
a.addDefaultTag("default", "tag")
now := time.Now()
a.metrics = make(chan telegraf.Metric, 10)
defer close(a.metrics)
a.inputConfig = &internal_models.InputConfig{}
a.Add("acctest", int(101), map[string]string{})
a.Add("acctest", int32(101), map[string]string{"acc": "test"})
a.Add("acctest", int64(101), map[string]string{"acc": "test"}, now)
testm := <-a.metrics
actual := testm.String()
assert.Contains(t, actual, "acctest,default=tag value=101i")
testm = <-a.metrics
actual = testm.String()
assert.Contains(t, actual, "acctest,acc=test,default=tag value=101i")
testm = <-a.metrics
actual = testm.String()
assert.Equal(t,
fmt.Sprintf("acctest,acc=test,default=tag value=101i %d", now.UnixNano()),
actual)
}
func TestAddFloats(t *testing.T) {
a := accumulator{}
a.addDefaultTag("default", "tag")
now := time.Now()
a.metrics = make(chan telegraf.Metric, 10)
defer close(a.metrics)
a.inputConfig = &internal_models.InputConfig{}
a.Add("acctest", float32(101), map[string]string{"acc": "test"})
a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)
testm := <-a.metrics
actual := testm.String()
assert.Contains(t, actual, "acctest,acc=test,default=tag value=101")
testm = <-a.metrics
actual = testm.String()
assert.Equal(t,
fmt.Sprintf("acctest,acc=test,default=tag value=101 %d", now.UnixNano()),
actual)
}
func TestAddStrings(t *testing.T) {
a := accumulator{}
a.addDefaultTag("default", "tag")
now := time.Now()
a.metrics = make(chan telegraf.Metric, 10)
defer close(a.metrics)
a.inputConfig = &internal_models.InputConfig{}
a.Add("acctest", "test", map[string]string{"acc": "test"})
a.Add("acctest", "foo", map[string]string{"acc": "test"}, now)
testm := <-a.metrics
actual := testm.String()
assert.Contains(t, actual, "acctest,acc=test,default=tag value=\"test\"")
testm = <-a.metrics
actual = testm.String()
assert.Equal(t,
fmt.Sprintf("acctest,acc=test,default=tag value=\"foo\" %d", now.UnixNano()),
actual)
}
func TestAddBools(t *testing.T) {
a := accumulator{}
a.addDefaultTag("default", "tag")
now := time.Now()
a.metrics = make(chan telegraf.Metric, 10)
defer close(a.metrics)
a.inputConfig = &internal_models.InputConfig{}
a.Add("acctest", true, map[string]string{"acc": "test"})
a.Add("acctest", false, map[string]string{"acc": "test"}, now)
testm := <-a.metrics
actual := testm.String()
assert.Contains(t, actual, "acctest,acc=test,default=tag value=true")
testm = <-a.metrics
actual = testm.String()
assert.Equal(t,
fmt.Sprintf("acctest,acc=test,default=tag value=false %d", now.UnixNano()),
actual)
return nil
}


@@ -1,19 +1,18 @@
package agent
import (
cryptorand "crypto/rand"
"fmt"
"log"
"math/big"
"math/rand"
"os"
"runtime"
"sync"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/internal/config"
"github.com/influxdata/telegraf/internal/models"
"github.com/influxdata/telegraf/selfstat"
)
// Agent runs telegraf and collects data based on the given config
@@ -27,40 +26,38 @@ func NewAgent(config *config.Config) (*Agent, error) {
Config: config,
}
if a.Config.Agent.Hostname == "" {
hostname, err := os.Hostname()
if err != nil {
return nil, err
if !a.Config.Agent.OmitHostname {
if a.Config.Agent.Hostname == "" {
hostname, err := os.Hostname()
if err != nil {
return nil, err
}
a.Config.Agent.Hostname = hostname
}
a.Config.Agent.Hostname = hostname
config.Tags["host"] = a.Config.Agent.Hostname
}
config.Tags["host"] = a.Config.Agent.Hostname
return a, nil
}
// Connect connects to all configured outputs
func (a *Agent) Connect() error {
for _, o := range a.Config.Outputs {
o.Quiet = a.Config.Agent.Quiet
switch ot := o.Output.(type) {
case telegraf.ServiceOutput:
if err := ot.Start(); err != nil {
log.Printf("Service for output %s failed to start, exiting\n%s\n",
log.Printf("E! Service for output %s failed to start, exiting\n%s\n",
o.Name, err.Error())
return err
}
}
if a.Config.Agent.Debug {
log.Printf("Attempting connection to output: %s\n", o.Name)
}
log.Printf("D! Attempting connection to output: %s\n", o.Name)
err := o.Output.Connect()
if err != nil {
log.Printf("Failed to connect to output %s, retrying in 15s, "+
log.Printf("E! Failed to connect to output %s, retrying in 15s, "+
"error was '%s' \n", o.Name, err)
time.Sleep(15 * time.Second)
err = o.Output.Connect()
@@ -68,9 +65,7 @@ func (a *Agent) Connect() error {
return err
}
}
if a.Config.Agent.Debug {
log.Printf("Successfully connected to output: %s\n", o.Name)
}
log.Printf("D! Successfully connected to output: %s\n", o.Name)
}
return nil
}
@@ -88,115 +83,94 @@ func (a *Agent) Close() error {
return err
}
func panicRecover(input *internal_models.RunningInput) {
func panicRecover(input *models.RunningInput) {
if err := recover(); err != nil {
trace := make([]byte, 2048)
runtime.Stack(trace, true)
log.Printf("FATAL: Input [%s] panicked: %s, Stack:\n%s\n",
input.Name, err, trace)
log.Println("PLEASE REPORT THIS PANIC ON GITHUB with " +
log.Printf("E! FATAL: Input [%s] panicked: %s, Stack:\n%s\n",
input.Name(), err, trace)
log.Println("E! PLEASE REPORT THIS PANIC ON GITHUB with " +
"stack trace, configuration, and OS information: " +
"https://github.com/influxdata/telegraf/issues/new")
}
}
// gatherParallel runs the inputs that are using the same reporting interval
// as the telegraf agent.
func (a *Agent) gatherParallel(metricC chan telegraf.Metric) error {
var wg sync.WaitGroup
start := time.Now()
counter := 0
jitter := a.Config.Agent.CollectionJitter.Duration.Nanoseconds()
for _, input := range a.Config.Inputs {
if input.Config.Interval != 0 {
continue
}
wg.Add(1)
counter++
go func(input *internal_models.RunningInput) {
defer panicRecover(input)
defer wg.Done()
acc := NewAccumulator(input.Config, metricC)
acc.SetDebug(a.Config.Agent.Debug)
acc.setDefaultTags(a.Config.Tags)
if jitter != 0 {
nanoSleep := rand.Int63n(jitter)
d, err := time.ParseDuration(fmt.Sprintf("%dns", nanoSleep))
if err != nil {
log.Printf("Jittering collection interval failed for plugin %s",
input.Name)
} else {
time.Sleep(d)
}
}
if err := input.Input.Gather(acc); err != nil {
log.Printf("Error in input [%s]: %s", input.Name, err)
}
}(input)
}
if counter == 0 {
return nil
}
wg.Wait()
elapsed := time.Since(start)
if !a.Config.Agent.Quiet {
log.Printf("Gathered metrics, (%s interval), from %d inputs in %s\n",
a.Config.Agent.Interval.Duration, counter, elapsed)
}
return nil
}
// gatherSeparate runs the inputs that have been configured with their own
// gatherer runs the inputs that have been configured with their own
// reporting interval.
func (a *Agent) gatherSeparate(
func (a *Agent) gatherer(
shutdown chan struct{},
input *internal_models.RunningInput,
input *models.RunningInput,
interval time.Duration,
metricC chan telegraf.Metric,
) error {
) {
defer panicRecover(input)
ticker := time.NewTicker(input.Config.Interval)
GatherTime := selfstat.RegisterTiming("gather",
"gather_time_ns",
map[string]string{"input": input.Config.Name},
)
acc := NewAccumulator(input, metricC)
acc.SetPrecision(a.Config.Agent.Precision.Duration,
a.Config.Agent.Interval.Duration)
ticker := time.NewTicker(interval)
defer ticker.Stop()
for {
var outerr error
internal.RandomSleep(a.Config.Agent.CollectionJitter.Duration, shutdown)
start := time.Now()
acc := NewAccumulator(input.Config, metricC)
acc.SetDebug(a.Config.Agent.Debug)
acc.setDefaultTags(a.Config.Tags)
if err := input.Input.Gather(acc); err != nil {
log.Printf("Error in input [%s]: %s", input.Name, err)
}
gatherWithTimeout(shutdown, input, acc, interval)
elapsed := time.Since(start)
if !a.Config.Agent.Quiet {
log.Printf("Gathered metrics, (separate %s interval), from %s in %s\n",
input.Config.Interval, input.Name, elapsed)
}
if outerr != nil {
return outerr
}
GatherTime.Incr(elapsed.Nanoseconds())
select {
case <-shutdown:
return nil
return
case <-ticker.C:
continue
}
}
}
// gatherWithTimeout gathers from the given input, with the given timeout.
// when the given timeout is reached, gatherWithTimeout logs an error message
// but continues waiting for it to return. This is to avoid leaving behind
// hung processes, and to prevent re-calling the same hung process over and
// over.
func gatherWithTimeout(
shutdown chan struct{},
input *models.RunningInput,
acc *accumulator,
timeout time.Duration,
) {
ticker := time.NewTicker(timeout)
defer ticker.Stop()
done := make(chan error)
go func() {
done <- input.Input.Gather(acc)
}()
for {
select {
case err := <-done:
if err != nil {
acc.AddError(err)
}
return
case <-ticker.C:
err := fmt.Errorf("took longer to collect than collection interval (%s)",
timeout)
acc.AddError(err)
continue
case <-shutdown:
return
}
}
}
// Test verifies that we can 'Gather' from all inputs with their configured
// Config struct
func (a *Agent) Test() error {
@@ -217,10 +191,19 @@ func (a *Agent) Test() error {
}()
for _, input := range a.Config.Inputs {
acc := NewAccumulator(input.Config, metricC)
acc.SetDebug(true)
if _, ok := input.Input.(telegraf.ServiceInput); ok {
fmt.Printf("\nWARNING: skipping plugin [[%s]]: service inputs not supported in --test mode\n",
input.Name())
continue
}
fmt.Printf("* Plugin: %s, Collection 1\n", input.Name)
acc := NewAccumulator(input, metricC)
acc.SetPrecision(a.Config.Agent.Precision.Duration,
a.Config.Agent.Interval.Duration)
input.SetTrace(true)
input.SetDefaultTags(a.Config.Tags)
fmt.Printf("* Plugin: %s, Collection 1\n", input.Name())
if input.Config.Interval != 0 {
fmt.Printf("* Internal: %s\n", input.Config.Interval)
}
@@ -231,10 +214,10 @@ func (a *Agent) Test() error {
// Special instructions for some inputs. cpu, for example, needs to be
// run twice in order to return cpu usage percentages.
switch input.Name {
case "cpu", "mongodb", "procstat":
switch input.Name() {
case "inputs.cpu", "inputs.mongodb", "inputs.procstat":
time.Sleep(500 * time.Millisecond)
fmt.Printf("* Plugin: %s, Collection 2\n", input.Name)
fmt.Printf("* Plugin: %s, Collection 2\n", input.Name())
if err := input.Input.Gather(acc); err != nil {
return err
}
@@ -250,11 +233,11 @@ func (a *Agent) flush() {
wg.Add(len(a.Config.Outputs))
for _, o := range a.Config.Outputs {
go func(output *internal_models.RunningOutput) {
go func(output *models.RunningOutput) {
defer wg.Done()
err := output.Write()
if err != nil {
log.Printf("Error writing to output [%s]: %s\n",
log.Printf("E! Error writing to output [%s]: %s\n",
output.Name, err.Error())
}
}(o)
@@ -264,76 +247,141 @@ func (a *Agent) flush() {
}
// flusher monitors the metrics input channel and flushes on the minimum interval
func (a *Agent) flusher(shutdown chan struct{}, metricC chan telegraf.Metric) error {
func (a *Agent) flusher(shutdown chan struct{}, metricC chan telegraf.Metric, aggC chan telegraf.Metric) error {
// Inelegant, but this sleep is to allow the Gather threads to run, so that
// the flusher will flush after metrics are collected.
time.Sleep(time.Millisecond * 200)
time.Sleep(time.Millisecond * 300)
// create an output metric channel and a goroutine that continuously passes
// each metric onto the output plugins & aggregators.
outMetricC := make(chan telegraf.Metric, 100)
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
for {
select {
case <-shutdown:
if len(outMetricC) > 0 {
// keep going until outMetricC is flushed
continue
}
return
case m := <-outMetricC:
// if dropOriginal is set to true, then we will only send this
// metric to the aggregators, not the outputs.
var dropOriginal bool
if !m.IsAggregate() {
for _, agg := range a.Config.Aggregators {
if ok := agg.Add(m.Copy()); ok {
dropOriginal = true
}
}
}
if !dropOriginal {
for i, o := range a.Config.Outputs {
if i == len(a.Config.Outputs)-1 {
o.AddMetric(m)
} else {
o.AddMetric(m.Copy())
}
}
}
}
}
}()
wg.Add(1)
go func() {
defer wg.Done()
for {
select {
case <-shutdown:
if len(aggC) > 0 {
// keep going until aggC is flushed
continue
}
return
case metric := <-aggC:
metrics := []telegraf.Metric{metric}
for _, processor := range a.Config.Processors {
metrics = processor.Apply(metrics...)
}
for _, m := range metrics {
for i, o := range a.Config.Outputs {
if i == len(a.Config.Outputs)-1 {
o.AddMetric(m)
} else {
o.AddMetric(m.Copy())
}
}
}
}
}
}()
ticker := time.NewTicker(a.Config.Agent.FlushInterval.Duration)
semaphore := make(chan struct{}, 1)
for {
select {
case <-shutdown:
log.Println("Hang on, flushing any cached metrics before shutdown")
log.Println("I! Hang on, flushing any cached metrics before shutdown")
// wait for outMetricC to get flushed before flushing outputs
wg.Wait()
a.flush()
return nil
case <-ticker.C:
a.flush()
case m := <-metricC:
for _, o := range a.Config.Outputs {
o.AddMetric(m)
go func() {
select {
case semaphore <- struct{}{}:
internal.RandomSleep(a.Config.Agent.FlushJitter.Duration, shutdown)
a.flush()
<-semaphore
default:
// skipping this flush because one is already happening
log.Println("W! Skipping a scheduled flush because there is" +
" already a flush ongoing.")
}
}()
case metric := <-metricC:
// NOTE potential bottleneck here as we put each metric through the
// processors serially.
mS := []telegraf.Metric{metric}
for _, processor := range a.Config.Processors {
mS = processor.Apply(mS...)
}
for _, m := range mS {
outMetricC <- m
}
}
}
}
// jitterInterval applies the interval jitter to the flush interval using
// the crypto/rand number generator
func jitterInterval(ininterval, injitter time.Duration) time.Duration {
var jitter int64
outinterval := ininterval
if injitter.Nanoseconds() != 0 {
maxjitter := big.NewInt(injitter.Nanoseconds())
if j, err := cryptorand.Int(cryptorand.Reader, maxjitter); err == nil {
jitter = j.Int64()
}
outinterval = time.Duration(jitter + ininterval.Nanoseconds())
}
if outinterval.Nanoseconds() < time.Duration(500*time.Millisecond).Nanoseconds() {
log.Printf("Flush interval %s too low, setting to 500ms\n", outinterval)
outinterval = time.Duration(500 * time.Millisecond)
}
return outinterval
}
// Run runs the agent daemon, gathering every Interval
func (a *Agent) Run(shutdown chan struct{}) error {
var wg sync.WaitGroup
a.Config.Agent.FlushInterval.Duration = jitterInterval(
a.Config.Agent.FlushInterval.Duration,
a.Config.Agent.FlushJitter.Duration)
log.Printf("Agent Config: Interval:%s, Debug:%#v, Quiet:%#v, Hostname:%#v, "+
log.Printf("I! Agent Config: Interval:%s, Quiet:%#v, Hostname:%#v, "+
"Flush Interval:%s \n",
a.Config.Agent.Interval.Duration, a.Config.Agent.Debug, a.Config.Agent.Quiet,
a.Config.Agent.Interval.Duration, a.Config.Agent.Quiet,
a.Config.Agent.Hostname, a.Config.Agent.FlushInterval.Duration)
// channel shared between all input threads for accumulating metrics
metricC := make(chan telegraf.Metric, 10000)
metricC := make(chan telegraf.Metric, 100)
aggC := make(chan telegraf.Metric, 100)
// Start all ServicePlugins
for _, input := range a.Config.Inputs {
// Start service of any ServicePlugins
input.SetDefaultTags(a.Config.Tags)
switch p := input.Input.(type) {
case telegraf.ServiceInput:
acc := NewAccumulator(input.Config, metricC)
acc.SetDebug(a.Config.Agent.Debug)
acc.setDefaultTags(a.Config.Tags)
acc := NewAccumulator(input, metricC)
// Service input plugins should set their own precision for their
// metrics.
acc.SetPrecision(time.Nanosecond, 0)
if err := p.Start(acc); err != nil {
log.Printf("Service for input %s failed to start, exiting\n%s\n",
input.Name, err.Error())
log.Printf("E! Service for input %s failed to start, exiting\n%s\n",
input.Name(), err.Error())
return err
}
defer p.Stop()
@@ -345,43 +393,41 @@ func (a *Agent) Run(shutdown chan struct{}) error {
i := int64(a.Config.Agent.Interval.Duration)
time.Sleep(time.Duration(i - (time.Now().UnixNano() % i)))
}
ticker := time.NewTicker(a.Config.Agent.Interval.Duration)
wg.Add(1)
go func() {
defer wg.Done()
if err := a.flusher(shutdown, metricC); err != nil {
log.Printf("Flusher routine failed, exiting: %s\n", err.Error())
if err := a.flusher(shutdown, metricC, aggC); err != nil {
log.Printf("E! Flusher routine failed, exiting: %s\n", err.Error())
close(shutdown)
}
}()
wg.Add(len(a.Config.Aggregators))
for _, aggregator := range a.Config.Aggregators {
go func(agg *models.RunningAggregator) {
defer wg.Done()
acc := NewAccumulator(agg, aggC)
acc.SetPrecision(a.Config.Agent.Precision.Duration,
a.Config.Agent.Interval.Duration)
agg.Run(acc, shutdown)
}(aggregator)
}
wg.Add(len(a.Config.Inputs))
for _, input := range a.Config.Inputs {
// Special handling for inputs that have their own collection interval
// configured. Default intervals are handled below with gatherParallel
interval := a.Config.Agent.Interval.Duration
// overwrite the global interval if this plugin has its own.
if input.Config.Interval != 0 {
wg.Add(1)
go func(input *internal_models.RunningInput) {
defer wg.Done()
if err := a.gatherSeparate(shutdown, input, metricC); err != nil {
log.Printf(err.Error())
}
}(input)
interval = input.Config.Interval
}
go func(in *models.RunningInput, interv time.Duration) {
defer wg.Done()
a.gatherer(shutdown, in, interv, metricC)
}(input, interval)
}
defer wg.Wait()
for {
if err := a.gatherParallel(metricC); err != nil {
log.Printf(err.Error())
}
select {
case <-shutdown:
return nil
case <-ticker.C:
continue
}
}
wg.Wait()
a.Close()
return nil
}

View File

@@ -1,9 +1,7 @@
package agent
import (
"github.com/stretchr/testify/assert"
"testing"
"time"
"github.com/influxdata/telegraf/internal/config"
@@ -11,8 +9,18 @@ import (
_ "github.com/influxdata/telegraf/plugins/inputs/all"
// needing to load the outputs
_ "github.com/influxdata/telegraf/plugins/outputs/all"
"github.com/stretchr/testify/assert"
)
func TestAgent_OmitHostname(t *testing.T) {
c := config.NewConfig()
c.Agent.OmitHostname = true
_, err := NewAgent(c)
assert.NoError(t, err)
assert.NotContains(t, c.Tags, "host")
}
func TestAgent_LoadPlugin(t *testing.T) {
c := config.NewConfig()
c.InputFilters = []string{"mysql"}
@@ -101,75 +109,3 @@ func TestAgent_LoadOutput(t *testing.T) {
a, _ = NewAgent(c)
assert.Equal(t, 3, len(a.Config.Outputs))
}
func TestAgent_ZeroJitter(t *testing.T) {
flushinterval := jitterInterval(time.Duration(10*time.Second),
time.Duration(0*time.Second))
actual := flushinterval.Nanoseconds()
exp := time.Duration(10 * time.Second).Nanoseconds()
if actual != exp {
t.Errorf("Actual %v, expected %v", actual, exp)
}
}
func TestAgent_ZeroInterval(t *testing.T) {
min := time.Duration(500 * time.Millisecond).Nanoseconds()
max := time.Duration(5 * time.Second).Nanoseconds()
for i := 0; i < 1000; i++ {
flushinterval := jitterInterval(time.Duration(0*time.Second),
time.Duration(5*time.Second))
actual := flushinterval.Nanoseconds()
if actual > max {
t.Errorf("Didn't expect interval %d to be > %d", actual, max)
break
}
if actual < min {
t.Errorf("Didn't expect interval %d to be < %d", actual, min)
break
}
}
}
func TestAgent_ZeroBoth(t *testing.T) {
flushinterval := jitterInterval(time.Duration(0*time.Second),
time.Duration(0*time.Second))
actual := flushinterval
exp := time.Duration(500 * time.Millisecond)
if actual != exp {
t.Errorf("Actual %v, expected %v", actual, exp)
}
}
func TestAgent_JitterMax(t *testing.T) {
max := time.Duration(32 * time.Second).Nanoseconds()
for i := 0; i < 1000; i++ {
flushinterval := jitterInterval(time.Duration(30*time.Second),
time.Duration(2*time.Second))
actual := flushinterval.Nanoseconds()
if actual > max {
t.Errorf("Didn't expect interval %d to be > %d", actual, max)
break
}
}
}
func TestAgent_JitterMin(t *testing.T) {
min := time.Duration(30 * time.Second).Nanoseconds()
for i := 0; i < 1000; i++ {
flushinterval := jitterInterval(time.Duration(30*time.Second),
time.Duration(2*time.Second))
actual := flushinterval.Nanoseconds()
if actual < min {
t.Errorf("Didn't expect interval %d to be < %d", actual, min)
break
}
}
}

aggregator.go Normal file
View File

@@ -0,0 +1,22 @@
package telegraf
// Aggregator is an interface for implementing an Aggregator plugin.
// the RunningAggregator wraps this interface and guarantees that
// Add, Push, and Reset can not be called concurrently, so locking is not
// required when implementing an Aggregator plugin.
type Aggregator interface {
// SampleConfig returns the default configuration of the Aggregator.
SampleConfig() string
// Description returns a one-sentence description of the Aggregator.
Description() string
// Add the metric to the aggregator.
Add(in Metric)
// Push pushes the current aggregates to the accumulator.
Push(acc Accumulator)
// Reset resets the aggregator's caches and aggregates.
Reset()
}
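For a sense of what an implementation looks like, here is a minimal sketch of a hypothetical counting aggregator. The package and type names are illustrative, and it assumes the usual `telegraf.Metric.Name()` and `telegraf.Accumulator.AddFields()` signatures:

```go
package count

import "github.com/influxdata/telegraf"

// Count is a toy aggregator that counts how many metrics were seen
// per measurement name during each period.
type Count struct {
	counts map[string]int64
}

func NewCount() *Count {
	return &Count{counts: make(map[string]int64)}
}

func (c *Count) SampleConfig() string { return "" }
func (c *Count) Description() string  { return "Count metrics seen per measurement" }

// Add records one observation; no locking is needed because the
// RunningAggregator serializes Add, Push, and Reset.
func (c *Count) Add(in telegraf.Metric) {
	c.counts[in.Name()]++
}

// Push emits one aggregate metric per measurement seen this period.
func (c *Count) Push(acc telegraf.Accumulator) {
	for name, n := range c.counts {
		acc.AddFields(name+"_count", map[string]interface{}{"count": n}, nil)
	}
}

// Reset clears the window so the next period starts fresh.
func (c *Count) Reset() {
	c.counts = make(map[string]int64)
}
```

The plain map needs no mutex precisely because of the concurrency guarantee described in the interface comment above.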

appveyor.yml Normal file
View File

@@ -0,0 +1,33 @@
image: Previous Visual Studio 2015
version: "{build}"
cache:
- C:\Cache
clone_folder: C:\gopath\src\github.com\influxdata\telegraf
environment:
GOPATH: C:\gopath
platform: x64
install:
- IF NOT EXIST "C:\Cache" mkdir C:\Cache
- IF NOT EXIST "C:\Cache\go1.9.4.msi" curl -o "C:\Cache\go1.9.4.msi" https://storage.googleapis.com/golang/go1.9.4.windows-amd64.msi
- IF NOT EXIST "C:\Cache\gnuwin32-bin.zip" curl -o "C:\Cache\gnuwin32-bin.zip" https://dl.influxdata.com/telegraf/ci/make-3.81-bin.zip
- IF NOT EXIST "C:\Cache\gnuwin32-dep.zip" curl -o "C:\Cache\gnuwin32-dep.zip" https://dl.influxdata.com/telegraf/ci/make-3.81-dep.zip
- IF EXIST "C:\Go" rmdir /S /Q C:\Go
- msiexec.exe /i "C:\Cache\go1.9.4.msi" /quiet
- 7z x "C:\Cache\gnuwin32-bin.zip" -oC:\GnuWin32 -y
- 7z x "C:\Cache\gnuwin32-dep.zip" -oC:\GnuWin32 -y
- go version
- go env
build_script:
- cmd: C:\GnuWin32\bin\make
test_script:
- cmd: C:\GnuWin32\bin\make test-windows
artifacts:
- path: telegraf.exe

View File

@@ -1,20 +1,18 @@
machine:
services:
- docker
- memcached
- redis
- rabbitmq-server
post:
- sudo service zookeeper stop
- go version
- go version | grep 1.5.3 || sudo rm -rf /usr/local/go
- wget https://storage.googleapis.com/golang/go1.5.3.linux-amd64.tar.gz
- sudo tar -C /usr/local -xzf go1.5.3.linux-amd64.tar.gz
- sudo rm -rf /usr/local/go
- wget https://storage.googleapis.com/golang/go1.9.4.linux-amd64.tar.gz
- sudo tar -C /usr/local -xzf go1.9.4.linux-amd64.tar.gz
- go version
dependencies:
override:
- docker info
post:
- gem install fpm
- sudo apt-get install -y rpm python-boto
test:
override:

View File

@@ -4,21 +4,30 @@ import (
"flag"
"fmt"
"log"
"net/http"
_ "net/http/pprof" // Comment this line to disable pprof endpoint.
"os"
"os/signal"
"runtime"
"strings"
"syscall"
"github.com/influxdata/telegraf/agent"
"github.com/influxdata/telegraf/internal/config"
"github.com/influxdata/telegraf/logger"
_ "github.com/influxdata/telegraf/plugins/aggregators/all"
"github.com/influxdata/telegraf/plugins/inputs"
_ "github.com/influxdata/telegraf/plugins/inputs/all"
"github.com/influxdata/telegraf/plugins/outputs"
_ "github.com/influxdata/telegraf/plugins/outputs/all"
_ "github.com/influxdata/telegraf/plugins/processors/all"
"github.com/kardianos/service"
)
var fDebug = flag.Bool("debug", false,
"show metrics as they're generated to stdout")
"turn on debug logging")
var pprofAddr = flag.String("pprof-addr", "",
"pprof address to listen on, not activate pprof if empty")
var fQuiet = flag.Bool("quiet", false,
"run in quiet mode")
var fTest = flag.Bool("test", false, "gather metrics, print them out, and exit")
@@ -32,225 +41,191 @@ var fPidfile = flag.String("pidfile", "", "file to write our pid to")
var fInputFilters = flag.String("input-filter", "",
"filter the inputs to enable, separator is :")
var fInputList = flag.Bool("input-list", false,
"print available output plugins.")
"print available input plugins.")
var fOutputFilters = flag.String("output-filter", "",
"filter the outputs to enable, separator is :")
var fOutputList = flag.Bool("output-list", false,
"print available output plugins.")
var fAggregatorFilters = flag.String("aggregator-filter", "",
"filter the aggregators to enable, separator is :")
var fProcessorFilters = flag.String("processor-filter", "",
"filter the processors to enable, separator is :")
var fUsage = flag.String("usage", "",
"print usage for a plugin, ie, 'telegraf -usage mysql'")
var fInputFiltersLegacy = flag.String("filter", "",
"filter the inputs to enable, separator is :")
var fOutputFiltersLegacy = flag.String("outputfilter", "",
"filter the outputs to enable, separator is :")
var fConfigDirectoryLegacy = flag.String("configdirectory", "",
"directory containing additional *.conf files")
"print usage for a plugin, ie, 'telegraf --usage mysql'")
var fService = flag.String("service", "",
"operate on the service")
// Telegraf version
// -ldflags "-X main.Version=`git describe --always --tags`"
var Version string
var (
nextVersion = "1.5.0"
version string
commit string
branch string
)
func init() {
// If commit or branch are not set, make that clear.
if commit == "" {
commit = "unknown"
}
if branch == "" {
branch = "unknown"
}
}
const usage = `Telegraf, The plugin-driven server agent for collecting and reporting metrics.
Usage:
telegraf <flags>
telegraf [commands|flags]
The flags are:
The commands & flags are:
-config <file> configuration file to load
-test gather metrics once, print them to stdout, and exit
-sample-config print out full sample configuration to stdout
-config-directory directory containing additional *.conf files
-input-filter filter the input plugins to enable, separator is :
-input-list print all the available inputs
-output-filter filter the output plugins to enable, separator is :
-output-list print all the available outputs
-usage print usage for a plugin, ie, 'telegraf -usage mysql'
-debug print metrics as they're generated to stdout
-quiet run in quiet mode
-version print the version to stdout
config print out full sample configuration to stdout
version print the version to stdout
--config <file> configuration file to load
--test gather metrics once, print them to stdout, and exit
--config-directory directory containing additional *.conf files
--input-filter filter the input plugins to enable, separator is :
--output-filter filter the output plugins to enable, separator is :
--usage print usage for a plugin, ie, 'telegraf --usage mysql'
--debug print metrics as they're generated to stdout
--pprof-addr pprof address to listen on, format: localhost:6060 or :6060
--quiet run in quiet mode
Examples:
# generate a telegraf config file:
telegraf -sample-config > telegraf.conf
telegraf config > telegraf.conf
# generate config with only cpu input & influxdb output plugins defined
telegraf -sample-config -input-filter cpu -output-filter influxdb
telegraf --input-filter cpu --output-filter influxdb config
# run a single telegraf collection, outputting metrics to stdout
telegraf -config telegraf.conf -test
telegraf --config telegraf.conf --test
# run telegraf with all plugins defined in config file
telegraf -config telegraf.conf
telegraf --config telegraf.conf
# run telegraf, enabling the cpu & memory input, and influxdb output plugins
telegraf -config telegraf.conf -input-filter cpu:mem -output-filter influxdb
telegraf --config telegraf.conf --input-filter cpu:mem --output-filter influxdb
# run telegraf with pprof
telegraf --config telegraf.conf --pprof-addr localhost:6060
`
func main() {
var stop chan struct{}
func reloadLoop(
stop chan struct{},
inputFilters []string,
outputFilters []string,
aggregatorFilters []string,
processorFilters []string,
) {
reload := make(chan bool, 1)
reload <- true
for <-reload {
reload <- false
flag.Usage = func() { usageExit(0) }
flag.Parse()
if flag.NFlag() == 0 {
usageExit(0)
}
var inputFilters []string
if *fInputFiltersLegacy != "" {
inputFilter := strings.TrimSpace(*fInputFiltersLegacy)
inputFilters = strings.Split(":"+inputFilter+":", ":")
}
if *fInputFilters != "" {
inputFilter := strings.TrimSpace(*fInputFilters)
inputFilters = strings.Split(":"+inputFilter+":", ":")
}
var outputFilters []string
if *fOutputFiltersLegacy != "" {
outputFilter := strings.TrimSpace(*fOutputFiltersLegacy)
outputFilters = strings.Split(":"+outputFilter+":", ":")
}
if *fOutputFilters != "" {
outputFilter := strings.TrimSpace(*fOutputFilters)
outputFilters = strings.Split(":"+outputFilter+":", ":")
}
if *fOutputList {
fmt.Println("Available Output Plugins:")
for k, _ := range outputs.Outputs {
fmt.Printf(" %s\n", k)
}
return
}
if *fInputList {
fmt.Println("Available Input Plugins:")
for k, _ := range inputs.Inputs {
fmt.Printf(" %s\n", k)
}
return
}
if *fVersion {
v := fmt.Sprintf("Telegraf - Version %s", Version)
fmt.Println(v)
return
}
if *fSampleConfig {
config.PrintSampleConfig(inputFilters, outputFilters)
return
}
if *fUsage != "" {
if err := config.PrintInputConfig(*fUsage); err != nil {
if err2 := config.PrintOutputConfig(*fUsage); err2 != nil {
log.Fatalf("%s and %s", err, err2)
}
}
return
}
var (
c *config.Config
err error
)
if *fConfig != "" {
c = config.NewConfig()
c.OutputFilters = outputFilters
c.InputFilters = inputFilters
err = c.LoadConfig(*fConfig)
if err != nil {
log.Fatal(err)
}
} else {
fmt.Println("You must specify a config file. See telegraf --help")
os.Exit(1)
}
if *fConfigDirectoryLegacy != "" {
err = c.LoadDirectory(*fConfigDirectoryLegacy)
if err != nil {
log.Fatal(err)
}
// If no other options are specified, load the config file and run.
c := config.NewConfig()
c.OutputFilters = outputFilters
c.InputFilters = inputFilters
err := c.LoadConfig(*fConfig)
if err != nil {
log.Fatal("E! " + err.Error())
}
if *fConfigDirectory != "" {
err = c.LoadDirectory(*fConfigDirectory)
if err != nil {
log.Fatal(err)
log.Fatal("E! " + err.Error())
}
}
if len(c.Outputs) == 0 {
log.Fatalf("Error: no outputs found, did you provide a valid config file?")
if !*fTest && len(c.Outputs) == 0 {
log.Fatalf("E! Error: no outputs found, did you provide a valid config file?")
}
if len(c.Inputs) == 0 {
log.Fatalf("Error: no inputs found, did you provide a valid config file?")
log.Fatalf("E! Error: no inputs found, did you provide a valid config file?")
}
if int64(c.Agent.Interval.Duration) <= 0 {
log.Fatalf("E! Agent interval must be positive, found %s",
c.Agent.Interval.Duration)
}
if int64(c.Agent.FlushInterval.Duration) <= 0 {
log.Fatalf("E! Agent flush_interval must be positive; found %s",
c.Agent.Interval.Duration)
}
ag, err := agent.NewAgent(c)
if err != nil {
log.Fatal(err)
log.Fatal("E! " + err.Error())
}
if *fDebug {
ag.Config.Agent.Debug = true
}
if *fQuiet {
ag.Config.Agent.Quiet = true
}
// Setup logging
logger.SetupLogging(
ag.Config.Agent.Debug || *fDebug,
ag.Config.Agent.Quiet || *fQuiet,
ag.Config.Agent.Logfile,
)
if *fTest {
err = ag.Test()
if err != nil {
log.Fatal(err)
log.Fatal("E! " + err.Error())
}
return
os.Exit(0)
}
err = ag.Connect()
if err != nil {
log.Fatal(err)
log.Fatal("E! " + err.Error())
}
shutdown := make(chan struct{})
signals := make(chan os.Signal)
signal.Notify(signals, os.Interrupt, syscall.SIGHUP)
go func() {
sig := <-signals
if sig == os.Interrupt {
close(shutdown)
}
if sig == syscall.SIGHUP {
log.Printf("Reloading Telegraf config\n")
<-reload
reload <- true
select {
case sig := <-signals:
if sig == os.Interrupt {
close(shutdown)
}
if sig == syscall.SIGHUP {
log.Printf("I! Reloading Telegraf config\n")
<-reload
reload <- true
close(shutdown)
}
case <-stop:
close(shutdown)
}
}()
log.Printf("Starting Telegraf (version %s)\n", Version)
log.Printf("Loaded outputs: %s", strings.Join(c.OutputNames(), " "))
log.Printf("Loaded inputs: %s", strings.Join(c.InputNames(), " "))
log.Printf("Tags enabled: %s", c.ListTags())
log.Printf("I! Starting Telegraf %s\n", displayVersion())
log.Printf("I! Loaded outputs: %s", strings.Join(c.OutputNames(), " "))
log.Printf("I! Loaded inputs: %s", strings.Join(c.InputNames(), " "))
log.Printf("I! Tags enabled: %s", c.ListTags())
if *fPidfile != "" {
f, err := os.Create(*fPidfile)
f, err := os.OpenFile(*fPidfile, os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
log.Fatalf("Unable to create pidfile: %s", err)
log.Printf("E! Unable to create pidfile: %s", err)
} else {
fmt.Fprintf(f, "%d\n", os.Getpid())
f.Close()
defer func() {
err := os.Remove(*fPidfile)
if err != nil {
log.Printf("E! Unable to remove pidfile: %s", err)
}
}()
}
fmt.Fprintf(f, "%d\n", os.Getpid())
f.Close()
}
ag.Run(shutdown)
@@ -261,3 +236,175 @@ func usageExit(rc int) {
fmt.Println(usage)
os.Exit(rc)
}
type program struct {
inputFilters []string
outputFilters []string
aggregatorFilters []string
processorFilters []string
}
func (p *program) Start(s service.Service) error {
go p.run()
return nil
}
func (p *program) run() {
stop = make(chan struct{})
reloadLoop(
stop,
p.inputFilters,
p.outputFilters,
p.aggregatorFilters,
p.processorFilters,
)
}
func (p *program) Stop(s service.Service) error {
close(stop)
return nil
}
func displayVersion() string {
if version == "" {
return fmt.Sprintf("v%s~%s", nextVersion, commit)
}
return "v" + version
}
func main() {
flag.Usage = func() { usageExit(0) }
flag.Parse()
args := flag.Args()
inputFilters, outputFilters := []string{}, []string{}
if *fInputFilters != "" {
inputFilters = strings.Split(":"+strings.TrimSpace(*fInputFilters)+":", ":")
}
if *fOutputFilters != "" {
outputFilters = strings.Split(":"+strings.TrimSpace(*fOutputFilters)+":", ":")
}
aggregatorFilters, processorFilters := []string{}, []string{}
if *fAggregatorFilters != "" {
aggregatorFilters = strings.Split(":"+strings.TrimSpace(*fAggregatorFilters)+":", ":")
}
if *fProcessorFilters != "" {
processorFilters = strings.Split(":"+strings.TrimSpace(*fProcessorFilters)+":", ":")
}
if *pprofAddr != "" {
go func() {
pprofHostPort := *pprofAddr
parts := strings.Split(pprofHostPort, ":")
if len(parts) == 2 && parts[0] == "" {
pprofHostPort = fmt.Sprintf("localhost:%s", parts[1])
}
pprofHostPort = "http://" + pprofHostPort + "/debug/pprof"
log.Printf("I! Starting pprof HTTP server at: %s", pprofHostPort)
if err := http.ListenAndServe(*pprofAddr, nil); err != nil {
log.Fatal("E! " + err.Error())
}
}()
}
if len(args) > 0 {
switch args[0] {
case "version":
fmt.Printf("Telegraf %s (git: %s %s)\n", displayVersion(), branch, commit)
return
case "config":
config.PrintSampleConfig(
inputFilters,
outputFilters,
aggregatorFilters,
processorFilters,
)
return
}
}
// switch for flags which just do something and exit immediately
switch {
case *fOutputList:
fmt.Println("Available Output Plugins:")
for k, _ := range outputs.Outputs {
fmt.Printf(" %s\n", k)
}
return
case *fInputList:
fmt.Println("Available Input Plugins:")
for k, _ := range inputs.Inputs {
fmt.Printf(" %s\n", k)
}
return
case *fVersion:
fmt.Printf("Telegraf %s (git: %s %s)\n", displayVersion(), branch, commit)
return
case *fSampleConfig:
config.PrintSampleConfig(
inputFilters,
outputFilters,
aggregatorFilters,
processorFilters,
)
return
case *fUsage != "":
err := config.PrintInputConfig(*fUsage)
err2 := config.PrintOutputConfig(*fUsage)
if err != nil && err2 != nil {
log.Fatalf("E! %s and %s", err, err2)
}
return
}
if runtime.GOOS == "windows" {
svcConfig := &service.Config{
Name: "telegraf",
DisplayName: "Telegraf Data Collector Service",
Description: "Collects data using a series of plugins and publishes it to" +
"another series of plugins.",
Arguments: []string{"-config", "C:\\Program Files\\Telegraf\\telegraf.conf"},
}
prg := &program{
inputFilters: inputFilters,
outputFilters: outputFilters,
aggregatorFilters: aggregatorFilters,
processorFilters: processorFilters,
}
s, err := service.New(prg, svcConfig)
if err != nil {
log.Fatal("E! " + err.Error())
}
// Handle the -service flag here to prevent any issues with tooling that
// may not have an interactive session, e.g. installing from Ansible.
if *fService != "" {
if *fConfig != "" {
(*svcConfig).Arguments = []string{"-config", *fConfig}
}
if *fConfigDirectory != "" {
(*svcConfig).Arguments = append((*svcConfig).Arguments, "-config-directory", *fConfigDirectory)
}
err := service.Control(s, *fService)
if err != nil {
log.Fatal("E! " + err.Error())
}
os.Exit(0)
} else {
err = s.Run()
if err != nil {
log.Println("E! " + err.Error())
}
}
} else {
stop = make(chan struct{})
reloadLoop(
stop,
inputFilters,
outputFilters,
aggregatorFilters,
processorFilters,
)
}
}

View File

@@ -0,0 +1,64 @@
# Telegraf Aggregator & Processor Plugins
As of release 1.1.0, Telegraf has the concept of Aggregator and Processor Plugins.
These plugins sit in-between Input & Output plugins, aggregating and processing
metrics as they pass through Telegraf:
```
┌───────────┐
│ │
│ CPU │───┐
│ │ │
└───────────┘ │
┌───────────┐ │ ┌───────────┐
│ │ │ │ │
│ Memory │───┤ ┌──▶│ InfluxDB │
│ │ │ │ │ │
└───────────┘ │ ┌─────────────┐ ┌─────────────┐ │ └───────────┘
│ │ │ │Aggregate │ │
┌───────────┐ │ │Process │ │ - mean │ │ ┌───────────┐
│ │ │ │ - transform │ │ - quantiles │ │ │ │
│ MySQL │───┼───▶│ - decorate │────▶│ - min/max │───┼──▶│ File │
│ │ │ │ - filter │ │ - count │ │ │ │
└───────────┘ │ │ │ │ │ │ └───────────┘
│ └─────────────┘ └─────────────┘ │
┌───────────┐ │ │ ┌───────────┐
│ │ │ │ │ │
│ SNMP │───┤ └──▶│ Kafka │
│ │ │ │ │
└───────────┘ │ └───────────┘
┌───────────┐ │
│ │ │
│ Docker │───┘
│ │
└───────────┘
```
Both Aggregators and Processors analyze metrics as they pass through Telegraf.
Use [measurement filtering](CONFIGURATION.md#measurement-filtering)
to control which metrics are passed through a processor or aggregator. If a
metric is filtered out the metric bypasses the plugin and is passed downstream
to the next plugin.
**Processor** plugins process metrics as they pass through and immediately emit
results based on the values they process. For example, this could be printing
all metrics or adding a tag to all metrics that pass through.
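As a sketch of the shape such a plugin takes, here is a hypothetical pass-through printer, assuming the Processor interface mirrors the Aggregator one with a batch `Apply` method:

```go
package printer

import (
	"fmt"

	"github.com/influxdata/telegraf"
)

// Printer passes every metric through unchanged, printing each one as
// a side effect.
type Printer struct{}

func (p *Printer) SampleConfig() string { return "" }
func (p *Printer) Description() string  { return "Print all metrics that pass through" }

// Apply receives a batch of metrics and returns the batch that will be
// handed to the next plugin downstream.
func (p *Printer) Apply(in ...telegraf.Metric) []telegraf.Metric {
	for _, m := range in {
		fmt.Println(m.String())
	}
	return in
}
```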
**Aggregator** plugins, on the other hand, are a bit more complicated. Aggregators
are typically for emitting new _aggregate_ metrics, such as a running mean,
minimum, maximum, quantiles, or standard deviation. For this reason, all _aggregator_
plugins are configured with a `period`. The `period` is the size of the window
of metrics that each _aggregate_ represents. In other words, the emitted
_aggregate_ metric will be the aggregated value of the past `period` seconds.
Since many users will only care about their aggregates and not every single metric
gathered, there is also a `drop_original` argument, which tells Telegraf to only
emit the aggregates and not the original metrics.
**NOTE** Since aggregators only aggregate metrics within their period, historical
data is not supported. In other words, if your metric timestamp is more than
`now() - period` in the past, it will not be aggregated. If this is a feature
that you need, please comment on this [github issue](https://github.com/influxdata/telegraf/issues/1992).
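In code terms, an aggregator's `Add` typically guards its window with a timestamp check along these lines (a sketch with hypothetical field names, not Telegraf's actual implementation):

```go
package minmax

import (
	"time"

	"github.com/influxdata/telegraf"
)

// windowed holds the hypothetical period bookkeeping described above.
type windowed struct {
	periodStart time.Time
	periodEnd   time.Time
}

// Add ignores metrics whose timestamps fall outside the current
// period; historical data is never back-filled.
func (w *windowed) Add(in telegraf.Metric) {
	t := in.Time()
	if t.Before(w.periodStart) || t.After(w.periodEnd) {
		return
	}
	// ... update the running aggregates here ...
}
```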

View File

@@ -1,30 +1,65 @@
# Telegraf Configuration
You can see the latest config file with all available plugins here:
[telegraf.conf](https://github.com/influxdata/telegraf/blob/master/etc/telegraf.conf)
## Generating a Configuration File
A default Telegraf config file can be generated using the -sample-config flag:
`telegraf -sample-config > telegraf.conf`
A default Telegraf config file can be auto-generated by telegraf:
```
telegraf config > telegraf.conf
```
To generate a file with specific inputs and outputs, you can use the
-input-filter and -output-filter flags:
`telegraf -sample-config -input-filter cpu:mem:net:swap -output-filter influxdb:kafka`
--input-filter and --output-filter flags:
## `[global_tags]` Configuration
```
telegraf --input-filter cpu:mem:net:swap --output-filter influxdb:kafka config
```
Global tags can be specified in the `[global_tags]` section of the config file in
key="value" format. All metrics being gathered on this host will be tagged
## Environment Variables
Environment variables can be used anywhere in the config file; simply prepend
them with $. For strings the variable must be within quotes (ie, "$STR_VAR");
for numbers and booleans they should be plain (ie, $INT_VAR, $BOOL_VAR).
When using the `.deb` or `.rpm` packages, you can define environment variables
in the `/etc/default/telegraf` file.
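Mechanically this amounts to plain environment-variable expansion over the raw file contents before TOML parsing; a minimal sketch of the idea (not Telegraf's actual config loader):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"os"
)

func main() {
	raw, err := ioutil.ReadFile("telegraf.conf")
	if err != nil {
		log.Fatal(err)
	}
	// Replace $VAR and ${VAR} references with their environment values
	// before handing the text to the TOML parser.
	fmt.Println(os.ExpandEnv(string(raw)))
}
```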
## Configuration file locations
The location of the configuration file can be set via the `--config` command
line flag.
When the `--config-directory` command line flag is used files ending with
`.conf` in the specified directory will also be included in the Telegraf
configuration.
On most systems, the default locations are `/etc/telegraf/telegraf.conf` for
the main configuration file and `/etc/telegraf/telegraf.d` for the directory of
configuration files.
# Global Tags
Global tags can be specified in the `[global_tags]` section of the config file
in key="value" format. All metrics being gathered on this host will be tagged
with the tags specified here.
## `[agent]` Configuration
## Agent Configuration
Telegraf has a few options you can configure under the `agent` section of the
Telegraf has a few options you can configure under the `[agent]` section of the
config.
* **interval**: Default data collection interval for all inputs
* **round_interval**: Rounds collection interval to 'interval'
ie, if interval="10s" then always collect on :00, :10, :20, etc.
* **metric_batch_size**: Telegraf will send metrics to output in batch of at
most metric_batch_size metrics.
* **metric_buffer_limit**: Telegraf will cache metric_buffer_limit metrics
for each output, and will flush this buffer on a successful write.
This should be a multiple of metric_batch_size and should not be less
than 2 times metric_batch_size.
* **collection_jitter**: Collection jitter is used to jitter
the collection by a random amount.
Each plugin will sleep for a random time within jitter before collecting.
@@ -37,40 +72,114 @@ interval. Maximum flush_interval will be flush_interval + flush_jitter
This is primarily to avoid
large write spikes for users running a large number of telegraf instances.
ie, a jitter of 5s and flush_interval 10s means flushes will happen every 10-15s.
* **precision**:
By default or when set to "0s", precision will be set to the same
timestamp order as the collection interval, with the maximum being 1s.
Precision will NOT be used for service inputs. It is up to each individual
service input to set the timestamp at the appropriate precision.
Valid time units are "ns", "us" (or "µs"), "ms", "s".
* **logfile**: Specify the log file name. The empty string means to log to stderr.
* **debug**: Run telegraf in debug mode.
* **quiet**: Run telegraf in quiet mode.
* **quiet**: Run telegraf in quiet mode (error messages only).
* **hostname**: Override default hostname, if empty use os.Hostname().
* **omit_hostname**: If true, do not set the "host" tag in the telegraf agent.
## `[inputs.xxx]` Configuration
## Input Configuration
There are some configuration options that are configurable per input:
The following config parameters are available for all inputs:
* **interval**: How often to gather this metric. Normal plugins use a single
global interval, but if one particular input should be run less or more often,
you can configure that here.
* **name_override**: Override the base name of the measurement.
(Default is the name of the input).
* **name_prefix**: Specifies a prefix to attach to the measurement name.
* **name_suffix**: Specifies a suffix to attach to the measurement name.
* **tags**: A map of tags to apply to a specific input's measurements.
* **interval**: How often to gather this metric. Normal plugins use a single
global interval, but if one particular input should be run less or more often,
you can configure that here.
#### Input Filters
The [measurement filtering](#measurement-filtering) parameters can be used to
limit what metrics are emitted from the input plugin.
There are also filters that can be configured per input:
## Output Configuration
* **namepass**: An array of strings that is used to filter metrics generated by the
current input. Each string in the array is tested as a glob match against
measurement names and if it matches, the metric is emitted.
* **namedrop**: The inverse of namepass; if a measurement name matches, it is not emitted.
* **fieldpass**: An array of strings that is used to filter metrics generated by the
current input. Each string in the array is tested as a glob match against field names
and if it matches, the field is emitted.
* **fielddrop**: The inverse of fieldpass; if a field name matches, it is not emitted.
* **tagpass**: tag names and arrays of strings that are used to filter
measurements by the current input. Each string in the array is tested as a glob
match against the tag name, and if it matches the measurement is emitted.
* **tagdrop**: The inverse of tagpass. If a tag matches, the measurement is not
emitted. This is tested on measurements that have passed the tagpass test.
The [measurement filtering](#measurement-filtering) parameters can be used to
limit what metrics are emitted from the output plugin.
## Aggregator Configuration
The following config parameters are available for all aggregators:
* **period**: The period on which to flush & clear each aggregator. All metrics
that are sent with timestamps outside of this period will be ignored by the
aggregator.
* **delay**: The delay before each aggregator is flushed. This is to control
how long for aggregators to wait before receiving metrics from input plugins,
in the case that aggregators are flushing and inputs are gathering on the
same interval.
* **drop_original**: If true, the original metric will be dropped by the
aggregator and will not get sent to the output plugins.
* **name_override**: Override the base name of the measurement.
(Default is the name of the aggregator).
* **name_prefix**: Specifies a prefix to attach to the measurement name.
* **name_suffix**: Specifies a suffix to attach to the measurement name.
* **tags**: A map of tags to apply to the aggregator's measurements.
The [measurement filtering](#measurement-filtering) parameters can be used to
limit what metrics are handled by the aggregator. Excluded metrics are passed
downstream to the next aggregator.
## Processor Configuration
The following config parameters are available for all processors:
* **order**: This is the order in which the processor(s) get executed. If this
is not specified then processor execution order will be random.
The [measurement filtering](#measurement-filtering) parameters may be used
to limit what metrics are handled by the processor. Excluded metrics are
passed downstream to the next processor.
#### Measurement Filtering
Filters can be configured per input, output, processor, or aggregator;
see below for examples.
* **namepass**:
An array of glob pattern strings. Only points whose measurement name matches
a pattern in this list are emitted.
* **namedrop**:
The inverse of `namepass`. If a match is found the point is discarded. This
is tested on points after they have passed the `namepass` test.
* **fieldpass**:
An array of glob pattern strings. Only fields whose field key matches a
pattern in this list are emitted. Not available for outputs.
* **fielddrop**:
The inverse of `fieldpass`. Fields with a field key matching one of the
patterns will be discarded from the point. This is tested on points after
they have passed the `fieldpass` test. Not available for outputs.
* **tagpass**:
A table mapping tag keys to arrays of glob pattern strings. Only points
that contain a tag key in the table and a tag value matching one of its
patterns are emitted.
* **tagdrop**:
The inverse of `tagpass`. If a match is found the point is discarded. This
is tested on points after they have passed the `tagpass` test.
* **taginclude**:
An array of glob pattern strings. Only tags with a tag key matching one of
the patterns are emitted. In contrast to `tagpass`, which will pass an entire
point based on its tag, `taginclude` removes all non matching tags from the
point. This filter can be used on both inputs & outputs, but it is
_recommended_ to be used on inputs, as it is more efficient to filter out tags
at the ingestion point.
* **tagexclude**:
The inverse of `taginclude`. Tags with a tag key matching one of the patterns
will be discarded from the point.
**NOTE** Due to the way TOML is parsed, `tagpass` and `tagdrop` parameters
must be defined at the _end_ of the plugin definition, otherwise subsequent
plugin config options will be interpreted as part of the tagpass/tagdrop
tables.
#### Input Configuration Examples
@@ -90,7 +199,6 @@ fields which begin with `time_`.
[[outputs.influxdb]]
url = "http://192.168.59.103:8086" # required.
database = "telegraf" # required.
precision = "s"
# INPUTS
[[inputs.cpu]]
@@ -102,6 +210,10 @@ fields which begin with `time_`.
#### Input Config: tagpass and tagdrop
**NOTE** `tagpass` and `tagdrop` parameters must be defined at the _end_ of
the plugin definition, otherwise subsequent plugin config options will be
interpreted as part of the tagpass/tagdrop map.
```toml
[[inputs.cpu]]
percpu = true
@@ -141,12 +253,26 @@ fields which begin with `time_`.
# Drop all metrics about containers for kubelet
[[inputs.prometheus]]
urls = ["http://kube-node-1:4194/metrics"]
namedrop = ["container_"]
namedrop = ["container_*"]
# Only store rest client related metrics for kubelet
[[inputs.prometheus]]
urls = ["http://kube-node-1:4194/metrics"]
namepass = ["rest_client_"]
namepass = ["rest_client_*"]
```
#### Input Config: taginclude and tagexclude
```toml
# Only include the "cpu" tag in the measurements for the cpu plugin.
[[inputs.cpu]]
percpu = true
totalcpu = true
taginclude = ["cpu"]
# Exclude the "fstype" tag from the measurements for the disk plugin.
[[inputs.disk]]
tagexclude = ["fstype"]
```
#### Input config: prefix, suffix, and override
@@ -174,6 +300,9 @@ This will emit measurements with the name `foobar`
This plugin will emit measurements with two additional tags: `tag1=foo` and
`tag2=bar`
NOTE: Order matters, the `[inputs.cpu.tags]` table must be at the _end_ of the
plugin definition.
```toml
[[inputs.cpu]]
percpu = false
@@ -202,35 +331,73 @@ to avoid measurement collisions:
fielddrop = ["cpu_time*"]
```
## `[outputs.xxx]` Configuration
Telegraf also supports specifying multiple output sinks to send data to,
configuring each output sink is different, but examples can be
found by running `telegraf -sample-config`.
Outputs also support the same configurable options as inputs
(namepass, namedrop, tagpass, tagdrop)
#### Output Configuration Examples:
```toml
[[outputs.influxdb]]
urls = [ "http://localhost:8086" ]
database = "telegraf"
precision = "s"
# Drop all measurements that start with "aerospike"
namedrop = ["aerospike*"]
[[outputs.influxdb]]
urls = [ "http://localhost:8086" ]
database = "telegraf-aerospike-data"
precision = "s"
# Only accept aerospike data:
namepass = ["aerospike*"]
[[outputs.influxdb]]
urls = [ "http://localhost:8086" ]
database = "telegraf-cpu0-data"
precision = "s"
# Only store measurements where the tag "cpu" matches the value "cpu0"
[outputs.influxdb.tagpass]
cpu = ["cpu0"]
```
#### Aggregator Configuration Examples:
This will collect and emit the min/max of the system load1 metric every
30s, dropping the originals.
```toml
[[inputs.system]]
fieldpass = ["load1"] # collects system load1 metric.
[[aggregators.minmax]]
period = "30s" # send & clear the aggregate every 30s.
drop_original = true # drop the original metrics.
[[outputs.file]]
files = ["stdout"]
```
This will collect and emit the min/max of the swap metrics every
30s, dropping the originals. The aggregator will not be applied
to the system load metrics due to the `namepass` parameter.
```toml
[[inputs.swap]]
[[inputs.system]]
fieldpass = ["load1"] # collects system load1 metric.
[[aggregators.minmax]]
period = "30s" # send & clear the aggregate every 30s.
drop_original = true # drop the original metrics.
namepass = ["swap"] # only "pass" swap metrics through the aggregator.
[[outputs.file]]
files = ["stdout"]
```
#### Processor Configuration Examples:
Print only the metrics with `cpu` as the measurement name; all metrics are
passed to the output:
```toml
[[processors.printer]]
namepass = "cpu"
[[outputs.file]]
files = ["/tmp/metrics.out"]
```

View File

@@ -1,5 +1,14 @@
# Telegraf Input Data Formats
Telegraf is able to parse the following input data formats into metrics:
1. [InfluxDB Line Protocol](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#influx)
1. [JSON](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#json)
1. [Graphite](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#graphite)
1. [Value](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#value), ie: 45 or "booyah"
1. [Nagios](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#nagios) (exec input only)
1. [Collectd](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#collectd)
Telegraf metrics, like InfluxDB
[points](https://docs.influxdata.com/influxdb/v0.10/write_protocols/line/),
are a combination of four basic parts:
@@ -31,8 +40,8 @@ example, in the exec plugin:
## measurement name suffix (for separating different commands)
name_suffix = "_mycollector"
## Data format to consume. This can be "json", "influx" or "graphite"
## Each data format has it's own unique set of configuration options, read
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "json"
@@ -43,7 +52,7 @@ example, in the exec plugin:
Each data_format has an additional set of configuration options available, which
I'll go over below.
## Influx:
# Influx:
There are no additional configuration options for InfluxDB line-protocol. The
metrics are parsed directly into Telegraf metrics.
@@ -58,23 +67,28 @@ metrics are parsed directly into Telegraf metrics.
## measurement name suffix (for separating different commands)
name_suffix = "_mycollector"
## Data format to consume. This can be "json", "influx" or "graphite"
## Each data format has it's own unique set of configuration options, read
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "influx"
```
## JSON:
# JSON:
The JSON data format flattens JSON into metric _fields_. For example, this JSON:
The JSON data format flattens JSON into metric _fields_.
NOTE: Only numerical values are converted to fields, and they are converted
into a float. Strings are ignored unless specified as a tag_key (see below).
So for example, this JSON:
```json
{
"a": 5,
"b": {
"c": 6
}
},
"ignored": "I'm a string"
}
```
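The flattening itself is a simple recursive walk; the sketch below (a hypothetical helper, not the parser's actual code) turns the JSON above into the fields `a=5` and `b_c=6`:

```go
package main

import "fmt"

// flatten walks a decoded JSON object and collects numeric leaves as
// fields, joining nested keys with "_"; strings are skipped, matching
// the note above.
func flatten(prefix string, in map[string]interface{}, out map[string]float64) {
	for k, v := range in {
		key := k
		if prefix != "" {
			key = prefix + "_" + k
		}
		switch t := v.(type) {
		case float64: // encoding/json decodes all JSON numbers as float64
			out[key] = t
		case map[string]interface{}:
			flatten(key, t, out)
		}
	}
}

func main() {
	doc := map[string]interface{}{
		"a":       float64(5),
		"b":       map[string]interface{}{"c": float64(6)},
		"ignored": "I'm a string",
	}
	fields := make(map[string]float64)
	flatten("", doc, fields)
	fmt.Println(fields) // map[a:5 b_c:6]
}
```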
@@ -103,8 +117,8 @@ For example, if you had this configuration:
## measurement name suffix (for separating different commands)
name_suffix = "_mycollector"
## Data format to consume. This can be "json", "influx" or "graphite"
## Each data format has it's own unique set of configuration options, read
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "json"
@@ -134,62 +148,191 @@ Your Telegraf metrics would get tagged with "my_tag_1"
exec_mycollector,my_tag_1=foo a=5,b_c=6
```
## Graphite:
If the JSON data is an array, then each element of the array is parsed with the configured settings.
Each resulting metric will be output with the same timestamp.
For example, with the following configuration:
```toml
[[inputs.exec]]
## Commands array
commands = ["/usr/bin/mycollector --foo=bar"]
## measurement name suffix (for separating different commands)
name_suffix = "_mycollector"
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "json"
## List of tag names to extract from top-level of JSON server response
tag_keys = [
"my_tag_1",
"my_tag_2"
]
```
with this JSON output from a command:
```json
[
{
"a": 5,
"b": {
"c": 6
},
"my_tag_1": "foo",
"my_tag_2": "baz"
},
{
"a": 7,
"b": {
"c": 8
},
"my_tag_1": "bar",
"my_tag_2": "baz"
}
]
```
Your Telegraf metrics would get tagged with "my_tag_1" and "my_tag_2"
```
exec_mycollector,my_tag_1=foo,my_tag_2=baz a=5,b_c=6
exec_mycollector,my_tag_1=bar,my_tag_2=baz a=7,b_c=8
```
# Value:
The "value" data format translates single values into Telegraf metrics. This
is done by assigning a measurement name and setting a single field ("value")
as the parsed metric.
#### Value Configuration:
You **must** tell Telegraf what type of metric to collect by using the
`data_type` configuration option. Available options are:
1. integer
2. float or long
3. string
4. boolean
**Note:** It is also recommended that you set `name_override` to a measurement
name that makes sense for your metric, otherwise it will just be set to the
name of the plugin.
```toml
[[inputs.exec]]
## Commands array
commands = ["cat /proc/sys/kernel/random/entropy_avail"]
## override the default metric name of "exec"
name_override = "entropy_available"
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "value"
data_type = "integer" # required
```
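Under the hood this is little more than a single `strconv` conversion selected by `data_type`; a rough sketch (hypothetical function, not the actual parser):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseValue converts one raw line into the single "value" field,
// according to the configured data_type.
func parseValue(raw, dataType string) (interface{}, error) {
	raw = strings.TrimSpace(raw)
	switch dataType {
	case "integer":
		return strconv.ParseInt(raw, 10, 64)
	case "float", "long":
		return strconv.ParseFloat(raw, 64)
	case "boolean":
		return strconv.ParseBool(raw)
	case "string":
		return raw, nil
	default:
		return nil, fmt.Errorf("unknown data_type %q", dataType)
	}
}

func main() {
	v, _ := parseValue("3897\n", "integer")
	fmt.Println(v) // 3897
}
```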
# Graphite:
The Graphite data format translates graphite _dot_ buckets directly into
telegraf measurement names, with a single value field, and without any tags. For
more advanced options, Telegraf supports specifying "templates" to translate
telegraf measurement names, with a single value field, and without any tags.
By default, the separator is left as ".", but this can be changed using the
"separator" argument. For more advanced options,
Telegraf supports specifying "templates" to translate
graphite buckets into Telegraf metrics.
#### Separator:
You can specify a separator to use for the parsed metrics.
By default, it will leave the metrics with a "." separator.
Setting `separator = "_"` will translate:
Templates are of the form:
```
cpu.usage.idle 99
=> cpu_usage_idle value=99
"host.mytag.mytag.measurement.measurement.field*"
```
#### Measurement/Tag Templates:
Where the following keywords exist:
1. `measurement`: specifies that this section of the graphite bucket corresponds
to the measurement name. This can be specified multiple times.
2. `field`: specifies that this section of the graphite bucket corresponds
to the field name. This can be specified multiple times.
3. `measurement*`: specifies that all remaining elements of the graphite bucket
correspond to the measurement name.
4. `field*`: specifies that all remaining elements of the graphite bucket
correspond to the field name.
Any part of the template that is not a keyword is treated as a tag key. This
can also be specified multiple times.
NOTE: `field*` cannot be used in conjunction with `measurement*`!
#### Measurement & Tag Templates:
The most basic template is to specify a single transformation to apply to all
incoming metrics. _measurement_ is a special keyword that tells Telegraf which
parts of the graphite bucket to combine into the measurement name. It can have a
trailing `*` to indicate that the remainder of the metric should be used.
Other words are considered tag keys. So the following template:
incoming metrics. So the following template:
```toml
templates = [
"region.measurement*"
"region.region.measurement*"
]
```
would result in the following Graphite -> Telegraf transformation.
```
us-west.cpu.load 100
=> cpu.load,region=us-west value=100
us.west.cpu.load 100
=> cpu.load,region=us.west value=100
```
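Mechanically, applying one template is a positional walk over the dot-separated bucket; the sketch below (hypothetical code that ignores filters and the `field` keyword) reproduces the transformation above:

```go
package main

import (
	"fmt"
	"strings"
)

// applyTemplate assigns each bucket element the role at the same
// position in the template: "measurement" elements are joined into the
// measurement name, "measurement*" swallows the rest, and any
// non-keyword role becomes a tag key (repeated keys join with the
// separator).
func applyTemplate(bucket, template, separator string) (string, map[string]string) {
	parts := strings.Split(bucket, ".")
	roles := strings.Split(template, ".")
	var measurement []string
	tags := make(map[string]string)
	for i, part := range parts {
		if i >= len(roles) {
			break
		}
		switch roles[i] {
		case "measurement":
			measurement = append(measurement, part)
		case "measurement*":
			measurement = append(measurement, parts[i:]...)
			return strings.Join(measurement, separator), tags
		default:
			if prev, ok := tags[roles[i]]; ok {
				tags[roles[i]] = prev + separator + part
			} else {
				tags[roles[i]] = part
			}
		}
	}
	return strings.Join(measurement, separator), tags
}

func main() {
	m, tags := applyTemplate("us.west.cpu.load", "region.region.measurement*", ".")
	fmt.Println(m, tags) // cpu.load map[region:us.west]
}
```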
Multiple templates can also be specified, but these should be differentiated
using _filters_ (see below for more details)
```toml
templates = [
"*.*.* region.region.measurement", # <- all 3-part measurements will match this one.
"*.*.*.* region.region.host.measurement", # <- all 4-part measurements will match this one.
]
```
#### Field Templates:
There is also a _field_ keyword, which can only be specified once.
The field keyword tells Telegraf to give the metric that field name.
So the following template:
```toml
separator = "_"
templates = [
"measurement.measurement.field.region"
"measurement.measurement.field.field.region"
]
```
would result in the following Graphite -> Telegraf transformation.
```
cpu.usage.idle.us-west 100
=> cpu_usage,region=us-west idle=100
cpu.usage.idle.percent.eu-east 100
=> cpu_usage,region=eu-east idle_percent=100
```
The field key can also be derived from all remaining elements of the graphite
bucket by specifying `field*`:
```toml
separator = "_"
templates = [
"measurement.measurement.region.field*"
]
```
which would result in the following Graphite -> Telegraf transformation.
```
cpu.usage.eu-east.idle.percentage 100
=> cpu_usage,region=eu-east idle_percentage=100
```
#### Filter Templates:
@@ -207,8 +350,8 @@ templates = [
which would result in the following transformation:
```
cpu.load.us-west 100
=> cpu_load,region=us-west value=100
cpu.load.eu-east 100
=> cpu_load,region=eu-east value=100
mem.cached.localhost 256
=> mem_cached,host=localhost value=256
@@ -230,8 +373,8 @@ templates = [
would result in the following Graphite -> Telegraf transformation.
```
cpu.usage.idle.us-west 100
=> cpu_usage,region=us-west,datacenter=1a idle=100
cpu.usage.idle.eu-east 100
=> cpu_usage,region=eu-east,datacenter=1a idle=100
```
There are many more options available,
@@ -247,8 +390,8 @@ There are many more options available,
## measurement name suffix (for separating different commands)
name_suffix = "_mycollector"
## Data format to consume. This can be "json", "influx" or "graphite" (line-protocol)
## Each data format has it's own unique set of configuration options, read
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "graphite"
@@ -262,13 +405,77 @@ There are many more options available,
## similar to the line protocol format. There can be only one default template.
## Templates support below format:
## 1. filter + template
## 2. filter + template + extra tag
## 2. filter + template + extra tag(s)
## 3. filter + template with field key
## 4. default template
templates = [
"*.app env.service.resource.measurement",
"stats.* .host.measurement* region=us-west,agent=sensu",
"stats.* .host.measurement* region=eu-east,agent=sensu",
"stats2.* .host.measurement.field",
"measurement*"
]
```
# Nagios:
There are no additional configuration options for Nagios line-protocol. The
metrics are parsed directly into Telegraf metrics.
Note: the Nagios input data format is only supported by the `exec` input plugin.
#### Nagios Configuration:
```toml
[[inputs.exec]]
## Commands array
commands = ["/usr/lib/nagios/plugins/check_load -w 5,6,7 -c 7,8,9"]
## measurement name suffix (for separating different commands)
name_suffix = "_mycollector"
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "nagios"
```
# Collectd:
The collectd format parses the collectd binary network protocol. Tags are
created for host, instance, type, and type instance. All collectd values are
added as float64 fields.
For more information about the binary network protocol see
[here](https://collectd.org/wiki/index.php/Binary_protocol).
You can control the cryptographic settings with parser options. Create an
authentication file and set `collectd_auth_file` to the path of the file, then
set the desired security level in `collectd_security_level`.
Additional information including client setup can be found
[here](https://collectd.org/wiki/index.php/Networking_introduction#Cryptographic_setup).
You can also change the path to the typesdb or add additional typesdb files
using `collectd_typesdb`.
#### Collectd Configuration:
```toml
[[inputs.socket_listener]]
service_address = "udp://127.0.0.1:25826"
name_prefix = "collectd_"
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "collectd"
## Authentication file for cryptographic security levels
collectd_auth_file = "/etc/collectd/auth_file"
## One of none (default), sign, or encrypt
collectd_security_level = "encrypt"
## Path to TypesDB specifications
collectd_typesdb = ["/usr/share/collectd/types.db"]
```


@@ -1,5 +1,11 @@
# Telegraf Output Data Formats
Telegraf is able to serialize metrics into the following output data formats:
1. [InfluxDB Line Protocol](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#influx)
1. [JSON](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#json)
1. [Graphite](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite)
Telegraf metrics, like InfluxDB
[points](https://docs.influxdata.com/influxdb/v0.10/write_protocols/line/),
are a combination of four basic parts:
@@ -29,8 +35,8 @@ config option, for example, in the `file` output plugin:
## Files to write to, "stdout" is a specially handled file.
files = ["stdout"]
## Data format to output.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "influx"
@@ -41,34 +47,46 @@ config option, for example, in the `file` output plugin:
Each data_format has an additional set of configuration options available, which
I'll go over below.
# Influx:
There are no additional configuration options for InfluxDB line-protocol. The
metrics are serialized directly into InfluxDB line-protocol.
### Influx Configuration:
```toml
[[outputs.file]]
## Files to write to, "stdout" is a specially handled file.
files = ["stdout", "/tmp/metrics.out"]
## Data format to output.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "influx"
```
# Graphite:
The Graphite data format translates Telegraf metrics into _dot_ buckets. A
template can be specified for the output of Telegraf metrics into Graphite
buckets. The default template is:
```
template = "host.tags.measurement.field"
```
In the above template, we have four parts:
1. _host_ is a tag key. This can be any tag key that is in the Telegraf
metric(s). If the key doesn't exist, it will be ignored. If it does exist, the
tag value will be filled in.
1. _tags_ is a special keyword that outputs all remaining tag values, separated
by dots and in alphabetical order (by tag key). These will be filled after all
tag keys are filled.
1. _measurement_ is a special keyword that outputs the measurement name.
1. _field_ is a special keyword that outputs the field name.
Which means the following influx metric -> graphite conversion would happen:
```
@@ -78,20 +96,68 @@ tars.cpu-total.us-east-1.cpu.usage_user 0.89 1455320690
tars.cpu-total.us-east-1.cpu.usage_idle 98.09 1455320690
```
`prefix` is a configuration option when using the graphite output data format.
Fields with string values will be skipped. Boolean fields will be converted
to 1 (true) or 0 (false).
### Graphite Configuration:
```toml
[[outputs.file]]
## Files to write to, "stdout" is a specially handled file.
files = ["stdout", "/tmp/metrics.out"]
## Data format to output.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "influx"
data_format = "graphite"
# prefix each graphite bucket
prefix = "telegraf"
# graphite template
template = "host.tags.measurement.field"
```
# JSON:
The JSON data format serializes Telegraf metrics in JSON format. The format is:
```json
{
"fields":{
"field_1":30,
"field_2":4,
"field_N":59,
"n_images":660
},
"name":"docker",
"tags":{
"host":"raynor"
},
"timestamp":1458229140
}
```
### JSON Configuration:
```toml
[[outputs.file]]
## Files to write to, "stdout" is a specially handled file.
files = ["stdout", "/tmp/metrics.out"]
## Data format to output.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "json"
json_timestamp_units = "1ns"
```
By default, the timestamp that is output in JSON-serialized Telegraf metrics
is in seconds. The precision of this timestamp can be adjusted for any output
by adding the optional `json_timestamp_units` parameter to the configuration for
that output. This parameter can be used to set the timestamp units to nanoseconds (`ns`),
microseconds (`us` or `µs`), milliseconds (`ms`), or seconds (`s`). Note that this
parameter will be truncated to the nearest power of 10, so if `json_timestamp_units`
is set to `15ms`, the timestamps for JSON-serialized Telegraf metrics will be
output in hundredths of a second (`10ms`).
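To make the rounding rule concrete, here is a minimal Go sketch of truncating
a duration down to the nearest power of ten (an illustration only, not the
serializer's actual code; `truncateToPowerOf10` is a hypothetical helper):
```go
package main

import (
	"fmt"
	"time"
)

// truncateToPowerOf10 rounds d down to the nearest power-of-ten duration,
// mirroring the documented behavior of json_timestamp_units (15ms -> 10ms).
func truncateToPowerOf10(d time.Duration) time.Duration {
	out := time.Nanosecond
	for out*10 <= d {
		out *= 10
	}
	return out
}

func main() {
	fmt.Println(truncateToPowerOf10(15 * time.Millisecond)) // 10ms
	fmt.Println(truncateToPowerOf10(time.Second))           // 1s
}
```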

docs/FAQ.md

@@ -0,0 +1,46 @@
# Frequently Asked Questions
### Q: How can I monitor the Docker Engine Host from within a container?
You will need to set up several volume mounts as well as some environment
variables:
```
docker run --name telegraf \
  -v /:/hostfs:ro \
  -v /etc:/hostfs/etc:ro \
  -v /proc:/hostfs/proc:ro \
  -v /sys:/hostfs/sys:ro \
  -v /var/run/utmp:/var/run/utmp:ro \
  -e HOST_ETC=/hostfs/etc \
  -e HOST_PROC=/hostfs/proc \
  -e HOST_SYS=/hostfs/sys \
  -e HOST_MOUNT_PREFIX=/hostfs \
  telegraf
```
### Q: Why do I get a "no such host" error resolving hostnames that other programs can resolve?
Go uses a pure Go resolver by default for [name resolution](https://golang.org/pkg/net/#hdr-Name_Resolution).
This resolver behaves differently than the C library functions but is more
efficient when used with the Go runtime.
If you encounter problems or want to use more advanced name resolution methods
that are unsupported by the pure Go resolver, you can switch to the cgo
resolver.
If running manually, set:
```
export GODEBUG=netdns=cgo
```
If running as a service add the environment variable to `/etc/default/telegraf`:
```
GODEBUG=netdns=cgo
```
### Q: When will the next version be released?
The latest release date estimate can be viewed on the
[milestones](https://github.com/influxdata/telegraf/milestones) page.


@@ -1,33 +1,104 @@
# Licenses of dependencies
When distributed in a binary form, Telegraf may contain portions of the
following works:
- collectd.org [MIT](https://github.com/collectd/go-collectd/blob/master/LICENSE)
- github.com/aerospike/aerospike-client-go [APACHE](https://github.com/aerospike/aerospike-client-go/blob/master/LICENSE)
- github.com/amir/raidman [PUBLIC DOMAIN](https://github.com/amir/raidman/blob/master/UNLICENSE)
- github.com/armon/go-metrics [MIT](https://github.com/armon/go-metrics/blob/master/LICENSE)
- github.com/aws/aws-sdk-go [APACHE](https://github.com/aws/aws-sdk-go/blob/master/LICENSE.txt)
- github.com/beorn7/perks [MIT](https://github.com/beorn7/perks/blob/master/LICENSE)
- github.com/boltdb/bolt [MIT](https://github.com/boltdb/bolt/blob/master/LICENSE)
- github.com/bsm/sarama-cluster [MIT](https://github.com/bsm/sarama-cluster/blob/master/LICENSE)
- github.com/cenkalti/backoff [MIT](https://github.com/cenkalti/backoff/blob/master/LICENSE)
- github.com/chuckpreslar/rcon [MIT](https://github.com/chuckpreslar/rcon#license)
- github.com/couchbase/go-couchbase [MIT](https://github.com/couchbase/go-couchbase/blob/master/LICENSE)
- github.com/couchbase/gomemcached [MIT](https://github.com/couchbase/gomemcached/blob/master/LICENSE)
- github.com/couchbase/goutils [MIT](https://github.com/couchbase/go-couchbase/blob/master/LICENSE)
- github.com/dancannon/gorethink [APACHE](https://github.com/dancannon/gorethink/blob/master/LICENSE)
- github.com/davecgh/go-spew [ISC](https://github.com/davecgh/go-spew/blob/master/LICENSE)
- github.com/docker/docker [APACHE](https://github.com/docker/docker/blob/master/LICENSE)
- github.com/docker/cli [APACHE](https://github.com/docker/cli/blob/master/LICENSE)
- github.com/eapache/go-resiliency [MIT](https://github.com/eapache/go-resiliency/blob/master/LICENSE)
- github.com/eapache/go-xerial-snappy [MIT](https://github.com/eapache/go-xerial-snappy/blob/master/LICENSE)
- github.com/eapache/queue [MIT](https://github.com/eapache/queue/blob/master/LICENSE)
- github.com/eclipse/paho.mqtt.golang [ECLIPSE](https://github.com/eclipse/paho.mqtt.golang/blob/master/LICENSE)
- github.com/fsouza/go-dockerclient [BSD](https://github.com/fsouza/go-dockerclient/blob/master/LICENSE)
- github.com/gobwas/glob [MIT](https://github.com/gobwas/glob/blob/master/LICENSE)
- github.com/google/go-cmp [BSD](https://github.com/google/go-cmp/blob/master/LICENSE)
- github.com/gogo/protobuf [BSD](https://github.com/gogo/protobuf/blob/master/LICENSE)
- github.com/golang/protobuf [BSD](https://github.com/golang/protobuf/blob/master/LICENSE)
- github.com/golang/snappy [BSD](https://github.com/golang/snappy/blob/master/LICENSE)
- github.com/go-logfmt/logfmt [MIT](https://github.com/go-logfmt/logfmt/blob/master/LICENSE)
- github.com/gorilla/mux [BSD](https://github.com/gorilla/mux/blob/master/LICENSE)
- github.com/go-ini/ini [APACHE](https://github.com/go-ini/ini/blob/master/LICENSE)
- github.com/go-ole/go-ole [MIT](http://mattn.mit-license.org/2013)
- github.com/go-sql-driver/mysql [MPL](https://github.com/go-sql-driver/mysql/blob/master/LICENSE)
- github.com/hailocab/go-hostpool [MIT](https://github.com/hailocab/go-hostpool/blob/master/LICENSE)
- github.com/hashicorp/consul [MPL](https://github.com/hashicorp/consul/blob/master/LICENSE)
- github.com/hashicorp/go-msgpack [BSD](https://github.com/hashicorp/go-msgpack/blob/master/LICENSE)
- github.com/hashicorp/raft-boltdb [MPL](https://github.com/hashicorp/raft-boltdb/blob/master/LICENSE)
- github.com/hashicorp/raft [MPL](https://github.com/hashicorp/raft/blob/master/LICENSE)
- github.com/influxdata/tail [MIT](https://github.com/influxdata/tail/blob/master/LICENSE.txt)
- github.com/influxdata/toml [MIT](https://github.com/influxdata/toml/blob/master/LICENSE)
- github.com/influxdata/wlog [MIT](https://github.com/influxdata/wlog/blob/master/LICENSE)
- github.com/jackc/pgx [MIT](https://github.com/jackc/pgx/blob/master/LICENSE)
- github.com/jmespath/go-jmespath [APACHE](https://github.com/jmespath/go-jmespath/blob/master/LICENSE)
- github.com/kardianos/osext [BSD](https://github.com/kardianos/osext/blob/master/LICENSE)
- github.com/kardianos/service [ZLIB](https://github.com/kardianos/service/blob/master/LICENSE) (License not named but matches word for word with ZLib)
- github.com/kballard/go-shellquote [MIT](https://github.com/kballard/go-shellquote/blob/master/LICENSE)
- github.com/lib/pq [MIT](https://github.com/lib/pq/blob/master/LICENSE.md)
- github.com/matttproud/golang_protobuf_extensions [APACHE](https://github.com/matttproud/golang_protobuf_extensions/blob/master/LICENSE)
- github.com/Microsoft/go-winio [MIT](https://github.com/Microsoft/go-winio/blob/master/LICENSE)
- github.com/miekg/dns [BSD](https://github.com/miekg/dns/blob/master/LICENSE)
- github.com/naoina/go-stringutil [MIT](https://github.com/naoina/go-stringutil/blob/master/LICENSE)
- github.com/naoina/toml [MIT](https://github.com/naoina/toml/blob/master/LICENSE)
- github.com/nats-io/go-nats [MIT](https://github.com/nats-io/go-nats/blob/master/LICENSE)
- github.com/nats-io/nats [MIT](https://github.com/nats-io/nats/blob/master/LICENSE)
- github.com/nats-io/nuid [MIT](https://github.com/nats-io/nuid/blob/master/LICENSE)
- github.com/nsqio/go-nsq [MIT](https://github.com/nsqio/go-nsq/blob/master/LICENSE)
- github.com/opentracing-contrib/go-observer [APACHE](https://github.com/opentracing-contrib/go-observer/blob/master/LICENSE)
- github.com/opentracing/opentracing-go [MIT](https://github.com/opentracing/opentracing-go/blob/master/LICENSE)
- github.com/openzipkin/zipkin-go-opentracing [MIT](https://github.com/openzipkin/zipkin-go-opentracing/blob/master/LICENSE)
- github.com/pierrec/lz4 [BSD](https://github.com/pierrec/lz4/blob/master/LICENSE)
- github.com/pierrec/xxHash [BSD](https://github.com/pierrec/xxHash/blob/master/LICENSE)
- github.com/pkg/errors [BSD](https://github.com/pkg/errors/blob/master/LICENSE)
- github.com/pmezard/go-difflib [BSD](https://github.com/pmezard/go-difflib/blob/master/LICENSE)
- github.com/prometheus/client_golang [APACHE](https://github.com/prometheus/client_golang/blob/master/LICENSE)
- github.com/prometheus/client_model [APACHE](https://github.com/prometheus/client_model/blob/master/LICENSE)
- github.com/prometheus/common [APACHE](https://github.com/prometheus/common/blob/master/LICENSE)
- github.com/prometheus/procfs [APACHE](https://github.com/prometheus/procfs/blob/master/LICENSE)
- github.com/rcrowley/go-metrics [BSD](https://github.com/rcrowley/go-metrics/blob/master/LICENSE)
- github.com/samuel/go-zookeeper [BSD](https://github.com/samuel/go-zookeeper/blob/master/LICENSE)
- github.com/satori/go.uuid [MIT](https://github.com/satori/go.uuid/blob/master/LICENSE)
- github.com/shirou/gopsutil [BSD](https://github.com/shirou/gopsutil/blob/master/LICENSE)
- github.com/shirou/w32 [BSD](https://github.com/shirou/w32/blob/master/LICENSE)
- github.com/Shopify/sarama [MIT](https://github.com/Shopify/sarama/blob/master/MIT-LICENSE)
- github.com/Sirupsen/logrus [MIT](https://github.com/Sirupsen/logrus/blob/master/LICENSE)
- github.com/StackExchange/wmi [MIT](https://github.com/StackExchange/wmi/blob/master/LICENSE)
- github.com/soniah/gosnmp [BSD](https://github.com/soniah/gosnmp/blob/master/LICENSE)
- github.com/streadway/amqp [BSD](https://github.com/streadway/amqp/blob/master/LICENSE)
- github.com/stretchr/objx [MIT](https://github.com/stretchr/objx/blob/master/LICENSE.md)
- github.com/stretchr/testify [MIT](https://github.com/stretchr/testify/blob/master/LICENCE.txt)
- github.com/mitchellh/mapstructure [MIT](https://github.com/mitchellh/mapstructure/blob/master/LICENSE)
- github.com/multiplay/go-ts3 [BSD](https://github.com/multiplay/go-ts3/blob/master/LICENSE)
- github.com/vjeantet/grok [APACHE](https://github.com/vjeantet/grok/blob/master/LICENSE)
- github.com/wvanbergen/kafka [MIT](https://github.com/wvanbergen/kafka/blob/master/LICENSE)
- github.com/wvanbergen/kazoo-go [MIT](https://github.com/wvanbergen/kazoo-go/blob/master/MIT-LICENSE)
- github.com/yuin/gopher-lua [MIT](https://github.com/yuin/gopher-lua/blob/master/LICENSE)
- github.com/zensqlmonitor/go-mssqldb [BSD](https://github.com/zensqlmonitor/go-mssqldb/blob/master/LICENSE.txt)
- golang.org/x/crypto [BSD](https://github.com/golang/crypto/blob/master/LICENSE)
- golang.org/x/net [BSD](https://go.googlesource.com/net/+/master/LICENSE)
- golang.org/x/text [BSD](https://go.googlesource.com/text/+/master/LICENSE)
- golang.org/x/sys [BSD](https://go.googlesource.com/sys/+/master/LICENSE)
- gopkg.in/asn1-ber.v1 [MIT](https://github.com/go-asn1-ber/asn1-ber/blob/v1.2/LICENSE)
- gopkg.in/dancannon/gorethink.v1 [APACHE](https://github.com/dancannon/gorethink/blob/v1.1.2/LICENSE)
- gopkg.in/fatih/pool.v2 [MIT](https://github.com/fatih/pool/blob/v2.0.0/LICENSE)
- gopkg.in/fsnotify.v1 [BSD](https://github.com/fsnotify/fsnotify/blob/v1.4.2/LICENSE)
- gopkg.in/ldap.v2 [MIT](https://github.com/go-ldap/ldap/blob/v2.5.0/LICENSE)
- gopkg.in/mgo.v2 [BSD](https://github.com/go-mgo/mgo/blob/v2/LICENSE)
- gopkg.in/olivere/elastic.v5 [MIT](https://github.com/olivere/elastic/blob/v5.0.38/LICENSE)
- gopkg.in/tomb.v1 [BSD](https://github.com/go-tomb/tomb/blob/v1/LICENSE)
- gopkg.in/yaml.v2 [APACHE](https://github.com/go-yaml/yaml/blob/v2/LICENSE)

docs/PROFILING.md

@@ -0,0 +1,24 @@
# Telegraf profiling
Telegraf uses the standard package `net/http/pprof`. This package serves runtime profiling data via its HTTP server, in the format expected by the pprof visualization tool.
By default, profiling is turned off.
To enable profiling you need to specify an address with the `pprof-addr` config parameter, for example:
```
telegraf --config telegraf.conf --pprof-addr localhost:6060
```
There are several paths to get different profiling information:
To look at the heap profile:
`go tool pprof http://localhost:6060/debug/pprof/heap`
or to look at a 30-second CPU profile:
`go tool pprof http://localhost:6060/debug/pprof/profile?seconds=30`
To view all available profiles, open `http://localhost:6060/debug/pprof/` in your browser.
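As background, `net/http/pprof` registers its handlers as an import side effect; a minimal sketch of the same pattern in a standalone Go program (not Telegraf's actual startup code) looks like this:
```go
package main

import (
	"log"
	"net/http"
	_ "net/http/pprof" // registers the /debug/pprof/ handlers on the default mux
)

func main() {
	// Serve profiling data on localhost:6060, analogous to --pprof-addr.
	log.Fatal(http.ListenAndServe("localhost:6060", nil))
}
```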

docs/WINDOWS_SERVICE.md

@@ -0,0 +1,45 @@
# Running Telegraf as a Windows Service
Telegraf natively supports running as a Windows Service. Outlined below are
the general steps to set it up.
1. Obtain the telegraf windows distribution
2. Create the directory `C:\Program Files\Telegraf` (if you install in a different
location, simply specify the `--config` parameter with the desired location)
3. Place the telegraf.exe and the telegraf.conf config file into `C:\Program Files\Telegraf`
4. To install the service into the Windows Service Manager, run the following in PowerShell as an administrator (If necessary, you can wrap any spaces in the file paths in double quotes ""):
```
> C:\"Program Files"\Telegraf\telegraf.exe --service install
```
5. Edit the configuration file to meet your needs
6. To check that it works, run:
```
> C:\"Program Files"\Telegraf\telegraf.exe --config C:\"Program Files"\Telegraf\telegraf.conf --test
```
7. To start collecting data, run:
```
> net start telegraf
```
## Other supported operations
Telegraf can manage its own service through the --service flag:
| Command | Effect |
|------------------------------------|-------------------------------|
| `telegraf.exe --service install` | Install telegraf as a service |
| `telegraf.exe --service uninstall` | Remove the telegraf service |
| `telegraf.exe --service start` | Start the telegraf service |
| `telegraf.exe --service stop` | Stop the telegraf service |
## Troubleshooting common error #1067
When installing as a service in Windows, always double-check that you specify
the full path of the config file; otherwise, the Windows service will fail to
start:
--config C:\"Program Files"\Telegraf\telegraf.conf

File diff suppressed because it is too large

@@ -42,10 +42,14 @@
## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
flush_jitter = "0s"
## Logging configuration:
## Run telegraf in debug mode
debug = false
## Run telegraf in quiet mode
quiet = false
## Specify the log file name. The empty string means to log to stdout.
logfile = "/Program Files/Telegraf/telegraf.log"
## Override default hostname, if empty use os.Hostname()
hostname = ""
@@ -59,8 +63,8 @@
# The full HTTP or UDP endpoint URL for your InfluxDB instance.
# Multiple urls can be specified but it is assumed that they are part of the same
# cluster, this means that only ONE of the urls will be written to each interval.
# urls = ["udp://localhost:8089"] # UDP endpoint example
urls = ["http://localhost:8086"] # required
# urls = ["udp://127.0.0.1:8089"] # UDP endpoint example
urls = ["http://127.0.0.1:8086"] # required
# The target database for metrics (telegraf will create it if not exists)
database = "telegraf" # required
# Precision of writes, valid values are "ns", "us" (or "µs"), "ms", "s", "m", "h".
@@ -85,7 +89,7 @@
# Windows Performance Counters plugin.
# These are the recommended method of monitoring system metrics on windows,
# as the regular system plugins (inputs.cpu, inputs.mem, etc.) rely on WMI,
# which utilize more system resources.
#
# See more configuration examples at:
# https://github.com/influxdata/telegraf/tree/master/plugins/inputs/win_perf_counters
@@ -95,70 +99,168 @@
# Processor usage, alternative to native, reports on a per-core basis.
ObjectName = "Processor"
Instances = ["*"]
Counters = ["% Idle Time", "% Interrupt Time", "% Privileged Time", "% User Time", "% Processor Time"]
Counters = [
"% Idle Time",
"% Interrupt Time",
"% Privileged Time",
"% User Time",
"% Processor Time",
"% DPC Time",
]
Measurement = "win_cpu"
# Set to true to include _Total instance when querying for all (*).
IncludeTotal=true
[[inputs.win_perf_counters.object]]
# Disk times and queues
ObjectName = "LogicalDisk"
Instances = ["*"]
Counters = ["% Idle Time", "% Disk Time","% Disk Read Time", "% Disk Write Time", "% User Time", "Current Disk Queue Length"]
Counters = [
"% Idle Time",
"% Disk Time",
"% Disk Read Time",
"% Disk Write Time",
"Current Disk Queue Length",
"% Free Space",
"Free Megabytes",
]
Measurement = "win_disk"
# Set to true to include _Total instance when querying for all (*).
#IncludeTotal=false
[[inputs.win_perf_counters.object]]
ObjectName = "PhysicalDisk"
Instances = ["*"]
Counters = [
"Disk Read Bytes/sec",
"Disk Write Bytes/sec",
"Current Disk Queue Length",
"Disk Reads/sec",
"Disk Writes/sec",
"% Disk Time",
"% Disk Read Time",
"% Disk Write Time",
]
Measurement = "win_diskio"
[[inputs.win_perf_counters.object]]
ObjectName = "Network Interface"
Instances = ["*"]
Counters = [
"Bytes Received/sec",
"Bytes Sent/sec",
"Packets Received/sec",
"Packets Sent/sec",
"Packets Received Discarded",
"Packets Outbound Discarded",
"Packets Received Errors",
"Packets Outbound Errors",
]
Measurement = "win_net"
[[inputs.win_perf_counters.object]]
ObjectName = "System"
Counters = ["Context Switches/sec","System Calls/sec"]
Counters = [
"Context Switches/sec",
"System Calls/sec",
"Processor Queue Length",
"System Up Time",
]
Instances = ["------"]
Measurement = "win_system"
# Set to true to include _Total instance when querying for all (*).
#IncludeTotal=false
[[inputs.win_perf_counters.object]]
# Example query where the Instance portion must be removed to get data back,
# such as from the Memory object.
ObjectName = "Memory"
Counters = ["Available Bytes","Cache Faults/sec","Demand Zero Faults/sec","Page Faults/sec","Pages/sec","Transition Faults/sec","Pool Nonpaged Bytes","Pool Paged Bytes"]
Instances = ["------"] # Use 6 x - to remove the Instance bit from the query.
Counters = [
"Available Bytes",
"Cache Faults/sec",
"Demand Zero Faults/sec",
"Page Faults/sec",
"Pages/sec",
"Transition Faults/sec",
"Pool Nonpaged Bytes",
"Pool Paged Bytes",
"Standby Cache Reserve Bytes",
"Standby Cache Normal Priority Bytes",
"Standby Cache Core Bytes",
]
# Use 6 x - to remove the Instance bit from the query.
Instances = ["------"]
Measurement = "win_mem"
# Set to true to include _Total instance when querying for all (*).
#IncludeTotal=false
[[inputs.win_perf_counters.object]]
# Example query where the Instance portion must be removed to get data back,
# such as from the Paging File object.
ObjectName = "Paging File"
Counters = [
"% Usage",
]
Instances = ["_Total"]
Measurement = "win_swap"
[[inputs.win_perf_counters.object]]
ObjectName = "Network Interface"
Instances = ["*"]
Counters = [
"Bytes Sent/sec",
"Bytes Received/sec",
"Packets Sent/sec",
"Packets Received/sec",
"Packets Received Discarded",
"Packets Received Errors",
"Packets Outbound Discarded",
"Packets Outbound Errors",
]
# Windows system plugins using WMI (disabled by default, using
# win_perf_counters over WMI is recommended)
# # Read metrics about cpu usage
# [[inputs.cpu]]
# ## Whether to report per-cpu stats or not
# percpu = true
# ## Whether to report total system cpu stats or not
# totalcpu = true
# ## Comment this line if you want the raw CPU time metrics
# fielddrop = ["time_*"]
# # Read metrics about disk usage by mount point
# [[inputs.disk]]
# ## By default, telegraf gather stats for all mountpoints.
# ## Setting mountpoints will restrict the stats to the specified mountpoints.
# ## mount_points=["/"]
#
# ## Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually
# ## present on /run, /var/run, /dev/shm or /dev).
# # ignore_fs = ["tmpfs", "devtmpfs"]
# # Read metrics about disk IO by device
# [[inputs.diskio]]
# ## By default, telegraf will gather stats for all devices including
# ## disk partitions.
# ## Setting devices will restrict the stats to the specified devices.
# ## devices = ["sda", "sdb"]
# ## Uncomment the following line if you do not need disk serial numbers.
# ## skip_serial_number = true
# # Read metrics about memory usage
# [[inputs.mem]]
# # no configuration
# # Read metrics about swap memory usage
# [[inputs.swap]]
# # no configuration

filter/filter.go

@@ -0,0 +1,116 @@
package filter
import (
"strings"
"github.com/gobwas/glob"
)
type Filter interface {
Match(string) bool
}
// Compile takes a list of string filters and returns a Filter interface
// for matching a given string against the filter list. The filter list
// supports glob matching too, ie:
//
// f, _ := Compile([]string{"cpu", "mem", "net*"})
// f.Match("cpu") // true
// f.Match("network") // true
// f.Match("memory") // false
//
func Compile(filters []string) (Filter, error) {
// return if there is nothing to compile
if len(filters) == 0 {
return nil, nil
}
// check if we can compile a non-glob filter
noGlob := true
for _, filter := range filters {
if hasMeta(filter) {
noGlob = false
break
}
}
switch {
case noGlob:
// return non-globbing filter if not needed.
return compileFilterNoGlob(filters), nil
case len(filters) == 1:
return glob.Compile(filters[0])
default:
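// Combine all filters into a single glob via brace expansion,
// e.g. {"cpu", "mem", "net*"} becomes "{cpu,mem,net*}".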
return glob.Compile("{" + strings.Join(filters, ",") + "}")
}
}
// hasMeta reports whether path contains any magic glob characters.
func hasMeta(s string) bool {
return strings.IndexAny(s, "*?[") >= 0
}
type filter struct {
m map[string]struct{}
}
func (f *filter) Match(s string) bool {
_, ok := f.m[s]
return ok
}
type filtersingle struct {
s string
}
func (f *filtersingle) Match(s string) bool {
return f.s == s
}
func compileFilterNoGlob(filters []string) Filter {
if len(filters) == 1 {
return &filtersingle{s: filters[0]}
}
out := filter{m: make(map[string]struct{})}
for _, filter := range filters {
out.m[filter] = struct{}{}
}
return &out
}
type IncludeExcludeFilter struct {
include Filter
exclude Filter
}
func NewIncludeExcludeFilter(
include []string,
exclude []string,
) (Filter, error) {
in, err := Compile(include)
if err != nil {
return nil, err
}
ex, err := Compile(exclude)
if err != nil {
return nil, err
}
return &IncludeExcludeFilter{in, ex}, nil
}
func (f *IncludeExcludeFilter) Match(s string) bool {
if f.include != nil {
if !f.include.Match(s) {
return false
}
}
if f.exclude != nil {
if f.exclude.Match(s) {
return false
}
}
return true
}

filter/filter_test.go

@@ -0,0 +1,96 @@
package filter
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestCompile(t *testing.T) {
f, err := Compile([]string{})
assert.NoError(t, err)
assert.Nil(t, f)
f, err = Compile([]string{"cpu"})
assert.NoError(t, err)
assert.True(t, f.Match("cpu"))
assert.False(t, f.Match("cpu0"))
assert.False(t, f.Match("mem"))
f, err = Compile([]string{"cpu*"})
assert.NoError(t, err)
assert.True(t, f.Match("cpu"))
assert.True(t, f.Match("cpu0"))
assert.False(t, f.Match("mem"))
f, err = Compile([]string{"cpu", "mem"})
assert.NoError(t, err)
assert.True(t, f.Match("cpu"))
assert.False(t, f.Match("cpu0"))
assert.True(t, f.Match("mem"))
f, err = Compile([]string{"cpu", "mem", "net*"})
assert.NoError(t, err)
assert.True(t, f.Match("cpu"))
assert.False(t, f.Match("cpu0"))
assert.True(t, f.Match("mem"))
assert.True(t, f.Match("network"))
}
var benchbool bool
func BenchmarkFilterSingleNoGlobFalse(b *testing.B) {
f, _ := Compile([]string{"cpu"})
var tmp bool
for n := 0; n < b.N; n++ {
tmp = f.Match("network")
}
benchbool = tmp
}
func BenchmarkFilterSingleNoGlobTrue(b *testing.B) {
f, _ := Compile([]string{"cpu"})
var tmp bool
for n := 0; n < b.N; n++ {
tmp = f.Match("cpu")
}
benchbool = tmp
}
func BenchmarkFilter(b *testing.B) {
f, _ := Compile([]string{"cpu", "mem", "net*"})
var tmp bool
for n := 0; n < b.N; n++ {
tmp = f.Match("network")
}
benchbool = tmp
}
func BenchmarkFilterNoGlob(b *testing.B) {
f, _ := Compile([]string{"cpu", "mem", "net"})
var tmp bool
for n := 0; n < b.N; n++ {
tmp = f.Match("net")
}
benchbool = tmp
}
func BenchmarkFilter2(b *testing.B) {
f, _ := Compile([]string{"aa", "bb", "c", "ad", "ar", "at", "aq",
"aw", "az", "axxx", "ab", "cpu", "mem", "net*"})
var tmp bool
for n := 0; n < b.N; n++ {
tmp = f.Match("network")
}
benchbool = tmp
}
func BenchmarkFilter2NoGlob(b *testing.B) {
f, _ := Compile([]string{"aa", "bb", "c", "ad", "ar", "at", "aq",
"aw", "az", "axxx", "ab", "cpu", "mem", "net"})
var tmp bool
for n := 0; n < b.N; n++ {
tmp = f.Match("net")
}
benchbool = tmp
}

internal/buffer/buffer.go

@@ -0,0 +1,76 @@
package buffer
import (
"sync"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/selfstat"
)
var (
MetricsWritten = selfstat.Register("agent", "metrics_written", map[string]string{})
MetricsDropped = selfstat.Register("agent", "metrics_dropped", map[string]string{})
)
// Buffer is an object for storing metrics in a circular buffer.
type Buffer struct {
buf chan telegraf.Metric
mu sync.Mutex
}
// NewBuffer returns a Buffer
// size is the maximum number of metrics that Buffer will cache. If Add is
// called when the buffer is full, then the oldest metric(s) will be dropped.
func NewBuffer(size int) *Buffer {
return &Buffer{
buf: make(chan telegraf.Metric, size),
}
}
// IsEmpty returns true if Buffer is empty.
func (b *Buffer) IsEmpty() bool {
return len(b.buf) == 0
}
// Len returns the current length of the buffer.
func (b *Buffer) Len() int {
return len(b.buf)
}
// Add adds metrics to the buffer.
func (b *Buffer) Add(metrics ...telegraf.Metric) {
for i := range metrics {
MetricsWritten.Incr(1)
select {
case b.buf <- metrics[i]:
default:
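// Buffer is full: drop the oldest metric to make room for the new one.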
b.mu.Lock()
MetricsDropped.Incr(1)
<-b.buf
b.buf <- metrics[i]
b.mu.Unlock()
}
}
}
// Batch returns a batch of metrics of size batchSize.
// the batch will be of maximum length batchSize. It can be less than batchSize,
// if the length of Buffer is less than batchSize.
func (b *Buffer) Batch(batchSize int) []telegraf.Metric {
b.mu.Lock()
n := min(len(b.buf), batchSize)
out := make([]telegraf.Metric, n)
for i := 0; i < n; i++ {
out[i] = <-b.buf
}
b.mu.Unlock()
return out
}
func min(a, b int) int {
if b < a {
return b
}
return a
}


@@ -0,0 +1,100 @@
package buffer
import (
"testing"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
)
var metricList = []telegraf.Metric{
testutil.TestMetric(2, "mymetric1"),
testutil.TestMetric(1, "mymetric2"),
testutil.TestMetric(11, "mymetric3"),
testutil.TestMetric(15, "mymetric4"),
testutil.TestMetric(8, "mymetric5"),
}
func BenchmarkAddMetrics(b *testing.B) {
buf := NewBuffer(10000)
m := testutil.TestMetric(1, "mymetric")
for n := 0; n < b.N; n++ {
buf.Add(m)
}
}
func TestNewBufferBasicFuncs(t *testing.T) {
b := NewBuffer(10)
MetricsDropped.Set(0)
MetricsWritten.Set(0)
assert.True(t, b.IsEmpty())
assert.Zero(t, b.Len())
assert.Zero(t, MetricsDropped.Get())
assert.Zero(t, MetricsWritten.Get())
m := testutil.TestMetric(1, "mymetric")
b.Add(m)
assert.False(t, b.IsEmpty())
assert.Equal(t, b.Len(), 1)
assert.Equal(t, int64(0), MetricsDropped.Get())
assert.Equal(t, int64(1), MetricsWritten.Get())
b.Add(metricList...)
assert.False(t, b.IsEmpty())
assert.Equal(t, b.Len(), 6)
assert.Equal(t, int64(0), MetricsDropped.Get())
assert.Equal(t, int64(6), MetricsWritten.Get())
}
func TestDroppingMetrics(t *testing.T) {
b := NewBuffer(10)
MetricsDropped.Set(0)
MetricsWritten.Set(0)
// Add up to the size of the buffer
b.Add(metricList...)
b.Add(metricList...)
assert.False(t, b.IsEmpty())
assert.Equal(t, b.Len(), 10)
assert.Equal(t, int64(0), MetricsDropped.Get())
assert.Equal(t, int64(10), MetricsWritten.Get())
// Add 5 more and verify they were dropped
b.Add(metricList...)
assert.False(t, b.IsEmpty())
assert.Equal(t, b.Len(), 10)
assert.Equal(t, int64(5), MetricsDropped.Get())
assert.Equal(t, int64(15), MetricsWritten.Get())
}
func TestGettingBatches(t *testing.T) {
b := NewBuffer(20)
MetricsDropped.Set(0)
MetricsWritten.Set(0)
// Verify that the buffer returned is smaller than requested when there are
// not as many items as requested.
b.Add(metricList...)
batch := b.Batch(10)
assert.Len(t, batch, 5)
// Verify that the buffer is now empty
assert.True(t, b.IsEmpty())
assert.Zero(t, b.Len())
assert.Zero(t, MetricsDropped.Get())
assert.Equal(t, int64(5), MetricsWritten.Get())
// Verify that the buffer returned is not more than the size requested
b.Add(metricList...)
batch = b.Batch(3)
assert.Len(t, batch, 3)
// Verify that buffer is not empty
assert.False(t, b.IsEmpty())
assert.Equal(t, b.Len(), 2)
assert.Equal(t, int64(0), MetricsDropped.Get())
assert.Equal(t, int64(10), MetricsWritten.Get())
}


@@ -0,0 +1,49 @@
package aws
import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
"github.com/aws/aws-sdk-go/aws/session"
)
type CredentialConfig struct {
Region string
AccessKey string
SecretKey string
RoleARN string
Profile string
Filename string
Token string
}
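// Credentials returns a client.ConfigProvider: when RoleARN is set, the root
// credentials are used to assume that role via STS; otherwise they are used directly.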
func (c *CredentialConfig) Credentials() client.ConfigProvider {
if c.RoleARN != "" {
return c.assumeCredentials()
} else {
return c.rootCredentials()
}
}
func (c *CredentialConfig) rootCredentials() client.ConfigProvider {
config := &aws.Config{
Region: aws.String(c.Region),
}
if c.AccessKey != "" || c.SecretKey != "" {
config.Credentials = credentials.NewStaticCredentials(c.AccessKey, c.SecretKey, c.Token)
} else if c.Profile != "" || c.Filename != "" {
config.Credentials = credentials.NewSharedCredentials(c.Filename, c.Profile)
}
return session.New(config)
}
func (c *CredentialConfig) assumeCredentials() client.ConfigProvider {
rootCredentials := c.rootCredentials()
config := &aws.Config{
Region: aws.String(c.Region),
}
config.Credentials = stscreds.NewCredentials(rootCredentials, c.RoleARN)
return session.New(config)
}

File diff suppressed because it is too large

@@ -1,6 +1,7 @@
package config
import (
"os"
"testing"
"time"
@@ -10,9 +11,53 @@ import (
"github.com/influxdata/telegraf/plugins/inputs/memcached"
"github.com/influxdata/telegraf/plugins/inputs/procstat"
"github.com/influxdata/telegraf/plugins/parsers"
"github.com/stretchr/testify/assert"
)
func TestConfig_LoadSingleInputWithEnvVars(t *testing.T) {
c := NewConfig()
err := os.Setenv("MY_TEST_SERVER", "192.168.1.1")
assert.NoError(t, err)
err = os.Setenv("TEST_INTERVAL", "10s")
assert.NoError(t, err)
c.LoadConfig("./testdata/single_plugin_env_vars.toml")
memcached := inputs.Inputs["memcached"]().(*memcached.Memcached)
memcached.Servers = []string{"192.168.1.1"}
filter := models.Filter{
NameDrop: []string{"metricname2"},
NamePass: []string{"metricname1"},
FieldDrop: []string{"other", "stuff"},
FieldPass: []string{"some", "strings"},
TagDrop: []models.TagFilter{
models.TagFilter{
Name: "badtag",
Filter: []string{"othertag"},
},
},
TagPass: []models.TagFilter{
models.TagFilter{
Name: "goodtag",
Filter: []string{"mytag"},
},
},
}
assert.NoError(t, filter.Compile())
mConfig := &models.InputConfig{
Name: "memcached",
Filter: filter,
Interval: 10 * time.Second,
}
mConfig.Tags = make(map[string]string)
assert.Equal(t, memcached, c.Inputs[0].Input,
"Testdata did not produce a correct memcached struct.")
assert.Equal(t, mConfig, c.Inputs[0].Config,
"Testdata did not produce correct memcached metadata.")
}
func TestConfig_LoadSingleInput(t *testing.T) {
c := NewConfig()
c.LoadConfig("./testdata/single_plugin.toml")
@@ -20,27 +65,28 @@ func TestConfig_LoadSingleInput(t *testing.T) {
memcached := inputs.Inputs["memcached"]().(*memcached.Memcached)
memcached.Servers = []string{"localhost"}
filter := models.Filter{
NameDrop: []string{"metricname2"},
NamePass: []string{"metricname1"},
FieldDrop: []string{"other", "stuff"},
FieldPass: []string{"some", "strings"},
TagDrop: []models.TagFilter{
models.TagFilter{
Name: "badtag",
Filter: []string{"othertag"},
},
},
TagPass: []models.TagFilter{
models.TagFilter{
Name: "goodtag",
Filter: []string{"mytag"},
},
},
}
assert.NoError(t, filter.Compile())
mConfig := &models.InputConfig{
Name: "memcached",
Filter: filter,
Interval: 5 * time.Second,
}
mConfig.Tags = make(map[string]string)
@@ -65,27 +111,28 @@ func TestConfig_LoadDirectory(t *testing.T) {
memcached := inputs.Inputs["memcached"]().(*memcached.Memcached)
memcached.Servers = []string{"localhost"}
filter := models.Filter{
NameDrop: []string{"metricname2"},
NamePass: []string{"metricname1"},
FieldDrop: []string{"other", "stuff"},
FieldPass: []string{"some", "strings"},
TagDrop: []models.TagFilter{
models.TagFilter{
Name: "badtag",
Filter: []string{"othertag"},
},
},
TagPass: []models.TagFilter{
models.TagFilter{
Name: "goodtag",
Filter: []string{"mytag"},
},
},
}
assert.NoError(t, filter.Compile())
mConfig := &models.InputConfig{
Name: "memcached",
Filter: filter,
Interval: 5 * time.Second,
}
mConfig.Tags = make(map[string]string)
@@ -100,7 +147,7 @@ func TestConfig_LoadDirectory(t *testing.T) {
assert.NoError(t, err)
ex.SetParser(p)
ex.Command = "/usr/bin/myothercollector --foo=bar"
eConfig := &models.InputConfig{
Name: "exec",
MeasurementSuffix: "_myothercollector",
}
@@ -119,7 +166,7 @@ func TestConfig_LoadDirectory(t *testing.T) {
pstat := inputs.Inputs["procstat"]().(*procstat.Procstat)
pstat.PidFile = "/var/run/grafana-server.pid"
pConfig := &models.InputConfig{Name: "procstat"}
pConfig.Tags = make(map[string]string)
assert.Equal(t, pstat, c.Inputs[3].Input,


@@ -0,0 +1,11 @@
[[inputs.memcached]]
servers = ["$MY_TEST_SERVER"]
namepass = ["metricname1"]
namedrop = ["metricname2"]
fieldpass = ["some", "strings"]
fielddrop = ["other", "stuff"]
interval = "$TEST_INTERVAL"
[inputs.memcached.tagpass]
goodtag = ["mytag"]
[inputs.memcached.tagdrop]
badtag = ["othertag"]


@@ -60,7 +60,7 @@
# Kafka topic for producer messages
topic = "telegraf"
# Telegraf tag to use as a routing key
# ie, if this tag exists, its value will be used as the routing key
routing_tag = "host"
@@ -143,19 +143,31 @@
[[inputs.diskio]]
# no configuration
# read metrics from a Kafka 0.9+ topic
[[inputs.kafka_consumer]]
## kafka brokers
brokers = ["localhost:9092"]
## topic(s) to consume
topics = ["telegraf"]
## the name of the consumer group
consumer_group = "telegraf_metrics_consumers"
## Offset (must be either "oldest" or "newest")
offset = "oldest"
# read metrics from a Kafka legacy topic
[[inputs.kafka_consumer_legacy]]
## topic(s) to consume
topics = ["telegraf"]
# an array of Zookeeper connection strings
zookeeper_peers = ["localhost:2181"]
## the name of the consumer group
consumer_group = "telegraf_metrics_consumers"
## Offset (must be either "oldest" or "newest")
offset = "oldest"
# Read metrics from a LeoFS Server via SNMP
[[inputs.leofs]]
# An array of URI to gather stats about LeoFS.


@@ -0,0 +1,116 @@
package globpath
import (
"fmt"
"os"
"path/filepath"
"strings"
"github.com/gobwas/glob"
)
var sepStr = fmt.Sprintf("%v", string(os.PathSeparator))
type GlobPath struct {
path string
hasMeta bool
hasSuperMeta bool
g glob.Glob
root string
}
func Compile(path string) (*GlobPath, error) {
out := GlobPath{
hasMeta: hasMeta(path),
hasSuperMeta: hasSuperMeta(path),
path: path,
}
// if there are no glob meta characters in the path, don't bother compiling
// a glob object or finding the root directory. (see short-circuit in Match)
if !out.hasMeta || !out.hasSuperMeta {
return &out, nil
}
var err error
if out.g, err = glob.Compile(path, os.PathSeparator); err != nil {
return nil, err
}
// Get the root directory for this filepath
out.root = findRootDir(path)
return &out, nil
}
func (g *GlobPath) Match() map[string]os.FileInfo {
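// No meta characters: stat the literal path directly.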
if !g.hasMeta {
out := make(map[string]os.FileInfo)
info, err := os.Stat(g.path)
if err == nil {
out[g.path] = info
}
return out
}
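// A simple glob (no "**") can be expanded with filepath.Glob.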
if !g.hasSuperMeta {
out := make(map[string]os.FileInfo)
files, _ := filepath.Glob(g.path)
for _, file := range files {
info, err := os.Stat(file)
if err == nil {
out[file] = info
}
}
return out
}
return walkFilePath(g.root, g.g)
}
// walk the filepath from the given root and return a list of files that match
// the given glob.
func walkFilePath(root string, g glob.Glob) map[string]os.FileInfo {
matchedFiles := make(map[string]os.FileInfo)
walkfn := func(path string, info os.FileInfo, _ error) error {
if g.Match(path) {
matchedFiles[path] = info
}
return nil
}
filepath.Walk(root, walkfn)
return matchedFiles
}
// find the root dir of the given path (could include globs).
// ie:
// /var/log/telegraf.conf -> /var/log
// /home/** -> /home
// /home/*/** -> /home
// /lib/share/*/*/**.txt -> /lib/share
func findRootDir(path string) string {
pathItems := strings.Split(path, sepStr)
out := sepStr
for i, item := range pathItems {
if i == len(pathItems)-1 {
break
}
if item == "" {
continue
}
if hasMeta(item) {
break
}
out += item + sepStr
}
if out != "/" {
out = strings.TrimSuffix(out, "/")
}
return out
}
// hasMeta reports whether path contains any magic glob characters.
func hasMeta(path string) bool {
return strings.IndexAny(path, "*?[") >= 0
}
// hasSuperMeta reports whether path contains any super magic glob characters (**).
func hasSuperMeta(path string) bool {
return strings.Index(path, "**") >= 0
}


@@ -0,0 +1,90 @@
package globpath
import (
"os"
"runtime"
"strings"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestCompileAndMatch(t *testing.T) {
dir := getTestdataDir()
// test super asterisk
g1, err := Compile(dir + "/**")
require.NoError(t, err)
// test single asterisk
g2, err := Compile(dir + "/*.log")
require.NoError(t, err)
// test no meta characters (file exists)
g3, err := Compile(dir + "/log1.log")
require.NoError(t, err)
// test file that doesn't exist
g4, err := Compile(dir + "/i_dont_exist.log")
require.NoError(t, err)
// test super asterisk that doesn't exist
g5, err := Compile(dir + "/dir_doesnt_exist/**")
require.NoError(t, err)
matches := g1.Match()
assert.Len(t, matches, 6)
matches = g2.Match()
assert.Len(t, matches, 2)
matches = g3.Match()
assert.Len(t, matches, 1)
matches = g4.Match()
assert.Len(t, matches, 0)
matches = g5.Match()
assert.Len(t, matches, 0)
}
func TestFindRootDir(t *testing.T) {
tests := []struct {
input string
output string
}{
{"/var/log/telegraf.conf", "/var/log"},
{"/home/**", "/home"},
{"/home/*/**", "/home"},
{"/lib/share/*/*/**.txt", "/lib/share"},
}
for _, test := range tests {
actual := findRootDir(test.input)
assert.Equal(t, test.output, actual)
}
}
func TestFindNestedTextFile(t *testing.T) {
dir := getTestdataDir()
// test super asterisk
g1, err := Compile(dir + "/**.txt")
require.NoError(t, err)
matches := g1.Match()
assert.Len(t, matches, 1)
}
func getTestdataDir() string {
_, filename, _, _ := runtime.Caller(1)
return strings.Replace(filename, "globpath_test.go", "testdata", 1)
}
func TestMatch_ErrPermission(t *testing.T) {
tests := []struct {
input string
expected map[string]os.FileInfo
}{
{"/root/foo", map[string]os.FileInfo{}},
{"/root/f*", map[string]os.FileInfo{}},
}
for _, test := range tests {
glob, err := Compile(test.input)
require.NoError(t, err)
actual := glob.Match()
require.Equal(t, test.expected, actual)
}
}

internal/globpath/testdata/log1.log (empty)
internal/globpath/testdata/log2.log (empty)
internal/globpath/testdata/test.conf

@@ -0,0 +1,5 @@
# this is a fake testing config file
# for testing the filestat plugin
option1 = "foo"
option2 = "bar"


@@ -2,19 +2,31 @@ package internal
import (
"bufio"
"bytes"
"crypto/rand"
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"io/ioutil"
"log"
"math/big"
"os"
"os/exec"
"strconv"
"strings"
"time"
"unicode"
)
const alphanum string = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
var (
TimeoutErr = errors.New("Command timed out.")
NotImplementedError = errors.New("not implemented yet")
)
// Duration just wraps time.Duration
type Duration struct {
Duration time.Duration
@@ -22,18 +34,39 @@ type Duration struct {
// UnmarshalTOML parses the duration from the TOML config file
func (d *Duration) UnmarshalTOML(b []byte) error {
var err error
b = bytes.Trim(b, `'`)
// see if we can directly convert it
d.Duration, err = time.ParseDuration(string(b))
if err == nil {
return nil
}
// Parse string duration, ie, "1s"
if uq, err := strconv.Unquote(string(b)); err == nil && len(uq) > 0 {
d.Duration, err = time.ParseDuration(uq)
if err == nil {
return nil
}
}
// First try parsing as integer seconds
sI, err := strconv.ParseInt(string(b), 10, 64)
if err == nil {
d.Duration = time.Second * time.Duration(sI)
return nil
}
// Second try parsing as float seconds
sF, err := strconv.ParseFloat(string(b), 64)
if err == nil {
d.Duration = time.Second * time.Duration(sF)
return nil
}
return nil
}
// ReadLines reads contents from a file and splits them by new lines.
// A convenience wrapper to ReadLinesOffsetN(filename, 0, -1).
func ReadLines(filename string) ([]string, error) {
@@ -86,15 +119,15 @@ func GetTLSConfig(
SSLCert, SSLKey, SSLCA string,
InsecureSkipVerify bool,
) (*tls.Config, error) {
if SSLCert == "" && SSLKey == "" && SSLCA == "" && !InsecureSkipVerify {
return nil, nil
}
t := &tls.Config{
InsecureSkipVerify: InsecureSkipVerify,
}
if SSLCA != "" {
caCert, err := ioutil.ReadFile(SSLCA)
if err != nil {
return nil, errors.New(fmt.Sprintf("Could not load TLS CA: %s",
@@ -103,75 +136,107 @@ func GetTLSConfig(
caCertPool := x509.NewCertPool()
caCertPool.AppendCertsFromPEM(caCert)
t.RootCAs = caCertPool
}
if SSLCert != "" && SSLKey != "" {
cert, err := tls.LoadX509KeyPair(SSLCert, SSLKey)
if err != nil {
return nil, errors.New(fmt.Sprintf(
"Could not load TLS client key/certificate from %s:%s: %s",
SSLKey, SSLCert, err))
}
t.Certificates = []tls.Certificate{cert}
t.BuildNameToCertificate()
}
return t, nil
}
// SnakeCase converts the given string to snake case following the Golang format:
// acronyms are converted to lower-case and preceded by an underscore.
func SnakeCase(in string) string {
runes := []rune(in)
length := len(runes)
var out []rune
for i := 0; i < length; i++ {
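// An underscore is inserted before an upper-case rune that starts a new word:
// either the previous rune is lower-case, or the next rune is lower-case.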
if i > 0 && unicode.IsUpper(runes[i]) && ((i+1 < length && unicode.IsLower(runes[i+1])) || unicode.IsLower(runes[i-1])) {
out = append(out, '_')
}
out = append(out, unicode.ToLower(runes[i]))
}
// All parts of the pattern matched
return true
return string(out)
}
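The SnakeCase test table further down exercises exactly this acronym handling; a minimal sketch of a few cases from it (same in-tree caveat as above):

package main

import (
	"fmt"

	"github.com/influxdata/telegraf/internal"
)

func main() {
	fmt.Println(internal.SnakeCase("APIResponse")) // api_response
	fmt.Println(internal.SnakeCase("LinuxMOTD"))   // linux_motd
	fmt.Println(internal.SnakeCase("snake"))       // snake
}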
// CombinedOutputTimeout runs the given command with the given timeout and
// returns the combined output of stdout and stderr.
// If the command times out, it attempts to kill the process.
func CombinedOutputTimeout(c *exec.Cmd, timeout time.Duration) ([]byte, error) {
var b bytes.Buffer
c.Stdout = &b
c.Stderr = &b
if err := c.Start(); err != nil {
return nil, err
}
err := WaitTimeout(c, timeout)
return b.Bytes(), err
}
// RunTimeout runs the given command with the given timeout.
// If the command times out, it attempts to kill the process.
func RunTimeout(c *exec.Cmd, timeout time.Duration) error {
if err := c.Start(); err != nil {
return err
}
return WaitTimeout(c, timeout)
}
// WaitTimeout waits for the given command to finish with a timeout.
// It assumes the command has already been started.
// If the command times out, it attempts to kill the process.
func WaitTimeout(c *exec.Cmd, timeout time.Duration) error {
timer := time.NewTimer(timeout)
done := make(chan error)
go func() { done <- c.Wait() }()
select {
case err := <-done:
timer.Stop()
return err
case <-timer.C:
if err := c.Process.Kill(); err != nil {
log.Printf("E! FATAL error killing process: %s", err)
return err
}
// wait for the command to return after killing it
<-done
return TimeoutErr
}
}
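A usage sketch mirroring the tests further down: run a command under a deadline and detect that it was killed (in-tree caveat applies):

package main

import (
	"fmt"
	"os/exec"
	"time"

	"github.com/influxdata/telegraf/internal"
)

func main() {
	cmd := exec.Command("sleep", "10")
	out, err := internal.CombinedOutputTimeout(cmd, 20*time.Millisecond)
	if err == internal.TimeoutErr {
		fmt.Println("process was killed after 20ms")
	}
	fmt.Printf("captured %d bytes of combined output\n", len(out))
}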
// RandomSleep will sleep for a random amount of time up to max.
// If the shutdown channel is closed, it will return before it has finished
// sleeping.
func RandomSleep(max time.Duration, shutdown chan struct{}) {
if max == 0 {
return
}
maxSleep := big.NewInt(max.Nanoseconds())
var sleepns int64
if j, err := rand.Int(rand.Reader, maxSleep); err == nil {
sleepns = j.Int64()
}
t := time.NewTimer(time.Nanosecond * time.Duration(sleepns))
select {
case <-t.C:
return
case <-shutdown:
t.Stop()
return
}
}
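RandomSleep is used by the agent for jitter; a sketch of the early-wakeup behavior when shutdown closes first (in-tree caveat applies):

package main

import (
	"fmt"
	"time"

	"github.com/influxdata/telegraf/internal"
)

func main() {
	shutdown := make(chan struct{})
	go func() {
		time.Sleep(100 * time.Millisecond)
		close(shutdown)
	}()

	start := time.Now()
	// Sleeps up to 5s of random jitter, but returns early on shutdown.
	internal.RandomSleep(5*time.Second, shutdown)
	fmt.Println("woke after", time.Since(start)) // at most ~100ms here
}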

View File

@@ -1,44 +1,157 @@
package internal
import "testing"
import (
"os/exec"
"testing"
"time"
func testGlobMatch(t *testing.T, pattern, subj string) {
if !Glob(pattern, subj) {
t.Errorf("%s should match %s", pattern, subj)
"github.com/stretchr/testify/assert"
)
type SnakeTest struct {
input string
output string
}
var tests = []SnakeTest{
{"a", "a"},
{"snake", "snake"},
{"A", "a"},
{"ID", "id"},
{"MOTD", "motd"},
{"Snake", "snake"},
{"SnakeTest", "snake_test"},
{"APIResponse", "api_response"},
{"SnakeID", "snake_id"},
{"SnakeIDGoogle", "snake_id_google"},
{"LinuxMOTD", "linux_motd"},
{"OMGWTFBBQ", "omgwtfbbq"},
{"omg_wtf_bbq", "omg_wtf_bbq"},
}
func TestSnakeCase(t *testing.T) {
for _, test := range tests {
if SnakeCase(test.input) != test.output {
t.Errorf(`SnakeCase("%s"), wanted "%s", got "%s"`, test.input, test.output, SnakeCase(test.input))
}
}
}
var (
	sleepbin, _ = exec.LookPath("sleep")
	echobin, _  = exec.LookPath("echo")
	shell, _    = exec.LookPath("sh")
)
func TestRunTimeout(t *testing.T) {
if sleepbin == "" {
t.Skip("'sleep' binary not available on OS, skipping.")
}
cmd := exec.Command(sleepbin, "10")
start := time.Now()
err := RunTimeout(cmd, time.Millisecond*20)
elapsed := time.Since(start)
assert.Equal(t, TimeoutErr, err)
// Verify that command gets killed in 20ms, with some breathing room
assert.True(t, elapsed < time.Millisecond*75)
}
func TestCombinedOutputTimeout(t *testing.T) {
	if sleepbin == "" {
		t.Skip("'sleep' binary not available on OS, skipping.")
	}
	cmd := exec.Command(sleepbin, "10")
	start := time.Now()
	_, err := CombinedOutputTimeout(cmd, time.Millisecond*20)
	elapsed := time.Since(start)

	assert.Equal(t, TimeoutErr, err)
	// Verify that command gets killed in 20ms, with some breathing room
	assert.True(t, elapsed < time.Millisecond*75)
}
func TestCombinedOutput(t *testing.T) {
if echobin == "" {
t.Skip("'echo' binary not available on OS, skipping.")
}
cmd := exec.Command(echobin, "foo")
out, err := CombinedOutputTimeout(cmd, time.Second)
assert.NoError(t, err)
assert.Equal(t, "foo\n", string(out))
}
// test that CombinedOutputTimeout and exec.Cmd.CombinedOutput return
// the same output from a failed command.
func TestCombinedOutputError(t *testing.T) {
if shell == "" {
t.Skip("'sh' binary not available on OS, skipping.")
}
cmd := exec.Command(shell, "-c", "false")
expected, err := cmd.CombinedOutput()
cmd2 := exec.Command(shell, "-c", "false")
actual, err := CombinedOutputTimeout(cmd2, time.Second)
assert.Error(t, err)
assert.Equal(t, expected, actual)
}
func TestRunError(t *testing.T) {
if shell == "" {
t.Skip("'sh' binary not available on OS, skipping.")
}
cmd := exec.Command(shell, "-c", "false")
err := RunTimeout(cmd, time.Second)
assert.Error(t, err)
}
func TestRandomSleep(t *testing.T) {
// test that zero max returns immediately
s := time.Now()
RandomSleep(time.Duration(0), make(chan struct{}))
elapsed := time.Since(s)
assert.True(t, elapsed < time.Millisecond)
// test that max sleep is respected
s = time.Now()
RandomSleep(time.Millisecond*50, make(chan struct{}))
elapsed = time.Since(s)
assert.True(t, elapsed < time.Millisecond*100)
// test that shutdown is respected
s = time.Now()
shutdown := make(chan struct{})
go func() {
time.Sleep(time.Millisecond * 100)
close(shutdown)
}()
RandomSleep(time.Second, shutdown)
elapsed = time.Since(s)
assert.True(t, elapsed < time.Millisecond*150)
}
func TestDuration(t *testing.T) {
var d Duration
d.UnmarshalTOML([]byte(`"1s"`))
assert.Equal(t, time.Second, d.Duration)
d = Duration{}
d.UnmarshalTOML([]byte(`1s`))
assert.Equal(t, time.Second, d.Duration)
d = Duration{}
d.UnmarshalTOML([]byte(`'1s'`))
assert.Equal(t, time.Second, d.Duration)
d = Duration{}
d.UnmarshalTOML([]byte(`10`))
assert.Equal(t, 10*time.Second, d.Duration)
d = Duration{}
d.UnmarshalTOML([]byte(`1.5`))
assert.Equal(t, time.Second, d.Duration)
}

View File

@@ -0,0 +1,59 @@
package limiter
import (
"sync"
"time"
)
// NewRateLimiter returns a rate limiter that will emit from the C
// channel only 'n' times per 'rate' interval.
func NewRateLimiter(n int, rate time.Duration) *rateLimiter {
r := &rateLimiter{
C: make(chan bool),
rate: rate,
n: n,
shutdown: make(chan bool),
}
r.wg.Add(1)
go r.limiter()
return r
}
type rateLimiter struct {
C chan bool
rate time.Duration
n int
shutdown chan bool
wg sync.WaitGroup
}
func (r *rateLimiter) Stop() {
close(r.shutdown)
r.wg.Wait()
close(r.C)
}
func (r *rateLimiter) limiter() {
defer r.wg.Done()
ticker := time.NewTicker(r.rate)
defer ticker.Stop()
counter := 0
for {
select {
case <-r.shutdown:
return
case <-ticker.C:
counter = 0
default:
if counter < r.n {
select {
case r.C <- true:
counter++
case <-r.shutdown:
return
}
}
}
}
}
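A hedged usage sketch of the limiter: each receive from C is one permit, and at most n permits are granted per rate window (in-tree caveat applies):

package main

import (
	"fmt"
	"time"

	"github.com/influxdata/telegraf/internal/limiter"
)

func main() {
	r := limiter.NewRateLimiter(5, time.Second)
	defer r.Stop()

	granted := 0
	deadline := time.After(100 * time.Millisecond)
loop:
	for {
		select {
		case <-r.C:
			granted++ // stalls at 5 until the ticker resets the window
		case <-deadline:
			break loop
		}
	}
	fmt.Println("granted:", granted) // 5
}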

View File

@@ -0,0 +1,54 @@
package limiter
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
)
func TestRateLimiter(t *testing.T) {
r := NewRateLimiter(5, time.Second)
ticker := time.NewTicker(time.Millisecond * 75)
// test that we can only get 5 receives from the rate limiter
counter := 0
outer:
for {
select {
case <-r.C:
counter++
case <-ticker.C:
break outer
}
}
assert.Equal(t, 5, counter)
r.Stop()
// verify that the Stop function closes the channel.
_, ok := <-r.C
assert.False(t, ok)
}
func TestRateLimiterMultipleIterations(t *testing.T) {
r := NewRateLimiter(5, time.Millisecond*50)
ticker := time.NewTicker(time.Millisecond * 250)
// test that we can get more than 10 receives from the rate limiter
// across multiple refill windows
counter := 0
outer:
for {
select {
case <-ticker.C:
break outer
case <-r.C:
counter++
}
}
assert.True(t, counter > 10)
r.Stop()
// verify that the Stop function closes the channel.
_, ok := <-r.C
assert.False(t, ok)
}

View File

@@ -1,123 +1,263 @@
package internal_models
package models
import (
"strings"
"fmt"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/filter"
)
// TagFilter is the name of a tag, and the values on which to filter
type TagFilter struct {
Name string
Filter []string
filter filter.Filter
}
// Filter containing drop/pass and tagdrop/tagpass rules
type Filter struct {
NameDrop []string
nameDrop filter.Filter
NamePass []string
namePass filter.Filter
FieldDrop []string
fieldDrop filter.Filter
FieldPass []string
fieldPass filter.Filter
TagDrop []TagFilter
TagPass []TagFilter
TagExclude []string
tagExclude filter.Filter
TagInclude []string
tagInclude filter.Filter
isActive bool
}
// Compile all Filter lists into filter.Filter objects.
func (f *Filter) Compile() error {
if len(f.NameDrop) == 0 &&
len(f.NamePass) == 0 &&
len(f.FieldDrop) == 0 &&
len(f.FieldPass) == 0 &&
len(f.TagInclude) == 0 &&
len(f.TagExclude) == 0 &&
len(f.TagPass) == 0 &&
len(f.TagDrop) == 0 {
return nil
}
f.isActive = true
var err error
f.nameDrop, err = filter.Compile(f.NameDrop)
if err != nil {
return fmt.Errorf("Error compiling 'namedrop', %s", err)
}
f.namePass, err = filter.Compile(f.NamePass)
if err != nil {
return fmt.Errorf("Error compiling 'namepass', %s", err)
}
f.fieldDrop, err = filter.Compile(f.FieldDrop)
if err != nil {
return fmt.Errorf("Error compiling 'fielddrop', %s", err)
}
f.fieldPass, err = filter.Compile(f.FieldPass)
if err != nil {
return fmt.Errorf("Error compiling 'fieldpass', %s", err)
}
f.tagExclude, err = filter.Compile(f.TagExclude)
if err != nil {
return fmt.Errorf("Error compiling 'tagexclude', %s", err)
}
f.tagInclude, err = filter.Compile(f.TagInclude)
if err != nil {
return fmt.Errorf("Error compiling 'taginclude', %s", err)
}
for i, _ := range f.TagDrop {
f.TagDrop[i].filter, err = filter.Compile(f.TagDrop[i].Filter)
if err != nil {
return fmt.Errorf("Error compiling 'tagdrop', %s", err)
}
}
for i, _ := range f.TagPass {
f.TagPass[i].filter, err = filter.Compile(f.TagPass[i].Filter)
if err != nil {
return fmt.Errorf("Error compiling 'tagpass', %s", err)
}
}
return nil
}
// Apply applies the filter to the given measurement name, fields map, and
// tags map. It will return false if the metric should be "filtered out", and
// true if the metric should "pass".
// It will modify tags & fields in-place if they need to be deleted.
func (f *Filter) Apply(
measurement string,
fields map[string]interface{},
tags map[string]string,
) bool {
if !f.isActive {
return true
}
// check if the measurement name should pass
if !f.shouldNamePass(measurement) {
return false
}
// check if the tags should pass
if !f.shouldTagsPass(tags) {
return false
}
// filter fields
for fieldkey, _ := range fields {
if !f.shouldFieldPass(fieldkey) {
delete(fields, fieldkey)
}
}
if len(fields) == 0 {
return false
}
// filter tags
f.filterTags(tags)
return true
}
// IsActive returns whether the filter is active (i.e. has any compiled rules)
func (f *Filter) IsActive() bool {
return f.isActive
}
// shouldNamePass returns true if the metric should pass, false if it should
// drop, based on the namepass/namedrop filter parameters
func (f *Filter) shouldNamePass(key string) bool {
	pass := func(f *Filter) bool {
		if f.namePass.Match(key) {
			return true
		}
		return false
	}

	drop := func(f *Filter) bool {
		if f.nameDrop.Match(key) {
			return false
		}
		return true
	}

	if f.namePass != nil && f.nameDrop != nil {
		return pass(f) && drop(f)
	} else if f.namePass != nil {
		return pass(f)
	} else if f.nameDrop != nil {
		return drop(f)
	}

	return true
}
// shouldFieldPass returns true if the metric should pass, false if it should
// drop, based on the fieldpass/fielddrop filter parameters
func (f *Filter) shouldFieldPass(key string) bool {
	pass := func(f *Filter) bool {
		if f.fieldPass.Match(key) {
			return true
		}
		return false
	}

	drop := func(f *Filter) bool {
		if f.fieldDrop.Match(key) {
			return false
		}
		return true
	}

	if f.fieldPass != nil && f.fieldDrop != nil {
		return pass(f) && drop(f)
	} else if f.fieldPass != nil {
		return pass(f)
	} else if f.fieldDrop != nil {
		return drop(f)
	}

	return true
}
// shouldTagsPass returns true if the metric should pass, false if it should
// drop, based on the tagdrop/tagpass filter parameters
func (f *Filter) shouldTagsPass(tags map[string]string) bool {
	pass := func(f *Filter) bool {
		for _, pat := range f.TagPass {
			if pat.filter == nil {
				continue
			}
			if tagval, ok := tags[pat.Name]; ok {
				if pat.filter.Match(tagval) {
					return true
				}
			}
		}
		return false
	}

	drop := func(f *Filter) bool {
		for _, pat := range f.TagDrop {
			if pat.filter == nil {
				continue
			}
			if tagval, ok := tags[pat.Name]; ok {
				if pat.filter.Match(tagval) {
					return false
				}
			}
		}
		return true
	}

	// Add additional logic for the case where both parameters are set.
	// see: https://github.com/influxdata/telegraf/issues/2860
	if f.TagPass != nil && f.TagDrop != nil {
		// Return true only when the tag passes and will not be dropped
		// (true, true). A tag that would both pass and drop is dropped
		// (true, false).
		return pass(f) && drop(f)
	} else if f.TagPass != nil {
		return pass(f)
	} else if f.TagDrop != nil {
		return drop(f)
	}

	return true
}
// filterTags applies the TagInclude and TagExclude filters, modifying the
// tags map in-place.
func (f *Filter) filterTags(tags map[string]string) {
if f.tagInclude != nil {
for k, _ := range tags {
if !f.tagInclude.Match(k) {
delete(tags, k)
}
}
}
if f.tagExclude != nil {
for k, _ := range tags {
if f.tagExclude.Match(k) {
delete(tags, k)
}
}
}
}
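Putting Compile, Apply, and the tag filters together, a hedged sketch of how a caller drives this type (the models package lives under internal/models, so the same in-tree caveat applies):

package main

import (
	"fmt"
	"log"

	"github.com/influxdata/telegraf/internal/models"
)

func main() {
	f := models.Filter{
		NamePass:   []string{"cpu*"},
		TagExclude: []string{"host"},
	}
	if err := f.Compile(); err != nil {
		log.Fatal(err)
	}

	fields := map[string]interface{}{"usage_idle": 99.0}
	tags := map[string]string{"host": "box1", "cpu": "cpu0"}

	// Apply mutates fields and tags in-place; "host" is deleted here.
	if f.Apply("cpu_usage", fields, tags) {
		fmt.Println(tags) // map[cpu:cpu0]
	}
}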

View File

@@ -1,9 +1,64 @@
package internal_models
package models
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestFilter_ApplyEmpty(t *testing.T) {
f := Filter{}
require.NoError(t, f.Compile())
assert.False(t, f.IsActive())
assert.True(t, f.Apply("m", map[string]interface{}{"value": int64(1)}, map[string]string{}))
}
func TestFilter_ApplyTagsDontPass(t *testing.T) {
filters := []TagFilter{
TagFilter{
Name: "cpu",
Filter: []string{"cpu-*"},
},
}
f := Filter{
TagDrop: filters,
}
require.NoError(t, f.Compile())
assert.True(t, f.IsActive())
assert.False(t, f.Apply("m",
map[string]interface{}{"value": int64(1)},
map[string]string{"cpu": "cpu-total"}))
}
func TestFilter_ApplyDeleteFields(t *testing.T) {
f := Filter{
FieldDrop: []string{"value"},
}
require.NoError(t, f.Compile())
assert.True(t, f.IsActive())
fields := map[string]interface{}{"value": int64(1), "value2": int64(2)}
assert.True(t, f.Apply("m", fields, nil))
assert.Equal(t, map[string]interface{}{"value2": int64(2)}, fields)
}
func TestFilter_ApplyDeleteAllFields(t *testing.T) {
f := Filter{
FieldDrop: []string{"value*"},
}
require.NoError(t, f.Compile())
assert.True(t, f.IsActive())
fields := map[string]interface{}{"value": int64(1), "value2": int64(2)}
assert.False(t, f.Apply("m", fields, nil))
}
func TestFilter_Empty(t *testing.T) {
f := Filter{}
@@ -18,7 +73,7 @@ func TestFilter_Empty(t *testing.T) {
}
for _, measurement := range measurements {
if !f.ShouldFieldsPass(measurement) {
if !f.shouldFieldPass(measurement) {
t.Errorf("Expected measurement %s to pass", measurement)
}
}
@@ -28,6 +83,7 @@ func TestFilter_NamePass(t *testing.T) {
f := Filter{
NamePass: []string{"foo*", "cpu_usage_idle"},
}
require.NoError(t, f.Compile())
passes := []string{
"foo",
@@ -45,13 +101,13 @@ func TestFilter_NamePass(t *testing.T) {
}
for _, measurement := range passes {
if !f.ShouldNamePass(measurement) {
if !f.shouldNamePass(measurement) {
t.Errorf("Expected measurement %s to pass", measurement)
}
}
for _, measurement := range drops {
if f.ShouldNamePass(measurement) {
if f.shouldNamePass(measurement) {
t.Errorf("Expected measurement %s to drop", measurement)
}
}
@@ -61,6 +117,7 @@ func TestFilter_NameDrop(t *testing.T) {
f := Filter{
NameDrop: []string{"foo*", "cpu_usage_idle"},
}
require.NoError(t, f.Compile())
drops := []string{
"foo",
@@ -78,13 +135,13 @@ func TestFilter_NameDrop(t *testing.T) {
}
for _, measurement := range passes {
if !f.ShouldNamePass(measurement) {
if !f.shouldNamePass(measurement) {
t.Errorf("Expected measurement %s to pass", measurement)
}
}
for _, measurement := range drops {
if f.ShouldNamePass(measurement) {
if f.shouldNamePass(measurement) {
t.Errorf("Expected measurement %s to drop", measurement)
}
}
@@ -94,6 +151,7 @@ func TestFilter_FieldPass(t *testing.T) {
f := Filter{
FieldPass: []string{"foo*", "cpu_usage_idle"},
}
require.NoError(t, f.Compile())
passes := []string{
"foo",
@@ -111,13 +169,13 @@ func TestFilter_FieldPass(t *testing.T) {
}
for _, measurement := range passes {
if !f.ShouldFieldsPass(measurement) {
if !f.shouldFieldPass(measurement) {
t.Errorf("Expected measurement %s to pass", measurement)
}
}
for _, measurement := range drops {
if f.ShouldFieldsPass(measurement) {
if f.shouldFieldPass(measurement) {
t.Errorf("Expected measurement %s to drop", measurement)
}
}
@@ -127,6 +185,7 @@ func TestFilter_FieldDrop(t *testing.T) {
f := Filter{
FieldDrop: []string{"foo*", "cpu_usage_idle"},
}
require.NoError(t, f.Compile())
drops := []string{
"foo",
@@ -144,13 +203,13 @@ func TestFilter_FieldDrop(t *testing.T) {
}
for _, measurement := range passes {
if !f.ShouldFieldsPass(measurement) {
if !f.shouldFieldPass(measurement) {
t.Errorf("Expected measurement %s to pass", measurement)
}
}
for _, measurement := range drops {
if f.ShouldFieldsPass(measurement) {
if f.shouldFieldPass(measurement) {
t.Errorf("Expected measurement %s to drop", measurement)
}
}
@@ -169,6 +228,7 @@ func TestFilter_TagPass(t *testing.T) {
f := Filter{
TagPass: filters,
}
require.NoError(t, f.Compile())
passes := []map[string]string{
{"cpu": "cpu-total"},
@@ -187,13 +247,13 @@ func TestFilter_TagPass(t *testing.T) {
}
for _, tags := range passes {
if !f.ShouldTagsPass(tags) {
if !f.shouldTagsPass(tags) {
t.Errorf("Expected tags %v to pass", tags)
}
}
for _, tags := range drops {
if f.ShouldTagsPass(tags) {
if f.shouldTagsPass(tags) {
t.Errorf("Expected tags %v to drop", tags)
}
}
@@ -212,6 +272,7 @@ func TestFilter_TagDrop(t *testing.T) {
f := Filter{
TagDrop: filters,
}
require.NoError(t, f.Compile())
drops := []map[string]string{
{"cpu": "cpu-total"},
@@ -230,14 +291,154 @@ func TestFilter_TagDrop(t *testing.T) {
}
for _, tags := range passes {
if !f.ShouldTagsPass(tags) {
if !f.shouldTagsPass(tags) {
t.Errorf("Expected tags %v to pass", tags)
}
}
for _, tags := range drops {
if f.ShouldTagsPass(tags) {
if f.shouldTagsPass(tags) {
t.Errorf("Expected tags %v to drop", tags)
}
}
}
func TestFilter_FilterTagsNoMatches(t *testing.T) {
pretags := map[string]string{
"host": "localhost",
"mytag": "foobar",
}
f := Filter{
TagExclude: []string{"nomatch"},
}
require.NoError(t, f.Compile())
f.filterTags(pretags)
assert.Equal(t, map[string]string{
"host": "localhost",
"mytag": "foobar",
}, pretags)
f = Filter{
TagInclude: []string{"nomatch"},
}
require.NoError(t, f.Compile())
f.filterTags(pretags)
assert.Equal(t, map[string]string{}, pretags)
}
func TestFilter_FilterTagsMatches(t *testing.T) {
pretags := map[string]string{
"host": "localhost",
"mytag": "foobar",
}
f := Filter{
TagExclude: []string{"ho*"},
}
require.NoError(t, f.Compile())
f.filterTags(pretags)
assert.Equal(t, map[string]string{
"mytag": "foobar",
}, pretags)
pretags = map[string]string{
"host": "localhost",
"mytag": "foobar",
}
f = Filter{
TagInclude: []string{"my*"},
}
require.NoError(t, f.Compile())
f.filterTags(pretags)
assert.Equal(t, map[string]string{
"mytag": "foobar",
}, pretags)
}
// TestFilter_FilterNamePassAndDrop checks the case where both parameters
// are defined
// see: https://github.com/influxdata/telegraf/issues/2860
func TestFilter_FilterNamePassAndDrop(t *testing.T) {
inputData := []string{"name1", "name2", "name3", "name4"}
expectedResult := []bool{false, true, false, false}
f := Filter{
NamePass: []string{"name1", "name2"},
NameDrop: []string{"name1", "name3"},
}
require.NoError(t, f.Compile())
for i, name := range inputData {
assert.Equal(t, f.shouldNamePass(name), expectedResult[i])
}
}
// TestFilter_FilterFieldPassAndDrop checks the case where both parameters
// are defined
// see: https://github.com/influxdata/telegraf/issues/2860
func TestFilter_FilterFieldPassAndDrop(t *testing.T) {
inputData := []string{"field1", "field2", "field3", "field4"}
expectedResult := []bool{false, true, false, false}
f := Filter{
FieldPass: []string{"field1", "field2"},
FieldDrop: []string{"field1", "field3"},
}
require.NoError(t, f.Compile())
for i, field := range inputData {
assert.Equal(t, f.shouldFieldPass(field), expectedResult[i])
}
}
// TestFilter_FilterTagsPassAndDrop checks the case where both parameters
// are defined
// see: https://github.com/influxdata/telegraf/issues/2860
func TestFilter_FilterTagsPassAndDrop(t *testing.T) {
inputData := []map[string]string{
{"tag1": "1", "tag2": "3"},
{"tag1": "1", "tag2": "2"},
{"tag1": "2", "tag2": "1"},
{"tag1": "4", "tag2": "1"},
}
expectedResult := []bool{false, true, false, false}
filterPass := []TagFilter{
TagFilter{
Name: "tag1",
Filter: []string{"1", "4"},
},
}
filterDrop := []TagFilter{
TagFilter{
Name: "tag1",
Filter: []string{"4"},
},
TagFilter{
Name: "tag2",
Filter: []string{"3"},
},
}
f := Filter{
TagDrop: filterDrop,
TagPass: filterPass,
}
require.NoError(t, f.Compile())
for i, tag := range inputData {
assert.Equal(t, f.shouldTagsPass(tag), expectedResult[i])
}
}

View File

@@ -0,0 +1,166 @@
package models
import (
"log"
"math"
"strings"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/metric"
)
// makemetric is used by both RunningAggregator & RunningInput
// to make metrics.
// nameOverride: override the name of the measurement being made.
// namePrefix: add this prefix to each measurement name.
// nameSuffix: add this suffix to each measurement name.
// pluginTags: these are tags that are specific to this plugin.
// daemonTags: these are daemon-wide global tags, and get applied after pluginTags.
// filter: this is a filter to apply to each metric being made.
// applyFilter: if false, the above filter is not applied to each metric.
// This is used by Aggregators, because aggregators use filters
// on incoming metrics instead of on created metrics.
// TODO refactor this to not have such a huge func signature.
func makemetric(
measurement string,
fields map[string]interface{},
tags map[string]string,
nameOverride string,
namePrefix string,
nameSuffix string,
pluginTags map[string]string,
daemonTags map[string]string,
filter Filter,
applyFilter bool,
mType telegraf.ValueType,
t time.Time,
) telegraf.Metric {
if len(fields) == 0 || len(measurement) == 0 {
return nil
}
if tags == nil {
tags = make(map[string]string)
}
// Override measurement name if set
if len(nameOverride) != 0 {
measurement = nameOverride
}
// Apply measurement prefix and suffix if set
if len(namePrefix) != 0 {
measurement = namePrefix + measurement
}
if len(nameSuffix) != 0 {
measurement = measurement + nameSuffix
}
// Apply plugin-wide tags if set
for k, v := range pluginTags {
if _, ok := tags[k]; !ok {
tags[k] = v
}
}
// Apply daemon-wide tags if set
for k, v := range daemonTags {
if _, ok := tags[k]; !ok {
tags[k] = v
}
}
// Apply the metric filter(s)
// for aggregators, the filter does not get applied when the metric is made.
// instead, the filter is applied to metric incoming into the plugin.
// ie, it gets applied in the RunningAggregator.Apply function.
if applyFilter {
if ok := filter.Apply(measurement, fields, tags); !ok {
return nil
}
}
for k, v := range tags {
if strings.HasSuffix(k, `\`) {
log.Printf("D! Measurement [%s] tag [%s] "+
"ends with a backslash, skipping", measurement, k)
delete(tags, k)
continue
} else if strings.HasSuffix(v, `\`) {
log.Printf("D! Measurement [%s] tag [%s] has a value "+
"ending with a backslash, skipping", measurement, k)
delete(tags, k)
continue
}
}
for k, v := range fields {
if strings.HasSuffix(k, `\`) {
log.Printf("D! Measurement [%s] field [%s] "+
"ends with a backslash, skipping", measurement, k)
delete(fields, k)
continue
}
// Validate uint64 and float64 fields
// convert all int & uint types to int64
switch val := v.(type) {
case nil:
// delete nil fields
delete(fields, k)
case uint:
fields[k] = int64(val)
continue
case uint8:
fields[k] = int64(val)
continue
case uint16:
fields[k] = int64(val)
continue
case uint32:
fields[k] = int64(val)
continue
case int:
fields[k] = int64(val)
continue
case int8:
fields[k] = int64(val)
continue
case int16:
fields[k] = int64(val)
continue
case int32:
fields[k] = int64(val)
continue
case uint64:
// InfluxDB does not support writing uint64
if val < uint64(9223372036854775808) {
fields[k] = int64(val)
} else {
fields[k] = int64(9223372036854775807)
}
continue
case float32:
fields[k] = float64(val)
continue
case float64:
// NaN and Inf are invalid values in InfluxDB, skip the field
if math.IsNaN(val) || math.IsInf(val, 0) {
log.Printf("D! Measurement [%s] field [%s] has a NaN or Inf "+
"field, skipping",
measurement, k)
delete(fields, k)
continue
}
case string:
fields[k] = v
default:
fields[k] = v
}
}
m, err := metric.New(measurement, tags, fields, t, mType)
if err != nil {
log.Printf("Error adding point [%s]: %s\n", measurement, err.Error())
return nil
}
return m
}
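The uint64 branch is the subtle one: the line protocol of this era has no unsigned integers, so values of 2^63 or more are capped at math.MaxInt64. A standalone restatement of that rule (capUint64 is a hypothetical helper written for illustration, not telegraf API):

package main

import (
	"fmt"
	"math"
)

// capUint64 mirrors the uint64 case in makemetric above.
func capUint64(v uint64) int64 {
	if v < uint64(9223372036854775808) { // 2^63
		return int64(v)
	}
	return math.MaxInt64
}

func main() {
	fmt.Println(capUint64(10))                  // 10
	fmt.Println(capUint64(9223372036854775810)) // 9223372036854775807
}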

View File

@@ -0,0 +1,166 @@
package models
import (
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/metric"
)
type RunningAggregator struct {
a telegraf.Aggregator
Config *AggregatorConfig
metrics chan telegraf.Metric
periodStart time.Time
periodEnd time.Time
}
func NewRunningAggregator(
a telegraf.Aggregator,
conf *AggregatorConfig,
) *RunningAggregator {
return &RunningAggregator{
a: a,
Config: conf,
metrics: make(chan telegraf.Metric, 100),
}
}
// AggregatorConfig containing configuration parameters for the running
// aggregator plugin.
type AggregatorConfig struct {
Name string
DropOriginal bool
NameOverride string
MeasurementPrefix string
MeasurementSuffix string
Tags map[string]string
Filter Filter
Period time.Duration
Delay time.Duration
}
func (r *RunningAggregator) Name() string {
return "aggregators." + r.Config.Name
}
func (r *RunningAggregator) MakeMetric(
measurement string,
fields map[string]interface{},
tags map[string]string,
mType telegraf.ValueType,
t time.Time,
) telegraf.Metric {
m := makemetric(
measurement,
fields,
tags,
r.Config.NameOverride,
r.Config.MeasurementPrefix,
r.Config.MeasurementSuffix,
r.Config.Tags,
nil,
r.Config.Filter,
false,
mType,
t,
)
if m != nil {
m.SetAggregate(true)
}
return m
}
// Add applies the given metric to the aggregator.
// Before applying to the plugin, it will run any defined filters on the metric.
// Add returns true if the original metric should be dropped.
func (r *RunningAggregator) Add(in telegraf.Metric) bool {
if r.Config.Filter.IsActive() {
// check if the aggregator should apply this metric
name := in.Name()
fields := in.Fields()
tags := in.Tags()
t := in.Time()
if ok := r.Config.Filter.Apply(name, fields, tags); !ok {
// aggregator should not apply this metric
return false
}
in, _ = metric.New(name, tags, fields, t)
}
r.metrics <- in
return r.Config.DropOriginal
}
func (r *RunningAggregator) add(in telegraf.Metric) {
r.a.Add(in)
}
func (r *RunningAggregator) push(acc telegraf.Accumulator) {
r.a.Push(acc)
}
func (r *RunningAggregator) reset() {
r.a.Reset()
}
// Run runs the running aggregator, listens for incoming metrics, and waits
// for period ticks to tell it when to push and reset the aggregator.
func (r *RunningAggregator) Run(
acc telegraf.Accumulator,
shutdown chan struct{},
) {
// The start of the period is truncated to the nearest second.
//
// Every metric then gets its timestamp checked and is dropped if it
// is not within:
//
// start < t < end + truncation + delay
//
// So if we start at now = 00:00.2 with a 10s period and 0.3s delay:
// now = 00:00.2
// start = 00:00
// truncation = 00:00.2
// end = 00:10
// 1st interval: 00:00 - 00:10.5
// 2nd interval: 00:10 - 00:20.5
// etc.
//
now := time.Now()
r.periodStart = now.Truncate(time.Second)
truncation := now.Sub(r.periodStart)
r.periodEnd = r.periodStart.Add(r.Config.Period)
time.Sleep(r.Config.Delay)
periodT := time.NewTicker(r.Config.Period)
defer periodT.Stop()
for {
select {
case <-shutdown:
if len(r.metrics) > 0 {
// wait until metrics are flushed before exiting
continue
}
return
case m := <-r.metrics:
if m.Time().Before(r.periodStart) ||
m.Time().After(r.periodEnd.Add(truncation).Add(r.Config.Delay)) {
// the metric is outside the current aggregation period, so
// skip it.
continue
}
r.add(m)
case <-periodT.C:
r.periodStart = r.periodEnd
r.periodEnd = r.periodStart.Add(r.Config.Period)
r.push(acc)
r.reset()
}
}
}
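To restate the window check from the comment in Run, a hedged sketch (keep is a hypothetical helper written only to mirror the condition in the metric case above):

package main

import (
	"fmt"
	"time"
)

// keep reports whether a metric timestamp t is inside the current
// aggregation window: start <= t <= end + truncation + delay.
func keep(t, periodStart, periodEnd time.Time, truncation, delay time.Duration) bool {
	return !t.Before(periodStart) &&
		!t.After(periodEnd.Add(truncation).Add(delay))
}

func main() {
	start := time.Date(2018, 3, 14, 0, 0, 0, 0, time.UTC)
	end := start.Add(10 * time.Second)
	truncation := 200 * time.Millisecond
	delay := 300 * time.Millisecond

	// 00:10.4 is inside the 00:00 - 00:10.5 window; 00:10.6 is not.
	fmt.Println(keep(start.Add(10400*time.Millisecond), start, end, truncation, delay)) // true
	fmt.Println(keep(start.Add(10600*time.Millisecond), start, end, truncation, delay)) // false
}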

View File

@@ -0,0 +1,256 @@
package models
import (
"fmt"
"sync"
"sync/atomic"
"testing"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
)
func TestAdd(t *testing.T) {
a := &TestAggregator{}
ra := NewRunningAggregator(a, &AggregatorConfig{
Name: "TestRunningAggregator",
Filter: Filter{
NamePass: []string{"*"},
},
Period: time.Millisecond * 500,
})
assert.NoError(t, ra.Config.Filter.Compile())
acc := testutil.Accumulator{}
go ra.Run(&acc, make(chan struct{}))
m := ra.MakeMetric(
"RITest",
map[string]interface{}{"value": int(101)},
map[string]string{},
telegraf.Untyped,
time.Now().Add(time.Millisecond*150),
)
assert.False(t, ra.Add(m))
for {
time.Sleep(time.Millisecond)
if atomic.LoadInt64(&a.sum) > 0 {
break
}
}
assert.Equal(t, int64(101), atomic.LoadInt64(&a.sum))
}
func TestAddMetricsOutsideCurrentPeriod(t *testing.T) {
a := &TestAggregator{}
ra := NewRunningAggregator(a, &AggregatorConfig{
Name: "TestRunningAggregator",
Filter: Filter{
NamePass: []string{"*"},
},
Period: time.Millisecond * 500,
})
assert.NoError(t, ra.Config.Filter.Compile())
acc := testutil.Accumulator{}
go ra.Run(&acc, make(chan struct{}))
// metric before current period
m := ra.MakeMetric(
"RITest",
map[string]interface{}{"value": int(101)},
map[string]string{},
telegraf.Untyped,
time.Now().Add(-time.Hour),
)
assert.False(t, ra.Add(m))
// metric after current period
m = ra.MakeMetric(
"RITest",
map[string]interface{}{"value": int(101)},
map[string]string{},
telegraf.Untyped,
time.Now().Add(time.Hour),
)
assert.False(t, ra.Add(m))
// "now" metric
m = ra.MakeMetric(
"RITest",
map[string]interface{}{"value": int(101)},
map[string]string{},
telegraf.Untyped,
time.Now().Add(time.Millisecond*50),
)
assert.False(t, ra.Add(m))
for {
time.Sleep(time.Millisecond)
if atomic.LoadInt64(&a.sum) > 0 {
break
}
}
assert.Equal(t, int64(101), atomic.LoadInt64(&a.sum))
}
func TestAddAndPushOnePeriod(t *testing.T) {
a := &TestAggregator{}
ra := NewRunningAggregator(a, &AggregatorConfig{
Name: "TestRunningAggregator",
Filter: Filter{
NamePass: []string{"*"},
},
Period: time.Millisecond * 500,
})
assert.NoError(t, ra.Config.Filter.Compile())
acc := testutil.Accumulator{}
shutdown := make(chan struct{})
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
ra.Run(&acc, shutdown)
}()
m := ra.MakeMetric(
"RITest",
map[string]interface{}{"value": int(101)},
map[string]string{},
telegraf.Untyped,
time.Now().Add(time.Millisecond*100),
)
assert.False(t, ra.Add(m))
for {
time.Sleep(time.Millisecond)
if acc.NMetrics() > 0 {
break
}
}
acc.AssertContainsFields(t, "TestMetric", map[string]interface{}{"sum": int64(101)})
close(shutdown)
wg.Wait()
}
func TestAddDropOriginal(t *testing.T) {
ra := NewRunningAggregator(&TestAggregator{}, &AggregatorConfig{
Name: "TestRunningAggregator",
Filter: Filter{
NamePass: []string{"RI*"},
},
DropOriginal: true,
})
assert.NoError(t, ra.Config.Filter.Compile())
m := ra.MakeMetric(
"RITest",
map[string]interface{}{"value": int(101)},
map[string]string{},
telegraf.Untyped,
time.Now(),
)
assert.True(t, ra.Add(m))
// this metric name doesn't match the filter, so Add will return false
m2 := ra.MakeMetric(
"foobar",
map[string]interface{}{"value": int(101)},
map[string]string{},
telegraf.Untyped,
time.Now(),
)
assert.False(t, ra.Add(m2))
}
// make an untyped, counter, & gauge metric
func TestMakeMetricA(t *testing.T) {
now := time.Now()
ra := NewRunningAggregator(&TestAggregator{}, &AggregatorConfig{
Name: "TestRunningAggregator",
})
assert.Equal(t, "aggregators.TestRunningAggregator", ra.Name())
m := ra.MakeMetric(
"RITest",
map[string]interface{}{"value": int(101)},
map[string]string{},
telegraf.Untyped,
now,
)
assert.Equal(
t,
fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
m.String(),
)
assert.Equal(
t,
m.Type(),
telegraf.Untyped,
)
m = ra.MakeMetric(
"RITest",
map[string]interface{}{"value": int(101)},
map[string]string{},
telegraf.Counter,
now,
)
assert.Equal(
t,
fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
m.String(),
)
assert.Equal(
t,
m.Type(),
telegraf.Counter,
)
m = ra.MakeMetric(
"RITest",
map[string]interface{}{"value": int(101)},
map[string]string{},
telegraf.Gauge,
now,
)
assert.Equal(
t,
fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
m.String(),
)
assert.Equal(
t,
m.Type(),
telegraf.Gauge,
)
}
type TestAggregator struct {
sum int64
}
func (t *TestAggregator) Description() string { return "" }
func (t *TestAggregator) SampleConfig() string { return "" }
func (t *TestAggregator) Reset() {
atomic.StoreInt64(&t.sum, 0)
}
func (t *TestAggregator) Push(acc telegraf.Accumulator) {
acc.AddFields("TestMetric",
map[string]interface{}{"sum": t.sum},
map[string]string{},
)
}
func (t *TestAggregator) Add(in telegraf.Metric) {
for _, v := range in.Fields() {
if vi, ok := v.(int64); ok {
atomic.AddInt64(&t.sum, vi)
}
}
}

View File

@@ -1,15 +1,38 @@
package internal_models
package models
import (
"fmt"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/selfstat"
)
var GlobalMetricsGathered = selfstat.Register("agent", "metrics_gathered", map[string]string{})
type RunningInput struct {
Input telegraf.Input
Config *InputConfig
trace bool
defaultTags map[string]string
MetricsGathered selfstat.Stat
}
func NewRunningInput(
input telegraf.Input,
config *InputConfig,
) *RunningInput {
return &RunningInput{
Input: input,
Config: config,
MetricsGathered: selfstat.Register(
"gather",
"metrics_gathered",
map[string]string{"input": config.Name},
),
}
}
// InputConfig containing a name, interval, and filter
@@ -22,3 +45,52 @@ type InputConfig struct {
Filter Filter
Interval time.Duration
}
func (r *RunningInput) Name() string {
return "inputs." + r.Config.Name
}
// MakeMetric either returns a metric, or returns nil if the metric doesn't
// need to be created (because of filtering, an error, etc.)
func (r *RunningInput) MakeMetric(
measurement string,
fields map[string]interface{},
tags map[string]string,
mType telegraf.ValueType,
t time.Time,
) telegraf.Metric {
m := makemetric(
measurement,
fields,
tags,
r.Config.NameOverride,
r.Config.MeasurementPrefix,
r.Config.MeasurementSuffix,
r.Config.Tags,
r.defaultTags,
r.Config.Filter,
true,
mType,
t,
)
if r.trace && m != nil {
fmt.Print("> " + m.String())
}
r.MetricsGathered.Incr(1)
GlobalMetricsGathered.Incr(1)
return m
}
func (r *RunningInput) Trace() bool {
return r.trace
}
func (r *RunningInput) SetTrace(trace bool) {
r.trace = trace
}
func (r *RunningInput) SetDefaultTags(tags map[string]string) {
r.defaultTags = tags
}

View File

@@ -0,0 +1,463 @@
package models
import (
"fmt"
"math"
"testing"
"time"
"github.com/influxdata/telegraf"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestMakeMetricNoFields(t *testing.T) {
now := time.Now()
ri := NewRunningInput(&testInput{}, &InputConfig{
Name: "TestRunningInput",
})
m := ri.MakeMetric(
"RITest",
map[string]interface{}{},
map[string]string{},
telegraf.Untyped,
now,
)
assert.Nil(t, m)
}
// nil fields should get dropped
func TestMakeMetricNilFields(t *testing.T) {
now := time.Now()
ri := NewRunningInput(&testInput{}, &InputConfig{
Name: "TestRunningInput",
})
m := ri.MakeMetric(
"RITest",
map[string]interface{}{
"value": int(101),
"nil": nil,
},
map[string]string{},
telegraf.Untyped,
now,
)
assert.Equal(
t,
fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
m.String(),
)
}
// make an untyped, counter, & gauge metric
func TestMakeMetric(t *testing.T) {
now := time.Now()
ri := NewRunningInput(&testInput{}, &InputConfig{
Name: "TestRunningInput",
})
ri.SetTrace(true)
assert.Equal(t, true, ri.Trace())
assert.Equal(t, "inputs.TestRunningInput", ri.Name())
m := ri.MakeMetric(
"RITest",
map[string]interface{}{"value": int(101)},
map[string]string{},
telegraf.Untyped,
now,
)
assert.Equal(
t,
fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
m.String(),
)
assert.Equal(
t,
m.Type(),
telegraf.Untyped,
)
m = ri.MakeMetric(
"RITest",
map[string]interface{}{"value": int(101)},
map[string]string{},
telegraf.Counter,
now,
)
assert.Equal(
t,
fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
m.String(),
)
assert.Equal(
t,
m.Type(),
telegraf.Counter,
)
m = ri.MakeMetric(
"RITest",
map[string]interface{}{"value": int(101)},
map[string]string{},
telegraf.Gauge,
now,
)
assert.Equal(
t,
fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
m.String(),
)
assert.Equal(
t,
m.Type(),
telegraf.Gauge,
)
}
func TestMakeMetricWithPluginTags(t *testing.T) {
now := time.Now()
ri := NewRunningInput(&testInput{}, &InputConfig{
Name: "TestRunningInput",
Tags: map[string]string{
"foo": "bar",
},
})
ri.SetTrace(true)
assert.Equal(t, true, ri.Trace())
m := ri.MakeMetric(
"RITest",
map[string]interface{}{"value": int(101)},
nil,
telegraf.Untyped,
now,
)
assert.Equal(
t,
fmt.Sprintf("RITest,foo=bar value=101i %d\n", now.UnixNano()),
m.String(),
)
}
func TestMakeMetricFilteredOut(t *testing.T) {
now := time.Now()
ri := NewRunningInput(&testInput{}, &InputConfig{
Name: "TestRunningInput",
Tags: map[string]string{
"foo": "bar",
},
Filter: Filter{NamePass: []string{"foobar"}},
})
ri.SetTrace(true)
assert.Equal(t, true, ri.Trace())
assert.NoError(t, ri.Config.Filter.Compile())
m := ri.MakeMetric(
"RITest",
map[string]interface{}{"value": int(101)},
nil,
telegraf.Untyped,
now,
)
assert.Nil(t, m)
}
func TestMakeMetricWithDaemonTags(t *testing.T) {
now := time.Now()
ri := NewRunningInput(&testInput{}, &InputConfig{
Name: "TestRunningInput",
})
ri.SetDefaultTags(map[string]string{
"foo": "bar",
})
ri.SetTrace(true)
assert.Equal(t, true, ri.Trace())
m := ri.MakeMetric(
"RITest",
map[string]interface{}{"value": int(101)},
map[string]string{},
telegraf.Untyped,
now,
)
assert.Equal(
t,
fmt.Sprintf("RITest,foo=bar value=101i %d\n", now.UnixNano()),
m.String(),
)
}
// make an untyped, counter, & gauge metric
func TestMakeMetricInfFields(t *testing.T) {
inf := math.Inf(1)
ninf := math.Inf(-1)
now := time.Now()
ri := NewRunningInput(&testInput{}, &InputConfig{
Name: "TestRunningInput",
})
ri.SetTrace(true)
assert.Equal(t, true, ri.Trace())
m := ri.MakeMetric(
"RITest",
map[string]interface{}{
"value": int(101),
"inf": inf,
"ninf": ninf,
},
map[string]string{},
telegraf.Untyped,
now,
)
assert.Equal(
t,
fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
m.String(),
)
}
func TestMakeMetricAllFieldTypes(t *testing.T) {
now := time.Now()
ri := NewRunningInput(&testInput{}, &InputConfig{
Name: "TestRunningInput",
})
ri.SetTrace(true)
assert.Equal(t, true, ri.Trace())
m := ri.MakeMetric(
"RITest",
map[string]interface{}{
"a": int(10),
"b": int8(10),
"c": int16(10),
"d": int32(10),
"e": uint(10),
"f": uint8(10),
"g": uint16(10),
"h": uint32(10),
"i": uint64(10),
"j": float32(10),
"k": uint64(9223372036854775810),
"l": "foobar",
"m": true,
},
map[string]string{},
telegraf.Untyped,
now,
)
assert.Contains(t, m.String(), "a=10i")
assert.Contains(t, m.String(), "b=10i")
assert.Contains(t, m.String(), "c=10i")
assert.Contains(t, m.String(), "d=10i")
assert.Contains(t, m.String(), "e=10i")
assert.Contains(t, m.String(), "f=10i")
assert.Contains(t, m.String(), "g=10i")
assert.Contains(t, m.String(), "h=10i")
assert.Contains(t, m.String(), "i=10i")
assert.Contains(t, m.String(), "j=10")
assert.NotContains(t, m.String(), "j=10i")
assert.Contains(t, m.String(), "k=9223372036854775807i")
assert.Contains(t, m.String(), "l=\"foobar\"")
assert.Contains(t, m.String(), "m=true")
}
func TestMakeMetricNameOverride(t *testing.T) {
now := time.Now()
ri := NewRunningInput(&testInput{}, &InputConfig{
Name: "TestRunningInput",
NameOverride: "foobar",
})
m := ri.MakeMetric(
"RITest",
map[string]interface{}{"value": int(101)},
map[string]string{},
telegraf.Untyped,
now,
)
assert.Equal(
t,
fmt.Sprintf("foobar value=101i %d\n", now.UnixNano()),
m.String(),
)
}
func TestMakeMetricNamePrefix(t *testing.T) {
now := time.Now()
ri := NewRunningInput(&testInput{}, &InputConfig{
Name: "TestRunningInput",
MeasurementPrefix: "foobar_",
})
m := ri.MakeMetric(
"RITest",
map[string]interface{}{"value": int(101)},
map[string]string{},
telegraf.Untyped,
now,
)
assert.Equal(
t,
fmt.Sprintf("foobar_RITest value=101i %d\n", now.UnixNano()),
m.String(),
)
}
func TestMakeMetricNameSuffix(t *testing.T) {
now := time.Now()
ri := NewRunningInput(&testInput{}, &InputConfig{
Name: "TestRunningInput",
MeasurementSuffix: "_foobar",
})
m := ri.MakeMetric(
"RITest",
map[string]interface{}{"value": int(101)},
map[string]string{},
telegraf.Untyped,
now,
)
assert.Equal(
t,
fmt.Sprintf("RITest_foobar value=101i %d\n", now.UnixNano()),
m.String(),
)
}
func TestMakeMetric_TrailingSlash(t *testing.T) {
now := time.Now()
tests := []struct {
name string
measurement string
fields map[string]interface{}
tags map[string]string
expectedNil bool
expectedMeasurement string
expectedFields map[string]interface{}
expectedTags map[string]string
}{
{
name: "Measurement cannot have trailing slash",
measurement: `cpu\`,
fields: map[string]interface{}{
"value": int64(42),
},
tags: map[string]string{},
expectedNil: true,
},
{
name: "Field key with trailing slash dropped",
measurement: `cpu`,
fields: map[string]interface{}{
"value": int64(42),
`bad\`: `xyzzy`,
},
tags: map[string]string{},
expectedMeasurement: `cpu`,
expectedFields: map[string]interface{}{
"value": int64(42),
},
expectedTags: map[string]string{},
},
{
name: "Field value with trailing slash okay",
measurement: `cpu`,
fields: map[string]interface{}{
"value": int64(42),
"ok": `xyzzy\`,
},
tags: map[string]string{},
expectedMeasurement: `cpu`,
expectedFields: map[string]interface{}{
"value": int64(42),
"ok": `xyzzy\`,
},
expectedTags: map[string]string{},
},
{
name: "Must have one field after dropped",
measurement: `cpu`,
fields: map[string]interface{}{
"bad": math.NaN(),
},
tags: map[string]string{},
expectedNil: true,
},
{
name: "Tag key with trailing slash dropped",
measurement: `cpu`,
fields: map[string]interface{}{
"value": int64(42),
},
tags: map[string]string{
`host\`: "localhost",
"a": "x",
},
expectedMeasurement: `cpu`,
expectedFields: map[string]interface{}{
"value": int64(42),
},
expectedTags: map[string]string{
"a": "x",
},
},
{
name: "Tag value with trailing slash dropped",
measurement: `cpu`,
fields: map[string]interface{}{
"value": int64(42),
},
tags: map[string]string{
`host`: `localhost\`,
"a": "x",
},
expectedMeasurement: `cpu`,
expectedFields: map[string]interface{}{
"value": int64(42),
},
expectedTags: map[string]string{
"a": "x",
},
},
}
ri := NewRunningInput(&testInput{}, &InputConfig{
Name: "TestRunningInput",
})
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
m := ri.MakeMetric(
tc.measurement,
tc.fields,
tc.tags,
telegraf.Untyped,
now)
if tc.expectedNil {
require.Nil(t, m)
} else {
require.NotNil(t, m)
require.Equal(t, tc.expectedMeasurement, m.Name())
require.Equal(t, tc.expectedFields, m.Fields())
require.Equal(t, tc.expectedTags, m.Tags())
}
})
}
}
type testInput struct{}
func (t *testInput) Description() string { return "" }
func (t *testInput) SampleConfig() string { return "" }
func (t *testInput) Gather(acc telegraf.Accumulator) error { return nil }

View File

@@ -1,4 +1,4 @@
package internal_models
package models
import (
"log"
@@ -6,29 +6,37 @@ import (
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal/buffer"
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/selfstat"
)
const (
	// Default size of metrics batch size.
	DEFAULT_METRIC_BATCH_SIZE = 1000

	// Default number of metrics kept. It should be a multiple of batch size.
	DEFAULT_METRIC_BUFFER_LIMIT = 10000
)
// RunningOutput contains the output configuration
type RunningOutput struct {
	Name              string
	Output            telegraf.Output
	Config            *OutputConfig
	MetricBufferLimit int
	MetricBatchSize   int

	MetricsFiltered selfstat.Stat
	MetricsWritten  selfstat.Stat
	BufferSize      selfstat.Stat
	BufferLimit     selfstat.Stat
	WriteTime       selfstat.Stat

	metrics     *buffer.Buffer
	failMetrics *buffer.Buffer

	// Guards against concurrent calls to the Output as described in #3009
	sync.Mutex
}
@@ -36,99 +44,145 @@ func NewRunningOutput(
name string,
output telegraf.Output,
conf *OutputConfig,
batchSize int,
bufferLimit int,
) *RunningOutput {
if bufferLimit == 0 {
bufferLimit = DEFAULT_METRIC_BUFFER_LIMIT
}
if batchSize == 0 {
batchSize = DEFAULT_METRIC_BATCH_SIZE
}
	ro := &RunningOutput{
		Name:              name,
		metrics:           buffer.NewBuffer(batchSize),
		failMetrics:       buffer.NewBuffer(bufferLimit),
		Output:            output,
		Config:            conf,
		MetricBufferLimit: bufferLimit,
		MetricBatchSize:   batchSize,
		MetricsWritten: selfstat.Register(
			"write",
			"metrics_written",
			map[string]string{"output": name},
		),
		MetricsFiltered: selfstat.Register(
			"write",
			"metrics_filtered",
			map[string]string{"output": name},
		),
		BufferSize: selfstat.Register(
			"write",
			"buffer_size",
			map[string]string{"output": name},
		),
		BufferLimit: selfstat.Register(
			"write",
			"buffer_limit",
			map[string]string{"output": name},
		),
		WriteTime: selfstat.RegisterTiming(
			"write",
			"write_time_ns",
			map[string]string{"output": name},
		),
	}
ro.BufferLimit.Incr(int64(ro.MetricBufferLimit))
return ro
}
// AddMetric adds a metric to the output.
//
// Filters are applied first; once a full batch of MetricBatchSize metrics
// has accumulated, the batch is written immediately.
func (ro *RunningOutput) AddMetric(m telegraf.Metric) {
if m == nil {
return
}
// Filter any tagexclude/taginclude parameters before adding metric
if ro.Config.Filter.IsActive() {
// In order to filter out tags, we need to create a new metric, since
// metrics are immutable once created.
name := m.Name()
tags := m.Tags()
fields := m.Fields()
t := m.Time()
if ok := ro.Config.Filter.Apply(name, fields, tags); !ok {
ro.MetricsFiltered.Incr(1)
return
}
// error is not possible if creating from another metric, so ignore.
m, _ = metric.New(name, tags, fields, t)
}
ro.metrics.Add(m)
if ro.metrics.Len() == ro.MetricBatchSize {
batch := ro.metrics.Batch(ro.MetricBatchSize)
err := ro.write(batch)
if err != nil {
ro.failMetrics.Add(batch...)
}
}
}
// Write writes all cached points to this output.
func (ro *RunningOutput) Write() error {
nFails, nMetrics := ro.failMetrics.Len(), ro.metrics.Len()
ro.BufferSize.Set(int64(nFails + nMetrics))
log.Printf("D! Output [%s] buffer fullness: %d / %d metrics. ",
ro.Name, nFails+nMetrics, ro.MetricBufferLimit)
var err error
if !ro.failMetrics.IsEmpty() {
// how many batches of failed writes we need to write.
nBatches := nFails/ro.MetricBatchSize + 1
batchSize := ro.MetricBatchSize
for i := 0; i < nBatches; i++ {
// If it's the last batch, only grab the metrics that have not had
// a write attempt already (this is primarily to preserve order).
if i == nBatches-1 {
batchSize = nFails % ro.MetricBatchSize
}
batch := ro.failMetrics.Batch(batchSize)
// If we've already failed previous writes, don't bother trying to
// write to this output again. We are not exiting the loop just so
// that we can rotate the metrics to preserve order.
if err == nil {
err = ro.write(batch)
}
if err != nil {
ro.failMetrics.Add(batch...)
}
}
}
batch := ro.metrics.Batch(ro.MetricBatchSize)
// see comment above about not trying to write to an already failed output.
// if ro.failMetrics is empty then err will always be nil at this point.
if err == nil {
err = ro.write(batch)
}
if err != nil {
ro.failMetrics.Add(batch...)
return err
}
return nil
}
func (ro *RunningOutput) write(metrics []telegraf.Metric) error {
nMetrics := len(metrics)
if nMetrics == 0 {
return nil
}
ro.Lock()
defer ro.Unlock()
start := time.Now()
err := ro.Output.Write(metrics)
elapsed := time.Since(start)
if err == nil {
log.Printf("D! Output [%s] wrote batch of %d metrics in %s\n",
ro.Name, nMetrics, elapsed)
ro.MetricsWritten.Incr(int64(nMetrics))
ro.WriteTime.Incr(elapsed.Nanoseconds())
}
return err
}

View File

@@ -1,8 +1,7 @@
package internal_models
package models
import (
"fmt"
"sort"
"sync"
"testing"
@@ -29,16 +28,106 @@ var next5 = []telegraf.Metric{
testutil.TestMetric(101, "metric10"),
}
// Benchmark adding metrics.
func BenchmarkRunningOutputAddWrite(b *testing.B) {
	conf := &OutputConfig{
		Filter: Filter{},
	}
	m := &perfOutput{}
	ro := NewRunningOutput("test", m, conf, 1000, 10000)

	for n := 0; n < b.N; n++ {
		ro.AddMetric(testutil.TestMetric(101, "metric1"))
		ro.Write()
	}
}
// Benchmark adding metrics.
func BenchmarkRunningOutputAddWriteEvery100(b *testing.B) {
conf := &OutputConfig{
Filter: Filter{},
}
m := &perfOutput{}
ro := NewRunningOutput("test", m, conf, 1000, 10000)
for n := 0; n < b.N; n++ {
ro.AddMetric(testutil.TestMetric(101, "metric1"))
if n%100 == 0 {
ro.Write()
}
}
}
// Benchmark adding metrics.
func BenchmarkRunningOutputAddFailWrites(b *testing.B) {
conf := &OutputConfig{
Filter: Filter{},
}
m := &perfOutput{}
m.failWrite = true
ro := NewRunningOutput("test", m, conf, 1000, 10000)
for n := 0; n < b.N; n++ {
ro.AddMetric(testutil.TestMetric(101, "metric1"))
}
}
func TestAddingNilMetric(t *testing.T) {
conf := &OutputConfig{
Filter: Filter{},
}
m := &mockOutput{}
ro := NewRunningOutput("test", m, conf)
ro := NewRunningOutput("test", m, conf, 1000, 10000)
ro.AddMetric(nil)
ro.AddMetric(nil)
ro.AddMetric(nil)
err := ro.Write()
assert.NoError(t, err)
assert.Len(t, m.Metrics(), 0)
}
// Test that NameDrop filters get properly applied.
func TestRunningOutput_DropFilter(t *testing.T) {
conf := &OutputConfig{
Filter: Filter{
NameDrop: []string{"metric1", "metric2"},
},
}
assert.NoError(t, conf.Filter.Compile())
m := &mockOutput{}
ro := NewRunningOutput("test", m, conf, 1000, 10000)
for _, metric := range first5 {
ro.AddMetric(metric)
}
for _, metric := range next5 {
ro.AddMetric(metric)
}
assert.Len(t, m.Metrics(), 0)
err := ro.Write()
assert.NoError(t, err)
assert.Len(t, m.Metrics(), 8)
}
// Test that NameDrop filters without a match do nothing.
func TestRunningOutput_PassFilter(t *testing.T) {
conf := &OutputConfig{
Filter: Filter{
NameDrop: []string{"metric1000", "foo*"},
},
}
assert.NoError(t, conf.Filter.Compile())
m := &mockOutput{}
ro := NewRunningOutput("test", m, conf, 1000, 10000)
for _, metric := range first5 {
ro.AddMetric(metric)
@@ -53,50 +142,98 @@ func TestRunningOutputDefault(t *testing.T) {
assert.Len(t, m.Metrics(), 10)
}
// Test that a TagInclude pattern without a match drops all tags
func TestRunningOutput_TagIncludeNoMatch(t *testing.T) {
	conf := &OutputConfig{
		Filter: Filter{
			TagInclude: []string{"nothing*"},
		},
	}
	assert.NoError(t, conf.Filter.Compile())

	m := &mockOutput{}
	ro := NewRunningOutput("test", m, conf, 1000, 10000)

	ro.AddMetric(testutil.TestMetric(101, "metric1"))
	assert.Len(t, m.Metrics(), 0)

	err := ro.Write()
	assert.NoError(t, err)
	assert.Len(t, m.Metrics(), 1)
	assert.Empty(t, m.Metrics()[0].Tags())
}
// Test that tags are properly excluded
func TestRunningOutput_TagExcludeMatch(t *testing.T) {
	conf := &OutputConfig{
		Filter: Filter{
			TagExclude: []string{"tag*"},
		},
	}
	assert.NoError(t, conf.Filter.Compile())

	m := &mockOutput{}
	ro := NewRunningOutput("test", m, conf, 1000, 10000)

	ro.AddMetric(testutil.TestMetric(101, "metric1"))
	assert.Len(t, m.Metrics(), 0)

	err := ro.Write()
	assert.NoError(t, err)
	assert.Len(t, m.Metrics(), 1)
	assert.Len(t, m.Metrics()[0].Tags(), 0)
}
// Test that a TagExclude pattern without a match leaves tags intact
func TestRunningOutput_TagExcludeNoMatch(t *testing.T) {
conf := &OutputConfig{
Filter: Filter{
TagExclude: []string{"nothing*"},
},
}
assert.NoError(t, conf.Filter.Compile())
m := &mockOutput{}
ro := NewRunningOutput("test", m, conf, 1000, 10000)
ro.AddMetric(testutil.TestMetric(101, "metric1"))
assert.Len(t, m.Metrics(), 0)
err := ro.Write()
assert.NoError(t, err)
assert.Len(t, m.Metrics(), 1)
assert.Len(t, m.Metrics()[0].Tags(), 1)
}
// Test that tags are properly included
func TestRunningOutput_TagIncludeMatch(t *testing.T) {
conf := &OutputConfig{
Filter: Filter{
TagInclude: []string{"tag*"},
},
}
assert.NoError(t, conf.Filter.Compile())
m := &mockOutput{}
ro := NewRunningOutput("test", m, conf, 1000, 10000)
ro.AddMetric(testutil.TestMetric(101, "metric1"))
assert.Len(t, m.Metrics(), 0)
err := ro.Write()
assert.NoError(t, err)
assert.Len(t, m.Metrics(), 1)
assert.Len(t, m.Metrics()[0].Tags(), 1)
}
// Test that we can write metrics with a simple default setup.
func TestRunningOutputDefault(t *testing.T) {
conf := &OutputConfig{
Filter: Filter{},
}
m := &mockOutput{}
ro := NewRunningOutput("test", m, conf, 1000, 10000)
for _, metric := range first5 {
ro.AddMetric(metric)
@@ -104,39 +241,24 @@ func TestRunningOutputMultiOverwrite(t *testing.T) {
for _, metric := range next5 {
ro.AddMetric(metric)
}
assert.Len(t, m.Metrics(), 0)
err := ro.Write()
assert.NoError(t, err)
assert.Len(t, m.Metrics(), 10)
}
// Test that running output doesn't flush until it's full when
// FlushBufferWhenFull is set.
func TestRunningOutputFlushWhenFull(t *testing.T) {
conf := &OutputConfig{
Filter: Filter{},
}
m := &mockOutput{}
ro := NewRunningOutput("test", m, conf, 6, 10)
// Fill buffer to 1 under limit
for _, metric := range first5 {
ro.AddMetric(metric)
}
@@ -159,15 +281,11 @@ func TestRunningOutputFlushWhenFull(t *testing.T) {
// FlushBufferWhenFull is set, twice.
func TestRunningOutputMultiFlushWhenFull(t *testing.T) {
conf := &OutputConfig{
Filter: Filter{},
}
m := &mockOutput{}
ro := NewRunningOutput("test", m, conf, 4, 12)
// Fill buffer past limit twice
for _, metric := range first5 {
@@ -177,23 +295,19 @@ func TestRunningOutputMultiFlushWhenFull(t *testing.T) {
ro.AddMetric(metric)
}
// flushed twice
assert.Len(t, m.Metrics(), 8)
}
func TestRunningOutputWriteFail(t *testing.T) {
conf := &OutputConfig{
Filter: Filter{},
}
m := &mockOutput{}
m.failWrite = true
ro := NewRunningOutput("test", m, conf, 4, 12)
// Fill buffer to limit twice
for _, metric := range first5 {
ro.AddMetric(metric)
}
@@ -216,6 +330,155 @@ func TestRunningOutputWriteFail(t *testing.T) {
assert.Len(t, m.Metrics(), 10)
}
// Verify that the order of points is preserved during a write failure.
func TestRunningOutputWriteFailOrder(t *testing.T) {
conf := &OutputConfig{
Filter: Filter{},
}
m := &mockOutput{}
m.failWrite = true
ro := NewRunningOutput("test", m, conf, 100, 1000)
// add 5 metrics
for _, metric := range first5 {
ro.AddMetric(metric)
}
// no successful flush yet
assert.Len(t, m.Metrics(), 0)
// Write fails
err := ro.Write()
require.Error(t, err)
// no successful flush yet
assert.Len(t, m.Metrics(), 0)
m.failWrite = false
// add 5 more metrics
for _, metric := range next5 {
ro.AddMetric(metric)
}
err = ro.Write()
require.NoError(t, err)
// Verify that 10 metrics were written
assert.Len(t, m.Metrics(), 10)
// Verify that they are in order
expected := append(first5, next5...)
assert.Equal(t, expected, m.Metrics())
}
// Verify that the order of points is preserved during many write failures.
func TestRunningOutputWriteFailOrder2(t *testing.T) {
conf := &OutputConfig{
Filter: Filter{},
}
m := &mockOutput{}
m.failWrite = true
ro := NewRunningOutput("test", m, conf, 5, 100)
// add 5 metrics
for _, metric := range first5 {
ro.AddMetric(metric)
}
// Write fails
err := ro.Write()
require.Error(t, err)
// no successful flush yet
assert.Len(t, m.Metrics(), 0)
// add 5 metrics
for _, metric := range next5 {
ro.AddMetric(metric)
}
// Write fails
err = ro.Write()
require.Error(t, err)
// no successful flush yet
assert.Len(t, m.Metrics(), 0)
// add 5 metrics
for _, metric := range first5 {
ro.AddMetric(metric)
}
// Write fails
err = ro.Write()
require.Error(t, err)
// no successful flush yet
assert.Len(t, m.Metrics(), 0)
// add 5 metrics
for _, metric := range next5 {
ro.AddMetric(metric)
}
// Write fails
err = ro.Write()
require.Error(t, err)
// no successful flush yet
assert.Len(t, m.Metrics(), 0)
m.failWrite = false
err = ro.Write()
require.NoError(t, err)
// Verify that 20 metrics were written
assert.Len(t, m.Metrics(), 20)
// Verify that they are in order
expected := append(first5, next5...)
expected = append(expected, first5...)
expected = append(expected, next5...)
assert.Equal(t, expected, m.Metrics())
}
// Verify that the order of points is preserved when there is a remainder
// of points for the batch.
//
// ie, with a batch size of 5:
//
// 1 2 3 4 5 6 <-- order, failed points
// 6 1 2 3 4 5 <-- order, after 1st write failure (1 2 3 4 5 was batch)
// 1 2 3 4 5 6 <-- order, after 2nd write failure, (6 was batch)
//
func TestRunningOutputWriteFailOrder3(t *testing.T) {
conf := &OutputConfig{
Filter: Filter{},
}
m := &mockOutput{}
m.failWrite = true
ro := NewRunningOutput("test", m, conf, 5, 1000)
// add 5 metrics
for _, metric := range first5 {
ro.AddMetric(metric)
}
// no successful flush yet
assert.Len(t, m.Metrics(), 0)
// Write fails
err := ro.Write()
require.Error(t, err)
// no successful flush yet
assert.Len(t, m.Metrics(), 0)
// add and attempt to write a single metric:
ro.AddMetric(next5[0])
err = ro.Write()
require.Error(t, err)
// unset fail and write metrics
m.failWrite = false
err = ro.Write()
require.NoError(t, err)
// Verify that 6 metrics were written
assert.Len(t, m.Metrics(), 6)
// Verify that they are in order
expected := append(first5, next5[0])
assert.Equal(t, expected, m.Metrics())
}
type mockOutput struct {
sync.Mutex
@@ -263,3 +526,31 @@ func (m *mockOutput) Metrics() []telegraf.Metric {
defer m.Unlock()
return m.metrics
}
type perfOutput struct {
// if true, mock a write failure
failWrite bool
}
func (m *perfOutput) Connect() error {
return nil
}
func (m *perfOutput) Close() error {
return nil
}
func (m *perfOutput) Description() string {
return ""
}
func (m *perfOutput) SampleConfig() string {
return ""
}
func (m *perfOutput) Write(metrics []telegraf.Metric) error {
if m.failWrite {
return fmt.Errorf("Failed Write!")
}
return nil
}


@@ -0,0 +1,51 @@
package models
import (
"sync"
"github.com/influxdata/telegraf"
)
type RunningProcessor struct {
Name string
sync.Mutex
Processor telegraf.Processor
Config *ProcessorConfig
}
type RunningProcessors []*RunningProcessor
func (rp RunningProcessors) Len() int { return len(rp) }
func (rp RunningProcessors) Swap(i, j int) { rp[i], rp[j] = rp[j], rp[i] }
func (rp RunningProcessors) Less(i, j int) bool { return rp[i].Config.Order < rp[j].Config.Order }
// ProcessorConfig contains the name, order, and filter of a processor
type ProcessorConfig struct {
Name string
Order int64
Filter Filter
}
func (rp *RunningProcessor) Apply(in ...telegraf.Metric) []telegraf.Metric {
rp.Lock()
defer rp.Unlock()
ret := []telegraf.Metric{}
for _, metric := range in {
if rp.Config.Filter.IsActive() {
// check if the filter should be applied to this metric
if ok := rp.Config.Filter.Apply(metric.Name(), metric.Fields(), metric.Tags()); !ok {
// this means filter should not be applied
ret = append(ret, metric)
continue
}
}
// This metric should pass through the filter, so call the filter Apply
// function and append results to the output slice.
ret = append(ret, rp.Processor.Apply(metric)...)
}
return ret
}
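A minimal ordering sketch (procA, procB, m1, and m2 are hypothetical, and the sort package is assumed to be imported): RunningProcessors implements sort.Interface, so a caller can sort by the configured Order before applying each processor in turn.
// Sketch only: order two hypothetical processors, then chain Apply calls.
procs := RunningProcessors{procB, procA}
sort.Sort(procs) // ascending by Config.Order
metrics := []telegraf.Metric{m1, m2}
for _, rp := range procs {
metrics = rp.Apply(metrics...)
}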


@@ -0,0 +1,117 @@
package models
import (
"testing"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
)
type TestProcessor struct {
}
func (f *TestProcessor) SampleConfig() string { return "" }
func (f *TestProcessor) Description() string { return "" }
// Apply renames:
// "foo" to "fuz"
// "bar" to "baz"
// And it also drops measurements named "dropme"
func (f *TestProcessor) Apply(in ...telegraf.Metric) []telegraf.Metric {
out := make([]telegraf.Metric, 0)
for _, m := range in {
switch m.Name() {
case "foo":
out = append(out, testutil.TestMetric(1, "fuz"))
case "bar":
out = append(out, testutil.TestMetric(1, "baz"))
case "dropme":
// drop the metric!
default:
out = append(out, m)
}
}
return out
}
func NewTestRunningProcessor() *RunningProcessor {
out := &RunningProcessor{
Name: "test",
Processor: &TestProcessor{},
Config: &ProcessorConfig{Filter: Filter{}},
}
return out
}
func TestRunningProcessor(t *testing.T) {
inmetrics := []telegraf.Metric{
testutil.TestMetric(1, "foo"),
testutil.TestMetric(1, "bar"),
testutil.TestMetric(1, "baz"),
}
expectedNames := []string{
"fuz",
"baz",
"baz",
}
rfp := NewTestRunningProcessor()
filteredMetrics := rfp.Apply(inmetrics...)
actualNames := []string{
filteredMetrics[0].Name(),
filteredMetrics[1].Name(),
filteredMetrics[2].Name(),
}
assert.Equal(t, expectedNames, actualNames)
}
func TestRunningProcessor_WithNameDrop(t *testing.T) {
inmetrics := []telegraf.Metric{
testutil.TestMetric(1, "foo"),
testutil.TestMetric(1, "bar"),
testutil.TestMetric(1, "baz"),
}
expectedNames := []string{
"foo",
"baz",
"baz",
}
rfp := NewTestRunningProcessor()
rfp.Config.Filter.NameDrop = []string{"foo"}
assert.NoError(t, rfp.Config.Filter.Compile())
filteredMetrics := rfp.Apply(inmetrics...)
actualNames := []string{
filteredMetrics[0].Name(),
filteredMetrics[1].Name(),
filteredMetrics[2].Name(),
}
assert.Equal(t, expectedNames, actualNames)
}
func TestRunningProcessor_DroppedMetric(t *testing.T) {
inmetrics := []telegraf.Metric{
testutil.TestMetric(1, "dropme"),
testutil.TestMetric(1, "foo"),
testutil.TestMetric(1, "bar"),
}
expectedNames := []string{
"fuz",
"baz",
}
rfp := NewTestRunningProcessor()
filteredMetrics := rfp.Apply(inmetrics...)
actualNames := []string{
filteredMetrics[0].Name(),
filteredMetrics[1].Name(),
}
assert.Equal(t, expectedNames, actualNames)
}

logger/logger.go Normal file

@@ -0,0 +1,69 @@
package logger
import (
"io"
"log"
"os"
"regexp"
"time"
"github.com/influxdata/wlog"
)
var prefixRegex = regexp.MustCompile("^[DIWE]!")
// newTelegrafWriter returns an io.Writer that prepends a UTC timestamp and, when one is missing, a default I! level prefix.
func newTelegrafWriter(w io.Writer) io.Writer {
return &telegrafLog{
writer: wlog.NewWriter(w),
}
}
type telegrafLog struct {
writer io.Writer
}
func (t *telegrafLog) Write(b []byte) (n int, err error) {
var line []byte
if !prefixRegex.Match(b) {
line = append([]byte(time.Now().UTC().Format(time.RFC3339)+" I! "), b...)
} else {
line = append([]byte(time.Now().UTC().Format(time.RFC3339)+" "), b...)
}
return t.writer.Write(line)
}
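A quick behavior sketch (the output shapes are illustrative): lines that already carry a D!/I!/W!/E! prefix keep their level; anything else is tagged I! after the timestamp.
w := newTelegrafWriter(os.Stderr)
w.Write([]byte("no prefix\n")) // <RFC3339 UTC> I! no prefix
w.Write([]byte("E! boom\n")) // <RFC3339 UTC> E! boom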
// SetupLogging configures the logging output.
// debug will set the log level to DEBUG
// quiet will set the log level to ERROR
// logfile will direct the logging output to a file. An empty string is
// interpreted as stderr. If there is an error opening the file, the
// logger will fall back to stderr.
func SetupLogging(debug, quiet bool, logfile string) {
log.SetFlags(0)
if debug {
wlog.SetLevel(wlog.DEBUG)
}
if quiet {
wlog.SetLevel(wlog.ERROR)
}
var oFile *os.File
if logfile != "" {
if _, err := os.Stat(logfile); os.IsNotExist(err) {
if oFile, err = os.Create(logfile); err != nil {
log.Printf("E! Unable to create %s (%s), using stderr", logfile, err)
oFile = os.Stderr
}
} else {
if oFile, err = os.OpenFile(logfile, os.O_APPEND|os.O_WRONLY, os.ModeAppend); err != nil {
log.Printf("E! Unable to append to %s (%s), using stderr", logfile, err)
oFile = os.Stderr
}
}
} else {
oFile = os.Stderr
}
log.SetOutput(newTelegrafWriter(oFile))
}
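A hedged usage sketch (the path is illustrative): enable debug-level logging to a file; if the file cannot be created or opened for append, SetupLogging logs an E! line and falls back to stderr.
SetupLogging(true, false, "/tmp/telegraf.log")
log.Printf("D! starting up") // emitted because debug set wlog.DEBUG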

logger/logger_test.go Normal file

@@ -0,0 +1,75 @@
package logger
import (
"bytes"
"io/ioutil"
"log"
"os"
"testing"
"github.com/stretchr/testify/assert"
)
func TestWriteLogToFile(t *testing.T) {
tmpfile, err := ioutil.TempFile("", "")
assert.NoError(t, err)
defer func() { os.Remove(tmpfile.Name()) }()
SetupLogging(false, false, tmpfile.Name())
log.Printf("I! TEST")
log.Printf("D! TEST") // <- should be ignored
f, err := ioutil.ReadFile(tmpfile.Name())
assert.NoError(t, err)
assert.Equal(t, f[19:], []byte("Z I! TEST\n"))
}
func TestDebugWriteLogToFile(t *testing.T) {
tmpfile, err := ioutil.TempFile("", "")
assert.NoError(t, err)
defer func() { os.Remove(tmpfile.Name()) }()
SetupLogging(true, false, tmpfile.Name())
log.Printf("D! TEST")
f, err := ioutil.ReadFile(tmpfile.Name())
assert.NoError(t, err)
assert.Equal(t, f[19:], []byte("Z D! TEST\n"))
}
func TestErrorWriteLogToFile(t *testing.T) {
tmpfile, err := ioutil.TempFile("", "")
assert.NoError(t, err)
defer func() { os.Remove(tmpfile.Name()) }()
SetupLogging(false, true, tmpfile.Name())
log.Printf("E! TEST")
log.Printf("I! TEST") // <- should be ignored
f, err := ioutil.ReadFile(tmpfile.Name())
assert.NoError(t, err)
assert.Equal(t, f[19:], []byte("Z E! TEST\n"))
}
func TestAddDefaultLogLevel(t *testing.T) {
tmpfile, err := ioutil.TempFile("", "")
assert.NoError(t, err)
defer func() { os.Remove(tmpfile.Name()) }()
SetupLogging(true, false, tmpfile.Name())
log.Printf("TEST")
f, err := ioutil.ReadFile(tmpfile.Name())
assert.NoError(t, err)
assert.Equal(t, f[19:], []byte("Z I! TEST\n"))
}
func BenchmarkTelegrafLogWrite(b *testing.B) {
var msg = []byte("test")
var buf bytes.Buffer
w := newTelegrafWriter(&buf)
for i := 0; i < b.N; i++ {
buf.Reset()
w.Write(msg)
}
}

metric.go

@@ -2,93 +2,63 @@ package telegraf
import (
"time"
)
"github.com/influxdata/influxdb/client/v2"
// ValueType is an enumeration of metric types that represent a simple value.
type ValueType int
// Possible values for the ValueType enum.
const (
_ ValueType = iota
Counter
Gauge
Untyped
Summary
Histogram
)
type Metric interface {
// Name returns the measurement name of the metric
Name() string
// Name returns the tags associated with the metric
Tags() map[string]string
// Time return the timestamp for the metric
Time() time.Time
// UnixNano returns the unix nano time of the metric
UnixNano() int64
// Fields returns the fields for the metric
Fields() map[string]interface{}
// String returns a line-protocol string of the metric
// Serialize serializes the metric into a line-protocol byte buffer,
// including a newline at the end.
Serialize() []byte
// same as Serialize, but avoids an allocation.
// returns number of bytes copied into dst.
SerializeTo(dst []byte) int
// String is the same as Serialize, but returns a string.
String() string
// Copy deep-copies the metric.
Copy() Metric
// Split will attempt to return multiple metrics with the same timestamp
// whose string representations are no longer than maxSize.
// Metrics with a single field may exceed the requested size.
Split(maxSize int) []Metric
// PrecisionString returns a line-protocol string of the metric, at precision
PrecisionString(precison string) string
// Tag functions
HasTag(key string) bool
AddTag(key, value string)
RemoveTag(key string)
// Point returns a influxdb client.Point object
Point() *client.Point
}
// metric is a wrapper of the influxdb client.Point struct
type metric struct {
pt *client.Point
}
// NewMetric returns a metric with the given timestamp. If a timestamp is not
// given, then data is sent to the database without a timestamp, in which case
// the server will assign local time upon reception. NOTE: it is recommended to
// send data with a timestamp.
func NewMetric(
name string,
tags map[string]string,
fields map[string]interface{},
t ...time.Time,
) (Metric, error) {
var T time.Time
if len(t) > 0 {
T = t[0]
}
pt, err := client.NewPoint(name, tags, fields, T)
if err != nil {
return nil, err
}
return &metric{
pt: pt,
}, nil
}
func (m *metric) Name() string {
return m.pt.Name()
}
func (m *metric) Tags() map[string]string {
return m.pt.Tags()
}
func (m *metric) Time() time.Time {
return m.pt.Time()
}
func (m *metric) UnixNano() int64 {
return m.pt.UnixNano()
}
func (m *metric) Fields() map[string]interface{} {
return m.pt.Fields()
}
func (m *metric) String() string {
return m.pt.String()
}
func (m *metric) PrecisionString(precison string) string {
return m.pt.PrecisionString(precison)
}
func (m *metric) Point() *client.Point {
return m.pt
// Field functions
HasField(key string) bool
AddField(key string, value interface{})
RemoveField(key string) error
// Name functions
SetName(name string)
SetPrefix(prefix string)
SetSuffix(suffix string)
// Getting data structure functions
Name() string
Tags() map[string]string
Fields() map[string]interface{}
Time() time.Time
UnixNano() int64
Type() ValueType
Len() int // returns the length of the serialized metric, including newline
HashID() uint64
// aggregator things:
SetAggregate(bool)
IsAggregate() bool
}
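For orientation, a sketch of how a consumer uses this interface via the metric.New constructor added later in this changeset (assumes fmt is imported):
m, err := metric.New(
"cpu",
map[string]string{"host": "localhost"},
map[string]interface{}{"usage_idle": float64(99)},
time.Now(),
)
if err == nil {
fmt.Print(m.String()) // cpu,host=localhost usage_idle=99 <timestamp>\n
}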

metric/escape.go Normal file

@@ -0,0 +1,55 @@
package metric
import (
"strings"
)
var (
// escaper is for escaping:
// - tag keys
// - tag values
// - field keys
// see https://docs.influxdata.com/influxdb/v1.0/write_protocols/line_protocol_tutorial/#special-characters-and-keywords
escaper = strings.NewReplacer(`,`, `\,`, `"`, `\"`, ` `, `\ `, `=`, `\=`)
unEscaper = strings.NewReplacer(`\,`, `,`, `\"`, `"`, `\ `, ` `, `\=`, `=`)
// nameEscaper is for escaping measurement names only.
// see https://docs.influxdata.com/influxdb/v1.0/write_protocols/line_protocol_tutorial/#special-characters-and-keywords
nameEscaper = strings.NewReplacer(`,`, `\,`, ` `, `\ `)
nameUnEscaper = strings.NewReplacer(`\,`, `,`, `\ `, ` `)
// stringFieldEscaper is for escaping string field values only.
// see https://docs.influxdata.com/influxdb/v1.0/write_protocols/line_protocol_tutorial/#special-characters-and-keywords
stringFieldEscaper = strings.NewReplacer(
`"`, `\"`,
`\`, `\\`,
)
stringFieldUnEscaper = strings.NewReplacer(
`\"`, `"`,
`\\`, `\`,
)
)
func escape(s string, t string) string {
switch t {
case "fieldkey", "tagkey", "tagval":
return escaper.Replace(s)
case "name":
return nameEscaper.Replace(s)
case "fieldval":
return stringFieldEscaper.Replace(s)
}
return s
}
func unescape(s string, t string) string {
switch t {
case "fieldkey", "tagkey", "tagval":
return unEscaper.Replace(s)
case "name":
return nameUnEscaper.Replace(s)
case "fieldval":
return stringFieldUnEscaper.Replace(s)
}
return s
}
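A minimal round-trip sketch (assumes fmt is imported):
v := `us west,zone=1`
esc := escape(v, "tagval") // `us\ west\,zone\=1`
fmt.Println(unescape(esc, "tagval") == v) // true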


@@ -0,0 +1,38 @@
package metric
import (
"reflect"
"strconv"
"unsafe"
)
// parseIntBytes is a zero-alloc wrapper around strconv.ParseInt.
func parseIntBytes(b []byte, base int, bitSize int) (i int64, err error) {
s := unsafeBytesToString(b)
return strconv.ParseInt(s, base, bitSize)
}
// parseFloatBytes is a zero-alloc wrapper around strconv.ParseFloat.
func parseFloatBytes(b []byte, bitSize int) (float64, error) {
s := unsafeBytesToString(b)
return strconv.ParseFloat(s, bitSize)
}
// parseBoolBytes is a zero-alloc wrapper around strconv.ParseBool.
func parseBoolBytes(b []byte) (bool, error) {
return strconv.ParseBool(unsafeBytesToString(b))
}
// unsafeBytesToString converts a []byte to a string without a heap allocation.
//
// It is unsafe, and is intended to prepare input to short-lived functions
// that require strings.
func unsafeBytesToString(in []byte) string {
src := *(*reflect.SliceHeader)(unsafe.Pointer(&in))
dst := reflect.StringHeader{
Data: src.Data,
Len: src.Len,
}
s := *(*string)(unsafe.Pointer(&dst))
return s
}
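A sketch of the intended use: parse numeric bytes straight out of a larger buffer without allocating an intermediate string (assumes fmt is imported).
n, err := parseIntBytes([]byte("1480614053000000000"), 10, 64)
if err == nil {
fmt.Println(n) // 1480614053000000000
}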


@@ -0,0 +1,103 @@
package metric
import (
"strconv"
"testing"
"testing/quick"
)
func TestParseIntBytesEquivalenceFuzz(t *testing.T) {
f := func(b []byte, base int, bitSize int) bool {
exp, expErr := strconv.ParseInt(string(b), base, bitSize)
got, gotErr := parseIntBytes(b, base, bitSize)
return exp == got && checkErrs(expErr, gotErr)
}
cfg := &quick.Config{
MaxCount: 10000,
}
if err := quick.Check(f, cfg); err != nil {
t.Fatal(err)
}
}
func TestParseIntBytesValid64bitBase10EquivalenceFuzz(t *testing.T) {
buf := []byte{}
f := func(n int64) bool {
buf = strconv.AppendInt(buf[:0], n, 10)
exp, expErr := strconv.ParseInt(string(buf), 10, 64)
got, gotErr := parseIntBytes(buf, 10, 64)
return exp == got && checkErrs(expErr, gotErr)
}
cfg := &quick.Config{
MaxCount: 10000,
}
if err := quick.Check(f, cfg); err != nil {
t.Fatal(err)
}
}
func TestParseFloatBytesEquivalenceFuzz(t *testing.T) {
f := func(b []byte, bitSize int) bool {
exp, expErr := strconv.ParseFloat(string(b), bitSize)
got, gotErr := parseFloatBytes(b, bitSize)
return exp == got && checkErrs(expErr, gotErr)
}
cfg := &quick.Config{
MaxCount: 10000,
}
if err := quick.Check(f, cfg); err != nil {
t.Fatal(err)
}
}
func TestParseFloatBytesValid64bitEquivalenceFuzz(t *testing.T) {
buf := []byte{}
f := func(n float64) bool {
buf = strconv.AppendFloat(buf[:0], n, 'f', -1, 64)
exp, expErr := strconv.ParseFloat(string(buf), 64)
got, gotErr := parseFloatBytes(buf, 64)
return exp == got && checkErrs(expErr, gotErr)
}
cfg := &quick.Config{
MaxCount: 10000,
}
if err := quick.Check(f, cfg); err != nil {
t.Fatal(err)
}
}
func TestParseBoolBytesEquivalence(t *testing.T) {
var buf []byte
for _, s := range []string{"1", "t", "T", "TRUE", "true", "True", "0", "f", "F", "FALSE", "false", "False", "fail", "TrUe", "FAlSE", "numbers", ""} {
buf = append(buf[:0], s...)
exp, expErr := strconv.ParseBool(s)
got, gotErr := parseBoolBytes(buf)
if got != exp || !checkErrs(expErr, gotErr) {
t.Errorf("Failed to parse boolean value %q correctly: wanted (%t, %v), got (%t, %v)", s, exp, expErr, got, gotErr)
}
}
}
func checkErrs(a, b error) bool {
if (a == nil) != (b == nil) {
return false
}
return a == nil || a.Error() == b.Error()
}

metric/metric.go Normal file

@@ -0,0 +1,623 @@
package metric
import (
"bytes"
"fmt"
"hash/fnv"
"sort"
"strconv"
"strings"
"time"
"github.com/influxdata/telegraf"
)
const MaxInt = int(^uint(0) >> 1)
func New(
name string,
tags map[string]string,
fields map[string]interface{},
t time.Time,
mType ...telegraf.ValueType,
) (telegraf.Metric, error) {
if len(name) == 0 {
return nil, fmt.Errorf("missing measurement name")
}
if len(fields) == 0 {
return nil, fmt.Errorf("%s: must have one or more fields", name)
}
if strings.HasSuffix(name, `\`) {
return nil, fmt.Errorf("%s: measurement name cannot end with a backslash", name)
}
var thisType telegraf.ValueType
if len(mType) > 0 {
thisType = mType[0]
} else {
thisType = telegraf.Untyped
}
m := &metric{
name: []byte(escape(name, "name")),
t: []byte(fmt.Sprint(t.UnixNano())),
nsec: t.UnixNano(),
mType: thisType,
}
// pre-allocate exact size of the tags slice
taglen := 0
for k, v := range tags {
if strings.HasSuffix(k, `\`) {
return nil, fmt.Errorf("%s: tag key cannot end with a backslash: %s", name, k)
}
if strings.HasSuffix(v, `\`) {
return nil, fmt.Errorf("%s: tag value cannot end with a backslash: %s", name, v)
}
if len(k) == 0 || len(v) == 0 {
continue
}
taglen += 2 + len(escape(k, "tagkey")) + len(escape(v, "tagval"))
}
m.tags = make([]byte, taglen)
i := 0
for k, v := range tags {
if len(k) == 0 || len(v) == 0 {
continue
}
m.tags[i] = ','
i++
i += copy(m.tags[i:], escape(k, "tagkey"))
m.tags[i] = '='
i++
i += copy(m.tags[i:], escape(v, "tagval"))
}
// pre-allocate capacity of the fields slice
fieldlen := 0
for k := range fields {
if strings.HasSuffix(k, `\`) {
return nil, fmt.Errorf("%s: field key cannot end with a backslash: %s", name, k)
}
// 10 bytes is completely arbitrary, but will at least prevent some
// amount of allocations. There's a small possibility this will create
// slightly more allocations for a metric that has many short fields.
fieldlen += len(k) + 10
}
m.fields = make([]byte, 0, fieldlen)
i = 0
for k, v := range fields {
if i != 0 {
m.fields = append(m.fields, ',')
}
m.fields = appendField(m.fields, k, v)
i++
}
return m, nil
}
// indexUnescapedByte finds the index of the first byte equal to b in buf that
// is not escaped. Does not allow the escape char to be escaped. Returns -1 if
// not found.
func indexUnescapedByte(buf []byte, b byte) int {
var keyi int
for {
i := bytes.IndexByte(buf[keyi:], b)
if i == -1 {
return -1
} else if i == 0 {
break
}
keyi += i
if buf[keyi-1] != '\\' {
break
} else {
keyi++
}
}
return keyi
}
// indexUnescapedByteBackslashEscaping finds the index of the first byte equal
// to b in buf that is not escaped. Allows for the escape char `\` to be
// escaped. Returns -1 if not found.
func indexUnescapedByteBackslashEscaping(buf []byte, b byte) int {
var keyi int
for {
i := bytes.IndexByte(buf[keyi:], b)
if i == -1 {
return -1
} else if i == 0 {
break
}
keyi += i
if countBackslashes(buf, keyi-1)%2 == 0 {
break
} else {
keyi++
}
}
return keyi
}
// countBackslashes counts the number of preceding backslashes starting at
// the 'start' index.
func countBackslashes(buf []byte, index int) int {
var count int
for {
if index < 0 {
return count
}
if buf[index] == '\\' {
count++
index--
} else {
break
}
}
return count
}
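Two illustrative calls showing the difference between the helpers (assumes fmt is imported):
i1 := indexUnescapedByte([]byte(`a\,b,c`), ',') // 4: the first ',' is escaped
i2 := indexUnescapedByteBackslashEscaping([]byte(`a\\,b`), ',') // 3: the backslash before ',' is itself escaped
fmt.Println(i1, i2)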
type metric struct {
name []byte
tags []byte
fields []byte
t []byte
mType telegraf.ValueType
aggregate bool
// cached values for reuse in "get" functions
hashID uint64
nsec int64
}
func (m *metric) String() string {
return string(m.name) + string(m.tags) + " " + string(m.fields) + " " + string(m.t) + "\n"
}
func (m *metric) SetAggregate(b bool) {
m.aggregate = b
}
func (m *metric) IsAggregate() bool {
return m.aggregate
}
func (m *metric) Type() telegraf.ValueType {
return m.mType
}
func (m *metric) Len() int {
// 3 is for 2 spaces surrounding the fields array + newline at the end.
return len(m.name) + len(m.tags) + len(m.fields) + len(m.t) + 3
}
func (m *metric) Serialize() []byte {
tmp := make([]byte, m.Len())
i := 0
i += copy(tmp[i:], m.name)
i += copy(tmp[i:], m.tags)
tmp[i] = ' '
i++
i += copy(tmp[i:], m.fields)
tmp[i] = ' '
i++
i += copy(tmp[i:], m.t)
tmp[i] = '\n'
return tmp
}
func (m *metric) SerializeTo(dst []byte) int {
i := 0
if i >= len(dst) {
return i
}
i += copy(dst[i:], m.name)
if i >= len(dst) {
return i
}
i += copy(dst[i:], m.tags)
if i >= len(dst) {
return i
}
dst[i] = ' '
i++
if i >= len(dst) {
return i
}
i += copy(dst[i:], m.fields)
if i >= len(dst) {
return i
}
dst[i] = ' '
i++
if i >= len(dst) {
return i
}
i += copy(dst[i:], m.t)
if i >= len(dst) {
return i
}
dst[i] = '\n'
return i + 1
}
func (m *metric) Split(maxSize int) []telegraf.Metric {
if m.Len() <= maxSize {
return []telegraf.Metric{m}
}
var out []telegraf.Metric
// constant number of bytes for each metric (in addition to field bytes)
constant := len(m.name) + len(m.tags) + len(m.t) + 3
// currently selected fields
fields := make([]byte, 0, maxSize)
i := 0
for {
if i >= len(m.fields) {
// hit the end of the field byte slice
if len(fields) > 0 {
out = append(out, copyWith(m.name, m.tags, fields, m.t))
}
break
}
// find the end of the next field
j := indexUnescapedByte(m.fields[i:], ',')
if j == -1 {
j = len(m.fields)
} else {
j += i
}
// if true, then we need to create a metric _not_ including the currently
// selected field
if len(m.fields[i:j])+len(fields)+constant >= maxSize {
// if false, then we'll create a metric including the currently
// selected field anyways. This means that the given maxSize is too
// small for a single field to fit.
if len(fields) > 0 {
out = append(out, copyWith(m.name, m.tags, fields, m.t))
}
fields = make([]byte, 0, maxSize)
}
if len(fields) > 0 {
fields = append(fields, ',')
}
fields = append(fields, m.fields[i:j]...)
i = j + 1
}
return out
}
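A worked sketch (field order varies with map iteration, but the chunk count does not; assumes fmt is imported):
m, _ := New("cpu", map[string]string{"host": "localhost"},
map[string]interface{}{"a": int64(1), "b": int64(2)}, time.Unix(0, 42))
for _, part := range m.Split(30) { // yields two metrics, one field each
fmt.Print(part.String())
}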
func (m *metric) Fields() map[string]interface{} {
fieldMap := map[string]interface{}{}
i := 0
for {
if i >= len(m.fields) {
break
}
// end index of field key
i1 := indexUnescapedByte(m.fields[i:], '=')
if i1 == -1 {
break
}
// start index of field value
i2 := i1 + 1
// end index of field value
var i3 int
if m.fields[i:][i2] == '"' {
i3 = indexUnescapedByteBackslashEscaping(m.fields[i:][i2+1:], '"')
if i3 == -1 {
i3 = len(m.fields[i:])
}
i3 += i2 + 2 // increment index to the comma
} else {
i3 = indexUnescapedByte(m.fields[i:], ',')
if i3 == -1 {
i3 = len(m.fields[i:])
}
}
switch m.fields[i:][i2] {
case '"':
// string field
fieldMap[unescape(string(m.fields[i:][0:i1]), "fieldkey")] = unescape(string(m.fields[i:][i2+1:i3-1]), "fieldval")
case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
// number field
switch m.fields[i:][i3-1] {
case 'i':
// integer field
n, err := parseIntBytes(m.fields[i:][i2:i3-1], 10, 64)
if err == nil {
fieldMap[unescape(string(m.fields[i:][0:i1]), "fieldkey")] = n
} else {
// TODO handle error or just ignore field silently?
}
default:
// float field
n, err := parseFloatBytes(m.fields[i:][i2:i3], 64)
if err == nil {
fieldMap[unescape(string(m.fields[i:][0:i1]), "fieldkey")] = n
} else {
// TODO handle error or just ignore field silently?
}
}
case 'T', 't':
fieldMap[unescape(string(m.fields[i:][0:i1]), "fieldkey")] = true
case 'F', 'f':
fieldMap[unescape(string(m.fields[i:][0:i1]), "fieldkey")] = false
default:
// TODO handle unsupported field type
}
i += i3 + 1
}
return fieldMap
}
func (m *metric) Tags() map[string]string {
tagMap := map[string]string{}
if len(m.tags) == 0 {
return tagMap
}
i := 0
for {
// start index of tag key
i0 := indexUnescapedByte(m.tags[i:], ',') + 1
if i0 == 0 {
// didn't find a tag start
break
}
// end index of tag key
i1 := indexUnescapedByte(m.tags[i:], '=')
// start index of tag value
i2 := i1 + 1
// end index of tag value (starting from i2)
i3 := indexUnescapedByte(m.tags[i+i2:], ',')
if i3 == -1 {
tagMap[unescape(string(m.tags[i:][i0:i1]), "tagkey")] = unescape(string(m.tags[i:][i2:]), "tagval")
break
}
tagMap[unescape(string(m.tags[i:][i0:i1]), "tagkey")] = unescape(string(m.tags[i:][i2:i2+i3]), "tagval")
// increment start index for the next tag
i += i2 + i3
}
return tagMap
}
func (m *metric) Name() string {
return unescape(string(m.name), "name")
}
func (m *metric) Time() time.Time {
// assume metric has been verified already and ignore error:
if m.nsec == 0 {
m.nsec, _ = parseIntBytes(m.t, 10, 64)
}
return time.Unix(0, m.nsec)
}
func (m *metric) UnixNano() int64 {
// assume metric has been verified already and ignore error:
if m.nsec == 0 {
m.nsec, _ = parseIntBytes(m.t, 10, 64)
}
return m.nsec
}
func (m *metric) SetName(name string) {
m.hashID = 0
m.name = []byte(nameEscaper.Replace(name))
}
func (m *metric) SetPrefix(prefix string) {
m.hashID = 0
m.name = append([]byte(nameEscaper.Replace(prefix)), m.name...)
}
func (m *metric) SetSuffix(suffix string) {
m.hashID = 0
m.name = append(m.name, []byte(nameEscaper.Replace(suffix))...)
}
func (m *metric) AddTag(key, value string) {
m.RemoveTag(key)
m.tags = append(m.tags, []byte(","+escape(key, "tagkey")+"="+escape(value, "tagval"))...)
}
func (m *metric) HasTag(key string) bool {
i := bytes.Index(m.tags, []byte(escape(key, "tagkey")+"="))
if i == -1 {
return false
}
return true
}
func (m *metric) RemoveTag(key string) {
m.hashID = 0
i := bytes.Index(m.tags, []byte(escape(key, "tagkey")+"="))
if i == -1 {
return
}
tmp := m.tags[0 : i-1]
j := indexUnescapedByte(m.tags[i:], ',')
if j != -1 {
tmp = append(tmp, m.tags[i+j:]...)
}
m.tags = tmp
return
}
func (m *metric) AddField(key string, value interface{}) {
m.fields = append(m.fields, ',')
m.fields = appendField(m.fields, key, value)
}
func (m *metric) HasField(key string) bool {
i := bytes.Index(m.fields, []byte(escape(key, "tagkey")+"="))
if i == -1 {
return false
}
return true
}
func (m *metric) RemoveField(key string) error {
i := bytes.Index(m.fields, []byte(escape(key, "tagkey")+"="))
if i == -1 {
return nil
}
var tmp []byte
if i != 0 {
tmp = m.fields[0 : i-1]
}
j := indexUnescapedByte(m.fields[i:], ',')
if j != -1 {
tmp = append(tmp, m.fields[i+j:]...)
}
if len(tmp) == 0 {
return fmt.Errorf("Metric cannot remove final field: %s", m.fields)
}
m.fields = tmp
return nil
}
func (m *metric) Copy() telegraf.Metric {
return copyWith(m.name, m.tags, m.fields, m.t)
}
func copyWith(name, tags, fields, t []byte) telegraf.Metric {
out := metric{
name: make([]byte, len(name)),
tags: make([]byte, len(tags)),
fields: make([]byte, len(fields)),
t: make([]byte, len(t)),
}
copy(out.name, name)
copy(out.tags, tags)
copy(out.fields, fields)
copy(out.t, t)
return &out
}
func (m *metric) HashID() uint64 {
if m.hashID == 0 {
h := fnv.New64a()
h.Write(m.name)
tags := m.Tags()
tmp := make([]string, len(tags))
i := 0
for k, v := range tags {
tmp[i] = k + v
i++
}
sort.Strings(tmp)
for _, s := range tmp {
h.Write([]byte(s))
}
m.hashID = h.Sum64()
}
return m.hashID
}
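Sketch: the hash identifies a series, so equal name and tags yield equal IDs regardless of field values (assumes fmt is imported).
m1, _ := New("cpu", map[string]string{"host": "a"}, map[string]interface{}{"v": int64(1)}, time.Now())
m2, _ := New("cpu", map[string]string{"host": "a"}, map[string]interface{}{"v": int64(2)}, time.Now())
fmt.Println(m1.HashID() == m2.HashID()) // true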
func appendField(b []byte, k string, v interface{}) []byte {
if v == nil {
return b
}
b = append(b, []byte(escape(k, "tagkey")+"=")...)
// check popular types first
switch v := v.(type) {
case float64:
b = strconv.AppendFloat(b, v, 'f', -1, 64)
case int64:
b = strconv.AppendInt(b, v, 10)
b = append(b, 'i')
case string:
b = append(b, '"')
b = append(b, []byte(escape(v, "fieldval"))...)
b = append(b, '"')
case bool:
b = strconv.AppendBool(b, v)
case int32:
b = strconv.AppendInt(b, int64(v), 10)
b = append(b, 'i')
case int16:
b = strconv.AppendInt(b, int64(v), 10)
b = append(b, 'i')
case int8:
b = strconv.AppendInt(b, int64(v), 10)
b = append(b, 'i')
case int:
b = strconv.AppendInt(b, int64(v), 10)
b = append(b, 'i')
case uint64:
// Cap uints above the maximum int value
var intv int64
if v <= uint64(MaxInt) {
intv = int64(v)
} else {
intv = int64(MaxInt)
}
b = strconv.AppendInt(b, intv, 10)
b = append(b, 'i')
case uint32:
b = strconv.AppendInt(b, int64(v), 10)
b = append(b, 'i')
case uint16:
b = strconv.AppendInt(b, int64(v), 10)
b = append(b, 'i')
case uint8:
b = strconv.AppendInt(b, int64(v), 10)
b = append(b, 'i')
case uint:
// Cap uints above the maximum int value
var intv int64
if v <= uint(MaxInt) {
intv = int64(v)
} else {
intv = int64(MaxInt)
}
b = strconv.AppendInt(b, intv, 10)
b = append(b, 'i')
case float32:
b = strconv.AppendFloat(b, float64(v), 'f', -1, 32)
case []byte:
b = append(b, v...)
default:
// Can't determine the type, so convert to string
b = append(b, '"')
b = append(b, []byte(escape(fmt.Sprintf("%v", v), "fieldval"))...)
b = append(b, '"')
}
return b
}
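Illustrative renderings (a sketch; note the key is escaped with the shared tag-key escaper, and string values use the field-value escaper):
fmt.Println(string(appendField(nil, "f", float64(1.5)))) // f=1.5
fmt.Println(string(appendField(nil, "i", int64(2)))) // i=2i
fmt.Println(string(appendField(nil, "s", "x y"))) // s="x y"
fmt.Println(string(appendField(nil, "b", true))) // b=true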


@@ -0,0 +1,148 @@
package metric
import (
"fmt"
"testing"
"time"
"github.com/influxdata/telegraf"
)
// vars for making sure that the compiler doesn't optimize out the benchmarks:
var (
s string
I interface{}
tags map[string]string
fields map[string]interface{}
)
func BenchmarkNewMetric(b *testing.B) {
var mt telegraf.Metric
for n := 0; n < b.N; n++ {
mt, _ = New("test_metric",
map[string]string{
"test_tag_1": "tag_value_1",
"test_tag_2": "tag_value_2",
"test_tag_3": "tag_value_3",
},
map[string]interface{}{
"string_field": "string",
"int_field": int64(1000),
"float_field": float64(2.1),
},
time.Now(),
)
}
s = string(mt.String())
}
func BenchmarkAddTag(b *testing.B) {
var mt telegraf.Metric
mt = &metric{
name: []byte("cpu"),
tags: []byte(",host=localhost"),
fields: []byte("a=101"),
t: []byte("1480614053000000000"),
}
for n := 0; n < b.N; n++ {
mt.AddTag("foo", "bar")
}
s = string(mt.String())
}
func BenchmarkSplit(b *testing.B) {
var mt telegraf.Metric
mt = &metric{
name: []byte("cpu"),
tags: []byte(",host=localhost"),
fields: []byte("a=101,b=10i,c=10101,d=101010,e=42"),
t: []byte("1480614053000000000"),
}
var metrics []telegraf.Metric
for n := 0; n < b.N; n++ {
metrics = mt.Split(60)
}
s = string(metrics[0].String())
}
func BenchmarkTags(b *testing.B) {
for n := 0; n < b.N; n++ {
var mt, _ = New("test_metric",
map[string]string{
"test_tag_1": "tag_value_1",
"test_tag_2": "tag_value_2",
"test_tag_3": "tag_value_3",
},
map[string]interface{}{
"string_field": "string",
"int_field": int64(1000),
"float_field": float64(2.1),
},
time.Now(),
)
tags = mt.Tags()
}
s = fmt.Sprint(tags)
}
func BenchmarkFields(b *testing.B) {
for n := 0; n < b.N; n++ {
var mt, _ = New("test_metric",
map[string]string{
"test_tag_1": "tag_value_1",
"test_tag_2": "tag_value_2",
"test_tag_3": "tag_value_3",
},
map[string]interface{}{
"string_field": "string",
"int_field": int64(1000),
"float_field": float64(2.1),
},
time.Now(),
)
fields = mt.Fields()
}
s = fmt.Sprint(fields)
}
func BenchmarkString(b *testing.B) {
mt, _ := New("test_metric",
map[string]string{
"test_tag_1": "tag_value_1",
"test_tag_2": "tag_value_2",
"test_tag_3": "tag_value_3",
},
map[string]interface{}{
"string_field": "string",
"int_field": int64(1000),
"float_field": float64(2.1),
},
time.Now(),
)
var S string
for n := 0; n < b.N; n++ {
S = mt.String()
}
s = S
}
func BenchmarkSerialize(b *testing.B) {
mt, _ := New("test_metric",
map[string]string{
"test_tag_1": "tag_value_1",
"test_tag_2": "tag_value_2",
"test_tag_3": "tag_value_3",
},
map[string]interface{}{
"string_field": "string",
"int_field": int64(1000),
"float_field": float64(2.1),
},
time.Now(),
)
var B []byte
for n := 0; n < b.N; n++ {
B = mt.Serialize()
}
s = string(B)
}

metric/metric_test.go Normal file

@@ -0,0 +1,737 @@
package metric
import (
"fmt"
"math"
"regexp"
"testing"
"time"
"github.com/influxdata/telegraf"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestNewMetric(t *testing.T) {
now := time.Now()
tags := map[string]string{
"host": "localhost",
"datacenter": "us-east-1",
}
fields := map[string]interface{}{
"usage_idle": float64(99),
"usage_busy": float64(1),
}
m, err := New("cpu", tags, fields, now)
assert.NoError(t, err)
assert.Equal(t, telegraf.Untyped, m.Type())
assert.Equal(t, tags, m.Tags())
assert.Equal(t, fields, m.Fields())
assert.Equal(t, "cpu", m.Name())
assert.Equal(t, now.UnixNano(), m.Time().UnixNano())
assert.Equal(t, now.UnixNano(), m.UnixNano())
}
func TestNewErrors(t *testing.T) {
// creating a metric with an empty name produces an error:
m, err := New(
"",
map[string]string{
"datacenter": "us-east-1",
"mytag": "foo",
"another": "tag",
},
map[string]interface{}{
"value": float64(1),
},
time.Now(),
)
assert.Error(t, err)
assert.Nil(t, m)
// creating a metric with empty fields produces an error:
m, err = New(
"foobar",
map[string]string{
"datacenter": "us-east-1",
"mytag": "foo",
"another": "tag",
},
map[string]interface{}{},
time.Now(),
)
assert.Error(t, err)
assert.Nil(t, m)
}
func TestNewMetric_Tags(t *testing.T) {
now := time.Now()
tags := map[string]string{
"host": "localhost",
"datacenter": "us-east-1",
}
fields := map[string]interface{}{
"value": float64(1),
}
m, err := New("cpu", tags, fields, now)
assert.NoError(t, err)
assert.True(t, m.HasTag("host"))
assert.True(t, m.HasTag("datacenter"))
m.AddTag("newtag", "foo")
assert.True(t, m.HasTag("newtag"))
m.RemoveTag("host")
assert.False(t, m.HasTag("host"))
assert.True(t, m.HasTag("newtag"))
assert.True(t, m.HasTag("datacenter"))
m.RemoveTag("datacenter")
assert.False(t, m.HasTag("datacenter"))
assert.True(t, m.HasTag("newtag"))
assert.Equal(t, map[string]string{"newtag": "foo"}, m.Tags())
m.RemoveTag("newtag")
assert.False(t, m.HasTag("newtag"))
assert.Equal(t, map[string]string{}, m.Tags())
assert.Equal(t, "cpu value=1 "+fmt.Sprint(now.UnixNano())+"\n", m.String())
}
func TestSerialize(t *testing.T) {
now := time.Now()
tags := map[string]string{
"datacenter": "us-east-1",
}
fields := map[string]interface{}{
"value": float64(1),
}
m, err := New("cpu", tags, fields, now)
assert.NoError(t, err)
assert.Equal(t,
[]byte("cpu,datacenter=us-east-1 value=1 "+fmt.Sprint(now.UnixNano())+"\n"),
m.Serialize())
m.RemoveTag("datacenter")
assert.Equal(t,
[]byte("cpu value=1 "+fmt.Sprint(now.UnixNano())+"\n"),
m.Serialize())
}
func TestHashID(t *testing.T) {
m, _ := New(
"cpu",
map[string]string{
"datacenter": "us-east-1",
"mytag": "foo",
"another": "tag",
},
map[string]interface{}{
"value": float64(1),
},
time.Now(),
)
hash := m.HashID()
// adding a field doesn't change the hash:
m.AddField("foo", int64(100))
assert.Equal(t, hash, m.HashID())
// removing a non-existent tag doesn't change the hash:
m.RemoveTag("no-op")
assert.Equal(t, hash, m.HashID())
// adding a tag does change it:
m.AddTag("foo", "bar")
assert.NotEqual(t, hash, m.HashID())
hash = m.HashID()
// removing a tag also changes it:
m.RemoveTag("mytag")
assert.NotEqual(t, hash, m.HashID())
}
func TestHashID_Consistency(t *testing.T) {
m, _ := New(
"cpu",
map[string]string{
"datacenter": "us-east-1",
"mytag": "foo",
"another": "tag",
},
map[string]interface{}{
"value": float64(1),
},
time.Now(),
)
hash := m.HashID()
for i := 0; i < 1000; i++ {
m2, _ := New(
"cpu",
map[string]string{
"datacenter": "us-east-1",
"mytag": "foo",
"another": "tag",
},
map[string]interface{}{
"value": float64(1),
},
time.Now(),
)
assert.Equal(t, hash, m2.HashID())
}
}
func TestNewMetric_NameModifiers(t *testing.T) {
now := time.Now()
tags := map[string]string{}
fields := map[string]interface{}{
"value": float64(1),
}
m, err := New("cpu", tags, fields, now)
assert.NoError(t, err)
hash := m.HashID()
suffix := fmt.Sprintf(" value=1 %d\n", now.UnixNano())
assert.Equal(t, "cpu"+suffix, m.String())
m.SetPrefix("pre_")
assert.NotEqual(t, hash, m.HashID())
hash = m.HashID()
assert.Equal(t, "pre_cpu"+suffix, m.String())
m.SetSuffix("_post")
assert.NotEqual(t, hash, m.HashID())
hash = m.HashID()
assert.Equal(t, "pre_cpu_post"+suffix, m.String())
m.SetName("mem")
assert.NotEqual(t, hash, m.HashID())
assert.Equal(t, "mem"+suffix, m.String())
}
func TestNewMetric_FieldModifiers(t *testing.T) {
now := time.Now()
tags := map[string]string{
"host": "localhost",
}
fields := map[string]interface{}{
"value": float64(1),
}
m, err := New("cpu", tags, fields, now)
assert.NoError(t, err)
assert.True(t, m.HasField("value"))
assert.False(t, m.HasField("foo"))
m.AddField("newfield", "foo")
assert.True(t, m.HasField("newfield"))
assert.NoError(t, m.RemoveField("newfield"))
assert.False(t, m.HasField("newfield"))
// don't allow user to remove all fields:
assert.Error(t, m.RemoveField("value"))
m.AddField("value2", int64(101))
assert.NoError(t, m.RemoveField("value"))
assert.False(t, m.HasField("value"))
}
func TestNewMetric_Fields(t *testing.T) {
now := time.Now()
tags := map[string]string{
"host": "localhost",
}
fields := map[string]interface{}{
"float": float64(1),
"int": int64(1),
"bool": true,
"false": false,
"string": "test",
"quote_string": `x"y`,
"backslash_quote_string": `x\"y`,
"backslash": `x\y`,
"ends_with_backslash": `x\`,
}
m, err := New("cpu", tags, fields, now)
assert.NoError(t, err)
assert.Equal(t, fields, m.Fields())
}
func TestNewMetric_Time(t *testing.T) {
now := time.Now()
tags := map[string]string{
"host": "localhost",
}
fields := map[string]interface{}{
"float": float64(1),
"int": int64(1),
"bool": true,
"false": false,
"string": "test",
}
m, err := New("cpu", tags, fields, now)
assert.NoError(t, err)
m = m.Copy()
m2 := m.Copy()
assert.Equal(t, now.UnixNano(), m.Time().UnixNano())
assert.Equal(t, now.UnixNano(), m2.UnixNano())
}
func TestNewMetric_Copy(t *testing.T) {
now := time.Now()
tags := map[string]string{}
fields := map[string]interface{}{
"float": float64(1),
}
m, err := New("cpu", tags, fields, now)
assert.NoError(t, err)
m2 := m.Copy()
assert.Equal(t,
fmt.Sprintf("cpu float=1 %d\n", now.UnixNano()),
m.String())
m.AddTag("host", "localhost")
assert.Equal(t,
fmt.Sprintf("cpu,host=localhost float=1 %d\n", now.UnixNano()),
m.String())
assert.Equal(t,
fmt.Sprintf("cpu float=1 %d\n", now.UnixNano()),
m2.String())
}
func TestNewMetric_AllTypes(t *testing.T) {
now := time.Now()
tags := map[string]string{}
fields := map[string]interface{}{
"float64": float64(1),
"float32": float32(1),
"int64": int64(1),
"int32": int32(1),
"int16": int16(1),
"int8": int8(1),
"int": int(1),
"uint64": uint64(1),
"uint32": uint32(1),
"uint16": uint16(1),
"uint8": uint8(1),
"uint": uint(1),
"bytes": []byte("foo"),
"nil": nil,
"maxuint64": uint64(MaxInt) + 10,
"maxuint": uint(MaxInt) + 10,
"unsupported": []int{1, 2},
}
m, err := New("cpu", tags, fields, now)
assert.NoError(t, err)
assert.Contains(t, m.String(), "float64=1")
assert.Contains(t, m.String(), "float32=1")
assert.Contains(t, m.String(), "int64=1i")
assert.Contains(t, m.String(), "int32=1i")
assert.Contains(t, m.String(), "int16=1i")
assert.Contains(t, m.String(), "int8=1i")
assert.Contains(t, m.String(), "int=1i")
assert.Contains(t, m.String(), "uint64=1i")
assert.Contains(t, m.String(), "uint32=1i")
assert.Contains(t, m.String(), "uint16=1i")
assert.Contains(t, m.String(), "uint8=1i")
assert.Contains(t, m.String(), "uint=1i")
assert.NotContains(t, m.String(), "nil")
assert.Contains(t, m.String(), fmt.Sprintf("maxuint64=%di", MaxInt))
assert.Contains(t, m.String(), fmt.Sprintf("maxuint=%di", MaxInt))
}
func TestIndexUnescapedByte(t *testing.T) {
tests := []struct {
in []byte
b byte
expected int
}{
{
in: []byte(`foobar`),
b: 'b',
expected: 3,
},
{
in: []byte(`foo\bar`),
b: 'b',
expected: -1,
},
{
in: []byte(`foo\\bar`),
b: 'b',
expected: -1,
},
{
in: []byte(`foobar`),
b: 'f',
expected: 0,
},
{
in: []byte(`foobar`),
b: 'r',
expected: 5,
},
{
in: []byte(`\foobar`),
b: 'f',
expected: -1,
},
}
for _, test := range tests {
got := indexUnescapedByte(test.in, test.b)
assert.Equal(t, test.expected, got)
}
}
func TestNewGaugeMetric(t *testing.T) {
now := time.Now()
tags := map[string]string{
"host": "localhost",
"datacenter": "us-east-1",
}
fields := map[string]interface{}{
"usage_idle": float64(99),
"usage_busy": float64(1),
}
m, err := New("cpu", tags, fields, now, telegraf.Gauge)
assert.NoError(t, err)
assert.Equal(t, telegraf.Gauge, m.Type())
assert.Equal(t, tags, m.Tags())
assert.Equal(t, fields, m.Fields())
assert.Equal(t, "cpu", m.Name())
assert.Equal(t, now.UnixNano(), m.Time().UnixNano())
assert.Equal(t, now.UnixNano(), m.UnixNano())
}
func TestNewCounterMetric(t *testing.T) {
now := time.Now()
tags := map[string]string{
"host": "localhost",
"datacenter": "us-east-1",
}
fields := map[string]interface{}{
"usage_idle": float64(99),
"usage_busy": float64(1),
}
m, err := New("cpu", tags, fields, now, telegraf.Counter)
assert.NoError(t, err)
assert.Equal(t, telegraf.Counter, m.Type())
assert.Equal(t, tags, m.Tags())
assert.Equal(t, fields, m.Fields())
assert.Equal(t, "cpu", m.Name())
assert.Equal(t, now.UnixNano(), m.Time().UnixNano())
assert.Equal(t, now.UnixNano(), m.UnixNano())
}
// test splitting metric into various max lengths
func TestSplitMetric(t *testing.T) {
now := time.Unix(0, 1480940990034083306)
tags := map[string]string{
"host": "localhost",
}
fields := map[string]interface{}{
"float": float64(100001),
"int": int64(100001),
"bool": true,
"false": false,
"string": "test",
}
m, err := New("cpu", tags, fields, now)
assert.NoError(t, err)
split80 := m.Split(80)
assert.Len(t, split80, 2)
split70 := m.Split(70)
assert.Len(t, split70, 3)
split60 := m.Split(60)
assert.Len(t, split60, 5)
}
// test splitting metric into various max lengths
// use a simple regex check to verify that the split metrics are valid
func TestSplitMetric_RegexVerify(t *testing.T) {
now := time.Unix(0, 1480940990034083306)
tags := map[string]string{
"host": "localhost",
}
fields := map[string]interface{}{
"foo": float64(98934259085),
"bar": float64(19385292),
"number": float64(19385292),
"another": float64(19385292),
"n": float64(19385292),
}
m, err := New("cpu", tags, fields, now)
assert.NoError(t, err)
// verification regex
re := regexp.MustCompile(`cpu,host=localhost \w+=\d+(,\w+=\d+)* 1480940990034083306`)
split90 := m.Split(90)
assert.Len(t, split90, 2)
for _, splitM := range split90 {
assert.True(t, re.Match(splitM.Serialize()), splitM.String())
}
split70 := m.Split(70)
assert.Len(t, split70, 3)
for _, splitM := range split70 {
assert.True(t, re.Match(splitM.Serialize()), splitM.String())
}
split20 := m.Split(20)
assert.Len(t, split20, 5)
for _, splitM := range split20 {
assert.True(t, re.Match(splitM.Serialize()), splitM.String())
}
}
// test splitting metric even when given length is shorter than
// shortest possible length
// Split should split metric as short as possible, ie, 1 field per metric
func TestSplitMetric_TooShort(t *testing.T) {
now := time.Unix(0, 1480940990034083306)
tags := map[string]string{
"host": "localhost",
}
fields := map[string]interface{}{
"float": float64(100001),
"int": int64(100001),
"bool": true,
"false": false,
"string": "test",
}
m, err := New("cpu", tags, fields, now)
assert.NoError(t, err)
split := m.Split(10)
assert.Len(t, split, 5)
strings := make([]string, 5)
for i, splitM := range split {
strings[i] = splitM.String()
}
assert.Contains(t, strings, "cpu,host=localhost float=100001 1480940990034083306\n")
assert.Contains(t, strings, "cpu,host=localhost int=100001i 1480940990034083306\n")
assert.Contains(t, strings, "cpu,host=localhost bool=true 1480940990034083306\n")
assert.Contains(t, strings, "cpu,host=localhost false=false 1480940990034083306\n")
assert.Contains(t, strings, "cpu,host=localhost string=\"test\" 1480940990034083306\n")
}
func TestSplitMetric_NoOp(t *testing.T) {
now := time.Unix(0, 1480940990034083306)
tags := map[string]string{
"host": "localhost",
}
fields := map[string]interface{}{
"float": float64(100001),
"int": int64(100001),
"bool": true,
"false": false,
"string": "test",
}
m, err := New("cpu", tags, fields, now)
assert.NoError(t, err)
split := m.Split(1000)
assert.Len(t, split, 1)
assert.Equal(t, m, split[0])
}
func TestSplitMetric_OneField(t *testing.T) {
now := time.Unix(0, 1480940990034083306)
tags := map[string]string{
"host": "localhost",
}
fields := map[string]interface{}{
"float": float64(100001),
}
m, err := New("cpu", tags, fields, now)
assert.NoError(t, err)
assert.Equal(t, "cpu,host=localhost float=100001 1480940990034083306\n", m.String())
split := m.Split(1000)
assert.Len(t, split, 1)
assert.Equal(t, "cpu,host=localhost float=100001 1480940990034083306\n", split[0].String())
split = m.Split(1)
assert.Len(t, split, 1)
assert.Equal(t, "cpu,host=localhost float=100001 1480940990034083306\n", split[0].String())
split = m.Split(40)
assert.Len(t, split, 1)
assert.Equal(t, "cpu,host=localhost float=100001 1480940990034083306\n", split[0].String())
}
func TestSplitMetric_ExactSize(t *testing.T) {
now := time.Unix(0, 1480940990034083306)
tags := map[string]string{
"host": "localhost",
}
fields := map[string]interface{}{
"float": float64(100001),
"int": int64(100001),
"bool": true,
"false": false,
"string": "test",
}
m, err := New("cpu", tags, fields, now)
assert.NoError(t, err)
actual := m.Split(m.Len())
// check that no copy was made
require.Equal(t, &m, &actual[0])
}
func TestSplitMetric_NoRoomForNewline(t *testing.T) {
now := time.Unix(0, 1480940990034083306)
tags := map[string]string{
"host": "localhost",
}
fields := map[string]interface{}{
"float": float64(100001),
"int": int64(100001),
"bool": true,
"false": false,
}
m, err := New("cpu", tags, fields, now)
assert.NoError(t, err)
actual := m.Split(m.Len() - 1)
require.Equal(t, 2, len(actual))
}
func TestNewMetricAggregate(t *testing.T) {
now := time.Now()
tags := map[string]string{
"host": "localhost",
}
fields := map[string]interface{}{
"usage_idle": float64(99),
}
m, err := New("cpu", tags, fields, now)
assert.NoError(t, err)
assert.False(t, m.IsAggregate())
m.SetAggregate(true)
assert.True(t, m.IsAggregate())
}
func TestNewMetricString(t *testing.T) {
now := time.Now()
tags := map[string]string{
"host": "localhost",
}
fields := map[string]interface{}{
"usage_idle": float64(99),
}
m, err := New("cpu", tags, fields, now)
assert.NoError(t, err)
lineProto := fmt.Sprintf("cpu,host=localhost usage_idle=99 %d\n",
now.UnixNano())
assert.Equal(t, lineProto, m.String())
}
func TestNewMetricFailNaN(t *testing.T) {
now := time.Now()
tags := map[string]string{
"host": "localhost",
}
fields := map[string]interface{}{
"usage_idle": math.NaN(),
}
_, err := New("cpu", tags, fields, now)
assert.NoError(t, err)
}
func TestEmptyTagValueOrKey(t *testing.T) {
now := time.Now()
tags := map[string]string{
"host": "localhost",
"emptytag": "",
"": "valuewithoutkey",
}
fields := map[string]interface{}{
"usage_idle": float64(99),
}
m, err := New("cpu", tags, fields, now)
assert.True(t, m.HasTag("host"))
assert.False(t, m.HasTag("emptytag"))
assert.Equal(t,
fmt.Sprintf("cpu,host=localhost usage_idle=99 %d\n", now.UnixNano()),
m.String())
assert.NoError(t, err)
}
func TestNewMetric_TrailingSlash(t *testing.T) {
now := time.Now()
tests := []struct {
name string
tags map[string]string
fields map[string]interface{}
}{
{
name: `cpu\`,
fields: map[string]interface{}{
"value": int64(42),
},
},
{
name: "cpu",
fields: map[string]interface{}{
`value\`: "x",
},
},
{
name: "cpu",
tags: map[string]string{
`host\`: "localhost",
},
fields: map[string]interface{}{
"value": int64(42),
},
},
{
name: "cpu",
tags: map[string]string{
"host": `localhost\`,
},
fields: map[string]interface{}{
"value": int64(42),
},
},
}
for _, tc := range tests {
_, err := New(tc.name, tc.tags, tc.fields, now)
assert.Error(t, err)
}
}

metric/parse.go Normal file

@@ -0,0 +1,680 @@
package metric
import (
"bytes"
"errors"
"fmt"
"strconv"
"time"
"github.com/influxdata/telegraf"
)
var (
ErrInvalidNumber = errors.New("invalid number")
)
const (
// the number of characters for the largest possible int64 (9223372036854775807)
maxInt64Digits = 19
// the number of characters for the smallest possible int64 (-9223372036854775808)
minInt64Digits = 20
// the number of characters required for the largest float64 before a range check
// would occur during parsing
maxFloat64Digits = 25
// the number of characters required for the smallest float64 before a range check
// would occur during parsing
minFloat64Digits = 27
MaxKeyLength = 65535
)
// The following constants allow us to specify which state to move to
// next, when scanning sections of a Point.
const (
tagKeyState = iota
tagValueState
fieldsState
)
func Parse(buf []byte) ([]telegraf.Metric, error) {
return ParseWithDefaultTimePrecision(buf, time.Now(), "")
}
func ParseWithDefaultTime(buf []byte, t time.Time) ([]telegraf.Metric, error) {
return ParseWithDefaultTimePrecision(buf, t, "")
}
func ParseWithDefaultTimePrecision(
buf []byte,
t time.Time,
precision string,
) ([]telegraf.Metric, error) {
if len(buf) == 0 {
return []telegraf.Metric{}, nil
}
if len(buf) <= 6 {
return []telegraf.Metric{}, makeError("buffer too short", buf, 0)
}
metrics := make([]telegraf.Metric, 0, bytes.Count(buf, []byte("\n"))+1)
var errStr string
i := 0
for {
j := bytes.IndexByte(buf[i:], '\n')
if j == -1 {
break
}
if len(buf[i:i+j]) < 2 {
i += j + 1 // increment i past the previous newline
continue
}
m, err := parseMetric(buf[i:i+j], t, precision)
if err != nil {
i += j + 1 // increment i past the previous newline
errStr += " " + err.Error()
continue
}
i += j + 1 // increment i past the previous newline
metrics = append(metrics, m)
}
if len(errStr) > 0 {
return metrics, errors.New(errStr)
}
return metrics, nil
}
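Usage sketch (assumes fmt is imported): input must be newline-terminated, since the scanner only consumes up to each '\n'.
ms, err := Parse([]byte("cpu,host=a usage_idle=99 1480940990034083306\n"))
if err == nil && len(ms) == 1 {
fmt.Println(ms[0].Name(), ms[0].Fields()) // cpu map[usage_idle:99]
}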
func parseMetric(buf []byte,
defaultTime time.Time,
precision string,
) (telegraf.Metric, error) {
var dTime string
// scan the first block which is measurement[,tag1=value1,tag2=value2...]
pos, key, err := scanKey(buf, 0)
if err != nil {
return nil, err
}
// measurement name is required
if len(key) == 0 {
return nil, fmt.Errorf("missing measurement")
}
if len(key) > MaxKeyLength {
return nil, fmt.Errorf("max key length exceeded: %v > %v", len(key), MaxKeyLength)
}
// scan the second block which is field1=value1[,field2=value2,...]
pos, fields, err := scanFields(buf, pos)
if err != nil {
return nil, err
}
// at least one field is required
if len(fields) == 0 {
return nil, fmt.Errorf("missing fields")
}
// scan the last block which is an optional integer timestamp
pos, ts, err := scanTime(buf, pos)
if err != nil {
return nil, err
}
// apply precision multiplier
var nsec int64
multiplier := getPrecisionMultiplier(precision)
if len(ts) > 0 && multiplier > 1 {
tsint, err := parseIntBytes(ts, 10, 64)
if err != nil {
return nil, err
}
nsec = multiplier * tsint
ts = []byte(strconv.FormatInt(nsec, 10))
}
m := &metric{
fields: fields,
t: ts,
nsec: nsec,
}
// parse out the measurement name
// namei is the index at which the "name" ends
namei := indexUnescapedByte(key, ',')
if namei < 1 {
// no tags
m.name = key
} else {
m.name = key[0:namei]
m.tags = key[namei:]
}
if len(m.t) == 0 {
if len(dTime) == 0 {
dTime = fmt.Sprint(defaultTime.UnixNano())
}
// use default time
m.t = []byte(dTime)
}
// here we copy on return because this allows us to later call
// AddTag, AddField, RemoveTag, RemoveField, etc. without worrying about
// modifying 'tag' bytes having an effect on 'field' bytes, for example.
return m.Copy(), nil
}
// scanKey scans buf starting at i for the measurement and tag portion of the point.
// It returns the ending position and the byte slice of key within buf. If there
// are tags, they will be sorted if they are not already.
func scanKey(buf []byte, i int) (int, []byte, error) {
start := skipWhitespace(buf, i)
i = start
// First scan the Point's measurement.
state, i, err := scanMeasurement(buf, i)
if err != nil {
return i, buf[start:i], err
}
// Optionally scan tags if needed.
if state == tagKeyState {
i, err = scanTags(buf, i)
if err != nil {
return i, buf[start:i], err
}
}
return i, buf[start:i], nil
}
// scanMeasurement examines the measurement part of a Point, returning
// the next state to move to, and the current location in the buffer.
func scanMeasurement(buf []byte, i int) (int, int, error) {
// Check first byte of measurement, anything except a comma is fine.
// It can't be a space, since whitespace is stripped prior to this
// function call.
if i >= len(buf) || buf[i] == ',' {
return -1, i, makeError("missing measurement", buf, i)
}
for {
i++
if i >= len(buf) {
// cpu
return -1, i, makeError("missing fields", buf, i)
}
if buf[i-1] == '\\' {
// Skip character (it's escaped).
continue
}
// Unescaped comma; move onto scanning the tags.
if buf[i] == ',' {
return tagKeyState, i + 1, nil
}
// Unescaped space; move onto scanning the fields.
if buf[i] == ' ' {
// cpu value=1.0
return fieldsState, i, nil
}
}
}
// scanTags examines all the tags in a Point, returning the location
// in buf where to start examining the Point fields.
func scanTags(buf []byte, i int) (int, error) {
var (
err error
state = tagKeyState
)
for {
switch state {
case tagKeyState:
i, err = scanTagsKey(buf, i)
state = tagValueState // tag value always follows a tag key
case tagValueState:
state, i, err = scanTagsValue(buf, i)
case fieldsState:
return i, nil
}
if err != nil {
return i, err
}
}
}
// scanTagsKey scans each character in a tag key.
func scanTagsKey(buf []byte, i int) (int, error) {
// First character of the key.
if i >= len(buf) || buf[i] == ' ' || buf[i] == ',' || buf[i] == '=' {
// cpu,{'', ' ', ',', '='}
return i, makeError("missing tag key", buf, i)
}
// Examine each character in the tag key until we hit an unescaped
// equals (the tag value), or we hit an error (i.e., unescaped
// space or comma).
for {
i++
// Either we reached the end of the buffer or we hit an
// unescaped comma or space.
if i >= len(buf) ||
((buf[i] == ' ' || buf[i] == ',') && buf[i-1] != '\\') {
// cpu,tag{'', ' ', ','}
return i, makeError("missing tag value", buf, i)
}
if buf[i] == '=' && buf[i-1] != '\\' {
// cpu,tag=
return i + 1, nil
}
}
}
// scanTagsValue scans each character in a tag value.
func scanTagsValue(buf []byte, i int) (int, int, error) {
// Tag value cannot be empty.
if i >= len(buf) || buf[i] == ',' || buf[i] == ' ' {
// cpu,tag={',', ' '}
return -1, i, makeError("missing tag value", buf, i)
}
// Examine each character in the tag value until we hit an unescaped
// comma (move onto next tag key), an unescaped space (move onto
// fields), or we error out.
for {
i++
if i >= len(buf) {
// cpu,tag=value
return -1, i, makeError("missing fields", buf, i)
}
// An unescaped equals sign is an invalid tag value.
if buf[i] == '=' && buf[i-1] != '\\' {
// cpu,tag={'=', 'fo=o'}
return -1, i, makeError("invalid tag format", buf, i)
}
if buf[i] == ',' && buf[i-1] != '\\' {
// cpu,tag=foo,
return tagKeyState, i + 1, nil
}
// cpu,tag=foo value=1.0
// cpu, tag=foo\= value=1.0
if buf[i] == ' ' && buf[i-1] != '\\' {
return fieldsState, i, nil
}
}
}
// scanFields scans buf, starting at i for the fields section of a point. It returns
// the ending position and the byte slice of the fields within buf
func scanFields(buf []byte, i int) (int, []byte, error) {
start := skipWhitespace(buf, i)
i = start
// track how many '"' we've seen since last '='
quotes := 0
// tracks how many '=' we've seen
equals := 0
// tracks how many commas we've seen
commas := 0
for {
// reached the end of buf?
if i >= len(buf) {
break
}
// escaped characters?
if buf[i] == '\\' && i+1 < len(buf) {
i += 2
continue
}
// If the value is quoted, scan until we get to the end quote
// Only count quotes in the field value since quotes are not significant
// in the field key
if buf[i] == '"' && equals > commas {
i++
quotes++
if quotes > 2 {
break
}
continue
}
// If we see an =, ensure that there is at least one char before and after it
if buf[i] == '=' && quotes != 1 {
quotes = 0
equals++
// check for "... =123" but allow "a\ =123"
if buf[i-1] == ' ' && buf[i-2] != '\\' {
return i, buf[start:i], makeError("missing field key", buf, i)
}
// check for "...a=123,=456" but allow "a=123,a\,=456"
if buf[i-1] == ',' && buf[i-2] != '\\' {
return i, buf[start:i], makeError("missing field key", buf, i)
}
// check for "... value="
if i+1 >= len(buf) {
return i, buf[start:i], makeError("missing field value", buf, i)
}
// check for "... value=,value2=..."
if buf[i+1] == ',' || buf[i+1] == ' ' {
return i, buf[start:i], makeError("missing field value", buf, i)
}
if isNumeric(buf[i+1]) || buf[i+1] == '-' || buf[i+1] == 'N' || buf[i+1] == 'n' {
var err error
i, err = scanNumber(buf, i+1)
if err != nil {
return i, buf[start:i], err
}
continue
}
// If next byte is not a double-quote, the value must be a boolean
if buf[i+1] != '"' {
var err error
i, _, err = scanBoolean(buf, i+1)
if err != nil {
return i, buf[start:i], err
}
continue
}
}
if buf[i] == ',' && quotes != 1 {
commas++
}
// reached end of block?
if buf[i] == ' ' && quotes != 1 {
break
}
i++
}
if quotes != 0 && quotes != 2 {
return i, buf[start:i], makeError("unbalanced quotes", buf, i)
}
// check that all field sections had key and values (e.g. prevent "a=1,b")
if equals == 0 || commas != equals-1 {
return i, buf[start:i], makeError("invalid field format", buf, i)
}
return i, buf[start:i], nil
}
// scanTime scans buf, starting at i for the time section of a point. It
// returns the ending position and the byte slice of the timestamp within buf
// and an error if the timestamp is not in the correct numeric format.
func scanTime(buf []byte, i int) (int, []byte, error) {
start := skipWhitespace(buf, i)
i = start
for {
// reached the end of buf?
if i >= len(buf) {
break
}
// Reached end of block or trailing whitespace?
if buf[i] == '\n' || buf[i] == ' ' {
break
}
// Handle negative timestamps
if i == start && buf[i] == '-' {
i++
continue
}
// Timestamps should be integers, make sure they are so we don't need
// to actually parse the timestamp until needed.
if buf[i] < '0' || buf[i] > '9' {
return i, buf[start:i], makeError("invalid timestamp", buf, i)
}
i++
}
return i, buf[start:i], nil
}
func isNumeric(b byte) bool {
return (b >= '0' && b <= '9') || b == '.'
}
// scanNumber returns the end position within buf, starting at i, after
// scanning over buf for an integer or float. It returns an
// error if an invalid number is scanned.
func scanNumber(buf []byte, i int) (int, error) {
start := i
var isInt bool
// Is negative number?
if i < len(buf) && buf[i] == '-' {
i++
// There must be more characters now, as just '-' is illegal.
if i == len(buf) {
return i, ErrInvalidNumber
}
}
// whether we've seen a decimal point
decimal := false
// indicates the number is float in scientific notation
scientific := false
for {
if i >= len(buf) {
break
}
if buf[i] == ',' || buf[i] == ' ' {
break
}
if buf[i] == 'i' && i > start && !isInt {
isInt = true
i++
continue
}
if buf[i] == '.' {
// Can't have more than 1 decimal (e.g. 1.1.1 should fail)
if decimal {
return i, ErrInvalidNumber
}
decimal = true
}
// `e` is valid for floats but not as the first char
if i > start && (buf[i] == 'e' || buf[i] == 'E') {
scientific = true
i++
continue
}
// + and - are only valid at this point if they follow an e (scientific notation)
if (buf[i] == '+' || buf[i] == '-') && (buf[i-1] == 'e' || buf[i-1] == 'E') {
i++
continue
}
// NaN is an unsupported value
if i+2 < len(buf) && (buf[i] == 'N' || buf[i] == 'n') {
return i, ErrInvalidNumber
}
if !isNumeric(buf[i]) {
return i, ErrInvalidNumber
}
i++
}
if isInt && (decimal || scientific) {
return i, ErrInvalidNumber
}
numericDigits := i - start
if isInt {
numericDigits--
}
if decimal {
numericDigits--
}
if buf[start] == '-' {
numericDigits--
}
if numericDigits == 0 {
return i, ErrInvalidNumber
}
// It's more common that numbers will be within min/max range for their type but we need to prevent
// out-of-range numbers from being parsed successfully. This uses some simple heuristics to decide
// if we should parse the number to the actual type. It does not do it all the time because it incurs
// extra allocations and we end up converting the type again when writing points to disk.
if isInt {
// Make sure the last char is an 'i' for integers (e.g. 9i10 is not valid)
if buf[i-1] != 'i' {
return i, ErrInvalidNumber
}
// Parse the int to check bounds if the number of digits could be larger than the max range
// We subtract 1 from the index to remove the `i` from our tests
if len(buf[start:i-1]) >= maxInt64Digits || len(buf[start:i-1]) >= minInt64Digits {
if _, err := parseIntBytes(buf[start:i-1], 10, 64); err != nil {
return i, makeError(fmt.Sprintf("unable to parse integer %s: %s", buf[start:i-1], err), buf, i)
}
}
} else {
// Parse the float to check bounds if it's scientific or the number of digits could be larger than the max range
if scientific || len(buf[start:i]) >= maxFloat64Digits || len(buf[start:i]) >= minFloat64Digits {
if _, err := parseFloatBytes(buf[start:i], 64); err != nil {
return i, makeError("invalid float", buf, i)
}
}
}
return i, nil
}
// scanBoolean returns the end position within buf, starting at i, after
// scanning over buf for a boolean. Valid values for a boolean are
// t, T, true, True, TRUE, f, F, false, False, FALSE. It returns an error
// if an invalid boolean is scanned.
func scanBoolean(buf []byte, i int) (int, []byte, error) {
start := i
if i < len(buf) && (buf[i] != 't' && buf[i] != 'f' && buf[i] != 'T' && buf[i] != 'F') {
return i, buf[start:i], makeError("invalid value", buf, i)
}
i++
for {
if i >= len(buf) {
break
}
if buf[i] == ',' || buf[i] == ' ' {
break
}
i++
}
// Single char bool (t, T, f, F) is ok
if i-start == 1 {
return i, buf[start:i], nil
}
// length must be 4 for true or TRUE
if (buf[start] == 't' || buf[start] == 'T') && i-start != 4 {
return i, buf[start:i], makeError("invalid boolean", buf, i)
}
// length must be 5 for false or FALSE
if (buf[start] == 'f' || buf[start] == 'F') && i-start != 5 {
return i, buf[start:i], makeError("invalid boolean", buf, i)
}
// Otherwise
valid := false
switch buf[start] {
case 't':
valid = bytes.Equal(buf[start:i], []byte("true"))
case 'f':
valid = bytes.Equal(buf[start:i], []byte("false"))
case 'T':
valid = bytes.Equal(buf[start:i], []byte("TRUE")) || bytes.Equal(buf[start:i], []byte("True"))
case 'F':
valid = bytes.Equal(buf[start:i], []byte("FALSE")) || bytes.Equal(buf[start:i], []byte("False"))
}
if !valid {
return i, buf[start:i], makeError("invalid boolean", buf, i)
}
return i, buf[start:i], nil
}
// skipWhitespace returns the end position within buf, starting at i, after
// scanning over spaces, tabs and NUL bytes
func skipWhitespace(buf []byte, i int) int {
for i < len(buf) {
if buf[i] != ' ' && buf[i] != '\t' && buf[i] != 0 {
break
}
i++
}
return i
}
// makeError is a helper function for making a metric parsing error.
// reason is the reason why the error occurred.
// buf should be the current buffer we are parsing.
// i is the current index, to give some context on where in the buffer we are.
func makeError(reason string, buf []byte, i int) error {
return fmt.Errorf("metric parsing error, reason: [%s], buffer: [%s], index: [%d]",
reason, buf, i)
}
// getPrecisionMultiplier will return a multiplier for the precision specified.
func getPrecisionMultiplier(precision string) int64 {
d := time.Nanosecond
switch precision {
case "u":
d = time.Microsecond
case "ms":
d = time.Millisecond
case "s":
d = time.Second
case "m":
d = time.Minute
case "h":
d = time.Hour
}
return int64(d)
}
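A minimal usage sketch of the parser above (the metric line, tag, and field values are illustrative; the import path assumes the repository layout used by the tests below). With precision `"s"`, `getPrecisionMultiplier` returns `int64(time.Second)`, so the timestamp is scaled to nanoseconds while parsing:

```go
package main

import (
	"fmt"
	"time"

	"github.com/influxdata/telegraf/metric"
)

func main() {
	// One line of line protocol with a seconds-precision timestamp.
	buf := []byte("cpu,host=serverA usage_idle=99 1491847420\n")

	// Precision "s" scales the timestamp by int64(time.Second).
	metrics, err := metric.ParseWithDefaultTimePrecision(buf, time.Now(), "s")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(metrics[0].UnixNano()) // 1491847420000000000
}
```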

metric/parse_test.go
@@ -0,0 +1,413 @@
package metric
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
)
const trues = `booltest b=T
booltest b=t
booltest b=True
booltest b=TRUE
booltest b=true
`
const falses = `booltest b=F
booltest b=f
booltest b=False
booltest b=FALSE
booltest b=false
`
const withEscapes = `w\,\ eather,host=local temp=99 1465839830100400200
w\,eather,host=local temp=99 1465839830100400200
weather,location=us\,midwest temperature=82 1465839830100400200
weather,location=us-midwest temp\=rature=82 1465839830100400200
weather,location\ place=us-midwest temperature=82 1465839830100400200
weather,location=us-midwest temperature="too\"hot\"" 1465839830100400200
`
const withTimestamps = `cpu usage=99 1480595849000000000
cpu usage=99 1480595850000000000
cpu usage=99 1480595851700030000
cpu usage=99 1480595852000000300
`
const sevenMetrics = `cpu,host=foo,datacenter=us-east idle=99,busy=1i,b=true,s="string"
cpu,host=foo,datacenter=us-east idle=99,busy=1i,b=true,s="string"
cpu,host=foo,datacenter=us-east idle=99,busy=1i,b=true,s="string"
cpu,host=foo,datacenter=us-east idle=99,busy=1i,b=true,s="string"
cpu,host=foo,datacenter=us-east idle=99,busy=1i,b=true,s="string"
cpu,host=foo,datacenter=us-east idle=99,busy=1i,b=true,s="string"
cpu,host=foo,datacenter=us-east idle=99,busy=1i,b=true,s="string"
`
const negMetrics = `weather,host=local temp=-99i,temp_float=-99.4 1465839830100400200
`
// some metrics are invalid
const someInvalid = `cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu3, host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu4 , usage_idle=99,usage_busy=1
cpu 1480595852000000300
cpu usage=99 1480595852foobar300
cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
`
func TestParse(t *testing.T) {
start := time.Now()
metrics, err := Parse([]byte(sevenMetrics))
assert.NoError(t, err)
assert.Len(t, metrics, 7)
// all metrics parsed together w/o a timestamp should have the same time.
firstTime := metrics[0].Time()
for _, m := range metrics {
assert.Equal(t,
map[string]interface{}{
"idle": float64(99),
"busy": int64(1),
"b": true,
"s": "string",
},
m.Fields(),
)
assert.Equal(t,
map[string]string{
"host": "foo",
"datacenter": "us-east",
},
m.Tags(),
)
assert.True(t, m.Time().After(start))
assert.True(t, m.Time().Equal(firstTime))
}
}
func TestParseNegNumbers(t *testing.T) {
metrics, err := Parse([]byte(negMetrics))
assert.NoError(t, err)
assert.Len(t, metrics, 1)
assert.Equal(t,
map[string]interface{}{
"temp": int64(-99),
"temp_float": float64(-99.4),
},
metrics[0].Fields(),
)
assert.Equal(t,
map[string]string{
"host": "local",
},
metrics[0].Tags(),
)
}
func TestParseErrors(t *testing.T) {
start := time.Now()
metrics, err := Parse([]byte(someInvalid))
assert.Error(t, err)
assert.Len(t, metrics, 4)
// all metrics parsed together w/o a timestamp should have the same time.
firstTime := metrics[0].Time()
for _, m := range metrics {
assert.Equal(t,
map[string]interface{}{
"usage_idle": float64(99),
"usage_busy": float64(1),
},
m.Fields(),
)
assert.Equal(t,
map[string]string{
"host": "foo",
"datacenter": "us-east",
},
m.Tags(),
)
assert.True(t, m.Time().After(start))
assert.True(t, m.Time().Equal(firstTime))
}
}
func TestParseWithTimestamps(t *testing.T) {
metrics, err := Parse([]byte(withTimestamps))
assert.NoError(t, err)
assert.Len(t, metrics, 4)
expectedTimestamps := []time.Time{
time.Unix(0, 1480595849000000000),
time.Unix(0, 1480595850000000000),
time.Unix(0, 1480595851700030000),
time.Unix(0, 1480595852000000300),
}
// all metrics parsed together w/o a timestamp should have the same time.
for i, m := range metrics {
assert.Equal(t,
map[string]interface{}{
"usage": float64(99),
},
m.Fields(),
)
assert.True(t, m.Time().Equal(expectedTimestamps[i]))
}
}
func TestParseEscapes(t *testing.T) {
metrics, err := Parse([]byte(withEscapes))
assert.NoError(t, err)
assert.Len(t, metrics, 6)
tests := []struct {
name string
fields map[string]interface{}
tags map[string]string
}{
{
name: `w, eather`,
fields: map[string]interface{}{"temp": float64(99)},
tags: map[string]string{"host": "local"},
},
{
name: `w,eather`,
fields: map[string]interface{}{"temp": float64(99)},
tags: map[string]string{"host": "local"},
},
{
name: `weather`,
fields: map[string]interface{}{"temperature": float64(82)},
tags: map[string]string{"location": `us,midwest`},
},
{
name: `weather`,
fields: map[string]interface{}{`temp=rature`: float64(82)},
tags: map[string]string{"location": `us-midwest`},
},
{
name: `weather`,
fields: map[string]interface{}{"temperature": float64(82)},
tags: map[string]string{`location place`: `us-midwest`},
},
{
name: `weather`,
fields: map[string]interface{}{`temperature`: `too"hot"`},
tags: map[string]string{"location": `us-midwest`},
},
}
for i, test := range tests {
assert.Equal(t, test.name, metrics[i].Name())
assert.Equal(t, test.fields, metrics[i].Fields())
assert.Equal(t, test.tags, metrics[i].Tags())
}
}
func TestParseTrueBooleans(t *testing.T) {
metrics, err := Parse([]byte(trues))
assert.NoError(t, err)
assert.Len(t, metrics, 5)
for _, metric := range metrics {
assert.Equal(t, "booltest", metric.Name())
assert.Equal(t, true, metric.Fields()["b"])
}
}
func TestParseFalseBooleans(t *testing.T) {
metrics, err := Parse([]byte(falses))
assert.NoError(t, err)
assert.Len(t, metrics, 5)
for _, metric := range metrics {
assert.Equal(t, "booltest", metric.Name())
assert.Equal(t, false, metric.Fields()["b"])
}
}
func TestParsePointBadNumber(t *testing.T) {
for _, tt := range []string{
"cpu v=- ",
"cpu v=-i ",
"cpu v=-. ",
"cpu v=. ",
"cpu v=1.0i ",
"cpu v=1ii ",
"cpu v=1a ",
"cpu v=-e-e-e ",
"cpu v=42+3 ",
"cpu v= ",
} {
_, err := Parse([]byte(tt + "\n"))
assert.Error(t, err, tt)
}
}
func TestParseTagsMissingParts(t *testing.T) {
for _, tt := range []string{
`cpu,host`,
`cpu,host,`,
`cpu,host=`,
`cpu,f=oo=bar value=1`,
`cpu,host value=1i`,
`cpu,host=serverA,region value=1i`,
`cpu,host=serverA,region= value=1i`,
`cpu,host=serverA,region=,zone=us-west value=1i`,
`cpu, value=1`,
`cpu, ,,`,
`cpu,,,`,
`cpu,host=serverA,=us-east value=1i`,
`cpu,host=serverAa\,,=us-east value=1i`,
`cpu,host=serverA\,,=us-east value=1i`,
`cpu, =serverA value=1i`,
} {
_, err := Parse([]byte(tt + "\n"))
assert.Error(t, err, tt)
}
}
func TestParsePointWhitespace(t *testing.T) {
for _, tt := range []string{
`cpu value=1.0 1257894000000000000`,
`cpu value=1.0 1257894000000000000`,
`cpu value=1.0 1257894000000000000`,
`cpu value=1.0 1257894000000000000 `,
} {
m, err := Parse([]byte(tt + "\n"))
assert.NoError(t, err, tt)
assert.Equal(t, "cpu", m[0].Name())
assert.Equal(t, map[string]interface{}{"value": float64(1)}, m[0].Fields())
}
}
func TestParsePointInvalidFields(t *testing.T) {
for _, tt := range []string{
"test,foo=bar a=101,=value",
"test,foo=bar =value",
"test,foo=bar a=101,key=",
"test,foo=bar key=",
`test,foo=bar a=101,b="foo`,
} {
_, err := Parse([]byte(tt + "\n"))
assert.Error(t, err, tt)
}
}
func TestParsePointNoFields(t *testing.T) {
for _, tt := range []string{
"cpu_load_short,host=server01,region=us-west",
"very_long_measurement_name",
"cpu,host==",
"============",
"cpu",
"cpu\n\n\n\n\n\n\n",
" ",
} {
_, err := Parse([]byte(tt + "\n"))
assert.Error(t, err, tt)
}
}
// a b=1 << this is the shortest possible metric
// anything shorter is an error
func TestParseBufTooShort(t *testing.T) {
for _, tt := range []string{
"",
"a",
"a ",
"a b=",
} {
_, err := Parse([]byte(tt + "\n"))
assert.Error(t, err, tt)
}
}
func TestParseInvalidBooleans(t *testing.T) {
for _, tt := range []string{
"test b=tru",
"test b=fals",
"test b=faLse",
"test q=foo",
"test b=lambchops",
} {
_, err := Parse([]byte(tt + "\n"))
assert.Error(t, err, tt)
}
}
func TestParseInvalidNumbers(t *testing.T) {
for _, tt := range []string{
"test b=-",
"test b=1.1.1",
"test b=nan",
"test b=9i10",
"test b=9999999999999999999i",
} {
_, err := Parse([]byte(tt + "\n"))
assert.Error(t, err, tt)
}
}
func TestParseNegativeTimestamps(t *testing.T) {
for _, tt := range []string{
"test foo=101 -1257894000000000000",
} {
metrics, err := Parse([]byte(tt + "\n"))
assert.NoError(t, err, tt)
assert.True(t, metrics[0].Time().Equal(time.Unix(0, -1257894000000000000)))
}
}
func TestParsePrecision(t *testing.T) {
for _, tt := range []struct {
line string
precision string
expected int64
}{
{"test v=42 1491847420", "s", 1491847420000000000},
{"test v=42 1491847420123", "ms", 1491847420123000000},
{"test v=42 1491847420123456", "u", 1491847420123456000},
{"test v=42 1491847420123456789", "ns", 1491847420123456789},
{"test v=42 1491847420123456789", "1s", 1491847420123456789},
{"test v=42 1491847420123456789", "asdf", 1491847420123456789},
} {
metrics, err := ParseWithDefaultTimePrecision(
[]byte(tt.line+"\n"), time.Now(), tt.precision)
assert.NoError(t, err)
assert.Equal(t, tt.expected, metrics[0].UnixNano())
}
}
func TestParsePrecisionUnsetTime(t *testing.T) {
for _, tt := range []struct {
line string
precision string
}{
{"test v=42", "s"},
{"test v=42", "ns"},
} {
_, err := ParseWithDefaultTimePrecision(
[]byte(tt.line+"\n"), time.Now(), tt.precision)
assert.NoError(t, err)
}
}
func TestParseMaxKeyLength(t *testing.T) {
key := ""
for {
if len(key) > MaxKeyLength {
break
}
key += "test"
}
_, err := Parse([]byte(key + " value=1\n"))
assert.Error(t, err)
}

metric/reader.go
@@ -0,0 +1,159 @@
package metric
import (
"io"
"github.com/influxdata/telegraf"
)
type state int
const (
_ state = iota
// normal state copies whole metrics into the given buffer until we can't
// fit the next metric.
normal
// split state means that we have a metric that we were able to split, so
// that we can fit it into multiple metrics (and calls to Read)
split
// overflow state means that we have a metric that didn't fit into a single
// buffer, and needs to be split across multiple calls to Read.
overflow
// splitOverflow state means that a split metric didn't fit into a single
// buffer, and needs to be split across multiple calls to Read.
splitOverflow
// done means we're done reading metrics, and now always return (0, io.EOF)
done
)
type reader struct {
metrics []telegraf.Metric
splitMetrics []telegraf.Metric
buf []byte
state state
// metric index
iM int
// split metric index
iSM int
// buffer index
iB int
}
func NewReader(metrics []telegraf.Metric) io.Reader {
return &reader{
metrics: metrics,
state: normal,
}
}
func (r *reader) Read(p []byte) (n int, err error) {
var i int
switch r.state {
case done:
return 0, io.EOF
case normal:
for {
// this for-loop is the sunny-day scenario, where we are given a
// buffer that is large enough to hold at least a single metric.
// all of the cases below it are edge-cases.
if r.metrics[r.iM].Len() <= len(p[i:]) {
i += r.metrics[r.iM].SerializeTo(p[i:])
} else {
break
}
r.iM++
if r.iM == len(r.metrics) {
r.state = done
return i, io.EOF
}
}
// if we haven't written any bytes, check if we can split the current
// metric into multiple full metrics at a smaller size.
if i == 0 {
tmp := r.metrics[r.iM].Split(len(p))
if len(tmp) > 1 {
r.splitMetrics = tmp
r.state = split
if r.splitMetrics[0].Len() <= len(p) {
i += r.splitMetrics[0].SerializeTo(p)
r.iSM = 1
} else {
// splitting didn't quite work, so we'll drop down and
// overflow the metric.
r.state = normal
r.iSM = 0
}
}
}
// if we haven't written any bytes and we're not at the end of the metrics
// slice, then it means we have a single metric that is larger than the
// provided buffer.
if i == 0 {
r.buf = r.metrics[r.iM].Serialize()
i += copy(p, r.buf[r.iB:])
r.iB += i
r.state = overflow
}
case split:
if r.splitMetrics[r.iSM].Len() <= len(p) {
// write the current split metric
i += r.splitMetrics[r.iSM].SerializeTo(p)
r.iSM++
if r.iSM >= len(r.splitMetrics) {
// done writing the current split metrics
r.iSM = 0
r.iM++
if r.iM == len(r.metrics) {
r.state = done
return i, io.EOF
}
r.state = normal
}
} else {
// This would only happen if we split the metric, and then a
// subsequent buffer was smaller than the initial one given,
// so that our split metric no longer fits.
r.buf = r.splitMetrics[r.iSM].Serialize()
i += copy(p, r.buf[r.iB:])
r.iB += i
r.state = splitOverflow
}
case splitOverflow:
i = copy(p, r.buf[r.iB:])
r.iB += i
if r.iB >= len(r.buf) {
r.iB = 0
r.iSM++
if r.iSM == len(r.splitMetrics) {
r.iM++
if r.iM == len(r.metrics) {
r.state = done
return i, io.EOF
}
r.state = normal
} else {
r.state = split
}
}
case overflow:
i = copy(p, r.buf[r.iB:])
r.iB += i
if r.iB >= len(r.buf) {
r.iB = 0
r.iM++
if r.iM == len(r.metrics) {
r.state = done
return i, io.EOF
}
r.state = normal
}
}
return i, nil
}
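A quick sketch of the overflow path described in the state comments above (the metric and buffer size are arbitrary; the table-driven tests below exercise the same behavior): a metric larger than the read buffer is served in buffer-sized chunks across successive calls to Read, with io.EOF on the final chunk.

```go
package main

import (
	"fmt"
	"io"
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/metric"
)

func main() {
	ts := time.Unix(1481032190, 0)
	m, _ := metric.New("foo", map[string]string{},
		map[string]interface{}{"value": int64(10)}, ts)

	r := metric.NewReader([]telegraf.Metric{m})

	// A 5-byte buffer forces the reader into the overflow state, so the
	// serialized metric ("foo value=10i 1481032190000000000\n") arrives
	// in 5-byte chunks.
	buf := make([]byte, 5)
	for {
		n, err := r.Read(buf)
		fmt.Printf("%q\n", buf[:n])
		if err == io.EOF {
			break
		}
	}
}
```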

metric/reader_test.go
@@ -0,0 +1,713 @@
package metric
import (
"io"
"io/ioutil"
"regexp"
"strings"
"testing"
"time"
"github.com/influxdata/telegraf"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func BenchmarkMetricReader(b *testing.B) {
metrics := make([]telegraf.Metric, 10)
for i := 0; i < 10; i++ {
metrics[i], _ = New("foo", map[string]string{},
map[string]interface{}{"value": int64(1)}, time.Now())
}
for n := 0; n < b.N; n++ {
r := NewReader(metrics)
io.Copy(ioutil.Discard, r)
}
}
func TestMetricReader(t *testing.T) {
ts := time.Unix(1481032190, 0)
metrics := make([]telegraf.Metric, 10)
for i := 0; i < 10; i++ {
metrics[i], _ = New("foo", map[string]string{},
map[string]interface{}{"value": int64(1)}, ts)
}
r := NewReader(metrics)
buf := make([]byte, 35)
for i := 0; i < 10; i++ {
n, err := r.Read(buf)
if err != nil {
assert.True(t, err == io.EOF, err.Error())
}
assert.Equal(t, 33, n)
assert.Equal(t, "foo value=1i 1481032190000000000\n", string(buf[0:n]))
}
// reader should now be done, and always return 0, io.EOF
for i := 0; i < 10; i++ {
n, err := r.Read(buf)
assert.True(t, err == io.EOF, err.Error())
assert.Equal(t, 0, n)
}
}
func TestMetricReader_OverflowMetric(t *testing.T) {
ts := time.Unix(1481032190, 0)
m, _ := New("foo", map[string]string{},
map[string]interface{}{"value": int64(10)}, ts)
metrics := []telegraf.Metric{m}
r := NewReader(metrics)
buf := make([]byte, 5)
tests := []struct {
exp string
err error
n int
}{
{
"foo v",
nil,
5,
},
{
"alue=",
nil,
5,
},
{
"10i 1",
nil,
5,
},
{
"48103",
nil,
5,
},
{
"21900",
nil,
5,
},
{
"00000",
nil,
5,
},
{
"000\n",
io.EOF,
4,
},
{
"",
io.EOF,
0,
},
}
for _, test := range tests {
n, err := r.Read(buf)
assert.Equal(t, test.n, n)
assert.Equal(t, test.exp, string(buf[0:n]))
assert.Equal(t, test.err, err)
}
}
// Regression test for when a metric is the same size as the buffer.
//
// Previously EOF would not be set until the next call to Read.
func TestMetricReader_MetricSizeEqualsBufferSize(t *testing.T) {
ts := time.Unix(1481032190, 0)
m1, _ := New("foo", map[string]string{},
map[string]interface{}{"a": int64(1)}, ts)
metrics := []telegraf.Metric{m1}
r := NewReader(metrics)
buf := make([]byte, m1.Len())
for {
n, err := r.Read(buf)
// Should never read 0 bytes except at EOF (or if the input buffer is 0 length)
if n == 0 {
require.Equal(t, io.EOF, err)
break
}
// Lines should be terminated with a LF
if err == io.EOF {
require.Equal(t, uint8('\n'), buf[n-1])
break
}
require.NoError(t, err)
}
}
// Regression test for when a metric requires to be split and one of the
// split metrics is exactly the size of the buffer.
//
// Previously an empty string would be returned on the next Read without error,
// and then next Read call would panic.
func TestMetricReader_SplitWithExactLengthSplit(t *testing.T) {
ts := time.Unix(1481032190, 0)
m1, _ := New("foo", map[string]string{},
map[string]interface{}{"a": int64(1), "bb": int64(2)}, ts)
metrics := []telegraf.Metric{m1}
r := NewReader(metrics)
buf := make([]byte, 30)
// foo a=1i,bb=2i 1481032190000000000\n // len 35
//
// Requires this specific split order:
// foo a=1i 1481032190000000000\n // len 29
// foo bb=2i 1481032190000000000\n // len 30
for {
n, err := r.Read(buf)
// Should never read 0 bytes except at EOF (or if the input buffer is 0 length)
if n == 0 {
require.Equal(t, io.EOF, err)
break
}
// Lines should be terminated with a LF
if err == io.EOF {
require.Equal(t, uint8('\n'), buf[n-1])
break
}
require.NoError(t, err)
}
}
// Regression test for when a metric requires to be split and one of the
// split metrics is larger than the buffer.
//
// Previously the metric index would be set incorrectly causing a panic.
func TestMetricReader_SplitOverflowOversized(t *testing.T) {
ts := time.Unix(1481032190, 0)
m1, _ := New("foo", map[string]string{},
map[string]interface{}{
"a": int64(1),
"bbb": int64(2),
}, ts)
metrics := []telegraf.Metric{m1}
r := NewReader(metrics)
buf := make([]byte, 30)
// foo a=1i,bbb=2i 1481032190000000000\n // len 36
//
// foo a=1i 1481032190000000000\n // len 29
// foo bbb=2i 1481032190000000000\n // len 31
for {
n, err := r.Read(buf)
// Should never read 0 bytes except at EOF (or if the input buffer is 0 length)
if n == 0 {
require.Equal(t, io.EOF, err)
break
}
// Lines should be terminated with a LF
if err == io.EOF {
require.Equal(t, uint8('\n'), buf[n-1])
break
}
require.NoError(t, err)
}
}
// Regression test for when a split metric exactly fits in the buffer.
//
// Previously the metric would be overflow split when not required.
func TestMetricReader_SplitOverflowUneeded(t *testing.T) {
ts := time.Unix(1481032190, 0)
m1, _ := New("foo", map[string]string{},
map[string]interface{}{"a": int64(1), "b": int64(2)}, ts)
metrics := []telegraf.Metric{m1}
r := NewReader(metrics)
buf := make([]byte, 29)
// foo a=1i,b=2i 1481032190000000000\n // len 34
//
// foo a=1i 1481032190000000000\n // len 29
// foo b=2i 1481032190000000000\n // len 29
for {
n, err := r.Read(buf)
// Should never read 0 bytes except at EOF (or if the input buffer is 0 length)
if n == 0 {
require.Equal(t, io.EOF, err)
break
}
// Lines should be terminated with a LF
if err == io.EOF {
require.Equal(t, uint8('\n'), buf[n-1])
break
}
require.NoError(t, err)
}
}
func TestMetricReader_OverflowMultipleMetrics(t *testing.T) {
ts := time.Unix(1481032190, 0)
m, _ := New("foo", map[string]string{},
map[string]interface{}{"value": int64(10)}, ts)
metrics := []telegraf.Metric{m, m.Copy()}
r := NewReader(metrics)
buf := make([]byte, 10)
tests := []struct {
exp string
err error
n int
}{
{
"foo value=",
nil,
10,
},
{
"10i 148103",
nil,
10,
},
{
"2190000000",
nil,
10,
},
{
"000\n",
nil,
4,
},
{
"foo value=",
nil,
10,
},
{
"10i 148103",
nil,
10,
},
{
"2190000000",
nil,
10,
},
{
"000\n",
io.EOF,
4,
},
{
"",
io.EOF,
0,
},
}
for _, test := range tests {
n, err := r.Read(buf)
assert.Equal(t, test.n, n)
assert.Equal(t, test.exp, string(buf[0:n]))
assert.Equal(t, test.err, err)
}
}
// test splitting a metric
func TestMetricReader_SplitMetric(t *testing.T) {
ts := time.Unix(1481032190, 0)
m1, _ := New("foo", map[string]string{},
map[string]interface{}{
"value1": int64(10),
"value2": int64(10),
"value3": int64(10),
"value4": int64(10),
"value5": int64(10),
"value6": int64(10),
},
ts,
)
metrics := []telegraf.Metric{m1}
r := NewReader(metrics)
buf := make([]byte, 60)
tests := []struct {
expRegex string
err error
n int
}{
{
`foo value\d=10i,value\d=10i,value\d=10i 1481032190000000000\n`,
nil,
57,
},
{
`foo value\d=10i,value\d=10i,value\d=10i 1481032190000000000\n`,
io.EOF,
57,
},
{
"",
io.EOF,
0,
},
}
for _, test := range tests {
n, err := r.Read(buf)
assert.Equal(t, test.n, n)
re := regexp.MustCompile(test.expRegex)
assert.True(t, re.MatchString(string(buf[0:n])), string(buf[0:n]))
assert.Equal(t, test.err, err)
}
}
// test an array with one split metric and one unsplit
func TestMetricReader_SplitMetric2(t *testing.T) {
ts := time.Unix(1481032190, 0)
m1, _ := New("foo", map[string]string{},
map[string]interface{}{
"value1": int64(10),
"value2": int64(10),
"value3": int64(10),
"value4": int64(10),
"value5": int64(10),
"value6": int64(10),
},
ts,
)
m2, _ := New("foo", map[string]string{},
map[string]interface{}{
"value1": int64(10),
},
ts,
)
metrics := []telegraf.Metric{m1, m2}
r := NewReader(metrics)
buf := make([]byte, 60)
tests := []struct {
expRegex string
err error
n int
}{
{
`foo value\d=10i,value\d=10i,value\d=10i 1481032190000000000\n`,
nil,
57,
},
{
`foo value\d=10i,value\d=10i,value\d=10i 1481032190000000000\n`,
nil,
57,
},
{
`foo value1=10i 1481032190000000000\n`,
io.EOF,
35,
},
{
"",
io.EOF,
0,
},
}
for _, test := range tests {
n, err := r.Read(buf)
assert.Equal(t, test.n, n)
re := regexp.MustCompile(test.expRegex)
assert.True(t, re.MatchString(string(buf[0:n])), string(buf[0:n]))
assert.Equal(t, test.err, err)
}
}
// test split that results in metrics that are still too long, which results in
// the reader falling back to regular overflow.
func TestMetricReader_SplitMetricTooLong(t *testing.T) {
ts := time.Unix(1481032190, 0)
m1, _ := New("foo", map[string]string{},
map[string]interface{}{
"value1": int64(10),
"value2": int64(10),
},
ts,
)
metrics := []telegraf.Metric{m1}
r := NewReader(metrics)
buf := make([]byte, 30)
tests := []struct {
expRegex string
err error
n int
}{
{
`foo value\d=10i,value\d=10i 1481`,
nil,
30,
},
{
`032190000000000\n`,
io.EOF,
16,
},
{
"",
io.EOF,
0,
},
}
for _, test := range tests {
n, err := r.Read(buf)
assert.Equal(t, test.n, n)
re := regexp.MustCompile(test.expRegex)
assert.True(t, re.MatchString(string(buf[0:n])), string(buf[0:n]))
assert.Equal(t, test.err, err)
}
}
// test split with a changing buffer size in the middle of subsequent calls
// to Read
func TestMetricReader_SplitMetricChangingBuffer(t *testing.T) {
ts := time.Unix(1481032190, 0)
m1, _ := New("foo", map[string]string{},
map[string]interface{}{
"value1": int64(10),
"value2": int64(10),
"value3": int64(10),
},
ts,
)
m2, _ := New("foo", map[string]string{},
map[string]interface{}{
"value1": int64(10),
},
ts,
)
metrics := []telegraf.Metric{m1, m2}
r := NewReader(metrics)
tests := []struct {
expRegex string
err error
n int
buf []byte
}{
{
`foo value\d=10i 1481032190000000000\n`,
nil,
35,
make([]byte, 36),
},
{
`foo value\d=10i 148103219000000`,
nil,
30,
make([]byte, 30),
},
{
`0000\n`,
nil,
5,
make([]byte, 30),
},
{
`foo value\d=10i 1481032190000000000\n`,
nil,
35,
make([]byte, 36),
},
{
`foo value1=10i 1481032190000000000\n`,
io.EOF,
35,
make([]byte, 36),
},
{
"",
io.EOF,
0,
make([]byte, 36),
},
}
for _, test := range tests {
n, err := r.Read(test.buf)
assert.Equal(t, test.n, n, test.expRegex)
re := regexp.MustCompile(test.expRegex)
assert.True(t, re.MatchString(string(test.buf[0:n])), string(test.buf[0:n]))
assert.Equal(t, test.err, err, test.expRegex)
}
}
// test split with a changing buffer size in the middle of subsequent calls
// to Read
func TestMetricReader_SplitMetricChangingBuffer2(t *testing.T) {
ts := time.Unix(1481032190, 0)
m1, _ := New("foo", map[string]string{},
map[string]interface{}{
"value1": int64(10),
"value2": int64(10),
},
ts,
)
m2, _ := New("foo", map[string]string{},
map[string]interface{}{
"value1": int64(10),
},
ts,
)
metrics := []telegraf.Metric{m1, m2}
r := NewReader(metrics)
tests := []struct {
expRegex string
err error
n int
buf []byte
}{
{
`foo value\d=10i 1481032190000000000\n`,
nil,
35,
make([]byte, 36),
},
{
`foo value\d=10i 148103219000000`,
nil,
30,
make([]byte, 30),
},
{
`0000\n`,
nil,
5,
make([]byte, 30),
},
{
`foo value1=10i 1481032190000000000\n`,
io.EOF,
35,
make([]byte, 36),
},
{
"",
io.EOF,
0,
make([]byte, 36),
},
}
for _, test := range tests {
n, err := r.Read(test.buf)
assert.Equal(t, test.n, n, test.expRegex)
re := regexp.MustCompile(test.expRegex)
assert.True(t, re.MatchString(string(test.buf[0:n])), string(test.buf[0:n]))
assert.Equal(t, test.err, err, test.expRegex)
}
}
func TestReader_Read(t *testing.T) {
epoch := time.Unix(0, 0)
type args struct {
name string
tags map[string]string
fields map[string]interface{}
t time.Time
mType []telegraf.ValueType
}
tests := []struct {
name string
args args
expected []byte
}{
{
name: "escape backslashes in string field",
args: args{
name: "cpu",
tags: map[string]string{},
fields: map[string]interface{}{"value": `test\`},
t: epoch,
},
expected: []byte(`cpu value="test\\" 0`),
},
{
name: "escape quote in string field",
args: args{
name: "cpu",
tags: map[string]string{},
fields: map[string]interface{}{"value": `test"`},
t: epoch,
},
expected: []byte(`cpu value="test\"" 0`),
},
{
name: "escape quote and backslash in string field",
args: args{
name: "cpu",
tags: map[string]string{},
fields: map[string]interface{}{"value": `test\"`},
t: epoch,
},
expected: []byte(`cpu value="test\\\"" 0`),
},
{
name: "escape multiple backslash in string field",
args: args{
name: "cpu",
tags: map[string]string{},
fields: map[string]interface{}{"value": `test\\`},
t: epoch,
},
expected: []byte(`cpu value="test\\\\" 0`),
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
buf := make([]byte, 512)
m, err := New(tt.args.name, tt.args.tags, tt.args.fields, tt.args.t, tt.args.mType...)
require.NoError(t, err)
r := NewReader([]telegraf.Metric{m})
num, err := r.Read(buf)
if err != io.EOF {
require.NoError(t, err)
}
line := string(buf[:num])
// This is done so that we can use raw strings in the test spec
noeol := strings.TrimRight(line, "\n")
require.Equal(t, string(tt.expected), noeol)
require.Equal(t, len(tt.expected)+1, num)
})
}
}
func TestMetricRoundtrip(t *testing.T) {
const lp = `nstat,bu=linux,cls=server,dc=cer,env=production,host=hostname,name=netstat,sr=database IpExtInBcastOctets=12570626154i,IpExtInBcastPkts=95541226i,IpExtInCEPkts=0i,IpExtInCsumErrors=0i,IpExtInECT0Pkts=55674i,IpExtInECT1Pkts=0i,IpExtInMcastOctets=5928296i,IpExtInMcastPkts=174365i,IpExtInNoECTPkts=17965863529i,IpExtInNoRoutes=20i,IpExtInOctets=3334866321815i,IpExtInTruncatedPkts=0i,IpExtOutBcastOctets=0i,IpExtOutBcastPkts=0i,IpExtOutMcastOctets=0i,IpExtOutMcastPkts=0i,IpExtOutOctets=31397892391399i,TcpExtArpFilter=0i,TcpExtBusyPollRxPackets=0i,TcpExtDelayedACKLocked=14094i,TcpExtDelayedACKLost=302083i,TcpExtDelayedACKs=55486507i,TcpExtEmbryonicRsts=11879i,TcpExtIPReversePathFilter=0i,TcpExtListenDrops=1736i,TcpExtListenOverflows=0i,TcpExtLockDroppedIcmps=0i,TcpExtOfoPruned=0i,TcpExtOutOfWindowIcmps=8i,TcpExtPAWSActive=0i,TcpExtPAWSEstab=974i,TcpExtPAWSPassive=0i,TcpExtPruneCalled=0i,TcpExtRcvPruned=0i,TcpExtSyncookiesFailed=12593i,TcpExtSyncookiesRecv=0i,TcpExtSyncookiesSent=0i,TcpExtTCPACKSkippedChallenge=0i,TcpExtTCPACKSkippedFinWait2=0i,TcpExtTCPACKSkippedPAWS=806i,TcpExtTCPACKSkippedSeq=519i,TcpExtTCPACKSkippedSynRecv=0i,TcpExtTCPACKSkippedTimeWait=0i,TcpExtTCPAbortFailed=0i,TcpExtTCPAbortOnClose=22i,TcpExtTCPAbortOnData=36593i,TcpExtTCPAbortOnLinger=0i,TcpExtTCPAbortOnMemory=0i,TcpExtTCPAbortOnTimeout=674i,TcpExtTCPAutoCorking=494253233i,TcpExtTCPBacklogDrop=0i,TcpExtTCPChallengeACK=281i,TcpExtTCPDSACKIgnoredNoUndo=93354i,TcpExtTCPDSACKIgnoredOld=336i,TcpExtTCPDSACKOfoRecv=0i,TcpExtTCPDSACKOfoSent=7i,TcpExtTCPDSACKOldSent=302073i,TcpExtTCPDSACKRecv=215884i,TcpExtTCPDSACKUndo=7633i,TcpExtTCPDeferAcceptDrop=0i,TcpExtTCPDirectCopyFromBacklog=0i,TcpExtTCPDirectCopyFromPrequeue=0i,TcpExtTCPFACKReorder=1320i,TcpExtTCPFastOpenActive=0i,TcpExtTCPFastOpenActiveFail=0i,TcpExtTCPFastOpenCookieReqd=0i,TcpExtTCPFastOpenListenOverflow=0i,TcpExtTCPFastOpenPassive=0i,TcpExtTCPFastOpenPassiveFail=0i,TcpExtTCPFastRetrans=350681i,TcpExtTCPForwardRetrans=142168i,TcpExtTCPFromZeroWindowAdv=4317i,TcpExtTCPFullUndo=29502i,TcpExtTCPHPAcks=10267073000i,TcpExtTCPHPHits=5629837098i,TcpExtTCPHPHitsToUser=0i,TcpExtTCPHystartDelayCwnd=285127i,TcpExtTCPHystartDelayDetect=12318i,TcpExtTCPHystartTrainCwnd=69160570i,TcpExtTCPHystartTrainDetect=3315799i,TcpExtTCPLossFailures=109i,TcpExtTCPLossProbeRecovery=110819i,TcpExtTCPLossProbes=233995i,TcpExtTCPLossUndo=5276i,TcpExtTCPLostRetransmit=397i,TcpExtTCPMD5NotFound=0i,TcpExtTCPMD5Unexpected=0i,TcpExtTCPMemoryPressures=0i,TcpExtTCPMinTTLDrop=0i,TcpExtTCPOFODrop=0i,TcpExtTCPOFOMerge=7i,TcpExtTCPOFOQueue=15196i,TcpExtTCPOrigDataSent=29055119435i,TcpExtTCPPartialUndo=21320i,TcpExtTCPPrequeueDropped=0i,TcpExtTCPPrequeued=0i,TcpExtTCPPureAcks=1236441827i,TcpExtTCPRcvCoalesce=225590473i,TcpExtTCPRcvCollapsed=0i,TcpExtTCPRenoFailures=0i,TcpExtTCPRenoRecovery=0i,TcpExtTCPRenoRecoveryFail=0i,TcpExtTCPRenoReorder=0i,TcpExtTCPReqQFullDoCookies=0i,TcpExtTCPReqQFullDrop=0i,TcpExtTCPRetransFail=41i,TcpExtTCPSACKDiscard=0i,TcpExtTCPSACKReneging=0i,TcpExtTCPSACKReorder=4307i,TcpExtTCPSYNChallenge=244i,TcpExtTCPSackFailures=1698i,TcpExtTCPSackMerged=184668i,TcpExtTCPSackRecovery=97369i,TcpExtTCPSackRecoveryFail=381i,TcpExtTCPSackShiftFallback=2697079i,TcpExtTCPSackShifted=760299i,TcpExtTCPSchedulerFailed=0i,TcpExtTCPSlowStartRetrans=9276i,TcpExtTCPSpuriousRTOs=959i,TcpExtTCPSpuriousRtxHostQueues=2973i,TcpExtTCPSynRetrans=200970i,TcpExtTCPTSReorder=15221i,TcpExtTCPTimeWaitOverflow=0i,TcpExtTCPTimeouts=70127i,TcpExtTCPToZeroWindowAdv=4317i,TcpExtTCPWantZeroWindowAdv=2133i,TcpExtTW=24809813i,TcpExtTWKilled=0i,TcpExtTWRecycled=0i 1496460785000000000
nstat,bu=linux,cls=server,dc=cer,env=production,host=hostname,name=snmp,sr=database IcmpInAddrMaskReps=0i,IcmpInAddrMasks=90i,IcmpInCsumErrors=0i,IcmpInDestUnreachs=284401i,IcmpInEchoReps=9i,IcmpInEchos=1761912i,IcmpInErrors=407i,IcmpInMsgs=2047767i,IcmpInParmProbs=0i,IcmpInRedirects=0i,IcmpInSrcQuenchs=0i,IcmpInTimeExcds=46i,IcmpInTimestampReps=0i,IcmpInTimestamps=1309i,IcmpMsgInType0=9i,IcmpMsgInType11=46i,IcmpMsgInType13=1309i,IcmpMsgInType17=90i,IcmpMsgInType3=284401i,IcmpMsgInType8=1761912i,IcmpMsgOutType0=1761912i,IcmpMsgOutType14=1248i,IcmpMsgOutType3=108709i,IcmpMsgOutType8=9i,IcmpOutAddrMaskReps=0i,IcmpOutAddrMasks=0i,IcmpOutDestUnreachs=108709i,IcmpOutEchoReps=1761912i,IcmpOutEchos=9i,IcmpOutErrors=0i,IcmpOutMsgs=1871878i,IcmpOutParmProbs=0i,IcmpOutRedirects=0i,IcmpOutSrcQuenchs=0i,IcmpOutTimeExcds=0i,IcmpOutTimestampReps=1248i,IcmpOutTimestamps=0i,IpDefaultTTL=64i,IpForwDatagrams=0i,IpForwarding=2i,IpFragCreates=0i,IpFragFails=0i,IpFragOKs=0i,IpInAddrErrors=0i,IpInDelivers=17658795773i,IpInDiscards=0i,IpInHdrErrors=0i,IpInReceives=17659269339i,IpInUnknownProtos=0i,IpOutDiscards=236976i,IpOutNoRoutes=1009i,IpOutRequests=23466783734i,IpReasmFails=0i,IpReasmOKs=0i,IpReasmReqds=0i,IpReasmTimeout=0i,TcpActiveOpens=23308977i,TcpAttemptFails=3757543i,TcpCurrEstab=280i,TcpEstabResets=184792i,TcpInCsumErrors=0i,TcpInErrs=232i,TcpInSegs=17536573089i,TcpMaxConn=-1i,TcpOutRsts=4051451i,TcpOutSegs=29836254873i,TcpPassiveOpens=176546974i,TcpRetransSegs=878085i,TcpRtoAlgorithm=1i,TcpRtoMax=120000i,TcpRtoMin=200i,UdpInCsumErrors=0i,UdpInDatagrams=24441661i,UdpInErrors=0i,UdpLiteInCsumErrors=0i,UdpLiteInDatagrams=0i,UdpLiteInErrors=0i,UdpLiteNoPorts=0i,UdpLiteOutDatagrams=0i,UdpLiteRcvbufErrors=0i,UdpLiteSndbufErrors=0i,UdpNoPorts=17660i,UdpOutDatagrams=51807896i,UdpRcvbufErrors=0i,UdpSndbufErrors=236922i 1496460785000000000
`
metrics, err := Parse([]byte(lp))
require.NoError(t, err)
r := NewReader(metrics)
buf := make([]byte, 128)
_, err = r.Read(buf)
require.NoError(t, err)
metrics, err = Parse(buf)
require.NoError(t, err)
}

@@ -1,83 +0,0 @@
package telegraf
import (
"fmt"
"math"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
func TestNewMetric(t *testing.T) {
now := time.Now()
tags := map[string]string{
"host": "localhost",
"datacenter": "us-east-1",
}
fields := map[string]interface{}{
"usage_idle": float64(99),
"usage_busy": float64(1),
}
m, err := NewMetric("cpu", tags, fields, now)
assert.NoError(t, err)
assert.Equal(t, tags, m.Tags())
assert.Equal(t, fields, m.Fields())
assert.Equal(t, "cpu", m.Name())
assert.Equal(t, now, m.Time())
assert.Equal(t, now.UnixNano(), m.UnixNano())
}
func TestNewMetricString(t *testing.T) {
now := time.Now()
tags := map[string]string{
"host": "localhost",
}
fields := map[string]interface{}{
"usage_idle": float64(99),
}
m, err := NewMetric("cpu", tags, fields, now)
assert.NoError(t, err)
lineProto := fmt.Sprintf("cpu,host=localhost usage_idle=99 %d",
now.UnixNano())
assert.Equal(t, lineProto, m.String())
lineProtoPrecision := fmt.Sprintf("cpu,host=localhost usage_idle=99 %d",
now.Unix())
assert.Equal(t, lineProtoPrecision, m.PrecisionString("s"))
}
func TestNewMetricStringNoTime(t *testing.T) {
tags := map[string]string{
"host": "localhost",
}
fields := map[string]interface{}{
"usage_idle": float64(99),
}
m, err := NewMetric("cpu", tags, fields)
assert.NoError(t, err)
lineProto := fmt.Sprintf("cpu,host=localhost usage_idle=99")
assert.Equal(t, lineProto, m.String())
lineProtoPrecision := fmt.Sprintf("cpu,host=localhost usage_idle=99")
assert.Equal(t, lineProtoPrecision, m.PrecisionString("s"))
}
func TestNewMetricFailNaN(t *testing.T) {
now := time.Now()
tags := map[string]string{
"host": "localhost",
}
fields := map[string]interface{}{
"usage_idle": math.NaN(),
}
_, err := NewMetric("cpu", tags, fields, now)
assert.Error(t, err)
}

@@ -0,0 +1,7 @@
package all
import (
_ "github.com/influxdata/telegraf/plugins/aggregators/basicstats"
_ "github.com/influxdata/telegraf/plugins/aggregators/histogram"
_ "github.com/influxdata/telegraf/plugins/aggregators/minmax"
)

@@ -0,0 +1,43 @@
# BasicStats Aggregator Plugin
The BasicStats aggregator plugin gives us count, max, min, mean, s2 (variance), and stdev for a set of values,
emitting the aggregate every `period` seconds.
### Configuration:
```toml
# Keep the aggregate basicstats of each metric passing through.
[[aggregators.basicstats]]
## General Aggregator Arguments:
## The period on which to flush & clear the aggregator.
period = "30s"
## If true, the original metric will be dropped by the
## aggregator and will not get sent to the output plugins.
drop_original = false
```
### Measurements & Fields:
- measurement1
- field1_count
- field1_max
- field1_min
- field1_mean
- field1_s2 (variance)
- field1_stdev (standard deviation)
### Tags:
No tags are applied by this aggregator.
### Example Output:
```
$ telegraf --config telegraf.conf --quiet
system,host=tars load1=1 1475583980000000000
system,host=tars load1=1 1475583990000000000
system,host=tars load1_count=2,load1_max=1,load1_min=1,load1_mean=1,load1_s2=0,load1_stdev=0 1475584010000000000
system,host=tars load1=1 1475584020000000000
system,host=tars load1=3 1475584030000000000
system,host=tars load1_count=2,load1_max=3,load1_min=1,load1_mean=2,load1_s2=2,load1_stdev=1.414214 1475584010000000000
```
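As a check on the second aggregate line above: that window contains the values 1 and 3, so mean = (1+3)/2 = 2, s2 = ((1-2)² + (3-2)²)/(2-1) = 2, and stdev = √2 ≈ 1.414214.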

@@ -0,0 +1,155 @@
package basicstats
import (
"math"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/aggregators"
)
type BasicStats struct {
cache map[uint64]aggregate
}
func NewBasicStats() telegraf.Aggregator {
mm := &BasicStats{}
mm.Reset()
return mm
}
type aggregate struct {
fields map[string]basicstats
name string
tags map[string]string
}
type basicstats struct {
count float64
min float64
max float64
mean float64
M2 float64 //intermediate value for variance/stdev
}
var sampleConfig = `
## General Aggregator Arguments:
## The period on which to flush & clear the aggregator.
period = "30s"
## If true, the original metric will be dropped by the
## aggregator and will not get sent to the output plugins.
drop_original = false
`
func (m *BasicStats) SampleConfig() string {
return sampleConfig
}
func (m *BasicStats) Description() string {
return "Keep the aggregate basicstats of each metric passing through."
}
func (m *BasicStats) Add(in telegraf.Metric) {
id := in.HashID()
if _, ok := m.cache[id]; !ok {
// hit an uncached metric, create caches for first time:
a := aggregate{
name: in.Name(),
tags: in.Tags(),
fields: make(map[string]basicstats),
}
for k, v := range in.Fields() {
if fv, ok := convert(v); ok {
a.fields[k] = basicstats{
count: 1,
min: fv,
max: fv,
mean: fv,
M2: 0.0,
}
}
}
m.cache[id] = a
} else {
for k, v := range in.Fields() {
if fv, ok := convert(v); ok {
if _, ok := m.cache[id].fields[k]; !ok {
// hit an uncached field of a cached metric
m.cache[id].fields[k] = basicstats{
count: 1,
min: fv,
max: fv,
mean: fv,
M2: 0.0,
}
continue
}
tmp := m.cache[id].fields[k]
//https://en.m.wikipedia.org/wiki/Algorithms_for_calculating_variance
//variable initialization
x := fv
mean := tmp.mean
M2 := tmp.M2
//counter compute
n := tmp.count + 1
tmp.count = n
//mean compute
delta := x - mean
mean = mean + delta/n
tmp.mean = mean
//variance/stdev compute
M2 = M2 + delta*(x-mean)
tmp.M2 = M2
//max/min compute
if fv < tmp.min {
tmp.min = fv
} else if fv > tmp.max {
tmp.max = fv
}
//store final data
m.cache[id].fields[k] = tmp
}
}
}
}
func (m *BasicStats) Push(acc telegraf.Accumulator) {
for _, aggregate := range m.cache {
fields := map[string]interface{}{}
for k, v := range aggregate.fields {
fields[k+"_count"] = v.count
fields[k+"_min"] = v.min
fields[k+"_max"] = v.max
fields[k+"_mean"] = v.mean
//v.count always >=1
if v.count > 1 {
variance := v.M2 / (v.count - 1)
fields[k+"_s2"] = variance
fields[k+"_stdev"] = math.Sqrt(variance)
}
//if count == 1 variance and stdev are undefined, so no data is sent
}
acc.AddFields(aggregate.name, fields, aggregate.tags)
}
}
func (m *BasicStats) Reset() {
m.cache = make(map[uint64]aggregate)
}
func convert(in interface{}) (float64, bool) {
switch v := in.(type) {
case float64:
return v, true
case int64:
return float64(v), true
default:
return 0, false
}
}
func init() {
aggregators.Add("basicstats", func() telegraf.Aggregator {
return NewBasicStats()
})
}
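`Add` above uses Welford's online algorithm for the running mean and `M2`. A hypothetical standalone sketch of the same update (the input values are arbitrary), showing that the running pair reproduces the two-pass sample variance:

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	// Incremental (count, mean, M2) update, as in BasicStats.Add.
	var count, mean, m2 float64
	for _, x := range []float64{1, 3, 4, 6} {
		count++
		delta := x - mean
		mean += delta / count
		m2 += delta * (x - mean)
	}
	variance := m2 / (count - 1) // sample variance, as in Push
	fmt.Println(mean, variance, math.Sqrt(variance))
	// mean=3.5, variance=13/3≈4.333, stdev≈2.082
}
```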

@@ -0,0 +1,151 @@
package basicstats
import (
"math"
"testing"
"time"
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/testutil"
)
var m1, _ = metric.New("m1",
map[string]string{"foo": "bar"},
map[string]interface{}{
"a": int64(1),
"b": int64(1),
"c": float64(2),
"d": float64(2),
},
time.Now(),
)
var m2, _ = metric.New("m1",
map[string]string{"foo": "bar"},
map[string]interface{}{
"a": int64(1),
"b": int64(3),
"c": float64(4),
"d": float64(6),
"e": float64(200),
"ignoreme": "string",
"andme": true,
},
time.Now(),
)
func BenchmarkApply(b *testing.B) {
minmax := NewBasicStats()
for n := 0; n < b.N; n++ {
minmax.Add(m1)
minmax.Add(m2)
}
}
// Test two metrics getting added.
func TestBasicStatsWithPeriod(t *testing.T) {
acc := testutil.Accumulator{}
minmax := NewBasicStats()
minmax.Add(m1)
minmax.Add(m2)
minmax.Push(&acc)
expectedFields := map[string]interface{}{
"a_count": float64(2), //a
"a_max": float64(1),
"a_min": float64(1),
"a_mean": float64(1),
"a_stdev": float64(0),
"a_s2": float64(0),
"b_count": float64(2), //b
"b_max": float64(3),
"b_min": float64(1),
"b_mean": float64(2),
"b_s2": float64(2),
"b_stdev": math.Sqrt(2),
"c_count": float64(2), //c
"c_max": float64(4),
"c_min": float64(2),
"c_mean": float64(3),
"c_s2": float64(2),
"c_stdev": math.Sqrt(2),
"d_count": float64(2), //d
"d_max": float64(6),
"d_min": float64(2),
"d_mean": float64(4),
"d_s2": float64(8),
"d_stdev": math.Sqrt(8),
"e_count": float64(1), //e
"e_max": float64(200),
"e_min": float64(200),
"e_mean": float64(200),
}
expectedTags := map[string]string{
"foo": "bar",
}
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}
// Test two metrics getting added with a push/reset in between (simulates
// getting added in different periods.)
func TestBasicStatsDifferentPeriods(t *testing.T) {
acc := testutil.Accumulator{}
minmax := NewBasicStats()
minmax.Add(m1)
minmax.Push(&acc)
expectedFields := map[string]interface{}{
"a_count": float64(1), //a
"a_max": float64(1),
"a_min": float64(1),
"a_mean": float64(1),
"b_count": float64(1), //b
"b_max": float64(1),
"b_min": float64(1),
"b_mean": float64(1),
"c_count": float64(1), //c
"c_max": float64(2),
"c_min": float64(2),
"c_mean": float64(2),
"d_count": float64(1), //d
"d_max": float64(2),
"d_min": float64(2),
"d_mean": float64(2),
}
expectedTags := map[string]string{
"foo": "bar",
}
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
acc.ClearMetrics()
minmax.Reset()
minmax.Add(m2)
minmax.Push(&acc)
expectedFields = map[string]interface{}{
"a_count": float64(1), //a
"a_max": float64(1),
"a_min": float64(1),
"a_mean": float64(1),
"b_count": float64(1), //b
"b_max": float64(3),
"b_min": float64(3),
"b_mean": float64(3),
"c_count": float64(1), //c
"c_max": float64(4),
"c_min": float64(4),
"c_mean": float64(4),
"d_count": float64(1), //d
"d_max": float64(6),
"d_min": float64(6),
"d_mean": float64(6),
"e_count": float64(1), //e
"e_max": float64(200),
"e_min": float64(200),
"e_mean": float64(200),
}
expectedTags = map[string]string{
"foo": "bar",
}
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}

@@ -0,0 +1,97 @@
# Histogram Aggregator Plugin
The histogram aggregator plugin creates histograms containing the counts of
field values within a range.
Values added to a bucket are also added to the larger buckets in the
distribution. This creates a [cumulative histogram](https://en.wikipedia.org/wiki/Histogram#/media/File:Cumulative_vs_normal_histogram.svg).
Like other Telegraf aggregators, the metric is emitted every `period` seconds.
Bucket counts, however, are not reset between periods and will be
non-decreasing while Telegraf is running.
#### Design
Each metric is passed to the aggregator, which looks up the histogram
buckets configured for its fields. If the value falls within a configured
bucket, the aggregator increments that bucket by 1; values above the largest
bucket border are counted in the `+Inf` bucket. Every `period`
seconds this data is forwarded to the outputs.
The hit-counting algorithm is based on the one implemented in the Prometheus
[client](https://github.com/prometheus/client_golang/blob/master/prometheus/histogram.go).
### Configuration
```toml
# Configuration for aggregate histogram metrics
[[aggregators.histogram]]
## The period in which to flush the aggregator.
period = "30s"
## If true, the original metric will be dropped by the
## aggregator and will not get sent to the output plugins.
drop_original = false
## Example config that aggregates all fields of the metric.
# [[aggregators.histogram.config]]
# ## The set of buckets.
# buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0]
# ## The name of metric.
# measurement_name = "cpu"
## Example config that aggregates only specific fields of the metric.
# [[aggregators.histogram.config]]
# ## The set of buckets.
# buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
# ## The name of metric.
# measurement_name = "diskio"
# ## The concrete fields of metric
# fields = ["io_time", "read_time", "write_time"]
```
The user is responsible for defining the bounds of the histogram bucket as
well as the measurement name and fields to aggregate.
Each histogram config section must contain a `buckets` and `measurement_name`
option. Optionally, if `fields` is set only the fields listed will be
aggregated. If `fields` is not set all fields are aggregated.
The `buckets` option contains a list of floats which specify the bucket
boundaries. Each float value defines the inclusive upper bound of the bucket.
The `+Inf` bucket is added automatically and does not need to be defined.
### Measurements & Fields:
The postfix `bucket` will be added to each field key.
- measurement1
- field1_bucket
- field2_bucket
### Tags:
All measurements are given the tag `le`. This tag holds the upper border of
the bucket: the metric value is less than or equal to the value of this tag.
For example, assume a metric value of 10 and the following buckets:
[5, 10, 30, 70, 100]. Then the tag `le` will have the value 10, because the
value falls into the bucket with the upper border value `10`.
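A small sketch of that bucket lookup (hypothetical buckets and values; the aggregator does the same with `sort.SearchFloat64s` in its `Add` method):

```go
package main

import (
	"fmt"
	"sort"
)

func main() {
	buckets := []float64{5, 10, 30, 70, 100}

	// SearchFloat64s returns the smallest index i with buckets[i] >= v,
	// i.e. the first bucket whose inclusive upper border admits the value.
	// Index len(buckets) stands for the implicit +Inf bucket.
	for _, v := range []float64{10, 31, 500} {
		i := sort.SearchFloat64s(buckets, v)
		if i < len(buckets) {
			fmt.Printf("value %v -> le=%v\n", v, buckets[i])
		} else {
			fmt.Printf("value %v -> le=+Inf\n", v)
		}
	}
}
```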
### Example Output:
```
cpu,cpu=cpu1,host=localhost,le=0.0 usage_idle_bucket=0i 1486998330000000000
cpu,cpu=cpu1,host=localhost,le=10.0 usage_idle_bucket=0i 1486998330000000000
cpu,cpu=cpu1,host=localhost,le=20.0 usage_idle_bucket=1i 1486998330000000000
cpu,cpu=cpu1,host=localhost,le=30.0 usage_idle_bucket=2i 1486998330000000000
cpu,cpu=cpu1,host=localhost,le=40.0 usage_idle_bucket=2i 1486998330000000000
cpu,cpu=cpu1,host=localhost,le=50.0 usage_idle_bucket=2i 1486998330000000000
cpu,cpu=cpu1,host=localhost,le=60.0 usage_idle_bucket=2i 1486998330000000000
cpu,cpu=cpu1,host=localhost,le=70.0 usage_idle_bucket=2i 1486998330000000000
cpu,cpu=cpu1,host=localhost,le=80.0 usage_idle_bucket=2i 1486998330000000000
cpu,cpu=cpu1,host=localhost,le=90.0 usage_idle_bucket=2i 1486998330000000000
cpu,cpu=cpu1,host=localhost,le=100.0 usage_idle_bucket=2i 1486998330000000000
cpu,cpu=cpu1,host=localhost,le=+Inf usage_idle_bucket=2i 1486998330000000000
```

@@ -0,0 +1,315 @@
package histogram
import (
"sort"
"strconv"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/aggregators"
)
// bucketTag is the tag which contains the right bucket border
const bucketTag = "le"
// bucketInf is the right bucket border for infinite values
const bucketInf = "+Inf"
// HistogramAggregator is an aggregator that holds the histogram configs and the histograms collected for the configured metrics
type HistogramAggregator struct {
Configs []config `toml:"config"`
buckets bucketsByMetrics
cache map[uint64]metricHistogramCollection
}
// config holds the metric name, the fields to aggregate, and the histogram buckets.
type config struct {
Metric string `toml:"measurement_name"`
Fields []string `toml:"fields"`
Buckets buckets `toml:"buckets"`
}
// bucketsByMetrics contains the buckets grouped by metric and field name
type bucketsByMetrics map[string]bucketsByFields
// bucketsByFields contains the buckets grouped by field name
type bucketsByFields map[string]buckets
// buckets contains the right borders of the buckets
type buckets []float64
// metricHistogramCollection aggregates the histogram data
type metricHistogramCollection struct {
histogramCollection map[string]counts
name string
tags map[string]string
}
// counts holds the number of hits in each bucket
type counts []int64
// groupedByCountFields contains grouped fields by their count and fields values
type groupedByCountFields struct {
name string
tags map[string]string
fieldsWithCount map[string]int64
}
// NewHistogramAggregator creates a new histogram aggregator
func NewHistogramAggregator() telegraf.Aggregator {
h := &HistogramAggregator{}
h.buckets = make(bucketsByMetrics)
h.resetCache()
return h
}
var sampleConfig = `
## The period in which to flush the aggregator.
period = "30s"
## If true, the original metric will be dropped by the
## aggregator and will not get sent to the output plugins.
drop_original = false
## Example config that aggregates all fields of the metric.
# [[aggregators.histogram.config]]
# ## The set of buckets.
# buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0]
# ## The name of the metric.
# measurement_name = "cpu"
## Example config that aggregates only specific fields of the metric.
# [[aggregators.histogram.config]]
# ## The set of buckets.
# buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
# ## The name of the metric.
# measurement_name = "diskio"
# ## The fields of the metric to aggregate.
# fields = ["io_time", "read_time", "write_time"]
`
// SampleConfig returns the sample configuration
func (h *HistogramAggregator) SampleConfig() string {
return sampleConfig
}
// Description returns the description of the aggregator plugin
func (h *HistogramAggregator) Description() string {
return "Create aggregate histograms."
}
// Add adds a new hit to the buckets
func (h *HistogramAggregator) Add(in telegraf.Metric) {
bucketsByField := make(map[string][]float64)
for field := range in.Fields() {
buckets := h.getBuckets(in.Name(), field)
if buckets != nil {
bucketsByField[field] = buckets
}
}
if len(bucketsByField) == 0 {
return
}
id := in.HashID()
agr, ok := h.cache[id]
if !ok {
agr = metricHistogramCollection{
name: in.Name(),
tags: in.Tags(),
histogramCollection: make(map[string]counts),
}
}
for field, value := range in.Fields() {
if buckets, ok := bucketsByField[field]; ok {
if agr.histogramCollection[field] == nil {
agr.histogramCollection[field] = make(counts, len(buckets)+1)
}
if value, ok := convert(value); ok {
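// SearchFloat64s returns the smallest index i with buckets[i] >= value,
// so each bucket's right border is inclusive; values beyond the last
// border map to index len(buckets), the extra +Inf slot.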
index := sort.SearchFloat64s(buckets, value)
agr.histogramCollection[field][index]++
}
}
}
h.cache[id] = agr
}
// Push emits the aggregated histogram values to the accumulator
func (h *HistogramAggregator) Push(acc telegraf.Accumulator) {
metricsWithGroupedFields := []groupedByCountFields{}
for _, aggregate := range h.cache {
for field, counts := range aggregate.histogramCollection {
h.groupFieldsByBuckets(&metricsWithGroupedFields, aggregate.name, field, copyTags(aggregate.tags), counts)
}
}
for _, metric := range metricsWithGroupedFields {
acc.AddFields(metric.name, makeFieldsWithCount(metric.fieldsWithCount), metric.tags)
}
}
// groupFieldsByBuckets groups fields by metric buckets which are represented as tags
func (h *HistogramAggregator) groupFieldsByBuckets(
metricsWithGroupedFields *[]groupedByCountFields,
name string,
field string,
tags map[string]string,
counts []int64,
) {
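// The count is accumulated while walking the buckets, so every bucket
// also includes the hits of all smaller buckets (a cumulative histogram,
// matching the Prometheus `le` semantics).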
count := int64(0)
for index, bucket := range h.getBuckets(name, field) {
count += counts[index]
tags[bucketTag] = strconv.FormatFloat(bucket, 'f', -1, 64)
h.groupField(metricsWithGroupedFields, name, field, count, copyTags(tags))
}
count += counts[len(counts)-1]
tags[bucketTag] = bucketInf
h.groupField(metricsWithGroupedFields, name, field, count, tags)
}
// groupField groups a field by its count value
func (h *HistogramAggregator) groupField(
metricsWithGroupedFields *[]groupedByCountFields,
name string,
field string,
count int64,
tags map[string]string,
) {
for key, metric := range *metricsWithGroupedFields {
if name == metric.name && isTagsIdentical(tags, metric.tags) {
(*metricsWithGroupedFields)[key].fieldsWithCount[field] = count
return
}
}
fieldsWithCount := map[string]int64{
field: count,
}
*metricsWithGroupedFields = append(
*metricsWithGroupedFields,
groupedByCountFields{name: name, tags: tags, fieldsWithCount: fieldsWithCount},
)
}
// Reset does nothing because counts must accumulate over many periods; if they
// were cleared every period, each histogram would cover only a small slice of
// the distribution.
func (h *HistogramAggregator) Reset() {}
// resetCache resets the cached counts (hits) in the buckets
func (h *HistogramAggregator) resetCache() {
h.cache = make(map[uint64]metricHistogramCollection)
}
// getBuckets finds and returns the buckets for the given metric and field
func (h *HistogramAggregator) getBuckets(metric string, field string) []float64 {
if buckets, ok := h.buckets[metric][field]; ok {
return buckets
}
for _, config := range h.Configs {
if config.Metric == metric {
if !isBucketExists(field, config) {
continue
}
if _, ok := h.buckets[metric]; !ok {
h.buckets[metric] = make(bucketsByFields)
}
h.buckets[metric][field] = sortBuckets(config.Buckets)
}
}
return h.buckets[metric][field]
}
// isBucketExists reports whether buckets are configured for the passed field
func isBucketExists(field string, cfg config) bool {
if len(cfg.Fields) == 0 {
return true
}
for _, fl := range cfg.Fields {
if fl == field {
return true
}
}
return false
}
// sortBuckets sorts the buckets if needed
func sortBuckets(buckets []float64) []float64 {
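// Only sort when an out-of-order pair is found, so already-sorted
// bucket lists avoid the cost of sorting.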
for i, bucket := range buckets {
if i < len(buckets)-1 && bucket >= buckets[i+1] {
sort.Float64s(buckets)
break
}
}
return buckets
}
// convert converts an interface value to float64 when possible
func convert(in interface{}) (float64, bool) {
switch v := in.(type) {
case float64:
return v, true
case int64:
return float64(v), true
default:
return 0, false
}
}
// copyTags copies tags
func copyTags(tags map[string]string) map[string]string {
copiedTags := map[string]string{}
for key, val := range tags {
copiedTags[key] = val
}
return copiedTags
}
// isTagsIdentical reports whether two tag maps are identical
func isTagsIdentical(originalTags, checkedTags map[string]string) bool {
if len(originalTags) != len(checkedTags) {
return false
}
for tagName, tagValue := range originalTags {
if tagValue != checkedTags[tagName] {
return false
}
}
return true
}
// makeFieldsWithCount appends the _bucket suffix to each field key and assigns its count
func makeFieldsWithCount(fieldsWithCountIn map[string]int64) map[string]interface{} {
fieldsWithCountOut := map[string]interface{}{}
for field, count := range fieldsWithCountIn {
fieldsWithCountOut[field+"_bucket"] = count
}
return fieldsWithCountOut
}
// init registers the histogram aggregator plugin
func init() {
aggregators.Add("histogram", func() telegraf.Aggregator {
return NewHistogramAggregator()
})
}


@@ -0,0 +1,210 @@
package histogram
import (
"fmt"
"testing"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
)
// NewTestHistogram creates a new test histogram aggregator with the specified config
func NewTestHistogram(cfg []config) telegraf.Aggregator {
htm := &HistogramAggregator{Configs: cfg}
htm.buckets = make(bucketsByMetrics)
htm.resetCache()
return htm
}
// firstMetric1 is the first test metric
var firstMetric1, _ = metric.New(
"first_metric_name",
map[string]string{"tag_name": "tag_value"},
map[string]interface{}{
"a": float64(15.3),
"b": float64(40),
},
time.Now(),
)
// firstMetric2 is the first test metric with different values
var firstMetric2, _ = metric.New(
"first_metric_name",
map[string]string{"tag_name": "tag_value"},
map[string]interface{}{
"a": float64(15.9),
"c": float64(40),
},
time.Now(),
)
// secondMetric is the second metric
var secondMetric, _ = metric.New(
"second_metric_name",
map[string]string{"tag_name": "tag_value"},
map[string]interface{}{
"a": float64(105),
"ignoreme": "string",
"andme": true,
},
time.Now(),
)
// BenchmarkApply runs benchmarks
func BenchmarkApply(b *testing.B) {
histogram := NewHistogramAggregator()
for n := 0; n < b.N; n++ {
histogram.Add(firstMetric1)
histogram.Add(firstMetric2)
histogram.Add(secondMetric)
}
}
// TestHistogramWithPeriodAndOneField tests metrics for one period and for one field
func TestHistogramWithPeriodAndOneField(t *testing.T) {
var cfg []config
cfg = append(cfg, config{Metric: "first_metric_name", Fields: []string{"a"}, Buckets: []float64{0.0, 10.0, 20.0, 30.0, 40.0}})
histogram := NewTestHistogram(cfg)
acc := &testutil.Accumulator{}
histogram.Add(firstMetric1)
histogram.Add(firstMetric2)
histogram.Push(acc)
if len(acc.Metrics) != 6 {
assert.Fail(t, "Incorrect number of metrics")
}
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(0)}, "0")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(0)}, "10")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2)}, "20")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2)}, "30")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2)}, "40")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2)}, bucketInf)
}
// TestHistogramWithPeriodAndAllFields tests two metrics for one period and for all fields
func TestHistogramWithPeriodAndAllFields(t *testing.T) {
var cfg []config
cfg = append(cfg, config{Metric: "first_metric_name", Buckets: []float64{0.0, 15.5, 20.0, 30.0, 40.0}})
cfg = append(cfg, config{Metric: "second_metric_name", Buckets: []float64{0.0, 4.0, 10.0, 23.0, 30.0}})
histogram := NewTestHistogram(cfg)
acc := &testutil.Accumulator{}
histogram.Add(firstMetric1)
histogram.Add(firstMetric2)
histogram.Add(secondMetric)
histogram.Push(acc)
if len(acc.Metrics) != 12 {
assert.Fail(t, "Incorrect number of metrics")
}
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(0), "b_bucket": int64(0), "c_bucket": int64(0)}, "0")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(1), "b_bucket": int64(0), "c_bucket": int64(0)}, "15.5")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2), "b_bucket": int64(0), "c_bucket": int64(0)}, "20")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2), "b_bucket": int64(0), "c_bucket": int64(0)}, "30")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2), "b_bucket": int64(1), "c_bucket": int64(1)}, "40")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2), "b_bucket": int64(1), "c_bucket": int64(1)}, bucketInf)
assertContainsTaggedField(t, acc, "second_metric_name", map[string]interface{}{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, "0")
assertContainsTaggedField(t, acc, "second_metric_name", map[string]interface{}{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, "4")
assertContainsTaggedField(t, acc, "second_metric_name", map[string]interface{}{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, "10")
assertContainsTaggedField(t, acc, "second_metric_name", map[string]interface{}{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, "23")
assertContainsTaggedField(t, acc, "second_metric_name", map[string]interface{}{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, "30")
assertContainsTaggedField(t, acc, "second_metric_name", map[string]interface{}{"a_bucket": int64(1), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, bucketInf)
}
// TestHistogramDifferentPeriodsAndAllFields tests two metrics getting added with a push/reset in between (simulates
// getting added in different periods) for all fields
func TestHistogramDifferentPeriodsAndAllFields(t *testing.T) {
var cfg []config
cfg = append(cfg, config{Metric: "first_metric_name", Buckets: []float64{0.0, 10.0, 20.0, 30.0, 40.0}})
histogram := NewTestHistogram(cfg)
acc := &testutil.Accumulator{}
histogram.Add(firstMetric1)
histogram.Push(acc)
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(0), "b_bucket": int64(0)}, "0")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(0), "b_bucket": int64(0)}, "10")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(1), "b_bucket": int64(0)}, "20")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(1), "b_bucket": int64(0)}, "30")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(1), "b_bucket": int64(1)}, "40")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(1), "b_bucket": int64(1)}, bucketInf)
acc.ClearMetrics()
histogram.Add(firstMetric2)
histogram.Push(acc)
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(0), "b_bucket": int64(0), "c_bucket": int64(0)}, "0")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(0), "b_bucket": int64(0), "c_bucket": int64(0)}, "10")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2), "b_bucket": int64(0), "c_bucket": int64(0)}, "20")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2), "b_bucket": int64(0), "c_bucket": int64(0)}, "30")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2), "b_bucket": int64(1), "c_bucket": int64(1)}, "40")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2), "b_bucket": int64(1), "c_bucket": int64(1)}, bucketInf)
}
// TestWrongBucketsOrder tests that a panic with the expected message is raised when the buckets are in the wrong order
func TestWrongBucketsOrder(t *testing.T) {
defer func() {
if r := recover(); r != nil {
assert.Equal(
t,
"histogram buckets must be in increasing order: 90.00 >= 20.00, metrics: first_metric_name, field: a",
fmt.Sprint(r),
)
}
}()
var cfg []config
cfg = append(cfg, config{Metric: "first_metric_name", Buckets: []float64{0.0, 90.0, 20.0, 30.0, 40.0}})
histogram := NewTestHistogram(cfg)
histogram.Add(firstMetric2)
}
// assertContainsTaggedField is a helper function for checking histogram data
func assertContainsTaggedField(t *testing.T, acc *testutil.Accumulator, metricName string, fields map[string]interface{}, le string) {
acc.Lock()
defer acc.Unlock()
for _, checkedMetric := range acc.Metrics {
// check metric name
if checkedMetric.Measurement != metricName {
continue
}
// check "le" tag
if checkedMetric.Tags[bucketTag] != le {
continue
}
// check fields
isFieldsIdentical := true
for field := range fields {
if _, ok := checkedMetric.Fields[field]; !ok {
isFieldsIdentical = false
break
}
}
if !isFieldsIdentical {
continue
}
// check fields with their counts
if assert.Equal(t, fields, checkedMetric.Fields) {
return
}
assert.Fail(t, fmt.Sprintf("incorrect fields %v of metric %s", fields, metricName))
}
assert.Fail(t, fmt.Sprintf("unknown measurement '%s' with tags: %v, fields: %v", metricName, map[string]string{"le": le}, fields))
}


@@ -0,0 +1,42 @@
# MinMax Aggregator Plugin
The minmax aggregator plugin aggregates the min and max values of each field it
sees, emitting the aggregate every `period` seconds.
### Configuration:
```toml
# Keep the aggregate min/max of each metric passing through.
[[aggregators.minmax]]
## General Aggregator Arguments:
## The period on which to flush & clear the aggregator.
period = "30s"
## If true, the original metric will be dropped by the
## aggregator and will not get sent to the output plugins.
drop_original = false
```
### Measurements & Fields:
- measurement1
- field1_max
- field1_min
### Tags:
No tags are applied by this aggregator.
### Example Output:
```
$ telegraf --config telegraf.conf --quiet
system,host=tars load1=1.72 1475583980000000000
system,host=tars load1=1.6 1475583990000000000
system,host=tars load1=1.66 1475584000000000000
system,host=tars load1=1.63 1475584010000000000
system,host=tars load1_max=1.72,load1_min=1.6 1475584010000000000
system,host=tars load1=1.46 1475584020000000000
system,host=tars load1=1.39 1475584030000000000
system,host=tars load1=1.41 1475584040000000000
system,host=tars load1_max=1.46,load1_min=1.39 1475584040000000000
```


@@ -0,0 +1,119 @@
package minmax
import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/aggregators"
)
type MinMax struct {
cache map[uint64]aggregate
}
func NewMinMax() telegraf.Aggregator {
mm := &MinMax{}
mm.Reset()
return mm
}
type aggregate struct {
fields map[string]minmax
name string
tags map[string]string
}
type minmax struct {
min float64
max float64
}
var sampleConfig = `
## General Aggregator Arguments:
## The period on which to flush & clear the aggregator.
period = "30s"
## If true, the original metric will be dropped by the
## aggregator and will not get sent to the output plugins.
drop_original = false
`
func (m *MinMax) SampleConfig() string {
return sampleConfig
}
func (m *MinMax) Description() string {
return "Keep the aggregate min/max of each metric passing through."
}
func (m *MinMax) Add(in telegraf.Metric) {
id := in.HashID()
if _, ok := m.cache[id]; !ok {
// hit an uncached metric, create a cache entry for it:
a := aggregate{
name: in.Name(),
tags: in.Tags(),
fields: make(map[string]minmax),
}
for k, v := range in.Fields() {
if fv, ok := convert(v); ok {
a.fields[k] = minmax{
min: fv,
max: fv,
}
}
}
m.cache[id] = a
} else {
for k, v := range in.Fields() {
if fv, ok := convert(v); ok {
if _, ok := m.cache[id].fields[k]; !ok {
// hit an uncached field of a cached metric
m.cache[id].fields[k] = minmax{
min: fv,
max: fv,
}
continue
}
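// Map values are not addressable, so copy the minmax struct,
// update it, and store it back in the map.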
if fv < m.cache[id].fields[k].min {
tmp := m.cache[id].fields[k]
tmp.min = fv
m.cache[id].fields[k] = tmp
} else if fv > m.cache[id].fields[k].max {
tmp := m.cache[id].fields[k]
tmp.max = fv
m.cache[id].fields[k] = tmp
}
}
}
}
}
func (m *MinMax) Push(acc telegraf.Accumulator) {
for _, aggregate := range m.cache {
fields := map[string]interface{}{}
for k, v := range aggregate.fields {
fields[k+"_min"] = v.min
fields[k+"_max"] = v.max
}
acc.AddFields(aggregate.name, fields, aggregate.tags)
}
}
func (m *MinMax) Reset() {
m.cache = make(map[uint64]aggregate)
}
func convert(in interface{}) (float64, bool) {
switch v := in.(type) {
case float64:
return v, true
case int64:
return float64(v), true
default:
return 0, false
}
}
func init() {
aggregators.Add("minmax", func() telegraf.Aggregator {
return NewMinMax()
})
}


@@ -0,0 +1,162 @@
package minmax
import (
"testing"
"time"
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/testutil"
)
var m1, _ = metric.New("m1",
map[string]string{"foo": "bar"},
map[string]interface{}{
"a": int64(1),
"b": int64(1),
"c": int64(1),
"d": int64(1),
"e": int64(1),
"f": float64(2),
"g": float64(2),
"h": float64(2),
"i": float64(2),
"j": float64(3),
},
time.Now(),
)
var m2, _ = metric.New("m1",
map[string]string{"foo": "bar"},
map[string]interface{}{
"a": int64(1),
"b": int64(3),
"c": int64(3),
"d": int64(3),
"e": int64(3),
"f": float64(1),
"g": float64(1),
"h": float64(1),
"i": float64(1),
"j": float64(1),
"k": float64(200),
"ignoreme": "string",
"andme": true,
},
time.Now(),
)
func BenchmarkApply(b *testing.B) {
minmax := NewMinMax()
for n := 0; n < b.N; n++ {
minmax.Add(m1)
minmax.Add(m2)
}
}
// Test two metrics getting added.
func TestMinMaxWithPeriod(t *testing.T) {
acc := testutil.Accumulator{}
minmax := NewMinMax()
minmax.Add(m1)
minmax.Add(m2)
minmax.Push(&acc)
expectedFields := map[string]interface{}{
"a_max": float64(1),
"a_min": float64(1),
"b_max": float64(3),
"b_min": float64(1),
"c_max": float64(3),
"c_min": float64(1),
"d_max": float64(3),
"d_min": float64(1),
"e_max": float64(3),
"e_min": float64(1),
"f_max": float64(2),
"f_min": float64(1),
"g_max": float64(2),
"g_min": float64(1),
"h_max": float64(2),
"h_min": float64(1),
"i_max": float64(2),
"i_min": float64(1),
"j_max": float64(3),
"j_min": float64(1),
"k_max": float64(200),
"k_min": float64(200),
}
expectedTags := map[string]string{
"foo": "bar",
}
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}
// Test two metrics getting added with a push/reset in between (simulates
// getting added in different periods.)
func TestMinMaxDifferentPeriods(t *testing.T) {
acc := testutil.Accumulator{}
minmax := NewMinMax()
minmax.Add(m1)
minmax.Push(&acc)
expectedFields := map[string]interface{}{
"a_max": float64(1),
"a_min": float64(1),
"b_max": float64(1),
"b_min": float64(1),
"c_max": float64(1),
"c_min": float64(1),
"d_max": float64(1),
"d_min": float64(1),
"e_max": float64(1),
"e_min": float64(1),
"f_max": float64(2),
"f_min": float64(2),
"g_max": float64(2),
"g_min": float64(2),
"h_max": float64(2),
"h_min": float64(2),
"i_max": float64(2),
"i_min": float64(2),
"j_max": float64(3),
"j_min": float64(3),
}
expectedTags := map[string]string{
"foo": "bar",
}
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
acc.ClearMetrics()
minmax.Reset()
minmax.Add(m2)
minmax.Push(&acc)
expectedFields = map[string]interface{}{
"a_max": float64(1),
"a_min": float64(1),
"b_max": float64(3),
"b_min": float64(3),
"c_max": float64(3),
"c_min": float64(3),
"d_max": float64(3),
"d_min": float64(3),
"e_max": float64(3),
"e_min": float64(3),
"f_max": float64(1),
"f_min": float64(1),
"g_max": float64(1),
"g_min": float64(1),
"h_max": float64(1),
"h_min": float64(1),
"i_max": float64(1),
"i_min": float64(1),
"j_max": float64(1),
"j_min": float64(1),
"k_max": float64(200),
"k_min": float64(200),
}
expectedTags = map[string]string{
"foo": "bar",
}
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}


@@ -0,0 +1,11 @@
package aggregators
import "github.com/influxdata/telegraf"
type Creator func() telegraf.Aggregator
var Aggregators = map[string]Creator{}
func Add(name string, creator Creator) {
Aggregators[name] = creator
}
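Each aggregator registers a constructor with this map from an `init` function,
as the histogram and minmax plugins above do. A minimal sketch of the pattern
(the plugin name and type here are illustrative, not part of the codebase):

```go
package myaggregator

import (
	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/aggregators"
)

// MyAggregator is a stub that satisfies the telegraf.Aggregator interface.
type MyAggregator struct{}

func (m *MyAggregator) SampleConfig() string          { return "" }
func (m *MyAggregator) Description() string           { return "An example aggregator." }
func (m *MyAggregator) Add(in telegraf.Metric)        {}
func (m *MyAggregator) Push(acc telegraf.Accumulator) {}
func (m *MyAggregator) Reset()                        {}

// init makes the plugin available under the name used in the TOML config,
// e.g. [[aggregators.myaggregator]].
func init() {
	aggregators.Add("myaggregator", func() telegraf.Aggregator {
		return &MyAggregator{}
	})
}
```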


@@ -1,37 +1,61 @@
# Example Input Plugin
The example plugin gathers metrics about example things
The example plugin gathers metrics about example things. This description
explains at a high level what the plugin does and provides links to where
additional information can be found.
### Configuration:
This section contains the default TOML to configure the plugin. You can
generate it using `telegraf --usage <plugin-name>`.
```toml
# Description
[[inputs.example]]
# SampleConfig
example_option = "example_value"
```
### Measurements & Fields:
### Metrics:
<optional description>
Here you should add an optional description and links to where the user can
get more information about the measurements.
If the output is determined dynamically based on the input source, or there
are more metrics than can reasonably be listed, describe how the input is
mapped to the output.
- measurement1
- field1 (type, unit)
- field2 (float, percent)
- measurement2
- field3 (integer, bytes)
### Tags:
- All measurements have the following tags:
- tags:
- tag1 (optional description)
- tag2
- measurement2 has the following tags:
- fields:
- field1 (type, unit)
- field2 (float, percent)
- measurement2
- tags:
- tag3
- fields:
- field3 (integer, bytes)
### Sample Queries:
This section should contain some useful InfluxDB queries that can be used to
get started with the plugin or to generate dashboards. For each query listed,
describe at a high level what data is returned.
Get the max, mean, and min for the measurement in the last hour:
```
SELECT max(field1), mean(field1), min(field1) FROM measurement1 WHERE tag1='bar' AND time > now() - 1h GROUP BY tag1
```
### Example Output:
This section shows example output in Line Protocol format. You can often use
`telegraf --input-filter <plugin-name> --test` or use the `file` output to get
this information.
```
$ ./telegraf -config telegraf.conf -input-filter example -test
measurement1,tag1=foo,tag2=bar field1=1i,field2=2.1 1453831884664956455
measurement2,tag1=foo,tag2=bar,tag3=baz field3=1i 1453831884664956455
```

File diff suppressed because one or more lines are too long


@@ -1,104 +1,20 @@
package aerospike
import (
"bytes"
"encoding/binary"
"fmt"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
"errors"
"log"
"net"
"strconv"
"strings"
"sync"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
as "github.com/aerospike/aerospike-client-go"
)
const (
MSG_HEADER_SIZE = 8
MSG_TYPE = 1 // Info is 1
MSG_VERSION = 2
)
var (
STATISTICS_COMMAND = []byte("statistics\n")
NAMESPACES_COMMAND = []byte("namespaces\n")
)
type aerospikeMessageHeader struct {
Version uint8
Type uint8
DataLen [6]byte
}
type aerospikeMessage struct {
aerospikeMessageHeader
Data []byte
}
// Taken from aerospike-client-go/types/message.go
func (msg *aerospikeMessage) Serialize() []byte {
msg.DataLen = msgLenToBytes(int64(len(msg.Data)))
buf := bytes.NewBuffer([]byte{})
binary.Write(buf, binary.BigEndian, msg.aerospikeMessageHeader)
binary.Write(buf, binary.BigEndian, msg.Data[:])
return buf.Bytes()
}
type aerospikeInfoCommand struct {
msg *aerospikeMessage
}
// Taken from aerospike-client-go/info.go
func (nfo *aerospikeInfoCommand) parseMultiResponse() (map[string]string, error) {
responses := make(map[string]string)
offset := int64(0)
begin := int64(0)
dataLen := int64(len(nfo.msg.Data))
// Create reusable StringBuilder for performance.
for offset < dataLen {
b := nfo.msg.Data[offset]
if b == '\t' {
name := nfo.msg.Data[begin:offset]
offset++
begin = offset
// Parse field value.
for offset < dataLen {
if nfo.msg.Data[offset] == '\n' {
break
}
offset++
}
if offset > begin {
value := nfo.msg.Data[begin:offset]
responses[string(name)] = string(value)
} else {
responses[string(name)] = ""
}
offset++
begin = offset
} else if b == '\n' {
if offset > begin {
name := nfo.msg.Data[begin:offset]
responses[string(name)] = ""
}
offset++
begin = offset
} else {
offset++
}
}
if offset > begin {
name := nfo.msg.Data[begin:offset]
responses[string(name)] = ""
}
return responses, nil
}
type Aerospike struct {
Servers []string
}
@@ -115,7 +31,7 @@ func (a *Aerospike) SampleConfig() string {
}
func (a *Aerospike) Description() string {
return "Read stats from an aerospike server"
return "Read stats from aerospike server(s)"
}
func (a *Aerospike) Gather(acc telegraf.Accumulator) error {
@@ -124,214 +40,111 @@ func (a *Aerospike) Gather(acc telegraf.Accumulator) error {
}
var wg sync.WaitGroup
var outerr error
wg.Add(len(a.Servers))
for _, server := range a.Servers {
wg.Add(1)
go func(server string) {
go func(serv string) {
defer wg.Done()
outerr = a.gatherServer(server, acc)
acc.AddError(a.gatherServer(serv, acc))
}(server)
}
wg.Wait()
return outerr
return nil
}
func (a *Aerospike) gatherServer(host string, acc telegraf.Accumulator) error {
aerospikeInfo, err := getMap(STATISTICS_COMMAND, host)
func (a *Aerospike) gatherServer(hostport string, acc telegraf.Accumulator) error {
host, port, err := net.SplitHostPort(hostport)
if err != nil {
return fmt.Errorf("Aerospike info failed: %s", err)
return err
}
readAerospikeStats(aerospikeInfo, acc, host, "")
namespaces, err := getList(NAMESPACES_COMMAND, host)
iport, err := strconv.Atoi(port)
if err != nil {
return fmt.Errorf("Aerospike namespace list failed: %s", err)
iport = 3000
}
for ix := range namespaces {
nsInfo, err := getMap([]byte("namespace/"+namespaces[ix]+"\n"), host)
if err != nil {
return fmt.Errorf("Aerospike namespace '%s' query failed: %s", namespaces[ix], err)
c, err := as.NewClient(host, iport)
if err != nil {
return err
}
defer c.Close()
nodes := c.GetNodes()
for _, n := range nodes {
tags := map[string]string{
"aerospike_host": hostport,
"node_name": n.GetName(),
}
fields := make(map[string]interface{})
stats, err := as.RequestNodeStats(n)
if err != nil {
return err
}
for k, v := range stats {
val, err := parseValue(v)
if err == nil {
fields[strings.Replace(k, "-", "_", -1)] = val
} else {
log.Printf("I! skipping aerospike field %v with int64 overflow: %q", k, v)
}
}
acc.AddFields("aerospike_node", fields, tags, time.Now())
info, err := as.RequestNodeInfo(n, "namespaces")
if err != nil {
return err
}
namespaces := strings.Split(info["namespaces"], ";")
for _, namespace := range namespaces {
nTags := map[string]string{
"aerospike_host": hostport,
"node_name": n.GetName(),
}
nTags["namespace"] = namespace
nFields := make(map[string]interface{})
info, err := as.RequestNodeInfo(n, "namespace/"+namespace)
if err != nil {
continue
}
stats := strings.Split(info["namespace/"+namespace], ";")
for _, stat := range stats {
parts := strings.Split(stat, "=")
if len(parts) < 2 {
continue
}
val, err := parseValue(parts[1])
if err == nil {
nFields[strings.Replace(parts[0], "-", "_", -1)] = val
} else {
log.Printf("I! skipping aerospike field %v with int64 overflow: %q", parts[0], parts[1])
}
}
acc.AddFields("aerospike_namespace", nFields, nTags, time.Now())
}
readAerospikeStats(nsInfo, acc, host, namespaces[ix])
}
return nil
}
func getMap(key []byte, host string) (map[string]string, error) {
data, err := get(key, host)
if err != nil {
return nil, fmt.Errorf("Failed to get data: %s", err)
func parseValue(v string) (interface{}, error) {
if parsed, err := strconv.ParseInt(v, 10, 64); err == nil {
return parsed, nil
} else if _, err := strconv.ParseUint(v, 10, 64); err == nil {
// int64 overflow, yet valid uint64
return nil, errors.New("Number is too large")
} else if parsed, err := strconv.ParseBool(v); err == nil {
return parsed, nil
} else {
return v, nil
}
parsed, err := unmarshalMapInfo(data, string(key))
if err != nil {
return nil, fmt.Errorf("Failed to unmarshal data: %s", err)
}
return parsed, nil
}
func getList(key []byte, host string) ([]string, error) {
data, err := get(key, host)
if err != nil {
return nil, fmt.Errorf("Failed to get data: %s", err)
func copyTags(m map[string]string) map[string]string {
out := make(map[string]string)
for k, v := range m {
out[k] = v
}
parsed, err := unmarshalListInfo(data, string(key))
if err != nil {
return nil, fmt.Errorf("Failed to unmarshal data: %s", err)
}
return parsed, nil
}
func get(key []byte, host string) (map[string]string, error) {
var err error
var data map[string]string
asInfo := &aerospikeInfoCommand{
msg: &aerospikeMessage{
aerospikeMessageHeader: aerospikeMessageHeader{
Version: uint8(MSG_VERSION),
Type: uint8(MSG_TYPE),
DataLen: msgLenToBytes(int64(len(key))),
},
Data: key,
},
}
cmd := asInfo.msg.Serialize()
addr, err := net.ResolveTCPAddr("tcp", host)
if err != nil {
return data, fmt.Errorf("Lookup failed for '%s': %s", host, err)
}
conn, err := net.DialTCP("tcp", nil, addr)
if err != nil {
return data, fmt.Errorf("Connection failed for '%s': %s", host, err)
}
defer conn.Close()
_, err = conn.Write(cmd)
if err != nil {
return data, fmt.Errorf("Failed to send to '%s': %s", host, err)
}
msgHeader := bytes.NewBuffer(make([]byte, MSG_HEADER_SIZE))
_, err = readLenFromConn(conn, msgHeader.Bytes(), MSG_HEADER_SIZE)
if err != nil {
return data, fmt.Errorf("Failed to read header: %s", err)
}
err = binary.Read(msgHeader, binary.BigEndian, &asInfo.msg.aerospikeMessageHeader)
if err != nil {
return data, fmt.Errorf("Failed to unmarshal header: %s", err)
}
msgLen := msgLenFromBytes(asInfo.msg.aerospikeMessageHeader.DataLen)
if int64(len(asInfo.msg.Data)) != msgLen {
asInfo.msg.Data = make([]byte, msgLen)
}
_, err = readLenFromConn(conn, asInfo.msg.Data, len(asInfo.msg.Data))
if err != nil {
return data, fmt.Errorf("Failed to read from connection to '%s': %s", host, err)
}
data, err = asInfo.parseMultiResponse()
if err != nil {
return data, fmt.Errorf("Failed to parse response from '%s': %s", host, err)
}
return data, err
}
func readAerospikeStats(
stats map[string]string,
acc telegraf.Accumulator,
host string,
namespace string,
) {
fields := make(map[string]interface{})
tags := map[string]string{
"aerospike_host": host,
"namespace": "_service",
}
if namespace != "" {
tags["namespace"] = namespace
}
for key, value := range stats {
// We are going to ignore all string based keys
val, err := strconv.ParseInt(value, 10, 64)
if err == nil {
if strings.Contains(key, "-") {
key = strings.Replace(key, "-", "_", -1)
}
fields[key] = val
}
}
acc.AddFields("aerospike", fields, tags)
}
func unmarshalMapInfo(infoMap map[string]string, key string) (map[string]string, error) {
key = strings.TrimSuffix(key, "\n")
res := map[string]string{}
v, exists := infoMap[key]
if !exists {
return res, fmt.Errorf("Key '%s' missing from info", key)
}
values := strings.Split(v, ";")
for i := range values {
kv := strings.Split(values[i], "=")
if len(kv) > 1 {
res[kv[0]] = kv[1]
}
}
return res, nil
}
func unmarshalListInfo(infoMap map[string]string, key string) ([]string, error) {
key = strings.TrimSuffix(key, "\n")
v, exists := infoMap[key]
if !exists {
return []string{}, fmt.Errorf("Key '%s' missing from info", key)
}
values := strings.Split(v, ";")
return values, nil
}
func readLenFromConn(c net.Conn, buffer []byte, length int) (total int, err error) {
var r int
for total < length {
r, err = c.Read(buffer[total:length])
total += r
if err != nil {
break
}
}
return
}
// Taken from aerospike-client-go/types/message.go
func msgLenToBytes(DataLen int64) [6]byte {
b := make([]byte, 8)
binary.BigEndian.PutUint64(b, uint64(DataLen))
res := [6]byte{}
copy(res[:], b[2:])
return res
}
// Taken from aerospike-client-go/types/message.go
func msgLenFromBytes(buf [6]byte) int64 {
nbytes := append([]byte{0, 0}, buf[:]...)
DataLen := binary.BigEndian.Uint64(nbytes)
return int64(DataLen)
return out
}
func init() {


@@ -1,7 +1,6 @@
package aerospike
import (
"reflect"
"testing"
"github.com/influxdata/telegraf/testutil"
@@ -11,7 +10,7 @@ import (
func TestAerospikeStatistics(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
t.Skip("Skipping aerospike integration tests.")
}
a := &Aerospike{
@@ -20,99 +19,50 @@ func TestAerospikeStatistics(t *testing.T) {
var acc testutil.Accumulator
err := a.Gather(&acc)
err := acc.GatherError(a.Gather)
require.NoError(t, err)
// Only use a few of the metrics
asMetrics := []string{
"transactions",
"stat_write_errs",
"stat_read_reqs",
"stat_write_reqs",
}
for _, metric := range asMetrics {
assert.True(t, acc.HasIntField("aerospike", metric), metric)
}
assert.True(t, acc.HasMeasurement("aerospike_node"))
assert.True(t, acc.HasTag("aerospike_node", "node_name"))
assert.True(t, acc.HasMeasurement("aerospike_namespace"))
assert.True(t, acc.HasTag("aerospike_namespace", "node_name"))
assert.True(t, acc.HasInt64Field("aerospike_node", "batch_error"))
}
func TestAerospikeMsgLenFromToBytes(t *testing.T) {
var i int64 = 8
assert.True(t, i == msgLenFromBytes(msgLenToBytes(i)))
}
func TestAerospikeStatisticsPartialErr(t *testing.T) {
if testing.Short() {
t.Skip("Skipping aerospike integration tests.")
}
a := &Aerospike{
Servers: []string{
testutil.GetLocalHost() + ":3000",
testutil.GetLocalHost() + ":9999",
},
}
func TestReadAerospikeStatsNoNamespace(t *testing.T) {
// Also test for re-writing
var acc testutil.Accumulator
stats := map[string]string{
"stat-write-errs": "12345",
"stat_read_reqs": "12345",
}
readAerospikeStats(stats, &acc, "host1", "")
fields := map[string]interface{}{
"stat_write_errs": int64(12345),
"stat_read_reqs": int64(12345),
}
tags := map[string]string{
"aerospike_host": "host1",
"namespace": "_service",
}
acc.AssertContainsTaggedFields(t, "aerospike", fields, tags)
require.Error(t, acc.GatherError(a.Gather))
assert.True(t, acc.HasMeasurement("aerospike_node"))
assert.True(t, acc.HasMeasurement("aerospike_namespace"))
assert.True(t, acc.HasInt64Field("aerospike_node", "batch_error"))
}
func TestReadAerospikeStatsNamespace(t *testing.T) {
var acc testutil.Accumulator
stats := map[string]string{
"stat_write_errs": "12345",
"stat_read_reqs": "12345",
}
readAerospikeStats(stats, &acc, "host1", "test")
func TestAerospikeParseValue(t *testing.T) {
// uint64 with value bigger than int64 max
val, err := parseValue("18446744041841121751")
assert.Nil(t, val)
assert.Error(t, err)
fields := map[string]interface{}{
"stat_write_errs": int64(12345),
"stat_read_reqs": int64(12345),
}
tags := map[string]string{
"aerospike_host": "host1",
"namespace": "test",
}
acc.AssertContainsTaggedFields(t, "aerospike", fields, tags)
}
func TestAerospikeUnmarshalList(t *testing.T) {
i := map[string]string{
"test": "one;two;three",
}
expected := []string{"one", "two", "three"}
list, err := unmarshalListInfo(i, "test2")
assert.True(t, err != nil)
list, err = unmarshalListInfo(i, "test")
assert.True(t, err == nil)
equal := true
for ix := range expected {
if list[ix] != expected[ix] {
equal = false
break
}
}
assert.True(t, equal)
}
func TestAerospikeUnmarshalMap(t *testing.T) {
i := map[string]string{
"test": "key1=value1;key2=value2",
}
expected := map[string]string{
"key1": "value1",
"key2": "value2",
}
m, err := unmarshalMapInfo(i, "test")
assert.True(t, err == nil)
assert.True(t, reflect.DeepEqual(m, expected))
// int values
val, err = parseValue("42")
assert.NoError(t, err)
assert.Equal(t, val, int64(42), "must be parsed as int")
// string values
val, err = parseValue("BB977942A2CA502")
assert.NoError(t, err)
assert.Equal(t, val, `BB977942A2CA502`, "must be left as string")
}


@@ -2,38 +2,74 @@ package all
import (
_ "github.com/influxdata/telegraf/plugins/inputs/aerospike"
_ "github.com/influxdata/telegraf/plugins/inputs/amqp_consumer"
_ "github.com/influxdata/telegraf/plugins/inputs/apache"
_ "github.com/influxdata/telegraf/plugins/inputs/bcache"
_ "github.com/influxdata/telegraf/plugins/inputs/bond"
_ "github.com/influxdata/telegraf/plugins/inputs/cassandra"
_ "github.com/influxdata/telegraf/plugins/inputs/ceph"
_ "github.com/influxdata/telegraf/plugins/inputs/cgroup"
_ "github.com/influxdata/telegraf/plugins/inputs/chrony"
_ "github.com/influxdata/telegraf/plugins/inputs/cloudwatch"
_ "github.com/influxdata/telegraf/plugins/inputs/conntrack"
_ "github.com/influxdata/telegraf/plugins/inputs/consul"
_ "github.com/influxdata/telegraf/plugins/inputs/couchbase"
_ "github.com/influxdata/telegraf/plugins/inputs/couchdb"
_ "github.com/influxdata/telegraf/plugins/inputs/dcos"
_ "github.com/influxdata/telegraf/plugins/inputs/disque"
_ "github.com/influxdata/telegraf/plugins/inputs/dmcache"
_ "github.com/influxdata/telegraf/plugins/inputs/dns_query"
_ "github.com/influxdata/telegraf/plugins/inputs/docker"
_ "github.com/influxdata/telegraf/plugins/inputs/dovecot"
_ "github.com/influxdata/telegraf/plugins/inputs/elasticsearch"
_ "github.com/influxdata/telegraf/plugins/inputs/exec"
_ "github.com/influxdata/telegraf/plugins/inputs/github_webhooks"
_ "github.com/influxdata/telegraf/plugins/inputs/fail2ban"
_ "github.com/influxdata/telegraf/plugins/inputs/filestat"
_ "github.com/influxdata/telegraf/plugins/inputs/fluentd"
_ "github.com/influxdata/telegraf/plugins/inputs/graylog"
_ "github.com/influxdata/telegraf/plugins/inputs/haproxy"
_ "github.com/influxdata/telegraf/plugins/inputs/hddtemp"
_ "github.com/influxdata/telegraf/plugins/inputs/http_listener"
_ "github.com/influxdata/telegraf/plugins/inputs/http_response"
_ "github.com/influxdata/telegraf/plugins/inputs/httpjson"
_ "github.com/influxdata/telegraf/plugins/inputs/influxdb"
_ "github.com/influxdata/telegraf/plugins/inputs/internal"
_ "github.com/influxdata/telegraf/plugins/inputs/interrupts"
_ "github.com/influxdata/telegraf/plugins/inputs/ipmi_sensor"
_ "github.com/influxdata/telegraf/plugins/inputs/iptables"
_ "github.com/influxdata/telegraf/plugins/inputs/jolokia"
_ "github.com/influxdata/telegraf/plugins/inputs/jolokia2"
_ "github.com/influxdata/telegraf/plugins/inputs/kafka_consumer"
_ "github.com/influxdata/telegraf/plugins/inputs/kafka_consumer_legacy"
_ "github.com/influxdata/telegraf/plugins/inputs/kapacitor"
_ "github.com/influxdata/telegraf/plugins/inputs/kubernetes"
_ "github.com/influxdata/telegraf/plugins/inputs/leofs"
_ "github.com/influxdata/telegraf/plugins/inputs/logparser"
_ "github.com/influxdata/telegraf/plugins/inputs/lustre2"
_ "github.com/influxdata/telegraf/plugins/inputs/mailchimp"
_ "github.com/influxdata/telegraf/plugins/inputs/memcached"
_ "github.com/influxdata/telegraf/plugins/inputs/mesos"
_ "github.com/influxdata/telegraf/plugins/inputs/minecraft"
_ "github.com/influxdata/telegraf/plugins/inputs/mongodb"
_ "github.com/influxdata/telegraf/plugins/inputs/mqtt_consumer"
_ "github.com/influxdata/telegraf/plugins/inputs/mysql"
_ "github.com/influxdata/telegraf/plugins/inputs/nats_consumer"
_ "github.com/influxdata/telegraf/plugins/inputs/net_response"
_ "github.com/influxdata/telegraf/plugins/inputs/nginx"
_ "github.com/influxdata/telegraf/plugins/inputs/nginx_plus"
_ "github.com/influxdata/telegraf/plugins/inputs/nsq"
_ "github.com/influxdata/telegraf/plugins/inputs/nsq_consumer"
_ "github.com/influxdata/telegraf/plugins/inputs/nstat"
_ "github.com/influxdata/telegraf/plugins/inputs/ntpq"
_ "github.com/influxdata/telegraf/plugins/inputs/openldap"
_ "github.com/influxdata/telegraf/plugins/inputs/opensmtpd"
_ "github.com/influxdata/telegraf/plugins/inputs/passenger"
_ "github.com/influxdata/telegraf/plugins/inputs/pf"
_ "github.com/influxdata/telegraf/plugins/inputs/phpfpm"
_ "github.com/influxdata/telegraf/plugins/inputs/ping"
_ "github.com/influxdata/telegraf/plugins/inputs/postfix"
_ "github.com/influxdata/telegraf/plugins/inputs/postgresql"
_ "github.com/influxdata/telegraf/plugins/inputs/postgresql_extensible"
_ "github.com/influxdata/telegraf/plugins/inputs/powerdns"
_ "github.com/influxdata/telegraf/plugins/inputs/procstat"
_ "github.com/influxdata/telegraf/plugins/inputs/prometheus"
@@ -43,16 +79,30 @@ import (
_ "github.com/influxdata/telegraf/plugins/inputs/redis"
_ "github.com/influxdata/telegraf/plugins/inputs/rethinkdb"
_ "github.com/influxdata/telegraf/plugins/inputs/riak"
_ "github.com/influxdata/telegraf/plugins/inputs/salesforce"
_ "github.com/influxdata/telegraf/plugins/inputs/sensors"
_ "github.com/influxdata/telegraf/plugins/inputs/smart"
_ "github.com/influxdata/telegraf/plugins/inputs/snmp"
_ "github.com/influxdata/telegraf/plugins/inputs/snmp_legacy"
_ "github.com/influxdata/telegraf/plugins/inputs/socket_listener"
_ "github.com/influxdata/telegraf/plugins/inputs/solr"
_ "github.com/influxdata/telegraf/plugins/inputs/sqlserver"
_ "github.com/influxdata/telegraf/plugins/inputs/statsd"
_ "github.com/influxdata/telegraf/plugins/inputs/sysstat"
_ "github.com/influxdata/telegraf/plugins/inputs/system"
_ "github.com/influxdata/telegraf/plugins/inputs/tail"
_ "github.com/influxdata/telegraf/plugins/inputs/tcp_listener"
_ "github.com/influxdata/telegraf/plugins/inputs/teamspeak"
_ "github.com/influxdata/telegraf/plugins/inputs/tomcat"
_ "github.com/influxdata/telegraf/plugins/inputs/trig"
_ "github.com/influxdata/telegraf/plugins/inputs/twemproxy"
_ "github.com/influxdata/telegraf/plugins/inputs/udp_listener"
_ "github.com/influxdata/telegraf/plugins/inputs/unbound"
_ "github.com/influxdata/telegraf/plugins/inputs/varnish"
_ "github.com/influxdata/telegraf/plugins/inputs/webhooks"
_ "github.com/influxdata/telegraf/plugins/inputs/win_perf_counters"
_ "github.com/influxdata/telegraf/plugins/inputs/win_services"
_ "github.com/influxdata/telegraf/plugins/inputs/zfs"
_ "github.com/influxdata/telegraf/plugins/inputs/zipkin"
_ "github.com/influxdata/telegraf/plugins/inputs/zookeeper"
)


@@ -0,0 +1,47 @@
# AMQP Consumer Input Plugin
This plugin provides a consumer for use with AMQP 0-9-1; a prominent implementation of this protocol is [RabbitMQ](https://www.rabbitmq.com/).
Metrics are read from a topic exchange using the configured queue and binding_key.
The message payload should be formatted in one of the [Telegraf Data Formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md).
For an introduction to AMQP see:
- https://www.rabbitmq.com/tutorials/amqp-concepts.html
- https://www.rabbitmq.com/getstarted.html
The following defaults are known to work with RabbitMQ:
```toml
# AMQP consumer plugin
[[inputs.amqp_consumer]]
## AMQP url
url = "amqp://localhost:5672/influxdb"
## AMQP exchange
exchange = "telegraf"
## AMQP queue name
queue = "telegraf"
## Binding Key
binding_key = "#"
## Controls how many messages the server will try to keep on the network
## for consumers before receiving delivery acks.
# prefetch_count = 50
## Auth method. PLAIN and EXTERNAL are supported.
## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as
## described here: https://www.rabbitmq.com/plugins.html
# auth_method = "PLAIN"
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
## Use SSL but skip chain & host verification
# insecure_skip_verify = false
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "influx"
```


@@ -0,0 +1,280 @@
package amqp_consumer
import (
"fmt"
"log"
"strings"
"sync"
"time"
"github.com/streadway/amqp"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/plugins/parsers"
)
// AMQPConsumer is the top level struct for this plugin
type AMQPConsumer struct {
URL string
// AMQP exchange
Exchange string
// Queue Name
Queue string
// Binding Key
BindingKey string `toml:"binding_key"`
// Controls how many messages the server will try to keep on the network
// for consumers before receiving delivery acks.
PrefetchCount int
// AMQP Auth method
AuthMethod string
// Path to CA file
SSLCA string `toml:"ssl_ca"`
// Path to host cert file
SSLCert string `toml:"ssl_cert"`
// Path to cert key file
SSLKey string `toml:"ssl_key"`
// Use SSL but skip chain & host verification
InsecureSkipVerify bool
parser parsers.Parser
conn *amqp.Connection
wg *sync.WaitGroup
}
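// externalAuth implements the amqp.Authentication interface for the
// EXTERNAL SASL mechanism, typically used with TLS client certificates.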
type externalAuth struct{}
func (a *externalAuth) Mechanism() string {
return "EXTERNAL"
}
func (a *externalAuth) Response() string {
return fmt.Sprintf("\000")
}
const (
DefaultAuthMethod = "PLAIN"
DefaultPrefetchCount = 50
)
func (a *AMQPConsumer) SampleConfig() string {
return `
## AMQP url
url = "amqp://localhost:5672/influxdb"
## AMQP exchange
exchange = "telegraf"
## AMQP queue name
queue = "telegraf"
## Binding Key
binding_key = "#"
## Maximum number of messages server should give to the worker.
prefetch_count = 50
## Auth method. PLAIN and EXTERNAL are supported
## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as
## described here: https://www.rabbitmq.com/plugins.html
# auth_method = "PLAIN"
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
## Use SSL but skip chain & host verification
# insecure_skip_verify = false
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "influx"
`
}
func (a *AMQPConsumer) Description() string {
return "AMQP consumer plugin"
}
func (a *AMQPConsumer) SetParser(parser parsers.Parser) {
a.parser = parser
}
// All gathering is done in the Start function
func (a *AMQPConsumer) Gather(_ telegraf.Accumulator) error {
return nil
}
func (a *AMQPConsumer) createConfig() (*amqp.Config, error) {
// make new tls config
tls, err := internal.GetTLSConfig(
a.SSLCert, a.SSLKey, a.SSLCA, a.InsecureSkipVerify)
if err != nil {
return nil, err
}
// parse auth method
var sasl []amqp.Authentication // nil by default
if strings.ToUpper(a.AuthMethod) == "EXTERNAL" {
sasl = []amqp.Authentication{&externalAuth{}}
}
config := amqp.Config{
TLSClientConfig: tls,
SASL: sasl, // if nil, it will be PLAIN
}
return &config, nil
}
// Start satisfies the telegraf.ServiceInput interface
func (a *AMQPConsumer) Start(acc telegraf.Accumulator) error {
amqpConf, err := a.createConfig()
if err != nil {
return err
}
msgs, err := a.connect(amqpConf)
if err != nil {
return err
}
a.wg = &sync.WaitGroup{}
a.wg.Add(1)
go a.process(msgs, acc)
go func() {
err := <-a.conn.NotifyClose(make(chan *amqp.Error))
if err == nil {
return
}
log.Printf("I! AMQP consumer connection closed: %s; trying to reconnect", err)
for {
msgs, err := a.connect(amqpConf)
if err != nil {
log.Printf("E! AMQP connection failed: %s", err)
time.Sleep(10 * time.Second)
continue
}
a.wg.Add(1)
go a.process(msgs, acc)
break
}
}()
return nil
}
func (a *AMQPConsumer) connect(amqpConf *amqp.Config) (<-chan amqp.Delivery, error) {
conn, err := amqp.DialConfig(a.URL, *amqpConf)
if err != nil {
return nil, err
}
a.conn = conn
ch, err := conn.Channel()
if err != nil {
return nil, fmt.Errorf("Failed to open a channel: %s", err)
}
err = ch.ExchangeDeclare(
a.Exchange, // name
"topic", // type
true, // durable
false, // auto-deleted
false, // internal
false, // no-wait
nil, // arguments
)
if err != nil {
return nil, fmt.Errorf("Failed to declare an exchange: %s", err)
}
q, err := ch.QueueDeclare(
a.Queue, // queue
true, // durable
false, // delete when unused
false, // exclusive
false, // no-wait
nil, // arguments
)
if err != nil {
return nil, fmt.Errorf("Failed to declare a queue: %s", err)
}
err = ch.QueueBind(
q.Name, // queue
a.BindingKey, // binding-key
a.Exchange, // exchange
false,
nil,
)
if err != nil {
return nil, fmt.Errorf("Failed to bind a queue: %s", err)
}
err = ch.Qos(
a.PrefetchCount,
0, // prefetch-size
false, // global
)
if err != nil {
return nil, fmt.Errorf("Failed to set QoS: %s", err)
}
msgs, err := ch.Consume(
q.Name, // queue
"", // consumer
false, // auto-ack
false, // exclusive
false, // no-local
false, // no-wait
nil, // arguments
)
if err != nil {
return nil, fmt.Errorf("Failed establishing connection to queue: %s", err)
}
log.Println("I! Started AMQP consumer")
return msgs, err
}
// Read messages from queue and add them to the Accumulator
func (a *AMQPConsumer) process(msgs <-chan amqp.Delivery, acc telegraf.Accumulator) {
defer a.wg.Done()
for d := range msgs {
metrics, err := a.parser.Parse(d.Body)
if err != nil {
log.Printf("E! %v: error parsing metric - %v", err, string(d.Body))
} else {
for _, m := range metrics {
acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time())
}
}
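// Acknowledge regardless of the parse outcome so malformed
// messages are not redelivered indefinitely.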
d.Ack(false)
}
log.Printf("I! AMQP consumer queue closed")
}
func (a *AMQPConsumer) Stop() {
err := a.conn.Close()
if err != nil && err != amqp.ErrClosed {
log.Printf("E! Error closing AMQP connection: %s", err)
return
}
a.wg.Wait()
log.Println("I! Stopped AMQP service")
}
func init() {
inputs.Add("amqp_consumer", func() telegraf.Input {
return &AMQPConsumer{
AuthMethod: DefaultAuthMethod,
PrefetchCount: DefaultPrefetchCount,
}
})
}


@@ -1,45 +1,84 @@
# Telegraf plugin: Apache
# Apache Input Plugin
#### Plugin arguments:
- **urls** []string: List of apache-status URLs to collect from.
The Apache plugin collects server performance information using the [`mod_status`](https://httpd.apache.org/docs/2.4/mod/mod_status.html) module of the [Apache HTTP Server](https://httpd.apache.org/).
#### Description
Typically, the `mod_status` module is configured to expose a page at the `/server-status?auto` location of the Apache server. The [ExtendedStatus](https://httpd.apache.org/docs/2.4/mod/core.html#extendedstatus) option must be enabled in order to collect all available fields. For information about how to configure your server, refer to the [module documentation](https://httpd.apache.org/docs/2.4/mod/mod_status.html#enable).
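For reference, a typical Apache 2.4 snippet that exposes the status page looks
something like the following (a sketch only; consult the module documentation
for the authoritative directives):

```
ExtendedStatus On
<Location "/server-status">
    SetHandler server-status
    Require local
</Location>
```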
The Apache plugin collects from the /server-status?auto URL. See
[apache.org/server-status?auto](http://www.apache.org/server-status?auto) for an
example. And
[here](http://httpd.apache.org/docs/2.2/mod/mod_status.html) for the apache
mod_status documentation.
### Configuration:
# Measurements:
```toml
# Read Apache status information (mod_status)
[[inputs.apache]]
## An array of URLs to gather from, must be directed at the machine
## readable version of the mod_status page including the auto query string.
## Default is "http://localhost/server-status?auto".
urls = ["http://localhost/server-status?auto"]
Meta:
- tags: `port=<port>`, `server=url`
## Credentials for basic HTTP authentication.
# username = "myuser"
# password = "mypassword"
- apache_TotalAccesses
- apache_TotalkBytes
- apache_CPULoad
- apache_Uptime
- apache_ReqPerSec
- apache_BytesPerSec
- apache_BytesPerReq
- apache_BusyWorkers
- apache_IdleWorkers
- apache_ConnsTotal
- apache_ConnsAsyncWriting
- apache_ConnsAsyncKeepAlive
- apache_ConnsAsyncClosing
## Maximum time to receive response.
# response_timeout = "5s"
### Scoreboard measurements
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
## Use SSL but skip chain & host verification
# insecure_skip_verify = false
```
- apache_scboard_waiting
- apache_scboard_starting
- apache_scboard_reading
- apache_scboard_sending
- apache_scboard_keepalive
- apache_scboard_dnslookup
- apache_scboard_closing
- apache_scboard_logging
- apache_scboard_finishing
- apache_scboard_idle_cleanup
- apache_scboard_open
### Measurements & Fields:
- apache
- BusyWorkers (float)
- BytesPerReq (float)
- BytesPerSec (float)
- ConnsAsyncClosing (float)
- ConnsAsyncKeepAlive (float)
- ConnsAsyncWriting (float)
- ConnsTotal (float)
- CPUChildrenSystem (float)
- CPUChildrenUser (float)
- CPULoad (float)
- CPUSystem (float)
- CPUUser (float)
- IdleWorkers (float)
- Load1 (float)
- Load5 (float)
- Load15 (float)
- ParentServerConfigGeneration (float)
- ParentServerMPMGeneration (float)
- ReqPerSec (float)
- ServerUptimeSeconds (float)
- TotalAccesses (float)
- TotalkBytes (float)
- Uptime (float)
The following fields are collected from the `Scoreboard`, and represent the number of requests in the given state:
- apache
- scboard_closing (float)
- scboard_dnslookup (float)
- scboard_finishing (float)
- scboard_idle_cleanup (float)
- scboard_keepalive (float)
- scboard_logging (float)
- scboard_open (float)
- scboard_reading (float)
- scboard_sending (float)
- scboard_starting (float)
- scboard_waiting (float)
### Tags:
- All measurements have the following tags:
- port
- server
### Example Output:
```
apache,port=80,server=debian-stretch-apache BusyWorkers=1,BytesPerReq=0,BytesPerSec=0,CPUChildrenSystem=0,CPUChildrenUser=0,CPULoad=0.00995025,CPUSystem=0.01,CPUUser=0.01,ConnsAsyncClosing=0,ConnsAsyncKeepAlive=0,ConnsAsyncWriting=0,ConnsTotal=0,IdleWorkers=49,Load1=0.01,Load15=0,Load5=0,ParentServerConfigGeneration=3,ParentServerMPMGeneration=2,ReqPerSec=0.00497512,ServerUptimeSeconds=201,TotalAccesses=1,TotalkBytes=0,Uptime=201,scboard_closing=0,scboard_dnslookup=0,scboard_finishing=0,scboard_idle_cleanup=0,scboard_keepalive=0,scboard_logging=0,scboard_open=100,scboard_reading=0,scboard_sending=1,scboard_starting=0,scboard_waiting=49 1502489900000000000
```


@@ -12,16 +12,46 @@ import (
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/inputs"
)
type Apache struct {
Urls []string
Urls []string
Username string
Password string
ResponseTimeout internal.Duration
// Path to CA file
SSLCA string `toml:"ssl_ca"`
// Path to host cert file
SSLCert string `toml:"ssl_cert"`
// Path to cert key file
SSLKey string `toml:"ssl_key"`
// Use SSL but skip chain & host verification
InsecureSkipVerify bool
client *http.Client
}
var sampleConfig = `
## An array of Apache status URI to gather stats.
## An array of URLs to gather from, must be directed at the machine
## readable version of the mod_status page including the auto query string.
## Default is "http://localhost/server-status?auto".
urls = ["http://localhost/server-status?auto"]
## Credentials for basic HTTP authentication.
# username = "myuser"
# password = "mypassword"
## Maximum time to receive response.
# response_timeout = "5s"
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
## Use SSL but skip chain & host verification
# insecure_skip_verify = false
`
func (n *Apache) SampleConfig() string {
@@ -33,42 +63,73 @@ func (n *Apache) Description() string {
}
func (n *Apache) Gather(acc telegraf.Accumulator) error {
- var wg sync.WaitGroup
- var outerr error
if len(n.Urls) == 0 {
n.Urls = []string{"http://localhost/server-status?auto"}
}
+ if n.ResponseTimeout.Duration < time.Second {
+ n.ResponseTimeout.Duration = time.Second * 5
+ }
+ if n.client == nil {
+ client, err := n.createHttpClient()
+ if err != nil {
+ return err
+ }
+ n.client = client
+ }
+ var wg sync.WaitGroup
+ wg.Add(len(n.Urls))
for _, u := range n.Urls {
addr, err := url.Parse(u)
if err != nil {
- return fmt.Errorf("Unable to parse address '%s': %s", u, err)
+ acc.AddError(fmt.Errorf("Unable to parse address '%s': %s", u, err))
+ continue
}
- wg.Add(1)
go func(addr *url.URL) {
defer wg.Done()
- outerr = n.gatherUrl(addr, acc)
+ acc.AddError(n.gatherUrl(addr, acc))
}(addr)
}
wg.Wait()
- return outerr
+ return nil
}
- var tr = &http.Transport{
- ResponseHeaderTimeout: time.Duration(3 * time.Second),
- }
+ func (n *Apache) createHttpClient() (*http.Client, error) {
+ tlsCfg, err := internal.GetTLSConfig(
+ n.SSLCert, n.SSLKey, n.SSLCA, n.InsecureSkipVerify)
+ if err != nil {
+ return nil, err
+ }
- var client = &http.Client{
- Transport: tr,
- Timeout: time.Duration(4 * time.Second),
+ client := &http.Client{
+ Transport: &http.Transport{
+ TLSClientConfig: tlsCfg,
+ },
+ Timeout: n.ResponseTimeout.Duration,
}
+ return client, nil
}
func (n *Apache) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error {
- resp, err := client.Get(addr.String())
+ req, err := http.NewRequest("GET", addr.String(), nil)
if err != nil {
- return fmt.Errorf("error making HTTP request to %s: %s", addr.String(), err)
+ return fmt.Errorf("error on new request to %s : %s\n", addr.String(), err)
}
+ if len(n.Username) != 0 && len(n.Password) != 0 {
+ req.SetBasicAuth(n.Username, n.Password)
+ }
+ resp, err := n.client.Do(req)
+ if err != nil {
+ return fmt.Errorf("error on request to %s : %s\n", addr.String(), err)
+ }
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("%s returned HTTP status %s", addr.String(), resp.Status)
}
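The substantive change in this hunk is the error-handling model: the old code stored the last error in a shared `outerr` written from multiple goroutines (itself a data race) and aborted on the first unparseable URL, while the new code reports each per-URL failure through `acc.AddError` and keeps gathering the rest. A minimal sketch of that fan-out pattern, where the `gatherOne` callback is a hypothetical stand-in for the real per-URL work:

```go
package example

import (
	"sync"

	"github.com/influxdata/telegraf"
)

// gatherAll launches one goroutine per URL; failures are accumulated
// via acc.AddError instead of aborting the whole collection.
func gatherAll(acc telegraf.Accumulator, urls []string, gatherOne func(string) error) {
	var wg sync.WaitGroup
	wg.Add(len(urls))
	for _, u := range urls {
		go func(u string) {
			defer wg.Done()
			if err := gatherOne(u); err != nil {
				acc.AddError(err)
			}
		}(u)
	}
	wg.Wait()
}
```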

View File

@@ -36,11 +36,12 @@ func TestHTTPApache(t *testing.T) {
defer ts.Close()
a := Apache{
- Urls: []string{ts.URL},
+ // Fetch it 2 times to catch possible data races.
+ Urls: []string{ts.URL, ts.URL},
}
var acc testutil.Accumulator
- err := a.Gather(&acc)
+ err := acc.GatherError(a.Gather)
require.NoError(t, err)
fields := map[string]interface{}{
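Gathering the same httptest URL twice means two goroutines exercise the shared HTTP client and accumulator concurrently, which gives the race detector something to catch. Presumably the test is meant to be run with the detector enabled:

```
go test -race ./plugins/inputs/apache/
```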

View File

@@ -70,7 +70,7 @@ Using this configuration:
When run with:
```
- ./telegraf -config telegraf.conf -input-filter bcache -test
+ ./telegraf --config telegraf.conf --input-filter bcache --test
```
It produces:

View File

@@ -0,0 +1,85 @@
# Bond Input Plugin
The Bond input plugin collects the status of network bond interfaces, the status
of each bond slave interface, and the failure count of each slave.
The plugin collects these metrics from the `/proc/net/bonding/*` files.
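For reference, an abridged bonding file in active-backup mode looks roughly like this (values are illustrative; fuller samples appear in the plugin's tests further down):

```
Ethernet Channel Bonding Driver: v3.6.0 (September 26, 2009)

Bonding Mode: fault-tolerance (active-backup)
Currently Active Slave: eth0
MII Status: up

Slave Interface: eth0
MII Status: up
Link Failure Count: 0
```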
### Configuration:
```toml
[[inputs.bond]]
## Sets 'proc' directory path
## If not specified, the default is /proc
# host_proc = "/proc"
## By default, telegraf gathers stats for all bond interfaces
## Setting interfaces will restrict the stats to the specified
## bond interfaces.
# bond_interfaces = ["bond0"]
```
### Measurements & Fields:
- bond
- active_slave (for active-backup mode)
- status
- bond_slave
- failures
- status
### Description:
```
active_slave
The currently active slave interface in active-backup mode.
status
Status of the bond interface or of a bond's slave interface (down = 0, up = 1).
failures
Number of failures for a bond's slave interface.
```
### Tags:
- bond
  - bond
- bond_slave
  - bond
  - interface
### Example output:
Configuration:
```
[[inputs.bond]]
## Sets 'proc' directory path
## If not specified, the default is /proc
host_proc = "/proc"
## By default, telegraf gathers stats for all bond interfaces
## Setting interfaces will restrict the stats to the specified
## bond interfaces.
bond_interfaces = ["bond0", "bond1"]
```
Run:
```
telegraf --config telegraf.conf --input-filter bond --test
```
Output:
```
* Plugin: inputs.bond, Collection 1
> bond,bond=bond1,host=local active_slave="eth0",status=1i 1509704525000000000
> bond_slave,bond=bond1,interface=eth0,host=local status=1i,failures=0i 1509704525000000000
> bond_slave,host=local,bond=bond1,interface=eth1 status=1i,failures=0i 1509704525000000000
> bond,bond=bond0,host=local status=1i 1509704525000000000
> bond_slave,bond=bond0,interface=eth1,host=local status=1i,failures=0i 1509704525000000000
> bond_slave,bond=bond0,interface=eth2,host=local status=1i,failures=0i 1509704525000000000
```

plugins/inputs/bond/bond.go (new file, 204 lines)
View File

@@ -0,0 +1,204 @@
package bond
import (
"bufio"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"strings"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
)
// default host proc path
const defaultHostProc = "/proc"
// env host proc variable name
const envProc = "HOST_PROC"
type Bond struct {
HostProc string `toml:"host_proc"`
BondInterfaces []string `toml:"bond_interfaces"`
}
var sampleConfig = `
## Sets 'proc' directory path
## If not specified, the default is /proc
# host_proc = "/proc"
## By default, telegraf gathers stats for all bond interfaces
## Setting interfaces will restrict the stats to the specified
## bond interfaces.
# bond_interfaces = ["bond0"]
`
func (bond *Bond) Description() string {
return "Collect bond interface status, slaves statuses and failures count"
}
func (bond *Bond) SampleConfig() string {
return sampleConfig
}
func (bond *Bond) Gather(acc telegraf.Accumulator) error {
// load proc path, get default value if config value and env variable are empty
bond.loadPath()
// list bond interfaces from bonding directory or gather all interfaces.
bondNames, err := bond.listInterfaces()
if err != nil {
return err
}
for _, bondName := range bondNames {
bondAbsPath := bond.HostProc + "/net/bonding/" + bondName
file, err := ioutil.ReadFile(bondAbsPath)
if err != nil {
acc.AddError(fmt.Errorf("error inspecting '%s' interface: %v", bondAbsPath, err))
continue
}
rawFile := strings.TrimSpace(string(file))
err = bond.gatherBondInterface(bondName, rawFile, acc)
if err != nil {
acc.AddError(fmt.Errorf("error inspecting '%s' interface: %v", bondName, err))
}
}
return nil
}
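// gatherBondInterface splits the bonding file at the first "Slave Interface:"
// marker into the bond summary and the per-slave sections, then gathers each part.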
func (bond *Bond) gatherBondInterface(bondName string, rawFile string, acc telegraf.Accumulator) error {
splitIndex := strings.Index(rawFile, "Slave Interface:")
if splitIndex == -1 {
splitIndex = len(rawFile)
}
bondPart := rawFile[:splitIndex]
slavePart := rawFile[splitIndex:]
err := bond.gatherBondPart(bondName, bondPart, acc)
if err != nil {
return err
}
err = bond.gatherSlavePart(bondName, slavePart, acc)
if err != nil {
return err
}
return nil
}
func (bond *Bond) gatherBondPart(bondName string, rawFile string, acc telegraf.Accumulator) error {
fields := make(map[string]interface{})
tags := map[string]string{
"bond": bondName,
}
scanner := bufio.NewScanner(strings.NewReader(rawFile))
for scanner.Scan() {
line := scanner.Text()
stats := strings.Split(line, ":")
if len(stats) < 2 {
continue
}
name := strings.TrimSpace(stats[0])
value := strings.TrimSpace(stats[1])
if strings.Contains(name, "Currently Active Slave") {
fields["active_slave"] = value
}
if strings.Contains(name, "MII Status") {
fields["status"] = 0
if value == "up" {
fields["status"] = 1
}
acc.AddFields("bond", fields, tags)
return nil
}
}
if err := scanner.Err(); err != nil {
return err
}
return fmt.Errorf("Couldn't find status info for '%s' ", bondName)
}
func (bond *Bond) gatherSlavePart(bondName string, rawFile string, acc telegraf.Accumulator) error {
var slave string
var status int
scanner := bufio.NewScanner(strings.NewReader(rawFile))
for scanner.Scan() {
line := scanner.Text()
stats := strings.Split(line, ":")
if len(stats) < 2 {
continue
}
name := strings.TrimSpace(stats[0])
value := strings.TrimSpace(stats[1])
if strings.Contains(name, "Slave Interface") {
slave = value
}
if strings.Contains(name, "MII Status") {
status = 0
if value == "up" {
status = 1
}
}
if strings.Contains(name, "Link Failure Count") {
count, err := strconv.Atoi(value)
if err != nil {
return err
}
fields := map[string]interface{}{
"status": status,
"failures": count,
}
tags := map[string]string{
"bond": bondName,
"interface": slave,
}
acc.AddFields("bond_slave", fields, tags)
}
}
if err := scanner.Err(); err != nil {
return err
}
return nil
}
// loadPath reads the proc path from the config first;
// if that is empty, it falls back to the env variable
func (bond *Bond) loadPath() {
if bond.HostProc == "" {
bond.HostProc = proc(envProc, defaultHostProc)
}
}
// proc reads a file path from the given env variable, falling back to the default
func proc(env, path string) string {
// try to read full file path
if p := os.Getenv(env); p != "" {
return p
}
// return default path
return path
}
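// listInterfaces returns the configured bond interfaces, or every entry
// found under <host_proc>/net/bonding when none are configured.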
func (bond *Bond) listInterfaces() ([]string, error) {
var interfaces []string
if len(bond.BondInterfaces) > 0 {
interfaces = bond.BondInterfaces
} else {
paths, err := filepath.Glob(bond.HostProc + "/net/bonding/*")
if err != nil {
return nil, err
}
for _, p := range paths {
interfaces = append(interfaces, filepath.Base(p))
}
}
return interfaces, nil
}
func init() {
inputs.Add("bond", func() telegraf.Input {
return &Bond{}
})
}

View File

@@ -0,0 +1,77 @@
package bond
import (
"testing"
"github.com/influxdata/telegraf/testutil"
)
var sampleTest802 = `
Ethernet Channel Bonding Driver: v3.5.0 (November 4, 2008)
Bonding Mode: IEEE 802.3ad Dynamic link aggregation
Transmit Hash Policy: layer2 (0)
MII Status: up
MII Polling Interval (ms): 100
Up Delay (ms): 0
Down Delay (ms): 0
802.3ad info
LACP rate: fast
Aggregator selection policy (ad_select): stable
bond bond0 has no active aggregator
Slave Interface: eth1
MII Status: up
Link Failure Count: 0
Permanent HW addr: 00:0c:29:f5:b7:11
Aggregator ID: N/A
Slave Interface: eth2
MII Status: up
Link Failure Count: 3
Permanent HW addr: 00:0c:29:f5:b7:1b
Aggregator ID: N/A
`
var sampleTestAB = `
Ethernet Channel Bonding Driver: v3.6.0 (September 26, 2009)
Bonding Mode: fault-tolerance (active-backup)
Primary Slave: eth2 (primary_reselect always)
Currently Active Slave: eth2
MII Status: up
MII Polling Interval (ms): 100
Up Delay (ms): 0
Down Delay (ms): 0
Slave Interface: eth3
MII Status: down
Speed: 1000 Mbps
Duplex: full
Link Failure Count: 2
Permanent HW addr:
Slave queue ID: 0
Slave Interface: eth2
MII Status: up
Speed: 100 Mbps
Duplex: full
Link Failure Count: 0
Permanent HW addr:
`
func TestGatherBondInterface(t *testing.T) {
var acc testutil.Accumulator
bond := &Bond{}
bond.gatherBondInterface("bond802", sampleTest802, &acc)
acc.AssertContainsTaggedFields(t, "bond", map[string]interface{}{"status": 1}, map[string]string{"bond": "bond802"})
acc.AssertContainsTaggedFields(t, "bond_slave", map[string]interface{}{"failures": 0, "status": 1}, map[string]string{"bond": "bond802", "interface": "eth1"})
acc.AssertContainsTaggedFields(t, "bond_slave", map[string]interface{}{"failures": 3, "status": 1}, map[string]string{"bond": "bond802", "interface": "eth2"})
bond.gatherBondInterface("bondAB", sampleTestAB, &acc)
acc.AssertContainsTaggedFields(t, "bond", map[string]interface{}{"active_slave": "eth2", "status": 1}, map[string]string{"bond": "bondAB"})
acc.AssertContainsTaggedFields(t, "bond_slave", map[string]interface{}{"failures": 2, "status": 0}, map[string]string{"bond": "bondAB", "interface": "eth3"})
acc.AssertContainsTaggedFields(t, "bond_slave", map[string]interface{}{"failures": 0, "status": 1}, map[string]string{"bond": "bondAB", "interface": "eth2"})
}

View File

@@ -0,0 +1,125 @@
# Telegraf plugin: Cassandra
#### Plugin arguments:
- **context** string: Context root used for jolokia url
- **servers** []string: List of servers with the format "<user:passwd@><host>:port"
- **metrics** []string: List of Jmx paths that identify mbeans attributes
#### Description
The Cassandra plugin collects Cassandra 3 / JVM metrics exposed as MBean attributes through the Jolokia REST endpoint. All metrics are collected for each configured server.
See: https://jolokia.org/ and [Cassandra Documentation](http://docs.datastax.com/en/cassandra/3.x/cassandra/operations/monitoringCassandraTOC.html)
# Measurements:
The Cassandra plugin produces one or more measurements for each configured metric, adding the server's name as the `host` tag. More than one measurement is generated when table metrics are queried with a wildcard for the keyspace or table name.
Given a configuration like:
```toml
[[inputs.cassandra]]
context = "/jolokia/read"
servers = [":8778"]
metrics = ["/java.lang:type=Memory/HeapMemoryUsage"]
```
The collected metrics will be:
```
javaMemory,host=myHost,mname=HeapMemoryUsage HeapMemoryUsage_committed=1040187392,HeapMemoryUsage_init=1050673152,HeapMemoryUsage_max=1040187392,HeapMemoryUsage_used=368155000 1459551767230567084
```
# Useful Metrics:
Here is a list of metrics that may be useful for monitoring your Cassandra cluster. It was put together from multiple sources on the web:
- [How to monitor Cassandra performance metrics](https://www.datadoghq.com/blog/how-to-monitor-cassandra-performance-metrics)
- [Cassandra Documentation](http://docs.datastax.com/en/cassandra/3.x/cassandra/operations/monitoringCassandraTOC.html)
#### measurement = javaGarbageCollector
- /java.lang:type=GarbageCollector,name=ConcurrentMarkSweep/CollectionTime
- /java.lang:type=GarbageCollector,name=ConcurrentMarkSweep/CollectionCount
- /java.lang:type=GarbageCollector,name=ParNew/CollectionTime
- /java.lang:type=GarbageCollector,name=ParNew/CollectionCount
#### measurement = javaMemory
- /java.lang:type=Memory/HeapMemoryUsage
- /java.lang:type=Memory/NonHeapMemoryUsage
#### measurement = cassandraCache
- /org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=Hits
- /org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=Requests
- /org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=Entries
- /org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=Size
- /org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=Capacity
- /org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=Hits
- /org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=Requests
- /org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=Entries
- /org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=Size
- /org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=Capacity
#### measurement = cassandraClient
- /org.apache.cassandra.metrics:type=Client,name=connectedNativeClients
#### measurement = cassandraClientRequest
- /org.apache.cassandra.metrics:type=ClientRequest,scope=Read,name=TotalLatency
- /org.apache.cassandra.metrics:type=ClientRequest,scope=Write,name=TotalLatency
- /org.apache.cassandra.metrics:type=ClientRequest,scope=Read,name=Latency
- /org.apache.cassandra.metrics:type=ClientRequest,scope=Write,name=Latency
- /org.apache.cassandra.metrics:type=ClientRequest,scope=Read,name=Timeouts
- /org.apache.cassandra.metrics:type=ClientRequest,scope=Write,name=Timeouts
- /org.apache.cassandra.metrics:type=ClientRequest,scope=Read,name=Unavailables
- /org.apache.cassandra.metrics:type=ClientRequest,scope=Write,name=Unavailables
- /org.apache.cassandra.metrics:type=ClientRequest,scope=Read,name=Failures
- /org.apache.cassandra.metrics:type=ClientRequest,scope=Write,name=Failures
#### measurement = cassandraCommitLog
- /org.apache.cassandra.metrics:type=CommitLog,name=PendingTasks
- /org.apache.cassandra.metrics:type=CommitLog,name=TotalCommitLogSize
#### measurement = cassandraCompaction
- /org.apache.cassandra.metrics:type=Compaction,name=CompletedTasks
- /org.apache.cassandra.metrics:type=Compaction,name=PendingTasks
- /org.apache.cassandra.metrics:type=Compaction,name=TotalCompactionsCompleted
- /org.apache.cassandra.metrics:type=Compaction,name=BytesCompacted
#### measurement = cassandraStorage
- /org.apache.cassandra.metrics:type=Storage,name=Load
- /org.apache.cassandra.metrics:type=Storage,name=Exceptions
#### measurement = cassandraTable
Using wildcards for "keyspace" and "scope" can create a large number of series, since metrics are reported for every keyspace and table, including internal system tables. Specify a keyspace name and/or a table name to limit them.
- /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=LiveDiskSpaceUsed
- /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=TotalDiskSpaceUsed
- /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=ReadLatency
- /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=CoordinatorReadLatency
- /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=WriteLatency
- /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=ReadTotalLatency
- /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=WriteTotalLatency
#### measurement = cassandraThreadPools
- /org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=CompactionExecutor,name=ActiveTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=AntiEntropyStage,name=ActiveTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=CounterMutationStage,name=PendingTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=CounterMutationStage,name=CurrentlyBlockedTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=MutationStage,name=PendingTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=MutationStage,name=CurrentlyBlockedTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=ReadRepairStage,name=PendingTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=ReadRepairStage,name=CurrentlyBlockedTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=ReadStage,name=PendingTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=ReadStage,name=CurrentlyBlockedTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=RequestResponseStage,name=PendingTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=RequestResponseStage,name=CurrentlyBlockedTasks
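Since the plugin builds its requests as `http://<host>:<port><context><metric>`, a quick way to sanity-check a server entry before adding it to Telegraf is to issue the same request by hand (host and port here are illustrative):

```
curl "http://10.10.10.1:8778/jolokia/read/java.lang:type=Memory/HeapMemoryUsage"
```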

View File

@@ -0,0 +1,312 @@
package cassandra
import (
"encoding/json"
"errors"
"fmt"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
"io/ioutil"
"net/http"
"net/url"
"strings"
)
type JolokiaClient interface {
MakeRequest(req *http.Request) (*http.Response, error)
}
type JolokiaClientImpl struct {
client *http.Client
}
func (c JolokiaClientImpl) MakeRequest(req *http.Request) (*http.Response, error) {
return c.client.Do(req)
}
type Cassandra struct {
jClient JolokiaClient
Context string
Servers []string
Metrics []string
}
type javaMetric struct {
host string
metric string
acc telegraf.Accumulator
}
type cassandraMetric struct {
host string
metric string
acc telegraf.Accumulator
}
type jmxMetric interface {
addTagsFields(out map[string]interface{})
}
func newJavaMetric(host string, metric string,
acc telegraf.Accumulator) *javaMetric {
return &javaMetric{host: host, metric: metric, acc: acc}
}
func newCassandraMetric(host string, metric string,
acc telegraf.Accumulator) *cassandraMetric {
return &cassandraMetric{host: host, metric: metric, acc: acc}
}
func addValuesAsFields(values map[string]interface{}, fields map[string]interface{},
mname string) {
for k, v := range values {
if v != nil {
fields[mname+"_"+k] = v
}
}
}
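// parseJmxMetricRequest tokenizes an mbean name such as
// "org.apache.cassandra.metrics:type=Table,keyspace=test,scope=users,name=ReadLatency"
// into {class: cassandra, type: Table, keyspace: test, scope: users, name: ReadLatency}.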
func parseJmxMetricRequest(mbean string) map[string]string {
tokens := make(map[string]string)
classAndPairs := strings.Split(mbean, ":")
if classAndPairs[0] == "org.apache.cassandra.metrics" {
tokens["class"] = "cassandra"
} else if classAndPairs[0] == "java.lang" {
tokens["class"] = "java"
} else {
return tokens
}
pairs := strings.Split(classAndPairs[1], ",")
for _, pair := range pairs {
p := strings.Split(pair, "=")
tokens[p[0]] = p[1]
}
return tokens
}
func addTokensToTags(tokens map[string]string, tags map[string]string) {
for k, v := range tokens {
if k == "name" {
tags["mname"] = v // name seems to a reserved word in influxdb
} else if k == "class" || k == "type" {
continue // class and type are used in the metric name
} else {
tags[k] = v
}
}
}
func (j javaMetric) addTagsFields(out map[string]interface{}) {
tags := make(map[string]string)
fields := make(map[string]interface{})
a := out["request"].(map[string]interface{})
attribute := a["attribute"].(string)
mbean := a["mbean"].(string)
tokens := parseJmxMetricRequest(mbean)
addTokensToTags(tokens, tags)
tags["cassandra_host"] = j.host
if _, ok := tags["mname"]; !ok {
// Queries for a single value will not return a "name" tag in the response.
tags["mname"] = attribute
}
if values, ok := out["value"]; ok {
switch t := values.(type) {
case map[string]interface{}:
addValuesAsFields(values.(map[string]interface{}), fields, attribute)
case interface{}:
fields[attribute] = t
}
j.acc.AddFields(tokens["class"]+tokens["type"], fields, tags)
} else {
j.acc.AddError(fmt.Errorf("Missing key 'value' in '%s' output response\n%v\n",
j.metric, out))
}
}
func addCassandraMetric(mbean string, c cassandraMetric,
values map[string]interface{}) {
tags := make(map[string]string)
fields := make(map[string]interface{})
tokens := parseJmxMetricRequest(mbean)
addTokensToTags(tokens, tags)
tags["cassandra_host"] = c.host
addValuesAsFields(values, fields, tags["mname"])
c.acc.AddFields(tokens["class"]+tokens["type"], fields, tags)
}
func (c cassandraMetric) addTagsFields(out map[string]interface{}) {
r := out["request"]
tokens := parseJmxMetricRequest(r.(map[string]interface{})["mbean"].(string))
// Requests with wildcards for keyspace or table names will return nested
// maps in the json response
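// For example, a wildcard Table query returns one nested value map per
// matching mbean:
//   {"request": {...}, "status": 200,
//    "value": {"org.apache.cassandra.metrics:keyspace=ks1,scope=t1,...": {...}, ...}}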
if (tokens["type"] == "Table" || tokens["type"] == "ColumnFamily") && (tokens["keyspace"] == "*" ||
tokens["scope"] == "*") {
if valuesMap, ok := out["value"]; ok {
for k, v := range valuesMap.(map[string]interface{}) {
addCassandraMetric(k, c, v.(map[string]interface{}))
}
} else {
c.acc.AddError(fmt.Errorf("Missing key 'value' in '%s' output response\n%v\n",
c.metric, out))
return
}
} else {
if values, ok := out["value"]; ok {
addCassandraMetric(r.(map[string]interface{})["mbean"].(string),
c, values.(map[string]interface{}))
} else {
c.acc.AddError(fmt.Errorf("Missing key 'value' in '%s' output response\n%v\n",
c.metric, out))
return
}
}
}
func (j *Cassandra) SampleConfig() string {
return `
# This is the context root used to compose the jolokia url
context = "/jolokia/read"
## List of cassandra servers exposing jolokia read service
servers = ["myuser:mypassword@10.10.10.1:8778","10.10.10.2:8778",":8778"]
## List of metrics collected on above servers
## Each metric consists of a jmx path.
## This will collect all heap memory usage metrics from the jvm and
## ReadLatency metrics for all keyspaces and tables.
## "type=Table" in the query works with Cassandra3.0. Older versions might
## need to use "type=ColumnFamily"
metrics = [
"/java.lang:type=Memory/HeapMemoryUsage",
"/org.apache.cassandra.metrics:type=Table,keyspace=*,scope=*,name=ReadLatency"
]
`
}
func (j *Cassandra) Description() string {
return "Read Cassandra metrics through Jolokia"
}
func (j *Cassandra) getAttr(requestUrl *url.URL) (map[string]interface{}, error) {
// Create + send request
req, err := http.NewRequest("GET", requestUrl.String(), nil)
if err != nil {
return nil, err
}
resp, err := j.jClient.MakeRequest(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
// Process response
if resp.StatusCode != http.StatusOK {
err = fmt.Errorf("Response from url \"%s\" has status code %d (%s), expected %d (%s)",
requestUrl,
resp.StatusCode,
http.StatusText(resp.StatusCode),
http.StatusOK,
http.StatusText(http.StatusOK))
return nil, err
}
// read body
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
// Unmarshal json
var jsonOut map[string]interface{}
if err = json.Unmarshal([]byte(body), &jsonOut); err != nil {
return nil, errors.New("Error decoding JSON response")
}
return jsonOut, nil
}
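// parseServerTokens splits a server entry of the form "user:passwd@host:port"
// into host, port, user, and passwd tokens (user and passwd are optional).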
func parseServerTokens(server string) map[string]string {
serverTokens := make(map[string]string)
hostAndUser := strings.Split(server, "@")
hostPort := ""
userPasswd := ""
if len(hostAndUser) == 2 {
hostPort = hostAndUser[1]
userPasswd = hostAndUser[0]
} else {
hostPort = hostAndUser[0]
}
hostTokens := strings.Split(hostPort, ":")
serverTokens["host"] = hostTokens[0]
serverTokens["port"] = hostTokens[1]
if userPasswd != "" {
userTokens := strings.Split(userPasswd, ":")
serverTokens["user"] = userTokens[0]
serverTokens["passwd"] = userTokens[1]
}
return serverTokens
}
func (c *Cassandra) Gather(acc telegraf.Accumulator) error {
context := c.Context
servers := c.Servers
metrics := c.Metrics
for _, server := range servers {
for _, metric := range metrics {
serverTokens := parseServerTokens(server)
var m jmxMetric
if strings.HasPrefix(metric, "/java.lang:") {
m = newJavaMetric(serverTokens["host"], metric, acc)
} else if strings.HasPrefix(metric,
"/org.apache.cassandra.metrics:") {
m = newCassandraMetric(serverTokens["host"], metric, acc)
} else {
// unsupported metric type
acc.AddError(fmt.Errorf("E! Unsupported Cassandra metric [%s], skipping",
metric))
continue
}
// Prepare URL
requestUrl, err := url.Parse("http://" + serverTokens["host"] + ":" +
serverTokens["port"] + context + metric)
if err != nil {
acc.AddError(err)
continue
}
if serverTokens["user"] != "" && serverTokens["passwd"] != "" {
requestUrl.User = url.UserPassword(serverTokens["user"],
serverTokens["passwd"])
}
out, err := c.getAttr(requestUrl)
if err != nil {
acc.AddError(err)
continue
}
if out["status"] != 200.0 {
acc.AddError(fmt.Errorf("URL returned with status %v - %s\n", out["status"], requestUrl))
continue
}
m.addTagsFields(out)
}
}
return nil
}
func init() {
inputs.Add("cassandra", func() telegraf.Input {
return &Cassandra{jClient: &JolokiaClientImpl{client: &http.Client{}}}
})
}

Some files were not shown because too many files have changed in this diff Show More