Compare commits


100 Commits

Author SHA1 Message Date
David Norton
81caa56859 move plugin interfaces into separate package 2016-12-23 10:18:27 -05:00
David Norton
3e6c4a53a4 fix plugin namespacing 2016-12-22 12:06:04 -05:00
David Norton
ba8f91a038 update Circle build to use go1.8beta1 2016-12-22 12:06:04 -05:00
David Norton
2b77751df8 make plugin dir configurable and fix namespacing 2016-12-22 12:06:04 -05:00
David Norton
70d678c442 load external plugins 2016-12-22 12:06:04 -05:00
YKlausz
fd1feff7b4 Remove print call in cassandra plugin (#2192) 2016-12-21 17:23:54 +00:00
Dominik Labuda
37bc9cf795 [plugins] jolokia input plugin: configurable http timeouts (#2098) 2016-12-21 12:41:58 +00:00
Cameron Sparr
b762546fa7 docker: check type when totalling blkio & net metrics
closes #2027
2016-12-21 12:18:38 +00:00
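The panic this commit guards against comes from blindly type-asserting interface{} field values while totalling. A minimal sketch of the idea, with hypothetical names rather than the docker plugin's actual code:

```go
package main

import "fmt"

// totalFields sums numeric values across several field maps, checking the
// dynamic type of each value instead of blindly asserting it. A bare
// v.(uint64) on a value that is actually an int64 would panic with an
// interface-conversion error; a type switch avoids that.
func totalFields(fieldMaps []map[string]interface{}) map[string]int64 {
	totals := make(map[string]int64)
	for _, fields := range fieldMaps {
		for name, v := range fields {
			switch n := v.(type) {
			case int64:
				totals[name] += n
			case uint64:
				totals[name] += int64(n)
			case float64:
				totals[name] += int64(n)
			default:
				// Non-numeric values are skipped rather than crashing.
			}
		}
	}
	return totals
}

func main() {
	total := totalFields([]map[string]interface{}{
		{"rx_bytes": int64(100), "tx_bytes": uint64(50)},
		{"rx_bytes": int64(23), "io_time": "n/a"}, // mixed types are tolerated
	})
	fmt.Println(total) // map[rx_bytes:123 tx_bytes:50]
}
```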
Cameron Sparr
bf5f2659a1 Do not try Uint parsing in redis plugin
this is just a waste of cpu cycles, since telegraf converts all uints to
int64 anyways.
2016-12-20 23:42:14 +00:00
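A sketch of the resulting parse order, using a hypothetical parseValue helper: a single ParseInt call covers the integer case, since an unsigned value would be converted to int64 anyway:

```go
package main

import (
	"fmt"
	"strconv"
)

// parseValue converts a raw INFO value into a typed field. There is no
// point attempting strconv.ParseUint first: the stored type is int64
// either way, so one ParseInt call covers the integer case.
func parseValue(raw string) interface{} {
	if i, err := strconv.ParseInt(raw, 10, 64); err == nil {
		return i
	}
	if f, err := strconv.ParseFloat(raw, 64); err == nil {
		return f
	}
	return raw // fall back to the string as-is
}

func main() {
	fmt.Printf("%T %T %T\n", parseValue("42"), parseValue("0.75"), parseValue("master"))
	// int64 float64 string
}
```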
Mark Wolfe
d2787e8ef5 Fix for loop over value array range issue. (#2187) 2016-12-20 22:56:02 +00:00
Cameron Sparr
a9f03a72f5 Mask username/password from error messages
closes #1980
2016-12-20 19:35:45 +00:00
Cameron Sparr
7fc57812a7 changelog update 2016-12-20 18:50:32 +00:00
Mark Wolfe
8a982ca68f Moved to using the inbuilt serializer. (#1942)
* Moved to using the inbuilt serializer.

* Remove Atomic variable as it is not required.

* Adjusted metric type in line with latest changes.
2016-12-20 18:49:28 +00:00
Cameron Sparr
200237a515 Do not create a global statsd "previous instance"
this basically reverts #887

at some point we might want to do some special handling of reloading
plugins and keeping their state intact, but that will need to be done at
a higher level, and in a way that is thread-safe for multiple input
plugins of the same type.

Unfortunately this is a rather large feature that will not have a quick
fix available for it.

fixes #1975
fixes #2102
2016-12-20 17:55:04 +00:00
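To see why the reverted global state is unsafe, here is a minimal sketch (hypothetical types, not telegraf's code) of a package-level "previous instance" shared by two configured inputs:

```go
package main

import "fmt"

// Statsd mimics an input plugin that carries state (e.g. running counters).
type Statsd struct {
	ServiceAddress string
	counters       map[string]int64
}

// A package-level "previous instance", as in the reverted PR #887, is shared
// by every configured instance of the plugin. With two [[inputs.statsd]]
// sections, the second instance constructed on a reload silently adopts the
// first one's state, and concurrent map access is a data race.
var prevInstance *Statsd

func newStatsd(addr string) *Statsd {
	if prevInstance != nil {
		// state "restore": the wrong instance's counters may be inherited
		return &Statsd{ServiceAddress: addr, counters: prevInstance.counters}
	}
	s := &Statsd{ServiceAddress: addr, counters: make(map[string]int64)}
	prevInstance = s
	return s
}

func main() {
	a := newStatsd(":8125")
	a.counters["requests"] = 10
	b := newStatsd(":8126") // distinct instance, but shares a's map
	b.counters["requests"]++
	fmt.Println(a.counters["requests"]) // 11 — cross-instance bleed
}
```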
Cameron Sparr
0ae1e0611c changelog update 2016-12-20 16:30:49 +00:00
Matt O'Hara
1392e73125 Add clusterstats to elasticsearch plugin (#1979)
* add clusterstats to elasticsearch input plugin

* add clusterstats to elasticsearch input plugin

* add clusterstats to elasticsearch input plugin

* add clusterstats to elasticsearch input plugin

* add clusterstats to elasticsearch input plugin

* responses to requested changes

* remove unnecessary recommendation
2016-12-20 16:30:03 +00:00
Cameron Sparr
a90afd95c6 Fix & unit test logparser CLF pattern with IPv6
deals partially with #1973

see also https://github.com/vjeantet/grok/issues/17
2016-12-20 15:57:32 +00:00
Cameron Sparr
9866146545 Support negative statsd counters
closes #1898
2016-12-20 13:21:51 +00:00
Cameron Sparr
8df325a68c changelog update 2016-12-20 13:04:51 +00:00
Łukasz Harasimowicz
48ae105a11 Fixing consul with multiple health checks per service (#1994)
* plugins/input/consul: moved check_id from regular fields to tags.

When a service has more than one check, sending data for both would overwrite each other,
resulting in only one check being written (the last one). Adding check_id as a tag
ensures we will get info for all unique checks per service.

* plugins/inputs/consul: updated tests
2016-12-20 13:03:31 +00:00
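The overwrite happens because tags, not fields, are part of the series key. A small illustration with a hypothetical seriesKey helper:

```go
package main

import "fmt"

// seriesKey mimics how a time-series store identifies a series: by
// measurement plus the tag set. Fields are values on that series, so two
// writes with identical keys and timestamps overwrite each other.
func seriesKey(measurement string, tags map[string]string) string {
	return fmt.Sprintf("%s,service=%s,check_id=%s",
		measurement, tags["service"], tags["check_id"])
}

func main() {
	// check_id as a field: both checks map to the same series key and the
	// second write clobbers the first.
	fmt.Println(seriesKey("consul_health_checks", map[string]string{"service": "web"}))
	fmt.Println(seriesKey("consul_health_checks", map[string]string{"service": "web"}))

	// check_id as a tag: each check gets its own series.
	fmt.Println(seriesKey("consul_health_checks", map[string]string{"service": "web", "check_id": "web-1"}))
	fmt.Println(seriesKey("consul_health_checks", map[string]string{"service": "web", "check_id": "web-2"}))
}
```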
Jeff Ashton
4e808c5c20 Importing pdh from github.com/lxn/win
closes #1763
closes #2017
2016-12-20 12:06:40 +00:00
Ken Dilley
eb96443a34 Update MySQL Readme to clarify connection string examples. (#2175)
* Update MySQL Readme to clarify connection string examples.

* Update mysql sample config to clarify connection string examples
2016-12-20 10:17:00 +00:00
Cameron Sparr
e36c354ff5 internal.Duration build fixup 2016-12-17 13:10:33 +00:00
Pierre Tessier
f09c08d1f3 Added response_timeout property
closes #2006
2016-12-17 13:06:04 +00:00
Steven Pall
0e8122a2fc Add trailing slash to jolokia context (#2105) 2016-12-17 12:51:46 +00:00
Cameron Sparr
6723ea5fe6 changelog update 2016-12-16 17:30:13 +00:00
Vincent
e8bf968c78 fix mongodb replica set lag always 0 #1449 (#2125) 2016-12-16 17:29:04 +00:00
Cameron Sparr
9c8f24601f rabbitmq, decrease timeout verbosity in config 2016-12-16 14:12:50 +00:00
Tevin Jeffrey
4957717df5 Add field for last GC pause time (#2121) 2016-12-16 14:03:53 +00:00
Cameron Sparr
21fac3ebec changelog update 2016-12-16 14:02:11 +00:00
Patrick Hemmer
ecbc634221 fix tail input seeking when used with pipe (#2090) 2016-12-16 14:01:49 +00:00
alekseyp
90cec20d1d Standard deviation (jitter) for Input plugin Ping (#2078) 2016-12-16 13:58:27 +00:00
Cameron Sparr
bcbf82f8e8 changelog update 2016-12-16 13:54:51 +00:00
Alex Sherwin
3a45d8851d fixes #1987 custom docker repos with non-standard port (#2018)
* fixed parsing of docker image name/version

now accounts for custom docker repos which contain a colon for a non-default port

* 1978: modifying docker test case to have a custom repo with non-standard port

* using a temp var to store index, ran gofmt

* fixes #1987, renaming iterator to 'i'
2016-12-16 13:53:16 +00:00
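A minimal sketch of the parsing rule the fix implements, with a hypothetical parseImage helper: the tag separator is the last colon after the last slash, so a registry port colon is not mistaken for a tag:

```go
package main

import (
	"fmt"
	"strings"
)

// parseImage splits a docker image reference into name and tag. The tag
// separator is the last colon that appears after the last slash; an earlier
// colon belongs to a registry host with a non-default port.
func parseImage(image string) (name, tag string) {
	i := strings.LastIndex(image, ":")
	if i == -1 || strings.Contains(image[i:], "/") {
		return image, "" // no tag present
	}
	return image[:i], image[i+1:]
}

func main() {
	for _, img := range []string{
		"telegraf:1.1",
		"registry.example.com:5000/telegraf:1.1",
		"registry.example.com:5000/telegraf", // port colon, but no tag
	} {
		name, tag := parseImage(img)
		fmt.Printf("name=%q tag=%q\n", name, tag)
	}
}
```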
Pierre Tessier
4a83c8c518 Add Questions status variable for issue: #1988 (#2004) 2016-12-16 13:47:47 +00:00
Doug Reese
bc13d32d53 MongoDB input plugin: Improve state data (#2001)
* MongoDB input plugin: Improve state data

Adds ARB as a "member_status" (replica set arbiter).
Uses MongoDB replica set state string for "state" value.

* MongoDB input plugin: Improve state data - changelog update
2016-12-16 13:46:32 +00:00
Frank Stutz
e6fc32bdf0 fix for puppetagent config - test 1
put Makefile back to normal

removed comment from puppetagent.go

changed config_version to config_version_string and fixed yaml for build

changed wording from branch to environment for config_string

fixed casing and Changelog

fixed test case

closes #1917
2016-12-16 13:36:06 +00:00
Cameron Sparr
a970b9c62c Revert "Rabbitmq plugin: connection-related metrics." (#2169) 2016-12-15 19:31:40 +00:00
Florian Klink
17b307a7bc ping: fix typo in README (#2163) 2016-12-14 19:47:48 +00:00
Jose Luis Navarro
393f5044bb Collect JSON values recursively
closes #1993
closes #1693
2016-12-13 21:06:05 +00:00
Pieter Slabbert
c630212dde Enable setting a clientID for MQTT Output
closes #2079
closes #1910
2016-12-13 20:03:09 +00:00
Cameron Sparr
f39db08c6d Set default values for delete_ configuration options
closes #1893
2016-12-13 20:00:52 +00:00
Jonas Falck
b4f9bc8745 Change hddtemp to always put temperature in temperature field (#1905)
Added unit tests for the changes

Fixes #1904
2016-12-13 19:40:55 +00:00
Cameron Sparr
5f06bd2566 Graylog output should set short_message field
closes #2045
2016-12-13 16:10:59 +00:00
Cameron Sparr
8a4ab3654d Fix documentation for net_response plugin
closes #2103
2016-12-13 16:02:03 +00:00
Cameron Sparr
e2f9617228 Support strings in statsd set measurements
closes #2068
2016-12-13 15:42:22 +00:00
Cameron Sparr
e097ae9632 Fix possible panic when file info cannot be gotten
closes #2061
2016-12-13 14:54:07 +00:00
Cameron Sparr
07684fb030 Update changelog 2016-12-13 14:28:28 +00:00
Da1den
17fa6f9b17 Fixed bug that you cannot gather data on non english systems (#1944) 2016-12-13 14:24:41 +00:00
krise3k
8e3fbaa9dd Add missing slim (#1937) 2016-12-13 14:23:18 +00:00
Kishore Nallan
dede3e70ad Rabbitmq plugin: connection-related metrics. (#1908)
* Rabbitmq plugin: connection-related metrics.

* Run go fmt.
2016-12-13 14:17:20 +00:00
Anthony Arnaud
7558081873 Output openTSDB HTTPS with basic auth (#1913) 2016-12-13 14:15:51 +00:00
Leon Barrett
6e241611be Fix bug: too many cloudwatch metrics (#1885)
* Fix bug: too many cloudwatch metrics

Cloudwatch metrics were being added incorrectly. The most obvious
symptom of this was that too many metrics were being added. A simple
check against the name of the metric proved to be a sufficient fix. In
order to test the fix, a metric selection function was factored out.

* Go fmt cloudwatch

* Cloudwatch isSelected checks metric name

* Move cloudwatch line in changelog to 1.2 features
2016-12-13 14:13:53 +00:00
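A sketch of the factored-out selection predicate, with hypothetical types; the essential change is that the metric name is checked alongside the dimensions:

```go
package main

import "fmt"

// Metric mirrors the relevant parts of a CloudWatch metric listing.
type Metric struct {
	Name       string
	Dimensions map[string]string
}

// isSelected reports whether a listed metric matches the configured name and
// dimension filters. Checking the name is the key fix: dimension-only
// matching pulled in every metric that happened to share a dimension.
func isSelected(m Metric, wantName string, wantDims map[string]string) bool {
	if m.Name != wantName {
		return false
	}
	for k, v := range wantDims {
		if m.Dimensions[k] != v {
			return false
		}
	}
	return true
}

func main() {
	dims := map[string]string{"InstanceId": "i-0123"}
	listed := []Metric{
		{Name: "CPUUtilization", Dimensions: dims},
		{Name: "NetworkIn", Dimensions: dims}, // same dimension, wrong name
	}
	for _, m := range listed {
		fmt.Println(m.Name, isSelected(m, "CPUUtilization", dims))
	}
}
```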
Rikaard Hosein
fc9f921b62 Can turn pid into tag instead of field
closes #1843
fixes #1668
2016-12-13 13:21:39 +00:00
Cameron Sparr
12db3b9120 Check if metric is nil before calling SetAggregate
fixes #2146
2016-12-13 12:27:10 +00:00
Patrick Hemmer
b58926dd26 snmp: use a shared global translation cache
Prevents the same data from being looked up multiple times. Also prevents multiple simultaneous lookups.

closes #2115
closes #2104
2016-12-12 13:32:42 +00:00
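A minimal sketch of such a cache, assuming a hypothetical translationCache type: results are memoized, and concurrent lookups of the same OID wait for the first one instead of running the translation again (similar in spirit to golang.org/x/sync/singleflight):

```go
package main

import (
	"fmt"
	"sync"
)

// translationCache memoizes expensive lookups (here: snmptranslate calls)
// and collapses concurrent requests for the same key into one execution.
type translationCache struct {
	mu      sync.Mutex
	results map[string]string
	pending map[string]*sync.WaitGroup
}

func newTranslationCache() *translationCache {
	return &translationCache{
		results: make(map[string]string),
		pending: make(map[string]*sync.WaitGroup),
	}
}

func (c *translationCache) Lookup(oid string, translate func(string) string) string {
	c.mu.Lock()
	if r, ok := c.results[oid]; ok { // already translated
		c.mu.Unlock()
		return r
	}
	if wg, ok := c.pending[oid]; ok { // someone else is translating it
		c.mu.Unlock()
		wg.Wait()
		c.mu.Lock()
		r := c.results[oid]
		c.mu.Unlock()
		return r
	}
	wg := new(sync.WaitGroup) // we are the first: do the lookup once
	wg.Add(1)
	c.pending[oid] = wg
	c.mu.Unlock()

	r := translate(oid)

	c.mu.Lock()
	c.results[oid] = r
	delete(c.pending, oid)
	c.mu.Unlock()
	wg.Done()
	return r
}

func main() {
	cache := newTranslationCache()
	calls := 0
	translate := func(oid string) string { calls++; return "ifInOctets" }

	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() { defer wg.Done(); cache.Lookup(".1.3.6.1.2.1.2.2.1.10", translate) }()
	}
	wg.Wait()
	fmt.Println("translate calls:", calls) // 1
}
```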
Patrick Hemmer
91143dda1a snmp: make snmptranslate not required (#2008) 2016-12-12 13:30:07 +00:00
Christian Eichelmann
efb64a049f add a hint to possible basic authentication settings 2016-12-09 12:58:54 +00:00
Ross McDonald
4f6087a99d Update readme links for 1.1.1. (#2134) 2016-12-07 17:10:05 +00:00
Cameron Sparr
6b0e863556 Support a telegraf.Metric.Split function 2016-12-07 15:18:47 +00:00
Cameron Sparr
11bc82379c Go version 1.7.3 -> 1.7.4 2016-12-06 15:42:50 +00:00
Cameron Sparr
a093ec1eaa Kafka output fixup 2016-12-06 15:38:59 +00:00
Cameron Sparr
d71a42cd1b Implement telegraf collecting stats on itself
closes #1348
2016-12-05 18:56:54 +00:00
Nathan D Acuff
d518d7d806 Add device name as a tag in disk stats (#1807)
* return partition stat alongside disk stat from disk usage method, and report device name (minus /dev/) as a tag in disk stats

* update system/disk tests to include new partition stat return value from disk usage method calls

* update changelog for #1807 (use device name instead of path to report disk stats)
2016-12-05 17:42:36 +00:00
Pierre Fersing
1d1afe6481 Fix RPM architecture for armhf (#2003)
Signed-off-by: Pierre Fersing <pierre.fersing@bleemeo.com>
2016-12-05 16:45:02 +00:00
Foxlik
5a3f2e61f3 Fix improper total of CPU times (#2123)
On Linux, the CPU time counters for user and nice include the respective guest and guest_nice counters, which results in improper percentage calculations.

Please see:
https://github.com/torvalds/linux/blob/447976e/kernel/sched/cputime.c#L169
https://lists.linuxfoundation.org/pipermail/virtualization/2009-August/013459.html
https://github.com/giampaolo/psutil/pull/940
2016-12-05 08:35:59 +00:00
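A worked example of the corrected accounting, using hypothetical names: guest and guest_nice are subtracted from user and nice before totalling so they are only counted once:

```go
package main

import "fmt"

// CPUTimes holds a subset of /proc/stat counters (in USER_HZ ticks).
type CPUTimes struct {
	User, Nice, System, Idle, Guest, GuestNice float64
}

// busyTotal returns total and busy time. On Linux the kernel accounts guest
// time inside user, and guest_nice inside nice (see kernel/sched/cputime.c),
// so adding Guest and GuestNice on top of User and Nice would count them
// twice. Subtract them out first.
func busyTotal(t CPUTimes) (total, busy float64) {
	user := t.User - t.Guest
	nice := t.Nice - t.GuestNice
	busy = user + nice + t.System + t.Guest + t.GuestNice
	total = busy + t.Idle
	return total, busy
}

func main() {
	t := CPUTimes{User: 120, Nice: 10, System: 30, Idle: 840, Guest: 40, GuestNice: 5}
	total, busy := busyTotal(t)
	fmt.Printf("busy %.1f%%\n", 100*busy/total) // 16.0%, not inflated by double-counted guest time
}
```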
Cameron Sparr
504f4e69db file output plugin fixup 2016-12-02 11:36:22 +00:00
Cameron Sparr
9f6666beb3 unit test fixup 2016-12-01 19:17:44 +00:00
Cameron Sparr
af6e7b9531 More unit tests for new metric 2016-12-01 19:07:14 +00:00
Cameron Sparr
6fd7361364 allocation & perf improvements 2016-12-01 18:17:02 +00:00
Cameron Sparr
e5c7a71d8e Fix unit tests for new metric implementation 2016-12-01 18:17:02 +00:00
Cameron Sparr
db7a4b24b6 Implement telegraf's own full metric type
main reasons behind this:
- make adding/removing tags cheap
- make adding/removing fields cheap
- make parsing cheaper
- make parse -> decorate -> write out bytes metric flow much faster

Refactor serializer to use byte buffer
2016-12-01 18:17:02 +00:00
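One way to picture the design (a sketch of the idea only, not telegraf's actual implementation): keep the metric in its serialized line-protocol bytes, so decorating it is a byte splice and writing it out is a buffer copy:

```go
package main

import (
	"bytes"
	"fmt"
)

// lineMetric keeps a metric in its serialized line-protocol form. Adding a
// tag is then a single byte splice, and Serialize just returns the buffer,
// which is what makes the parse -> decorate -> write path cheap.
type lineMetric struct {
	buf []byte // e.g. "cpu,host=a usage=0.5 1480000000000000000\n"
}

// AddTag splices ",key=value" in right after the measurement name, before
// any existing tags or the field section.
func (m *lineMetric) AddTag(key, value string) {
	i := bytes.IndexAny(m.buf, ", ") // end of the measurement name
	tag := []byte("," + key + "=" + value)
	out := make([]byte, 0, len(m.buf)+len(tag))
	out = append(out, m.buf[:i]...)
	out = append(out, tag...)
	out = append(out, m.buf[i:]...)
	m.buf = out
}

func (m *lineMetric) Serialize() []byte { return m.buf }

func main() {
	m := &lineMetric{buf: []byte("cpu,host=a usage=0.5 1480000000000000000\n")}
	m.AddTag("dc", "us-east")
	fmt.Print(string(m.Serialize())) // cpu,dc=us-east,host=a usage=0.5 ...
}
```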
Cameron Sparr
332f678afb JSON serializer: include unit test with escapes 2016-12-01 18:16:52 +00:00
John Engelman
04a2b36a52 Fix changelog for json parser. (#2100) 2016-11-28 18:20:32 +00:00
Cameron Sparr
f862c6585d amqp precision is not used anymore 2016-11-24 10:17:24 +00:00
Cameron Sparr
5c32521a07 Add benchmarks for metric parsing and creating 2016-11-23 17:23:08 +00:00
Cameron Sparr
9db30250c3 'discard' output plugin 2016-11-23 14:03:30 +00:00
Cameron Sparr
2b0cd2037b Add Copy() function to Metric interface 2016-11-23 12:30:31 +00:00
Guillem Jover
536dbfb724 Switch to github.com/kballard/go-shellquote (#1950)
The old gonuts fork has no license and has not seen any commits
differing from the original project, while the original has seen some
activity, even if low.

Having no license is a problem for distributors, as by default, such
projects are undistributable.
2016-11-16 11:24:11 -05:00
karech
b77398c4d3 Configurable RabbitMQ HTTP timeouts #1997 (#1998)
* [plugins] rabbitmq input plugin: add non default http timeouts

* update CHANGELOG.md
2016-11-16 16:18:56 +00:00
Chris Goller
fbf5bee051 Update win_perf_counters to include Processor Queue Length in examples. (#2029) 2016-11-16 13:16:44 +00:00
leplan73
81004c808f Added IopsInProgress to diskio stats (#2037)
* Export IopsInProgress

* Export IopsInProgress

* Export IopsInProgress
2016-11-16 13:16:16 +00:00
Pieter Slabbert
196509cc53 Trim null characters in Value data format (#2049)
* Trim null characters in Value data format

Some producers (such as the paho embedded c mqtt client) add a null
character "\x00" to the end of a message.  The Value parser would fail on
any message from such a producer.

* Trim whitespace and null in all Value data formats

* No unnecessary reassignments in Value data format parser

* Update change log for Value data format fix
2016-11-16 13:13:31 +00:00
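A minimal sketch of the fix, with a hypothetical parse helper: strip NUL bytes and surrounding whitespace before handing the payload to the number parser:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parse mimics the Value data format fix: trim whitespace and NUL bytes
// before parsing, so payloads from producers that NUL-terminate their
// messages (e.g. some embedded MQTT clients) still parse cleanly.
func parse(payload []byte) (float64, error) {
	s := strings.Trim(string(payload), " \t\r\n\x00")
	return strconv.ParseFloat(s, 64)
}

func main() {
	v, err := parse([]byte("42.5\x00")) // trailing NUL from the producer
	fmt.Println(v, err)                 // 42.5 <nil>
}
```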
John Engelman
94ce67cc67 Add support to parse JSON array. (#1965) 2016-11-15 13:02:55 -05:00
Toni Moreno
33ed528afe Apache input enhancements ( added Basic Auth and SSL skipverify ) (#1964)
* added connection timeout parameter, basic HTTP authentication, and HTTPS support with an SSL skip-verify option

* updated README.md

* added optional SSL config , changed timeout name and type , and other minor fixes

* added some code style improvements

* Update README.md
2016-11-15 10:52:24 -05:00
Cameron Sparr
2435e47926 changelog update 2016-11-15 11:36:26 +00:00
Mike Ragalie
ff67a4b96c Cache and expire metrics for prometheus output (#2016)
* Cache and expire metrics for prometheus output

* Fix test

* Use interval.Duration

* Default prometheus expiration interval to 60s

* Update changelog
2016-11-15 11:33:39 +00:00
Sebastian Borza
f816b952cf Add udp_buffer_size option to udp_listener (#1883)
* patching udp_listener for fun

updating with errcode

adding debug flags to temp msgs

moving from debug to info

* updating PR 1883 based on feedback
2016-11-15 09:49:48 +00:00
Todd Persen
b905bc1b5d Merge pull request #2024 from influxdata/cs2023-single-quote-duration
Fix single quote parsing of TOML durations
2016-11-11 09:40:32 -08:00
Cameron Sparr
0ecbf9e349 Fix single quote parsing of TOML durations
closes #2023
2016-11-10 09:47:46 +00:00
Cameron Sparr
1c7715780e Documentation improvements
- fully document aggregator and processor plugins
- improve readme.md

closes #1989
2016-11-08 13:55:37 +00:00
John Engelman
5d3850c44e Update docs on Cloudwatch. Set default period to 5m. (#2000) 2016-11-07 12:14:04 +00:00
Cameron Sparr
e84b356a12 Update etc/telegraf.conf 2016-11-04 13:18:44 +00:00
John Engelman
b349800f7a Fix up AWS plugin docs so they don't use single quotes. (#1991)
Also don't use named returns in fetchNamespaceMetrics since it's
non-standard for the rest of the codebase.
2016-11-04 13:16:41 +00:00
Cameron Sparr
47de43abf3 Use rfc3339 timestamps in telegraf log output
closes #1564

also add unit and benchmark tests
2016-11-03 18:39:02 +00:00
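A sketch of how such timestamps can be wired into the standard logger (hypothetical rfc3339Writer, not telegraf's logger package):

```go
package main

import (
	"io"
	"log"
	"os"
	"time"
)

// rfc3339Writer prefixes each log line with an RFC3339 timestamp. Disabling
// the logger's own date flags and installing this writer yields unambiguous,
// timezone-aware timestamps instead of the default "2016/11/03 18:39:02".
type rfc3339Writer struct{ out io.Writer }

func (w rfc3339Writer) Write(p []byte) (n int, err error) {
	ts := time.Now().UTC().Format(time.RFC3339)
	if _, err = w.out.Write(append([]byte(ts+" "), p...)); err != nil {
		return 0, err
	}
	return len(p), nil
}

func main() {
	log.SetFlags(0) // drop the default date/time prefix
	log.SetOutput(rfc3339Writer{out: os.Stderr})
	log.Println("I! Starting Telegraf")
	// 2016-11-03T18:39:02Z I! Starting Telegraf
}
```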
Johannes Rudolph
7a9fef80f5 Update README.md (#1868)
I think this is a copy paste bug? ;-)
2016-11-03 18:28:24 +00:00
Cameron Sparr
dc28875437 Update gopsutil dependency
primarily for a fix in Windows network counter getting code

closes #1949
2016-11-03 18:06:10 +00:00
Cameron Sparr
a6ed4d4c3a CircleCI script, do not explicitly set version tag 2016-11-03 17:21:06 +00:00
Cameron Sparr
fe6162b2a1 Use short commit in Makefile build 2016-11-03 16:37:52 +00:00
Cameron Sparr
34182d9c9f Add release 1.2 section to changelog 2016-11-03 14:34:09 +00:00
274 changed files with 8562 additions and 2336 deletions

CHANGELOG.md

@@ -1,4 +1,86 @@
## v1.1 [unreleased]
## v1.2 [unreleased]
### Release Notes
- The StatsD plugin will now default all "delete_" config options to "true". This
will change the default behavior for users who were not specifying these parameters
in their config file.
- The StatsD plugin will also no longer save its state on a service reload.
Essentially we have reverted PR [#887](https://github.com/influxdata/telegraf/pull/887).
The reason for this is that saving the state in a global variable is not
thread-safe (see [#1975](https://github.com/influxdata/telegraf/issues/1975) & [#2102](https://github.com/influxdata/telegraf/issues/2102)),
and this creates issues if users want to define multiple instances
of the statsd plugin. Saving state on reload may be considered in the future,
but this would need to be implemented at a higher level and applied to all
plugins, not just statsd.
### Features
- [#2123](https://github.com/influxdata/telegraf/pull/2123): Fix improper calculation of CPU percentages
- [#1564](https://github.com/influxdata/telegraf/issues/1564): Use RFC3339 timestamps in log output.
- [#1997](https://github.com/influxdata/telegraf/issues/1997): Non-default HTTP timeouts for RabbitMQ plugin.
- [#2074](https://github.com/influxdata/telegraf/pull/2074): "discard" output plugin added, primarily for testing purposes.
- [#1965](https://github.com/influxdata/telegraf/pull/1965): The JSON parser can now parse an array of objects using the same configuration.
- [#1807](https://github.com/influxdata/telegraf/pull/1807): Option to use device name rather than path for reporting disk stats.
- [#1348](https://github.com/influxdata/telegraf/issues/1348): Telegraf "internal" plugin for collecting stats on itself.
- [#2127](https://github.com/influxdata/telegraf/pull/2127): Update Go version to 1.7.4.
- [#2126](https://github.com/influxdata/telegraf/pull/2126): Support a metric.Split function.
- [#2026](https://github.com/influxdata/telegraf/pull/2065): elasticsearch "shield" (basic auth) support doc.
- [#1885](https://github.com/influxdata/telegraf/pull/1885): Fix over-querying of cloudwatch metrics.
- [#1913](https://github.com/influxdata/telegraf/pull/1913): OpenTSDB basic auth support.
- [#1908](https://github.com/influxdata/telegraf/pull/1908): RabbitMQ Connection metrics.
- [#1937](https://github.com/influxdata/telegraf/pull/1937): HAProxy session limit metric.
- [#2068](https://github.com/influxdata/telegraf/issues/2068): Accept strings for StatsD sets.
- [#1893](https://github.com/influxdata/telegraf/issues/1893): Change StatsD default "reset" behavior.
- [#2079](https://github.com/influxdata/telegraf/pull/2079): Enable setting ClientID in MQTT output.
- [#2001](https://github.com/influxdata/telegraf/pull/2001): MongoDB input plugin: Improve state data.
- [#2078](https://github.com/influxdata/telegraf/pull/2078): Ping input: add standard deviation field.
- [#2121](https://github.com/influxdata/telegraf/pull/2121): Add GC pause metric to InfluxDB input plugin.
- [#2006](https://github.com/influxdata/telegraf/pull/2006): Added response_timeout property to prometheus input plugin.
- [#1763](https://github.com/influxdata/telegraf/issues/1763): Pulling github.com/lxn/win's pdh wrapper into telegraf.
- [#1898](https://github.com/influxdata/telegraf/issues/1898): Support negative statsd counters.
- [#1921](https://github.com/influxdata/telegraf/issues/1921): Elasticsearch cluster stats support.
- [#1942](https://github.com/influxdata/telegraf/pull/1942): Change Amazon Kinesis output plugin to use the built-in serializer plugins.
- [#1980](https://github.com/influxdata/telegraf/issues/1980): Hide username/password from elasticsearch error log messages.
- [#2097](https://github.com/influxdata/telegraf/issues/2097): Configurable HTTP timeouts in Jolokia plugin.
### Bugfixes
- [#2049](https://github.com/influxdata/telegraf/pull/2049): Fix the Value data format not trimming null characters from input.
- [#1949](https://github.com/influxdata/telegraf/issues/1949): Fix windows `net` plugin.
- [#1775](https://github.com/influxdata/telegraf/issues/1775): Cache & expire metrics for delivery to prometheus.
- [#2146](https://github.com/influxdata/telegraf/issues/2146): Fix potential panic in aggregator plugin metric maker.
- [#1843](https://github.com/influxdata/telegraf/pull/1843) & [#1668](https://github.com/influxdata/telegraf/issues/1668): Add optional ability to define PID as a tag.
- [#1730](https://github.com/influxdata/telegraf/issues/1730): Fix win_perf_counters not gathering non-English counters.
- [#2061](https://github.com/influxdata/telegraf/issues/2061): Fix panic when file stat info cannot be collected due to permissions or other issue(s).
- [#2045](https://github.com/influxdata/telegraf/issues/2045): Graylog output should set short_message field.
- [#1904](https://github.com/influxdata/telegraf/issues/1904): Hddtemp now always puts the temperature value in the temperature field.
- [#1693](https://github.com/influxdata/telegraf/issues/1693): Properly collect nested jolokia struct data.
- [#1917](https://github.com/influxdata/telegraf/pull/1917): Fix puppetagent input plugin to support string config variables.
- [#1987](https://github.com/influxdata/telegraf/issues/1987): Fix docker input plugin tags when the registry has a port.
- [#2089](https://github.com/influxdata/telegraf/issues/2089): Fix tail input when reading from a pipe.
- [#1449](https://github.com/influxdata/telegraf/issues/1449): MongoDB plugin always shows 0 replication lag.
- [#1825](https://github.com/influxdata/telegraf/issues/1825): Consul plugin: add check_id as a tag in metrics to avoid overwrites.
- [#1973](https://github.com/influxdata/telegraf/issues/1973): Partial fix: logparser CLF pattern with IPv6 addresses.
- [#1975](https://github.com/influxdata/telegraf/issues/1975) & [#2102](https://github.com/influxdata/telegraf/issues/2102): Fix thread-safety when using multiple instances of the statsd input plugin.
- [#2027](https://github.com/influxdata/telegraf/issues/2027): docker input: interface conversion panic fix.
## v1.1.2 [2016-12-12]
### Bugfixes
- [#2007](https://github.com/influxdata/telegraf/issues/2007): Make snmptranslate not required when using numeric OID.
- [#2104](https://github.com/influxdata/telegraf/issues/2104): Add a global snmp translation cache.
## v1.1.1 [2016-11-14]
### Bugfixes
- [#2023](https://github.com/influxdata/telegraf/issues/2023): Fix issue parsing toml durations with single quotes.
## v1.1.0 [2016-11-07]
### Release Notes
@@ -73,6 +155,7 @@ continue sending logs to /var/log/telegraf/telegraf.log.
- [#1771](https://github.com/influxdata/telegraf/issues/1771): Delete nil fields in the metric maker.
- [#870](https://github.com/influxdata/telegraf/issues/870): Fix MySQL special characters in DSN parsing.
- [#1742](https://github.com/influxdata/telegraf/issues/1742): Ping input odd timeout behavior.
- [#1950](https://github.com/influxdata/telegraf/pull/1950): Switch to github.com/kballard/go-shellquote.
## v1.0.1 [2016-09-26]

Godeps

@@ -20,7 +20,6 @@ github.com/go-sql-driver/mysql 1fca743146605a172a266e1654e01e5cd5669bee
github.com/gobwas/glob 49571a1557cd20e6a2410adc6421f85b66c730b5
github.com/golang/protobuf 552c7b9542c194800fd493123b3798ef0a832032
github.com/golang/snappy d9eb7a3d35ec988b8585d4a0068e462c27d28380
github.com/gonuts/go-shellquote e842a11b24c6abfb3dd27af69a17f482e4b483c2
github.com/gorilla/context 1ea25387ff6f684839d82767c1733ff4d4d15d0a
github.com/gorilla/mux c9e326e2bdec29039a3761c07bece13133863e1e
github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478
@@ -32,6 +31,7 @@ github.com/influxdata/toml af4df43894b16e3fd2b788d01bd27ad0776ef2d0
github.com/influxdata/wlog 7c63b0a71ef8300adc255344d275e10e5c3a71ec
github.com/kardianos/osext 29ae4ffbc9a6fe9fb2bc5029050ce6996ea1d3bc
github.com/kardianos/service 5e335590050d6d00f3aa270217d288dda1c94d0a
github.com/kballard/go-shellquote d8ec1a69a250a17bb0e419c386eac1f3711dc142
github.com/klauspost/crc32 19b0b332c9e4516a6370a0456e6182c3b5036720
github.com/lib/pq e182dc4027e2ded4b19396d638610f2653295f36
github.com/matttproud/golang_protobuf_extensions d0c3fe89de86839aecf2e0579c40ba3bb336a453
@@ -47,7 +47,7 @@ github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
github.com/prometheus/common e8eabff8812b05acf522b45fdcd725a785188e37
github.com/prometheus/procfs 406e5b7bfd8201a36e2bb5f7bdae0b03380c2ce8
github.com/samuel/go-zookeeper 218e9c81c0dd8b3b18172b2bbfad92cc7d6db55f
github.com/shirou/gopsutil 4d0c402af66c78735c5ccf820dc2ca7de5e4ff08
github.com/shirou/gopsutil 1516eb9ddc5e61ba58874047a98f8b44b5e585e8
github.com/soniah/gosnmp 3fe3beb30fa9700988893c56a63b1df8e1b68c26
github.com/streadway/amqp b4f3ceab0337f013208d31348b578d83c0064744
github.com/stretchr/testify 1f4a1643a57e798696635ea4c126e9127adb7d3c

Godeps_windows

@@ -1,7 +1,6 @@
github.com/Microsoft/go-winio ce2922f643c8fd76b46cadc7f404a06282678b34
github.com/StackExchange/wmi f3e2bae1e0cb5aef83e319133eabfee30013a4a5
github.com/go-ole/go-ole be49f7c07711fcb603cff39e1de7c67926dc0ba7
github.com/lxn/win 950a0e81e7678e63d8e6cd32412bdecb325ccd88
github.com/shirou/w32 3c9377fc6748f222729a8270fe2775d149a249ad
golang.org/x/sys a646d33e2ee3172a661fc09bca23bb4889a41bc8
github.com/go-ini/ini 9144852efba7c4daf409943ee90767da62d55438

Makefile

@@ -1,6 +1,6 @@
VERSION := $(shell sh -c 'git describe --always --tags')
BRANCH := $(shell sh -c 'git rev-parse --abbrev-ref HEAD')
COMMIT := $(shell sh -c 'git rev-parse HEAD')
COMMIT := $(shell sh -c 'git rev-parse --short HEAD')
ifdef GOBIN
PATH := $(GOBIN):$(PATH)
else

README.md

@@ -1,15 +1,23 @@
# Telegraf [![Circle CI](https://circleci.com/gh/influxdata/telegraf.svg?style=svg)](https://circleci.com/gh/influxdata/telegraf) [![Docker pulls](https://img.shields.io/docker/pulls/library/telegraf.svg)](https://hub.docker.com/_/telegraf/)
Telegraf is an agent written in Go for collecting metrics from the system it's
running on, or from other services, and writing them into InfluxDB or other
[outputs](https://github.com/influxdata/telegraf#supported-output-plugins).
Telegraf is an agent written in Go for collecting, processing, aggregating,
and writing metrics.
Design goals are to have a minimal memory footprint with a plugin system so
that developers in the community can easily add support for collecting metrics
from well known services (like Hadoop, Postgres, or Redis) and third party
APIs (like Mailchimp, AWS CloudWatch, or Google Analytics).
New input and output plugins are designed to be easy to contribute,
Telegraf is plugin-driven and has the concept of 4 distinct plugin types:
1. [Input Plugins](#input-plugins) collect metrics from the system, services, or 3rd party APIs
2. [Processor Plugins](#processor-plugins) transform, decorate, and/or filter metrics
3. [Aggregator Plugins](#aggregator-plugins) create aggregate metrics (e.g. mean, min, max, quantiles, etc.)
4. [Output Plugins](#output-plugins) write metrics to various destinations
For more information on Processor and Aggregator plugins please [read this](./docs/AGGREGATORS_AND_PROCESSORS.md).
New plugins are designed to be easy to contribute;
we'll eagerly accept pull
requests and will manage the set of plugins that Telegraf supports.
See the [contributing guide](CONTRIBUTING.md) for instructions on writing
@@ -20,12 +28,12 @@ new plugins.
### Linux deb and rpm Packages:
Latest:
* https://dl.influxdata.com/telegraf/releases/telegraf_1.0.1_amd64.deb
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.1.x86_64.rpm
* https://dl.influxdata.com/telegraf/releases/telegraf_1.1.1_amd64.deb
* https://dl.influxdata.com/telegraf/releases/telegraf-1.1.1.x86_64.rpm
Latest (arm):
* https://dl.influxdata.com/telegraf/releases/telegraf_1.0.1_armhf.deb
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.1.armhf.rpm
* https://dl.influxdata.com/telegraf/releases/telegraf_1.1.1_armhf.deb
* https://dl.influxdata.com/telegraf/releases/telegraf-1.1.1.armhf.rpm
##### Package Instructions:
@@ -39,21 +47,21 @@ controlled via `systemctl [action] telegraf`
### yum/apt Repositories:
There is a yum/apt repo available for the whole InfluxData stack, see
[here](https://docs.influxdata.com/influxdb/v0.10/introduction/installation/#installation)
[here](https://docs.influxdata.com/influxdb/latest/introduction/installation/#installation)
for instructions on setting up the repo. Once it is configured, you will be able
to use this repo to install & update telegraf.
### Linux tarballs:
Latest:
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.1_linux_amd64.tar.gz
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.1_linux_i386.tar.gz
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.1_linux_armhf.tar.gz
* https://dl.influxdata.com/telegraf/releases/telegraf-1.1.1_linux_amd64.tar.gz
* https://dl.influxdata.com/telegraf/releases/telegraf-1.1.1_linux_i386.tar.gz
* https://dl.influxdata.com/telegraf/releases/telegraf-1.1.1_linux_armhf.tar.gz
### FreeBSD tarball:
Latest:
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.1_freebsd_amd64.tar.gz
* https://dl.influxdata.com/telegraf/releases/telegraf-1.1.1_freebsd_amd64.tar.gz
### Ansible Role:
@@ -69,7 +77,7 @@ brew install telegraf
### Windows Binaries (EXPERIMENTAL)
Latest:
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.1_windows_amd64.zip
* https://dl.influxdata.com/telegraf/releases/telegraf-1.1.1_windows_amd64.zip
### From Source:
@@ -127,77 +135,72 @@ telegraf --config telegraf.conf -input-filter cpu:mem -output-filter influxdb
See the [configuration guide](docs/CONFIGURATION.md) for a rundown of the more advanced
configuration options.
## Supported Input Plugins
## Input Plugins
Telegraf currently has support for collecting metrics from many sources. For
more information on each, please look at the directory of the same name in
`plugins/inputs`.
Currently implemented sources:
* [aws cloudwatch](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/cloudwatch)
* [aerospike](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/aerospike)
* [apache](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/apache)
* [bcache](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/bcache)
* [cassandra](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/cassandra)
* [ceph](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/ceph)
* [chrony](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/chrony)
* [consul](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/consul)
* [conntrack](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/conntrack)
* [couchbase](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/couchbase)
* [couchdb](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/couchdb)
* [disque](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/disque)
* [dns query time](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/dns_query)
* [docker](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/docker)
* [dovecot](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/dovecot)
* [elasticsearch](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/elasticsearch)
* [exec](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/exec) (generic executable plugin, support JSON, influx, graphite and nagios)
* [filestat](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/filestat)
* [haproxy](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/haproxy)
* [hddtemp](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/hddtemp)
* [http_response](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/http_response)
* [httpjson](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/httpjson) (generic JSON-emitting http service plugin)
* [influxdb](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/influxdb)
* [ipmi_sensor](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/ipmi_sensor)
* [iptables](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/iptables)
* [jolokia](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia)
* [leofs](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/leofs)
* [lustre2](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/lustre2)
* [mailchimp](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/mailchimp)
* [memcached](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/memcached)
* [mesos](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/mesos)
* [mongodb](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/mongodb)
* [mysql](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/mysql)
* [net_response](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/net_response)
* [nginx](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nginx)
* [nsq](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nsq)
* [nstat](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nstat)
* [ntpq](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/ntpq)
* [phpfpm](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/phpfpm)
* [phusion passenger](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/passenger)
* [ping](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/ping)
* [postgresql](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/postgresql)
* [postgresql_extensible](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/postgresql_extensible)
* [powerdns](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/powerdns)
* [procstat](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/procstat)
* [prometheus](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/prometheus)
* [puppetagent](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/puppetagent)
* [rabbitmq](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/rabbitmq)
* [raindrops](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/raindrops)
* [redis](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/redis)
* [rethinkdb](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/rethinkdb)
* [riak](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/riak)
* [sensors](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/sensors)
* [snmp](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/snmp)
* [snmp_legacy](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/snmp_legacy)
* [sql server](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/sqlserver) (microsoft)
* [twemproxy](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/twemproxy)
* [varnish](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/varnish)
* [zfs](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/zfs)
* [zookeeper](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/zookeeper)
* [win_perf_counters ](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/win_perf_counters) (windows performance counters)
* [sysstat](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/sysstat)
* [system](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/system)
* [aws cloudwatch](./plugins/inputs/cloudwatch)
* [aerospike](./plugins/inputs/aerospike)
* [apache](./plugins/inputs/apache)
* [bcache](./plugins/inputs/bcache)
* [cassandra](./plugins/inputs/cassandra)
* [ceph](./plugins/inputs/ceph)
* [chrony](./plugins/inputs/chrony)
* [consul](./plugins/inputs/consul)
* [conntrack](./plugins/inputs/conntrack)
* [couchbase](./plugins/inputs/couchbase)
* [couchdb](./plugins/inputs/couchdb)
* [disque](./plugins/inputs/disque)
* [dns query time](./plugins/inputs/dns_query)
* [docker](./plugins/inputs/docker)
* [dovecot](./plugins/inputs/dovecot)
* [elasticsearch](./plugins/inputs/elasticsearch)
* [exec](./plugins/inputs/exec) (generic executable plugin, support JSON, influx, graphite and nagios)
* [filestat](./plugins/inputs/filestat)
* [haproxy](./plugins/inputs/haproxy)
* [hddtemp](./plugins/inputs/hddtemp)
* [http_response](./plugins/inputs/http_response)
* [httpjson](./plugins/inputs/httpjson) (generic JSON-emitting http service plugin)
* [internal](./plugins/inputs/internal)
* [influxdb](./plugins/inputs/influxdb)
* [ipmi_sensor](./plugins/inputs/ipmi_sensor)
* [iptables](./plugins/inputs/iptables)
* [jolokia](./plugins/inputs/jolokia)
* [leofs](./plugins/inputs/leofs)
* [lustre2](./plugins/inputs/lustre2)
* [mailchimp](./plugins/inputs/mailchimp)
* [memcached](./plugins/inputs/memcached)
* [mesos](./plugins/inputs/mesos)
* [mongodb](./plugins/inputs/mongodb)
* [mysql](./plugins/inputs/mysql)
* [net_response](./plugins/inputs/net_response)
* [nginx](./plugins/inputs/nginx)
* [nsq](./plugins/inputs/nsq)
* [nstat](./plugins/inputs/nstat)
* [ntpq](./plugins/inputs/ntpq)
* [phpfpm](./plugins/inputs/phpfpm)
* [phusion passenger](./plugins/inputs/passenger)
* [ping](./plugins/inputs/ping)
* [postgresql](./plugins/inputs/postgresql)
* [postgresql_extensible](./plugins/inputs/postgresql_extensible)
* [powerdns](./plugins/inputs/powerdns)
* [procstat](./plugins/inputs/procstat)
* [prometheus](./plugins/inputs/prometheus)
* [puppetagent](./plugins/inputs/puppetagent)
* [rabbitmq](./plugins/inputs/rabbitmq)
* [raindrops](./plugins/inputs/raindrops)
* [redis](./plugins/inputs/redis)
* [rethinkdb](./plugins/inputs/rethinkdb)
* [riak](./plugins/inputs/riak)
* [sensors](./plugins/inputs/sensors)
* [snmp](./plugins/inputs/snmp)
* [snmp_legacy](./plugins/inputs/snmp_legacy)
* [sql server](./plugins/inputs/sqlserver) (microsoft)
* [twemproxy](./plugins/inputs/twemproxy)
* [varnish](./plugins/inputs/varnish)
* [zfs](./plugins/inputs/zfs)
* [zookeeper](./plugins/inputs/zookeeper)
* [win_perf_counters ](./plugins/inputs/win_perf_counters) (windows performance counters)
* [sysstat](./plugins/inputs/sysstat)
* [system](./plugins/inputs/system)
* cpu
* mem
* net
@@ -211,45 +214,51 @@ Currently implemented sources:
Telegraf can also collect metrics via the following service plugins:
* [http_listener](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/http_listener)
* [kafka_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/kafka_consumer)
* [mqtt_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/mqtt_consumer)
* [nats_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nats_consumer)
* [nsq_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nsq_consumer)
* [logparser](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/logparser)
* [statsd](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/statsd)
* [tail](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/tail)
* [tcp_listener](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/tcp_listener)
* [udp_listener](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/udp_listener)
* [webhooks](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/webhooks)
* [filestack](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/webhooks/filestack)
* [github](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/webhooks/github)
* [mandrill](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/webhooks/mandrill)
* [rollbar](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/webhooks/rollbar)
* [http_listener](./plugins/inputs/http_listener)
* [kafka_consumer](./plugins/inputs/kafka_consumer)
* [mqtt_consumer](./plugins/inputs/mqtt_consumer)
* [nats_consumer](./plugins/inputs/nats_consumer)
* [nsq_consumer](./plugins/inputs/nsq_consumer)
* [logparser](./plugins/inputs/logparser)
* [statsd](./plugins/inputs/statsd)
* [tail](./plugins/inputs/tail)
* [tcp_listener](./plugins/inputs/tcp_listener)
* [udp_listener](./plugins/inputs/udp_listener)
* [webhooks](./plugins/inputs/webhooks)
* [filestack](./plugins/inputs/webhooks/filestack)
* [github](./plugins/inputs/webhooks/github)
* [mandrill](./plugins/inputs/webhooks/mandrill)
* [rollbar](./plugins/inputs/webhooks/rollbar)
We'll be adding support for many more over the coming months. Read on if you
want to add support for another service or third-party API.
## Processor Plugins
## Supported Output Plugins
* [printer](./plugins/processors/printer)
* [influxdb](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/influxdb)
* [amon](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/amon)
* [amqp](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/amqp)
* [aws kinesis](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/kinesis)
* [aws cloudwatch](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/cloudwatch)
* [datadog](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/datadog)
* [file](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/file)
* [graphite](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/graphite)
* [graylog](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/graylog)
* [instrumental](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/instrumental)
* [kafka](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/kafka)
* [librato](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/librato)
* [mqtt](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/mqtt)
* [nats](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/nats)
* [nsq](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/nsq)
* [opentsdb](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/opentsdb)
* [prometheus](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/prometheus_client)
* [riemann](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/riemann)
## Aggregator Plugins
* [minmax](./plugins/aggregators/minmax)
## Output Plugins
* [influxdb](./plugins/outputs/influxdb)
* [amon](./plugins/outputs/amon)
* [amqp](./plugins/outputs/amqp)
* [aws kinesis](./plugins/outputs/kinesis)
* [aws cloudwatch](./plugins/outputs/cloudwatch)
* [datadog](./plugins/outputs/datadog)
* [discard](./plugins/outputs/discard)
* [file](./plugins/outputs/file)
* [graphite](./plugins/outputs/graphite)
* [graylog](./plugins/outputs/graylog)
* [instrumental](./plugins/outputs/instrumental)
* [kafka](./plugins/outputs/kafka)
* [librato](./plugins/outputs/librato)
* [mqtt](./plugins/outputs/mqtt)
* [nats](./plugins/outputs/nats)
* [nsq](./plugins/outputs/nsq)
* [opentsdb](./plugins/outputs/opentsdb)
* [prometheus](./plugins/outputs/prometheus_client)
* [riemann](./plugins/outputs/riemann)
## Contributing

agent/accumulator.go

@@ -2,10 +2,14 @@ package agent
import (
"log"
"sync/atomic"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins"
"github.com/influxdata/telegraf/selfstat"
)
var (
NErrors = selfstat.Register("agent", "gather_errors", map[string]string{})
)
type MetricMaker interface {
@@ -14,14 +18,14 @@ type MetricMaker interface {
measurement string,
fields map[string]interface{},
tags map[string]string,
mType telegraf.ValueType,
mType plugins.ValueType,
t time.Time,
) telegraf.Metric
) plugins.Metric
}
func NewAccumulator(
maker MetricMaker,
metrics chan telegraf.Metric,
metrics chan plugins.Metric,
) *accumulator {
acc := accumulator{
maker: maker,
@@ -32,13 +36,11 @@ func NewAccumulator(
}
type accumulator struct {
metrics chan telegraf.Metric
metrics chan plugins.Metric
maker MetricMaker
precision time.Duration
errCount uint64
}
func (ac *accumulator) AddFields(
@@ -47,7 +49,7 @@ func (ac *accumulator) AddFields(
tags map[string]string,
t ...time.Time,
) {
if m := ac.maker.MakeMetric(measurement, fields, tags, telegraf.Untyped, ac.getTime(t)); m != nil {
if m := ac.maker.MakeMetric(measurement, fields, tags, plugins.Untyped, ac.getTime(t)); m != nil {
ac.metrics <- m
}
}
@@ -58,7 +60,7 @@ func (ac *accumulator) AddGauge(
tags map[string]string,
t ...time.Time,
) {
if m := ac.maker.MakeMetric(measurement, fields, tags, telegraf.Gauge, ac.getTime(t)); m != nil {
if m := ac.maker.MakeMetric(measurement, fields, tags, plugins.Gauge, ac.getTime(t)); m != nil {
ac.metrics <- m
}
}
@@ -69,7 +71,7 @@ func (ac *accumulator) AddCounter(
tags map[string]string,
t ...time.Time,
) {
if m := ac.maker.MakeMetric(measurement, fields, tags, telegraf.Counter, ac.getTime(t)); m != nil {
if m := ac.maker.MakeMetric(measurement, fields, tags, plugins.Counter, ac.getTime(t)); m != nil {
ac.metrics <- m
}
}
@@ -80,7 +82,7 @@ func (ac *accumulator) AddError(err error) {
if err == nil {
return
}
atomic.AddUint64(&ac.errCount, 1)
NErrors.Incr(1)
//TODO suppress/throttle consecutive duplicate errors?
log.Printf("E! Error in plugin [%s]: %s", ac.maker.Name(), err)
}

agent/accumulator_test.go

@@ -8,7 +8,8 @@ import (
"testing"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins"
"github.com/influxdata/telegraf/metric"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -16,7 +17,7 @@ import (
func TestAdd(t *testing.T) {
now := time.Now()
metrics := make(chan telegraf.Metric, 10)
metrics := make(chan plugins.Metric, 10)
defer close(metrics)
a := NewAccumulator(&TestMetricMaker{}, metrics)
@@ -41,13 +42,13 @@ func TestAdd(t *testing.T) {
testm = <-metrics
actual = testm.String()
assert.Equal(t,
fmt.Sprintf("acctest,acc=test value=101 %d", now.UnixNano()),
fmt.Sprintf("acctest,acc=test value=101 %d\n", now.UnixNano()),
actual)
}
func TestAddFields(t *testing.T) {
now := time.Now()
metrics := make(chan telegraf.Metric, 10)
metrics := make(chan plugins.Metric, 10)
defer close(metrics)
a := NewAccumulator(&TestMetricMaker{}, metrics)
@@ -69,7 +70,7 @@ func TestAddFields(t *testing.T) {
testm = <-metrics
actual = testm.String()
assert.Equal(t,
fmt.Sprintf("acctest,acc=test usage=99 %d", now.UnixNano()),
fmt.Sprintf("acctest,acc=test usage=99 %d\n", now.UnixNano()),
actual)
}
@@ -78,7 +79,7 @@ func TestAccAddError(t *testing.T) {
log.SetOutput(errBuf)
defer log.SetOutput(os.Stderr)
metrics := make(chan telegraf.Metric, 10)
metrics := make(chan plugins.Metric, 10)
defer close(metrics)
a := NewAccumulator(&TestMetricMaker{}, metrics)
@@ -87,7 +88,7 @@ func TestAccAddError(t *testing.T) {
a.AddError(fmt.Errorf("baz"))
errs := bytes.Split(errBuf.Bytes(), []byte{'\n'})
assert.EqualValues(t, 3, a.errCount)
assert.EqualValues(t, int64(3), NErrors.Get())
require.Len(t, errs, 4) // 4 because of trailing newline
assert.Contains(t, string(errs[0]), "TestPlugin")
assert.Contains(t, string(errs[0]), "foo")
@@ -99,7 +100,7 @@ func TestAccAddError(t *testing.T) {
func TestAddNoIntervalWithPrecision(t *testing.T) {
now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
metrics := make(chan telegraf.Metric, 10)
metrics := make(chan plugins.Metric, 10)
defer close(metrics)
a := NewAccumulator(&TestMetricMaker{}, metrics)
a.SetPrecision(0, time.Second)
@@ -125,13 +126,13 @@ func TestAddNoIntervalWithPrecision(t *testing.T) {
testm = <-a.metrics
actual = testm.String()
assert.Equal(t,
fmt.Sprintf("acctest,acc=test value=101 %d", int64(1139572800000000000)),
fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800000000000)),
actual)
}
func TestAddDisablePrecision(t *testing.T) {
now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
metrics := make(chan telegraf.Metric, 10)
metrics := make(chan plugins.Metric, 10)
defer close(metrics)
a := NewAccumulator(&TestMetricMaker{}, metrics)
@@ -157,13 +158,13 @@ func TestAddDisablePrecision(t *testing.T) {
testm = <-a.metrics
actual = testm.String()
assert.Equal(t,
fmt.Sprintf("acctest,acc=test value=101 %d", int64(1139572800082912748)),
fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800082912748)),
actual)
}
func TestAddNoPrecisionWithInterval(t *testing.T) {
now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
metrics := make(chan telegraf.Metric, 10)
metrics := make(chan plugins.Metric, 10)
defer close(metrics)
a := NewAccumulator(&TestMetricMaker{}, metrics)
@@ -189,13 +190,13 @@ func TestAddNoPrecisionWithInterval(t *testing.T) {
testm = <-a.metrics
actual = testm.String()
assert.Equal(t,
fmt.Sprintf("acctest,acc=test value=101 %d", int64(1139572800000000000)),
fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800000000000)),
actual)
}
func TestDifferentPrecisions(t *testing.T) {
now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
metrics := make(chan telegraf.Metric, 10)
metrics := make(chan plugins.Metric, 10)
defer close(metrics)
a := NewAccumulator(&TestMetricMaker{}, metrics)
@@ -206,7 +207,7 @@ func TestDifferentPrecisions(t *testing.T) {
testm := <-a.metrics
actual := testm.String()
assert.Equal(t,
fmt.Sprintf("acctest,acc=test value=101 %d", int64(1139572800000000000)),
fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800000000000)),
actual)
a.SetPrecision(0, time.Millisecond)
@@ -216,7 +217,7 @@ func TestDifferentPrecisions(t *testing.T) {
testm = <-a.metrics
actual = testm.String()
assert.Equal(t,
fmt.Sprintf("acctest,acc=test value=101 %d", int64(1139572800083000000)),
fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800083000000)),
actual)
a.SetPrecision(0, time.Microsecond)
@@ -226,7 +227,7 @@ func TestDifferentPrecisions(t *testing.T) {
testm = <-a.metrics
actual = testm.String()
assert.Equal(t,
fmt.Sprintf("acctest,acc=test value=101 %d", int64(1139572800082913000)),
fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800082913000)),
actual)
a.SetPrecision(0, time.Nanosecond)
@@ -236,13 +237,13 @@ func TestDifferentPrecisions(t *testing.T) {
testm = <-a.metrics
actual = testm.String()
assert.Equal(t,
fmt.Sprintf("acctest,acc=test value=101 %d", int64(1139572800082912748)),
fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800082912748)),
actual)
}
func TestAddGauge(t *testing.T) {
now := time.Now()
metrics := make(chan telegraf.Metric, 10)
metrics := make(chan plugins.Metric, 10)
defer close(metrics)
a := NewAccumulator(&TestMetricMaker{}, metrics)
@@ -259,24 +260,24 @@ func TestAddGauge(t *testing.T) {
testm := <-metrics
actual := testm.String()
assert.Contains(t, actual, "acctest value=101")
assert.Equal(t, testm.Type(), telegraf.Gauge)
assert.Equal(t, testm.Type(), plugins.Gauge)
testm = <-metrics
actual = testm.String()
assert.Contains(t, actual, "acctest,acc=test value=101")
assert.Equal(t, testm.Type(), telegraf.Gauge)
assert.Equal(t, testm.Type(), plugins.Gauge)
testm = <-metrics
actual = testm.String()
assert.Equal(t,
fmt.Sprintf("acctest,acc=test value=101 %d", now.UnixNano()),
fmt.Sprintf("acctest,acc=test value=101 %d\n", now.UnixNano()),
actual)
assert.Equal(t, testm.Type(), telegraf.Gauge)
assert.Equal(t, testm.Type(), plugins.Gauge)
}
func TestAddCounter(t *testing.T) {
now := time.Now()
metrics := make(chan telegraf.Metric, 10)
metrics := make(chan plugins.Metric, 10)
defer close(metrics)
a := NewAccumulator(&TestMetricMaker{}, metrics)
@@ -293,19 +294,19 @@ func TestAddCounter(t *testing.T) {
testm := <-metrics
actual := testm.String()
assert.Contains(t, actual, "acctest value=101")
assert.Equal(t, testm.Type(), telegraf.Counter)
assert.Equal(t, testm.Type(), plugins.Counter)
testm = <-metrics
actual = testm.String()
assert.Contains(t, actual, "acctest,acc=test value=101")
assert.Equal(t, testm.Type(), telegraf.Counter)
assert.Equal(t, testm.Type(), plugins.Counter)
testm = <-metrics
actual = testm.String()
assert.Equal(t,
fmt.Sprintf("acctest,acc=test value=101 %d", now.UnixNano()),
fmt.Sprintf("acctest,acc=test value=101 %d\n", now.UnixNano()),
actual)
assert.Equal(t, testm.Type(), telegraf.Counter)
assert.Equal(t, testm.Type(), plugins.Counter)
}
type TestMetricMaker struct {
@@ -318,20 +319,20 @@ func (tm *TestMetricMaker) MakeMetric(
measurement string,
fields map[string]interface{},
tags map[string]string,
mType telegraf.ValueType,
mType plugins.ValueType,
t time.Time,
) telegraf.Metric {
) plugins.Metric {
switch mType {
case telegraf.Untyped:
if m, err := telegraf.NewMetric(measurement, tags, fields, t); err == nil {
case plugins.Untyped:
if m, err := metric.New(measurement, tags, fields, t); err == nil {
return m
}
case telegraf.Counter:
if m, err := telegraf.NewCounterMetric(measurement, tags, fields, t); err == nil {
case plugins.Counter:
if m, err := metric.New(measurement, tags, fields, t, plugins.Counter); err == nil {
return m
}
case telegraf.Gauge:
if m, err := telegraf.NewGaugeMetric(measurement, tags, fields, t); err == nil {
case plugins.Gauge:
if m, err := metric.New(measurement, tags, fields, t, plugins.Gauge); err == nil {
return m
}
}

agent/agent.go

@@ -8,10 +8,11 @@ import (
"sync"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/internal/config"
"github.com/influxdata/telegraf/internal/models"
"github.com/influxdata/telegraf/plugins"
"github.com/influxdata/telegraf/selfstat"
)
// Agent runs telegraf and collects data based on the given config
@@ -44,10 +45,8 @@ func NewAgent(config *config.Config) (*Agent, error) {
// Connect connects to all configured outputs
func (a *Agent) Connect() error {
for _, o := range a.Config.Outputs {
o.Quiet = a.Config.Agent.Quiet
switch ot := o.Output.(type) {
case telegraf.ServiceOutput:
case plugins.ServiceOutput:
if err := ot.Start(); err != nil {
log.Printf("E! Service for output %s failed to start, exiting\n%s\n",
o.Name, err.Error())
@@ -77,7 +76,7 @@ func (a *Agent) Close() error {
for _, o := range a.Config.Outputs {
err = o.Output.Close()
switch ot := o.Output.(type) {
case telegraf.ServiceOutput:
case plugins.ServiceOutput:
ot.Stop()
}
}
@@ -102,28 +101,30 @@ func (a *Agent) gatherer(
shutdown chan struct{},
input *models.RunningInput,
interval time.Duration,
metricC chan telegraf.Metric,
metricC chan plugins.Metric,
) {
defer panicRecover(input)
GatherTime := selfstat.RegisterTiming("gather",
"gather_time_ns",
map[string]string{"input": input.Config.Name},
)
acc := NewAccumulator(input, metricC)
acc.SetPrecision(a.Config.Agent.Precision.Duration,
a.Config.Agent.Interval.Duration)
ticker := time.NewTicker(interval)
defer ticker.Stop()
for {
acc := NewAccumulator(input, metricC)
acc.SetPrecision(a.Config.Agent.Precision.Duration,
a.Config.Agent.Interval.Duration)
input.SetDebug(a.Config.Agent.Debug)
input.SetDefaultTags(a.Config.Tags)
internal.RandomSleep(a.Config.Agent.CollectionJitter.Duration, shutdown)
start := time.Now()
gatherWithTimeout(shutdown, input, acc, interval)
elapsed := time.Since(start)
log.Printf("D! Input [%s] gathered metrics, (%s interval) in %s\n",
input.Name(), interval, elapsed)
GatherTime.Incr(elapsed.Nanoseconds())
select {
case <-shutdown:
@@ -175,7 +176,7 @@ func gatherWithTimeout(
func (a *Agent) Test() error {
shutdown := make(chan struct{})
defer close(shutdown)
metricC := make(chan telegraf.Metric)
metricC := make(chan plugins.Metric)
// dummy receiver for the point channel
go func() {
@@ -204,9 +205,6 @@ func (a *Agent) Test() error {
if err := input.Input.Gather(acc); err != nil {
return err
}
if acc.errCount > 0 {
return fmt.Errorf("Errors encountered during processing")
}
// Special instructions for some inputs. cpu, for example, needs to be
// run twice in order to return cpu usage percentages.
@@ -243,14 +241,14 @@ func (a *Agent) flush() {
}
// flusher monitors the metrics input channel and flushes on the minimum interval
func (a *Agent) flusher(shutdown chan struct{}, metricC chan telegraf.Metric) error {
func (a *Agent) flusher(shutdown chan struct{}, metricC chan plugins.Metric) error {
// Inelegant, but this sleep is to allow the Gather threads to run, so that
// the flusher will flush after metrics are collected.
time.Sleep(time.Millisecond * 300)
// create an output metric channel and a goroutine that continuously passes
// each metric onto the output plugins & aggregators.
outMetricC := make(chan telegraf.Metric, 100)
outMetricC := make(chan plugins.Metric, 100)
var wg sync.WaitGroup
wg.Add(1)
go func() {
@@ -269,7 +267,7 @@ func (a *Agent) flusher(shutdown chan struct{}, metricC chan telegraf.Metric) er
var dropOriginal bool
if !m.IsAggregate() {
for _, agg := range a.Config.Aggregators {
if ok := agg.Add(copyMetric(m)); ok {
if ok := agg.Add(m.Copy()); ok {
dropOriginal = true
}
}
@@ -279,7 +277,7 @@ func (a *Agent) flusher(shutdown chan struct{}, metricC chan telegraf.Metric) er
if i == len(a.Config.Outputs)-1 {
o.AddMetric(m)
} else {
o.AddMetric(copyMetric(m))
o.AddMetric(m.Copy())
}
}
}
@@ -302,7 +300,7 @@ func (a *Agent) flusher(shutdown chan struct{}, metricC chan telegraf.Metric) er
case metric := <-metricC:
// NOTE potential bottleneck here as we put each metric through the
// processors serially.
mS := []telegraf.Metric{metric}
mS := []plugins.Metric{metric}
for _, processor := range a.Config.Processors {
mS = processor.Apply(mS...)
}
@@ -323,17 +321,17 @@ func (a *Agent) Run(shutdown chan struct{}) error {
a.Config.Agent.Hostname, a.Config.Agent.FlushInterval.Duration)
// channel shared between all input threads for accumulating metrics
metricC := make(chan telegraf.Metric, 100)
metricC := make(chan plugins.Metric, 100)
// Start all ServicePlugins
for _, input := range a.Config.Inputs {
input.SetDefaultTags(a.Config.Tags)
switch p := input.Input.(type) {
case telegraf.ServiceInput:
case plugins.ServiceInput:
acc := NewAccumulator(input, metricC)
// Service input plugins should set the precision of their metrics
// themselves.
acc.SetPrecision(time.Nanosecond, 0)
input.SetDefaultTags(a.Config.Tags)
if err := p.Start(acc); err != nil {
log.Printf("E! Service for input %s failed to start, exiting\n%s\n",
input.Name(), err.Error())
@@ -385,19 +383,3 @@ func (a *Agent) Run(shutdown chan struct{}) error {
wg.Wait()
return nil
}
func copyMetric(m telegraf.Metric) telegraf.Metric {
t := time.Time(m.Time())
tags := make(map[string]string)
fields := make(map[string]interface{})
for k, v := range m.Tags() {
tags[k] = v
}
for k, v := range m.Fields() {
fields[k] = v
}
out, _ := telegraf.NewMetric(m.Name(), tags, fields, t)
return out
}

View File

@@ -4,9 +4,9 @@ machine:
post:
- sudo service zookeeper stop
- go version
- go version | grep 1.7.3 || sudo rm -rf /usr/local/go
- wget https://storage.googleapis.com/golang/go1.7.3.linux-amd64.tar.gz
- sudo tar -C /usr/local -xzf go1.7.3.linux-amd64.tar.gz
- go version | grep 1.8beta1 || sudo rm -rf /usr/local/go
- wget https://storage.googleapis.com/golang/go1.8beta1.linux-amd64.tar.gz
- sudo tar -C /usr/local -xzf go1.8beta1.linux-amd64.tar.gz
- go version
dependencies:

View File

@@ -6,6 +6,9 @@ import (
"log"
"os"
"os/signal"
"path"
"path/filepath"
"plugin"
"runtime"
"strings"
"syscall"
@@ -13,11 +16,14 @@ import (
"github.com/influxdata/telegraf/agent"
"github.com/influxdata/telegraf/internal/config"
"github.com/influxdata/telegraf/logger"
"github.com/influxdata/telegraf/plugins"
"github.com/influxdata/telegraf/plugins/aggregators"
_ "github.com/influxdata/telegraf/plugins/aggregators/all"
"github.com/influxdata/telegraf/plugins/inputs"
_ "github.com/influxdata/telegraf/plugins/inputs/all"
"github.com/influxdata/telegraf/plugins/outputs"
_ "github.com/influxdata/telegraf/plugins/outputs/all"
"github.com/influxdata/telegraf/plugins/processors"
_ "github.com/influxdata/telegraf/plugins/processors/all"
"github.com/kardianos/service"
)
@@ -50,6 +56,8 @@ var fUsage = flag.String("usage", "",
"print usage for a plugin, ie, 'telegraf -usage mysql'")
var fService = flag.String("service", "",
"operate on the service")
var fPlugins = flag.String("plugins", "",
"path to directory containing external plugins")
// Telegraf version, populated by the linker.
// ie, -ldflags "-X main.version=`git describe --always --tags`"
@@ -304,9 +312,93 @@ func (p *program) Stop(s service.Service) error {
return nil
}
// loadExternalPlugins loads external plugins from shared libraries (.so, .dll, etc.)
// in the specified directory.
func loadExternalPlugins(dir string) error {
return filepath.Walk(dir, func(pth string, info os.FileInfo, err error) error {
// Stop if there was an error.
if err != nil {
return err
}
// Ignore directories.
if info.IsDir() {
return nil
}
// Ignore files that aren't shared libraries.
ext := strings.ToLower(path.Ext(pth))
if ext != ".so" && ext != ".dll" {
return nil
}
// Load plugin.
p, err := plugin.Open(pth)
if err != nil {
return err
}
// Register plugin.
if err := registerPlugin(dir, pth, p); err != nil {
return err
}
return nil
})
}
// registerPlugin registers an external plugin with telegraf.
func registerPlugin(pluginsDir, filePath string, p *plugin.Plugin) error {
// Clean the file path and make sure it's relative to the root plugins directory.
// This is done because plugin names are namespaced using the directory
// structure. E.g., if the root plugin directory, passed in the pluginsDir
// argument, is '/home/jdoe/bin/telegraf/plugins' and we're registering plugin
// '/home/jdoe/bin/telegraf/plugins/input/mysql.so', the resulting plugin name
// is 'plugins.input.mysql'.
pluginsDir = filepath.Clean(pluginsDir)
parentDir, _ := filepath.Split(pluginsDir)
var err error
if filePath, err = filepath.Rel(parentDir, filePath); err != nil {
return err
}
// Strip the file extension and save it.
ext := path.Ext(filePath)
filePath = strings.TrimSuffix(filePath, ext)
// Convert path separators to "." to generate a plugin name namespaced by directory names.
name := strings.Replace(filePath, string(os.PathSeparator), ".", -1)
if create, err := p.Lookup("NewInput"); err == nil {
inputs.Add(name, inputs.Creator(create.(func() plugins.Input)))
} else if create, err := p.Lookup("NewOutput"); err == nil {
outputs.Add(name, outputs.Creator(create.(func() plugins.Output)))
} else if create, err := p.Lookup("NewProcessor"); err == nil {
processors.Add(name, processors.Creator(create.(func() plugins.Processor)))
} else if create, err := p.Lookup("NewAggregator"); err == nil {
aggregators.Add(name, aggregators.Creator(create.(func() plugins.Aggregator)))
} else {
return fmt.Errorf("not a telegraf plugin: %s%s", filePath, ext)
}
log.Printf("I! Registered: %s (from %s%s)\n", name, filePath, ext)
return nil
}
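For reference, a minimal sketch of an external input plugin that this loader could pick up. The `NewInput` name and its `func() plugins.Input` signature must match the `Lookup` above; the package name, file name, measurement, and field are all hypothetical. Note that `-buildmode=plugin` requires Go 1.8, which is why the Circle build above moves to go1.8beta1:

```go
// mymetrics.go — built with something like:
//   go build -buildmode=plugin -o plugins/input/mymetrics.so mymetrics.go
// and loaded with: telegraf -plugins ./plugins
package main

import "github.com/influxdata/telegraf/plugins"

type myMetrics struct{}

func (m *myMetrics) Description() string  { return "a hypothetical external input" }
func (m *myMetrics) SampleConfig() string { return "" }

// Gather is called once per collection interval.
func (m *myMetrics) Gather(acc plugins.Accumulator) error {
	acc.AddFields("mymetrics", map[string]interface{}{"up": 1}, nil)
	return nil
}

// NewInput is the symbol that registerPlugin looks up.
func NewInput() plugins.Input { return &myMetrics{} }
```

Built as `plugins/input/mymetrics.so`, this plugin would be registered under the name `plugins.input.mymetrics`.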
func main() {
flag.Usage = func() { usageExit(0) }
flag.Parse()
// Load external plugins, if requested.
if *fPlugins != "" {
pluginsDir, err := filepath.Abs(*fPlugins)
if err != nil {
log.Fatal("E! " + err.Error())
}
log.Printf("I! Loading external plugins from: %s\n", pluginsDir)
if err := loadExternalPlugins(pluginsDir); err != nil {
log.Fatal("E! " + err.Error())
}
}
if runtime.GOOS == "windows" {
svcConfig := &service.Config{
Name: "telegraf",

View File

@@ -0,0 +1,59 @@
# Telegraf Aggregator & Processor Plugins
As of release 1.1.0, Telegraf has the concept of Aggregator and Processor Plugins.
These plugins sit in-between Input & Output plugins, aggregating and processing
metrics as they pass through Telegraf:
```
┌───────────┐
│ │
│ CPU │───┐
│ │ │
└───────────┘ │
┌───────────┐ │ ┌───────────┐
│ │ │ │ │
│ Memory │───┤ ┌──▶│ InfluxDB │
│ │ │ │ │ │
└───────────┘ │ ┌─────────────┐ ┌─────────────┐ │ └───────────┘
│ │ │ │Aggregate │ │
┌───────────┐ │ │Process │ │ - mean │ │ ┌───────────┐
│ │ │ │ - transform │ │ - quantiles │ │ │ │
│ MySQL │───┼───▶│ - decorate │────▶│ - min/max │───┼──▶│ File │
│ │ │ │ - filter │ │ - count │ │ │ │
└───────────┘ │ │ │ │ │ │ └───────────┘
│ └─────────────┘ └─────────────┘ │
┌───────────┐ │ │ ┌───────────┐
│ │ │ │ │ │
│ SNMP │───┤ └──▶│ Kafka │
│ │ │ │ │
└───────────┘ │ └───────────┘
┌───────────┐ │
│ │ │
│ Docker │───┘
│ │
└───────────┘
```
Both Aggregators and Processors analyze metrics as they pass through Telegraf.
**Processor** plugins process metrics as they pass through and immediately emit
results based on the values they process. For example, this could be printing
all metrics or adding a tag to all metrics that pass through.
**Aggregator** plugins, on the other hand, are a bit more complicated. Aggregators
are typically for emitting new _aggregate_ metrics, such as a running mean,
minimum, maximum, quantiles, or standard deviation. For this reason, all _aggregator_
plugins are configured with a `period`. The `period` is the size of the window
of metrics that each _aggregate_ represents. In other words, the emitted
_aggregate_ metric will be the aggregated value of the past `period` seconds.
Since many users will only care about their aggregates and not every single metric
gathered, there is also a `drop_original` argument, which tells Telegraf to only
emit the aggregates and not the original metrics.
**NOTE** Since aggregators only aggregate metrics within their period,
historical data is not supported. In other words, if your metric timestamp is more
than `now() - period` in the past, it will not be aggregated. If this is a feature
that you need, please comment on this [github issue](https://github.com/influxdata/telegraf/issues/1992)
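To make the plugin side concrete, here is a minimal aggregator sketch against the new `plugins` interfaces (illustrative only, not a shipped plugin). Telegraf drives the `period` window itself: it calls `Add` for each metric that passes the aggregator's filters, then `Push` followed by `Reset` at the end of every period:

```go
package sum

import "github.com/influxdata/telegraf/plugins"

// Sum keeps a running sum of every float64 field seen during a period.
type Sum struct {
	sums map[string]float64
}

func (s *Sum) Description() string  { return "sum all float64 fields over each period" }
func (s *Sum) SampleConfig() string { return "" }

// Add is called for every metric that passes the aggregator's filters.
func (s *Sum) Add(in plugins.Metric) {
	if s.sums == nil {
		s.sums = make(map[string]float64)
	}
	for k, v := range in.Fields() {
		if fv, ok := v.(float64); ok {
			s.sums[in.Name()+"_"+k] += fv
		}
	}
}

// Push emits the aggregate metrics at the end of the period.
func (s *Sum) Push(acc plugins.Accumulator) {
	fields := map[string]interface{}{}
	for k, v := range s.sums {
		fields[k] = v
	}
	if len(fields) > 0 {
		acc.AddFields("sum", fields, map[string]string{})
	}
}

// Reset starts a new period window.
func (s *Sum) Reset() { s.sums = nil }
```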

View File

@@ -147,6 +147,62 @@ Your Telegraf metrics would get tagged with "my_tag_1"
exec_mycollector,my_tag_1=foo a=5,b_c=6
```
If the JSON data is an array, then each element of the array is parsed with the configured settings.
Each resulting metric will be output with the same timestamp.
For example, given the following configuration:
```toml
[[inputs.exec]]
## Commands array
commands = ["/usr/bin/mycollector --foo=bar"]
## measurement name suffix (for separating different commands)
name_suffix = "_mycollector"
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "json"
## List of tag names to extract from top-level of JSON server response
tag_keys = [
"my_tag_1",
"my_tag_2"
]
```
with this JSON output from a command:
```json
[
{
"a": 5,
"b": {
"c": 6
},
"my_tag_1": "foo",
"my_tag_2": "baz"
},
{
"a": 7,
"b": {
"c": 8
},
"my_tag_1": "bar",
"my_tag_2": "baz"
}
]
```
Your Telegraf metrics would get tagged with "my_tag_1" and "my_tag_2"
```
exec_mycollector,my_tag_1=foo,my_tag_2=baz a=5,b_c=6
exec_mycollector,my_tag_1=bar,my_tag_2=baz a=7,b_c=8
```
# Value:
The "value" data format translates single values into Telegraf metrics. This

View File

@@ -12,11 +12,11 @@
- github.com/gogo/protobuf [BSD LICENSE](https://github.com/gogo/protobuf/blob/master/LICENSE)
- github.com/golang/protobuf [BSD LICENSE](https://github.com/golang/protobuf/blob/master/LICENSE)
- github.com/golang/snappy [BSD LICENSE](https://github.com/golang/snappy/blob/master/LICENSE)
- github.com/gonuts/go-shellquote (No License, but the project it was forked from https://github.com/kballard/go-shellquote is [MIT](https://github.com/kballard/go-shellquote/blob/master/LICENSE)).
- github.com/hashicorp/go-msgpack [BSD LICENSE](https://github.com/hashicorp/go-msgpack/blob/master/LICENSE)
- github.com/hashicorp/raft [MPL LICENSE](https://github.com/hashicorp/raft/blob/master/LICENSE)
- github.com/hashicorp/raft-boltdb [MPL LICENSE](https://github.com/hashicorp/raft-boltdb/blob/master/LICENSE)
- github.com/kardianos/service [ZLIB LICENSE](https://github.com/kardianos/service/blob/master/LICENSE) (License not named but matches word for word with ZLib)
- github.com/kballard/go-shellquote [MIT LICENSE](https://github.com/kballard/go-shellquote/blob/master/LICENSE)
- github.com/lib/pq [MIT LICENSE](https://github.com/lib/pq/blob/master/LICENSE.md)
- github.com/matttproud/golang_protobuf_extensions [APACHE LICENSE](https://github.com/matttproud/golang_protobuf_extensions/blob/master/LICENSE)
- github.com/naoina/go-stringutil [MIT LICENSE](https://github.com/naoina/go-stringutil/blob/master/LICENSE)

View File

@@ -160,7 +160,7 @@
# # Configuration for AWS CloudWatch output.
# [[outputs.cloudwatch]]
# ## Amazon REGION
# region = 'us-east-1'
# region = "us-east-1"
#
# ## Amazon Credentials
# ## Credentials are loaded in the following order
@@ -178,7 +178,7 @@
# #shared_credential_file = ""
#
# ## Namespace for the CloudWatch MetricDatums
# namespace = 'InfluxData/Telegraf'
# namespace = "InfluxData/Telegraf"
# # Configuration for DataDog API to send metrics to.
@@ -623,7 +623,7 @@
# # Pull Metric Statistics from Amazon CloudWatch
# [[inputs.cloudwatch]]
# ## Amazon Region
# region = 'us-east-1'
# region = "us-east-1"
#
# ## Amazon Credentials
# ## Credentials are loaded in the following order
@@ -641,21 +641,21 @@
# #shared_credential_file = ""
#
# ## Requested CloudWatch aggregation Period (required - must be a multiple of 60s)
# period = '1m'
# period = "5m"
#
# ## Collection Delay (required - must account for metrics availability via CloudWatch API)
# delay = '1m'
# delay = "5m"
#
# ## Recommended: use a metric 'interval' that is a multiple of 'period' to avoid
# ## gaps or overlap in pulled data
# interval = '1m'
# interval = "5m"
#
# ## Configure the TTL for the internal cache of metrics.
# ## Defaults to 1 hr if not specified
# #cache_ttl = '10m'
# #cache_ttl = "10m"
#
# ## Metric Statistic Namespace (required)
# namespace = 'AWS/ELB'
# namespace = "AWS/ELB"
#
# ## Maximum requests per second. Note that the global default AWS rate limit is
# ## 10 reqs/sec, so if you define multiple namespaces, these should add up to a
@@ -666,12 +666,12 @@
# ## Defaults to all Metrics in Namespace if nothing is provided
# ## Refreshes Namespace available metrics every 1h
# #[[inputs.cloudwatch.metrics]]
# # names = ['Latency', 'RequestCount']
# # names = ["Latency", "RequestCount"]
# #
# # ## Dimension filters for Metric (optional)
# # [[inputs.cloudwatch.metrics.dimensions]]
# # name = 'LoadBalancerName'
# # value = 'p-example'
# # name = "LoadBalancerName"
# # value = "p-example"
# # Gather health check statuses from services registered in Consul
@@ -777,18 +777,25 @@
# # Read stats from one or more Elasticsearch servers or clusters
# [[inputs.elasticsearch]]
# ## specify a list of one or more Elasticsearch servers
# # you can add username and password to your url to use basic authentication:
# # servers = ["http://user:pass@localhost:9200"]
# servers = ["http://localhost:9200"]
#
# ## Timeout for HTTP requests to the Elasticsearch server(s)
# http_timeout = "5s"
#
# ## set local to false when you want to read the indices stats from all nodes
# ## within the cluster
# ## When local is true (the default), the node will read only its own stats.
# ## Set local to false when you want to read the node stats from all nodes
# ## of the cluster.
# local = true
#
# ## set cluster_health to true when you want to also obtain cluster level stats
# ## set cluster_health to true when you want to also obtain cluster health stats
# cluster_health = false
#
# ## Set cluster_stats to true when you want to obtain cluster stats from the
# ## Master node.
# cluster_stats = false
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
@@ -1979,4 +1986,3 @@
#
# [inputs.webhooks.rollbar]
# path = "/rollbar"

View File

@@ -130,6 +130,7 @@
Counters = [
"Context Switches/sec",
"System Calls/sec",
"Processor Queue Length",
]
Instances = ["------"]
Measurement = "win_system"

View File

@@ -3,16 +3,18 @@ package buffer
import (
"sync"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins"
"github.com/influxdata/telegraf/selfstat"
)
var (
MetricsWritten = selfstat.Register("agent", "metrics_written", map[string]string{})
MetricsDropped = selfstat.Register("agent", "metrics_dropped", map[string]string{})
)
// Buffer is an object for storing metrics in a circular buffer.
type Buffer struct {
buf chan telegraf.Metric
// total dropped metrics
drops int
// total metrics added
total int
buf chan plugins.Metric
mu sync.Mutex
}
@@ -22,7 +24,7 @@ type Buffer struct {
// called when the buffer is full, then the oldest metric(s) will be dropped.
func NewBuffer(size int) *Buffer {
return &Buffer{
buf: make(chan telegraf.Metric, size),
buf: make(chan plugins.Metric, size),
}
}
@@ -36,25 +38,14 @@ func (b *Buffer) Len() int {
return len(b.buf)
}
// Drops returns the total number of dropped metrics that have occurred in this
// buffer since instantiation.
func (b *Buffer) Drops() int {
return b.drops
}
// Total returns the total number of metrics that have been added to this buffer.
func (b *Buffer) Total() int {
return b.total
}
// Add adds metrics to the buffer.
func (b *Buffer) Add(metrics ...telegraf.Metric) {
func (b *Buffer) Add(metrics ...plugins.Metric) {
for i := range metrics {
b.total++
MetricsWritten.Incr(1)
select {
case b.buf <- metrics[i]:
default:
b.drops++
MetricsDropped.Incr(1)
// the buffer is full: drop the oldest metric to make room for the new one
<-b.buf
b.buf <- metrics[i]
}
@@ -64,10 +55,10 @@ func (b *Buffer) Add(metrics ...telegraf.Metric) {
// Batch returns a batch of metrics of up to batchSize, oldest first.
// The batch can be shorter than batchSize if the buffer holds fewer
// metrics than requested.
func (b *Buffer) Batch(batchSize int) []telegraf.Metric {
func (b *Buffer) Batch(batchSize int) []plugins.Metric {
b.mu.Lock()
n := min(len(b.buf), batchSize)
out := make([]telegraf.Metric, n)
out := make([]plugins.Metric, n)
for i := 0; i < n; i++ {
out[i] = <-b.buf
}
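A quick usage sketch of the drop-oldest behavior above (using the repo's `testutil.TestMetric` helper; the metric names are made up):

```go
package main

import (
	"fmt"

	"github.com/influxdata/telegraf/internal/buffer"
	"github.com/influxdata/telegraf/testutil"
)

func main() {
	b := buffer.NewBuffer(3)
	b.Add(
		testutil.TestMetric(1, "m1"),
		testutil.TestMetric(2, "m2"),
		testutil.TestMetric(3, "m3"),
	)
	// The buffer is full: this Add drops the oldest metric (m1) and
	// increments the agent's metrics_dropped selfstat.
	b.Add(testutil.TestMetric(4, "m4"))

	batch := b.Batch(2)              // oldest first: m2, m3
	fmt.Println(len(batch), b.Len()) // 2 1
}
```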

View File

@@ -3,13 +3,13 @@ package buffer
import (
"testing"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
)
var metricList = []telegraf.Metric{
var metricList = []plugins.Metric{
testutil.TestMetric(2, "mymetric1"),
testutil.TestMetric(1, "mymetric2"),
testutil.TestMetric(11, "mymetric3"),
@@ -27,47 +27,53 @@ func BenchmarkAddMetrics(b *testing.B) {
func TestNewBufferBasicFuncs(t *testing.T) {
b := NewBuffer(10)
MetricsDropped.Set(0)
MetricsWritten.Set(0)
assert.True(t, b.IsEmpty())
assert.Zero(t, b.Len())
assert.Zero(t, b.Drops())
assert.Zero(t, b.Total())
assert.Zero(t, MetricsDropped.Get())
assert.Zero(t, MetricsWritten.Get())
m := testutil.TestMetric(1, "mymetric")
b.Add(m)
assert.False(t, b.IsEmpty())
assert.Equal(t, b.Len(), 1)
assert.Equal(t, b.Drops(), 0)
assert.Equal(t, b.Total(), 1)
assert.Equal(t, int64(0), MetricsDropped.Get())
assert.Equal(t, int64(1), MetricsWritten.Get())
b.Add(metricList...)
assert.False(t, b.IsEmpty())
assert.Equal(t, b.Len(), 6)
assert.Equal(t, b.Drops(), 0)
assert.Equal(t, b.Total(), 6)
assert.Equal(t, int64(0), MetricsDropped.Get())
assert.Equal(t, int64(6), MetricsWritten.Get())
}
func TestDroppingMetrics(t *testing.T) {
b := NewBuffer(10)
MetricsDropped.Set(0)
MetricsWritten.Set(0)
// Add up to the size of the buffer
b.Add(metricList...)
b.Add(metricList...)
assert.False(t, b.IsEmpty())
assert.Equal(t, b.Len(), 10)
assert.Equal(t, b.Drops(), 0)
assert.Equal(t, b.Total(), 10)
assert.Equal(t, int64(0), MetricsDropped.Get())
assert.Equal(t, int64(10), MetricsWritten.Get())
// Add 5 more and verify they were dropped
b.Add(metricList...)
assert.False(t, b.IsEmpty())
assert.Equal(t, b.Len(), 10)
assert.Equal(t, b.Drops(), 5)
assert.Equal(t, b.Total(), 15)
assert.Equal(t, int64(5), MetricsDropped.Get())
assert.Equal(t, int64(15), MetricsWritten.Get())
}
func TestGettingBatches(t *testing.T) {
b := NewBuffer(20)
MetricsDropped.Set(0)
MetricsWritten.Set(0)
// Verify that the buffer returned is smaller than requested when there are
// not as many items as requested.
@@ -78,8 +84,8 @@ func TestGettingBatches(t *testing.T) {
// Verify that the buffer is now empty
assert.True(t, b.IsEmpty())
assert.Zero(t, b.Len())
assert.Zero(t, b.Drops())
assert.Equal(t, b.Total(), 5)
assert.Zero(t, MetricsDropped.Get())
assert.Equal(t, int64(5), MetricsWritten.Get())
// Verify that the buffer returned is not more than the size requested
b.Add(metricList...)
@@ -89,6 +95,6 @@ func TestGettingBatches(t *testing.T) {
// Verify that buffer is not empty
assert.False(t, b.IsEmpty())
assert.Equal(t, b.Len(), 2)
assert.Equal(t, b.Drops(), 0)
assert.Equal(t, b.Total(), 10)
assert.Equal(t, int64(0), MetricsDropped.Get())
assert.Equal(t, int64(10), MetricsWritten.Get())
}

View File

@@ -15,9 +15,9 @@ import (
"strings"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/internal/models"
"github.com/influxdata/telegraf/plugins"
"github.com/influxdata/telegraf/plugins/aggregators"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/plugins/outputs"
@@ -399,7 +399,7 @@ func printFilteredInputs(inputFilters []string, commented bool) {
sort.Strings(pnames)
// cache service inputs to print them at the end
servInputs := make(map[string]telegraf.ServiceInput)
servInputs := make(map[string]plugins.ServiceInput)
// for alphabetical looping:
servInputNames := []string{}
@@ -409,7 +409,7 @@ func printFilteredInputs(inputFilters []string, commented bool) {
input := creator()
switch p := input.(type) {
case telegraf.ServiceInput:
case plugins.ServiceInput:
servInputs[pname] = p
servInputNames = append(servInputNames, pname)
continue
@@ -821,10 +821,7 @@ func (c *Config) addInput(name string, table *ast.Table) error {
return err
}
rp := &models.RunningInput{
Input: input,
Config: pluginConfig,
}
rp := models.NewRunningInput(input, pluginConfig)
c.Inputs = append(c.Inputs, rp)
return nil
}

View File

@@ -35,8 +35,9 @@ type Duration struct {
// UnmarshalTOML parses the duration from the TOML config file
func (d *Duration) UnmarshalTOML(b []byte) error {
var err error
b = bytes.Trim(b, `'`)
// see if we can straight convert it
// see if we can directly convert it
d.Duration, err = time.ParseDuration(string(b))
if err == nil {
return nil

View File

@@ -142,6 +142,10 @@ func TestDuration(t *testing.T) {
d.UnmarshalTOML([]byte(`1s`))
assert.Equal(t, time.Second, d.Duration)
d = Duration{}
d.UnmarshalTOML([]byte(`'1s'`))
assert.Equal(t, time.Second, d.Duration)
d = Duration{}
d.UnmarshalTOML([]byte(`10`))
assert.Equal(t, 10*time.Second, d.Duration)

View File

@@ -5,7 +5,8 @@ import (
"math"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/plugins"
)
// makemetric is used by both RunningAggregator & RunningInput
@@ -31,10 +32,9 @@ func makemetric(
daemonTags map[string]string,
filter Filter,
applyFilter bool,
debug bool,
mType telegraf.ValueType,
mType plugins.ValueType,
t time.Time,
) telegraf.Metric {
) plugins.Metric {
if len(fields) == 0 || len(measurement) == 0 {
return nil
}
@@ -122,11 +122,9 @@ func makemetric(
case float64:
// NaNs are invalid values in influxdb, skip measurement
if math.IsNaN(val) || math.IsInf(val, 0) {
if debug {
log.Printf("Measurement [%s] field [%s] has a NaN or Inf "+
"field, skipping",
measurement, k)
}
log.Printf("D! Measurement [%s] field [%s] has a NaN or Inf "+
"field, skipping",
measurement, k)
delete(fields, k)
continue
}
@@ -135,16 +133,7 @@ func makemetric(
}
}
var m telegraf.Metric
var err error
switch mType {
case telegraf.Counter:
m, err = telegraf.NewCounterMetric(measurement, tags, fields, t)
case telegraf.Gauge:
m, err = telegraf.NewGaugeMetric(measurement, tags, fields, t)
default:
m, err = telegraf.NewMetric(measurement, tags, fields, t)
}
m, err := metric.New(measurement, tags, fields, t, mType)
if err != nil {
log.Printf("Error adding point [%s]: %s\n", measurement, err.Error())
return nil

View File

@@ -3,27 +3,28 @@ package models
import (
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/plugins"
)
type RunningAggregator struct {
a telegraf.Aggregator
a plugins.Aggregator
Config *AggregatorConfig
metrics chan telegraf.Metric
metrics chan plugins.Metric
periodStart time.Time
periodEnd time.Time
}
func NewRunningAggregator(
a telegraf.Aggregator,
a plugins.Aggregator,
conf *AggregatorConfig,
) *RunningAggregator {
return &RunningAggregator{
a: a,
Config: conf,
metrics: make(chan telegraf.Metric, 100),
metrics: make(chan plugins.Metric, 100),
}
}
@@ -51,9 +52,9 @@ func (r *RunningAggregator) MakeMetric(
measurement string,
fields map[string]interface{},
tags map[string]string,
mType telegraf.ValueType,
mType plugins.ValueType,
t time.Time,
) telegraf.Metric {
) plugins.Metric {
m := makemetric(
measurement,
fields,
@@ -65,12 +66,13 @@ func (r *RunningAggregator) MakeMetric(
nil,
r.Config.Filter,
false,
false,
mType,
t,
)
m.SetAggregate(true)
if m != nil {
m.SetAggregate(true)
}
return m
}
@@ -78,7 +80,7 @@ func (r *RunningAggregator) MakeMetric(
// Add applies the given metric to the aggregator.
// Before applying to the plugin, it will run any defined filters on the metric.
// Apply returns true if the original metric should be dropped.
func (r *RunningAggregator) Add(in telegraf.Metric) bool {
func (r *RunningAggregator) Add(in plugins.Metric) bool {
if r.Config.Filter.IsActive() {
// check if the aggregator should apply this metric
name := in.Name()
@@ -90,17 +92,17 @@ func (r *RunningAggregator) Add(in telegraf.Metric) bool {
return false
}
in, _ = telegraf.NewMetric(name, tags, fields, t)
in, _ = metric.New(name, tags, fields, t)
}
r.metrics <- in
return r.Config.DropOriginal
}
func (r *RunningAggregator) add(in telegraf.Metric) {
func (r *RunningAggregator) add(in plugins.Metric) {
r.a.Add(in)
}
func (r *RunningAggregator) push(acc telegraf.Accumulator) {
func (r *RunningAggregator) push(acc plugins.Accumulator) {
r.a.Push(acc)
}
@@ -111,7 +113,7 @@ func (r *RunningAggregator) reset() {
// Run runs the running aggregator, listens for incoming metrics, and waits
// for period ticks to tell it when to push and reset the aggregator.
func (r *RunningAggregator) Run(
acc telegraf.Accumulator,
acc plugins.Accumulator,
shutdown chan struct{},
) {
// The start of the period is truncated to the nearest second.

View File

@@ -7,7 +7,7 @@ import (
"testing"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
@@ -30,7 +30,7 @@ func TestAdd(t *testing.T) {
"RITest",
map[string]interface{}{"value": int(101)},
map[string]string{},
telegraf.Untyped,
plugins.Untyped,
time.Now().Add(time.Millisecond*150),
)
assert.False(t, ra.Add(m))
@@ -62,7 +62,7 @@ func TestAddMetricsOutsideCurrentPeriod(t *testing.T) {
"RITest",
map[string]interface{}{"value": int(101)},
map[string]string{},
telegraf.Untyped,
plugins.Untyped,
time.Now().Add(-time.Hour),
)
assert.False(t, ra.Add(m))
@@ -72,7 +72,7 @@ func TestAddMetricsOutsideCurrentPeriod(t *testing.T) {
"RITest",
map[string]interface{}{"value": int(101)},
map[string]string{},
telegraf.Untyped,
plugins.Untyped,
time.Now().Add(time.Hour),
)
assert.False(t, ra.Add(m))
@@ -82,7 +82,7 @@ func TestAddMetricsOutsideCurrentPeriod(t *testing.T) {
"RITest",
map[string]interface{}{"value": int(101)},
map[string]string{},
telegraf.Untyped,
plugins.Untyped,
time.Now().Add(time.Millisecond*50),
)
assert.False(t, ra.Add(m))
@@ -120,7 +120,7 @@ func TestAddAndPushOnePeriod(t *testing.T) {
"RITest",
map[string]interface{}{"value": int(101)},
map[string]string{},
telegraf.Untyped,
plugins.Untyped,
time.Now().Add(time.Millisecond*100),
)
assert.False(t, ra.Add(m))
@@ -151,7 +151,7 @@ func TestAddDropOriginal(t *testing.T) {
"RITest",
map[string]interface{}{"value": int(101)},
map[string]string{},
telegraf.Untyped,
plugins.Untyped,
time.Now(),
)
assert.True(t, ra.Add(m))
@@ -161,7 +161,7 @@ func TestAddDropOriginal(t *testing.T) {
"foobar",
map[string]interface{}{"value": int(101)},
map[string]string{},
telegraf.Untyped,
plugins.Untyped,
time.Now(),
)
assert.False(t, ra.Add(m2))
@@ -179,54 +179,54 @@ func TestMakeMetricA(t *testing.T) {
"RITest",
map[string]interface{}{"value": int(101)},
map[string]string{},
telegraf.Untyped,
plugins.Untyped,
now,
)
assert.Equal(
t,
fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
m.String(),
fmt.Sprintf("RITest value=101i %d", now.UnixNano()),
)
assert.Equal(
t,
m.Type(),
telegraf.Untyped,
plugins.Untyped,
)
m = ra.MakeMetric(
"RITest",
map[string]interface{}{"value": int(101)},
map[string]string{},
telegraf.Counter,
plugins.Counter,
now,
)
assert.Equal(
t,
fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
m.String(),
fmt.Sprintf("RITest value=101i %d", now.UnixNano()),
)
assert.Equal(
t,
m.Type(),
telegraf.Counter,
plugins.Counter,
)
m = ra.MakeMetric(
"RITest",
map[string]interface{}{"value": int(101)},
map[string]string{},
telegraf.Gauge,
plugins.Gauge,
now,
)
assert.Equal(
t,
fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
m.String(),
fmt.Sprintf("RITest value=101i %d", now.UnixNano()),
)
assert.Equal(
t,
m.Type(),
telegraf.Gauge,
plugins.Gauge,
)
}
@@ -240,14 +240,14 @@ func (t *TestAggregator) Reset() {
atomic.StoreInt64(&t.sum, 0)
}
func (t *TestAggregator) Push(acc telegraf.Accumulator) {
func (t *TestAggregator) Push(acc plugins.Accumulator) {
acc.AddFields("TestMetric",
map[string]interface{}{"sum": t.sum},
map[string]string{},
)
}
func (t *TestAggregator) Add(in telegraf.Metric) {
func (t *TestAggregator) Add(in plugins.Metric) {
for _, v := range in.Fields() {
if vi, ok := v.(int64); ok {
atomic.AddInt64(&t.sum, vi)

View File

@@ -4,16 +4,35 @@ import (
"fmt"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins"
"github.com/influxdata/telegraf/selfstat"
)
var GlobalMetricsGathered = selfstat.Register("agent", "metrics_gathered", map[string]string{})
type RunningInput struct {
Input telegraf.Input
Input plugins.Input
Config *InputConfig
trace bool
debug bool
defaultTags map[string]string
MetricsGathered selfstat.Stat
}
func NewRunningInput(
input plugins.Input,
config *InputConfig,
) *RunningInput {
return &RunningInput{
Input: input,
Config: config,
MetricsGathered: selfstat.Register(
"gather",
"metrics_gathered",
map[string]string{"input": config.Name},
),
}
}
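The `selfstat` pattern used here is simple enough to sketch in isolation (the measurement, field, and tag below are made up): `Register` returns a `Stat` that its owner bumps with `Incr` and that can later be read back with `Get`:

```go
package main

import (
	"fmt"

	"github.com/influxdata/telegraf/selfstat"
)

func main() {
	// register an internal stat under measurement "example"
	stat := selfstat.Register("example", "events_handled",
		map[string]string{"input": "demo"})
	stat.Incr(3)
	stat.Incr(2)
	fmt.Println(stat.Get()) // 5
}
```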
// InputConfig containing a name, interval, and filter
@@ -37,9 +56,9 @@ func (r *RunningInput) MakeMetric(
measurement string,
fields map[string]interface{},
tags map[string]string,
mType telegraf.ValueType,
mType plugins.ValueType,
t time.Time,
) telegraf.Metric {
) plugins.Metric {
m := makemetric(
measurement,
fields,
@@ -51,7 +70,6 @@ func (r *RunningInput) MakeMetric(
r.defaultTags,
r.Config.Filter,
true,
r.debug,
mType,
t,
)
@@ -60,17 +78,11 @@ func (r *RunningInput) MakeMetric(
fmt.Println("> " + m.String())
}
r.MetricsGathered.Incr(1)
GlobalMetricsGathered.Incr(1)
return m
}
func (r *RunningInput) Debug() bool {
return r.debug
}
func (r *RunningInput) SetDebug(debug bool) {
r.debug = debug
}
func (r *RunningInput) Trace() bool {
return r.trace
}

View File

@@ -6,24 +6,22 @@ import (
"testing"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins"
"github.com/stretchr/testify/assert"
)
func TestMakeMetricNoFields(t *testing.T) {
now := time.Now()
ri := RunningInput{
Config: &InputConfig{
Name: "TestRunningInput",
},
}
ri := NewRunningInput(&testInput{}, &InputConfig{
Name: "TestRunningInput",
})
m := ri.MakeMetric(
"RITest",
map[string]interface{}{},
map[string]string{},
telegraf.Untyped,
plugins.Untyped,
now,
)
assert.Nil(t, m)
@@ -32,11 +30,9 @@ func TestMakeMetricNoFields(t *testing.T) {
// nil fields should get dropped
func TestMakeMetricNilFields(t *testing.T) {
now := time.Now()
ri := RunningInput{
Config: &InputConfig{
Name: "TestRunningInput",
},
}
ri := NewRunningInput(&testInput{}, &InputConfig{
Name: "TestRunningInput",
})
m := ri.MakeMetric(
"RITest",
@@ -45,12 +41,12 @@ func TestMakeMetricNilFields(t *testing.T) {
"nil": nil,
},
map[string]string{},
telegraf.Untyped,
plugins.Untyped,
now,
)
assert.Equal(
t,
fmt.Sprintf("RITest value=101i %d", now.UnixNano()),
fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
m.String(),
)
}
@@ -58,13 +54,10 @@ func TestMakeMetricNilFields(t *testing.T) {
// make an untyped, counter, & gauge metric
func TestMakeMetric(t *testing.T) {
now := time.Now()
ri := RunningInput{
Config: &InputConfig{
Name: "TestRunningInput",
},
}
ri.SetDebug(true)
assert.Equal(t, true, ri.Debug())
ri := NewRunningInput(&testInput{}, &InputConfig{
Name: "TestRunningInput",
})
ri.SetTrace(true)
assert.Equal(t, true, ri.Trace())
assert.Equal(t, "inputs.TestRunningInput", ri.Name())
@@ -73,69 +66,66 @@ func TestMakeMetric(t *testing.T) {
"RITest",
map[string]interface{}{"value": int(101)},
map[string]string{},
telegraf.Untyped,
plugins.Untyped,
now,
)
assert.Equal(
t,
fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
m.String(),
fmt.Sprintf("RITest value=101i %d", now.UnixNano()),
)
assert.Equal(
t,
m.Type(),
telegraf.Untyped,
plugins.Untyped,
)
m = ri.MakeMetric(
"RITest",
map[string]interface{}{"value": int(101)},
map[string]string{},
telegraf.Counter,
plugins.Counter,
now,
)
assert.Equal(
t,
fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
m.String(),
fmt.Sprintf("RITest value=101i %d", now.UnixNano()),
)
assert.Equal(
t,
m.Type(),
telegraf.Counter,
plugins.Counter,
)
m = ri.MakeMetric(
"RITest",
map[string]interface{}{"value": int(101)},
map[string]string{},
telegraf.Gauge,
plugins.Gauge,
now,
)
assert.Equal(
t,
fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
m.String(),
fmt.Sprintf("RITest value=101i %d", now.UnixNano()),
)
assert.Equal(
t,
m.Type(),
telegraf.Gauge,
plugins.Gauge,
)
}
func TestMakeMetricWithPluginTags(t *testing.T) {
now := time.Now()
ri := RunningInput{
Config: &InputConfig{
Name: "TestRunningInput",
Tags: map[string]string{
"foo": "bar",
},
ri := NewRunningInput(&testInput{}, &InputConfig{
Name: "TestRunningInput",
Tags: map[string]string{
"foo": "bar",
},
}
ri.SetDebug(true)
assert.Equal(t, true, ri.Debug())
})
ri.SetTrace(true)
assert.Equal(t, true, ri.Trace())
@@ -143,29 +133,26 @@ func TestMakeMetricWithPluginTags(t *testing.T) {
"RITest",
map[string]interface{}{"value": int(101)},
nil,
telegraf.Untyped,
plugins.Untyped,
now,
)
assert.Equal(
t,
fmt.Sprintf("RITest,foo=bar value=101i %d\n", now.UnixNano()),
m.String(),
fmt.Sprintf("RITest,foo=bar value=101i %d", now.UnixNano()),
)
}
func TestMakeMetricFilteredOut(t *testing.T) {
now := time.Now()
ri := RunningInput{
Config: &InputConfig{
Name: "TestRunningInput",
Tags: map[string]string{
"foo": "bar",
},
Filter: Filter{NamePass: []string{"foobar"}},
ri := NewRunningInput(&testInput{}, &InputConfig{
Name: "TestRunningInput",
Tags: map[string]string{
"foo": "bar",
},
}
ri.SetDebug(true)
assert.Equal(t, true, ri.Debug())
Filter: Filter{NamePass: []string{"foobar"}},
})
ri.SetTrace(true)
assert.Equal(t, true, ri.Trace())
assert.NoError(t, ri.Config.Filter.Compile())
@@ -174,7 +161,7 @@ func TestMakeMetricFilteredOut(t *testing.T) {
"RITest",
map[string]interface{}{"value": int(101)},
nil,
telegraf.Untyped,
plugins.Untyped,
now,
)
assert.Nil(t, m)
@@ -182,16 +169,13 @@ func TestMakeMetricFilteredOut(t *testing.T) {
func TestMakeMetricWithDaemonTags(t *testing.T) {
now := time.Now()
ri := RunningInput{
Config: &InputConfig{
Name: "TestRunningInput",
},
}
ri := NewRunningInput(&testInput{}, &InputConfig{
Name: "TestRunningInput",
})
ri.SetDefaultTags(map[string]string{
"foo": "bar",
})
ri.SetDebug(true)
assert.Equal(t, true, ri.Debug())
ri.SetTrace(true)
assert.Equal(t, true, ri.Trace())
@@ -199,13 +183,13 @@ func TestMakeMetricWithDaemonTags(t *testing.T) {
"RITest",
map[string]interface{}{"value": int(101)},
map[string]string{},
telegraf.Untyped,
plugins.Untyped,
now,
)
assert.Equal(
t,
fmt.Sprintf("RITest,foo=bar value=101i %d\n", now.UnixNano()),
m.String(),
fmt.Sprintf("RITest,foo=bar value=101i %d", now.UnixNano()),
)
}
@@ -214,13 +198,10 @@ func TestMakeMetricInfFields(t *testing.T) {
inf := math.Inf(1)
ninf := math.Inf(-1)
now := time.Now()
ri := RunningInput{
Config: &InputConfig{
Name: "TestRunningInput",
},
}
ri.SetDebug(true)
assert.Equal(t, true, ri.Debug())
ri := NewRunningInput(&testInput{}, &InputConfig{
Name: "TestRunningInput",
})
ri.SetTrace(true)
assert.Equal(t, true, ri.Trace())
@@ -232,25 +213,22 @@ func TestMakeMetricInfFields(t *testing.T) {
"ninf": ninf,
},
map[string]string{},
telegraf.Untyped,
plugins.Untyped,
now,
)
assert.Equal(
t,
fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
m.String(),
fmt.Sprintf("RITest value=101i %d", now.UnixNano()),
)
}
func TestMakeMetricAllFieldTypes(t *testing.T) {
now := time.Now()
ri := RunningInput{
Config: &InputConfig{
Name: "TestRunningInput",
},
}
ri.SetDebug(true)
assert.Equal(t, true, ri.Debug())
ri := NewRunningInput(&testInput{}, &InputConfig{
Name: "TestRunningInput",
})
ri.SetTrace(true)
assert.Equal(t, true, ri.Trace())
@@ -272,81 +250,90 @@ func TestMakeMetricAllFieldTypes(t *testing.T) {
"m": true,
},
map[string]string{},
telegraf.Untyped,
plugins.Untyped,
now,
)
assert.Equal(
t,
fmt.Sprintf("RITest a=10i,b=10i,c=10i,d=10i,e=10i,f=10i,g=10i,h=10i,i=10i,j=10,k=9223372036854775807i,l=\"foobar\",m=true %d", now.UnixNano()),
m.String(),
)
assert.Contains(t, m.String(), "a=10i")
assert.Contains(t, m.String(), "b=10i")
assert.Contains(t, m.String(), "c=10i")
assert.Contains(t, m.String(), "d=10i")
assert.Contains(t, m.String(), "e=10i")
assert.Contains(t, m.String(), "f=10i")
assert.Contains(t, m.String(), "g=10i")
assert.Contains(t, m.String(), "h=10i")
assert.Contains(t, m.String(), "i=10i")
assert.Contains(t, m.String(), "j=10")
assert.NotContains(t, m.String(), "j=10i")
assert.Contains(t, m.String(), "k=9223372036854775807i")
assert.Contains(t, m.String(), "l=\"foobar\"")
assert.Contains(t, m.String(), "m=true")
}
func TestMakeMetricNameOverride(t *testing.T) {
now := time.Now()
ri := RunningInput{
Config: &InputConfig{
Name: "TestRunningInput",
NameOverride: "foobar",
},
}
ri := NewRunningInput(&testInput{}, &InputConfig{
Name: "TestRunningInput",
NameOverride: "foobar",
})
m := ri.MakeMetric(
"RITest",
map[string]interface{}{"value": int(101)},
map[string]string{},
telegraf.Untyped,
plugins.Untyped,
now,
)
assert.Equal(
t,
fmt.Sprintf("foobar value=101i %d\n", now.UnixNano()),
m.String(),
fmt.Sprintf("foobar value=101i %d", now.UnixNano()),
)
}
func TestMakeMetricNamePrefix(t *testing.T) {
now := time.Now()
ri := RunningInput{
Config: &InputConfig{
Name: "TestRunningInput",
MeasurementPrefix: "foobar_",
},
}
ri := NewRunningInput(&testInput{}, &InputConfig{
Name: "TestRunningInput",
MeasurementPrefix: "foobar_",
})
m := ri.MakeMetric(
"RITest",
map[string]interface{}{"value": int(101)},
map[string]string{},
telegraf.Untyped,
plugins.Untyped,
now,
)
assert.Equal(
t,
fmt.Sprintf("foobar_RITest value=101i %d\n", now.UnixNano()),
m.String(),
fmt.Sprintf("foobar_RITest value=101i %d", now.UnixNano()),
)
}
func TestMakeMetricNameSuffix(t *testing.T) {
now := time.Now()
ri := RunningInput{
Config: &InputConfig{
Name: "TestRunningInput",
MeasurementSuffix: "_foobar",
},
}
ri := NewRunningInput(&testInput{}, &InputConfig{
Name: "TestRunningInput",
MeasurementSuffix: "_foobar",
})
m := ri.MakeMetric(
"RITest",
map[string]interface{}{"value": int(101)},
map[string]string{},
telegraf.Untyped,
plugins.Untyped,
now,
)
assert.Equal(
t,
fmt.Sprintf("RITest_foobar value=101i %d\n", now.UnixNano()),
m.String(),
fmt.Sprintf("RITest_foobar value=101i %d", now.UnixNano()),
)
}
type testInput struct{}
func (t *testInput) Description() string { return "" }
func (t *testInput) SampleConfig() string { return "" }
func (t *testInput) Gather(acc plugins.Accumulator) error { return nil }

View File

@@ -4,8 +4,10 @@ import (
"log"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins"
"github.com/influxdata/telegraf/internal/buffer"
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/selfstat"
)
const (
@@ -19,19 +21,24 @@ const (
// RunningOutput contains the output configuration
type RunningOutput struct {
Name string
Output telegraf.Output
Output plugins.Output
Config *OutputConfig
Quiet bool
MetricBufferLimit int
MetricBatchSize int
MetricsFiltered selfstat.Stat
MetricsWritten selfstat.Stat
BufferSize selfstat.Stat
BufferLimit selfstat.Stat
WriteTime selfstat.Stat
metrics *buffer.Buffer
failMetrics *buffer.Buffer
}
func NewRunningOutput(
name string,
output telegraf.Output,
output plugins.Output,
conf *OutputConfig,
batchSize int,
bufferLimit int,
@@ -50,29 +57,56 @@ func NewRunningOutput(
Config: conf,
MetricBufferLimit: bufferLimit,
MetricBatchSize: batchSize,
MetricsWritten: selfstat.Register(
"write",
"metrics_written",
map[string]string{"output": name},
),
MetricsFiltered: selfstat.Register(
"write",
"metrics_filtered",
map[string]string{"output": name},
),
BufferSize: selfstat.Register(
"write",
"buffer_size",
map[string]string{"output": name},
),
BufferLimit: selfstat.Register(
"write",
"buffer_limit",
map[string]string{"output": name},
),
WriteTime: selfstat.RegisterTiming(
"write",
"write_time_ns",
map[string]string{"output": name},
),
}
ro.BufferLimit.Incr(int64(ro.MetricBufferLimit))
return ro
}
// AddMetric adds a metric to the output. It will also write the current
// batch of cached metrics if the batch size has been reached.
func (ro *RunningOutput) AddMetric(metric telegraf.Metric) {
func (ro *RunningOutput) AddMetric(m plugins.Metric) {
// Filter any tagexclude/taginclude parameters before adding metric
if ro.Config.Filter.IsActive() {
// In order to filter out tags, we need to create a new metric, since
// metrics are immutable once created.
name := metric.Name()
tags := metric.Tags()
fields := metric.Fields()
t := metric.Time()
name := m.Name()
tags := m.Tags()
fields := m.Fields()
t := m.Time()
if ok := ro.Config.Filter.Apply(name, fields, tags); !ok {
ro.MetricsFiltered.Incr(1)
return
}
// error is not possible if creating from another metric, so ignore.
metric, _ = telegraf.NewMetric(name, tags, fields, t)
m, _ = metric.New(name, tags, fields, t)
}
ro.metrics.Add(metric)
ro.metrics.Add(m)
if ro.metrics.Len() == ro.MetricBatchSize {
batch := ro.metrics.Batch(ro.MetricBatchSize)
err := ro.write(batch)
@@ -84,28 +118,21 @@ func (ro *RunningOutput) AddMetric(metric telegraf.Metric) {
// Write writes all cached points to this output.
func (ro *RunningOutput) Write() error {
if !ro.Quiet {
log.Printf("I! Output [%s] buffer fullness: %d / %d metrics. "+
"Total gathered metrics: %d. Total dropped metrics: %d.",
ro.Name,
ro.failMetrics.Len()+ro.metrics.Len(),
ro.MetricBufferLimit,
ro.metrics.Total(),
ro.metrics.Drops()+ro.failMetrics.Drops())
}
nFails, nMetrics := ro.failMetrics.Len(), ro.metrics.Len()
log.Printf("D! Output [%s] buffer fullness: %d / %d metrics. ",
ro.Name, nFails+nMetrics, ro.MetricBufferLimit)
ro.BufferSize.Incr(int64(nFails + nMetrics))
var err error
if !ro.failMetrics.IsEmpty() {
bufLen := ro.failMetrics.Len()
// how many batches of failed writes we need to write.
nBatches := bufLen/ro.MetricBatchSize + 1
nBatches := nFails/ro.MetricBatchSize + 1
batchSize := ro.MetricBatchSize
for i := 0; i < nBatches; i++ {
// If it's the last batch, only grab the metrics that have not had
// a write attempt already (this is primarily to preserve order).
if i == nBatches-1 {
batchSize = bufLen % ro.MetricBatchSize
batchSize = nFails % ro.MetricBatchSize
}
batch := ro.failMetrics.Batch(batchSize)
// If we've already failed previous writes, don't bother trying to
@@ -126,6 +153,7 @@ func (ro *RunningOutput) Write() error {
if err == nil {
err = ro.write(batch)
}
if err != nil {
ro.failMetrics.Add(batch...)
return err
@@ -133,18 +161,20 @@ func (ro *RunningOutput) Write() error {
return nil
}
func (ro *RunningOutput) write(metrics []telegraf.Metric) error {
if metrics == nil || len(metrics) == 0 {
func (ro *RunningOutput) write(metrics []plugins.Metric) error {
nMetrics := len(metrics)
if nMetrics == 0 {
return nil
}
start := time.Now()
err := ro.Output.Write(metrics)
elapsed := time.Since(start)
if err == nil {
if !ro.Quiet {
log.Printf("I! Output [%s] wrote batch of %d metrics in %s\n",
ro.Name, len(metrics), elapsed)
}
log.Printf("D! Output [%s] wrote batch of %d metrics in %s\n",
ro.Name, nMetrics, elapsed)
ro.MetricsWritten.Incr(int64(nMetrics))
ro.BufferSize.Incr(-int64(nMetrics))
ro.WriteTime.Incr(elapsed.Nanoseconds())
}
return err
}
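The failed-write batching in `Write` above is easy to misread, so here is the arithmetic as a standalone sketch (the numbers are made up): with 250 failed metrics and a batch size of 100, `nBatches = 250/100 + 1 = 3`, and the final batch only takes the `250 % 100 = 50` metrics that have not had a write attempt yet, preserving order:

```go
package main

import "fmt"

func main() {
	nFails, batchSize := 250, 100
	nBatches := nFails/batchSize + 1 // 3
	for i := 0; i < nBatches; i++ {
		size := batchSize
		if i == nBatches-1 {
			// the last batch holds only the leftover metrics
			size = nFails % batchSize
		}
		fmt.Printf("batch %d: %d metrics\n", i, size) // 100, 100, 50
	}
}
```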

View File

@@ -5,14 +5,14 @@ import (
"sync"
"testing"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var first5 = []telegraf.Metric{
var first5 = []plugins.Metric{
testutil.TestMetric(101, "metric1"),
testutil.TestMetric(101, "metric2"),
testutil.TestMetric(101, "metric3"),
@@ -20,7 +20,7 @@ var first5 = []telegraf.Metric{
testutil.TestMetric(101, "metric5"),
}
var next5 = []telegraf.Metric{
var next5 = []plugins.Metric{
testutil.TestMetric(101, "metric6"),
testutil.TestMetric(101, "metric7"),
testutil.TestMetric(101, "metric8"),
@@ -36,10 +36,9 @@ func BenchmarkRunningOutputAddWrite(b *testing.B) {
m := &perfOutput{}
ro := NewRunningOutput("test", m, conf, 1000, 10000)
ro.Quiet = true
for n := 0; n < b.N; n++ {
ro.AddMetric(first5[0])
ro.AddMetric(testutil.TestMetric(101, "metric1"))
ro.Write()
}
}
@@ -52,10 +51,9 @@ func BenchmarkRunningOutputAddWriteEvery100(b *testing.B) {
m := &perfOutput{}
ro := NewRunningOutput("test", m, conf, 1000, 10000)
ro.Quiet = true
for n := 0; n < b.N; n++ {
ro.AddMetric(first5[0])
ro.AddMetric(testutil.TestMetric(101, "metric1"))
if n%100 == 0 {
ro.Write()
}
@@ -71,10 +69,9 @@ func BenchmarkRunningOutputAddFailWrites(b *testing.B) {
m := &perfOutput{}
m.failWrite = true
ro := NewRunningOutput("test", m, conf, 1000, 10000)
ro.Quiet = true
for n := 0; n < b.N; n++ {
ro.AddMetric(first5[0])
ro.AddMetric(testutil.TestMetric(101, "metric1"))
}
}
@@ -140,7 +137,7 @@ func TestRunningOutput_TagIncludeNoMatch(t *testing.T) {
m := &mockOutput{}
ro := NewRunningOutput("test", m, conf, 1000, 10000)
ro.AddMetric(first5[0])
ro.AddMetric(testutil.TestMetric(101, "metric1"))
assert.Len(t, m.Metrics(), 0)
err := ro.Write()
@@ -161,7 +158,7 @@ func TestRunningOutput_TagExcludeMatch(t *testing.T) {
m := &mockOutput{}
ro := NewRunningOutput("test", m, conf, 1000, 10000)
ro.AddMetric(first5[0])
ro.AddMetric(testutil.TestMetric(101, "metric1"))
assert.Len(t, m.Metrics(), 0)
err := ro.Write()
@@ -182,7 +179,7 @@ func TestRunningOutput_TagExcludeNoMatch(t *testing.T) {
m := &mockOutput{}
ro := NewRunningOutput("test", m, conf, 1000, 10000)
ro.AddMetric(first5[0])
ro.AddMetric(testutil.TestMetric(101, "metric1"))
assert.Len(t, m.Metrics(), 0)
err := ro.Write()
@@ -203,7 +200,7 @@ func TestRunningOutput_TagIncludeMatch(t *testing.T) {
m := &mockOutput{}
ro := NewRunningOutput("test", m, conf, 1000, 10000)
ro.AddMetric(first5[0])
ro.AddMetric(testutil.TestMetric(101, "metric1"))
assert.Len(t, m.Metrics(), 0)
err := ro.Write()
@@ -468,7 +465,7 @@ func TestRunningOutputWriteFailOrder3(t *testing.T) {
type mockOutput struct {
sync.Mutex
metrics []telegraf.Metric
metrics []plugins.Metric
// if true, mock a write failure
failWrite bool
@@ -490,7 +487,7 @@ func (m *mockOutput) SampleConfig() string {
return ""
}
func (m *mockOutput) Write(metrics []telegraf.Metric) error {
func (m *mockOutput) Write(metrics []plugins.Metric) error {
m.Lock()
defer m.Unlock()
if m.failWrite {
@@ -498,7 +495,7 @@ func (m *mockOutput) Write(metrics []telegraf.Metric) error {
}
if m.metrics == nil {
m.metrics = []telegraf.Metric{}
m.metrics = []plugins.Metric{}
}
for _, metric := range metrics {
@@ -507,7 +504,7 @@ func (m *mockOutput) Write(metrics []telegraf.Metric) error {
return nil
}
func (m *mockOutput) Metrics() []telegraf.Metric {
func (m *mockOutput) Metrics() []plugins.Metric {
m.Lock()
defer m.Unlock()
return m.metrics
@@ -534,7 +531,7 @@ func (m *perfOutput) SampleConfig() string {
return ""
}
func (m *perfOutput) Write(metrics []telegraf.Metric) error {
func (m *perfOutput) Write(metrics []plugins.Metric) error {
if m.failWrite {
return fmt.Errorf("Failed Write!")
}

View File

@@ -1,12 +1,12 @@
package models
import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins"
)
type RunningProcessor struct {
Name string
Processor telegraf.Processor
Processor plugins.Processor
Config *ProcessorConfig
}
@@ -23,8 +23,8 @@ type ProcessorConfig struct {
Filter Filter
}
func (rp *RunningProcessor) Apply(in ...telegraf.Metric) []telegraf.Metric {
ret := []telegraf.Metric{}
func (rp *RunningProcessor) Apply(in ...plugins.Metric) []plugins.Metric {
ret := []plugins.Metric{}
for _, metric := range in {
if rp.Config.Filter.IsActive() {

View File

@@ -3,7 +3,7 @@ package models
import (
"testing"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
@@ -19,8 +19,8 @@ func (f *TestProcessor) Description() string { return "" }
// "foo" to "fuz"
// "bar" to "baz"
// And it also drops measurements named "dropme"
func (f *TestProcessor) Apply(in ...telegraf.Metric) []telegraf.Metric {
out := make([]telegraf.Metric, 0)
func (f *TestProcessor) Apply(in ...plugins.Metric) []plugins.Metric {
out := make([]plugins.Metric, 0)
for _, m := range in {
switch m.Name() {
case "foo":
@@ -46,7 +46,7 @@ func NewTestRunningProcessor() *RunningProcessor {
}
func TestRunningProcessor(t *testing.T) {
inmetrics := []telegraf.Metric{
inmetrics := []plugins.Metric{
testutil.TestMetric(1, "foo"),
testutil.TestMetric(1, "bar"),
testutil.TestMetric(1, "baz"),
@@ -69,7 +69,7 @@ func TestRunningProcessor(t *testing.T) {
}
func TestRunningProcessor_WithNameDrop(t *testing.T) {
inmetrics := []telegraf.Metric{
inmetrics := []plugins.Metric{
testutil.TestMetric(1, "foo"),
testutil.TestMetric(1, "bar"),
testutil.TestMetric(1, "baz"),
@@ -96,7 +96,7 @@ func TestRunningProcessor_WithNameDrop(t *testing.T) {
}
func TestRunningProcessor_DroppedMetric(t *testing.T) {
inmetrics := []telegraf.Metric{
inmetrics := []plugins.Metric{
testutil.TestMetric(1, "dropme"),
testutil.TestMetric(1, "foo"),
testutil.TestMetric(1, "bar"),

View File

@@ -4,6 +4,7 @@ import (
"io"
"log"
"os"
"time"
"github.com/influxdata/wlog"
)
@@ -19,8 +20,8 @@ type telegrafLog struct {
writer io.Writer
}
func (t *telegrafLog) Write(p []byte) (n int, err error) {
return t.writer.Write(p)
func (t *telegrafLog) Write(b []byte) (n int, err error) {
return t.writer.Write(append([]byte(time.Now().UTC().Format(time.RFC3339)+" "), b...))
}
// SetupLogging configures the logging output.
@@ -30,6 +31,7 @@ func (t *telegrafLog) Write(p []byte) (n int, err error) {
// interpreted as stderr. If there is an error opening the file the
// logger will fallback to stderr.
func SetupLogging(debug, quiet bool, logfile string) {
log.SetFlags(0)
if debug {
wlog.SetLevel(wlog.DEBUG)
}

logger/logger_test.go Normal file
View File

@@ -0,0 +1,62 @@
package logger
import (
"bytes"
"io/ioutil"
"log"
"os"
"testing"
"github.com/stretchr/testify/assert"
)
func TestWriteLogToFile(t *testing.T) {
tmpfile, err := ioutil.TempFile("", "")
assert.NoError(t, err)
defer func() { os.Remove(tmpfile.Name()) }()
SetupLogging(false, false, tmpfile.Name())
log.Printf("I! TEST")
log.Printf("D! TEST") // <- should be ignored
f, err := ioutil.ReadFile(tmpfile.Name())
assert.NoError(t, err)
assert.Equal(t, f[19:], []byte("Z I! TEST\n"))
}
func TestDebugWriteLogToFile(t *testing.T) {
tmpfile, err := ioutil.TempFile("", "")
assert.NoError(t, err)
defer func() { os.Remove(tmpfile.Name()) }()
SetupLogging(true, false, tmpfile.Name())
log.Printf("D! TEST")
f, err := ioutil.ReadFile(tmpfile.Name())
assert.NoError(t, err)
assert.Equal(t, f[19:], []byte("Z D! TEST\n"))
}
func TestErrorWriteLogToFile(t *testing.T) {
tmpfile, err := ioutil.TempFile("", "")
assert.NoError(t, err)
defer func() { os.Remove(tmpfile.Name()) }()
SetupLogging(false, true, tmpfile.Name())
log.Printf("E! TEST")
log.Printf("I! TEST") // <- should be ignored
f, err := ioutil.ReadFile(tmpfile.Name())
assert.NoError(t, err)
assert.Equal(t, f[19:], []byte("Z E! TEST\n"))
}
func BenchmarkTelegrafLogWrite(b *testing.B) {
var msg = []byte("test")
var buf bytes.Buffer
w := newTelegrafWriter(&buf)
for i := 0; i < b.N; i++ {
buf.Reset()
w.Write(msg)
}
}

metric.go
View File

@@ -1,177 +0,0 @@
package telegraf
import (
"time"
"github.com/influxdata/influxdb/client/v2"
"github.com/influxdata/influxdb/models"
)
// ValueType is an enumeration of metric types that represent a simple value.
type ValueType int
// Possible values for the ValueType enum.
const (
_ ValueType = iota
Counter
Gauge
Untyped
)
type Metric interface {
// Name returns the measurement name of the metric
Name() string
// Tags returns the tags associated with the metric
Tags() map[string]string
// Time returns the timestamp for the metric
Time() time.Time
// Type returns the metric type. Can be telegraf.Untyped, telegraf.Counter, or telegraf.Gauge
Type() ValueType
// UnixNano returns the unix nano time of the metric
UnixNano() int64
// HashID returns a non-cryptographic hash of the metric (name + tags)
// NOTE: do not persist & depend on this value to disk.
HashID() uint64
// Fields returns the fields for the metric
Fields() map[string]interface{}
// String returns a line-protocol string of the metric
String() string
// PrecisionString returns a line-protocol string of the metric, at precision
PrecisionString(precison string) string
// Point returns an influxdb client.Point object
Point() *client.Point
// SetAggregate sets the metric's aggregate status
// This is so that aggregate metrics don't get re-sent to aggregator plugins
SetAggregate(bool)
// IsAggregate returns true if the metric is an aggregate
IsAggregate() bool
}
// metric is a wrapper of the influxdb client.Point struct
type metric struct {
pt models.Point
mType ValueType
isaggregate bool
}
func NewMetricFromPoint(pt models.Point) Metric {
return &metric{
pt: pt,
mType: Untyped,
}
}
// NewMetric returns an untyped metric.
func NewMetric(
name string,
tags map[string]string,
fields map[string]interface{},
t time.Time,
) (Metric, error) {
pt, err := models.NewPoint(name, models.NewTags(tags), fields, t)
if err != nil {
return nil, err
}
return &metric{
pt: pt,
mType: Untyped,
}, nil
}
// NewGaugeMetric returns a gauge metric.
// Gauge metrics should be used when the metric can arbitrarily go up and
// down. ie, temperature, memory usage, cpu usage, etc.
func NewGaugeMetric(
name string,
tags map[string]string,
fields map[string]interface{},
t time.Time,
) (Metric, error) {
pt, err := models.NewPoint(name, models.NewTags(tags), fields, t)
if err != nil {
return nil, err
}
return &metric{
pt: pt,
mType: Gauge,
}, nil
}
// NewCounterMetric returns a Counter metric.
// Counter metrics should be used when the metric being created is an
// always-increasing counter. ie, net bytes received, requests served, errors, etc.
func NewCounterMetric(
name string,
tags map[string]string,
fields map[string]interface{},
t time.Time,
) (Metric, error) {
pt, err := models.NewPoint(name, models.NewTags(tags), fields, t)
if err != nil {
return nil, err
}
return &metric{
pt: pt,
mType: Counter,
}, nil
}
func (m *metric) Name() string {
return m.pt.Name()
}
func (m *metric) Tags() map[string]string {
return m.pt.Tags().Map()
}
func (m *metric) Time() time.Time {
return m.pt.Time()
}
func (m *metric) Type() ValueType {
return m.mType
}
func (m *metric) HashID() uint64 {
return m.pt.HashID()
}
func (m *metric) UnixNano() int64 {
return m.pt.UnixNano()
}
func (m *metric) Fields() map[string]interface{} {
return m.pt.Fields()
}
func (m *metric) String() string {
return m.pt.String()
}
func (m *metric) PrecisionString(precison string) string {
return m.pt.PrecisionString(precison)
}
func (m *metric) Point() *client.Point {
return client.NewPointFrom(m.pt)
}
func (m *metric) IsAggregate() bool {
return m.isaggregate
}
func (m *metric) SetAggregate(b bool) {
m.isaggregate = b
}

metric/escape.go Normal file
View File

@@ -0,0 +1,49 @@
package metric
import (
"strings"
)
var (
// escaper is for escaping:
// - tag keys
// - tag values
// - field keys
// see https://docs.influxdata.com/influxdb/v1.0/write_protocols/line_protocol_tutorial/#special-characters-and-keywords
escaper = strings.NewReplacer(`,`, `\,`, `"`, `\"`, ` `, `\ `, `=`, `\=`)
unEscaper = strings.NewReplacer(`\,`, `,`, `\"`, `"`, `\ `, ` `, `\=`, `=`)
// nameEscaper is for escaping measurement names only.
// see https://docs.influxdata.com/influxdb/v1.0/write_protocols/line_protocol_tutorial/#special-characters-and-keywords
nameEscaper = strings.NewReplacer(`,`, `\,`, ` `, `\ `)
nameUnEscaper = strings.NewReplacer(`\,`, `,`, `\ `, ` `)
// stringFieldEscaper is for escaping string field values only.
// see https://docs.influxdata.com/influxdb/v1.0/write_protocols/line_protocol_tutorial/#special-characters-and-keywords
stringFieldEscaper = strings.NewReplacer(`"`, `\"`)
stringFieldUnEscaper = strings.NewReplacer(`\"`, `"`)
)
func escape(s string, t string) string {
switch t {
case "fieldkey", "tagkey", "tagval":
return escaper.Replace(s)
case "name":
return nameEscaper.Replace(s)
case "fieldval":
return stringFieldEscaper.Replace(s)
}
return s
}
func unescape(s string, t string) string {
switch t {
case "fieldkey", "tagkey", "tagval":
return unEscaper.Replace(s)
case "name":
return nameUnEscaper.Replace(s)
case "fieldval":
return stringFieldUnEscaper.Replace(s)
}
return s
}
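To illustrate what the general-purpose replacer does (mirrored locally here, since `escape` is unexported):

```go
package main

import (
	"fmt"
	"strings"
)

// a local copy of the `escaper` replacer above, for illustration only
var escaper = strings.NewReplacer(`,`, `\,`, `"`, `\"`, ` `, `\ `, `=`, `\=`)

func main() {
	fmt.Println(escaper.Replace(`host name=us,east`))
	// Output: host\ name\=us\,east
}
```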

View File

@@ -0,0 +1,38 @@
package metric
import (
"reflect"
"strconv"
"unsafe"
)
// parseIntBytes is a zero-alloc wrapper around strconv.ParseInt.
func parseIntBytes(b []byte, base int, bitSize int) (i int64, err error) {
s := unsafeBytesToString(b)
return strconv.ParseInt(s, base, bitSize)
}
// parseFloatBytes is a zero-alloc wrapper around strconv.ParseFloat.
func parseFloatBytes(b []byte, bitSize int) (float64, error) {
s := unsafeBytesToString(b)
return strconv.ParseFloat(s, bitSize)
}
// parseBoolBytes is a zero-alloc wrapper around strconv.ParseBool.
func parseBoolBytes(b []byte) (bool, error) {
return strconv.ParseBool(unsafeBytesToString(b))
}
// unsafeBytesToString converts a []byte to a string without a heap allocation.
//
// It is unsafe, and is intended to prepare input to short-lived functions
// that require strings.
func unsafeBytesToString(in []byte) string {
src := *(*reflect.SliceHeader)(unsafe.Pointer(&in))
dst := reflect.StringHeader{
Data: src.Data,
Len: src.Len,
}
s := *(*string)(unsafe.Pointer(&dst))
return s
}
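A small sketch of the zero-allocation parsers in use, slicing a timestamp directly out of a line-protocol buffer (the test name is illustrative):

```go
package metric

import "testing"

// parseIntBytes parses the timestamp bytes in place; no intermediate string
// is allocated for the slice.
func TestParseIntBytesSketch(t *testing.T) {
	line := []byte("cpu value=1 1480614053000000000")
	n, err := parseIntBytes(line[12:], 10, 64)
	if err != nil || n != 1480614053000000000 {
		t.Fatalf("got %d, %v", n, err)
	}
}
```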


@@ -0,0 +1,103 @@
package metric
import (
"strconv"
"testing"
"testing/quick"
)
func TestParseIntBytesEquivalenceFuzz(t *testing.T) {
f := func(b []byte, base int, bitSize int) bool {
exp, expErr := strconv.ParseInt(string(b), base, bitSize)
got, gotErr := parseIntBytes(b, base, bitSize)
return exp == got && checkErrs(expErr, gotErr)
}
cfg := &quick.Config{
MaxCount: 10000,
}
if err := quick.Check(f, cfg); err != nil {
t.Fatal(err)
}
}
func TestParseIntBytesValid64bitBase10EquivalenceFuzz(t *testing.T) {
buf := []byte{}
f := func(n int64) bool {
buf = strconv.AppendInt(buf[:0], n, 10)
exp, expErr := strconv.ParseInt(string(buf), 10, 64)
got, gotErr := parseIntBytes(buf, 10, 64)
return exp == got && checkErrs(expErr, gotErr)
}
cfg := &quick.Config{
MaxCount: 10000,
}
if err := quick.Check(f, cfg); err != nil {
t.Fatal(err)
}
}
func TestParseFloatBytesEquivalenceFuzz(t *testing.T) {
f := func(b []byte, bitSize int) bool {
exp, expErr := strconv.ParseFloat(string(b), bitSize)
got, gotErr := parseFloatBytes(b, bitSize)
return exp == got && checkErrs(expErr, gotErr)
}
cfg := &quick.Config{
MaxCount: 10000,
}
if err := quick.Check(f, cfg); err != nil {
t.Fatal(err)
}
}
func TestParseFloatBytesValid64bitEquivalenceFuzz(t *testing.T) {
buf := []byte{}
f := func(n float64) bool {
buf = strconv.AppendFloat(buf[:0], n, 'f', -1, 64)
exp, expErr := strconv.ParseFloat(string(buf), 64)
got, gotErr := parseFloatBytes(buf, 64)
return exp == got && checkErrs(expErr, gotErr)
}
cfg := &quick.Config{
MaxCount: 10000,
}
if err := quick.Check(f, cfg); err != nil {
t.Fatal(err)
}
}
func TestParseBoolBytesEquivalence(t *testing.T) {
var buf []byte
for _, s := range []string{"1", "t", "T", "TRUE", "true", "True", "0", "f", "F", "FALSE", "false", "False", "fail", "TrUe", "FAlSE", "numbers", ""} {
buf = append(buf[:0], s...)
exp, expErr := strconv.ParseBool(s)
got, gotErr := parseBoolBytes(buf)
if got != exp || !checkErrs(expErr, gotErr) {
t.Errorf("Failed to parse boolean value %q correctly: wanted (%t, %v), got (%t, %v)", s, exp, expErr, got, gotErr)
}
}
}
func checkErrs(a, b error) bool {
if (a == nil) != (b == nil) {
return false
}
return a == nil || a.Error() == b.Error()
}

metric/metric.go Normal file

@@ -0,0 +1,546 @@
package metric
import (
"bytes"
"fmt"
"hash/fnv"
"sort"
"strconv"
"time"
"github.com/influxdata/telegraf/plugins"
// TODO remove
"github.com/influxdata/influxdb/client/v2"
)
const MaxInt = int(^uint(0) >> 1)
func New(
name string,
tags map[string]string,
fields map[string]interface{},
t time.Time,
mType ...plugins.ValueType,
) (plugins.Metric, error) {
if len(fields) == 0 {
return nil, fmt.Errorf("Metric cannot be made without any fields")
}
if len(name) == 0 {
return nil, fmt.Errorf("Metric cannot be made with an empty name")
}
var thisType plugins.ValueType
if len(mType) > 0 {
thisType = mType[0]
} else {
thisType = plugins.Untyped
}
m := &metric{
name: []byte(escape(name, "name")),
t: []byte(fmt.Sprint(t.UnixNano())),
nsec: t.UnixNano(),
mType: thisType,
}
// pre-allocate exact size of the tags slice
taglen := 0
for k, v := range tags {
// TODO check that length of tag key & value are > 0
taglen += 2 + len(escape(k, "tagkey")) + len(escape(v, "tagval"))
}
m.tags = make([]byte, taglen)
i := 0
for k, v := range tags {
m.tags[i] = ','
i++
i += copy(m.tags[i:], escape(k, "tagkey"))
m.tags[i] = '='
i++
i += copy(m.tags[i:], escape(v, "tagval"))
}
// pre-allocate capacity of the fields slice
fieldlen := 0
for k := range fields {
// 10 bytes is completely arbitrary, but will at least prevent some
// amount of allocations. There's a small possibility this will create
// slightly more allocations for a metric that has many short fields.
fieldlen += len(k) + 10
}
m.fields = make([]byte, 0, fieldlen)
i = 0
for k, v := range fields {
if i != 0 {
m.fields = append(m.fields, ',')
}
m.fields = appendField(m.fields, k, v)
i++
}
return m, nil
}
// indexUnescapedByte finds the index of the first byte equal to b in buf that
// is not escaped. Returns -1 if not found.
func indexUnescapedByte(buf []byte, b byte) int {
var keyi int
for {
i := bytes.IndexByte(buf[keyi:], b)
if i == -1 {
return -1
} else if i == 0 {
break
}
keyi += i
if countBackslashes(buf, keyi-1)%2 == 0 {
break
} else {
keyi++
}
}
return keyi
}
// countBackslashes counts the number of consecutive backslashes in buf,
// scanning backwards from the given index.
func countBackslashes(buf []byte, index int) int {
var count int
for {
if index < 0 {
return count
}
if buf[index] == '\\' {
count++
index--
} else {
break
}
}
return count
}
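// For example, with buf = `a\,b,c` and b = ',': the comma at index 2 is
// preceded by one backslash (odd count, so it is escaped) and gets skipped,
// while the comma at index 4 is preceded by zero backslashes (even count),
// so indexUnescapedByte returns 4.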
type metric struct {
name []byte
tags []byte
fields []byte
t []byte
mType plugins.ValueType
aggregate bool
// cached values for reuse in "get" functions
hashID uint64
nsec int64
}
func (m *metric) Point() *client.Point {
c, _ := client.NewPoint(m.Name(), m.Tags(), m.Fields(), m.Time())
return c
}
func (m *metric) String() string {
return string(m.name) + string(m.tags) + " " + string(m.fields) + " " + string(m.t) + "\n"
}
func (m *metric) SetAggregate(b bool) {
m.aggregate = b
}
func (m *metric) IsAggregate() bool {
return m.aggregate
}
func (m *metric) Type() plugins.ValueType {
return m.mType
}
func (m *metric) Len() int {
// 3 is for 2 spaces surrounding the fields array + newline at the end.
return len(m.name) + len(m.tags) + len(m.fields) + len(m.t) + 3
}
func (m *metric) Serialize() []byte {
tmp := make([]byte, m.Len())
i := 0
i += copy(tmp[i:], m.name)
i += copy(tmp[i:], m.tags)
tmp[i] = ' '
i++
i += copy(tmp[i:], m.fields)
tmp[i] = ' '
i++
i += copy(tmp[i:], m.t)
tmp[i] = '\n'
return tmp
}
func (m *metric) Split(maxSize int) []plugins.Metric {
if m.Len() < maxSize {
return []plugins.Metric{m}
}
var out []plugins.Metric
// constant number of bytes for each metric (in addition to field bytes)
constant := len(m.name) + len(m.tags) + len(m.t) + 3
// currently selected fields
fields := make([]byte, 0, maxSize)
i := 0
for {
if i >= len(m.fields) {
// hit the end of the field byte slice
if len(fields) > 0 {
out = append(out, copyWith(m.name, m.tags, fields, m.t))
}
break
}
// find the end of the next field
j := indexUnescapedByte(m.fields[i:], ',')
if j == -1 {
j = len(m.fields)
} else {
j += i
}
// if true, then we need to create a metric _not_ including the currently
// selected field
if len(m.fields[i:j])+len(fields)+constant > maxSize {
// if false, then we'll create a metric including the currently
// selected field anyways. This means that the given maxSize is too
// small for a single field to fit.
if len(fields) > 0 {
out = append(out, copyWith(m.name, m.tags, fields, m.t))
}
fields = make([]byte, 0, maxSize)
}
if len(fields) > 0 {
fields = append(fields, ',')
}
fields = append(fields, m.fields[i:j]...)
i = j + 1
}
return out
}
func (m *metric) Fields() map[string]interface{} {
fieldMap := map[string]interface{}{}
i := 0
for {
if i >= len(m.fields) {
break
}
// end index of field key
i1 := indexUnescapedByte(m.fields[i:], '=')
if i1 == -1 {
break
}
// start index of field value
i2 := i1 + 1
// end index of field value
var i3 int
if m.fields[i:][i2] == '"' {
i3 = indexUnescapedByte(m.fields[i:][i2+1:], '"')
if i3 == -1 {
i3 = len(m.fields[i:])
}
i3 += i2 + 2 // increment index to the comma
} else {
i3 = indexUnescapedByte(m.fields[i:], ',')
if i3 == -1 {
i3 = len(m.fields[i:])
}
}
switch m.fields[i:][i2] {
case '"':
// string field
fieldMap[unescape(string(m.fields[i:][0:i1]), "fieldkey")] = unescape(string(m.fields[i:][i2+1:i3-1]), "fieldval")
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
// number field
switch m.fields[i:][i3-1] {
case 'i':
// integer field
n, err := parseIntBytes(m.fields[i:][i2:i3-1], 10, 64)
if err == nil {
fieldMap[unescape(string(m.fields[i:][0:i1]), "fieldkey")] = n
} else {
// TODO handle error or just ignore field silently?
}
default:
// float field
n, err := parseFloatBytes(m.fields[i:][i2:i3], 64)
if err == nil {
fieldMap[unescape(string(m.fields[i:][0:i1]), "fieldkey")] = n
} else {
// TODO handle error or just ignore field silently?
}
}
case 'T', 't':
fieldMap[unescape(string(m.fields[i:][0:i1]), "fieldkey")] = true
case 'F', 'f':
fieldMap[unescape(string(m.fields[i:][0:i1]), "fieldkey")] = false
default:
// TODO handle unsupported field type
}
i += i3 + 1
}
return fieldMap
}
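// For example, the fields slice `a=1i,b="x,y"` decodes to
// map[string]interface{}{"a": int64(1), "b": "x,y"}; the comma inside the
// quoted string value is not treated as a field separator.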
func (m *metric) Tags() map[string]string {
tagMap := map[string]string{}
if len(m.tags) == 0 {
return tagMap
}
i := 0
for {
// start index of tag key
i0 := indexUnescapedByte(m.tags[i:], ',') + 1
if i0 == 0 {
// didn't find a tag start
break
}
// end index of tag key
i1 := indexUnescapedByte(m.tags[i:], '=')
// start index of tag value
i2 := i1 + 1
// end index of tag value (starting from i2)
i3 := indexUnescapedByte(m.tags[i+i2:], ',')
if i3 == -1 {
tagMap[unescape(string(m.tags[i:][i0:i1]), "tagkey")] = unescape(string(m.tags[i:][i2:]), "tagval")
break
}
tagMap[unescape(string(m.tags[i:][i0:i1]), "tagkey")] = unescape(string(m.tags[i:][i2:i2+i3]), "tagval")
// increment start index for the next tag
i += i2 + i3
}
return tagMap
}
func (m *metric) Name() string {
return unescape(string(m.name), "name")
}
func (m *metric) Time() time.Time {
// assume metric has been verified already and ignore error:
if m.nsec == 0 {
m.nsec, _ = parseIntBytes(m.t, 10, 64)
}
return time.Unix(0, m.nsec)
}
func (m *metric) UnixNano() int64 {
// assume metric has been verified already and ignore error:
if m.nsec == 0 {
m.nsec, _ = parseIntBytes(m.t, 10, 64)
}
return m.nsec
}
func (m *metric) SetName(name string) {
m.hashID = 0
m.name = []byte(nameEscaper.Replace(name))
}
func (m *metric) SetPrefix(prefix string) {
m.hashID = 0
m.name = append([]byte(nameEscaper.Replace(prefix)), m.name...)
}
func (m *metric) SetSuffix(suffix string) {
m.hashID = 0
m.name = append(m.name, []byte(nameEscaper.Replace(suffix))...)
}
func (m *metric) AddTag(key, value string) {
m.RemoveTag(key)
m.tags = append(m.tags, []byte(","+escape(key, "tagkey")+"="+escape(value, "tagval"))...)
}
func (m *metric) HasTag(key string) bool {
i := bytes.Index(m.tags, []byte(escape(key, "tagkey")+"="))
if i == -1 {
return false
}
return true
}
func (m *metric) RemoveTag(key string) {
m.hashID = 0
i := bytes.Index(m.tags, []byte(escape(key, "tagkey")+"="))
if i == -1 {
return
}
tmp := m.tags[0 : i-1]
j := indexUnescapedByte(m.tags[i:], ',')
if j != -1 {
tmp = append(tmp, m.tags[i+j:]...)
}
m.tags = tmp
return
}
func (m *metric) AddField(key string, value interface{}) {
m.fields = append(m.fields, ',')
m.fields = appendField(m.fields, key, value)
}
func (m *metric) HasField(key string) bool {
i := bytes.Index(m.fields, []byte(escape(key, "tagkey")+"="))
if i == -1 {
return false
}
return true
}
func (m *metric) RemoveField(key string) error {
i := bytes.Index(m.fields, []byte(escape(key, "tagkey")+"="))
if i == -1 {
return nil
}
var tmp []byte
if i != 0 {
tmp = m.fields[0 : i-1]
}
j := indexUnescapedByte(m.fields[i:], ',')
if j != -1 {
tmp = append(tmp, m.fields[i+j:]...)
}
if len(tmp) == 0 {
return fmt.Errorf("Metric cannot remove final field: %s", m.fields)
}
m.fields = tmp
return nil
}
func (m *metric) Copy() plugins.Metric {
return copyWith(m.name, m.tags, m.fields, m.t)
}
func copyWith(name, tags, fields, t []byte) plugins.Metric {
out := metric{
name: make([]byte, len(name)),
tags: make([]byte, len(tags)),
fields: make([]byte, len(fields)),
t: make([]byte, len(t)),
}
copy(out.name, name)
copy(out.tags, tags)
copy(out.fields, fields)
copy(out.t, t)
return &out
}
func (m *metric) HashID() uint64 {
if m.hashID == 0 {
h := fnv.New64a()
h.Write(m.name)
tags := m.Tags()
tmp := make([]string, len(tags))
i := 0
for k, v := range tags {
tmp[i] = k + v
i++
}
sort.Strings(tmp)
for _, s := range tmp {
h.Write([]byte(s))
}
m.hashID = h.Sum64()
}
return m.hashID
}
func appendField(b []byte, k string, v interface{}) []byte {
if v == nil {
return b
}
b = append(b, []byte(escape(k, "tagkey")+"=")...)
// check popular types first
switch v := v.(type) {
case float64:
b = strconv.AppendFloat(b, v, 'f', -1, 64)
case int64:
b = strconv.AppendInt(b, v, 10)
b = append(b, 'i')
case string:
b = append(b, '"')
b = append(b, []byte(escape(v, "fieldval"))...)
b = append(b, '"')
case bool:
b = strconv.AppendBool(b, v)
case int32:
b = strconv.AppendInt(b, int64(v), 10)
b = append(b, 'i')
case int16:
b = strconv.AppendInt(b, int64(v), 10)
b = append(b, 'i')
case int8:
b = strconv.AppendInt(b, int64(v), 10)
b = append(b, 'i')
case int:
b = strconv.AppendInt(b, int64(v), 10)
b = append(b, 'i')
case uint64:
// Cap uints above the maximum int value
var intv int64
if v <= uint64(MaxInt) {
intv = int64(v)
} else {
intv = int64(MaxInt)
}
b = strconv.AppendInt(b, intv, 10)
b = append(b, 'i')
case uint32:
b = strconv.AppendInt(b, int64(v), 10)
b = append(b, 'i')
case uint16:
b = strconv.AppendInt(b, int64(v), 10)
b = append(b, 'i')
case uint8:
b = strconv.AppendInt(b, int64(v), 10)
b = append(b, 'i')
case uint:
// Cap uints above the maximum int value
var intv int64
if v <= uint(MaxInt) {
intv = int64(v)
} else {
intv = int64(MaxInt)
}
b = strconv.AppendInt(b, intv, 10)
b = append(b, 'i')
case float32:
b = strconv.AppendFloat(b, float64(v), 'f', -1, 32)
case []byte:
b = append(b, v...)
default:
// Can't determine the type, so convert to string
b = append(b, '"')
b = append(b, []byte(escape(fmt.Sprintf("%v", v), "fieldval"))...)
b = append(b, '"')
}
return b
}
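Putting the pieces of this file together, a minimal sketch of building and serializing a metric through the new constructor; the `metric` import path is assumed from this branch's layout, and field order in the output depends on map iteration:

```go
package main

import (
	"fmt"
	"time"

	"github.com/influxdata/telegraf/metric"
)

func main() {
	m, err := metric.New(
		"cpu",
		map[string]string{"host": "localhost"},
		map[string]interface{}{"idle": float64(99), "busy": int64(1)},
		time.Unix(0, 1480614053000000000),
	)
	if err != nil {
		panic(err)
	}
	fmt.Print(m.String())
	// e.g. cpu,host=localhost idle=99,busy=1i 1480614053000000000

	// Split into serialized chunks of at most 60 bytes where possible.
	for _, part := range m.Split(60) {
		fmt.Print(part.String())
	}
}
```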


@@ -0,0 +1,148 @@
package metric
import (
"fmt"
"testing"
"time"
"github.com/influxdata/telegraf/plugins"
)
// vars for making sure that the compiler doesn't optimize out the benchmarks:
var (
s string
I interface{}
tags map[string]string
fields map[string]interface{}
)
func BenchmarkNewMetric(b *testing.B) {
var mt plugins.Metric
for n := 0; n < b.N; n++ {
mt, _ = New("test_metric",
map[string]string{
"test_tag_1": "tag_value_1",
"test_tag_2": "tag_value_2",
"test_tag_3": "tag_value_3",
},
map[string]interface{}{
"string_field": "string",
"int_field": int64(1000),
"float_field": float64(2.1),
},
time.Now(),
)
}
s = mt.String()
}
func BenchmarkAddTag(b *testing.B) {
var mt plugins.Metric
mt = &metric{
name: []byte("cpu"),
tags: []byte(",host=localhost"),
fields: []byte("a=101"),
t: []byte("1480614053000000000"),
}
for n := 0; n < b.N; n++ {
mt.AddTag("foo", "bar")
}
s = mt.String()
}
func BenchmarkSplit(b *testing.B) {
var mt plugins.Metric
mt = &metric{
name: []byte("cpu"),
tags: []byte(",host=localhost"),
fields: []byte("a=101,b=10i,c=10101,d=101010,e=42"),
t: []byte("1480614053000000000"),
}
var metrics []plugins.Metric
for n := 0; n < b.N; n++ {
metrics = mt.Split(60)
}
s = metrics[0].String()
}
func BenchmarkTags(b *testing.B) {
for n := 0; n < b.N; n++ {
var mt, _ = New("test_metric",
map[string]string{
"test_tag_1": "tag_value_1",
"test_tag_2": "tag_value_2",
"test_tag_3": "tag_value_3",
},
map[string]interface{}{
"string_field": "string",
"int_field": int64(1000),
"float_field": float64(2.1),
},
time.Now(),
)
tags = mt.Tags()
}
s = fmt.Sprint(tags)
}
func BenchmarkFields(b *testing.B) {
for n := 0; n < b.N; n++ {
var mt, _ = New("test_metric",
map[string]string{
"test_tag_1": "tag_value_1",
"test_tag_2": "tag_value_2",
"test_tag_3": "tag_value_3",
},
map[string]interface{}{
"string_field": "string",
"int_field": int64(1000),
"float_field": float64(2.1),
},
time.Now(),
)
fields = mt.Fields()
}
s = fmt.Sprint(fields)
}
func BenchmarkString(b *testing.B) {
mt, _ := New("test_metric",
map[string]string{
"test_tag_1": "tag_value_1",
"test_tag_2": "tag_value_2",
"test_tag_3": "tag_value_3",
},
map[string]interface{}{
"string_field": "string",
"int_field": int64(1000),
"float_field": float64(2.1),
},
time.Now(),
)
var S string
for n := 0; n < b.N; n++ {
S = mt.String()
}
s = S
}
func BenchmarkSerialize(b *testing.B) {
mt, _ := New("test_metric",
map[string]string{
"test_tag_1": "tag_value_1",
"test_tag_2": "tag_value_2",
"test_tag_3": "tag_value_3",
},
map[string]interface{}{
"string_field": "string",
"int_field": int64(1000),
"float_field": float64(2.1),
},
time.Now(),
)
var B []byte
for n := 0; n < b.N; n++ {
B = mt.Serialize()
}
s = string(B)
}

metric/metric_test.go Normal file

@@ -0,0 +1,646 @@
package metric
import (
"fmt"
"math"
"regexp"
"testing"
"time"
"github.com/influxdata/telegraf/plugins"
"github.com/stretchr/testify/assert"
)
func TestNewMetric(t *testing.T) {
now := time.Now()
tags := map[string]string{
"host": "localhost",
"datacenter": "us-east-1",
}
fields := map[string]interface{}{
"usage_idle": float64(99),
"usage_busy": float64(1),
}
m, err := New("cpu", tags, fields, now)
assert.NoError(t, err)
assert.Equal(t, plugins.Untyped, m.Type())
assert.Equal(t, tags, m.Tags())
assert.Equal(t, fields, m.Fields())
assert.Equal(t, "cpu", m.Name())
assert.Equal(t, now, m.Time())
assert.Equal(t, now.UnixNano(), m.UnixNano())
}
func TestNewErrors(t *testing.T) {
// creating a metric with an empty name produces an error:
m, err := New(
"",
map[string]string{
"datacenter": "us-east-1",
"mytag": "foo",
"another": "tag",
},
map[string]interface{}{
"value": float64(1),
},
time.Now(),
)
assert.Error(t, err)
assert.Nil(t, m)
// creating a metric with empty fields produces an error:
m, err = New(
"foobar",
map[string]string{
"datacenter": "us-east-1",
"mytag": "foo",
"another": "tag",
},
map[string]interface{}{},
time.Now(),
)
assert.Error(t, err)
assert.Nil(t, m)
}
func TestNewMetric_Tags(t *testing.T) {
now := time.Now()
tags := map[string]string{
"host": "localhost",
"datacenter": "us-east-1",
}
fields := map[string]interface{}{
"value": float64(1),
}
m, err := New("cpu", tags, fields, now)
assert.NoError(t, err)
assert.True(t, m.HasTag("host"))
assert.True(t, m.HasTag("datacenter"))
m.AddTag("newtag", "foo")
assert.True(t, m.HasTag("newtag"))
m.RemoveTag("host")
assert.False(t, m.HasTag("host"))
assert.True(t, m.HasTag("newtag"))
assert.True(t, m.HasTag("datacenter"))
m.RemoveTag("datacenter")
assert.False(t, m.HasTag("datacenter"))
assert.True(t, m.HasTag("newtag"))
assert.Equal(t, map[string]string{"newtag": "foo"}, m.Tags())
m.RemoveTag("newtag")
assert.False(t, m.HasTag("newtag"))
assert.Equal(t, map[string]string{}, m.Tags())
assert.Equal(t, "cpu value=1 "+fmt.Sprint(now.UnixNano())+"\n", m.String())
}
func TestSerialize(t *testing.T) {
now := time.Now()
tags := map[string]string{
"datacenter": "us-east-1",
}
fields := map[string]interface{}{
"value": float64(1),
}
m, err := New("cpu", tags, fields, now)
assert.NoError(t, err)
assert.Equal(t,
[]byte("cpu,datacenter=us-east-1 value=1 "+fmt.Sprint(now.UnixNano())+"\n"),
m.Serialize())
m.RemoveTag("datacenter")
assert.Equal(t,
[]byte("cpu value=1 "+fmt.Sprint(now.UnixNano())+"\n"),
m.Serialize())
}
func TestHashID(t *testing.T) {
m, _ := New(
"cpu",
map[string]string{
"datacenter": "us-east-1",
"mytag": "foo",
"another": "tag",
},
map[string]interface{}{
"value": float64(1),
},
time.Now(),
)
hash := m.HashID()
// adding a field doesn't change the hash:
m.AddField("foo", int64(100))
assert.Equal(t, hash, m.HashID())
// removing a non-existent tag doesn't change the hash:
m.RemoveTag("no-op")
assert.Equal(t, hash, m.HashID())
// adding a tag does change it:
m.AddTag("foo", "bar")
assert.NotEqual(t, hash, m.HashID())
hash = m.HashID()
// removing a tag also changes it:
m.RemoveTag("mytag")
assert.NotEqual(t, hash, m.HashID())
}
func TestHashID_Consistency(t *testing.T) {
m, _ := New(
"cpu",
map[string]string{
"datacenter": "us-east-1",
"mytag": "foo",
"another": "tag",
},
map[string]interface{}{
"value": float64(1),
},
time.Now(),
)
hash := m.HashID()
for i := 0; i < 1000; i++ {
m2, _ := New(
"cpu",
map[string]string{
"datacenter": "us-east-1",
"mytag": "foo",
"another": "tag",
},
map[string]interface{}{
"value": float64(1),
},
time.Now(),
)
assert.Equal(t, hash, m2.HashID())
}
}
func TestNewMetric_NameModifiers(t *testing.T) {
now := time.Now()
tags := map[string]string{}
fields := map[string]interface{}{
"value": float64(1),
}
m, err := New("cpu", tags, fields, now)
assert.NoError(t, err)
hash := m.HashID()
suffix := fmt.Sprintf(" value=1 %d\n", now.UnixNano())
assert.Equal(t, "cpu"+suffix, m.String())
m.SetPrefix("pre_")
assert.NotEqual(t, hash, m.HashID())
hash = m.HashID()
assert.Equal(t, "pre_cpu"+suffix, m.String())
m.SetSuffix("_post")
assert.NotEqual(t, hash, m.HashID())
hash = m.HashID()
assert.Equal(t, "pre_cpu_post"+suffix, m.String())
m.SetName("mem")
assert.NotEqual(t, hash, m.HashID())
assert.Equal(t, "mem"+suffix, m.String())
}
func TestNewMetric_FieldModifiers(t *testing.T) {
now := time.Now()
tags := map[string]string{
"host": "localhost",
}
fields := map[string]interface{}{
"value": float64(1),
}
m, err := New("cpu", tags, fields, now)
assert.NoError(t, err)
assert.True(t, m.HasField("value"))
assert.False(t, m.HasField("foo"))
m.AddField("newfield", "foo")
assert.True(t, m.HasField("newfield"))
assert.NoError(t, m.RemoveField("newfield"))
assert.False(t, m.HasField("newfield"))
// don't allow user to remove all fields:
assert.Error(t, m.RemoveField("value"))
m.AddField("value2", int64(101))
assert.NoError(t, m.RemoveField("value"))
assert.False(t, m.HasField("value"))
}
func TestNewMetric_Fields(t *testing.T) {
now := time.Now()
tags := map[string]string{
"host": "localhost",
}
fields := map[string]interface{}{
"float": float64(1),
"int": int64(1),
"bool": true,
"false": false,
"string": "test",
}
m, err := New("cpu", tags, fields, now)
assert.NoError(t, err)
assert.Equal(t, fields, m.Fields())
}
func TestNewMetric_Time(t *testing.T) {
now := time.Now()
tags := map[string]string{
"host": "localhost",
}
fields := map[string]interface{}{
"float": float64(1),
"int": int64(1),
"bool": true,
"false": false,
"string": "test",
}
m, err := New("cpu", tags, fields, now)
assert.NoError(t, err)
m = m.Copy()
m2 := m.Copy()
assert.Equal(t, now.UnixNano(), m.Time().UnixNano())
assert.Equal(t, now.UnixNano(), m2.UnixNano())
}
func TestNewMetric_Copy(t *testing.T) {
now := time.Now()
tags := map[string]string{}
fields := map[string]interface{}{
"float": float64(1),
}
m, err := New("cpu", tags, fields, now)
assert.NoError(t, err)
m2 := m.Copy()
assert.Equal(t,
fmt.Sprintf("cpu float=1 %d\n", now.UnixNano()),
m.String())
m.AddTag("host", "localhost")
assert.Equal(t,
fmt.Sprintf("cpu,host=localhost float=1 %d\n", now.UnixNano()),
m.String())
assert.Equal(t,
fmt.Sprintf("cpu float=1 %d\n", now.UnixNano()),
m2.String())
}
func TestNewMetric_AllTypes(t *testing.T) {
now := time.Now()
tags := map[string]string{}
fields := map[string]interface{}{
"float64": float64(1),
"float32": float32(1),
"int64": int64(1),
"int32": int32(1),
"int16": int16(1),
"int8": int8(1),
"int": int(1),
"uint64": uint64(1),
"uint32": uint32(1),
"uint16": uint16(1),
"uint8": uint8(1),
"uint": uint(1),
"bytes": []byte("foo"),
"nil": nil,
"maxuint64": uint64(MaxInt) + 10,
"maxuint": uint(MaxInt) + 10,
"unsupported": []int{1, 2},
}
m, err := New("cpu", tags, fields, now)
assert.NoError(t, err)
assert.Contains(t, m.String(), "float64=1")
assert.Contains(t, m.String(), "float32=1")
assert.Contains(t, m.String(), "int64=1i")
assert.Contains(t, m.String(), "int32=1i")
assert.Contains(t, m.String(), "int16=1i")
assert.Contains(t, m.String(), "int8=1i")
assert.Contains(t, m.String(), "int=1i")
assert.Contains(t, m.String(), "uint64=1i")
assert.Contains(t, m.String(), "uint32=1i")
assert.Contains(t, m.String(), "uint16=1i")
assert.Contains(t, m.String(), "uint8=1i")
assert.Contains(t, m.String(), "uint=1i")
assert.NotContains(t, m.String(), "nil")
assert.Contains(t, m.String(), fmt.Sprintf("maxuint64=%di", MaxInt))
assert.Contains(t, m.String(), fmt.Sprintf("maxuint=%di", MaxInt))
}
func TestIndexUnescapedByte(t *testing.T) {
tests := []struct {
in []byte
b byte
expected int
}{
{
in: []byte(`foobar`),
b: 'b',
expected: 3,
},
{
in: []byte(`foo\bar`),
b: 'b',
expected: -1,
},
{
in: []byte(`foo\\bar`),
b: 'b',
expected: 5,
},
{
in: []byte(`foobar`),
b: 'f',
expected: 0,
},
{
in: []byte(`foobar`),
b: 'r',
expected: 5,
},
{
in: []byte(`\foobar`),
b: 'f',
expected: -1,
},
}
for _, test := range tests {
got := indexUnescapedByte(test.in, test.b)
assert.Equal(t, test.expected, got)
}
}
func TestNewGaugeMetric(t *testing.T) {
now := time.Now()
tags := map[string]string{
"host": "localhost",
"datacenter": "us-east-1",
}
fields := map[string]interface{}{
"usage_idle": float64(99),
"usage_busy": float64(1),
}
m, err := New("cpu", tags, fields, now, plugins.Gauge)
assert.NoError(t, err)
assert.Equal(t, plugins.Gauge, m.Type())
assert.Equal(t, tags, m.Tags())
assert.Equal(t, fields, m.Fields())
assert.Equal(t, "cpu", m.Name())
assert.Equal(t, now, m.Time())
assert.Equal(t, now.UnixNano(), m.UnixNano())
}
func TestNewCounterMetric(t *testing.T) {
now := time.Now()
tags := map[string]string{
"host": "localhost",
"datacenter": "us-east-1",
}
fields := map[string]interface{}{
"usage_idle": float64(99),
"usage_busy": float64(1),
}
m, err := New("cpu", tags, fields, now, plugins.Counter)
assert.NoError(t, err)
assert.Equal(t, plugins.Counter, m.Type())
assert.Equal(t, tags, m.Tags())
assert.Equal(t, fields, m.Fields())
assert.Equal(t, "cpu", m.Name())
assert.Equal(t, now, m.Time())
assert.Equal(t, now.UnixNano(), m.UnixNano())
}
// test splitting a metric into various max lengths
func TestSplitMetric(t *testing.T) {
now := time.Unix(0, 1480940990034083306)
tags := map[string]string{
"host": "localhost",
}
fields := map[string]interface{}{
"float": float64(100001),
"int": int64(100001),
"bool": true,
"false": false,
"string": "test",
}
m, err := New("cpu", tags, fields, now)
assert.NoError(t, err)
split80 := m.Split(80)
assert.Len(t, split80, 2)
split70 := m.Split(70)
assert.Len(t, split70, 3)
split60 := m.Split(60)
assert.Len(t, split60, 4)
}
// test splitting a metric into various max lengths
// use a simple regex check to verify that the split metrics are valid
func TestSplitMetric_RegexVerify(t *testing.T) {
now := time.Unix(0, 1480940990034083306)
tags := map[string]string{
"host": "localhost",
}
fields := map[string]interface{}{
"foo": float64(98934259085),
"bar": float64(19385292),
"number": float64(19385292),
"another": float64(19385292),
"n": float64(19385292),
}
m, err := New("cpu", tags, fields, now)
assert.NoError(t, err)
// verification regex
re := regexp.MustCompile(`cpu,host=localhost \w+=\d+(,\w+=\d+)* 1480940990034083306`)
split90 := m.Split(90)
assert.Len(t, split90, 2)
for _, splitM := range split90 {
assert.True(t, re.Match(splitM.Serialize()), splitM.String())
}
split70 := m.Split(70)
assert.Len(t, split70, 3)
for _, splitM := range split70 {
assert.True(t, re.Match(splitM.Serialize()), splitM.String())
}
split20 := m.Split(20)
assert.Len(t, split20, 5)
for _, splitM := range split20 {
assert.True(t, re.Match(splitM.Serialize()), splitM.String())
}
}
// test splitting a metric even when the given length is shorter than the
// shortest possible length.
// Split should make each metric as short as possible, i.e. one field per metric.
func TestSplitMetric_TooShort(t *testing.T) {
now := time.Unix(0, 1480940990034083306)
tags := map[string]string{
"host": "localhost",
}
fields := map[string]interface{}{
"float": float64(100001),
"int": int64(100001),
"bool": true,
"false": false,
"string": "test",
}
m, err := New("cpu", tags, fields, now)
assert.NoError(t, err)
split := m.Split(10)
assert.Len(t, split, 5)
strings := make([]string, 5)
for i, splitM := range split {
strings[i] = splitM.String()
}
assert.Contains(t, strings, "cpu,host=localhost float=100001 1480940990034083306\n")
assert.Contains(t, strings, "cpu,host=localhost int=100001i 1480940990034083306\n")
assert.Contains(t, strings, "cpu,host=localhost bool=true 1480940990034083306\n")
assert.Contains(t, strings, "cpu,host=localhost false=false 1480940990034083306\n")
assert.Contains(t, strings, "cpu,host=localhost string=\"test\" 1480940990034083306\n")
}
func TestSplitMetric_NoOp(t *testing.T) {
now := time.Unix(0, 1480940990034083306)
tags := map[string]string{
"host": "localhost",
}
fields := map[string]interface{}{
"float": float64(100001),
"int": int64(100001),
"bool": true,
"false": false,
"string": "test",
}
m, err := New("cpu", tags, fields, now)
assert.NoError(t, err)
split := m.Split(1000)
assert.Len(t, split, 1)
assert.Equal(t, m, split[0])
}
func TestSplitMetric_OneField(t *testing.T) {
now := time.Unix(0, 1480940990034083306)
tags := map[string]string{
"host": "localhost",
}
fields := map[string]interface{}{
"float": float64(100001),
}
m, err := New("cpu", tags, fields, now)
assert.NoError(t, err)
assert.Equal(t, "cpu,host=localhost float=100001 1480940990034083306\n", m.String())
split := m.Split(1000)
assert.Len(t, split, 1)
assert.Equal(t, "cpu,host=localhost float=100001 1480940990034083306\n", split[0].String())
split = m.Split(1)
assert.Len(t, split, 1)
assert.Equal(t, "cpu,host=localhost float=100001 1480940990034083306\n", split[0].String())
split = m.Split(40)
assert.Len(t, split, 1)
assert.Equal(t, "cpu,host=localhost float=100001 1480940990034083306\n", split[0].String())
}
func TestNewMetricAggregate(t *testing.T) {
now := time.Now()
tags := map[string]string{
"host": "localhost",
}
fields := map[string]interface{}{
"usage_idle": float64(99),
}
m, err := New("cpu", tags, fields, now)
assert.NoError(t, err)
assert.False(t, m.IsAggregate())
m.SetAggregate(true)
assert.True(t, m.IsAggregate())
}
func TestNewMetricPoint(t *testing.T) {
now := time.Now()
tags := map[string]string{
"host": "localhost",
}
fields := map[string]interface{}{
"usage_idle": float64(99),
}
m, err := New("cpu", tags, fields, now)
assert.NoError(t, err)
p := m.Point()
assert.Equal(t, fields, m.Fields())
assert.Equal(t, fields, p.Fields())
assert.Equal(t, "cpu", p.Name())
}
func TestNewMetricString(t *testing.T) {
now := time.Now()
tags := map[string]string{
"host": "localhost",
}
fields := map[string]interface{}{
"usage_idle": float64(99),
}
m, err := New("cpu", tags, fields, now)
assert.NoError(t, err)
lineProto := fmt.Sprintf("cpu,host=localhost usage_idle=99 %d\n",
now.UnixNano())
assert.Equal(t, lineProto, m.String())
}
func TestNewMetricFailNaN(t *testing.T) {
now := time.Now()
tags := map[string]string{
"host": "localhost",
}
fields := map[string]interface{}{
"usage_idle": math.NaN(),
}
_, err := New("cpu", tags, fields, now)
assert.NoError(t, err)
}

metric/parse.go Normal file

@@ -0,0 +1,627 @@
package metric
import (
"bytes"
"errors"
"fmt"
"time"
"github.com/influxdata/telegraf/plugins"
)
var (
ErrInvalidNumber = errors.New("invalid number")
)
const (
// the number of characters for the largest possible int64 (9223372036854775807)
maxInt64Digits = 19
// the number of characters for the smallest possible int64 (-9223372036854775808)
minInt64Digits = 20
// the number of characters required for the largest float64 before a range check
// would occur during parsing
maxFloat64Digits = 25
// the number of characters required for the smallest float64 before a range check
// would occur during parsing
minFloat64Digits = 27
MaxKeyLength = 65535
)
// The following constants allow us to specify which state to move to
// next, when scanning sections of a Point.
const (
tagKeyState = iota
tagValueState
fieldsState
)
func Parse(buf []byte) ([]plugins.Metric, error) {
return ParseWithDefaultTime(buf, time.Now())
}
func ParseWithDefaultTime(buf []byte, t time.Time) ([]plugins.Metric, error) {
if len(buf) <= 6 {
return []plugins.Metric{}, makeError("buffer too short", buf, 0)
}
metrics := make([]plugins.Metric, 0, bytes.Count(buf, []byte("\n"))+1)
var errStr string
i := 0
for {
j := bytes.IndexByte(buf[i:], '\n')
if j == -1 {
break
}
if len(buf[i:i+j]) < 2 {
i += j + 1 // increment i past the previous newline
continue
}
m, err := parseMetric(buf[i:i+j], t)
if err != nil {
i += j + 1 // increment i past the previous newline
errStr += " " + err.Error()
continue
}
i += j + 1 // increment i past the previous newline
metrics = append(metrics, m)
}
if len(errStr) > 0 {
return metrics, fmt.Errorf(errStr)
}
return metrics, nil
}
func parseMetric(buf []byte, defaultTime time.Time) (plugins.Metric, error) {
var dTime string
// scan the first block which is measurement[,tag1=value1,tag2=value2...]
pos, key, err := scanKey(buf, 0)
if err != nil {
return nil, err
}
// measurement name is required
if len(key) == 0 {
return nil, fmt.Errorf("missing measurement")
}
if len(key) > MaxKeyLength {
return nil, fmt.Errorf("max key length exceeded: %v > %v", len(key), MaxKeyLength)
}
// scan the second block, which is field1=value1[,field2=value2,...]
pos, fields, err := scanFields(buf, pos)
if err != nil {
return nil, err
}
// at least one field is required
if len(fields) == 0 {
return nil, fmt.Errorf("missing fields")
}
// scan the last block which is an optional integer timestamp
pos, ts, err := scanTime(buf, pos)
if err != nil {
return nil, err
}
m := &metric{
fields: fields,
t: ts,
}
// parse out the measurement name
// namei is the index at which the "name" ends
namei := indexUnescapedByte(key, ',')
if namei < 1 {
// no tags
m.name = key
} else {
m.name = key[0:namei]
m.tags = key[namei:]
}
if len(m.t) == 0 {
if len(dTime) == 0 {
dTime = fmt.Sprint(defaultTime.UnixNano())
}
// use default time
m.t = []byte(dTime)
}
// here we copy on return because this allows us to later call
// AddTag, AddField, RemoveTag, RemoveField, etc. without worrying about
// modifying 'tag' bytes having an effect on 'field' bytes, for example.
return m.Copy(), nil
}
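// For example, given `cpu,host=a value=1`, scanKey returns the key
// `cpu,host=a`, which is split at the first unescaped comma into the name
// `cpu` and the tags `,host=a`; the leading comma is kept so that AddTag can
// later append `,key=value` bytes without reshuffling.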
// scanKey scans buf starting at i for the measurement and tag portion of the point.
// It returns the ending position and the byte slice of the key within buf.
func scanKey(buf []byte, i int) (int, []byte, error) {
start := skipWhitespace(buf, i)
i = start
// First scan the Point's measurement.
state, i, err := scanMeasurement(buf, i)
if err != nil {
return i, buf[start:i], err
}
// Optionally scan tags if needed.
if state == tagKeyState {
i, err = scanTags(buf, i)
if err != nil {
return i, buf[start:i], err
}
}
return i, buf[start:i], nil
}
// scanMeasurement examines the measurement part of a Point, returning
// the next state to move to, and the current location in the buffer.
func scanMeasurement(buf []byte, i int) (int, int, error) {
// Check first byte of measurement, anything except a comma is fine.
// It can't be a space, since whitespace is stripped prior to this
// function call.
if i >= len(buf) || buf[i] == ',' {
return -1, i, makeError("missing measurement", buf, i)
}
for {
i++
if i >= len(buf) {
// cpu
return -1, i, makeError("missing fields", buf, i)
}
if buf[i-1] == '\\' {
// Skip character (it's escaped).
continue
}
// Unescaped comma; move onto scanning the tags.
if buf[i] == ',' {
return tagKeyState, i + 1, nil
}
// Unescaped space; move onto scanning the fields.
if buf[i] == ' ' {
// cpu value=1.0
return fieldsState, i, nil
}
}
}
// scanTags examines all the tags in a Point, returning the location
// in buf where the Point's fields begin.
func scanTags(buf []byte, i int) (int, error) {
var (
err error
state = tagKeyState
)
for {
switch state {
case tagKeyState:
i, err = scanTagsKey(buf, i)
state = tagValueState // tag value always follows a tag key
case tagValueState:
state, i, err = scanTagsValue(buf, i)
case fieldsState:
return i, nil
}
if err != nil {
return i, err
}
}
}
// scanTagsKey scans each character in a tag key.
func scanTagsKey(buf []byte, i int) (int, error) {
// First character of the key.
if i >= len(buf) || buf[i] == ' ' || buf[i] == ',' || buf[i] == '=' {
// cpu,{'', ' ', ',', '='}
return i, makeError("missing tag key", buf, i)
}
// Examine each character in the tag key until we hit an unescaped
// equals (the tag value), or we hit an error (i.e., unescaped
// space or comma).
for {
i++
// Either we reached the end of the buffer or we hit an
// unescaped comma or space.
if i >= len(buf) ||
((buf[i] == ' ' || buf[i] == ',') && buf[i-1] != '\\') {
// cpu,tag{'', ' ', ','}
return i, makeError("missing tag value", buf, i)
}
if buf[i] == '=' && buf[i-1] != '\\' {
// cpu,tag=
return i + 1, nil
}
}
}
// scanTagsValue scans each character in a tag value.
func scanTagsValue(buf []byte, i int) (int, int, error) {
// Tag value cannot be empty.
if i >= len(buf) || buf[i] == ',' || buf[i] == ' ' {
// cpu,tag={',', ' '}
return -1, i, makeError("missing tag value", buf, i)
}
// Examine each character in the tag value until we hit an unescaped
// comma (move onto next tag key), an unescaped space (move onto
// fields), or we error out.
for {
i++
if i >= len(buf) {
// cpu,tag=value
return -1, i, makeError("missing fields", buf, i)
}
// An unescaped equals sign is an invalid tag value.
if buf[i] == '=' && buf[i-1] != '\\' {
// cpu,tag={'=', 'fo=o'}
return -1, i, makeError("invalid tag format", buf, i)
}
if buf[i] == ',' && buf[i-1] != '\\' {
// cpu,tag=foo,
return tagKeyState, i + 1, nil
}
// cpu,tag=foo value=1.0
// cpu, tag=foo\= value=1.0
if buf[i] == ' ' && buf[i-1] != '\\' {
return fieldsState, i, nil
}
}
}
// scanFields scans buf, starting at i for the fields section of a point. It returns
// the ending position and the byte slice of the fields within buf
func scanFields(buf []byte, i int) (int, []byte, error) {
start := skipWhitespace(buf, i)
i = start
quoted := false
// tracks how many '=' we've seen
equals := 0
// tracks how many commas we've seen
commas := 0
for {
// reached the end of buf?
if i >= len(buf) {
break
}
// escaped characters?
if buf[i] == '\\' && i+1 < len(buf) {
i += 2
continue
}
// If the value is quoted, scan until we get to the end quote.
// Quotes are only tracked in the field value, since they are not
// significant in the field key.
if buf[i] == '"' && equals > commas {
quoted = !quoted
i++
continue
}
// If we see an =, ensure that there is at least one char before and after it
if buf[i] == '=' && !quoted {
equals++
// check for "... =123" but allow "a\ =123"
if buf[i-1] == ' ' && buf[i-2] != '\\' {
return i, buf[start:i], makeError("missing field key", buf, i)
}
// check for "...a=123,=456" but allow "a=123,a\,=456"
if buf[i-1] == ',' && buf[i-2] != '\\' {
return i, buf[start:i], makeError("missing field key", buf, i)
}
// check for "... value="
if i+1 >= len(buf) {
return i, buf[start:i], makeError("missing field value", buf, i)
}
// check for "... value=,value2=..."
if buf[i+1] == ',' || buf[i+1] == ' ' {
return i, buf[start:i], makeError("missing field value", buf, i)
}
if isNumeric(buf[i+1]) || buf[i+1] == '-' || buf[i+1] == 'N' || buf[i+1] == 'n' {
var err error
i, err = scanNumber(buf, i+1)
if err != nil {
return i, buf[start:i], err
}
continue
}
// If next byte is not a double-quote, the value must be a boolean
if buf[i+1] != '"' {
var err error
i, _, err = scanBoolean(buf, i+1)
if err != nil {
return i, buf[start:i], err
}
continue
}
}
if buf[i] == ',' && !quoted {
commas++
}
// reached end of block?
if buf[i] == ' ' && !quoted {
break
}
i++
}
if quoted {
return i, buf[start:i], makeError("unbalanced quotes", buf, i)
}
// check that all field sections had keys and values (e.g. prevent "a=1,b")
if equals == 0 || commas != equals-1 {
return i, buf[start:i], makeError("invalid field format", buf, i)
}
return i, buf[start:i], nil
}
// scanTime scans buf, starting at i for the time section of a point. It
// returns the ending position and the byte slice of the timestamp within buf
// and an error if the timestamp is not in the correct numeric format.
func scanTime(buf []byte, i int) (int, []byte, error) {
start := skipWhitespace(buf, i)
i = start
for {
// reached the end of buf?
if i >= len(buf) {
break
}
// Reached end of block or trailing whitespace?
if buf[i] == '\n' || buf[i] == ' ' {
break
}
// Handle negative timestamps
if i == start && buf[i] == '-' {
i++
continue
}
// Timestamps must be integers; verify that here so we don't need
// to actually parse the timestamp until it is needed.
if buf[i] < '0' || buf[i] > '9' {
return i, buf[start:i], makeError("invalid timestamp", buf, i)
}
i++
}
return i, buf[start:i], nil
}
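// For example, `-1257894000000000000` scans successfully: '-' is allowed only
// as the first byte, and every other byte must be an ASCII digit. The value
// is not parsed here; Time() and UnixNano() parse it lazily.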
func isNumeric(b byte) bool {
return (b >= '0' && b <= '9') || b == '.'
}
// scanNumber returns the end position within buf, starting at i, after
// scanning over buf for an integer or float. It returns an
// error if an invalid number is scanned.
func scanNumber(buf []byte, i int) (int, error) {
start := i
var isInt bool
// Is negative number?
if i < len(buf) && buf[i] == '-' {
i++
// There must be more characters now, as just '-' is illegal.
if i == len(buf) {
return i, ErrInvalidNumber
}
}
// whether we've seen a decimal point
decimal := false
// indicates the number is a float in scientific notation
scientific := false
for {
if i >= len(buf) {
break
}
if buf[i] == ',' || buf[i] == ' ' {
break
}
if buf[i] == 'i' && i > start && !isInt {
isInt = true
i++
continue
}
if buf[i] == '.' {
// Can't have more than 1 decimal (e.g. 1.1.1 should fail)
if decimal {
return i, ErrInvalidNumber
}
decimal = true
}
// `e` is valid for floats but not as the first char
if i > start && (buf[i] == 'e' || buf[i] == 'E') {
scientific = true
i++
continue
}
// + and - are only valid at this point if they follow an e (scientific notation)
if (buf[i] == '+' || buf[i] == '-') && (buf[i-1] == 'e' || buf[i-1] == 'E') {
i++
continue
}
// NaN is an unsupported value
if i+2 < len(buf) && (buf[i] == 'N' || buf[i] == 'n') {
return i, ErrInvalidNumber
}
if !isNumeric(buf[i]) {
return i, ErrInvalidNumber
}
i++
}
if isInt && (decimal || scientific) {
return i, ErrInvalidNumber
}
numericDigits := i - start
if isInt {
numericDigits--
}
if decimal {
numericDigits--
}
if buf[start] == '-' {
numericDigits--
}
if numericDigits == 0 {
return i, ErrInvalidNumber
}
// It's more common that numbers will be within min/max range for their type but we need to prevent
// out of range numbers from being parsed successfully. This uses some simple heuristics to decide
// if we should parse the number to the actual type. It does not do it all the time because it incurs
// extra allocations and we end up converting the type again when writing points to disk.
if isInt {
// Make sure the last char is an 'i' for integers (e.g. 9i10 is not valid)
if buf[i-1] != 'i' {
return i, ErrInvalidNumber
}
// Parse the int to check bounds if the number of digits could be larger than the max range
// We subtract 1 from the index to remove the `i` from our tests
if len(buf[start:i-1]) >= maxInt64Digits || len(buf[start:i-1]) >= minInt64Digits {
if _, err := parseIntBytes(buf[start:i-1], 10, 64); err != nil {
return i, makeError(fmt.Sprintf("unable to parse integer %s: %s", buf[start:i-1], err), buf, i)
}
}
} else {
// Parse the float to check bounds if it's scientific or the number of digits could be larger than the max range
if scientific || len(buf[start:i]) >= maxFloat64Digits || len(buf[start:i]) >= minFloat64Digits {
if _, err := parseFloatBytes(buf[start:i], 64); err != nil {
return i, makeError("invalid float", buf, i)
}
}
}
return i, nil
}
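// For example: `1.5e3` and `-0.25` scan as floats; `9i10` fails because the
// 'i' must be the final byte of an integer; `1.1.1` fails because only one
// decimal point is allowed; `9999999999999999999i` fails the int64 bounds
// check above.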
// scanBoolean returns the end position within buf, starting at i, after
// scanning over buf for a boolean. Valid values for a boolean are
// t, T, true, True, TRUE, f, F, false, False, FALSE. It returns an error
// if an invalid boolean is scanned.
func scanBoolean(buf []byte, i int) (int, []byte, error) {
start := i
if i < len(buf) && (buf[i] != 't' && buf[i] != 'f' && buf[i] != 'T' && buf[i] != 'F') {
return i, buf[start:i], makeError("invalid value", buf, i)
}
i++
for {
if i >= len(buf) {
break
}
if buf[i] == ',' || buf[i] == ' ' {
break
}
i++
}
// Single char bool (t, T, f, F) is ok
if i-start == 1 {
return i, buf[start:i], nil
}
// length must be 4 for true or TRUE
if (buf[start] == 't' || buf[start] == 'T') && i-start != 4 {
return i, buf[start:i], makeError("invalid boolean", buf, i)
}
// length must be 5 for false or FALSE
if (buf[start] == 'f' || buf[start] == 'F') && i-start != 5 {
return i, buf[start:i], makeError("invalid boolean", buf, i)
}
// Otherwise
valid := false
switch buf[start] {
case 't':
valid = bytes.Equal(buf[start:i], []byte("true"))
case 'f':
valid = bytes.Equal(buf[start:i], []byte("false"))
case 'T':
valid = bytes.Equal(buf[start:i], []byte("TRUE")) || bytes.Equal(buf[start:i], []byte("True"))
case 'F':
valid = bytes.Equal(buf[start:i], []byte("FALSE")) || bytes.Equal(buf[start:i], []byte("False"))
}
if !valid {
return i, buf[start:i], makeError("invalid boolean", buf, i)
}
return i, buf[start:i], nil
}
// skipWhitespace returns the end position within buf, starting at i, after
// scanning over spaces, tabs, and NUL bytes.
func skipWhitespace(buf []byte, i int) int {
for i < len(buf) {
if buf[i] != ' ' && buf[i] != '\t' && buf[i] != 0 {
break
}
i++
}
return i
}
// makeError is a helper function for making a metric parsing error.
// reason is the reason that the error occurred.
// buf should be the current buffer we are parsing.
// i is the current index, to give some context on where in the buffer we are.
func makeError(reason string, buf []byte, i int) error {
return fmt.Errorf("metric parsing error, reason: [%s], buffer: [%s], index: [%d]",
reason, buf, i)
}
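A minimal sketch of the parser's entry point; the import path is assumed from this branch's layout:

```go
package main

import (
	"fmt"

	"github.com/influxdata/telegraf/metric"
)

func main() {
	// Parse expects newline-terminated line protocol.
	buf := []byte("cpu,host=foo idle=99,busy=1i 1480595849000000000\n" +
		"cpu,host=bar idle=98 1480595850000000000\n")
	metrics, err := metric.Parse(buf)
	if err != nil {
		panic(err)
	}
	for _, m := range metrics {
		fmt.Printf("%s tags=%v fields=%v\n", m.Name(), m.Tags(), m.Fields())
	}
}
```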

metric/parse_test.go Normal file

@@ -0,0 +1,355 @@
package metric
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
)
const trues = `booltest b=T
booltest b=t
booltest b=True
booltest b=TRUE
booltest b=true
`
const falses = `booltest b=F
booltest b=f
booltest b=False
booltest b=FALSE
booltest b=false
`
const withEscapes = `w\,\ eather,host=local temp=99 1465839830100400200
w\,eather,host=local temp=99 1465839830100400200
weather,location=us\,midwest temperature=82 1465839830100400200
weather,location=us-midwest temp\=rature=82 1465839830100400200
weather,location\ place=us-midwest temperature=82 1465839830100400200
weather,location=us-midwest temperature="too\"hot\"" 1465839830100400200
`
const withTimestamps = `cpu usage=99 1480595849000000000
cpu usage=99 1480595850000000000
cpu usage=99 1480595851700030000
cpu usage=99 1480595852000000300
`
const sevenMetrics = `cpu,host=foo,datacenter=us-east idle=99,busy=1i,b=true,s="string"
cpu,host=foo,datacenter=us-east idle=99,busy=1i,b=true,s="string"
cpu,host=foo,datacenter=us-east idle=99,busy=1i,b=true,s="string"
cpu,host=foo,datacenter=us-east idle=99,busy=1i,b=true,s="string"
cpu,host=foo,datacenter=us-east idle=99,busy=1i,b=true,s="string"
cpu,host=foo,datacenter=us-east idle=99,busy=1i,b=true,s="string"
cpu,host=foo,datacenter=us-east idle=99,busy=1i,b=true,s="string"
`
// some metrics are invalid
const someInvalid = `cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu3, host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu4 , usage_idle=99,usage_busy=1
cpu 1480595852000000300
cpu usage=99 1480595852foobar300
cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
`
func TestParse(t *testing.T) {
start := time.Now()
metrics, err := Parse([]byte(sevenMetrics))
assert.NoError(t, err)
assert.Len(t, metrics, 7)
// all metrics parsed together w/o a timestamp should have the same time.
firstTime := metrics[0].Time()
for _, m := range metrics {
assert.Equal(t,
map[string]interface{}{
"idle": float64(99),
"busy": int64(1),
"b": true,
"s": "string",
},
m.Fields(),
)
assert.Equal(t,
map[string]string{
"host": "foo",
"datacenter": "us-east",
},
m.Tags(),
)
assert.True(t, m.Time().After(start))
assert.True(t, m.Time().Equal(firstTime))
}
}
func TestParseErrors(t *testing.T) {
start := time.Now()
metrics, err := Parse([]byte(someInvalid))
assert.Error(t, err)
assert.Len(t, metrics, 4)
// all metrics parsed together w/o a timestamp should have the same time.
firstTime := metrics[0].Time()
for _, m := range metrics {
assert.Equal(t,
map[string]interface{}{
"usage_idle": float64(99),
"usage_busy": float64(1),
},
m.Fields(),
)
assert.Equal(t,
map[string]string{
"host": "foo",
"datacenter": "us-east",
},
m.Tags(),
)
assert.True(t, m.Time().After(start))
assert.True(t, m.Time().Equal(firstTime))
}
}
func TestParseWithTimestamps(t *testing.T) {
metrics, err := Parse([]byte(withTimestamps))
assert.NoError(t, err)
assert.Len(t, metrics, 4)
expectedTimestamps := []time.Time{
time.Unix(0, 1480595849000000000),
time.Unix(0, 1480595850000000000),
time.Unix(0, 1480595851700030000),
time.Unix(0, 1480595852000000300),
}
// each metric should have the timestamp parsed from its own line.
for i, m := range metrics {
assert.Equal(t,
map[string]interface{}{
"usage": float64(99),
},
m.Fields(),
)
assert.True(t, m.Time().Equal(expectedTimestamps[i]))
}
}
func TestParseEscapes(t *testing.T) {
metrics, err := Parse([]byte(withEscapes))
assert.NoError(t, err)
assert.Len(t, metrics, 6)
tests := []struct {
name string
fields map[string]interface{}
tags map[string]string
}{
{
name: `w, eather`,
fields: map[string]interface{}{"temp": float64(99)},
tags: map[string]string{"host": "local"},
},
{
name: `w,eather`,
fields: map[string]interface{}{"temp": float64(99)},
tags: map[string]string{"host": "local"},
},
{
name: `weather`,
fields: map[string]interface{}{"temperature": float64(82)},
tags: map[string]string{"location": `us,midwest`},
},
{
name: `weather`,
fields: map[string]interface{}{`temp=rature`: float64(82)},
tags: map[string]string{"location": `us-midwest`},
},
{
name: `weather`,
fields: map[string]interface{}{"temperature": float64(82)},
tags: map[string]string{`location place`: `us-midwest`},
},
{
name: `weather`,
fields: map[string]interface{}{`temperature`: `too"hot"`},
tags: map[string]string{"location": `us-midwest`},
},
}
for i, test := range tests {
assert.Equal(t, test.name, metrics[i].Name())
assert.Equal(t, test.fields, metrics[i].Fields())
assert.Equal(t, test.tags, metrics[i].Tags())
}
}
func TestParseTrueBooleans(t *testing.T) {
metrics, err := Parse([]byte(trues))
assert.NoError(t, err)
assert.Len(t, metrics, 5)
for _, metric := range metrics {
assert.Equal(t, "booltest", metric.Name())
assert.Equal(t, true, metric.Fields()["b"])
}
}
func TestParseFalseBooleans(t *testing.T) {
metrics, err := Parse([]byte(falses))
assert.NoError(t, err)
assert.Len(t, metrics, 5)
for _, metric := range metrics {
assert.Equal(t, "booltest", metric.Name())
assert.Equal(t, false, metric.Fields()["b"])
}
}
func TestParsePointBadNumber(t *testing.T) {
for _, tt := range []string{
"cpu v=- ",
"cpu v=-i ",
"cpu v=-. ",
"cpu v=. ",
"cpu v=1.0i ",
"cpu v=1ii ",
"cpu v=1a ",
"cpu v=-e-e-e ",
"cpu v=42+3 ",
"cpu v= ",
} {
_, err := Parse([]byte(tt + "\n"))
assert.Error(t, err, tt)
}
}
func TestParseTagsMissingParts(t *testing.T) {
for _, tt := range []string{
`cpu,host`,
`cpu,host,`,
`cpu,host=`,
`cpu,f=oo=bar value=1`,
`cpu,host value=1i`,
`cpu,host=serverA,region value=1i`,
`cpu,host=serverA,region= value=1i`,
`cpu,host=serverA,region=,zone=us-west value=1i`,
`cpu, value=1`,
`cpu, ,,`,
`cpu,,,`,
`cpu,host=serverA,=us-east value=1i`,
`cpu,host=serverAa\,,=us-east value=1i`,
`cpu,host=serverA\,,=us-east value=1i`,
`cpu, =serverA value=1i`,
} {
_, err := Parse([]byte(tt + "\n"))
assert.Error(t, err, tt)
}
}
func TestParsePointWhitespace(t *testing.T) {
for _, tt := range []string{
`cpu value=1.0 1257894000000000000`,
`cpu value=1.0 1257894000000000000`,
`cpu value=1.0 1257894000000000000`,
`cpu value=1.0 1257894000000000000 `,
} {
m, err := Parse([]byte(tt + "\n"))
assert.NoError(t, err, tt)
assert.Equal(t, "cpu", m[0].Name())
assert.Equal(t, map[string]interface{}{"value": float64(1)}, m[0].Fields())
}
}
func TestParsePointInvalidFields(t *testing.T) {
for _, tt := range []string{
"test,foo=bar a=101,=value",
"test,foo=bar =value",
"test,foo=bar a=101,key=",
"test,foo=bar key=",
`test,foo=bar a=101,b="foo`,
} {
_, err := Parse([]byte(tt + "\n"))
assert.Error(t, err, tt)
}
}
func TestParsePointNoFields(t *testing.T) {
for _, tt := range []string{
"cpu_load_short,host=server01,region=us-west",
"very_long_measurement_name",
"cpu,host==",
"============",
"cpu",
"cpu\n\n\n\n\n\n\n",
" ",
} {
_, err := Parse([]byte(tt + "\n"))
assert.Error(t, err, tt)
}
}
// a b=1 << this is the shortest possible metric
// any shorter is just ignored
func TestParseBufTooShort(t *testing.T) {
for _, tt := range []string{
"",
"a",
"a ",
"a b=",
} {
_, err := Parse([]byte(tt + "\n"))
assert.Error(t, err, tt)
}
}
func TestParseInvalidBooleans(t *testing.T) {
for _, tt := range []string{
"test b=tru",
"test b=fals",
"test b=faLse",
"test q=foo",
"test b=lambchops",
} {
_, err := Parse([]byte(tt + "\n"))
assert.Error(t, err, tt)
}
}
func TestParseInvalidNumbers(t *testing.T) {
for _, tt := range []string{
"test b=-",
"test b=1.1.1",
"test b=nan",
"test b=9i10",
"test b=9999999999999999999i",
} {
_, err := Parse([]byte(tt + "\n"))
assert.Error(t, err, tt)
}
}
func TestParseNegativeTimestamps(t *testing.T) {
for _, tt := range []string{
"test foo=101 -1257894000000000000",
} {
metrics, err := Parse([]byte(tt + "\n"))
assert.NoError(t, err, tt)
assert.True(t, metrics[0].Time().Equal(time.Unix(0, -1257894000000000000)))
}
}
func TestParseMaxKeyLength(t *testing.T) {
key := ""
for {
if len(key) > MaxKeyLength {
break
}
key += "test"
}
_, err := Parse([]byte(key + " value=1\n"))
assert.Error(t, err)
}


@@ -1,111 +0,0 @@
package telegraf
import (
"fmt"
"math"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
func TestNewMetric(t *testing.T) {
now := time.Now()
tags := map[string]string{
"host": "localhost",
"datacenter": "us-east-1",
}
fields := map[string]interface{}{
"usage_idle": float64(99),
"usage_busy": float64(1),
}
m, err := NewMetric("cpu", tags, fields, now)
assert.NoError(t, err)
assert.Equal(t, Untyped, m.Type())
assert.Equal(t, tags, m.Tags())
assert.Equal(t, fields, m.Fields())
assert.Equal(t, "cpu", m.Name())
assert.Equal(t, now, m.Time())
assert.Equal(t, now.UnixNano(), m.UnixNano())
}
func TestNewGaugeMetric(t *testing.T) {
now := time.Now()
tags := map[string]string{
"host": "localhost",
"datacenter": "us-east-1",
}
fields := map[string]interface{}{
"usage_idle": float64(99),
"usage_busy": float64(1),
}
m, err := NewGaugeMetric("cpu", tags, fields, now)
assert.NoError(t, err)
assert.Equal(t, Gauge, m.Type())
assert.Equal(t, tags, m.Tags())
assert.Equal(t, fields, m.Fields())
assert.Equal(t, "cpu", m.Name())
assert.Equal(t, now, m.Time())
assert.Equal(t, now.UnixNano(), m.UnixNano())
}
func TestNewCounterMetric(t *testing.T) {
now := time.Now()
tags := map[string]string{
"host": "localhost",
"datacenter": "us-east-1",
}
fields := map[string]interface{}{
"usage_idle": float64(99),
"usage_busy": float64(1),
}
m, err := NewCounterMetric("cpu", tags, fields, now)
assert.NoError(t, err)
assert.Equal(t, Counter, m.Type())
assert.Equal(t, tags, m.Tags())
assert.Equal(t, fields, m.Fields())
assert.Equal(t, "cpu", m.Name())
assert.Equal(t, now, m.Time())
assert.Equal(t, now.UnixNano(), m.UnixNano())
}
func TestNewMetricString(t *testing.T) {
now := time.Now()
tags := map[string]string{
"host": "localhost",
}
fields := map[string]interface{}{
"usage_idle": float64(99),
}
m, err := NewMetric("cpu", tags, fields, now)
assert.NoError(t, err)
lineProto := fmt.Sprintf("cpu,host=localhost usage_idle=99 %d",
now.UnixNano())
assert.Equal(t, lineProto, m.String())
lineProtoPrecision := fmt.Sprintf("cpu,host=localhost usage_idle=99 %d",
now.Unix())
assert.Equal(t, lineProtoPrecision, m.PrecisionString("s"))
}
func TestNewMetricFailNaN(t *testing.T) {
now := time.Now()
tags := map[string]string{
"host": "localhost",
}
fields := map[string]interface{}{
"usage_idle": math.NaN(),
}
_, err := NewMetric("cpu", tags, fields, now)
assert.Error(t, err)
}


@@ -1,4 +1,4 @@
-package telegraf
+package plugins
import "time"


@@ -1,4 +1,4 @@
-package telegraf
+package plugins
// Aggregator is an interface for implementing an Aggregator plugin.
// the RunningAggregator wraps this interface and guarantees that


@@ -0,0 +1,42 @@
# MinMax Aggregator Plugin
The minmax aggregator plugin aggregates min & max values of each field it sees,
emitting the aggregate every `period` seconds.
### Configuration:
```toml
# Keep the aggregate min/max of each metric passing through.
[[aggregators.minmax]]
## General Aggregator Arguments:
## The period on which to flush & clear the aggregator.
period = "30s"
## If true, the original metric will be dropped by the
## aggregator and will not get sent to the output plugins.
drop_original = false
```
### Measurements & Fields:
- measurement1
- field1_max
- field1_min
### Tags:
No tags are applied by this aggregator.
### Example Output:
```
$ telegraf --config telegraf.conf --quiet
system,host=tars load1=1.72 1475583980000000000
system,host=tars load1=1.6 1475583990000000000
system,host=tars load1=1.66 1475584000000000000
system,host=tars load1=1.63 1475584010000000000
system,host=tars load1_max=1.72,load1_min=1.6 1475584010000000000
system,host=tars load1=1.46 1475584020000000000
system,host=tars load1=1.39 1475584030000000000
system,host=tars load1=1.41 1475584040000000000
system,host=tars load1_max=1.46,load1_min=1.39 1475584040000000000
```
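Conceptually, the aggregator keeps a running minimum and maximum per field between flushes. A minimal Go sketch of that bookkeeping (illustrative names, not the plugin's internals; the actual implementation follows in the diff below):

```go
package main

import "fmt"

// minmax tracks the running minimum and maximum of one field between flushes.
type minmax struct {
	min, max float64
	seen     bool
}

func (m *minmax) add(v float64) {
	if !m.seen {
		m.min, m.max, m.seen = v, v, true
		return
	}
	if v < m.min {
		m.min = v
	}
	if v > m.max {
		m.max = v
	}
}

func main() {
	var load1 minmax
	for _, v := range []float64{1.72, 1.6, 1.66, 1.63} {
		load1.add(v)
	}
	// Matches the first aggregate line in the example output above.
	fmt.Printf("load1_max=%v,load1_min=%v\n", load1.max, load1.min)
}
```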

View File

@@ -1,7 +1,7 @@
package minmax
import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins"
"github.com/influxdata/telegraf/plugins/aggregators"
)
@@ -9,7 +9,7 @@ type MinMax struct {
cache map[uint64]aggregate
}
func NewMinMax() telegraf.Aggregator {
func NewMinMax() plugins.Aggregator {
mm := &MinMax{}
mm.Reset()
return mm
@@ -43,7 +43,7 @@ func (m *MinMax) Description() string {
return "Keep the aggregate min/max of each metric passing through."
}
func (m *MinMax) Add(in telegraf.Metric) {
func (m *MinMax) Add(in plugins.Metric) {
id := in.HashID()
if _, ok := m.cache[id]; !ok {
// hit an uncached metric, create caches for first time:
@@ -86,7 +86,7 @@ func (m *MinMax) Add(in telegraf.Metric) {
}
}
func (m *MinMax) Push(acc telegraf.Accumulator) {
func (m *MinMax) Push(acc plugins.Accumulator) {
for _, aggregate := range m.cache {
fields := map[string]interface{}{}
for k, v := range aggregate.fields {
@@ -113,7 +113,7 @@ func convert(in interface{}) (float64, bool) {
}
func init() {
aggregators.Add("minmax", func() telegraf.Aggregator {
aggregators.Add("minmax", func() plugins.Aggregator {
return NewMinMax()
})
}

View File

@@ -4,11 +4,11 @@ import (
"testing"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/testutil"
)
var m1, _ = telegraf.NewMetric("m1",
var m1, _ = metric.New("m1",
map[string]string{"foo": "bar"},
map[string]interface{}{
"a": int64(1),
@@ -24,7 +24,7 @@ var m1, _ = telegraf.NewMetric("m1",
},
time.Now(),
)
var m2, _ = telegraf.NewMetric("m1",
var m2, _ = metric.New("m1",
map[string]string{"foo": "bar"},
map[string]interface{}{
"a": int64(1),

View File

@@ -1,8 +1,8 @@
package aggregators
import "github.com/influxdata/telegraf"
import "github.com/influxdata/telegraf/plugins"
type Creator func() telegraf.Aggregator
type Creator func() plugins.Aggregator
var Aggregators = map[string]Creator{}

View File

@@ -1,4 +1,4 @@
package telegraf
package plugins
type Input interface {
// SampleConfig returns the default configuration of the Input

View File

@@ -9,7 +9,7 @@ import (
"sync"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins"
"github.com/influxdata/telegraf/internal/errchan"
"github.com/influxdata/telegraf/plugins/inputs"
@@ -35,7 +35,7 @@ func (a *Aerospike) Description() string {
return "Read stats from aerospike server(s)"
}
func (a *Aerospike) Gather(acc telegraf.Accumulator) error {
func (a *Aerospike) Gather(acc plugins.Accumulator) error {
if len(a.Servers) == 0 {
return a.gatherServer("127.0.0.1:3000", acc)
}
@@ -54,7 +54,7 @@ func (a *Aerospike) Gather(acc telegraf.Accumulator) error {
return errChan.Error()
}
func (a *Aerospike) gatherServer(hostport string, acc telegraf.Accumulator) error {
func (a *Aerospike) gatherServer(hostport string, acc plugins.Accumulator) error {
host, port, err := net.SplitHostPort(hostport)
if err != nil {
return err
@@ -152,7 +152,7 @@ func copyTags(m map[string]string) map[string]string {
}
func init() {
inputs.Add("aerospike", func() telegraf.Input {
inputs.Add("aerospike", func() plugins.Input {
return &Aerospike{}
})
}

View File

@@ -27,6 +27,7 @@ import (
_ "github.com/influxdata/telegraf/plugins/inputs/http_response"
_ "github.com/influxdata/telegraf/plugins/inputs/httpjson"
_ "github.com/influxdata/telegraf/plugins/inputs/influxdb"
_ "github.com/influxdata/telegraf/plugins/inputs/internal"
_ "github.com/influxdata/telegraf/plugins/inputs/ipmi_sensor"
_ "github.com/influxdata/telegraf/plugins/inputs/iptables"
_ "github.com/influxdata/telegraf/plugins/inputs/jolokia"

View File

@@ -2,6 +2,16 @@
#### Plugin arguments:
- **urls** []string: List of apache-status URLs to collect from. Default is "http://localhost/server-status?auto".
- **username** string: Username for HTTP basic authentication
- **password** string: Password for HTTP basic authentication
- **timeout** duration: time the HTTP connection will wait for a response. Default is 4 seconds ("4s")
##### Optional SSL Config
- **ssl_ca** string: the full path for the SSL CA certificate
- **ssl_cert** string: the full path for the SSL certificate
- **ssl_key** string: the full path for the key file
- **insecure_skip_verify** bool: if true, the HTTP client will skip all SSL verification related to peer and host. Defaults to false
#### Description

View File

@@ -10,18 +10,43 @@ import (
"strings"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/inputs"
)
type Apache struct {
Urls []string
Urls []string
Username string
Password string
ResponseTimeout internal.Duration
// Path to CA file
SSLCA string `toml:"ssl_ca"`
// Path to host cert file
SSLCert string `toml:"ssl_cert"`
// Path to cert key file
SSLKey string `toml:"ssl_key"`
// Use SSL but skip chain & host verification
InsecureSkipVerify bool
}
var sampleConfig = `
## An array of Apache status URIs to gather stats from.
## Default is "http://localhost/server-status?auto".
urls = ["http://localhost/server-status?auto"]
## user credentials for basic HTTP authentication
username = "myuser"
password = "mypassword"
## Timeout for the complete connection and response time in seconds
response_timeout = "25s" ## defaults to 5 seconds
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
## Use SSL but skip chain & host verification
# insecure_skip_verify = false
`
func (n *Apache) SampleConfig() string {
@@ -32,10 +57,13 @@ func (n *Apache) Description() string {
return "Read Apache status information (mod_status)"
}
func (n *Apache) Gather(acc telegraf.Accumulator) error {
func (n *Apache) Gather(acc plugins.Accumulator) error {
if len(n.Urls) == 0 {
n.Urls = []string{"http://localhost/server-status?auto"}
}
if n.ResponseTimeout.Duration < time.Second {
n.ResponseTimeout.Duration = time.Second * 5
}
var outerr error
var errch = make(chan error)
@@ -61,21 +89,46 @@ func (n *Apache) Gather(acc telegraf.Accumulator) error {
return outerr
}
var tr = &http.Transport{
ResponseHeaderTimeout: time.Duration(3 * time.Second),
}
func (n *Apache) gatherUrl(addr *url.URL, acc plugins.Accumulator) error {
var client = &http.Client{
Transport: tr,
Timeout: time.Duration(4 * time.Second),
}
var tr *http.Transport
func (n *Apache) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error {
resp, err := client.Get(addr.String())
if addr.Scheme == "https" {
tlsCfg, err := internal.GetTLSConfig(
n.SSLCert, n.SSLKey, n.SSLCA, n.InsecureSkipVerify)
if err != nil {
return err
}
tr = &http.Transport{
ResponseHeaderTimeout: time.Duration(3 * time.Second),
TLSClientConfig: tlsCfg,
}
} else {
tr = &http.Transport{
ResponseHeaderTimeout: time.Duration(3 * time.Second),
}
}
client := &http.Client{
Transport: tr,
Timeout: n.ResponseTimeout.Duration,
}
req, err := http.NewRequest("GET", addr.String(), nil)
if err != nil {
return fmt.Errorf("error making HTTP request to %s: %s", addr.String(), err)
return fmt.Errorf("error on new request to %s : %s\n", addr.String(), err)
}
if len(n.Username) != 0 && len(n.Password) != 0 {
req.SetBasicAuth(n.Username, n.Password)
}
resp, err := client.Do(req)
if err != nil {
return fmt.Errorf("error on request to %s : %s\n", addr.String(), err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("%s returned HTTP status %s", addr.String(), resp.Status)
}
@@ -175,7 +228,7 @@ func getTags(addr *url.URL) map[string]string {
}
func init() {
inputs.Add("apache", func() telegraf.Input {
inputs.Add("apache", func() plugins.Input {
return &Apache{}
})
}
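The net effect of the rewrite above: the shared package-level transport with its hard-coded 4 second client timeout is gone, and a transport is built per call, attaching a TLS config only for https URLs and taking the client timeout from `response_timeout`. A condensed sketch of that construction, with `internal.GetTLSConfig` elided and assumed to have succeeded:

```go
package main

import (
	"crypto/tls"
	"net/http"
	"time"
)

// buildClient sketches the per-scheme construction above: HTTPS gets the
// TLS config, and the client timeout comes from the configured
// response_timeout rather than a package-level constant.
func buildClient(scheme string, tlsCfg *tls.Config, timeout time.Duration) *http.Client {
	tr := &http.Transport{ResponseHeaderTimeout: 3 * time.Second}
	if scheme == "https" {
		tr.TLSClientConfig = tlsCfg
	}
	return &http.Client{Transport: tr, Timeout: timeout}
}

func main() {
	_ = buildClient("https", &tls.Config{InsecureSkipVerify: true}, 5*time.Second)
}
```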

View File

@@ -8,7 +8,7 @@ import (
"strconv"
"strings"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins"
"github.com/influxdata/telegraf/plugins/inputs"
)
@@ -70,7 +70,7 @@ func prettyToBytes(v string) uint64 {
return uint64(result)
}
func (b *Bcache) gatherBcache(bdev string, acc telegraf.Accumulator) error {
func (b *Bcache) gatherBcache(bdev string, acc plugins.Accumulator) error {
tags := getTags(bdev)
metrics, err := filepath.Glob(bdev + "/stats_total/*")
if len(metrics) == 0 {
@@ -105,7 +105,7 @@ func (b *Bcache) gatherBcache(bdev string, acc telegraf.Accumulator) error {
return nil
}
func (b *Bcache) Gather(acc telegraf.Accumulator) error {
func (b *Bcache) Gather(acc plugins.Accumulator) error {
bcacheDevsChecked := make(map[string]bool)
var restrictDevs bool
if len(b.BcacheDevs) != 0 {
@@ -136,7 +136,7 @@ func (b *Bcache) Gather(acc telegraf.Accumulator) error {
}
func init() {
inputs.Add("bcache", func() telegraf.Input {
inputs.Add("bcache", func() plugins.Input {
return &Bcache{}
})
}

View File

@@ -4,7 +4,7 @@ import (
"encoding/json"
"errors"
"fmt"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins"
"github.com/influxdata/telegraf/plugins/inputs"
"io/ioutil"
"log"
@@ -35,13 +35,13 @@ type Cassandra struct {
type javaMetric struct {
host string
metric string
acc telegraf.Accumulator
acc plugins.Accumulator
}
type cassandraMetric struct {
host string
metric string
acc telegraf.Accumulator
acc plugins.Accumulator
}
type jmxMetric interface {
@@ -49,12 +49,12 @@ type jmxMetric interface {
}
func newJavaMetric(host string, metric string,
acc telegraf.Accumulator) *javaMetric {
acc plugins.Accumulator) *javaMetric {
return &javaMetric{host: host, metric: metric, acc: acc}
}
func newCassandraMetric(host string, metric string,
acc telegraf.Accumulator) *cassandraMetric {
acc plugins.Accumulator) *cassandraMetric {
return &cassandraMetric{host: host, metric: metric, acc: acc}
}
@@ -257,7 +257,7 @@ func parseServerTokens(server string) map[string]string {
return serverTokens
}
func (c *Cassandra) Gather(acc telegraf.Accumulator) error {
func (c *Cassandra) Gather(acc plugins.Accumulator) error {
context := c.Context
servers := c.Servers
metrics := c.Metrics
@@ -289,7 +289,6 @@ func (c *Cassandra) Gather(acc telegraf.Accumulator) error {
requestUrl.User = url.UserPassword(serverTokens["user"],
serverTokens["passwd"])
}
fmt.Printf("host %s url %s\n", serverTokens["host"], requestUrl)
out, err := c.getAttr(requestUrl)
if out["status"] != 200.0 {
@@ -303,7 +302,7 @@ func (c *Cassandra) Gather(acc telegraf.Accumulator) error {
}
func init() {
inputs.Add("cassandra", func() telegraf.Input {
inputs.Add("cassandra", func() plugins.Input {
return &Cassandra{jClient: &JolokiaClientImpl{client: &http.Client{}}}
})
}

View File

@@ -4,7 +4,7 @@ import (
"bytes"
"encoding/json"
"fmt"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins"
"github.com/influxdata/telegraf/plugins/inputs"
"io/ioutil"
"log"
@@ -75,7 +75,7 @@ func (c *Ceph) SampleConfig() string {
return sampleConfig
}
func (c *Ceph) Gather(acc telegraf.Accumulator) error {
func (c *Ceph) Gather(acc plugins.Accumulator) error {
if c.GatherAdminSocketStats {
if err := c.gatherAdminSocketStats(acc); err != nil {
return err
@@ -91,7 +91,7 @@ func (c *Ceph) Gather(acc telegraf.Accumulator) error {
return nil
}
func (c *Ceph) gatherAdminSocketStats(acc telegraf.Accumulator) error {
func (c *Ceph) gatherAdminSocketStats(acc plugins.Accumulator) error {
sockets, err := findSockets(c)
if err != nil {
return fmt.Errorf("failed to find sockets at path '%s': %v", c.SocketDir, err)
@@ -117,10 +117,10 @@ func (c *Ceph) gatherAdminSocketStats(acc telegraf.Accumulator) error {
return nil
}
func (c *Ceph) gatherClusterStats(acc telegraf.Accumulator) error {
func (c *Ceph) gatherClusterStats(acc plugins.Accumulator) error {
jobs := []struct {
command string
parser func(telegraf.Accumulator, string) error
parser func(plugins.Accumulator, string) error
}{
{"status", decodeStatus},
{"df", decodeDf},
@@ -155,7 +155,7 @@ func init() {
GatherClusterStats: false,
}
inputs.Add(measurement, func() telegraf.Input { return &c })
inputs.Add(measurement, func() plugins.Input { return &c })
}
@@ -322,7 +322,7 @@ func (c *Ceph) exec(command string) (string, error) {
return output, nil
}
func decodeStatus(acc telegraf.Accumulator, input string) error {
func decodeStatus(acc plugins.Accumulator, input string) error {
data := make(map[string]interface{})
err := json.Unmarshal([]byte(input), &data)
if err != nil {
@@ -347,7 +347,7 @@ func decodeStatus(acc telegraf.Accumulator, input string) error {
return nil
}
func decodeStatusOsdmap(acc telegraf.Accumulator, data map[string]interface{}) error {
func decodeStatusOsdmap(acc plugins.Accumulator, data map[string]interface{}) error {
osdmap, ok := data["osdmap"].(map[string]interface{})
if !ok {
return fmt.Errorf("WARNING %s - unable to decode osdmap", measurement)
@@ -360,7 +360,7 @@ func decodeStatusOsdmap(acc telegraf.Accumulator, data map[string]interface{}) e
return nil
}
func decodeStatusPgmap(acc telegraf.Accumulator, data map[string]interface{}) error {
func decodeStatusPgmap(acc plugins.Accumulator, data map[string]interface{}) error {
pgmap, ok := data["pgmap"].(map[string]interface{})
if !ok {
return fmt.Errorf("WARNING %s - unable to decode pgmap", measurement)
@@ -376,7 +376,7 @@ func decodeStatusPgmap(acc telegraf.Accumulator, data map[string]interface{}) er
return nil
}
func decodeStatusPgmapState(acc telegraf.Accumulator, data map[string]interface{}) error {
func decodeStatusPgmapState(acc plugins.Accumulator, data map[string]interface{}) error {
pgmap, ok := data["pgmap"].(map[string]interface{})
if !ok {
return fmt.Errorf("WARNING %s - unable to decode pgmap", measurement)
@@ -409,7 +409,7 @@ func decodeStatusPgmapState(acc telegraf.Accumulator, data map[string]interface{
return nil
}
func decodeDf(acc telegraf.Accumulator, input string) error {
func decodeDf(acc plugins.Accumulator, input string) error {
data := make(map[string]interface{})
err := json.Unmarshal([]byte(input), &data)
if err != nil {
@@ -451,7 +451,7 @@ func decodeDf(acc telegraf.Accumulator, input string) error {
return nil
}
func decodeOsdPoolStats(acc telegraf.Accumulator, input string) error {
func decodeOsdPoolStats(acc plugins.Accumulator, input string) error {
data := make([]map[string]interface{}, 0)
err := json.Unmarshal([]byte(input), &data)
if err != nil {

View File

@@ -1,7 +1,7 @@
package cgroup
import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins"
"github.com/influxdata/telegraf/plugins/inputs"
)
@@ -34,5 +34,5 @@ func (g *CGroup) Description() string {
}
func init() {
inputs.Add("cgroup", func() telegraf.Input { return &CGroup{} })
inputs.Add("cgroup", func() plugins.Input { return &CGroup{} })
}

View File

@@ -11,12 +11,12 @@ import (
"regexp"
"strconv"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins"
)
const metricName = "cgroup"
func (g *CGroup) Gather(acc telegraf.Accumulator) error {
func (g *CGroup) Gather(acc plugins.Accumulator) error {
list := make(chan pathInfo)
go g.generateDirs(list)
@@ -32,7 +32,7 @@ func (g *CGroup) Gather(acc telegraf.Accumulator) error {
return nil
}
func (g *CGroup) gatherDir(dir string, acc telegraf.Accumulator) error {
func (g *CGroup) gatherDir(dir string, acc plugins.Accumulator) error {
fields := make(map[string]interface{})
list := make(chan pathInfo)

View File

@@ -3,9 +3,9 @@
package cgroup
import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins"
)
func (g *CGroup) Gather(acc telegraf.Accumulator) error {
func (g *CGroup) Gather(acc plugins.Accumulator) error {
return nil
}

View File

@@ -10,7 +10,7 @@ import (
"strings"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/inputs"
)
@@ -35,7 +35,7 @@ func (*Chrony) SampleConfig() string {
`
}
func (c *Chrony) Gather(acc telegraf.Accumulator) error {
func (c *Chrony) Gather(acc plugins.Accumulator) error {
if len(c.path) == 0 {
return errors.New("chronyc not found: verify that chrony is installed and that chronyc is in your PATH")
}
@@ -127,7 +127,7 @@ func init() {
if len(path) > 0 {
c.path = path
}
inputs.Add("chrony", func() telegraf.Input {
inputs.Add("chrony", func() plugins.Input {
return &c
})
}

View File

@@ -18,21 +18,28 @@ API endpoint. In the following order the plugin will attempt to authenticate.
```toml
[[inputs.cloudwatch]]
## Amazon Region (required)
region = 'us-east-1'
region = "us-east-1"
# The minimum period for Cloudwatch metrics is 1 minute (60s). However, not all
# metrics are made available at the 1 minute period. Some are collected at
# 3 minute and 5 minute intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring.
# Note that if a period is configured that is smaller than the minimum for a
# particular metric, that metric will not be returned by the Cloudwatch API
# and will not be collected by Telegraf.
#
## Requested CloudWatch aggregation Period (required - must be a multiple of 60s)
period = '1m'
period = "5m"
## Collection Delay (required - must account for metrics availability via CloudWatch API)
delay = '1m'
delay = "5m"
## Override global run interval (optional - defaults to global interval)
## Recommended: use a metric 'interval' that is a multiple of 'period' to avoid
## gaps or overlaps in pulled data
interval = '1m'
interval = "5m"
## Metric Statistic Namespace (required)
namespace = 'AWS/ELB'
namespace = "AWS/ELB"
## Maximum requests per second. Note that the global default AWS rate limit is
## 10 reqs/sec, so if you define multiple namespaces, these should add up to a
@@ -43,16 +50,16 @@ API endpoint. In the following order the plugin will attempt to authenticate.
## Defaults to all Metrics in Namespace if nothing is provided
## Refreshes Namespace available metrics every 1h
[[inputs.cloudwatch.metrics]]
names = ['Latency', 'RequestCount']
names = ["Latency", "RequestCount"]
## Dimension filters for Metric (optional)
[[inputs.cloudwatch.metrics.dimensions]]
name = 'LoadBalancerName'
value = 'p-example'
name = "LoadBalancerName"
value = "p-example"
[[inputs.cloudwatch.metrics.dimensions]]
name = 'AvailabilityZone'
value = '*'
name = "AvailabilityZone"
value = "*"
```
#### Requirements and Terminology
@@ -71,16 +78,16 @@ wildcard dimension is ignored.
Example:
```
[[inputs.cloudwatch.metrics]]
names = ['Latency']
names = ["Latency"]
## Dimension filters for Metric (optional)
[[inputs.cloudwatch.metrics.dimensions]]
name = 'LoadBalancerName'
value = 'p-example'
name = "LoadBalancerName"
value = "p-example"
[[inputs.cloudwatch.metrics.dimensions]]
name = 'AvailabilityZone'
value = '*'
name = "AvailabilityZone"
value = "*"
```
If the following ELBs are available:

View File

@@ -10,7 +10,7 @@ import (
"github.com/aws/aws-sdk-go/service/cloudwatch"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins"
"github.com/influxdata/telegraf/internal"
internalaws "github.com/influxdata/telegraf/internal/config/aws"
"github.com/influxdata/telegraf/internal/errchan"
@@ -63,7 +63,7 @@ type (
func (c *CloudWatch) SampleConfig() string {
return `
## Amazon Region
region = 'us-east-1'
region = "us-east-1"
## Amazon Credentials
## Credentials are loaded in the following order
@@ -80,22 +80,29 @@ func (c *CloudWatch) SampleConfig() string {
#profile = ""
#shared_credential_file = ""
# The minimum period for Cloudwatch metrics is 1 minute (60s). However, not all
# metrics are made available at the 1 minute period. Some are collected at
# 3 minute and 5 minute intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring.
# Note that if a period is configured that is smaller than the minimum for a
# particular metric, that metric will not be returned by the Cloudwatch API
# and will not be collected by Telegraf.
#
## Requested CloudWatch aggregation Period (required - must be a multiple of 60s)
period = '1m'
period = "5m"
## Collection Delay (required - must account for metrics availability via CloudWatch API)
delay = '1m'
delay = "5m"
## Recommended: use a metric 'interval' that is a multiple of 'period' to avoid
## gaps or overlaps in pulled data
interval = '1m'
interval = "5m"
## Configure the TTL for the internal cache of metrics.
## Defaults to 1 hr if not specified
#cache_ttl = '10m'
#cache_ttl = "10m"
## Metric Statistic Namespace (required)
namespace = 'AWS/ELB'
namespace = "AWS/ELB"
## Maximum requests per second. Note that the global default AWS rate limit is
## 10 reqs/sec, so if you define multiple namespaces, these should add up to a
@@ -106,12 +113,12 @@ func (c *CloudWatch) SampleConfig() string {
## Defaults to all Metrics in Namespace if nothing is provided
## Refreshes Namespace available metrics every 1h
#[[inputs.cloudwatch.metrics]]
# names = ['Latency', 'RequestCount']
# names = ["Latency", "RequestCount"]
#
# ## Dimension filters for Metric (optional)
# [[inputs.cloudwatch.metrics.dimensions]]
# name = 'LoadBalancerName'
# value = 'p-example'
# name = "LoadBalancerName"
# value = "p-example"
`
}
@@ -119,11 +126,7 @@ func (c *CloudWatch) Description() string {
return "Pull Metric Statistics from Amazon CloudWatch"
}
func (c *CloudWatch) Gather(acc telegraf.Accumulator) error {
if c.client == nil {
c.initializeCloudWatch()
}
func SelectMetrics(c *CloudWatch) ([]*cloudwatch.Metric, error) {
var metrics []*cloudwatch.Metric
// check for provided metric filter
@@ -133,7 +136,6 @@ func (c *CloudWatch) Gather(acc telegraf.Accumulator) error {
if !hasWilcard(m.Dimensions) {
dimensions := make([]*cloudwatch.Dimension, len(m.Dimensions))
for k, d := range m.Dimensions {
fmt.Printf("Dimension [%s]:[%s]\n", d.Name, d.Value)
dimensions[k] = &cloudwatch.Dimension{
Name: aws.String(d.Name),
Value: aws.String(d.Value),
@@ -149,11 +151,11 @@ func (c *CloudWatch) Gather(acc telegraf.Accumulator) error {
} else {
allMetrics, err := c.fetchNamespaceMetrics()
if err != nil {
return err
return nil, err
}
for _, name := range m.MetricNames {
for _, metric := range allMetrics {
if isSelected(metric, m.Dimensions) {
if isSelected(name, metric, m.Dimensions) {
metrics = append(metrics, &cloudwatch.Metric{
Namespace: aws.String(c.Namespace),
MetricName: aws.String(name),
@@ -163,16 +165,26 @@ func (c *CloudWatch) Gather(acc telegraf.Accumulator) error {
}
}
}
}
} else {
var err error
metrics, err = c.fetchNamespaceMetrics()
if err != nil {
return err
return nil, err
}
}
return metrics, nil
}
func (c *CloudWatch) Gather(acc plugins.Accumulator) error {
if c.client == nil {
c.initializeCloudWatch()
}
metrics, err := SelectMetrics(c)
if err != nil {
return err
}
metricCount := len(metrics)
errChan := errchan.New(metricCount)
@@ -198,7 +210,7 @@ func (c *CloudWatch) Gather(acc telegraf.Accumulator) error {
}
func init() {
inputs.Add("cloudwatch", func() telegraf.Input {
inputs.Add("cloudwatch", func() plugins.Input {
ttl, _ := time.ParseDuration("1h")
return &CloudWatch{
CacheTTL: internal.Duration{Duration: ttl},
@@ -229,13 +241,12 @@ func (c *CloudWatch) initializeCloudWatch() error {
/*
* Fetch available metrics for given CloudWatch Namespace
*/
func (c *CloudWatch) fetchNamespaceMetrics() (metrics []*cloudwatch.Metric, err error) {
func (c *CloudWatch) fetchNamespaceMetrics() ([]*cloudwatch.Metric, error) {
if c.metricCache != nil && c.metricCache.IsValid() {
metrics = c.metricCache.Metrics
return
return c.metricCache.Metrics, nil
}
metrics = []*cloudwatch.Metric{}
metrics := []*cloudwatch.Metric{}
var token *string
for more := true; more; {
@@ -263,14 +274,14 @@ func (c *CloudWatch) fetchNamespaceMetrics() (metrics []*cloudwatch.Metric, err
TTL: c.CacheTTL.Duration,
}
return
return metrics, nil
}
/*
* Gather given Metric and emit any error
*/
func (c *CloudWatch) gatherMetric(
acc telegraf.Accumulator,
acc plugins.Accumulator,
metric *cloudwatch.Metric,
now time.Time,
errChan chan error,
@@ -375,7 +386,10 @@ func hasWilcard(dimensions []*Dimension) bool {
return false
}
func isSelected(metric *cloudwatch.Metric, dimensions []*Dimension) bool {
func isSelected(name string, metric *cloudwatch.Metric, dimensions []*Dimension) bool {
if name != *metric.MetricName {
return false
}
if len(metric.Dimensions) != len(dimensions) {
return false
}
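The extraction of `SelectMetrics` makes the selection logic testable on its own (see the tests that follow), and `isSelected` now also rejects metrics whose name differs from the requested one, not just mismatched dimensions. A sketch of that check with simplified local types, where `"*"` is assumed to match any dimension value per the README above:

```go
package main

import "fmt"

// Simplified stand-ins for *cloudwatch.Metric dimensions and *Dimension filters.
type dim struct{ Name, Value string }

// selected sketches the new isSelected: the metric name must match, the
// dimension counts must match, and every filter must match some dimension.
func selected(name, metricName string, metricDims, filter []dim) bool {
	if name != metricName || len(metricDims) != len(filter) {
		return false
	}
	for _, f := range filter {
		matched := false
		for _, d := range metricDims {
			if d.Name == f.Name && (f.Value == "*" || f.Value == d.Value) {
				matched = true
			}
		}
		if !matched {
			return false
		}
	}
	return true
}

func main() {
	dims := []dim{{"LoadBalancerName", "p-example"}, {"AvailabilityZone", "us-east-1a"}}
	filter := []dim{{"LoadBalancerName", "p-example"}, {"AvailabilityZone", "*"}}
	fmt.Println(selected("Latency", "Latency", dims, filter))      // true
	fmt.Println(selected("RequestCount", "Latency", dims, filter)) // false: name mismatch
}
```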

View File

@@ -11,9 +11,9 @@ import (
"github.com/stretchr/testify/assert"
)
type mockCloudWatchClient struct{}
type mockGatherCloudWatchClient struct{}
func (m *mockCloudWatchClient) ListMetrics(params *cloudwatch.ListMetricsInput) (*cloudwatch.ListMetricsOutput, error) {
func (m *mockGatherCloudWatchClient) ListMetrics(params *cloudwatch.ListMetricsInput) (*cloudwatch.ListMetricsOutput, error) {
metric := &cloudwatch.Metric{
Namespace: params.Namespace,
MetricName: aws.String("Latency"),
@@ -31,7 +31,7 @@ func (m *mockCloudWatchClient) ListMetrics(params *cloudwatch.ListMetricsInput)
return result, nil
}
func (m *mockCloudWatchClient) GetMetricStatistics(params *cloudwatch.GetMetricStatisticsInput) (*cloudwatch.GetMetricStatisticsOutput, error) {
func (m *mockGatherCloudWatchClient) GetMetricStatistics(params *cloudwatch.GetMetricStatisticsInput) (*cloudwatch.GetMetricStatisticsOutput, error) {
dataPoint := &cloudwatch.Datapoint{
Timestamp: params.EndTime,
Minimum: aws.Float64(0.1),
@@ -62,7 +62,7 @@ func TestGather(t *testing.T) {
}
var acc testutil.Accumulator
c.client = &mockCloudWatchClient{}
c.client = &mockGatherCloudWatchClient{}
c.Gather(&acc)
@@ -83,6 +83,94 @@ func TestGather(t *testing.T) {
}
type mockSelectMetricsCloudWatchClient struct{}
func (m *mockSelectMetricsCloudWatchClient) ListMetrics(params *cloudwatch.ListMetricsInput) (*cloudwatch.ListMetricsOutput, error) {
metrics := []*cloudwatch.Metric{}
// 4 metrics are available
metricNames := []string{"Latency", "RequestCount", "HealthyHostCount", "UnHealthyHostCount"}
// for 3 ELBs
loadBalancers := []string{"lb-1", "lb-2", "lb-3"}
// in 2 AZs
availabilityZones := []string{"us-east-1a", "us-east-1b"}
for _, m := range metricNames {
for _, lb := range loadBalancers {
// For each metric/ELB pair, we get an aggregate value across all AZs.
metrics = append(metrics, &cloudwatch.Metric{
Namespace: aws.String("AWS/ELB"),
MetricName: aws.String(m),
Dimensions: []*cloudwatch.Dimension{
&cloudwatch.Dimension{
Name: aws.String("LoadBalancerName"),
Value: aws.String(lb),
},
},
})
for _, az := range availabilityZones {
// We get a metric for each metric/ELB/AZ triplet.
metrics = append(metrics, &cloudwatch.Metric{
Namespace: aws.String("AWS/ELB"),
MetricName: aws.String(m),
Dimensions: []*cloudwatch.Dimension{
&cloudwatch.Dimension{
Name: aws.String("LoadBalancerName"),
Value: aws.String(lb),
},
&cloudwatch.Dimension{
Name: aws.String("AvailabilityZone"),
Value: aws.String(az),
},
},
})
}
}
}
result := &cloudwatch.ListMetricsOutput{
Metrics: metrics,
}
return result, nil
}
func (m *mockSelectMetricsCloudWatchClient) GetMetricStatistics(params *cloudwatch.GetMetricStatisticsInput) (*cloudwatch.GetMetricStatisticsOutput, error) {
return nil, nil
}
func TestSelectMetrics(t *testing.T) {
duration, _ := time.ParseDuration("1m")
internalDuration := internal.Duration{
Duration: duration,
}
c := &CloudWatch{
Region: "us-east-1",
Namespace: "AWS/ELB",
Delay: internalDuration,
Period: internalDuration,
RateLimit: 10,
Metrics: []*Metric{
&Metric{
MetricNames: []string{"Latency", "RequestCount"},
Dimensions: []*Dimension{
&Dimension{
Name: "LoadBalancerName",
Value: "*",
},
&Dimension{
Name: "AvailabilityZone",
Value: "*",
},
},
},
},
}
c.client = &mockSelectMetricsCloudWatchClient{}
metrics, err := SelectMetrics(c)
// We've asked for 2 (of the 4) metric names over all 3 load balancers in
// both AZs. Only the two-dimension (LB + AZ) series match the filter, so
// we expect 2 * 3 * 2 = 12 metrics.
assert.Equal(t, 12, len(metrics))
assert.Nil(t, err)
}
func TestGenerateStatisticsInputParams(t *testing.T) {
d := &cloudwatch.Dimension{
Name: aws.String("LoadBalancerName"),

View File

@@ -9,7 +9,7 @@ import (
"strconv"
"strings"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins"
"github.com/influxdata/telegraf/plugins/inputs"
"log"
"path/filepath"
@@ -70,7 +70,7 @@ func (c *Conntrack) SampleConfig() string {
return sampleConfig
}
func (c *Conntrack) Gather(acc telegraf.Accumulator) error {
func (c *Conntrack) Gather(acc plugins.Accumulator) error {
c.setDefaults()
var metricKey string
@@ -116,5 +116,5 @@ func (c *Conntrack) Gather(acc telegraf.Accumulator) error {
}
func init() {
inputs.Add(inputName, func() telegraf.Input { return &Conntrack{} })
inputs.Add(inputName, func() plugins.Input { return &Conntrack{} })
}

View File

@@ -29,9 +29,9 @@ to query the data. It will not report the [telemetry](https://www.consul.io/docs
Tags:
- node: on which node check/service is registered on
- service_name: name of the service (this is the service name not the service ID)
- check_id
Fields:
- check_id
- check_name
- service_id
- status
@@ -41,6 +41,6 @@ Fields:
```
$ telegraf --config ./telegraf.conf -input-filter consul -test
* Plugin: consul, Collection 1
> consul_health_checks,host=wolfpit,node=consul-server-node check_id="serfHealth",check_name="Serf Health Status",service_id="",status="passing" 1464698464486439902
> consul_health_checks,host=wolfpit,node=consul-server-node,service_name=www.example.com check_id="service:www-example-com.test01",check_name="Service 'www.example.com' check",service_id="www-example-com.test01",status="critical" 1464698464486519036
> consul_health_checks,host=wolfpit,node=consul-server-node,check_id="serfHealth" check_name="Serf Health Status",service_id="",status="passing" 1464698464486439902
> consul_health_checks,host=wolfpit,node=consul-server-node,service_name=www.example.com,check_id="service:www-example-com.test01" check_name="Service 'www.example.com' check",service_id="www-example-com.test01",status="critical" 1464698464486519036
```
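The reason check_id moves from fields to tags: a point's series identity is the measurement plus its tag set, so two checks on the same node and service that differ only in a check_id *field* share one series, and for a given timestamp the later write wins. As a tag, check_id gives each check its own series. An illustrative comparison, with hypothetical check ids:

```
# check_id as a field: identical series, the second point overwrites the first
consul_health_checks,node=n1,service_name=web check_id="chk-a",status="passing"
consul_health_checks,node=n1,service_name=web check_id="chk-b",status="critical"

# check_id as a tag: two distinct series, both survive
consul_health_checks,node=n1,service_name=web,check_id=chk-a status="passing"
consul_health_checks,node=n1,service_name=web,check_id=chk-b status="critical"
```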

View File

@@ -4,7 +4,7 @@ import (
"net/http"
"github.com/hashicorp/consul/api"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/inputs"
)
@@ -90,24 +90,24 @@ func (c *Consul) createAPIClient() (*api.Client, error) {
return api.NewClient(config)
}
func (c *Consul) GatherHealthCheck(acc telegraf.Accumulator, checks []*api.HealthCheck) {
func (c *Consul) GatherHealthCheck(acc plugins.Accumulator, checks []*api.HealthCheck) {
for _, check := range checks {
record := make(map[string]interface{})
tags := make(map[string]string)
record["check_id"] = check.CheckID
record["check_name"] = check.Name
record["service_id"] = check.ServiceID
record["status"] = check.Status
tags["node"] = check.Node
tags["service_name"] = check.ServiceName
tags["check_id"] = check.CheckID
acc.AddFields("consul_health_checks", record, tags)
}
}
func (c *Consul) Gather(acc telegraf.Accumulator) error {
func (c *Consul) Gather(acc plugins.Accumulator) error {
if c.client == nil {
newClient, err := c.createAPIClient()
@@ -130,7 +130,7 @@ func (c *Consul) Gather(acc telegraf.Accumulator) error {
}
func init() {
inputs.Add("consul", func() telegraf.Input {
inputs.Add("consul", func() plugins.Input {
return &Consul{}
})
}

View File

@@ -22,7 +22,6 @@ var sampleChecks = []*api.HealthCheck{
func TestGatherHealtCheck(t *testing.T) {
expectedFields := map[string]interface{}{
"check_id": "foo.health123",
"check_name": "foo.health",
"status": "passing",
"service_id": "foo.123",
@@ -31,6 +30,7 @@ func TestGatherHealtCheck(t *testing.T) {
expectedTags := map[string]string{
"node": "localhost",
"service_name": "foo",
"check_id": "foo.health123",
}
var acc testutil.Accumulator

View File

@@ -2,7 +2,7 @@ package couchbase
import (
couchbase "github.com/couchbase/go-couchbase"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins"
"github.com/influxdata/telegraf/plugins/inputs"
"sync"
)
@@ -34,7 +34,7 @@ func (r *Couchbase) Description() string {
// Reads stats from all configured clusters. Accumulates stats.
// Returns one of the errors encountered while gathering stats (if any).
func (r *Couchbase) Gather(acc telegraf.Accumulator) error {
func (r *Couchbase) Gather(acc plugins.Accumulator) error {
if len(r.Servers) == 0 {
r.gatherServer("http://localhost:8091/", acc, nil)
return nil
@@ -57,7 +57,7 @@ func (r *Couchbase) Gather(acc telegraf.Accumulator) error {
return outerr
}
func (r *Couchbase) gatherServer(addr string, acc telegraf.Accumulator, pool *couchbase.Pool) error {
func (r *Couchbase) gatherServer(addr string, acc plugins.Accumulator, pool *couchbase.Pool) error {
if pool == nil {
client, err := couchbase.Connect(addr)
if err != nil {
@@ -98,7 +98,7 @@ func (r *Couchbase) gatherServer(addr string, acc telegraf.Accumulator, pool *co
}
func init() {
inputs.Add("couchbase", func() telegraf.Input {
inputs.Add("couchbase", func() plugins.Input {
return &Couchbase{}
})
}

View File

@@ -4,7 +4,7 @@ import (
"encoding/json"
"errors"
"fmt"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins"
"github.com/influxdata/telegraf/plugins/inputs"
"net/http"
"reflect"
@@ -82,7 +82,7 @@ func (*CouchDB) SampleConfig() string {
`
}
func (c *CouchDB) Gather(accumulator telegraf.Accumulator) error {
func (c *CouchDB) Gather(accumulator plugins.Accumulator) error {
errorChannel := make(chan error, len(c.HOSTs))
var wg sync.WaitGroup
for _, u := range c.HOSTs {
@@ -122,7 +122,7 @@ var client = &http.Client{
Timeout: time.Duration(4 * time.Second),
}
func (c *CouchDB) fetchAndInsertData(accumulator telegraf.Accumulator, host string) error {
func (c *CouchDB) fetchAndInsertData(accumulator plugins.Accumulator, host string) error {
response, error := client.Get(host)
if error != nil {
@@ -209,7 +209,7 @@ func (c *CouchDB) generateFields(prefix string, obj metaData) map[string]interfa
}
func init() {
inputs.Add("couchdb", func() telegraf.Input {
inputs.Add("couchdb", func() plugins.Input {
return &CouchDB{}
})
}

View File

@@ -11,7 +11,7 @@ import (
"sync"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins"
"github.com/influxdata/telegraf/plugins/inputs"
)
@@ -64,7 +64,7 @@ var ErrProtocolError = errors.New("disque protocol error")
// Reads stats from all configured servers accumulates stats.
// Returns one of the errors encountered while gathering stats (if any).
func (g *Disque) Gather(acc telegraf.Accumulator) error {
func (g *Disque) Gather(acc plugins.Accumulator) error {
if len(g.Servers) == 0 {
url := &url.URL{
Host: ":7711",
@@ -101,7 +101,7 @@ func (g *Disque) Gather(acc telegraf.Accumulator) error {
const defaultPort = "7711"
func (g *Disque) gatherServer(addr *url.URL, acc telegraf.Accumulator) error {
func (g *Disque) gatherServer(addr *url.URL, acc plugins.Accumulator) error {
if g.c == nil {
_, _, err := net.SplitHostPort(addr.Host)
@@ -204,7 +204,7 @@ func (g *Disque) gatherServer(addr *url.URL, acc telegraf.Accumulator) error {
}
func init() {
inputs.Add("disque", func() telegraf.Input {
inputs.Add("disque", func() plugins.Input {
return &Disque{}
})
}

View File

@@ -8,7 +8,7 @@ import (
"strconv"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins"
"github.com/influxdata/telegraf/internal/errchan"
"github.com/influxdata/telegraf/plugins/inputs"
)
@@ -55,7 +55,7 @@ func (d *DnsQuery) SampleConfig() string {
func (d *DnsQuery) Description() string {
return "Query given DNS server and gives statistics"
}
func (d *DnsQuery) Gather(acc telegraf.Accumulator) error {
func (d *DnsQuery) Gather(acc plugins.Accumulator) error {
d.setDefaultValues()
errChan := errchan.New(len(d.Domains) * len(d.Servers))
@@ -156,7 +156,7 @@ func (d *DnsQuery) parseRecordType() (uint16, error) {
}
func init() {
inputs.Add("dns_query", func() telegraf.Input {
inputs.Add("dns_query", func() plugins.Input {
return &DnsQuery{}
})
}

View File

@@ -15,7 +15,7 @@ import (
"github.com/docker/engine-api/client"
"github.com/docker/engine-api/types"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/inputs"
)
@@ -79,7 +79,7 @@ func (d *Docker) Description() string {
func (d *Docker) SampleConfig() string { return sampleConfig }
// Gather starts stats collection
func (d *Docker) Gather(acc telegraf.Accumulator) error {
func (d *Docker) Gather(acc plugins.Accumulator) error {
if d.client == nil {
var c *client.Client
var err error
@@ -136,7 +136,7 @@ func (d *Docker) Gather(acc telegraf.Accumulator) error {
return nil
}
func (d *Docker) gatherInfo(acc telegraf.Accumulator) error {
func (d *Docker) gatherInfo(acc plugins.Accumulator) error {
// Init vars
dataFields := make(map[string]interface{})
metadataFields := make(map[string]interface{})
@@ -211,7 +211,7 @@ func (d *Docker) gatherInfo(acc telegraf.Accumulator) error {
func (d *Docker) gatherContainer(
container types.Container,
acc telegraf.Accumulator,
acc plugins.Accumulator,
) error {
var v *types.StatsJSON
// Parse container name
@@ -221,14 +221,18 @@ func (d *Docker) gatherContainer(
cname = strings.TrimPrefix(container.Names[0], "/")
}
// the image name sometimes has a version part.
// ie, rabbitmq:3-management
imageParts := strings.Split(container.Image, ":")
imageName := imageParts[0]
// the image name sometimes has a version part, or a private repo
// ie, rabbitmq:3-management or docker.someco.net:4443/rabbitmq:3-management
imageName := ""
imageVersion := "unknown"
if len(imageParts) > 1 {
imageVersion = imageParts[1]
i := strings.LastIndex(container.Image, ":") // index of last ':' character
if i > -1 {
imageVersion = container.Image[i+1:]
imageName = container.Image[:i]
} else {
imageName = container.Image
}
tags := map[string]string{
"engine_host": d.engine_host,
"container_name": cname,
@@ -268,7 +272,7 @@ func (d *Docker) gatherContainer(
func gatherContainerStats(
stat *types.StatsJSON,
acc telegraf.Accumulator,
acc plugins.Accumulator,
tags map[string]string,
id string,
perDevice bool,
@@ -364,11 +368,22 @@ func gatherContainerStats(
if field == "container_id" {
continue
}
var uintV uint64
switch v := value.(type) {
case uint64:
uintV = v
case int64:
uintV = uint64(v)
default:
continue
}
_, ok := totalNetworkStatMap[field]
if ok {
totalNetworkStatMap[field] = totalNetworkStatMap[field].(uint64) + value.(uint64)
totalNetworkStatMap[field] = totalNetworkStatMap[field].(uint64) + uintV
} else {
totalNetworkStatMap[field] = value
totalNetworkStatMap[field] = uintV
}
}
}
@@ -407,7 +422,7 @@ func calculateCPUPercent(stat *types.StatsJSON) float64 {
func gatherBlockIOMetrics(
stat *types.StatsJSON,
acc telegraf.Accumulator,
acc plugins.Accumulator,
tags map[string]string,
now time.Time,
id string,
@@ -487,11 +502,22 @@ func gatherBlockIOMetrics(
if field == "container_id" {
continue
}
var uintV uint64
switch v := value.(type) {
case uint64:
uintV = v
case int64:
uintV = uint64(v)
default:
continue
}
_, ok := totalStatMap[field]
if ok {
totalStatMap[field] = totalStatMap[field].(uint64) + value.(uint64)
totalStatMap[field] = totalStatMap[field].(uint64) + uintV
} else {
totalStatMap[field] = value
totalStatMap[field] = uintV
}
}
}
@@ -543,7 +569,7 @@ func parseSize(sizeStr string) (int64, error) {
}
func init() {
inputs.Add("docker", func() telegraf.Input {
inputs.Add("docker", func() plugins.Input {
return &Docker{
PerDevice: true,
Timeout: internal.Duration{Duration: time.Second * 5},
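Two behavioral fixes are visible in this file: image references are now split on the *last* ':' so a registry host with a port (quay.io:4443/...) stays in the image name, and per-interface counters are coerced through a type switch before being summed, since JSON-decoded stats may surface as uint64 or int64. A small self-contained sketch of both, with the example image taken from the tests below:

```go
package main

import (
	"fmt"
	"strings"
)

// splitImage mirrors the parsing above: the version is whatever follows the
// last ':', so registry hosts with ports survive in the name.
func splitImage(image string) (name, version string) {
	if i := strings.LastIndex(image, ":"); i > -1 {
		return image[:i], image[i+1:]
	}
	return image, "unknown"
}

// toUint64 mirrors the type switch above: counters may arrive as uint64 or
// int64 depending on the stats payload; anything else is skipped.
func toUint64(v interface{}) (uint64, bool) {
	switch n := v.(type) {
	case uint64:
		return n, true
	case int64:
		return uint64(n), true
	default:
		return 0, false
	}
}

func main() {
	fmt.Println(splitImage("quay.io:4443/coreos/etcd:v2.2.2")) // quay.io:4443/coreos/etcd v2.2.2
	var total uint64
	for _, v := range []interface{}{uint64(10), int64(5), "not-a-number"} {
		if n, ok := toUint64(v); ok {
			total += n
		}
	}
	fmt.Println(total) // 15
}
```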

View File

@@ -340,7 +340,7 @@ func (d FakeDockerClient) ContainerList(octx context.Context, options types.Cont
container2 := types.Container{
ID: "b7dfbb9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296e2173",
Names: []string{"/etcd2"},
Image: "quay.io/coreos/etcd:v2.2.2",
Image: "quay.io:4443/coreos/etcd:v2.2.2",
Command: "/etcd -name etcd2 -advertise-client-urls http://localhost:2379 -listen-client-urls http://0.0.0.0:2379",
Created: 1455941933,
Status: "Up 4 hours",
@@ -429,7 +429,7 @@ func TestDockerGatherInfo(t *testing.T) {
},
map[string]string{
"container_name": "etcd2",
"container_image": "quay.io/coreos/etcd",
"container_image": "quay.io:4443/coreos/etcd",
"cpu": "cpu3",
"container_version": "v2.2.2",
"engine_host": "absol",
@@ -477,7 +477,7 @@ func TestDockerGatherInfo(t *testing.T) {
map[string]string{
"engine_host": "absol",
"container_name": "etcd2",
"container_image": "quay.io/coreos/etcd",
"container_image": "quay.io:4443/coreos/etcd",
"container_version": "v2.2.2",
},
)

View File

@@ -11,7 +11,7 @@ import (
"sync"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins"
"github.com/influxdata/telegraf/internal/errchan"
"github.com/influxdata/telegraf/plugins/inputs"
)
@@ -51,7 +51,7 @@ func (d *Dovecot) SampleConfig() string { return sampleConfig }
const defaultPort = "24242"
// Reads stats from all configured servers.
func (d *Dovecot) Gather(acc telegraf.Accumulator) error {
func (d *Dovecot) Gather(acc plugins.Accumulator) error {
if !validQuery[d.Type] {
return fmt.Errorf("Error: %s is not a valid query type\n",
d.Type)
@@ -81,7 +81,7 @@ func (d *Dovecot) Gather(acc telegraf.Accumulator) error {
return errChan.Error()
}
func (d *Dovecot) gatherServer(addr string, acc telegraf.Accumulator, qtype string, filter string) error {
func (d *Dovecot) gatherServer(addr string, acc plugins.Accumulator, qtype string, filter string) error {
_, _, err := net.SplitHostPort(addr)
if err != nil {
return fmt.Errorf("Error: %s on url %s\n", err, addr)
@@ -111,7 +111,7 @@ func (d *Dovecot) gatherServer(addr string, acc telegraf.Accumulator, qtype stri
return gatherStats(&buf, acc, host, qtype)
}
func gatherStats(buf *bytes.Buffer, acc telegraf.Accumulator, host string, qtype string) error {
func gatherStats(buf *bytes.Buffer, acc plugins.Accumulator, host string, qtype string) error {
lines := strings.Split(buf.String(), "\n")
head := strings.Split(lines[0], "\t")
@@ -183,7 +183,7 @@ func secParser(tm string) float64 {
}
func init() {
inputs.Add("dovecot", func() telegraf.Input {
inputs.Add("dovecot", func() plugins.Input {
return &Dovecot{}
})
}

View File

@@ -2,7 +2,8 @@
The [elasticsearch](https://www.elastic.co/) plugin queries endpoints to obtain
[node](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html)
and optionally [cluster](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html) stats.
and optionally [cluster-health](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html)
or [cluster-stats](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-stats.html) metrics.
### Configuration:
@@ -14,13 +15,18 @@ and optionally [cluster](https://www.elastic.co/guide/en/elasticsearch/reference
## Timeout for HTTP requests to the elastic search server(s)
http_timeout = "5s"
## set local to false when you want to read the indices stats from all nodes
## within the cluster
## When local is true (the default), the node will read only its own stats.
## Set local to false when you want to read the node stats from all nodes
## of the cluster.
local = true
## set cluster_health to true when you want to also obtain cluster level stats
## Set cluster_health to true when you want to also obtain cluster health stats
cluster_health = false
## Set cluster_stats to true when you want to obtain cluster stats from the
## Master node.
cluster_stats = false
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"

View File

@@ -4,21 +4,27 @@ import (
"encoding/json"
"fmt"
"net/http"
"regexp"
"sync"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/internal/errchan"
"github.com/influxdata/telegraf/plugins/inputs"
jsonparser "github.com/influxdata/telegraf/plugins/parsers/json"
"io/ioutil"
"strings"
)
// mask for masking username/password from error messages
var mask = regexp.MustCompile(`https?:\/\/\S+:\S+@`)
// Nodestats are always generated, so simply define a constant for these endpoints
const statsPath = "/_nodes/stats"
const statsPathLocal = "/_nodes/_local/stats"
const healthPath = "/_cluster/health"
type node struct {
type nodeStat struct {
Host string `json:"host"`
Name string `json:"name"`
Attributes map[string]string `json:"attributes"`
@@ -58,20 +64,41 @@ type indexHealth struct {
UnassignedShards int `json:"unassigned_shards"`
}
type clusterStats struct {
NodeName string `json:"node_name"`
ClusterName string `json:"cluster_name"`
Status string `json:"status"`
Indices interface{} `json:"indices"`
Nodes interface{} `json:"nodes"`
}
type catMaster struct {
NodeID string `json:"id"`
NodeIP string `json:"ip"`
NodeName string `json:"node"`
}
const sampleConfig = `
## specify a list of one or more Elasticsearch servers
# you can add username and password to your url to use basic authentication:
# servers = ["http://user:pass@localhost:9200"]
servers = ["http://localhost:9200"]
## Timeout for HTTP requests to the elastic search server(s)
http_timeout = "5s"
## set local to false when you want to read the indices stats from all nodes
## within the cluster
## When local is true (the default), the node will read only its own stats.
## Set local to false when you want to read the node stats from all nodes
## of the cluster.
local = true
## set cluster_health to true when you want to also obtain cluster level stats
## Set cluster_health to true when you want to also obtain cluster health stats
cluster_health = false
## Set cluster_stats to true when you want to also obtain cluster stats from the
## Master node.
cluster_stats = false
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
@@ -83,15 +110,18 @@ const sampleConfig = `
// Elasticsearch is a plugin to read stats from one or many Elasticsearch
// servers.
type Elasticsearch struct {
Local bool
Servers []string
HttpTimeout internal.Duration
ClusterHealth bool
SSLCA string `toml:"ssl_ca"` // Path to CA file
SSLCert string `toml:"ssl_cert"` // Path to host cert file
SSLKey string `toml:"ssl_key"` // Path to cert key file
InsecureSkipVerify bool // Use SSL but skip chain & host verification
client *http.Client
Local bool
Servers []string
HttpTimeout internal.Duration
ClusterHealth bool
ClusterStats bool
SSLCA string `toml:"ssl_ca"` // Path to CA file
SSLCert string `toml:"ssl_cert"` // Path to host cert file
SSLKey string `toml:"ssl_key"` // Path to cert key file
InsecureSkipVerify bool // Use SSL but skip chain & host verification
client *http.Client
catMasterResponseTokens []string
isMaster bool
}
// NewElasticsearch return a new instance of Elasticsearch
@@ -113,7 +143,7 @@ func (e *Elasticsearch) Description() string {
// Gather reads the stats from Elasticsearch and writes it to the
// Accumulator.
func (e *Elasticsearch) Gather(acc telegraf.Accumulator) error {
func (e *Elasticsearch) Gather(acc plugins.Accumulator) error {
if e.client == nil {
client, err := e.createHttpClient()
@@ -123,12 +153,12 @@ func (e *Elasticsearch) Gather(acc telegraf.Accumulator) error {
e.client = client
}
errChan := errchan.New(len(e.Servers))
errChan := errchan.New(len(e.Servers) * 3)
var wg sync.WaitGroup
wg.Add(len(e.Servers))
for _, serv := range e.Servers {
go func(s string, acc telegraf.Accumulator) {
go func(s string, acc plugins.Accumulator) {
defer wg.Done()
var url string
if e.Local {
@@ -136,12 +166,36 @@ func (e *Elasticsearch) Gather(acc telegraf.Accumulator) error {
} else {
url = s + statsPath
}
e.isMaster = false
if e.ClusterStats {
// get cat/master information here so NodeStats can determine
// whether this node is the Master
e.setCatMaster(s + "/_cat/master")
}
// Always gather node stats
if err := e.gatherNodeStats(url, acc); err != nil {
err = fmt.Errorf(mask.ReplaceAllString(err.Error(), "http(s)://XXX:XXX@"))
errChan.C <- err
return
}
if e.ClusterHealth {
e.gatherClusterStats(fmt.Sprintf("%s/_cluster/health?level=indices", s), acc)
url = s + "/_cluster/health?level=indices"
if err := e.gatherClusterHealth(url, acc); err != nil {
err = fmt.Errorf(mask.ReplaceAllString(err.Error(), "http(s)://XXX:XXX@"))
errChan.C <- err
return
}
}
if e.ClusterStats && e.isMaster {
if err := e.gatherClusterStats(s+"/_cluster/stats", acc); err != nil {
err = fmt.Errorf(mask.ReplaceAllString(err.Error(), "http(s)://XXX:XXX@"))
errChan.C <- err
return
}
}
}(serv, acc)
}
@@ -167,14 +221,15 @@ func (e *Elasticsearch) createHttpClient() (*http.Client, error) {
return client, nil
}
func (e *Elasticsearch) gatherNodeStats(url string, acc telegraf.Accumulator) error {
func (e *Elasticsearch) gatherNodeStats(url string, acc plugins.Accumulator) error {
nodeStats := &struct {
ClusterName string `json:"cluster_name"`
Nodes map[string]*node `json:"nodes"`
ClusterName string `json:"cluster_name"`
Nodes map[string]*nodeStat `json:"nodes"`
}{}
if err := e.gatherData(url, nodeStats); err != nil {
if err := e.gatherJsonData(url, nodeStats); err != nil {
return err
}
for id, n := range nodeStats.Nodes {
tags := map[string]string{
"node_id": id,
@@ -183,6 +238,11 @@ func (e *Elasticsearch) gatherNodeStats(url string, acc telegraf.Accumulator) er
"cluster_name": nodeStats.ClusterName,
}
if e.ClusterStats {
// check for master
e.isMaster = (id == e.catMasterResponseTokens[0])
}
for k, v := range n.Attributes {
tags["node_attribute_"+k] = v
}
@@ -202,6 +262,7 @@ func (e *Elasticsearch) gatherNodeStats(url string, acc telegraf.Accumulator) er
now := time.Now()
for p, s := range stats {
f := jsonparser.JSONFlattener{}
// parse JSON, ignoring strings and bools
err := f.FlattenJSON("", s)
if err != nil {
return err
@@ -212,31 +273,31 @@ func (e *Elasticsearch) gatherNodeStats(url string, acc telegraf.Accumulator) er
return nil
}
func (e *Elasticsearch) gatherClusterStats(url string, acc telegraf.Accumulator) error {
clusterStats := &clusterHealth{}
if err := e.gatherData(url, clusterStats); err != nil {
func (e *Elasticsearch) gatherClusterHealth(url string, acc plugins.Accumulator) error {
healthStats := &clusterHealth{}
if err := e.gatherJsonData(url, healthStats); err != nil {
return err
}
measurementTime := time.Now()
clusterFields := map[string]interface{}{
"status": clusterStats.Status,
"timed_out": clusterStats.TimedOut,
"number_of_nodes": clusterStats.NumberOfNodes,
"number_of_data_nodes": clusterStats.NumberOfDataNodes,
"active_primary_shards": clusterStats.ActivePrimaryShards,
"active_shards": clusterStats.ActiveShards,
"relocating_shards": clusterStats.RelocatingShards,
"initializing_shards": clusterStats.InitializingShards,
"unassigned_shards": clusterStats.UnassignedShards,
"status": healthStats.Status,
"timed_out": healthStats.TimedOut,
"number_of_nodes": healthStats.NumberOfNodes,
"number_of_data_nodes": healthStats.NumberOfDataNodes,
"active_primary_shards": healthStats.ActivePrimaryShards,
"active_shards": healthStats.ActiveShards,
"relocating_shards": healthStats.RelocatingShards,
"initializing_shards": healthStats.InitializingShards,
"unassigned_shards": healthStats.UnassignedShards,
}
acc.AddFields(
"elasticsearch_cluster_health",
clusterFields,
map[string]string{"name": clusterStats.ClusterName},
map[string]string{"name": healthStats.ClusterName},
measurementTime,
)
for name, health := range clusterStats.Indices {
for name, health := range healthStats.Indices {
indexFields := map[string]interface{}{
"status": health.Status,
"number_of_shards": health.NumberOfShards,
@@ -257,7 +318,60 @@ func (e *Elasticsearch) gatherClusterStats(url string, acc telegraf.Accumulator)
return nil
}
func (e *Elasticsearch) gatherData(url string, v interface{}) error {
func (e *Elasticsearch) gatherClusterStats(url string, acc plugins.Accumulator) error {
clusterStats := &clusterStats{}
if err := e.gatherJsonData(url, clusterStats); err != nil {
return err
}
now := time.Now()
tags := map[string]string{
"node_name": clusterStats.NodeName,
"cluster_name": clusterStats.ClusterName,
"status": clusterStats.Status,
}
stats := map[string]interface{}{
"nodes": clusterStats.Nodes,
"indices": clusterStats.Indices,
}
for p, s := range stats {
f := jsonparser.JSONFlattener{}
// parse json, including bools and strings
err := f.FullFlattenJSON("", s, true, true)
if err != nil {
return err
}
acc.AddFields("elasticsearch_clusterstats_"+p, f.Fields, tags, now)
}
return nil
}
func (e *Elasticsearch) setCatMaster(url string) error {
r, err := e.client.Get(url)
if err != nil {
return err
}
defer r.Body.Close()
if r.StatusCode != http.StatusOK {
// NOTE: we are not going to read/discard r.Body under the assumption we'd prefer
// to let the underlying transport close the connection and re-establish a new one for
// future calls.
return fmt.Errorf("status-code %d, expected %d", r.StatusCode, http.StatusOK)
}
response, err := ioutil.ReadAll(r.Body)
if err != nil {
return err
}
e.catMasterResponseTokens = strings.Split(string(response), " ")
return nil
}
func (e *Elasticsearch) gatherJsonData(url string, v interface{}) error {
r, err := e.client.Get(url)
if err != nil {
return err
@@ -270,14 +384,16 @@ func (e *Elasticsearch) gatherData(url string, v interface{}) error {
return fmt.Errorf("elasticsearch: API responded with status-code %d, expected %d",
r.StatusCode, http.StatusOK)
}
if err = json.NewDecoder(r.Body).Decode(v); err != nil {
return err
}
return nil
}
func init() {
inputs.Add("elasticsearch", func() telegraf.Input {
inputs.Add("elasticsearch", func() plugins.Input {
return NewElasticsearch()
})
}
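The master detection above hinges on one comparison: the node id under which this node appears in `_nodes/stats` must equal the first whitespace-separated token of the `_cat/master` response. A minimal sketch, with a hypothetical node id and assuming the plain `id ip node-name` text format the code above tokenizes:

```go
package main

import (
	"fmt"
	"strings"
)

// isMaster mirrors the check above: _cat/master returns a line like
// "id ip node-name"; the node is master when its stats node id matches
// the first token. Values here are hypothetical.
func isMaster(nodeID, catMasterResponse string) bool {
	tokens := strings.Split(catMasterResponse, " ")
	return len(tokens) > 0 && nodeID == tokens[0]
}

func main() {
	fmt.Println(isMaster("SDFsfSPuTeWrEfzyfUymXg",
		"SDFsfSPuTeWrEfzyfUymXg 10.0.0.1 test.host.com")) // true
	fmt.Println(isMaster("someOtherNodeId",
		"SDFsfSPuTeWrEfzyfUymXg 10.0.0.1 test.host.com")) // false
}
```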

View File

@@ -8,6 +8,8 @@ import (
"github.com/influxdata/telegraf/testutil"
"fmt"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -37,16 +39,13 @@ func (t *transportMock) RoundTrip(r *http.Request) (*http.Response, error) {
func (t *transportMock) CancelRequest(_ *http.Request) {
}
func TestElasticsearch(t *testing.T) {
es := newElasticsearchWithClient()
es.Servers = []string{"http://example.com:9200"}
es.client.Transport = newTransportMock(http.StatusOK, statsResponse)
var acc testutil.Accumulator
if err := es.Gather(&acc); err != nil {
t.Fatal(err)
func checkIsMaster(es *Elasticsearch, expected bool, t *testing.T) {
if es.isMaster != expected {
msg := fmt.Sprintf("IsMaster set incorrectly")
assert.Fail(t, msg)
}
}
func checkNodeStatsResult(t *testing.T, acc *testutil.Accumulator) {
tags := map[string]string{
"cluster_name": "es-testcluster",
"node_attribute_master": "true",
@@ -55,25 +54,55 @@ func TestElasticsearch(t *testing.T) {
"node_host": "test",
}
acc.AssertContainsTaggedFields(t, "elasticsearch_indices", indicesExpected, tags)
acc.AssertContainsTaggedFields(t, "elasticsearch_os", osExpected, tags)
acc.AssertContainsTaggedFields(t, "elasticsearch_process", processExpected, tags)
acc.AssertContainsTaggedFields(t, "elasticsearch_jvm", jvmExpected, tags)
acc.AssertContainsTaggedFields(t, "elasticsearch_thread_pool", threadPoolExpected, tags)
acc.AssertContainsTaggedFields(t, "elasticsearch_fs", fsExpected, tags)
acc.AssertContainsTaggedFields(t, "elasticsearch_transport", transportExpected, tags)
acc.AssertContainsTaggedFields(t, "elasticsearch_http", httpExpected, tags)
acc.AssertContainsTaggedFields(t, "elasticsearch_breakers", breakersExpected, tags)
acc.AssertContainsTaggedFields(t, "elasticsearch_indices", nodestatsIndicesExpected, tags)
acc.AssertContainsTaggedFields(t, "elasticsearch_os", nodestatsOsExpected, tags)
acc.AssertContainsTaggedFields(t, "elasticsearch_process", nodestatsProcessExpected, tags)
acc.AssertContainsTaggedFields(t, "elasticsearch_jvm", nodestatsJvmExpected, tags)
acc.AssertContainsTaggedFields(t, "elasticsearch_thread_pool", nodestatsThreadPoolExpected, tags)
acc.AssertContainsTaggedFields(t, "elasticsearch_fs", nodestatsFsExpected, tags)
acc.AssertContainsTaggedFields(t, "elasticsearch_transport", nodestatsTransportExpected, tags)
acc.AssertContainsTaggedFields(t, "elasticsearch_http", nodestatsHttpExpected, tags)
acc.AssertContainsTaggedFields(t, "elasticsearch_breakers", nodestatsBreakersExpected, tags)
}
func TestGatherClusterStats(t *testing.T) {
func TestGather(t *testing.T) {
es := newElasticsearchWithClient()
es.Servers = []string{"http://example.com:9200"}
es.client.Transport = newTransportMock(http.StatusOK, nodeStatsResponse)
var acc testutil.Accumulator
if err := es.Gather(&acc); err != nil {
t.Fatal(err)
}
checkIsMaster(es, false, t)
checkNodeStatsResult(t, &acc)
}
func TestGatherNodeStats(t *testing.T) {
es := newElasticsearchWithClient()
es.Servers = []string{"http://example.com:9200"}
es.client.Transport = newTransportMock(http.StatusOK, nodeStatsResponse)
var acc testutil.Accumulator
if err := es.gatherNodeStats("junk", &acc); err != nil {
t.Fatal(err)
}
checkIsMaster(es, false, t)
checkNodeStatsResult(t, &acc)
}
func TestGatherClusterHealth(t *testing.T) {
es := newElasticsearchWithClient()
es.Servers = []string{"http://example.com:9200"}
es.ClusterHealth = true
es.client.Transport = newTransportMock(http.StatusOK, clusterResponse)
es.client.Transport = newTransportMock(http.StatusOK, clusterHealthResponse)
var acc testutil.Accumulator
require.NoError(t, es.Gather(&acc))
require.NoError(t, es.gatherClusterHealth("junk", &acc))
checkIsMaster(es, false, t)
acc.AssertContainsTaggedFields(t, "elasticsearch_cluster_health",
clusterHealthExpected,
@@ -88,6 +117,77 @@ func TestGatherClusterStats(t *testing.T) {
map[string]string{"index": "v2"})
}
func TestGatherClusterStatsMaster(t *testing.T) {
// This needs multiple steps to replicate the multiple calls internally.
es := newElasticsearchWithClient()
es.ClusterStats = true
es.Servers = []string{"http://example.com:9200"}
// first get catMaster
es.client.Transport = newTransportMock(http.StatusOK, IsMasterResult)
require.NoError(t, es.setCatMaster("junk"))
IsMasterResultTokens := strings.Split(string(IsMasterResult), " ")
if es.catMasterResponseTokens[0] != IsMasterResultTokens[0] {
msg := fmt.Sprintf("catmaster is incorrect")
assert.Fail(t, msg)
}
// now get node status, which determines whether we're master
var acc testutil.Accumulator
es.Local = true
es.client.Transport = newTransportMock(http.StatusOK, nodeStatsResponse)
if err := es.gatherNodeStats("junk", &acc); err != nil {
t.Fatal(err)
}
checkIsMaster(es, true, t)
checkNodeStatsResult(t, &acc)
// now test the clusterstats method
es.client.Transport = newTransportMock(http.StatusOK, clusterStatsResponse)
require.NoError(t, es.gatherClusterStats("junk", &acc))
tags := map[string]string{
"cluster_name": "es-testcluster",
"node_name": "test.host.com",
"status": "red",
}
acc.AssertContainsTaggedFields(t, "elasticsearch_clusterstats_nodes", clusterstatsNodesExpected, tags)
acc.AssertContainsTaggedFields(t, "elasticsearch_clusterstats_indices", clusterstatsIndicesExpected, tags)
}
func TestGatherClusterStatsNonMaster(t *testing.T) {
// This needs multiple steps to replicate the multiple calls internally.
es := newElasticsearchWithClient()
es.ClusterStats = true
es.Servers = []string{"http://example.com:9200"}
// first get catMaster
es.client.Transport = newTransportMock(http.StatusOK, IsNotMasterResult)
require.NoError(t, es.setCatMaster("junk"))
IsNotMasterResultTokens := strings.Split(string(IsNotMasterResult), " ")
if es.catMasterResponseTokens[0] != IsNotMasterResultTokens[0] {
msg := fmt.Sprintf("catmaster is incorrect")
assert.Fail(t, msg)
}
// now get node status, which determines whether we're master
var acc testutil.Accumulator
es.Local = true
es.client.Transport = newTransportMock(http.StatusOK, nodeStatsResponse)
if err := es.gatherNodeStats("junk", &acc); err != nil {
t.Fatal(err)
}
// ensure the flag is clear so cluster stats will not be gathered
checkIsMaster(es, false, t)
checkNodeStatsResult(t, &acc)
}
func newElasticsearchWithClient() *Elasticsearch {
es := NewElasticsearch()
es.client = &http.Client{}


@@ -1,6 +1,6 @@
package elasticsearch
const clusterResponse = `
const clusterHealthResponse = `
{
"cluster_name": "elasticsearch_telegraf",
"status": "green",
@@ -71,7 +71,7 @@ var v2IndexExpected = map[string]interface{}{
"unassigned_shards": 20,
}
const statsResponse = `
const nodeStatsResponse = `
{
"cluster_name": "es-testcluster",
"nodes": {
@@ -489,7 +489,7 @@ const statsResponse = `
}
`
var indicesExpected = map[string]interface{}{
var nodestatsIndicesExpected = map[string]interface{}{
"id_cache_memory_size_in_bytes": float64(0),
"completion_size_in_bytes": float64(0),
"suggest_total": float64(0),
@@ -561,7 +561,7 @@ var indicesExpected = map[string]interface{}{
"segments_fixed_bit_set_memory_in_bytes": float64(0),
}
var osExpected = map[string]interface{}{
var nodestatsOsExpected = map[string]interface{}{
"load_average_0": float64(0.01),
"load_average_1": float64(0.04),
"load_average_2": float64(0.05),
@@ -576,7 +576,7 @@ var osExpected = map[string]interface{}{
"mem_used_in_bytes": float64(1621868544),
}
var processExpected = map[string]interface{}{
var nodestatsProcessExpected = map[string]interface{}{
"mem_total_virtual_in_bytes": float64(4747890688),
"timestamp": float64(1436460392945),
"open_file_descriptors": float64(160),
@@ -586,7 +586,7 @@ var processExpected = map[string]interface{}{
"cpu_user_in_millis": float64(13610),
}
var jvmExpected = map[string]interface{}{
var nodestatsJvmExpected = map[string]interface{}{
"timestamp": float64(1436460392945),
"uptime_in_millis": float64(202245),
"mem_non_heap_used_in_bytes": float64(39634576),
@@ -621,7 +621,7 @@ var jvmExpected = map[string]interface{}{
"buffer_pools_mapped_total_capacity_in_bytes": float64(0),
}
var threadPoolExpected = map[string]interface{}{
var nodestatsThreadPoolExpected = map[string]interface{}{
"merge_threads": float64(6),
"merge_queue": float64(4),
"merge_active": float64(5),
@@ -726,7 +726,7 @@ var threadPoolExpected = map[string]interface{}{
"flush_completed": float64(3),
}
var fsExpected = map[string]interface{}{
var nodestatsFsExpected = map[string]interface{}{
"data_0_total_in_bytes": float64(19507089408),
"data_0_free_in_bytes": float64(16909316096),
"data_0_available_in_bytes": float64(15894814720),
@@ -736,7 +736,7 @@ var fsExpected = map[string]interface{}{
"total_total_in_bytes": float64(19507089408),
}
var transportExpected = map[string]interface{}{
var nodestatsTransportExpected = map[string]interface{}{
"server_open": float64(13),
"rx_count": float64(6),
"rx_size_in_bytes": float64(1380),
@@ -744,12 +744,12 @@ var transportExpected = map[string]interface{}{
"tx_size_in_bytes": float64(1380),
}
var httpExpected = map[string]interface{}{
var nodestatsHttpExpected = map[string]interface{}{
"current_open": float64(3),
"total_opened": float64(3),
}
var breakersExpected = map[string]interface{}{
var nodestatsBreakersExpected = map[string]interface{}{
"fielddata_estimated_size_in_bytes": float64(0),
"fielddata_overhead": float64(1.03),
"fielddata_tripped": float64(0),
@@ -763,3 +763,273 @@ var breakersExpected = map[string]interface{}{
"parent_limit_size_in_bytes": float64(727213670),
"parent_estimated_size_in_bytes": float64(0),
}
const clusterStatsResponse = `
{
"host":"ip-10-0-1-214",
"log_type":"metrics",
"timestamp":1475767451229,
"log_level":"INFO",
"node_name":"test.host.com",
"cluster_name":"es-testcluster",
"status":"red",
"indices":{
"count":1,
"shards":{
"total":4,
"primaries":4,
"replication":0.0,
"index":{
"shards":{
"min":4,
"max":4,
"avg":4.0
},
"primaries":{
"min":4,
"max":4,
"avg":4.0
},
"replication":{
"min":0.0,
"max":0.0,
"avg":0.0
}
}
},
"docs":{
"count":4,
"deleted":0
},
"store":{
"size_in_bytes":17084,
"throttle_time_in_millis":0
},
"fielddata":{
"memory_size_in_bytes":0,
"evictions":0
},
"query_cache":{
"memory_size_in_bytes":0,
"total_count":0,
"hit_count":0,
"miss_count":0,
"cache_size":0,
"cache_count":0,
"evictions":0
},
"completion":{
"size_in_bytes":0
},
"segments":{
"count":4,
"memory_in_bytes":11828,
"terms_memory_in_bytes":8932,
"stored_fields_memory_in_bytes":1248,
"term_vectors_memory_in_bytes":0,
"norms_memory_in_bytes":1280,
"doc_values_memory_in_bytes":368,
"index_writer_memory_in_bytes":0,
"index_writer_max_memory_in_bytes":2048000,
"version_map_memory_in_bytes":0,
"fixed_bit_set_memory_in_bytes":0
},
"percolate":{
"total":0,
"time_in_millis":0,
"current":0,
"memory_size_in_bytes":-1,
"memory_size":"-1b",
"queries":0
}
},
"nodes":{
"count":{
"total":1,
"master_only":0,
"data_only":0,
"master_data":1,
"client":0
},
"versions":[
{
"version": "2.3.3"
}
],
"os":{
"available_processors":1,
"allocated_processors":1,
"mem":{
"total_in_bytes":593301504
},
"names":[
{
"name":"Linux",
"count":1
}
]
},
"process":{
"cpu":{
"percent":0
},
"open_file_descriptors":{
"min":145,
"max":145,
"avg":145
}
},
"jvm":{
"max_uptime_in_millis":11580527,
"versions":[
{
"version":"1.8.0_101",
"vm_name":"OpenJDK 64-Bit Server VM",
"vm_version":"25.101-b13",
"vm_vendor":"Oracle Corporation",
"count":1
}
],
"mem":{
"heap_used_in_bytes":70550288,
"heap_max_in_bytes":1065025536
},
"threads":30
},
"fs":{
"total_in_bytes":8318783488,
"free_in_bytes":6447439872,
"available_in_bytes":6344785920
},
"plugins":[
{
"name":"cloud-aws",
"version":"2.3.3",
"description":"The Amazon Web Service (AWS) Cloud plugin allows to use AWS API for the unicast discovery mechanism and add S3 repositories.",
"jvm":true,
"classname":"org.elasticsearch.plugin.cloud.aws.CloudAwsPlugin",
"isolated":true,
"site":false
},
{
"name":"kopf",
"version":"2.0.1",
"description":"kopf - simple web administration tool for Elasticsearch",
"url":"/_plugin/kopf/",
"jvm":false,
"site":true
},
{
"name":"tr-metrics",
"version":"7bd5b4b",
"description":"Logs cluster and node stats for performance monitoring.",
"jvm":true,
"classname":"com.trgr.elasticsearch.plugin.metrics.MetricsPlugin",
"isolated":true,
"site":false
}
]
}
}
`
var clusterstatsIndicesExpected = map[string]interface{}{
"completion_size_in_bytes": float64(0),
"count": float64(1),
"docs_count": float64(4),
"docs_deleted": float64(0),
"fielddata_evictions": float64(0),
"fielddata_memory_size_in_bytes": float64(0),
"percolate_current": float64(0),
"percolate_memory_size_in_bytes": float64(-1),
"percolate_queries": float64(0),
"percolate_time_in_millis": float64(0),
"percolate_total": float64(0),
"percolate_memory_size": "-1b",
"query_cache_cache_count": float64(0),
"query_cache_cache_size": float64(0),
"query_cache_evictions": float64(0),
"query_cache_hit_count": float64(0),
"query_cache_memory_size_in_bytes": float64(0),
"query_cache_miss_count": float64(0),
"query_cache_total_count": float64(0),
"segments_count": float64(4),
"segments_doc_values_memory_in_bytes": float64(368),
"segments_fixed_bit_set_memory_in_bytes": float64(0),
"segments_index_writer_max_memory_in_bytes": float64(2.048e+06),
"segments_index_writer_memory_in_bytes": float64(0),
"segments_memory_in_bytes": float64(11828),
"segments_norms_memory_in_bytes": float64(1280),
"segments_stored_fields_memory_in_bytes": float64(1248),
"segments_term_vectors_memory_in_bytes": float64(0),
"segments_terms_memory_in_bytes": float64(8932),
"segments_version_map_memory_in_bytes": float64(0),
"shards_index_primaries_avg": float64(4),
"shards_index_primaries_max": float64(4),
"shards_index_primaries_min": float64(4),
"shards_index_replication_avg": float64(0),
"shards_index_replication_max": float64(0),
"shards_index_replication_min": float64(0),
"shards_index_shards_avg": float64(4),
"shards_index_shards_max": float64(4),
"shards_index_shards_min": float64(4),
"shards_primaries": float64(4),
"shards_replication": float64(0),
"shards_total": float64(4),
"store_size_in_bytes": float64(17084),
"store_throttle_time_in_millis": float64(0),
}
var clusterstatsNodesExpected = map[string]interface{}{
"count_client": float64(0),
"count_data_only": float64(0),
"count_master_data": float64(1),
"count_master_only": float64(0),
"count_total": float64(1),
"fs_available_in_bytes": float64(6.34478592e+09),
"fs_free_in_bytes": float64(6.447439872e+09),
"fs_total_in_bytes": float64(8.318783488e+09),
"jvm_max_uptime_in_millis": float64(1.1580527e+07),
"jvm_mem_heap_max_in_bytes": float64(1.065025536e+09),
"jvm_mem_heap_used_in_bytes": float64(7.0550288e+07),
"jvm_threads": float64(30),
"jvm_versions_0_count": float64(1),
"jvm_versions_0_version": "1.8.0_101",
"jvm_versions_0_vm_name": "OpenJDK 64-Bit Server VM",
"jvm_versions_0_vm_vendor": "Oracle Corporation",
"jvm_versions_0_vm_version": "25.101-b13",
"os_allocated_processors": float64(1),
"os_available_processors": float64(1),
"os_mem_total_in_bytes": float64(5.93301504e+08),
"os_names_0_count": float64(1),
"os_names_0_name": "Linux",
"process_cpu_percent": float64(0),
"process_open_file_descriptors_avg": float64(145),
"process_open_file_descriptors_max": float64(145),
"process_open_file_descriptors_min": float64(145),
"versions_0_version": "2.3.3",
"plugins_0_classname": "org.elasticsearch.plugin.cloud.aws.CloudAwsPlugin",
"plugins_0_description": "The Amazon Web Service (AWS) Cloud plugin allows to use AWS API for the unicast discovery mechanism and add S3 repositories.",
"plugins_0_isolated": true,
"plugins_0_jvm": true,
"plugins_0_name": "cloud-aws",
"plugins_0_site": false,
"plugins_0_version": "2.3.3",
"plugins_1_description": "kopf - simple web administration tool for Elasticsearch",
"plugins_1_jvm": false,
"plugins_1_name": "kopf",
"plugins_1_site": true,
"plugins_1_url": "/_plugin/kopf/",
"plugins_1_version": "2.0.1",
"plugins_2_classname": "com.trgr.elasticsearch.plugin.metrics.MetricsPlugin",
"plugins_2_description": "Logs cluster and node stats for performance monitoring.",
"plugins_2_isolated": true,
"plugins_2_jvm": true,
"plugins_2_name": "tr-metrics",
"plugins_2_site": false,
"plugins_2_version": "7bd5b4b",
}
const IsMasterResult = "SDFsfSDFsdfFSDSDfSFDSDF 10.206.124.66 10.206.124.66 test.host.com "
const IsNotMasterResult = "junk 10.206.124.66 10.206.124.66 test.junk.com "
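For context on these fixtures: the `_cat/master` endpoint returns a single whitespace-separated line whose first token is the master node's id, which is why the tests above split the constants on spaces and compare token zero. A minimal sketch of that check, assuming the caller already knows its local node id (`isMaster` and its arguments are illustrative names, not the plugin's actual symbols):

```go
package main

import (
	"fmt"
	"strings"
)

// isMaster reports whether localNodeID matches the first token of a
// _cat/master response line such as IsMasterResult above.
func isMaster(catMasterLine, localNodeID string) bool {
	tokens := strings.Split(strings.TrimSpace(catMasterLine), " ")
	return len(tokens) > 0 && tokens[0] == localNodeID
}

func main() {
	line := "SDFsfSDFsdfFSDSDfSFDSDF 10.206.124.66 10.206.124.66 test.host.com"
	fmt.Println(isMaster(line, "SDFsfSDFsdfFSDSDfSFDSDF")) // true
}
```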


@@ -11,9 +11,9 @@ import (
"syscall"
"time"
"github.com/gonuts/go-shellquote"
"github.com/kballard/go-shellquote"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/internal/errchan"
"github.com/influxdata/telegraf/plugins/inputs"
@@ -61,12 +61,12 @@ func NewExec() *Exec {
}
type Runner interface {
Run(*Exec, string, telegraf.Accumulator) ([]byte, error)
Run(*Exec, string, plugins.Accumulator) ([]byte, error)
}
type CommandRunner struct{}
func AddNagiosState(exitCode error, acc telegraf.Accumulator) error {
func AddNagiosState(exitCode error, acc plugins.Accumulator) error {
nagiosState := 0
if exitCode != nil {
exiterr, ok := exitCode.(*exec.ExitError)
@@ -89,7 +89,7 @@ func AddNagiosState(exitCode error, acc telegraf.Accumulator) error {
func (c CommandRunner) Run(
e *Exec,
command string,
acc telegraf.Accumulator,
acc plugins.Accumulator,
) ([]byte, error) {
split_cmd, err := shellquote.Split(command)
if err != nil || len(split_cmd) == 0 {
@@ -145,7 +145,7 @@ func removeCarriageReturns(b bytes.Buffer) bytes.Buffer {
}
func (e *Exec) ProcessCommand(command string, acc telegraf.Accumulator, wg *sync.WaitGroup) {
func (e *Exec) ProcessCommand(command string, acc plugins.Accumulator, wg *sync.WaitGroup) {
defer wg.Done()
out, err := e.runner.Run(e, command, acc)
@@ -176,7 +176,7 @@ func (e *Exec) SetParser(parser parsers.Parser) {
e.parser = parser
}
func (e *Exec) Gather(acc telegraf.Accumulator) error {
func (e *Exec) Gather(acc plugins.Accumulator) error {
var wg sync.WaitGroup
// Legacy single command support
if e.Command != "" {
@@ -226,7 +226,7 @@ func (e *Exec) Gather(acc telegraf.Accumulator) error {
}
func init() {
inputs.Add("exec", func() telegraf.Input {
inputs.Add("exec", func() plugins.Input {
return NewExec()
})
}


@@ -6,7 +6,7 @@ import (
"runtime"
"testing"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins"
"github.com/influxdata/telegraf/plugins/parsers"
"github.com/influxdata/telegraf/testutil"
@@ -36,7 +36,7 @@ const malformedJson = `
"status": "green",
`
const lineProtocol = "cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1"
const lineProtocol = "cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1\n"
const lineProtocolMulti = `
cpu,cpu=cpu0,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
@@ -83,7 +83,7 @@ func newRunnerMock(out []byte, err error) Runner {
}
}
func (r runnerMock) Run(e *Exec, command string, acc telegraf.Accumulator) ([]byte, error) {
func (r runnerMock) Run(e *Exec, command string, acc plugins.Accumulator) ([]byte, error) {
if r.err != nil {
return nil, r.err
}


@@ -4,9 +4,10 @@ import (
"crypto/md5"
"fmt"
"io"
"log"
"os"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins"
"github.com/influxdata/telegraf/internal/globpath"
"github.com/influxdata/telegraf/plugins/inputs"
)
@@ -46,7 +47,7 @@ func (_ *FileStat) Description() string {
func (_ *FileStat) SampleConfig() string { return sampleConfig }
func (f *FileStat) Gather(acc telegraf.Accumulator) error {
func (f *FileStat) Gather(acc plugins.Accumulator) error {
var errS string
var err error
@@ -78,8 +79,14 @@ func (f *FileStat) Gather(acc telegraf.Accumulator) error {
"file": fileName,
}
fields := map[string]interface{}{
"exists": int64(1),
"size_bytes": fileInfo.Size(),
"exists": int64(1),
}
if fileInfo == nil {
log.Printf("E! Unable to get info for file [%s], possible permissions issue",
fileName)
} else {
fields["size_bytes"] = fileInfo.Size()
}
if f.Md5 {
@@ -119,7 +126,7 @@ func getMd5(file string) (string, error) {
}
func init() {
inputs.Add("filestat", func() telegraf.Input {
inputs.Add("filestat", func() plugins.Input {
return NewFileStat()
})
}


@@ -14,7 +14,7 @@ import (
"sync"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/inputs"
)
@@ -129,7 +129,7 @@ func (h *GrayLog) Description() string {
}
// Gathers data for all servers.
func (h *GrayLog) Gather(acc telegraf.Accumulator) error {
func (h *GrayLog) Gather(acc plugins.Accumulator) error {
var wg sync.WaitGroup
if h.client.HTTPClient() == nil {
@@ -178,14 +178,14 @@ func (h *GrayLog) Gather(acc telegraf.Accumulator) error {
// Gathers data from a particular server
// Parameters:
// acc : The telegraf Accumulator to use
// acc : The plugins.Accumulator to use
// serverURL: endpoint to send request to
// service : the service being queried
//
// Returns:
// error: Any error that may have occurred
func (h *GrayLog) gatherServer(
acc telegraf.Accumulator,
acc plugins.Accumulator,
serverURL string,
) error {
resp, _, err := h.sendRequest(serverURL)
@@ -304,7 +304,7 @@ func (h *GrayLog) sendRequest(serverURL string) (string, float64, error) {
}
func init() {
inputs.Add("graylog", func() telegraf.Input {
inputs.Add("graylog", func() plugins.Input {
return &GrayLog{
client: &RealHTTPClient{},
}


@@ -13,7 +13,7 @@ import (
"sync"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins"
"github.com/influxdata/telegraf/internal/errchan"
"github.com/influxdata/telegraf/plugins/inputs"
)
@@ -115,7 +115,7 @@ func (r *haproxy) Description() string {
// Reads stats from all configured servers accumulates stats.
// Returns one of the errors encountered while gather stats (if any).
func (g *haproxy) Gather(acc telegraf.Accumulator) error {
func (g *haproxy) Gather(acc plugins.Accumulator) error {
if len(g.Servers) == 0 {
return g.gatherServer("http://127.0.0.1:1936/haproxy?stats", acc)
}
@@ -160,7 +160,7 @@ func (g *haproxy) Gather(acc telegraf.Accumulator) error {
return errChan.Error()
}
func (g *haproxy) gatherServerSocket(addr string, acc telegraf.Accumulator) error {
func (g *haproxy) gatherServerSocket(addr string, acc plugins.Accumulator) error {
socketPath := getSocketAddr(addr)
c, err := net.Dial("unix", socketPath)
@@ -178,7 +178,7 @@ func (g *haproxy) gatherServerSocket(addr string, acc telegraf.Accumulator) erro
return importCsvResult(c, acc, socketPath)
}
func (g *haproxy) gatherServer(addr string, acc telegraf.Accumulator) error {
func (g *haproxy) gatherServer(addr string, acc plugins.Accumulator) error {
if !strings.HasPrefix(addr, "http") {
return g.gatherServerSocket(addr, acc)
}
@@ -229,7 +229,7 @@ func getSocketAddr(sock string) string {
}
}
func importCsvResult(r io.Reader, acc telegraf.Accumulator, host string) error {
func importCsvResult(r io.Reader, acc plugins.Accumulator, host string) error {
csv := csv.NewReader(r)
result, err := csv.ReadAll()
now := time.Now()
@@ -263,6 +263,11 @@ func importCsvResult(r io.Reader, acc telegraf.Accumulator, host string) error {
if err == nil {
fields["smax"] = ival
}
case HF_SLIM:
ival, err := strconv.ParseUint(v, 10, 64)
if err == nil {
fields["slim"] = ival
}
case HF_STOT:
ival, err := strconv.ParseUint(v, 10, 64)
if err == nil {
@@ -431,7 +436,7 @@ func importCsvResult(r io.Reader, acc telegraf.Accumulator, host string) error {
}
func init() {
inputs.Add("haproxy", func() telegraf.Input {
inputs.Add("haproxy", func() plugins.Input {
return &haproxy{}
})
}


@@ -198,6 +198,7 @@ func HaproxyGetFieldValues() map[string]interface{} {
"rtime": uint64(312),
"scur": uint64(1),
"smax": uint64(32),
"slim": uint64(32),
"srv_abort": uint64(1),
"stot": uint64(171014),
"ttime": uint64(2341),
@@ -223,6 +224,6 @@ be_static,host1,0,0,0,1,,28,7873,1209688,,0,,0,0,0,0,UP,1,1,0,0,0,70698,0,,2,18,
be_static,host2,0,0,0,1,,28,13830,1085929,,0,,0,0,0,0,UP,1,1,0,0,0,70698,0,,2,18,9,,28,,2,0,,1,L4OK,,0,0,19,6,3,0,0,0,,,,0,0,,,,,338,,,0,1,1,38,
be_static,host3,0,0,0,1,,28,17959,1259760,,0,,0,0,0,0,UP,1,1,0,0,0,70698,0,,2,18,10,,28,,2,0,,1,L4OK,,1,0,20,6,2,0,0,0,,,,0,0,,,,,92,,,0,1,1,17,
be_static,BACKEND,0,0,0,2,200,307,160276,13322728,0,0,,0,0,0,0,UP,11,11,0,,0,70698,0,,2,18,0,,307,,1,0,,4,,,,0,205,73,29,0,0,,,,,0,0,0,0,0,0,92,,,0,1,3,381,
be_app,host0,0,0,1,32,,171014,510913516,2193856571,,0,,0,1,1,0,UP,100,1,0,1,0,70698,0,,2,19,1,,171013,,2,3,,12,L7OK,301,10,0,119534,48051,2345,1056,0,0,,,,73,1,,,,,0,Moved Permanently,,0,2,312,2341,
be_app,host4,0,0,2,29,,171013,499318742,2195595896,12,34,,0,2,0,0,UP,100,1,0,2,0,70698,0,,2,19,2,,171013,,2,3,,12,L7OK,301,12,0,119572,47882,2441,1088,0,0,,,,84,2,,,,,0,Moved Permanently,,0,2,316,2355,
be_app,host0,0,0,1,32,32,171014,510913516,2193856571,,0,,0,1,1,0,UP,100,1,0,1,0,70698,0,,2,19,1,,171013,,2,3,,12,L7OK,301,10,0,119534,48051,2345,1056,0,0,,,,73,1,,,,,0,Moved Permanently,,0,2,312,2341,
be_app,host4,0,0,2,29,32,171013,499318742,2195595896,12,34,,0,2,0,0,UP,100,1,0,2,0,70698,0,,2,19,2,,171013,,2,3,,12,L7OK,301,12,0,119572,47882,2441,1088,0,0,,,,84,2,,,,,0,Moved Permanently,,0,2,316,2355,
`


@@ -8,7 +8,7 @@ Hddtemp should be installed and its daemon running
## Configuration
```
```toml
[[inputs.hddtemp]]
## By default, telegraf gathers temps data from all disks detected by the
## hddtemp.
@@ -20,3 +20,24 @@ Hddtemp should be installed and its daemon running
# address = "127.0.0.1:7634"
# devices = ["sda", "*"]
```
## Measurements
- hddtemp
- temperature
Tags:
- device
- model
- unit
- status
## Example output
```
> hddtemp,unit=C,status=,host=server1,device=sdb,model=WDC\ WD740GD-00FLA1 temperature=43i 1481655647000000000
> hddtemp,device=sdc,model=SAMSUNG\ HD103UI,unit=C,status=,host=server1 temperature=38i 1481655647000000000
> hddtemp,device=sdd,model=SAMSUNG\ HD103UI,unit=C,status=,host=server1 temperature=36i 1481655647000000000
```
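The hddtemp daemon itself speaks a trivial protocol: connect over TCP (port 7634 by default, matching the plugin's default address) and it writes one pipe-delimited record per disk, `|device|model|temperature|unit|`, then closes the connection, as the go-hddtemp test fixtures later in this diff show. A minimal standalone reader, assuming a daemon is listening on the default address:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net"
	"strings"
)

func main() {
	conn, err := net.Dial("tcp", "127.0.0.1:7634")
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// The daemon writes all records and then closes the connection.
	raw, err := ioutil.ReadAll(conn)
	if err != nil {
		panic(err)
	}

	// Records look like |/dev/sda|WDC WD740GD-00FLA1|43|C|, so splitting
	// on "|" yields groups of five fields per disk.
	fields := strings.Split(string(raw), "|")
	for i := 0; i+4 < len(fields); i += 5 {
		fmt.Printf("%s (%s): %s %s\n",
			fields[i+1], fields[i+2], fields[i+3], fields[i+4])
	}
}
```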


@@ -8,7 +8,7 @@ import (
"strings"
)
type disk struct {
type Disk struct {
DeviceName string
Model string
Temperature int32
@@ -16,12 +16,19 @@ type disk struct {
Status string
}
func Fetch(address string) ([]disk, error) {
type hddtemp struct {
}
func New() *hddtemp {
return &hddtemp{}
}
func (h *hddtemp) Fetch(address string) ([]Disk, error) {
var (
err error
conn net.Conn
buffer bytes.Buffer
disks []disk
disks []Disk
)
if conn, err = net.Dial("tcp", address); err != nil {
@@ -48,7 +55,7 @@ func Fetch(address string) ([]disk, error) {
status = temperatureField
}
disks = append(disks, disk{
disks = append(disks, Disk{
DeviceName: device,
Model: fields[offset+2],
Temperature: int32(temperature),


@@ -10,13 +10,13 @@ func TestFetch(t *testing.T) {
l := serve(t, []byte("|/dev/sda|foobar|36|C|"))
defer l.Close()
disks, err := Fetch(l.Addr().String())
disks, err := New().Fetch(l.Addr().String())
if err != nil {
t.Error("expecting err to be nil")
}
expected := []disk{
expected := []Disk{
{
DeviceName: "sda",
Model: "foobar",
@@ -31,7 +31,7 @@ func TestFetch(t *testing.T) {
}
func TestFetchWrongAddress(t *testing.T) {
_, err := Fetch("127.0.0.1:1")
_, err := New().Fetch("127.0.0.1:1")
if err == nil {
t.Error("expecting err to be non-nil")
@@ -42,13 +42,13 @@ func TestFetchStatus(t *testing.T) {
l := serve(t, []byte("|/dev/sda|foobar|SLP|C|"))
defer l.Close()
disks, err := Fetch(l.Addr().String())
disks, err := New().Fetch(l.Addr().String())
if err != nil {
t.Error("expecting err to be nil")
}
expected := []disk{
expected := []Disk{
{
DeviceName: "sda",
Model: "foobar",
@@ -67,13 +67,13 @@ func TestFetchTwoDisks(t *testing.T) {
l := serve(t, []byte("|/dev/hda|ST380011A|46|C||/dev/hdd|ST340016A|SLP|*|"))
defer l.Close()
disks, err := Fetch(l.Addr().String())
disks, err := New().Fetch(l.Addr().String())
if err != nil {
t.Error("expecting err to be nil")
}
expected := []disk{
expected := []Disk{
{
DeviceName: "hda",
Model: "ST380011A",


@@ -3,7 +3,7 @@
package hddtemp
import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins"
"github.com/influxdata/telegraf/plugins/inputs"
gohddtemp "github.com/influxdata/telegraf/plugins/inputs/hddtemp/go-hddtemp"
)
@@ -13,6 +13,11 @@ const defaultAddress = "127.0.0.1:7634"
type HDDTemp struct {
Address string
Devices []string
fetcher Fetcher
}
type Fetcher interface {
Fetch(address string) ([]gohddtemp.Disk, error)
}
func (_ *HDDTemp) Description() string {
@@ -35,8 +40,11 @@ func (_ *HDDTemp) SampleConfig() string {
return hddtempSampleConfig
}
func (h *HDDTemp) Gather(acc telegraf.Accumulator) error {
disks, err := gohddtemp.Fetch(h.Address)
func (h *HDDTemp) Gather(acc plugins.Accumulator) error {
if h.fetcher == nil {
h.fetcher = gohddtemp.New()
}
disks, err := h.fetcher.Fetch(h.Address)
if err != nil {
return err
@@ -53,7 +61,7 @@ func (h *HDDTemp) Gather(acc telegraf.Accumulator) error {
}
fields := map[string]interface{}{
disk.DeviceName: disk.Temperature,
"temperature": disk.Temperature,
}
acc.AddFields("hddtemp", fields, tags)
@@ -65,7 +73,7 @@ func (h *HDDTemp) Gather(acc telegraf.Accumulator) error {
}
func init() {
inputs.Add("hddtemp", func() telegraf.Input {
inputs.Add("hddtemp", func() plugins.Input {
return &HDDTemp{
Address: defaultAddress,
Devices: []string{"*"},


@@ -0,0 +1,80 @@
package hddtemp
import (
"testing"
hddtemp "github.com/influxdata/telegraf/plugins/inputs/hddtemp/go-hddtemp"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
type mockFetcher struct {
}
func (h *mockFetcher) Fetch(address string) ([]hddtemp.Disk, error) {
return []hddtemp.Disk{
hddtemp.Disk{
DeviceName: "Disk1",
Model: "Model1",
Temperature: 13,
Unit: "C",
},
hddtemp.Disk{
DeviceName: "Disk2",
Model: "Model2",
Temperature: 14,
Unit: "C",
},
}, nil
}
func newMockFetcher() *mockFetcher {
return &mockFetcher{}
}
func TestFetch(t *testing.T) {
hddtemp := &HDDTemp{
fetcher: newMockFetcher(),
Devices: []string{"*"},
}
acc := &testutil.Accumulator{}
err := hddtemp.Gather(acc)
require.NoError(t, err)
assert.Equal(t, acc.NFields(), 2)
var tests = []struct {
fields map[string]interface{}
tags map[string]string
}{
{
map[string]interface{}{
"temperature": int32(13),
},
map[string]string{
"device": "Disk1",
"model": "Model1",
"unit": "C",
"status": "",
},
},
{
map[string]interface{}{
"temperature": int32(14),
},
map[string]string{
"device": "Disk2",
"model": "Model2",
"unit": "C",
"status": "",
},
},
}
for _, test := range tests {
acc.AssertContainsTaggedFields(t, "hddtemp", test.fields, test.tags)
}
}


@@ -10,10 +10,11 @@ import (
"sync"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/plugins/parsers/influx"
"github.com/influxdata/telegraf/selfstat"
)
const (
@@ -41,8 +42,20 @@ type HTTPListener struct {
listener net.Listener
parser influx.InfluxParser
acc telegraf.Accumulator
acc plugins.Accumulator
pool *pool
BytesRecv selfstat.Stat
RequestsServed selfstat.Stat
WritesServed selfstat.Stat
QueriesServed selfstat.Stat
PingsServed selfstat.Stat
RequestsRecv selfstat.Stat
WritesRecv selfstat.Stat
QueriesRecv selfstat.Stat
PingsRecv selfstat.Stat
NotFoundsServed selfstat.Stat
BuffersCreated selfstat.Stat
}
const sampleConfig = `
@@ -71,16 +84,31 @@ func (h *HTTPListener) Description() string {
return "Influx HTTP write listener"
}
func (h *HTTPListener) Gather(_ telegraf.Accumulator) error {
log.Printf("D! The http_listener has created %d buffers", h.pool.ncreated())
func (h *HTTPListener) Gather(_ plugins.Accumulator) error {
h.BuffersCreated.Set(h.pool.ncreated())
return nil
}
// Start starts the http listener service.
func (h *HTTPListener) Start(acc telegraf.Accumulator) error {
func (h *HTTPListener) Start(acc plugins.Accumulator) error {
h.mu.Lock()
defer h.mu.Unlock()
tags := map[string]string{
"address": h.ServiceAddress,
}
h.BytesRecv = selfstat.Register("http_listener", "bytes_received", tags)
h.RequestsServed = selfstat.Register("http_listener", "requests_served", tags)
h.WritesServed = selfstat.Register("http_listener", "writes_served", tags)
h.QueriesServed = selfstat.Register("http_listener", "queries_served", tags)
h.PingsServed = selfstat.Register("http_listener", "pings_served", tags)
h.RequestsRecv = selfstat.Register("http_listener", "requests_received", tags)
h.WritesRecv = selfstat.Register("http_listener", "writes_received", tags)
h.QueriesRecv = selfstat.Register("http_listener", "queries_received", tags)
h.PingsRecv = selfstat.Register("http_listener", "pings_received", tags)
h.NotFoundsServed = selfstat.Register("http_listener", "not_founds_served", tags)
h.BuffersCreated = selfstat.Register("http_listener", "buffers_created", tags)
if h.MaxBodySize == 0 {
h.MaxBodySize = DEFAULT_MAX_BODY_SIZE
}
@@ -141,10 +169,16 @@ func (h *HTTPListener) httpListen() error {
}
func (h *HTTPListener) ServeHTTP(res http.ResponseWriter, req *http.Request) {
h.RequestsRecv.Incr(1)
defer h.RequestsServed.Incr(1)
switch req.URL.Path {
case "/write":
h.WritesRecv.Incr(1)
defer h.WritesServed.Incr(1)
h.serveWrite(res, req)
case "/query":
h.QueriesRecv.Incr(1)
defer h.QueriesServed.Incr(1)
// Deliver a dummy response to the query endpoint, as some InfluxDB
// clients test endpoint availability with a query
res.Header().Set("Content-Type", "application/json")
@@ -152,9 +186,12 @@ func (h *HTTPListener) ServeHTTP(res http.ResponseWriter, req *http.Request) {
res.WriteHeader(http.StatusOK)
res.Write([]byte("{\"results\":[]}"))
case "/ping":
h.PingsRecv.Incr(1)
defer h.PingsServed.Incr(1)
// respond to ping requests
res.WriteHeader(http.StatusNoContent)
default:
defer h.NotFoundsServed.Incr(1)
// Don't know how to respond to calls to other endpoints
http.NotFound(res, req)
}
@@ -195,6 +232,7 @@ func (h *HTTPListener) serveWrite(res http.ResponseWriter, req *http.Request) {
badRequest(res)
return
}
h.BytesRecv.Incr(int64(n))
if err == io.EOF {
if return400 {
@@ -248,7 +286,7 @@ func (h *HTTPListener) serveWrite(res http.ResponseWriter, req *http.Request) {
bufStart = 0
continue
}
if err := h.parse(buf[:i], now); err != nil {
if err := h.parse(buf[:i+1], now); err != nil {
log.Println("E! " + err.Error())
return400 = true
}
@@ -286,7 +324,7 @@ func badRequest(res http.ResponseWriter) {
}
func init() {
inputs.Add("http_listener", func() telegraf.Input {
inputs.Add("http_listener", func() plugins.Input {
return &HTTPListener{
ServiceAddress: ":8186",
}


@@ -8,7 +8,7 @@ import (
"strings"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/inputs"
)
@@ -141,7 +141,7 @@ func (h *HTTPResponse) HTTPGather() (map[string]interface{}, error) {
}
// Gather gets all metric fields and tags and returns any errors it encounters
func (h *HTTPResponse) Gather(acc telegraf.Accumulator) error {
func (h *HTTPResponse) Gather(acc plugins.Accumulator) error {
// Set default values
if h.ResponseTimeout.Duration < time.Second {
h.ResponseTimeout.Duration = time.Second * 5
@@ -174,7 +174,7 @@ func (h *HTTPResponse) Gather(acc telegraf.Accumulator) error {
}
func init() {
inputs.Add("http_response", func() telegraf.Input {
inputs.Add("http_response", func() plugins.Input {
return &HTTPResponse{}
})
}


@@ -37,6 +37,8 @@ You can also specify which keys from server response should be considered tags:
]
```
If the JSON response is an array of objects, then each object will be parsed with the same configuration.
You can also specify additional request parameters for the service:
```
@@ -150,3 +152,53 @@ httpjson_mycollector1_b_e,server='http://my.service.com/_stats' value=5
httpjson_mycollector2_load,server='http://service.net/json/stats' value=100
httpjson_mycollector2_users,server='http://service.net/json/stats' value=1335
```
# Example 3, Multiple Metrics in Response:
The response JSON can be treated as an array of data points that are all parsed with the same configuration.
```
[[inputs.httpjson]]
name = "mycollector"
servers = [
"http://my.service.com/_stats"
]
# HTTP method to use (case-sensitive)
method = "GET"
tag_keys = ["service"]
```
which responds with the following JSON:
```json
[
{
"service": "service01",
"a": 0.5,
"b": {
"c": "some text",
"d": 0.1,
"e": 5
}
},
{
"service": "service02",
"a": 0.6,
"b": {
"c": "some text",
"d": 0.2,
"e": 6
}
}
]
```
The collected metrics will be:
```
httpjson_mycollector_a,service='service01',server='http://my.service.com/_stats' value=0.5
httpjson_mycollector_b_d,service='service01',server='http://my.service.com/_stats' value=0.1
httpjson_mycollector_b_e,service='service01',server='http://my.service.com/_stats' value=5
httpjson_mycollector_a,service='service02',server='http://my.service.com/_stats' value=0.6
httpjson_mycollector_b_d,service='service02',server='http://my.service.com/_stats' value=0.2
httpjson_mycollector_b_e,service='service02',server='http://my.service.com/_stats' value=6
```


@@ -10,7 +10,7 @@ import (
"sync"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/plugins/parsers"
@@ -120,7 +120,7 @@ func (h *HttpJson) Description() string {
}
// Gathers data for all servers.
func (h *HttpJson) Gather(acc telegraf.Accumulator) error {
func (h *HttpJson) Gather(acc plugins.Accumulator) error {
var wg sync.WaitGroup
if h.client.HTTPClient() == nil {
@@ -169,14 +169,14 @@ func (h *HttpJson) Gather(acc telegraf.Accumulator) error {
// Gathers data from a particular server
// Parameters:
// acc : The telegraf Accumulator to use
// acc : The plugins.Accumulator to use
// serverURL: endpoint to send request to
// service : the service being queried
//
// Returns:
// error: Any error that may have occurred
func (h *HttpJson) gatherServer(
acc telegraf.Accumulator,
acc plugins.Accumulator,
serverURL string,
) error {
resp, responseTime, err := h.sendRequest(serverURL)
@@ -292,7 +292,7 @@ func (h *HttpJson) sendRequest(serverURL string) (string, float64, error) {
}
func init() {
inputs.Add("httpjson", func() telegraf.Input {
inputs.Add("httpjson", func() plugins.Input {
return &HttpJson{
client: &RealHTTPClient{},
ResponseTimeout: internal.Duration{


@@ -511,3 +511,52 @@ func TestHttpJson200Tags(t *testing.T) {
}
}
}
const validJSONArrayTags = `
[
{
"value": 15,
"role": "master",
"build": "123"
},
{
"value": 17,
"role": "slave",
"build": "456"
}
]`
// Test that array data is collected correctly
func TestHttpJsonArray200Tags(t *testing.T) {
httpjson := genMockHttpJson(validJSONArrayTags, 200)
for _, service := range httpjson {
if service.Name == "other_webapp" {
var acc testutil.Accumulator
err := service.Gather(&acc)
// Set responsetime
for _, p := range acc.Metrics {
p.Fields["response_time"] = 1.0
}
require.NoError(t, err)
assert.Equal(t, 8, acc.NFields())
assert.Equal(t, uint64(4), acc.NMetrics())
for _, m := range acc.Metrics {
if m.Tags["role"] == "master" {
assert.Equal(t, "123", m.Tags["build"])
assert.Equal(t, float64(15), m.Fields["value"])
assert.Equal(t, float64(1), m.Fields["response_time"])
assert.Equal(t, "httpjson_"+service.Name, m.Measurement)
} else if m.Tags["role"] == "slave" {
assert.Equal(t, "456", m.Tags["build"])
assert.Equal(t, float64(17), m.Fields["value"])
assert.Equal(t, float64(1), m.Fields["response_time"])
assert.Equal(t, "httpjson_"+service.Name, m.Measurement)
} else {
assert.FailNow(t, "unknown metric")
}
}
}
}
}


@@ -9,7 +9,7 @@ import (
"sync"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/inputs"
)
@@ -43,7 +43,7 @@ func (*InfluxDB) SampleConfig() string {
`
}
func (i *InfluxDB) Gather(acc telegraf.Accumulator) error {
func (i *InfluxDB) Gather(acc plugins.Accumulator) error {
if len(i.URLs) == 0 {
i.URLs = []string{"http://localhost:8086/debug/vars"}
}
@@ -94,43 +94,44 @@ type point struct {
}
type memstats struct {
Alloc int64 `json:"Alloc"`
TotalAlloc int64 `json:"TotalAlloc"`
Sys int64 `json:"Sys"`
Lookups int64 `json:"Lookups"`
Mallocs int64 `json:"Mallocs"`
Frees int64 `json:"Frees"`
HeapAlloc int64 `json:"HeapAlloc"`
HeapSys int64 `json:"HeapSys"`
HeapIdle int64 `json:"HeapIdle"`
HeapInuse int64 `json:"HeapInuse"`
HeapReleased int64 `json:"HeapReleased"`
HeapObjects int64 `json:"HeapObjects"`
StackInuse int64 `json:"StackInuse"`
StackSys int64 `json:"StackSys"`
MSpanInuse int64 `json:"MSpanInuse"`
MSpanSys int64 `json:"MSpanSys"`
MCacheInuse int64 `json:"MCacheInuse"`
MCacheSys int64 `json:"MCacheSys"`
BuckHashSys int64 `json:"BuckHashSys"`
GCSys int64 `json:"GCSys"`
OtherSys int64 `json:"OtherSys"`
NextGC int64 `json:"NextGC"`
LastGC int64 `json:"LastGC"`
PauseTotalNs int64 `json:"PauseTotalNs"`
NumGC int64 `json:"NumGC"`
GCCPUFraction float64 `json:"GCCPUFraction"`
Alloc int64 `json:"Alloc"`
TotalAlloc int64 `json:"TotalAlloc"`
Sys int64 `json:"Sys"`
Lookups int64 `json:"Lookups"`
Mallocs int64 `json:"Mallocs"`
Frees int64 `json:"Frees"`
HeapAlloc int64 `json:"HeapAlloc"`
HeapSys int64 `json:"HeapSys"`
HeapIdle int64 `json:"HeapIdle"`
HeapInuse int64 `json:"HeapInuse"`
HeapReleased int64 `json:"HeapReleased"`
HeapObjects int64 `json:"HeapObjects"`
StackInuse int64 `json:"StackInuse"`
StackSys int64 `json:"StackSys"`
MSpanInuse int64 `json:"MSpanInuse"`
MSpanSys int64 `json:"MSpanSys"`
MCacheInuse int64 `json:"MCacheInuse"`
MCacheSys int64 `json:"MCacheSys"`
BuckHashSys int64 `json:"BuckHashSys"`
GCSys int64 `json:"GCSys"`
OtherSys int64 `json:"OtherSys"`
NextGC int64 `json:"NextGC"`
LastGC int64 `json:"LastGC"`
PauseTotalNs int64 `json:"PauseTotalNs"`
PauseNs [256]int64 `json:"PauseNs"`
NumGC int64 `json:"NumGC"`
GCCPUFraction float64 `json:"GCCPUFraction"`
}
// Gathers data from a particular URL
// Parameters:
// acc : The telegraf Accumulator to use
// acc : The plugins.Accumulator to use
// url : endpoint to send request to
//
// Returns:
// error: Any error that may have occurred
func (i *InfluxDB) gatherURL(
acc telegraf.Accumulator,
acc plugins.Accumulator,
url string,
) error {
shardCounter := 0
@@ -202,6 +203,7 @@ func (i *InfluxDB) gatherURL(
"next_gc": m.NextGC,
"last_gc": m.LastGC,
"pause_total_ns": m.PauseTotalNs,
"pause_ns": m.PauseNs[(m.NumGC+255)%256],
"num_gc": m.NumGC,
"gcc_pu_fraction": m.GCCPUFraction,
},
@@ -256,7 +258,7 @@ func (i *InfluxDB) gatherURL(
}
func init() {
inputs.Add("influxdb", func() telegraf.Input {
inputs.Add("influxdb", func() plugins.Input {
return &InfluxDB{
Timeout: internal.Duration{Duration: time.Second * 5},
}
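A note on the `pause_ns` field added above: `runtime.MemStats.PauseNs` is a circular buffer of the 256 most recent GC pause durations, and the latest completed pause sits at index `(NumGC+255)%256`, which is exactly the expression the gatherer uses. A minimal standalone illustration:

```go
package main

import (
	"fmt"
	"runtime"
)

func main() {
	var m runtime.MemStats
	runtime.ReadMemStats(&m)

	// PauseNs is a ring buffer of recent GC pause times in nanoseconds;
	// the most recent entry is at (NumGC+255)%256.
	last := m.PauseNs[(m.NumGC+255)%256]
	fmt.Printf("last GC pause: %d ns after %d collections\n", last, m.NumGC)
}
```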


@@ -86,6 +86,7 @@ func TestInfluxDB(t *testing.T) {
"frees": int64(381008),
"heap_idle": int64(15802368),
"pause_total_ns": int64(5132914),
"pause_ns": int64(127053),
"lookups": int64(77),
"heap_sys": int64(33849344),
"mcache_sys": int64(16384),


@@ -0,0 +1,83 @@
# Internal Input Plugin
The `internal` plugin collects metrics about the telegraf agent itself.
Note that some metrics are aggregates across all instances of one type of
plugin.
### Configuration:
```toml
# Collect statistics about itself
[[inputs.internal]]
## If true, collect telegraf memory stats.
# collect_memstats = true
```
### Measurements & Fields:
memstats are taken from the Go runtime: https://golang.org/pkg/runtime/#MemStats
- internal\_memstats
- alloc\_bytes
- frees
- heap\_alloc\_bytes
- heap\_idle\_bytes
- heap\_in\_use\_bytes
- heap\_objects\_bytes
- heap\_released\_bytes
- heap\_sys\_bytes
- mallocs
- num\_gc
- pointer\_lookups
- sys\_bytes
- total\_alloc\_bytes
agent stats collect aggregate stats on all telegraf plugins.
- internal\_agent
- gather\_errors
- metrics\_dropped
- metrics\_gathered
- metrics\_written
internal\_gather stats collect aggregate stats on all input plugins
that are of the same input type. They are tagged with `input=<plugin_name>`.
- internal\_gather
- gather\_time\_ns
- metrics\_gathered
internal\_write stats collect aggregate stats on all output plugins
that are of the same output type. They are tagged with `output=<plugin_name>`.
- internal\_write
- buffer\_limit
- buffer\_size
- metrics\_written
- metrics\_filtered
- write\_time\_ns
internal\_\<plugin\_name\> are metrics which are defined on a per-plugin basis, and
usually contain tags which differentiate each instance of a particular type of
plugin.
- internal\_\<plugin\_name\>
- individual plugin-specific fields, such as request counts.
### Tags:
All measurements for specific plugins are tagged with information relevant
to each particular plugin.
### Example Output:
```
internal_memstats,host=tyrion alloc_bytes=4457408i,sys_bytes=10590456i,pointer_lookups=7i,mallocs=17642i,frees=7473i,heap_sys_bytes=6848512i,heap_idle_bytes=1368064i,heap_in_use_bytes=5480448i,heap_released_bytes=0i,total_alloc_bytes=6875560i,heap_alloc_bytes=4457408i,heap_objects_bytes=10169i,num_gc=2i 1480682800000000000
internal_agent,host=tyrion metrics_written=18i,metrics_dropped=0i,metrics_gathered=19i,gather_errors=0i 1480682800000000000
internal_write,output=file,host=tyrion buffer_limit=10000i,write_time_ns=636609i,metrics_written=18i,buffer_size=0i 1480682800000000000
internal_gather,input=internal,host=tyrion metrics_gathered=19i,gather_time_ns=442114i 1480682800000000000
internal_gather,input=http_listener,host=tyrion metrics_gathered=0i,gather_time_ns=167285i 1480682800000000000
internal_http_listener,address=:8186,host=tyrion queries_received=0i,writes_received=0i,requests_received=0i,buffers_created=0i,requests_served=0i,pings_received=0i,bytes_received=0i,not_founds_served=0i,pings_served=0i,queries_served=0i,writes_served=0i 1480682800000000000
```
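The per-plugin measurements described above come from the `selfstat` registry introduced in this changeset: a plugin registers a named stat once (as `http_listener` does in `Start`), updates it from its hot path, and the `internal` plugin's `Gather` walks the registry. A minimal sketch of the pattern, using a hypothetical `myplugin` namespace; only `Register`, `Incr`, `Set`, and `Metrics` as they appear in this diff are relied on here:

```go
package main

import (
	"fmt"

	"github.com/influxdata/telegraf/selfstat"
)

func main() {
	// Register once, typically when the plugin starts; tags distinguish
	// instances (http_listener uses its listen address, for example).
	served := selfstat.Register("myplugin", "requests_served",
		map[string]string{"address": ":8080"})

	// Update from the hot path.
	served.Incr(1)
	served.Incr(1)

	// The internal plugin emits each registry entry as a measurement
	// named internal_<plugin>, here internal_myplugin.
	for _, m := range selfstat.Metrics() {
		fmt.Println(m.Name(), m.Fields(), m.Tags())
	}
}
```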


@@ -0,0 +1,66 @@
package internal
import (
"runtime"
"github.com/influxdata/telegraf/plugins"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/selfstat"
)
type Self struct {
CollectMemstats bool
}
func NewSelf() plugins.Input {
return &Self{
CollectMemstats: true,
}
}
var sampleConfig = `
## If true, collect telegraf memory stats.
# collect_memstats = true
`
func (s *Self) Description() string {
return "Collect statistics about itself"
}
func (s *Self) SampleConfig() string {
return sampleConfig
}
func (s *Self) Gather(acc plugins.Accumulator) error {
if s.CollectMemstats {
m := &runtime.MemStats{}
runtime.ReadMemStats(m)
fields := map[string]interface{}{
"alloc_bytes": m.Alloc, // bytes allocated and not yet freed
"total_alloc_bytes": m.TotalAlloc, // bytes allocated (even if freed)
"sys_bytes": m.Sys, // bytes obtained from system (sum of XxxSys below)
"pointer_lookups": m.Lookups, // number of pointer lookups
"mallocs": m.Mallocs, // number of mallocs
"frees": m.Frees, // number of frees
// Main allocation heap statistics.
"heap_alloc_bytes": m.HeapAlloc, // bytes allocated and not yet freed (same as Alloc above)
"heap_sys_bytes": m.HeapSys, // bytes obtained from system
"heap_idle_bytes": m.HeapIdle, // bytes in idle spans
"heap_in_use_bytes": m.HeapInuse, // bytes in non-idle span
"heap_released_bytes": m.HeapReleased, // bytes released to the OS
"heap_objects_bytes": m.HeapObjects, // total number of allocated objects
"num_gc": m.NumGC,
}
acc.AddFields("internal_memstats", fields, map[string]string{})
}
for _, m := range selfstat.Metrics() {
acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time())
}
return nil
}
func init() {
inputs.Add("internal", NewSelf)
}


@@ -0,0 +1,62 @@
package internal
import (
"testing"
"github.com/influxdata/telegraf/selfstat"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
)
func TestSelfPlugin(t *testing.T) {
s := NewSelf()
acc := &testutil.Accumulator{}
s.Gather(acc)
assert.True(t, acc.HasMeasurement("internal_memstats"))
// test that a registered stat is incremented
stat := selfstat.Register("mytest", "test", map[string]string{"test": "foo"})
stat.Incr(1)
stat.Incr(2)
s.Gather(acc)
acc.AssertContainsTaggedFields(t, "internal_mytest",
map[string]interface{}{
"test": int64(3),
},
map[string]string{
"test": "foo",
},
)
acc.ClearMetrics()
// test that a registered stat is set properly
stat.Set(101)
s.Gather(acc)
acc.AssertContainsTaggedFields(t, "internal_mytest",
map[string]interface{}{
"test": int64(101),
},
map[string]string{
"test": "foo",
},
)
acc.ClearMetrics()
// test that regular and timing stats can share the same measurement, and
// that timings are set properly.
timing := selfstat.RegisterTiming("mytest", "test_ns", map[string]string{"test": "foo"})
timing.Incr(100)
timing.Incr(200)
s.Gather(acc)
acc.AssertContainsTaggedFields(t, "internal_mytest",
map[string]interface{}{
"test": int64(101),
"test_ns": int64(150),
},
map[string]string{
"test": "foo",
},
)
}


@@ -5,7 +5,7 @@ import (
"strings"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins"
"github.com/influxdata/telegraf/plugins/inputs"
)
@@ -37,7 +37,7 @@ func (m *Ipmi) Description() string {
return "Read metrics from one or many bare metal servers"
}
func (m *Ipmi) Gather(acc telegraf.Accumulator) error {
func (m *Ipmi) Gather(acc plugins.Accumulator) error {
if m.runner == nil {
m.runner = CommandRunner{}
}
@@ -51,7 +51,7 @@ func (m *Ipmi) Gather(acc telegraf.Accumulator) error {
return nil
}
func (m *Ipmi) gatherServer(serv string, acc telegraf.Accumulator) error {
func (m *Ipmi) gatherServer(serv string, acc plugins.Accumulator) error {
conn := NewConnection(serv)
res, err := m.runner.Run(conn, "sdr")
@@ -123,7 +123,7 @@ func transform(s string) string {
}
func init() {
inputs.Add("ipmi_sensor", func() telegraf.Input {
inputs.Add("ipmi_sensor", func() plugins.Input {
return &Ipmi{}
})
}


@@ -9,7 +9,7 @@ import (
"strconv"
"strings"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins"
"github.com/influxdata/telegraf/plugins/inputs"
)
@@ -42,7 +42,7 @@ func (ipt *Iptables) SampleConfig() string {
}
// Gather gathers iptables packets and bytes throughput from the configured tables and chains.
func (ipt *Iptables) Gather(acc telegraf.Accumulator) error {
func (ipt *Iptables) Gather(acc plugins.Accumulator) error {
if ipt.Table == "" || len(ipt.Chains) == 0 {
return nil
}
@@ -88,7 +88,7 @@ var chainNameRe = regexp.MustCompile(`^Chain\s+(\S+)`)
var fieldsHeaderRe = regexp.MustCompile(`^\s*pkts\s+bytes\s+`)
var valuesRe = regexp.MustCompile(`^\s*([0-9]+)\s+([0-9]+)\s+.*?(/\*\s(.*)\s\*/)?$`)
func (ipt *Iptables) parseAndGather(data string, acc telegraf.Accumulator) error {
func (ipt *Iptables) parseAndGather(data string, acc plugins.Accumulator) error {
lines := strings.Split(data, "\n")
if len(lines) < 3 {
return nil
@@ -120,7 +120,7 @@ func (ipt *Iptables) parseAndGather(data string, acc telegraf.Accumulator) error
type chainLister func(table, chain string) (string, error)
func init() {
inputs.Add("iptables", func() telegraf.Input {
inputs.Add("iptables", func() plugins.Input {
ipt := new(Iptables)
ipt.lister = ipt.chainList
return ipt

Some files were not shown because too many files have changed in this diff.