Compare commits

...

153 Commits

Author SHA1 Message Date
Cameron Sparr
94de9dca1f Fix single quote parsing of TOML durations
closes #2023
2016-11-14 12:42:14 +00:00
John Engelman
8ecfe13bf8 Update docs on Cloudwatch. Set default period to 5m. (#2000) 2016-11-07 12:14:55 +00:00
John Engelman
a90a687d89 Fix up AWS plugin docs so they don't use single quotes. (#1991)
Also don't use named returns in fetchNamespaceMetrics since it's
non-standard for the rest of the codebase.
2016-11-04 13:19:41 +00:00
Cameron Sparr
5ef6fe1d85 Update etc/telegraf.conf 2016-11-04 13:19:32 +00:00
Cameron Sparr
f9aef06a3c CircleCI script, do not explicitly set version tag 2016-11-03 17:07:25 +00:00
Cameron Sparr
105bb65f73 Add release 1.2 section to changelog 2016-11-03 17:01:53 +00:00
Cameron Sparr
16081b2d1a Update etc/telegraf.conf 2016-11-03 14:31:55 +00:00
Matteo Cerutti
e43cfc2fce fix leap_status value in chrony input plugin (#1983) 2016-11-03 10:46:54 +00:00
Prunar
137272afea Update README.md (#1963)
Typo
2016-11-02 14:25:09 +00:00
Cameron Sparr
2150510bd4 nats_consumer: buffer incoming messages
fixes #1956
2016-10-27 13:39:27 +01:00
albundy83
fc59757a1a Just fix typo (#1962) 2016-10-27 11:45:17 +01:00
Cameron Sparr
0cfa0d419a udp_listener & tcp_listener set default values
closes #1936
2016-10-27 10:25:24 +01:00
Paulo Pires
522658bd07 Fix NATS plug-ins reconnection logic (#1955)
* NATS output plug-in now retries to reconnect forever after a lost connection.

* NATS input plug-in now retries to reconnect forever after a lost connection.

* Fixes #1953
2016-10-26 15:45:33 +01:00
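
The "reconnect forever" behaviour described in this commit can be sketched with the NATS Go client pinned in Godeps; the option names below belong to that client, but whether the plugin sets exactly these fields is an assumption:

```go
package main

import (
	"log"

	"github.com/nats-io/nats"
)

func main() {
	opts := nats.DefaultOptions
	opts.Url = "nats://localhost:4222" // example server address
	opts.AllowReconnect = true
	// a negative MaxReconnect tells the client to keep retrying indefinitely
	// instead of giving up after the default number of attempts
	opts.MaxReconnect = -1

	conn, err := opts.Connect()
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	log.Println("connected to", conn.ConnectedUrl())
}
```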
Jonathan Chauncey
b1a97e35b9 fix(kubernetes): Only initialize RoundTripper once (#1951)
fixes #1933
2016-10-26 13:47:35 +01:00
Cameron Sparr
c66363cba5 Update Go version: 1.7.1->1.7.3 2016-10-25 14:49:21 +01:00
Cameron Sparr
61269c3500 Update config generation docs
closes #1925
2016-10-25 14:46:50 +01:00
Priyank Trivedi
393d129982 Fix typo from 'Proctstas' to 'Procstat' in procstat plugin's README (#1945) 2016-10-25 13:57:55 +01:00
Cameron Sparr
80d4864844 Only install fpm,rpm,boto if we need them 2016-10-25 13:31:48 +01:00
Cameron Sparr
f729fa990d Unit testing for internal.Duration Unmarshal
closes #1926
2016-10-25 13:11:32 +01:00
Alex Zorin
662db7a944 Fix panic in internal.Duration UnmarshalTOML 2016-10-25 18:30:01 +11:00
Cameron Sparr
c849b58de9 http_listener input unit tests 2016-10-24 18:17:49 +01:00
Cameron Sparr
097b1e09db http listener refactor
in this commit:

- chunks out the http request body to avoid making very large
  allocations.
- establishes a limit for the maximum http request body size that the
  listener will accept.
- utilizes a pool of byte buffers to reduce GC pressure.
2016-10-24 18:17:49 +01:00
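
The buffer-pool and body-size-limit ideas in this commit can be sketched roughly as below; the sizes, handler name, and parsing stub are illustrative, not the plugin's actual code (which must also handle points split across chunk boundaries):

```go
package main

import (
	"io"
	"net/http"
	"sync"
)

const (
	maxBodySize   = 512 * 1024 * 1024 // reject request bodies larger than this (illustrative)
	readChunkSize = 64 * 1024         // read the body in fixed-size chunks (illustrative)
)

// a pool of reusable buffers, so each request does not allocate a fresh slice
var bufPool = sync.Pool{
	New: func() interface{} { return make([]byte, readChunkSize) },
}

func handleWrite(w http.ResponseWriter, req *http.Request) {
	// cap the total number of bytes we are willing to read from one request
	body := http.MaxBytesReader(w, req.Body, maxBodySize)

	buf := bufPool.Get().([]byte)
	defer bufPool.Put(buf)

	for {
		n, err := body.Read(buf)
		if n > 0 {
			_ = buf[:n] // parse this chunk as line protocol here
		}
		if err == io.EOF {
			break
		}
		if err != nil {
			http.Error(w, "request body too large or unreadable", http.StatusRequestEntityTooLarge)
			return
		}
	}
	w.WriteHeader(http.StatusNoContent)
}

func main() {
	http.HandleFunc("/write", handleWrite)
	_ = http.ListenAndServe(":8186", nil) // example listen address
}
```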
John Hu
babd37bf35 Typo (#1924) 2016-10-21 14:11:03 +01:00
David Norton
91f48e7ad5 Merge pull request #1847 from jchauncey/kubernetes-plugin
feat(kubernetes): Add kubernetes input plugin
2016-10-17 15:58:47 -04:00
Jonathan Chauncey
a12bd878e0 feat(kubernetes): Add kubernetes input plugin
closes #1774
2016-10-17 15:40:55 -04:00
Cameron Sparr
a4e8f24b16 Set reasonable defaults in ping plugin
closes #1742
2016-10-17 15:21:09 +01:00
Cameron Sparr
a65447d22e Use mysql.ParseDSN func instead of url.Parse
The MySQL DB driver has its own DSN parsing function. Previously we
were using the url.Parse function, but this causes problems because a
valid MySQL DSN can be an invalid http URL, namely when using some
special characters in the password.

This change uses the MySQL DB driver's builtin ParseDSN function and
applies a timeout parameter natively via that.

Another benefit of this change is that we fail earlier if given an
invalid MySQL DSN.

closes #870
closes #1842
2016-10-12 17:10:28 +01:00
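
A minimal sketch of the approach, using the go-sql-driver/mysql package pinned in Godeps; the helper name, timeout value, and example DSN are illustrative:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/go-sql-driver/mysql"
)

// dsnAddTimeout parses a MySQL DSN with the driver's own parser (so passwords
// containing URL-special characters are handled correctly), applies a dial
// timeout if none is set, and re-serializes the DSN. Invalid DSNs fail here,
// before any connection attempt is made.
func dsnAddTimeout(dsn string, timeout time.Duration) (string, error) {
	conf, err := mysql.ParseDSN(dsn)
	if err != nil {
		return "", err
	}
	if conf.Timeout == 0 {
		conf.Timeout = timeout
	}
	return conf.FormatDSN(), nil
}

func main() {
	// a password like "p@ss/word" can trip up url.Parse but is a valid DSN
	out, err := dsnAddTimeout("user:p@ss/word@tcp(127.0.0.1:3306)/mydb", 5*time.Second)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out)
}
```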
Cameron Sparr
b00ad65b08 Log config file parsing errors properly
closes #1344
2016-10-12 16:50:22 +01:00
Cameron Sparr
a84ce5d5cb drop metrics outside of the aggregators period 2016-10-12 14:56:03 +01:00
Cameron Sparr
8ca4a50c18 delete nil fields in the metric maker.
closes #1771
2016-10-12 14:50:19 +01:00
Cameron Sparr
03b2984ac2 Fixup some code based on feedback from @dgnorton 2016-10-12 14:50:19 +01:00
Cameron Sparr
9540a6532f Update influxdb dependency for new models.Tags 2016-10-12 14:50:19 +01:00
Cameron Sparr
cace663bbf Processor & Aggregator Contrib doc 2016-10-12 14:50:19 +01:00
Cameron Sparr
acfdd15aa9 Processor & Aggregator configuration doccing 2016-10-12 14:50:19 +01:00
Cameron Sparr
78f544c0aa Support --aggregator-filter & --processor-filter 2016-10-12 14:50:19 +01:00
Cameron Sparr
2175a72fcc Rebase fixup 2016-10-12 14:50:19 +01:00
Cameron Sparr
b03c1d9691 Support ordering of processor plugins 2016-10-12 14:50:19 +01:00
Cameron Sparr
fead80844e Refactor handling of MinMax functionality into RunningAggregator
allows for easier addition of a sliding window at a later time.

Also makes `period` be a generic argument for all aggregator plugins.
2016-10-12 14:50:19 +01:00
Cameron Sparr
ef885eda62 Change minmax aggregator to store float64 2016-10-12 14:50:19 +01:00
Cameron Sparr
64a71263a1 Support Processor & Aggregator Plugins
closes #1726
2016-10-12 14:50:19 +01:00
Cameron Sparr
974221f0cf Fix phpfpm fcgi client panic when URL doesn't exist
closes #1886
2016-10-12 11:58:38 +01:00
Ririsoft
bccef2856d Revert "Moving cgroup path name to field from tag to reduce cardinality (#1457)"
This was introducing a regression with the influxdb output, leading to
collisions and missing points.
This reverts commit 53f40063b3.

closes #1724
closes #1796
2016-10-12 11:04:28 +01:00
Patrick Hemmer
80df3f7634 snmp: fix initialization of table fields in manual tables (#1836) 2016-10-12 11:00:39 +01:00
Cameron Sparr
e96f7a9b12 graphite parser: handle multiple templates with an empty filter
Previously, the graphite parser would simply overwrite any template that
had an identical filter to a previous template. This included the empty
filter.

Now we will still overwrite, but first we will sort to make sure that
the most "specific" template always matches.

closes #1731
2016-10-11 15:22:51 +01:00
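
One hedged way to picture the "most specific template wins" ordering, approximating specificity by counting non-wildcard, dot-separated filter parts (the parser's real ordering rules may differ):

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

type template struct {
	filter  string // e.g. "cpu.*"; "" is the catch-all filter
	pattern string // e.g. "measurement.host.field"
}

// specificity counts the filter parts that are not wildcards, so
// "cpu.load.*" beats "cpu.*", which beats the empty catch-all filter.
func specificity(filter string) int {
	if filter == "" {
		return 0
	}
	n := 0
	for _, part := range strings.Split(filter, ".") {
		if part != "*" {
			n++
		}
	}
	return n
}

type bySpecificity []template

func (s bySpecificity) Len() int      { return len(s) }
func (s bySpecificity) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s bySpecificity) Less(i, j int) bool {
	return specificity(s[i].filter) > specificity(s[j].filter)
}

func main() {
	templates := []template{
		{"", "measurement"}, // catch-all
		{"cpu.*", "measurement.host.field"},
		{"cpu.load.*", "host.measurement.field"},
	}
	// try the most specific filter first, so the catch-all no longer
	// masks (or is masked by) narrower templates
	sort.Sort(bySpecificity(templates))
	for _, t := range templates {
		fmt.Printf("%q -> %s\n", t.filter, t.pattern)
	}
}
```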
Cameron Sparr
2bbb6aa6f2 Add doc for SNMP debug tips (#1831) 2016-10-11 14:48:08 +01:00
Cameron Sparr
1ff721ad84 Add riemann output plugin deprecation message 2016-10-11 12:28:20 +01:00
Eric
3e3b094270 Only log warning on type when in debug mode.
closes #1793
2016-10-11 11:35:43 +01:00
Eric
1f7a8fceef Fixed JSON serialization to make sure only value types supported by OpenTSDB are sent, and made sure numbers are sent un-quoted even though the OpenTSDB API accepts them quoted, as quoted numbers are not clean JSON. 2016-10-11 11:32:24 +01:00
Marko Crnic
b702a9758b haproxy/README: make quotes consistent
closes #1700
2016-10-11 11:30:22 +01:00
Marko Crnic
3b607aa8ae haproxy: add README covering basics of the plugin 2016-10-11 11:29:04 +01:00
Marko Crnic
4a4a6892f9 haproxy: update HAproxy docs URL 2016-10-11 11:29:04 +01:00
Marko Crnic
56b627dfe2 haproxy_test: extend tests to cover name globbing 2016-10-11 11:29:04 +01:00
Marko Crnic
5c87b92976 haproxy_test: define expected results in one place
Map holding expected results was defined in multiple places, making test
cases a bit hard to read. This way we can change our expectations of
good results in one place and have them affect multiple test cases.
2016-10-11 11:29:04 +01:00
Marko Crnic
dbcc312b0e haproxy: clarify handling of http and socket addresses
This behaviour was introduced along with socket support, but never got
documented properly.
2016-10-11 11:29:04 +01:00
Marko Crnic
2d842fefb8 haproxy: add support for socket name globbing 2016-10-11 11:29:04 +01:00
Marko Crnic
d63e3c8cc4 haproxy: move socket address detection to own function 2016-10-11 11:29:04 +01:00
Stian Øvrevåge
187a894fe9 Create CONFIG-EXAMPLES.md with a switch interface example
Added a standard example for collecting interface metrics from switches or routers and tagging them properly.

closes #1666
2016-10-11 11:00:25 +01:00
Cameron Sparr
ca55c4a55d Remove COMING SOON: multiple statsd fields 2016-10-11 10:57:34 +01:00
Cameron Sparr
d627bdbbdb logparser: allow numbers in ident & auth parameters
fixes #1810
2016-10-10 11:27:35 +01:00
Edie Zhang
4f06f6b3d8 adding the tags in the graylog output plugin
closes #1861
2016-10-07 12:24:21 +01:00
Cameron Sparr
7f0fe78615 Changelog update for systemd log change 2016-10-06 17:48:23 +01:00
Ririsoft
5913f7cb36 Log to systemd journal
Let's align to InfluxDB 1.0 logging policy and log to systemd journal by
default.

closes #1732
2016-10-06 17:48:22 +01:00
James Carr
8dc42ad9f2 Add idle_since to emitted metrics (#1844) 2016-10-06 14:26:53 +01:00
Cameron Sparr
886bdd2ef2 changelog update 2016-10-06 14:25:28 +01:00
Patrick Hemmer
5a86a2ff26 snmp: return error on unknown conversion type (#1853) 2016-10-06 14:23:51 +01:00
zensqlmonitor
817d696628 SQL Server plugin: Fix WaitStats issue (#1859)
Issue #1854
2016-10-06 14:21:14 +01:00
Cameron Sparr
4ab0344ebf Update changelog & readme for 1.0.1 2016-10-05 08:41:58 +01:00
Patrick Hemmer
7b05170145 update to latest gosnmp (#1850) 2016-10-05 08:40:56 +01:00
Patrick Hemmer
b48ad4b737 fix snmp emitting empty fields
closes #1848
closes #1835
2016-10-04 16:25:16 +01:00
Patrick Hemmer
9feb639bbd fix translating snmp fields not in MIB (#1846) 2016-10-04 16:22:15 +01:00
Cameron Sparr
ce5054c850 Changelog update 2016-10-03 18:20:10 +01:00
Cameron Sparr
c7834209d2 Major Logging Overhaul
in this commit:

- centralize logging output handler.
- set global Info/Debug/Error log levels based on config file or flags.
- remove per-plugin debug arg handling.
- add a I!, D!, or E! to every log message.
- add configuration option to specify where to send logs.

closes #1786
2016-10-03 17:13:03 +01:00
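
The I!/D!/E! prefix convention can be pictured with a small level-filtering writer; this is only a sketch of the idea, not Telegraf's actual logging code:

```go
package main

import (
	"bytes"
	"io"
	"log"
	"os"
)

var rank = map[byte]int{'D': 0, 'I': 1, 'E': 2}

// levelWriter drops log lines whose "D!"/"I!"/"E!" prefix is below the
// configured minimum level, so one central writer controls verbosity.
type levelWriter struct {
	out io.Writer
	min int
}

func (w *levelWriter) Write(p []byte) (int, error) {
	// lines look like: "2016/10/03 17:13:03 I! starting agent"
	if i := bytes.Index(p, []byte("! ")); i >= 1 {
		if lvl, ok := rank[p[i-1]]; ok && lvl < w.min {
			return len(p), nil // below the threshold: swallow the line
		}
	}
	return w.out.Write(p)
}

func main() {
	// show I! and E! messages, discard D! (debug)
	log.SetOutput(&levelWriter{out: os.Stderr, min: rank['I']})

	log.Printf("D! this debug line is dropped")
	log.Printf("I! agent config loaded")
	log.Printf("E! plugin error: %v", io.EOF)
}
```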
Cameron Sparr
78ced6bc30 Use a bufio.Scanner in http listener
this prevents potentially very large allocations due to a very large
chunk being sent from a client.

fixes #1823
2016-09-29 16:07:51 +01:00
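
A small sketch of the bufio.Scanner idea, with an illustrative per-line cap (the listener's real limits may differ):

```go
package main

import (
	"bufio"
	"fmt"
	"strings"
)

func main() {
	body := strings.NewReader("cpu usage=1\nmem used=2\n")

	scanner := bufio.NewScanner(body)
	// start with a 64 KB buffer but never grow past 1 MB per line, so one
	// enormous client chunk cannot force a huge allocation
	scanner.Buffer(make([]byte, 64*1024), 1024*1024)

	for scanner.Scan() {
		fmt.Println("parse:", scanner.Text()) // one line-protocol point at a time
	}
	if err := scanner.Err(); err != nil {
		// bufio.ErrTooLong shows up here if a single line exceeded the cap
		fmt.Println("read error:", err)
	}
}
```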
Cameron Sparr
ca8e512e5b Update changelog 2016-09-28 16:12:32 +01:00
zensqlmonitor
573628dbdd Fix collation issue 2016-09-28 16:11:00 +01:00
Peter Murray
e477620dc5 Making '-service' flags work from a non-interactive session, i.e. Ansible, related to #1760 2016-09-28 16:09:43 +01:00
Łukasz Harasimowicz
32268fb25b Disable mesos task statistics until we find a better way to deal with them.
Due to the very real problem of generating a vast number of data series through
mesos task metrics, this feature is disabled until a better solution is found.
2016-09-28 16:07:35 +01:00
Łukasz Harasimowicz
80391bfe1f Fixed tags on mesos_task metrics.
Tagging values by executor_id can create quite a lot of data series
in InfluxDB, so we should stick to framework_id and server.
2016-09-28 16:07:35 +01:00
Cameron Sparr
e19845c202 Load config directory using filepath.Walk
closes #1137
2016-09-28 16:01:52 +01:00
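
A hedged sketch of gathering .conf files with filepath.Walk, as the commit title describes; the directory and suffix filter here are illustrative:

```go
package main

import (
	"fmt"
	"log"
	"os"
	"path/filepath"
	"strings"
)

// loadDirectory returns the .conf files found under dir, letting
// filepath.Walk handle traversal and per-entry errors.
func loadDirectory(dir string) ([]string, error) {
	var files []string
	err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if info.IsDir() || !strings.HasSuffix(info.Name(), ".conf") {
			return nil
		}
		files = append(files, path)
		return nil
	})
	return files, err
}

func main() {
	files, err := loadDirectory("/etc/telegraf/telegraf.d") // example path
	if err != nil {
		log.Fatal(err)
	}
	for _, f := range files {
		fmt.Println("would load:", f)
	}
}
```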
Cameron Sparr
52134555d6 globpath: only walk tree if ** is defined
closes #1517
2016-09-28 15:44:29 +01:00
Cameron Sparr
e7e39df6a0 Default SNMP parameter changes
max-repetitions = 10 is the default of net-snmp utils according to
http://net-snmp.sourceforge.net/docs/man/snmpbulkwalk.html

retries = 3 is the default of gosnmp:
https://godoc.org/github.com/soniah/gosnmp#pkg-variables

This could address some of the performance issues reported
in #1665
2016-09-28 14:34:20 +01:00
Patrick Hemmer
055ef168ae add oid_index_suffix to snmp plugin 2016-09-27 11:30:25 +01:00
Patrick Hemmer
2778b7be30 add snmp conversions for MAC addresses & IPs 2016-09-27 11:30:25 +01:00
Patrick Hemmer
953db51b2c Adjust snmp translation to return conversion info.
Also consolidated the translation code to obtain all info with just 1 command execution.

Also split test command mocks out to their own file for cleanliness.
2016-09-27 11:30:25 +01:00
Cameron Sparr
c043461f6c Fix varnish plugin to use default values
closes #1752
2016-09-23 16:06:33 +01:00
Cameron Sparr
ddc07f9ef8 Fix powerdns integer parse error handling
closes #1751
2016-09-23 16:05:15 +01:00
lost_z
2cf1db0837 add mysql uptime (#1735) 2016-09-23 15:59:22 +01:00
Cameron Sparr
17e6496830 update changelog 2016-09-23 11:38:52 +01:00
Vinh Quốc Nguyễn
1d10eda84e Fix crash when allowed pending messages wasn't set (#1785)
The default is 0, so we hit a division-by-zero error and crash. This check
ensures we will not crash, and instead `log` and continue to let telegraf run.

Also, we set the default allowed pending message number to 10000.
2016-09-23 11:37:47 +01:00
Daniele Gozzi
9ea3dbeee8 Allow numeric and non-string values for tag_keys. (#1782)
* Allow numeric and non-string values for tag_keys.

According to the go documentation the JSON deserializer only produces these
base types in output:
- string
- bool
- float64
- nil
With this patch bool, float64 and nil values get converted to a string when
their field key is specified in tag_keys. Previously the field was simply
discarded.

* Updated handling of nil for passing tests.

The automated tests are less than trivial to reproduce locally for me,
so I hope CircleCI won't mind...

* Updated changelog entries with PR and issue links.
2016-09-21 18:07:35 +01:00
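
A rough sketch of the conversion this change describes: scalars the JSON decoder produces (string, bool, float64, nil) are rendered as strings when their key is listed in tag_keys. The names, example document, and exact string forms are illustrative:

```go
package main

import (
	"encoding/json"
	"fmt"
	"strconv"
)

// tagValue turns any scalar the JSON decoder can produce into a tag string;
// previously non-string values listed in tag_keys were simply discarded.
func tagValue(v interface{}) (string, bool) {
	switch t := v.(type) {
	case string:
		return t, true
	case bool:
		return strconv.FormatBool(t), true
	case float64:
		return strconv.FormatFloat(t, 'f', -1, 64), true
	case nil:
		return "", true // rendered as empty here; the plugin's choice may differ
	default:
		return "", false // arrays and objects still make no sense as tags
	}
}

func main() {
	raw := []byte(`{"host": "db-1", "shard": 3, "primary": true}`)
	var parsed map[string]interface{}
	if err := json.Unmarshal(raw, &parsed); err != nil {
		panic(err)
	}

	tagKeys := []string{"host", "shard", "primary"} // as listed in the plugin config
	tags := map[string]string{}
	for _, k := range tagKeys {
		if s, ok := tagValue(parsed[k]); ok {
			tags[k] = s
		}
	}
	fmt.Println(tags) // e.g. map[host:db-1 primary:true shard:3]
}
```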
Rikaard Hosein
100501ba72 statsd input plugin correctly handles colons in data-dog tag values now (#1794)
* Code correctly handles colons in tag values now

* Modified existing datadog tag test to include a tag value containing a colon
2016-09-21 14:37:42 +01:00
Cameron Sparr
f12368698b Update etc/telegraf.conf
closes #1789
2016-09-21 11:53:06 +01:00
Ross McDonald
6b25a73629 Add container state metrics to docker plugin (#1791)
* Add container state metrics to docker plugin.

* Update changelog.
2016-09-21 10:37:49 +01:00
David Moravek
90c7475c68 Fix sysstat resource leak (#1792) 2016-09-21 10:19:59 +01:00
Cameron Sparr
6648c101dd Add configurable timeout to influxdb input
closes #1773
2016-09-16 16:50:39 +01:00
Cameron Sparr
8d3285522c Prometheus output: do not remake metrics map each write
closes #1775
2016-09-16 16:50:39 +01:00
David Norton
b613405f42 Merge pull request #1768 from influxdata/dgn-speedup-statsd-parser
speed up statsd parser
2016-09-15 10:46:56 -04:00
David Norton
e999298078 speed up statsd parser 2016-09-15 08:11:06 -04:00
David Norton
0f0ab953f6 Merge pull request #1766 from influxdata/dgn-statsd-parsing-benchmarks
add statsd parsing benchmarks
2016-09-15 07:10:18 -04:00
David Norton
aaddbd153e add statsd parsing benchmarks 2016-09-14 11:12:02 -04:00
Cameron Sparr
9b2e2cc41f kafka panic: Check that error is non-nil before
fixes #1764
2016-09-14 08:54:22 +01:00
Cameron Sparr
bc22309459 Add commit & branch to Makefile 2016-09-13 09:31:30 +01:00
Gunnar
b6f81b538a Add commit to Telegraf version string (#1756) 2016-09-13 08:41:02 +01:00
Cameron Sparr
c3aa43a6bd Fix prometheus output panic on reload
closes #1530
2016-09-12 10:46:37 +01:00
Rene Zbinden
b2ea39077e fix issue #1716 (#1749) 2016-09-12 10:30:35 +01:00
Cameron Sparr
811567a2f4 Update go version to 1.7, fix vet errors
closes #1728
2016-09-09 16:11:17 +01:00
Cameron Sparr
ca8fb440cc Fix statsd scientific notation parsing
closes #1733
2016-09-09 15:13:11 +01:00
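
For context, Go's strconv.ParseFloat accepts scientific notation out of the box; a minimal illustration of the kind of value involved (not the plugin's actual parsing code):

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	// statsd clients may send values like "1.5e3" (e.g. "load:1.5e3|g")
	for _, raw := range []string{"1.5e3", "1E-5", "42"} {
		v, err := strconv.ParseFloat(raw, 64)
		if err != nil {
			fmt.Println("unparseable value:", raw)
			continue
		}
		fmt.Printf("%s -> %v\n", raw, v)
	}
}
```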
Cameron Sparr
ac58a6bb3c Fix unmarshal of influxdb metrics with null tags
closes #1738
2016-09-09 14:49:21 +01:00
Sean Beckett
9757d39240 Update CHANGELOG.md 2016-09-08 09:11:24 -06:00
Cameron Sparr
5a9e7d77b8 Update readme & chglog for 1.0 2016-09-08 15:26:10 +01:00
Cameron Sparr
e963b7f01b alphabetize service inputs, add logparser 2016-09-07 15:55:21 +01:00
Nathan D Acuff
e7899d4dc5 Postgresql database blacklist configuration option (#1699)
* separate hello and authenticate functions, force connection close at end of write cycle so we don't hold open idle connections, which has the benefit of mostly removing the chance of hopelessly losing the connection

* update changelog, though this will need to be updated again to merge into telegraf master

* bump instrumental agent version

* fix test to deal with the better connect/reconnect logic and changed ident & auth handshake

* Update CHANGELOG.md

correct URL from instrumental fork to origin and put the change in the correct part of the file

* go fmt

* Split out Instrumental tests for invalid metric and value.

* Ensure nothing remains on the wire after final test.

* Force valid metric names by replacing invalid parts with underscores.

* Multiple invalid characters being joined into a single underscore.

* Adjust comment to what happens.

* undo split hello and auth commands, to reduce roundtrips

* Add ignored_databases option to postgresql configuration files, to enable easy filtering of system databases without needing to whitelist all the databases on the server.  Add tests for database whitelist and blacklist.

* run go fmt on new postgresql database whitelist/blacklist code

* add postgresql database blacklist option to changelog

* remove a bad merge from the changelog
2016-09-07 09:39:55 +01:00
Cameron Sparr
301c79e57c Add a 404 and high-traffic test to http listener
also remove locking around adding metrics. Instead, keep a waitgroup on
the ServeHTTP function and wait for that to finish before returning from
the Stop() function

closes #1407
2016-09-06 17:21:01 +01:00
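
The WaitGroup idea described above, sketched with illustrative names: every in-flight ServeHTTP call is tracked, and Stop() waits for them to drain instead of locking around each metric write:

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"sync"
)

type listener struct {
	wg sync.WaitGroup
}

func (l *listener) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	l.wg.Add(1)
	defer l.wg.Done()

	// ... parse the body and add metrics here, with no extra locking ...
	w.WriteHeader(http.StatusNoContent)
}

// Stop returns only after every in-flight request handler has finished,
// so no metrics are added after shutdown.
func (l *listener) Stop() {
	l.wg.Wait()
}

func main() {
	l := &listener{}
	srv := httptest.NewServer(l)
	defer srv.Close()

	resp, err := http.Post(srv.URL+"/write", "text/plain", nil)
	if err != nil {
		panic(err)
	}
	resp.Body.Close()

	l.Stop()
	fmt.Println("status:", resp.StatusCode)
}
```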
ncohensm
67c288abda initial http_listener implementation
fix incredibly stupid bugs

populate README

support query endpoint and change default listen port

set response headers for query endpoint

add unit tests

revert erroneous Godeps change

add plugin ref to top-level README

remove debug output and add empty post body test

fix linter errors

move stoppableListener into repo

use constants for http status codes

add CHANGELOG entry

address code review comments re. style/structure

address further code review comments

add note to README re. database creation calls per PR comments
2016-09-06 17:21:01 +01:00
Cameron Sparr
8dd2a8527a Refactor NATS ssl config 2016-09-06 13:52:29 +01:00
Cameron Sparr
2fe427b3b3 mongodb input: fix version 2.2 panic
closes #1628
2016-09-06 11:58:06 +01:00
Paulo Pires
6b1cc67664 Add NATS output plugin.
Added NATS server container needed for tests.

Added NATS output plug-in. Fixes #1487

NATS output plug-in uses internal.GetTLSConfig to set up its TLS configuration.

Added NATS output plug-in to changelog.

closes #1487
closes #1697
2016-09-06 11:39:57 +01:00
Cameron Sparr
1271f9d71a jolokia input: add note about POST permissions
closes #1628
2016-09-06 11:11:27 +01:00
aaron jheng
49ea4e9f39 [Docker Plugin] add server hostname for each docker measurement (#1599)
* add server hostname for each docker measurement

* update CHANGELOG

* move feature to v1.1

* tweak docker_engine_host tag
2016-09-06 08:37:46 +01:00
Cameron Sparr
50ef3282b6 Refactor and code cleanup of filtering
started working on this with the idea of fixing #1623, although I
realized that this was actually just a documentation issue around
a toml eccentricity.

closes #1623
2016-09-05 16:30:18 +01:00
Phil
b63dedb74d sanitize parenthesis (#1701) 2016-09-05 14:30:40 +01:00
Denis Orlikhin
5628049440 Handle negative integers coming as unsigned integers from Aerospike (#1679)
* Handle negative integers coming as unsigned integers from Aerospike stats

* skip values with overflow

* aerospike stat values parsing tests

* better tests
2016-09-05 14:29:14 +01:00
Cameron Sparr
54c9ba7639 Update documentation for Gauge & Counters 2016-09-05 12:58:07 +01:00
Cameron Sparr
b18d375d6c Implement AddGauge & AddCounter functions
and utilize them in the 'system' input plugins.
2016-09-02 16:51:26 +01:00
Cameron Sparr
6dbbe65897 Remove Add() function from accumulator 2016-09-02 16:35:27 +01:00
Cameron Sparr
03d8abccdd Implement telegraf metric types
And use them in the prometheus output plugin.

Still need to test the prometheus output plugin.

Also need to actually create typed metrics in the system plugins.

closes #1683
2016-09-02 16:35:27 +01:00
David Caldwell
0f6d317a8e Fix MySQL plugin not sending 0 value fields (#1695)
closes #1695
2016-09-02 15:22:30 +01:00
Cameron Sparr
792682590c Remove snmp_legacy unit tests and docker image 2016-08-31 12:17:06 +01:00
François de Metz
2d3da343b3 Add basic filestack webhook.
closes #1542

Generalize event.

Add doc.

Update default config.

Add filestack to the list of plugins.

Check that video conversion event returns 400.

Update the readme.

Update the changelog.
2016-08-31 10:48:27 +01:00
Charles-Henri
094eda22c0 Add new iptables plugin
The iptables plugin aims at monitoring bytes and packet counters
matching a given set of iptables rules.

Typically the user would set a dedicated monitoring chain into a given
iptables table, and add the rules to monitor to this chain. The plugin
allows focusing on the counters for this particular table/chain.

closes #1471
2016-08-31 10:42:44 +01:00
Butitsnotme
4886109d9c Added option to remove all CRs from input stream
Added the option removecr to inputs.exec to remove all carriage returns
(CR, ASCII 0x0D, Unicode code point U+000D, ^M). The option is boolean and
not enabled if not present in the config file.

closes #1606

Updated CHANGELOG.md with information about removecr

Ran go fmt ./...

Moved removal of CRs to internal/internal.go

Moved the code to remove carriage returns from
plugins/inputs/exec/exec.go to internal/internal.go. Additionally
changed the conditional on which it gets applied from using a
configuration file option to checking if it is running on Windows.

Moved Carriage Return check to correct place

Moved the carriage return removal back to the exec plugin. Added unit
testing for it. Fixed a bug (removing too many characters).

Ran go fmt ./...

Reverted CHANGELOG to master

Updated Changelog
2016-08-31 10:32:33 +01:00
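
A minimal sketch of the carriage-return stripping described above; the plugin applies it only on Windows, and its real buffer handling differs:

```go
package main

import (
	"bytes"
	"fmt"
)

// removeCarriageReturns deletes every CR (0x0D) byte from command output so
// Windows output parses the same way as output from other platforms. The
// exec plugin applies this only when running on Windows (via runtime.GOOS).
func removeCarriageReturns(b []byte) []byte {
	return bytes.Replace(b, []byte{'\r'}, nil, -1)
}

func main() {
	out := []byte("metric,host=a value=1\r\nmetric,host=b value=2\r\n")
	fmt.Printf("%q\n", removeCarriageReturns(out))
}
```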
Cameron Sparr
2dc47285bd Move CloudWatch rate limit to config (#1673)
* Move CloudWatch rate limit to config
Reference #1670

* make that variable a string

* ahem, apparently limiter wants an int

* add the ratelimit to the sample config

* update the test to include the rate

* set a default value of 10 for ratelimit

* Move default ratelimit to init
2016-08-31 10:29:24 +01:00
Eric
6e33a6d62f OpenTSDB HTTP output
closes #1539

First version of http put working

Refactored code to separate http handling from opentsdb module. Added batching support.

Fixed tag cleaning in http output and refactored telnet output.

Removed useless struct.

Fixed current unittest and added a new one.

Added benchmark test to test json serialization. Made sure http client would reuse connection.

Ran go fmt on opentsdb sources.

Updated README file

Removed useHttp in favor of parsing host string to determine the right API to use for sending metrics. Also renamed BatchSize to HttpBatchSize to better convey that it is only used when using Http API.

Updated changelog

Fixed format issues.

Removed TagSet type to make it more explicit.

Fixed unittest after removing TagSet type.

Revert "Updated changelog"

This reverts commit 24dba5520008d876b5a8d266c34a53e8805cc5f5.

Added PR under 1.1 release.

add missing redis metrics

This makes sure that all redis metrics are present without having to use a hard-coded list of what metrics to pull in.
2016-08-31 10:27:08 +01:00
Cameron Sparr
a8f9eb23cc add missing redis metrics (#1689)
This makes sure that all redis metrics are present without having to use a hard-coded list of what metrics to pull in.
2016-08-31 08:44:47 +01:00
Patrick Hemmer
41a5ee6571 add missing redis metrics
This makes sure that all redis metrics are present without having to use a hard-coded list of what metrics to pull in.
2016-08-31 01:05:11 -04:00
Nathan Haneysmith
7d8de4b8e1 Move default ratelimit to init 2016-08-30 14:33:51 -07:00
Cameron Sparr
cc2b53abf4 Fix changelog for #1650 2016-08-30 16:24:07 +01:00
Yaser Alraddadi
32aa1cc814 httpjson: support configurable response_timeout (#1651)
* httpjson: support configurable response_timeout

* make default ResponseTimeout in init

* Update CHANGELOG.md
2016-08-30 16:23:15 +01:00
Simon Murray
38d877165a Ceph Cluster Performance Input Plugin
The existing ceph input plugin only has access to the local admin daemon socket
on the local host, and as such has access to a limited subset of data.  This
extends the plugin to use CLI commands to get access to the full spread of Ceph
data.  This patch collects global OSD map and IO statistics, PG state and per pool
IO and utilization statistics.

closes #1513
2016-08-30 15:43:07 +01:00
Cameron Sparr
5c5984bfe1 Changelog update 2016-08-30 15:26:27 +01:00
tuier
30cdc31a27 Some improvements in the mesos input plugin (#1572)
* Some improvements in the mesos input plugin:
     Removing unneeded statistics prefix for task metrics,
     Adding framework id tags into each task's metrics,
     Adding state (leader/follower) tags to master's metrics,
     Make sure the slave's metrics are tagged with slave

* typo, replacing cpus_total with elected to determine leader

* Remove remaining statistics_ from sample

* using timestamp from mesos as metric timestamp

* change mesos-tasks to mesos_tasks, measurement

* change measurement name in test

* Replace follower by standby
2016-08-30 15:25:29 +01:00
Cameron Sparr
602a36e241 fix changelog #1607 2016-08-30 07:04:10 +01:00
Joel "The Merciless" Meador
b863ee1d65 [Instrumental] Underscore metric name output (#1607)
* separate hello and authenticate functions, force connection close at end of write cycle so we don't hold open idle connections, which has the benefit of mostly removing the chance of hopelessly losing the connection

* update changelog, though this will need to be updated again to merge into telegraf master

* bump instrumental agent version

* fix test to deal with the better connect/reconnect logic and changed ident & auth handshake

* Update CHANGELOG.md

correct URL from instrumental fork to origin and put the change in the correct part of the file

* go fmt

* Split out Instrumental tests for invalid metric and value.

* Ensure nothing remains on the wire after final test.

* Force valid metric names by replacing invalid parts with underscores.

* Multiple invalid characters being joined into a single underscore.

* Adjust comment to what happens.

* undo split hello and auth commands, to reduce roundtrips

* Split out Instrumental tests for invalid metric and value.

* Ensure nothing remains on the wire after final test.

* Force valid metric names by replacing invalid parts with underscores.

* Multiple invalid characters being joined into a single underscore.

* add an entry to CHANGELOG for easy merging upstream

* go fmt variable alignment

* remove some bugfixes from changelog which now more properly are in a different section.

* remove headers and whitespace that should have been removed with the last commit
2016-08-30 07:03:32 +01:00
Nathan Haneysmith
ca49babf3a set a default value of 10 for ratelimit 2016-08-29 11:41:43 -07:00
SoleAngelus
cf37b5cdcf Update WINDOWS_SERVICE.md (#1669)
1. Added further clarification on running commands in PowerShell.
2. Added double quotes to file paths.
2016-08-29 17:36:05 +01:00
Cameron Sparr
969f388ef2 Make elasticsearch timeout configurable
closes #1674
2016-08-29 11:06:30 +01:00
Nathan Haneysmith
0589a1d0a5 update the test to include the rate 2016-08-25 18:17:33 -07:00
Nathan Haneysmith
4e019a176d add the ratelimit to the sample config 2016-08-25 18:04:29 -07:00
Nathan Haneysmith
a0e23d30fe ahem, apparently limiter wants an int 2016-08-25 17:56:33 -07:00
Nathan Haneysmith
e931706249 make that variable a string 2016-08-25 17:53:46 -07:00
Nathan Haneysmith
2457d95262 Move CloudWatch rate limit to config
Reference #1670
2016-08-25 17:46:38 -07:00
Cameron Sparr
e9d33726a9 start aerospike container 1st for unit tests
because it requires some time to initialize before it can respond to
metric requests.
2016-08-24 09:16:55 +01:00
Cameron Sparr
2462e04bf2 Rdme upd (#1660)
* Update README and CHANGELOG with 1.0 RC 1

* Increase circleci test docker sleep

* update aerospike dependency
2016-08-24 08:41:12 +01:00
199 changed files with 9686 additions and 3213 deletions

CHANGELOG.md

@@ -1,19 +1,117 @@
## v1.0 [unreleased]
## v1.2 [unreleased]
### Release Notes
### Features
### Bugfixes
## v1.1 [unreleased]
- [#1949](https://github.com/influxdata/telegraf/issues/1949): Fix windows `net` plugin.
## v1.1.1 [unreleased]
### Bugfixes
- [#2023](https://github.com/influxdata/telegraf/issues/2023): Fix issue parsing toml durations with single quotes.
## v1.1.0 [2016-11-07]
### Release Notes
- Telegraf now supports two new types of plugins: processors & aggregators.
- On systemd Telegraf will no longer redirect its stdout to /var/log/telegraf/telegraf.log.
On most systems, the logs will be directed to the systemd journal and can be
accessed by `journalctl -u telegraf.service`. Consult the systemd journal
documentation for configuring journald. There is also a [`logfile` config option](https://github.com/influxdata/telegraf/blob/master/etc/telegraf.conf#L70)
available in 1.1, which will allow users to easily configure telegraf to
continue sending logs to /var/log/telegraf/telegraf.log.
### Features
- [#1726](https://github.com/influxdata/telegraf/issues/1726): Processor & Aggregator plugin support.
- [#1861](https://github.com/influxdata/telegraf/pull/1861): adding the tags in the graylog output plugin
- [#1732](https://github.com/influxdata/telegraf/pull/1732): Telegraf systemd service, log to journal.
- [#1782](https://github.com/influxdata/telegraf/pull/1782): Allow numeric and non-string values for tag_keys.
- [#1694](https://github.com/influxdata/telegraf/pull/1694): Adding Gauge and Counter metric types.
- [#1606](https://github.com/influxdata/telegraf/pull/1606): Remove carriage returns from exec plugin output on Windows
- [#1674](https://github.com/influxdata/telegraf/issues/1674): elasticsearch input: configurable timeout.
- [#1607](https://github.com/influxdata/telegraf/pull/1607): Massage metric names in Instrumental output plugin
- [#1572](https://github.com/influxdata/telegraf/pull/1572): mesos improvements.
- [#1513](https://github.com/influxdata/telegraf/issues/1513): Add Ceph Cluster Performance Statistics
- [#1650](https://github.com/influxdata/telegraf/issues/1650): Ability to configure response_timeout in httpjson input.
- [#1685](https://github.com/influxdata/telegraf/issues/1685): Add additional redis metrics.
- [#1539](https://github.com/influxdata/telegraf/pull/1539): Added capability to send metrics through Http API for OpenTSDB.
- [#1471](https://github.com/influxdata/telegraf/pull/1471): iptables input plugin.
- [#1542](https://github.com/influxdata/telegraf/pull/1542): Add filestack webhook plugin.
- [#1599](https://github.com/influxdata/telegraf/pull/1599): Add server hostname for each docker measurements.
- [#1697](https://github.com/influxdata/telegraf/pull/1697): Add NATS output plugin.
- [#1407](https://github.com/influxdata/telegraf/pull/1407) & [#1915](https://github.com/influxdata/telegraf/pull/1915): HTTP service listener input plugin.
- [#1699](https://github.com/influxdata/telegraf/pull/1699): Add database blacklist option for Postgresql
- [#1791](https://github.com/influxdata/telegraf/pull/1791): Add Docker container state metrics to Docker input plugin output
- [#1755](https://github.com/influxdata/telegraf/issues/1755): Add support to SNMP for IP & MAC address conversion.
- [#1729](https://github.com/influxdata/telegraf/issues/1729): Add support to SNMP for OID index suffixes.
- [#1813](https://github.com/influxdata/telegraf/pull/1813): Change default arguments for SNMP plugin.
- [#1686](https://github.com/influxdata/telegraf/pull/1686): Mesos input plugin: very high-cardinality mesos-task metrics removed.
- [#1838](https://github.com/influxdata/telegraf/pull/1838): Logging overhaul to centralize the logger & log levels, & provide a logfile config option.
- [#1700](https://github.com/influxdata/telegraf/pull/1700): HAProxy plugin socket glob matching.
- [#1847](https://github.com/influxdata/telegraf/pull/1847): Add Kubernetes plugin for retrieving pod metrics.
### Bugfixes
- [#1955](https://github.com/influxdata/telegraf/issues/1955): Fix NATS plug-ins reconnection logic.
- [#1936](https://github.com/influxdata/telegraf/issues/1936): Set required default values in udp_listener & tcp_listener.
- [#1926](https://github.com/influxdata/telegraf/issues/1926): Fix toml unmarshal panic in Duration objects.
- [#1746](https://github.com/influxdata/telegraf/issues/1746): Fix handling of non-string values for JSON keys listed in tag_keys.
- [#1628](https://github.com/influxdata/telegraf/issues/1628): Fix mongodb input panic on version 2.2.
- [#1733](https://github.com/influxdata/telegraf/issues/1733): Fix statsd scientific notation parsing
- [#1716](https://github.com/influxdata/telegraf/issues/1716): Sensors plugin strconv.ParseFloat: parsing "": invalid syntax
- [#1530](https://github.com/influxdata/telegraf/issues/1530): Fix prometheus_client reload panic
- [#1764](https://github.com/influxdata/telegraf/issues/1764): Fix kafka consumer panic when nil error is returned down errs channel.
- [#1768](https://github.com/influxdata/telegraf/pull/1768): Speed up statsd parsing.
- [#1751](https://github.com/influxdata/telegraf/issues/1751): Fix powerdns integer parse error handling.
- [#1752](https://github.com/influxdata/telegraf/issues/1752): Fix varnish plugin defaults not being used.
- [#1517](https://github.com/influxdata/telegraf/issues/1517): Fix windows glob paths.
- [#1137](https://github.com/influxdata/telegraf/issues/1137): Fix issue loading config directory on windows.
- [#1772](https://github.com/influxdata/telegraf/pull/1772): Windows remote management interactive service fix.
- [#1702](https://github.com/influxdata/telegraf/issues/1702): sqlserver, fix issue when case sensitive collation is activated.
- [#1823](https://github.com/influxdata/telegraf/issues/1823): Fix huge allocations in http_listener when dealing with huge payloads.
- [#1833](https://github.com/influxdata/telegraf/issues/1833): Fix translating SNMP fields not in MIB.
- [#1835](https://github.com/influxdata/telegraf/issues/1835): Fix SNMP emitting empty fields.
- [#1854](https://github.com/influxdata/telegraf/pull/1853): SQL Server waitstats truncation bug.
- [#1810](https://github.com/influxdata/telegraf/issues/1810): Fix logparser common log format: numbers in ident.
- [#1793](https://github.com/influxdata/telegraf/pull/1793): Fix JSON Serialization in OpenTSDB output.
- [#1731](https://github.com/influxdata/telegraf/issues/1731): Fix Graphite template ordering, use most specific.
- [#1836](https://github.com/influxdata/telegraf/pull/1836): Fix snmp table field initialization for non-automatic table.
- [#1724](https://github.com/influxdata/telegraf/issues/1724): cgroups path being parsed as metric.
- [#1886](https://github.com/influxdata/telegraf/issues/1886): Fix phpfpm fcgi client panic when URL does not exist.
- [#1344](https://github.com/influxdata/telegraf/issues/1344): Fix config file parse error logging.
- [#1771](https://github.com/influxdata/telegraf/issues/1771): Delete nil fields in the metric maker.
- [#870](https://github.com/influxdata/telegraf/issues/870): Fix MySQL special characters in DSN parsing.
- [#1742](https://github.com/influxdata/telegraf/issues/1742): Ping input odd timeout behavior.
## v1.0.1 [2016-09-26]
### Bugfixes
- [#1775](https://github.com/influxdata/telegraf/issues/1775): Prometheus output: Fix bug with multi-batch writes.
- [#1738](https://github.com/influxdata/telegraf/issues/1738): Fix unmarshal of influxdb metrics with null tags.
- [#1773](https://github.com/influxdata/telegraf/issues/1773): Add configurable timeout to influxdb input plugin.
- [#1785](https://github.com/influxdata/telegraf/pull/1785): Fix statsd no default value panic.
## v1.0 [2016-09-08]
### Release Notes
**Breaking Change** The SNMP plugin is being deprecated in its current form.
There is a [new SNMP plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/snmp)
which fixes many of the issues and confusions
of it's predecessor. For users wanting to continue to use the deprecated SNMP
of its predecessor. For users wanting to continue to use the deprecated SNMP
plugin, you will need to change your config file from `[[inputs.snmp]]` to
`[[inputs.snmp_legacy]]`. The configuration of the new SNMP plugin is _not_
backwards-compatible.
- Telegraf now supports being installed as an official windows service,
which can be installed via
`> C:\Program Files\Telegraf\telegraf.exe --service install`
**Breaking Change**: Aerospike main server node measurements have been renamed
aerospike_node. Aerospike namespace measurements have been renamed to
aerospike_namespace. They will also now be tagged with the node_name
@@ -44,6 +142,10 @@ should now look like:
path = "/"
```
- Telegraf now supports being installed as an official windows service,
which can be installed via
`> C:\Program Files\Telegraf\telegraf.exe --service install`
- `flush_jitter` behavior has been changed. The random jitter will now be
evaluated at every flush interval, rather than once at startup. This makes it
consistent with the behavior of `collection_jitter`.
@@ -140,6 +242,7 @@ consistent with the behavior of `collection_jitter`.
- [#1425](https://github.com/influxdata/telegraf/issues/1425): Fix win_perf_counter "index out of range" panic.
- [#1634](https://github.com/influxdata/telegraf/issues/1634): Fix ntpq panic when field is missing.
- [#1637](https://github.com/influxdata/telegraf/issues/1637): Sanitize graphite output field names.
- [#1695](https://github.com/influxdata/telegraf/pull/1695): Fix MySQL plugin not sending 0 value fields.
## v0.13.1 [2016-05-24]

CONTRIBUTING.md

@@ -2,7 +2,7 @@
1. [Sign the CLA](http://influxdb.com/community/cla.html)
1. Make changes or write plugin (see below for details)
1. Add your plugin to `plugins/inputs/all/all.go` or `plugins/outputs/all/all.go`
1. Add your plugin to one of: `plugins/{inputs,outputs,aggregators,processors}/all/all.go`
1. If your plugin requires a new Go package,
[add it](https://github.com/influxdata/telegraf/blob/master/CONTRIBUTING.md#adding-a-dependency)
1. Write a README for your plugin, if it's an input plugin, it should be structured
@@ -16,8 +16,8 @@ for a good example.
## GoDoc
Public interfaces for inputs, outputs, metrics, and the accumulator can be found
on the GoDoc
Public interfaces for inputs, outputs, processors, aggregators, metrics,
and the accumulator can be found on the GoDoc
[![GoDoc](https://godoc.org/github.com/influxdata/telegraf?status.svg)](https://godoc.org/github.com/influxdata/telegraf)
@@ -32,7 +32,7 @@ Assuming you can already build the project, run these in the telegraf directory:
1. `go get github.com/sparrc/gdm`
1. `gdm restore`
1. `gdm save`
1. `GOOS=linux gdm save`
## Input Plugins
@@ -46,7 +46,7 @@ and submit new inputs.
### Input Plugin Guidelines
* A plugin must conform to the `telegraf.Input` interface.
* A plugin must conform to the [`telegraf.Input`](https://godoc.org/github.com/influxdata/telegraf#Input) interface.
* Input Plugins should call `inputs.Add` in their `init` function to register themselves.
See below for a quick example.
* Input Plugins must be added to the
@@ -84,9 +84,9 @@ func (s *Simple) SampleConfig() string {
func (s *Simple) Gather(acc telegraf.Accumulator) error {
if s.Ok {
acc.Add("state", "pretty good", nil)
acc.AddFields("state", map[string]interface{}{"value": "pretty good"}, nil)
} else {
acc.Add("state", "not great", nil)
acc.AddFields("state", map[string]interface{}{"value": "not great"}, nil)
}
return nil
@@ -97,6 +97,13 @@ func init() {
}
```
## Adding Typed Metrics
In addition to the `AddFields` function, the accumulator also supports an
`AddGauge` and `AddCounter` function. These functions are for adding _typed_
metrics. Metric types are ignored for the InfluxDB output, but can be used
for other outputs, such as [prometheus](https://prometheus.io/docs/concepts/metric_types/).
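For illustration, a `Gather` implementation using the typed functions might look like the snippet below, building on the `Simple` example above; the measurement names, fields, and values are made up:
```go
func (s *Simple) Gather(acc telegraf.Accumulator) error {
	tags := map[string]string{"host": "example"}

	// a gauge: a value that can go up and down (e.g. current memory usage)
	acc.AddGauge("mem", map[string]interface{}{"used_percent": 63.2}, tags)

	// a counter: a monotonically increasing value (e.g. bytes sent since boot)
	acc.AddCounter("net", map[string]interface{}{"bytes_sent": uint64(123456)}, tags)

	return nil
}
```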
## Input Plugins Accepting Arbitrary Data Formats
Some input plugins (such as
@@ -170,7 +177,7 @@ similar constructs.
### Output Plugin Guidelines
* An output must conform to the `outputs.Output` interface.
* An output must conform to the [`telegraf.Output`](https://godoc.org/github.com/influxdata/telegraf#Output) interface.
* Outputs should call `outputs.Add` in their `init` function to register themselves.
See below for a quick example.
* To be available within Telegraf itself, plugins must add themselves to the
@@ -268,6 +275,186 @@ and `Stop()` methods.
* Same as the `Output` guidelines, except that they must conform to the
`output.ServiceOutput` interface.
## Processor Plugins
This section is for developers who want to create a new processor plugin.
### Processor Plugin Guidelines
* A processor must conform to the [`telegraf.Processor`](https://godoc.org/github.com/influxdata/telegraf#Processor) interface.
* Processors should call `processors.Add` in their `init` function to register themselves.
See below for a quick example.
* To be available within Telegraf itself, plugins must add themselves to the
`github.com/influxdata/telegraf/plugins/processors/all/all.go` file.
* The `SampleConfig` function should return valid toml that describes how the
processor can be configured. This is included in `telegraf -sample-config`.
* The `Description` function should say in one line what this processor does.
### Processor Example
```go
package printer
// printer.go
import (
"fmt"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/processors"
)
type Printer struct {
}
var sampleConfig = `
`
func (p *Printer) SampleConfig() string {
return sampleConfig
}
func (p *Printer) Description() string {
return "Print all metrics that pass through this filter."
}
func (p *Printer) Apply(in ...telegraf.Metric) []telegraf.Metric {
for _, metric := range in {
fmt.Println(metric.String())
}
return in
}
func init() {
processors.Add("printer", func() telegraf.Processor {
return &Printer{}
})
}
```
## Aggregator Plugins
This section is for developers who want to create a new aggregator plugin.
### Aggregator Plugin Guidelines
* An aggregator must conform to the [`telegraf.Aggregator`](https://godoc.org/github.com/influxdata/telegraf#Aggregator) interface.
* Aggregators should call `aggregators.Add` in their `init` function to register themselves.
See below for a quick example.
* To be available within Telegraf itself, plugins must add themselves to the
`github.com/influxdata/telegraf/plugins/aggregators/all/all.go` file.
* The `SampleConfig` function should return valid toml that describes how the
aggregator can be configured. This is included in `telegraf -sample-config`.
* The `Description` function should say in one line what this aggregator does.
* The Aggregator plugin will need to keep caches of metrics that have passed
through it. This should be done using the builtin `HashID()` function of each
metric.
* When the `Reset()` function is called, all caches should be cleared.
### Aggregator Example
```go
package min
// min.go
import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/aggregators"
)
type Min struct {
// caches for metric fields, names, and tags
fieldCache map[uint64]map[string]float64
nameCache map[uint64]string
tagCache map[uint64]map[string]string
}
func NewMin() telegraf.Aggregator {
m := &Min{}
m.Reset()
return m
}
var sampleConfig = `
## period is the flush & clear interval of the aggregator.
period = "30s"
## If true drop_original will drop the original metrics and
## only send aggregates.
drop_original = false
`
func (m *Min) SampleConfig() string {
return sampleConfig
}
func (m *Min) Description() string {
return "Keep the aggregate min of each metric passing through."
}
func (m *Min) Add(in telegraf.Metric) {
id := in.HashID()
if _, ok := m.nameCache[id]; !ok {
// hit an uncached metric, create caches for first time:
m.nameCache[id] = in.Name()
m.tagCache[id] = in.Tags()
m.fieldCache[id] = make(map[string]float64)
for k, v := range in.Fields() {
if fv, ok := convert(v); ok {
m.fieldCache[id][k] = fv
}
}
} else {
for k, v := range in.Fields() {
if fv, ok := convert(v); ok {
if _, ok := m.fieldCache[id][k]; !ok {
// hit an uncached field of a cached metric
m.fieldCache[id][k] = fv
continue
}
if fv < m.fieldCache[id][k] {
// set new minimum
m.fieldCache[id][k] = fv
}
}
}
}
}
func (m *Min) Push(acc telegraf.Accumulator) {
for id := range m.nameCache {
fields := map[string]interface{}{}
for k, v := range m.fieldCache[id] {
fields[k+"_min"] = v
}
acc.AddFields(m.nameCache[id], fields, m.tagCache[id])
}
}
func (m *Min) Reset() {
m.fieldCache = make(map[uint64]map[string]float64)
m.nameCache = make(map[uint64]string)
m.tagCache = make(map[uint64]map[string]string)
}
func convert(in interface{}) (float64, bool) {
switch v := in.(type) {
case float64:
return v, true
case int64:
return float64(v), true
default:
return 0, false
}
}
func init() {
aggregators.Add("min", func() telegraf.Aggregator {
return NewMin()
})
}
```
## Unit Tests
### Execute short tests

Godeps

@@ -1,6 +1,6 @@
github.com/Shopify/sarama 8aadb476e66ca998f2f6bb3c993e9a2daa3666b9
github.com/Sirupsen/logrus 219c8cb75c258c552e999735be6df753ffc7afdc
github.com/aerospike/aerospike-client-go 45863b7fd8640dc12f7fdd397104d97e1986f25a
github.com/aerospike/aerospike-client-go 7f3a312c3b2a60ac083ec6da296091c52c795c63
github.com/amir/raidman 53c1b967405155bfc8758557863bf2e14f814687
github.com/aws/aws-sdk-go 13a12060f716145019378a10e2806c174356b857
github.com/beorn7/perks 3ac7bf7a47d159a033b107610db8a1b6575507a4
@@ -19,7 +19,7 @@ github.com/eclipse/paho.mqtt.golang 0f7a459f04f13a41b7ed752d47944528d4bf9a86
github.com/go-sql-driver/mysql 1fca743146605a172a266e1654e01e5cd5669bee
github.com/gobwas/glob 49571a1557cd20e6a2410adc6421f85b66c730b5
github.com/golang/protobuf 552c7b9542c194800fd493123b3798ef0a832032
github.com/golang/snappy 427fb6fc07997f43afa32f35e850833760e489a7
github.com/golang/snappy d9eb7a3d35ec988b8585d4a0068e462c27d28380
github.com/gonuts/go-shellquote e842a11b24c6abfb3dd27af69a17f482e4b483c2
github.com/gorilla/context 1ea25387ff6f684839d82767c1733ff4d4d15d0a
github.com/gorilla/mux c9e326e2bdec29039a3761c07bece13133863e1e
@@ -27,8 +27,9 @@ github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478
github.com/hashicorp/consul 5aa90455ce78d4d41578bafc86305e6e6b28d7d2
github.com/hpcloud/tail b2940955ab8b26e19d43a43c4da0475dd81bdb56
github.com/influxdata/config b79f6829346b8d6e78ba73544b1e1038f1f1c9da
github.com/influxdata/influxdb e094138084855d444195b252314dfee9eae34cab
github.com/influxdata/influxdb fc57c0f7c635df3873f3d64f0ed2100ddc94d5ae
github.com/influxdata/toml af4df43894b16e3fd2b788d01bd27ad0776ef2d0
github.com/influxdata/wlog 7c63b0a71ef8300adc255344d275e10e5c3a71ec
github.com/kardianos/osext 29ae4ffbc9a6fe9fb2bc5029050ce6996ea1d3bc
github.com/kardianos/service 5e335590050d6d00f3aa270217d288dda1c94d0a
github.com/klauspost/crc32 19b0b332c9e4516a6370a0456e6182c3b5036720
@@ -37,8 +38,8 @@ github.com/matttproud/golang_protobuf_extensions d0c3fe89de86839aecf2e0579c40ba3
github.com/miekg/dns cce6c130cdb92c752850880fd285bea1d64439dd
github.com/mreiferson/go-snappystream 028eae7ab5c4c9e2d1cb4c4ca1e53259bbe7e504
github.com/naoina/go-stringutil 6b638e95a32d0c1131db0e7fe83775cbea4a0d0b
github.com/nats-io/nats b13fc9d12b0b123ebc374e6b808c6228ae4234a3
github.com/nats-io/nuid 4f84f5f3b2786224e336af2e13dba0a0a80b76fa
github.com/nats-io/nats ea8b4fd12ebb823073c0004b9f09ac8748f4f165
github.com/nats-io/nuid a5152d67cf63cbfb5d992a395458722a45194715
github.com/nsqio/go-nsq 0b80d6f05e15ca1930e0c5e1d540ed627e299980
github.com/opencontainers/runc 89ab7f2ccc1e45ddf6485eaa802c35dcf321dfc8
github.com/prometheus/client_golang 18acf9993a863f4c4b40612e19cdd243e7c86831
@@ -47,8 +48,7 @@ github.com/prometheus/common e8eabff8812b05acf522b45fdcd725a785188e37
github.com/prometheus/procfs 406e5b7bfd8201a36e2bb5f7bdae0b03380c2ce8
github.com/samuel/go-zookeeper 218e9c81c0dd8b3b18172b2bbfad92cc7d6db55f
github.com/shirou/gopsutil 4d0c402af66c78735c5ccf820dc2ca7de5e4ff08
github.com/soniah/gosnmp eb32571c2410868d85849ad67d1e51d01273eb84
github.com/sparrc/aerospike-client-go d4bb42d2c2d39dae68e054116f4538af189e05d5
github.com/soniah/gosnmp 3fe3beb30fa9700988893c56a63b1df8e1b68c26
github.com/streadway/amqp b4f3ceab0337f013208d31348b578d83c0064744
github.com/stretchr/testify 1f4a1643a57e798696635ea4c126e9127adb7d3c
github.com/vjeantet/grok 83bfdfdfd1a8146795b28e547a8e3c8b28a466c2
@@ -56,7 +56,7 @@ github.com/wvanbergen/kafka 46f9a1cf3f670edec492029fadded9c2d9e18866
github.com/wvanbergen/kazoo-go 0f768712ae6f76454f987c3356177e138df258f8
github.com/yuin/gopher-lua bf3808abd44b1e55143a2d7f08571aaa80db1808
github.com/zensqlmonitor/go-mssqldb ffe5510c6fa5e15e6d983210ab501c815b56b363
golang.org/x/crypto 5dc8cb4b8a8eb076cbb5a06bc3b8682c15bdbbd3
golang.org/x/crypto c197bcf24cde29d3f73c7b4ac6fd41f4384e8af6
golang.org/x/net 6acef71eb69611914f7a30939ea9f6e194c78172
golang.org/x/text a71fd10341b064c10f4a81ceac72bcf70f26ea34
gopkg.in/dancannon/gorethink.v1 7d1af5be49cb5ecc7b177bf387d232050299d6ef

Makefile

@@ -1,4 +1,6 @@
VERSION := $(shell sh -c 'git describe --always --tags')
BRANCH := $(shell sh -c 'git rev-parse --abbrev-ref HEAD')
COMMIT := $(shell sh -c 'git rev-parse HEAD')
ifdef GOBIN
PATH := $(GOBIN):$(PATH)
else
@@ -13,17 +15,18 @@ windows: prepare-windows build-windows
# Only run the build (no dependency grabbing)
build:
go install -ldflags "-X main.version=$(VERSION)" ./...
go install -ldflags \
"-X main.version=$(VERSION) -X main.commit=$(COMMIT) -X main.branch=$(BRANCH)" ./...
build-windows:
GOOS=windows GOARCH=amd64 go build -o telegraf.exe -ldflags \
"-X main.version=$(VERSION)" \
"-X main.version=$(VERSION) -X main.commit=$(COMMIT) -X main.branch=$(BRANCH)" \
./cmd/telegraf/telegraf.go
build-for-docker:
CGO_ENABLED=0 GOOS=linux go build -installsuffix cgo -o telegraf -ldflags \
"-s -X main.version=$(VERSION)" \
./cmd/telegraf/telegraf.go
"-s -X main.version=$(VERSION) -X main.commit=$(COMMIT) -X main.branch=$(BRANCH)" \
./cmd/telegraf/telegraf.go
# run package script
package:
@@ -42,6 +45,7 @@ prepare-windows:
# Run all docker containers necessary for unit tests
docker-run:
docker run --name aerospike -p "3000:3000" -d aerospike/aerospike-server:3.9.0
docker run --name kafka \
-e ADVERTISED_HOST=localhost \
-e ADVERTISED_PORT=9092 \
@@ -52,29 +56,28 @@ docker-run:
docker run --name postgres -p "5432:5432" -d postgres
docker run --name rabbitmq -p "15672:15672" -p "5672:5672" -d rabbitmq:3-management
docker run --name redis -p "6379:6379" -d redis
docker run --name aerospike -p "3000:3000" -d aerospike/aerospike-server
docker run --name nsq -p "4150:4150" -d nsqio/nsq /nsqd
docker run --name mqtt -p "1883:1883" -d ncarlier/mqtt
docker run --name riemann -p "5555:5555" -d blalor/riemann
docker run --name snmp -p "31161:31161/udp" -d titilambert/snmpsim
docker run --name nats -p "4222:4222" -d nats
# Run docker containers necessary for CircleCI unit tests
docker-run-circle:
docker run --name aerospike -p "3000:3000" -d aerospike/aerospike-server:3.9.0
docker run --name kafka \
-e ADVERTISED_HOST=localhost \
-e ADVERTISED_PORT=9092 \
-p "2181:2181" -p "9092:9092" \
-d spotify/kafka
docker run --name aerospike -p "3000:3000" -d aerospike/aerospike-server
docker run --name nsq -p "4150:4150" -d nsqio/nsq /nsqd
docker run --name mqtt -p "1883:1883" -d ncarlier/mqtt
docker run --name riemann -p "5555:5555" -d blalor/riemann
docker run --name snmp -p "31161:31161/udp" -d titilambert/snmpsim
docker run --name nats -p "4222:4222" -d nats
# Kill all docker containers, ignore errors
docker-kill:
-docker kill nsq aerospike redis rabbitmq postgres memcached mysql kafka mqtt riemann snmp
-docker rm nsq aerospike redis rabbitmq postgres memcached mysql kafka mqtt riemann snmp
-docker kill nsq aerospike redis rabbitmq postgres memcached mysql kafka mqtt riemann nats
-docker rm nsq aerospike redis rabbitmq postgres memcached mysql kafka mqtt riemann nats
# Run full unit tests using docker containers (includes setup and teardown)
test: vet docker-kill docker-run

README.md

@@ -20,12 +20,12 @@ new plugins.
### Linux deb and rpm Packages:
Latest:
* https://dl.influxdata.com/telegraf/releases/telegraf_1.0.0-beta3_amd64.deb
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0_beta3.x86_64.rpm
* https://dl.influxdata.com/telegraf/releases/telegraf_1.0.1_amd64.deb
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.1.x86_64.rpm
Latest (arm):
* https://dl.influxdata.com/telegraf/releases/telegraf_1.0.0-beta3_armhf.deb
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0_beta3.armhf.rpm
* https://dl.influxdata.com/telegraf/releases/telegraf_1.0.1_armhf.deb
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.1.armhf.rpm
##### Package Instructions:
@@ -46,14 +46,14 @@ to use this repo to install & update telegraf.
### Linux tarballs:
Latest:
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0-beta3_linux_amd64.tar.gz
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0-beta3_linux_i386.tar.gz
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0-beta3_linux_armhf.tar.gz
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.1_linux_amd64.tar.gz
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.1_linux_i386.tar.gz
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.1_linux_armhf.tar.gz
### FreeBSD tarball:
Latest:
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0-beta3_freebsd_amd64.tar.gz
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.1_freebsd_amd64.tar.gz
### Ansible Role:
@@ -69,7 +69,7 @@ brew install telegraf
### Windows Binaries (EXPERIMENTAL)
Latest:
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0-beta3_windows_amd64.zip
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.1_windows_amd64.zip
### From Source:
@@ -85,44 +85,42 @@ if you don't have it already. You also must build with golang version 1.5+.
## How to use it:
```console
$ telegraf -help
Telegraf, The plugin-driven server agent for collecting and reporting metrics.
See usage with:
Usage:
telegraf <flags>
The flags are:
-config <file> configuration file to load
-test gather metrics once, print them to stdout, and exit
-sample-config print out full sample configuration to stdout
-config-directory directory containing additional *.conf files
-input-filter filter the input plugins to enable, separator is :
-output-filter filter the output plugins to enable, separator is :
-usage print usage for a plugin, ie, 'telegraf -usage mysql'
-debug print metrics as they're generated to stdout
-quiet run in quiet mode
-version print the version to stdout
Examples:
# generate a telegraf config file:
telegraf -sample-config > telegraf.conf
# generate config with only cpu input & influxdb output plugins defined
telegraf -sample-config -input-filter cpu -output-filter influxdb
# run a single telegraf collection, outputting metrics to stdout
telegraf -config telegraf.conf -test
# run telegraf with all plugins defined in config file
telegraf -config telegraf.conf
# run telegraf, enabling the cpu & memory input, and influxdb output plugins
telegraf -config telegraf.conf -input-filter cpu:mem -output-filter influxdb
```
telegraf --help
```
### Generate a telegraf config file:
```
telegraf config > telegraf.conf
```
### Generate config with only cpu input & influxdb output plugins defined
```
telegraf --input-filter cpu --output-filter influxdb config
```
### Run a single telegraf collection, outputting metrics to stdout
```
telegraf --config telegraf.conf -test
```
### Run telegraf with all plugins defined in config file
```
telegraf --config telegraf.conf
```
### Run telegraf, enabling the cpu & memory input, and influxdb output plugins
```
telegraf --config telegraf.conf -input-filter cpu:mem -output-filter influxdb
```
## Configuration
@@ -161,6 +159,7 @@ Currently implemented sources:
* [httpjson](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/httpjson) (generic JSON-emitting http service plugin)
* [influxdb](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/influxdb)
* [ipmi_sensor](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/ipmi_sensor)
* [iptables](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/iptables)
* [jolokia](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia)
* [leofs](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/leofs)
* [lustre2](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/lustre2)
@@ -212,18 +211,21 @@ Currently implemented sources:
Telegraf can also collect metrics via the following service plugins:
* [http_listener](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/http_listener)
* [kafka_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/kafka_consumer)
* [mqtt_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/mqtt_consumer)
* [nats_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nats_consumer)
* [nsq_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nsq_consumer)
* [logparser](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/logparser)
* [statsd](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/statsd)
* [tail](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/tail)
* [udp_listener](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/udp_listener)
* [tcp_listener](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/tcp_listener)
* [mqtt_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/mqtt_consumer)
* [kafka_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/kafka_consumer)
* [nats_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nats_consumer)
* [udp_listener](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/udp_listener)
* [webhooks](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/webhooks)
* [filestack](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/webhooks/filestack)
* [github](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/webhooks/github)
* [mandrill](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/webhooks/mandrill)
* [rollbar](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/webhooks/rollbar)
* [nsq_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nsq_consumer)
We'll be adding support for many more over the coming months. Read on if you
want to add support for another service or third-party API.
@@ -243,6 +245,7 @@ want to add support for another service or third-party API.
* [kafka](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/kafka)
* [librato](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/librato)
* [mqtt](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/mqtt)
* [nats](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/nats)
* [nsq](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/nsq)
* [opentsdb](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/opentsdb)
* [prometheus](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/prometheus_client)

View File

@@ -2,26 +2,33 @@ package telegraf
import "time"
// Accumulator is an interface for "accumulating" metrics from plugin(s).
// The metrics are sent down a channel shared between all plugins.
type Accumulator interface {
// AddFields adds a metric to the accumulator with the given measurement
// name, fields, and tags (and timestamp). If a timestamp is not provided,
// then the accumulator sets it to "now".
// Create a point with a value, decorating it with tags
// NOTE: tags is expected to be owned by the caller, don't mutate
// it after passing to Add.
Add(measurement string,
value interface{},
tags map[string]string,
t ...time.Time)
AddFields(measurement string,
fields map[string]interface{},
tags map[string]string,
t ...time.Time)
AddError(err error)
// AddGauge is the same as AddFields, but will add the metric as a "Gauge" type
AddGauge(measurement string,
fields map[string]interface{},
tags map[string]string,
t ...time.Time)
Debug() bool
SetDebug(enabled bool)
// AddCounter is the same as AddFields, but will add the metric as a "Counter" type
AddCounter(measurement string,
fields map[string]interface{},
tags map[string]string,
t ...time.Time)
SetPrecision(precision, interval time.Duration)
DisablePrecision()
AddError(err error)
}
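// Illustrative sketch (not part of this file): a hypothetical input plugin's
// use of the Accumulator interface above. The plugin, measurement, and field
// names here are invented for the example.
package exampleinput

import "github.com/influxdata/telegraf"

type ExampleInput struct{}

func (e *ExampleInput) Description() string  { return "an invented example input" }
func (e *ExampleInput) SampleConfig() string { return "" }

func (e *ExampleInput) Gather(acc telegraf.Accumulator) error {
	tags := map[string]string{"host": "example"}

	// AddFields emits an untyped metric; with no timestamp argument the
	// accumulator stamps it with "now" at the configured precision.
	acc.AddFields("example_load", map[string]interface{}{"value": 0.64}, tags)

	// AddGauge and AddCounter take the same arguments but mark the value type.
	acc.AddGauge("example_mem", map[string]interface{}{"used_percent": 71.5}, tags)
	acc.AddCounter("example_requests", map[string]interface{}{"count": int64(42)}, tags)

	// AddError records a gathering error without aborting the interval:
	// acc.AddError(fmt.Errorf("partial gather failure"))
	return nil
}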

View File

@@ -1,161 +1,77 @@
package agent
import (
"fmt"
"log"
"math"
"sync/atomic"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal/models"
)
type MetricMaker interface {
Name() string
MakeMetric(
measurement string,
fields map[string]interface{},
tags map[string]string,
mType telegraf.ValueType,
t time.Time,
) telegraf.Metric
}
func NewAccumulator(
inputConfig *models.InputConfig,
maker MetricMaker,
metrics chan telegraf.Metric,
) *accumulator {
acc := accumulator{}
acc.metrics = metrics
acc.inputConfig = inputConfig
acc.precision = time.Nanosecond
acc := accumulator{
maker: maker,
metrics: metrics,
precision: time.Nanosecond,
}
return &acc
}
type accumulator struct {
metrics chan telegraf.Metric
defaultTags map[string]string
debug bool
// print every point added to the accumulator
trace bool
inputConfig *models.InputConfig
maker MetricMaker
precision time.Duration
errCount uint64
}
func (ac *accumulator) Add(
measurement string,
value interface{},
tags map[string]string,
t ...time.Time,
) {
fields := make(map[string]interface{})
fields["value"] = value
if !ac.inputConfig.Filter.ShouldNamePass(measurement) {
return
}
ac.AddFields(measurement, fields, tags, t...)
}
func (ac *accumulator) AddFields(
measurement string,
fields map[string]interface{},
tags map[string]string,
t ...time.Time,
) {
if len(fields) == 0 || len(measurement) == 0 {
return
if m := ac.maker.MakeMetric(measurement, fields, tags, telegraf.Untyped, ac.getTime(t)); m != nil {
ac.metrics <- m
}
}
if !ac.inputConfig.Filter.ShouldNamePass(measurement) {
return
func (ac *accumulator) AddGauge(
measurement string,
fields map[string]interface{},
tags map[string]string,
t ...time.Time,
) {
if m := ac.maker.MakeMetric(measurement, fields, tags, telegraf.Gauge, ac.getTime(t)); m != nil {
ac.metrics <- m
}
}
if !ac.inputConfig.Filter.ShouldTagsPass(tags) {
return
func (ac *accumulator) AddCounter(
measurement string,
fields map[string]interface{},
tags map[string]string,
t ...time.Time,
) {
if m := ac.maker.MakeMetric(measurement, fields, tags, telegraf.Counter, ac.getTime(t)); m != nil {
ac.metrics <- m
}
// Override measurement name if set
if len(ac.inputConfig.NameOverride) != 0 {
measurement = ac.inputConfig.NameOverride
}
// Apply measurement prefix and suffix if set
if len(ac.inputConfig.MeasurementPrefix) != 0 {
measurement = ac.inputConfig.MeasurementPrefix + measurement
}
if len(ac.inputConfig.MeasurementSuffix) != 0 {
measurement = measurement + ac.inputConfig.MeasurementSuffix
}
if tags == nil {
tags = make(map[string]string)
}
// Apply plugin-wide tags if set
for k, v := range ac.inputConfig.Tags {
if _, ok := tags[k]; !ok {
tags[k] = v
}
}
// Apply daemon-wide tags if set
for k, v := range ac.defaultTags {
if _, ok := tags[k]; !ok {
tags[k] = v
}
}
ac.inputConfig.Filter.FilterTags(tags)
result := make(map[string]interface{})
for k, v := range fields {
// Filter out any filtered fields
if ac.inputConfig != nil {
if !ac.inputConfig.Filter.ShouldFieldsPass(k) {
continue
}
}
// Validate uint64 and float64 fields
switch val := v.(type) {
case uint64:
// InfluxDB does not support writing uint64
if val < uint64(9223372036854775808) {
result[k] = int64(val)
} else {
result[k] = int64(9223372036854775807)
}
continue
case float64:
// NaNs are invalid values in influxdb, skip measurement
if math.IsNaN(val) || math.IsInf(val, 0) {
if ac.debug {
log.Printf("Measurement [%s] field [%s] has a NaN or Inf "+
"field, skipping",
measurement, k)
}
continue
}
}
result[k] = v
}
fields = nil
if len(result) == 0 {
return
}
var timestamp time.Time
if len(t) > 0 {
timestamp = t[0]
} else {
timestamp = time.Now()
}
timestamp = timestamp.Round(ac.precision)
m, err := telegraf.NewMetric(measurement, tags, result, timestamp)
if err != nil {
log.Printf("Error adding point [%s]: %s\n", measurement, err.Error())
return
}
if ac.trace {
fmt.Println("> " + m.String())
}
ac.metrics <- m
}
// AddError passes a runtime error to the accumulator.
@@ -166,23 +82,7 @@ func (ac *accumulator) AddError(err error) {
}
atomic.AddUint64(&ac.errCount, 1)
//TODO suppress/throttle consecutive duplicate errors?
log.Printf("ERROR in input [%s]: %s", ac.inputConfig.Name, err)
}
func (ac *accumulator) Debug() bool {
return ac.debug
}
func (ac *accumulator) SetDebug(debug bool) {
ac.debug = debug
}
func (ac *accumulator) Trace() bool {
return ac.trace
}
func (ac *accumulator) SetTrace(trace bool) {
ac.trace = trace
log.Printf("E! Error in plugin [%s]: %s", ac.maker.Name(), err)
}
// SetPrecision takes two time.Duration objects. If the first is non-zero,
@@ -206,17 +106,12 @@ func (ac *accumulator) SetPrecision(precision, interval time.Duration) {
}
}
func (ac *accumulator) DisablePrecision() {
ac.precision = time.Nanosecond
}
func (ac *accumulator) setDefaultTags(tags map[string]string) {
ac.defaultTags = tags
}
func (ac *accumulator) addDefaultTag(key, value string) {
if ac.defaultTags == nil {
ac.defaultTags = make(map[string]string)
func (ac accumulator) getTime(t []time.Time) time.Time {
var timestamp time.Time
if len(t) > 0 {
timestamp = t[0]
} else {
timestamp = time.Now()
}
ac.defaultTags[key] = value
return timestamp.Round(ac.precision)
}

View File

@@ -4,82 +4,115 @@ import (
"bytes"
"fmt"
"log"
"math"
"os"
"testing"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal/models"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestAdd(t *testing.T) {
a := accumulator{}
now := time.Now()
a.metrics = make(chan telegraf.Metric, 10)
defer close(a.metrics)
a.inputConfig = &models.InputConfig{}
metrics := make(chan telegraf.Metric, 10)
defer close(metrics)
a := NewAccumulator(&TestMetricMaker{}, metrics)
a.Add("acctest", float64(101), map[string]string{})
a.Add("acctest", float64(101), map[string]string{"acc": "test"})
a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)
a.AddFields("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{})
a.AddFields("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{"acc": "test"})
a.AddFields("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{"acc": "test"}, now)
testm := <-a.metrics
testm := <-metrics
actual := testm.String()
assert.Contains(t, actual, "acctest value=101")
testm = <-a.metrics
testm = <-metrics
actual = testm.String()
assert.Contains(t, actual, "acctest,acc=test value=101")
testm = <-a.metrics
testm = <-metrics
actual = testm.String()
assert.Equal(t,
fmt.Sprintf("acctest,acc=test value=101 %d", now.UnixNano()),
actual)
}
func TestAddNoPrecisionWithInterval(t *testing.T) {
a := accumulator{}
now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
a.metrics = make(chan telegraf.Metric, 10)
defer close(a.metrics)
a.inputConfig = &models.InputConfig{}
func TestAddFields(t *testing.T) {
now := time.Now()
metrics := make(chan telegraf.Metric, 10)
defer close(metrics)
a := NewAccumulator(&TestMetricMaker{}, metrics)
a.SetPrecision(0, time.Second)
a.Add("acctest", float64(101), map[string]string{})
a.Add("acctest", float64(101), map[string]string{"acc": "test"})
a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)
fields := map[string]interface{}{
"usage": float64(99),
}
a.AddFields("acctest", fields, map[string]string{})
a.AddGauge("acctest", fields, map[string]string{"acc": "test"})
a.AddCounter("acctest", fields, map[string]string{"acc": "test"}, now)
testm := <-a.metrics
testm := <-metrics
actual := testm.String()
assert.Contains(t, actual, "acctest value=101")
assert.Contains(t, actual, "acctest usage=99")
testm = <-a.metrics
testm = <-metrics
actual = testm.String()
assert.Contains(t, actual, "acctest,acc=test value=101")
assert.Contains(t, actual, "acctest,acc=test usage=99")
testm = <-a.metrics
testm = <-metrics
actual = testm.String()
assert.Equal(t,
fmt.Sprintf("acctest,acc=test value=101 %d", int64(1139572800000000000)),
fmt.Sprintf("acctest,acc=test usage=99 %d", now.UnixNano()),
actual)
}
func TestAddNoIntervalWithPrecision(t *testing.T) {
a := accumulator{}
now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
a.metrics = make(chan telegraf.Metric, 10)
defer close(a.metrics)
a.inputConfig = &models.InputConfig{}
func TestAccAddError(t *testing.T) {
errBuf := bytes.NewBuffer(nil)
log.SetOutput(errBuf)
defer log.SetOutput(os.Stderr)
a.SetPrecision(time.Second, time.Millisecond)
a.Add("acctest", float64(101), map[string]string{})
a.Add("acctest", float64(101), map[string]string{"acc": "test"})
a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)
metrics := make(chan telegraf.Metric, 10)
defer close(metrics)
a := NewAccumulator(&TestMetricMaker{}, metrics)
a.AddError(fmt.Errorf("foo"))
a.AddError(fmt.Errorf("bar"))
a.AddError(fmt.Errorf("baz"))
errs := bytes.Split(errBuf.Bytes(), []byte{'\n'})
assert.EqualValues(t, 3, a.errCount)
require.Len(t, errs, 4) // 4 because of trailing newline
assert.Contains(t, string(errs[0]), "TestPlugin")
assert.Contains(t, string(errs[0]), "foo")
assert.Contains(t, string(errs[1]), "TestPlugin")
assert.Contains(t, string(errs[1]), "bar")
assert.Contains(t, string(errs[2]), "TestPlugin")
assert.Contains(t, string(errs[2]), "baz")
}
func TestAddNoIntervalWithPrecision(t *testing.T) {
now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
metrics := make(chan telegraf.Metric, 10)
defer close(metrics)
a := NewAccumulator(&TestMetricMaker{}, metrics)
a.SetPrecision(0, time.Second)
a.AddFields("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{})
a.AddFields("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{"acc": "test"})
a.AddFields("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{"acc": "test"}, now)
testm := <-a.metrics
actual := testm.String()
@@ -97,17 +130,21 @@ func TestAddNoIntervalWithPrecision(t *testing.T) {
}
func TestAddDisablePrecision(t *testing.T) {
a := accumulator{}
now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
a.metrics = make(chan telegraf.Metric, 10)
defer close(a.metrics)
a.inputConfig = &models.InputConfig{}
metrics := make(chan telegraf.Metric, 10)
defer close(metrics)
a := NewAccumulator(&TestMetricMaker{}, metrics)
a.SetPrecision(time.Second, time.Millisecond)
a.DisablePrecision()
a.Add("acctest", float64(101), map[string]string{})
a.Add("acctest", float64(101), map[string]string{"acc": "test"})
a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)
a.SetPrecision(time.Nanosecond, 0)
a.AddFields("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{})
a.AddFields("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{"acc": "test"})
a.AddFields("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{"acc": "test"}, now)
testm := <-a.metrics
actual := testm.String()
@@ -124,15 +161,48 @@ func TestAddDisablePrecision(t *testing.T) {
actual)
}
func TestDifferentPrecisions(t *testing.T) {
a := accumulator{}
func TestAddNoPrecisionWithInterval(t *testing.T) {
now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
a.metrics = make(chan telegraf.Metric, 10)
defer close(a.metrics)
a.inputConfig = &models.InputConfig{}
metrics := make(chan telegraf.Metric, 10)
defer close(metrics)
a := NewAccumulator(&TestMetricMaker{}, metrics)
a.SetPrecision(0, time.Second)
a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)
a.AddFields("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{})
a.AddFields("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{"acc": "test"})
a.AddFields("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{"acc": "test"}, now)
testm := <-a.metrics
actual := testm.String()
assert.Contains(t, actual, "acctest value=101")
testm = <-a.metrics
actual = testm.String()
assert.Contains(t, actual, "acctest,acc=test value=101")
testm = <-a.metrics
actual = testm.String()
assert.Equal(t,
fmt.Sprintf("acctest,acc=test value=101 %d", int64(1139572800000000000)),
actual)
}
func TestDifferentPrecisions(t *testing.T) {
now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
metrics := make(chan telegraf.Metric, 10)
defer close(metrics)
a := NewAccumulator(&TestMetricMaker{}, metrics)
a.SetPrecision(0, time.Second)
a.AddFields("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{"acc": "test"}, now)
testm := <-a.metrics
actual := testm.String()
assert.Equal(t,
@@ -140,7 +210,9 @@ func TestDifferentPrecisions(t *testing.T) {
actual)
a.SetPrecision(0, time.Millisecond)
a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)
a.AddFields("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{"acc": "test"}, now)
testm = <-a.metrics
actual = testm.String()
assert.Equal(t,
@@ -148,7 +220,9 @@ func TestDifferentPrecisions(t *testing.T) {
actual)
a.SetPrecision(0, time.Microsecond)
a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)
a.AddFields("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{"acc": "test"}, now)
testm = <-a.metrics
actual = testm.String()
assert.Equal(t,
@@ -156,7 +230,9 @@ func TestDifferentPrecisions(t *testing.T) {
actual)
a.SetPrecision(0, time.Nanosecond)
a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)
a.AddFields("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{"acc": "test"}, now)
testm = <-a.metrics
actual = testm.String()
assert.Equal(t,
@@ -164,321 +240,100 @@ func TestDifferentPrecisions(t *testing.T) {
actual)
}
func TestAddDefaultTags(t *testing.T) {
a := accumulator{}
a.addDefaultTag("default", "tag")
func TestAddGauge(t *testing.T) {
now := time.Now()
a.metrics = make(chan telegraf.Metric, 10)
defer close(a.metrics)
a.inputConfig = &models.InputConfig{}
metrics := make(chan telegraf.Metric, 10)
defer close(metrics)
a := NewAccumulator(&TestMetricMaker{}, metrics)
a.Add("acctest", float64(101), map[string]string{})
a.Add("acctest", float64(101), map[string]string{"acc": "test"})
a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)
a.AddGauge("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{})
a.AddGauge("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{"acc": "test"})
a.AddGauge("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{"acc": "test"}, now)
testm := <-a.metrics
actual := testm.String()
assert.Contains(t, actual, "acctest,default=tag value=101")
testm = <-a.metrics
actual = testm.String()
assert.Contains(t, actual, "acctest,acc=test,default=tag value=101")
testm = <-a.metrics
actual = testm.String()
assert.Equal(t,
fmt.Sprintf("acctest,acc=test,default=tag value=101 %d", now.UnixNano()),
actual)
}
func TestAddFields(t *testing.T) {
a := accumulator{}
now := time.Now()
a.metrics = make(chan telegraf.Metric, 10)
defer close(a.metrics)
a.inputConfig = &models.InputConfig{}
fields := map[string]interface{}{
"usage": float64(99),
}
a.AddFields("acctest", fields, map[string]string{})
a.AddFields("acctest", fields, map[string]string{"acc": "test"})
a.AddFields("acctest", fields, map[string]string{"acc": "test"}, now)
testm := <-a.metrics
actual := testm.String()
assert.Contains(t, actual, "acctest usage=99")
testm = <-a.metrics
actual = testm.String()
assert.Contains(t, actual, "acctest,acc=test usage=99")
testm = <-a.metrics
actual = testm.String()
assert.Equal(t,
fmt.Sprintf("acctest,acc=test usage=99 %d", now.UnixNano()),
actual)
}
// Test that all Inf fields get dropped, and not added to metrics channel
func TestAddInfFields(t *testing.T) {
inf := math.Inf(1)
ninf := math.Inf(-1)
a := accumulator{}
now := time.Now()
a.metrics = make(chan telegraf.Metric, 10)
defer close(a.metrics)
a.inputConfig = &models.InputConfig{}
fields := map[string]interface{}{
"usage": inf,
"nusage": ninf,
}
a.AddFields("acctest", fields, map[string]string{})
a.AddFields("acctest", fields, map[string]string{"acc": "test"})
a.AddFields("acctest", fields, map[string]string{"acc": "test"}, now)
assert.Len(t, a.metrics, 0)
// test that non-inf fields are kept and not dropped
fields["notinf"] = float64(100)
a.AddFields("acctest", fields, map[string]string{})
testm := <-a.metrics
actual := testm.String()
assert.Contains(t, actual, "acctest notinf=100")
}
// Test that nan fields are dropped and not added
func TestAddNaNFields(t *testing.T) {
nan := math.NaN()
a := accumulator{}
now := time.Now()
a.metrics = make(chan telegraf.Metric, 10)
defer close(a.metrics)
a.inputConfig = &models.InputConfig{}
fields := map[string]interface{}{
"usage": nan,
}
a.AddFields("acctest", fields, map[string]string{})
a.AddFields("acctest", fields, map[string]string{"acc": "test"})
a.AddFields("acctest", fields, map[string]string{"acc": "test"}, now)
assert.Len(t, a.metrics, 0)
// test that non-nan fields are kept and not dropped
fields["notnan"] = float64(100)
a.AddFields("acctest", fields, map[string]string{})
testm := <-a.metrics
actual := testm.String()
assert.Contains(t, actual, "acctest notnan=100")
}
func TestAddUint64Fields(t *testing.T) {
a := accumulator{}
now := time.Now()
a.metrics = make(chan telegraf.Metric, 10)
defer close(a.metrics)
a.inputConfig = &models.InputConfig{}
fields := map[string]interface{}{
"usage": uint64(99),
}
a.AddFields("acctest", fields, map[string]string{})
a.AddFields("acctest", fields, map[string]string{"acc": "test"})
a.AddFields("acctest", fields, map[string]string{"acc": "test"}, now)
testm := <-a.metrics
actual := testm.String()
assert.Contains(t, actual, "acctest usage=99i")
testm = <-a.metrics
actual = testm.String()
assert.Contains(t, actual, "acctest,acc=test usage=99i")
testm = <-a.metrics
actual = testm.String()
assert.Equal(t,
fmt.Sprintf("acctest,acc=test usage=99i %d", now.UnixNano()),
actual)
}
func TestAddUint64Overflow(t *testing.T) {
a := accumulator{}
now := time.Now()
a.metrics = make(chan telegraf.Metric, 10)
defer close(a.metrics)
a.inputConfig = &models.InputConfig{}
fields := map[string]interface{}{
"usage": uint64(9223372036854775808),
}
a.AddFields("acctest", fields, map[string]string{})
a.AddFields("acctest", fields, map[string]string{"acc": "test"})
a.AddFields("acctest", fields, map[string]string{"acc": "test"}, now)
testm := <-a.metrics
actual := testm.String()
assert.Contains(t, actual, "acctest usage=9223372036854775807i")
testm = <-a.metrics
actual = testm.String()
assert.Contains(t, actual, "acctest,acc=test usage=9223372036854775807i")
testm = <-a.metrics
actual = testm.String()
assert.Equal(t,
fmt.Sprintf("acctest,acc=test usage=9223372036854775807i %d", now.UnixNano()),
actual)
}
func TestAddInts(t *testing.T) {
a := accumulator{}
a.addDefaultTag("default", "tag")
now := time.Now()
a.metrics = make(chan telegraf.Metric, 10)
defer close(a.metrics)
a.inputConfig = &models.InputConfig{}
a.Add("acctest", int(101), map[string]string{})
a.Add("acctest", int32(101), map[string]string{"acc": "test"})
a.Add("acctest", int64(101), map[string]string{"acc": "test"}, now)
testm := <-a.metrics
actual := testm.String()
assert.Contains(t, actual, "acctest,default=tag value=101i")
testm = <-a.metrics
actual = testm.String()
assert.Contains(t, actual, "acctest,acc=test,default=tag value=101i")
testm = <-a.metrics
actual = testm.String()
assert.Equal(t,
fmt.Sprintf("acctest,acc=test,default=tag value=101i %d", now.UnixNano()),
actual)
}
func TestAddFloats(t *testing.T) {
a := accumulator{}
a.addDefaultTag("default", "tag")
now := time.Now()
a.metrics = make(chan telegraf.Metric, 10)
defer close(a.metrics)
a.inputConfig = &models.InputConfig{}
a.Add("acctest", float32(101), map[string]string{"acc": "test"})
a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)
testm := <-a.metrics
actual := testm.String()
assert.Contains(t, actual, "acctest,acc=test,default=tag value=101")
testm = <-a.metrics
actual = testm.String()
assert.Equal(t,
fmt.Sprintf("acctest,acc=test,default=tag value=101 %d", now.UnixNano()),
actual)
}
func TestAddStrings(t *testing.T) {
a := accumulator{}
a.addDefaultTag("default", "tag")
now := time.Now()
a.metrics = make(chan telegraf.Metric, 10)
defer close(a.metrics)
a.inputConfig = &models.InputConfig{}
a.Add("acctest", "test", map[string]string{"acc": "test"})
a.Add("acctest", "foo", map[string]string{"acc": "test"}, now)
testm := <-a.metrics
actual := testm.String()
assert.Contains(t, actual, "acctest,acc=test,default=tag value=\"test\"")
testm = <-a.metrics
actual = testm.String()
assert.Equal(t,
fmt.Sprintf("acctest,acc=test,default=tag value=\"foo\" %d", now.UnixNano()),
actual)
}
func TestAddBools(t *testing.T) {
a := accumulator{}
a.addDefaultTag("default", "tag")
now := time.Now()
a.metrics = make(chan telegraf.Metric, 10)
defer close(a.metrics)
a.inputConfig = &models.InputConfig{}
a.Add("acctest", true, map[string]string{"acc": "test"})
a.Add("acctest", false, map[string]string{"acc": "test"}, now)
testm := <-a.metrics
actual := testm.String()
assert.Contains(t, actual, "acctest,acc=test,default=tag value=true")
testm = <-a.metrics
actual = testm.String()
assert.Equal(t,
fmt.Sprintf("acctest,acc=test,default=tag value=false %d", now.UnixNano()),
actual)
}
// Test that tag filters get applied to metrics.
func TestAccFilterTags(t *testing.T) {
a := accumulator{}
now := time.Now()
a.metrics = make(chan telegraf.Metric, 10)
defer close(a.metrics)
filter := models.Filter{
TagExclude: []string{"acc"},
}
assert.NoError(t, filter.CompileFilter())
a.inputConfig = &models.InputConfig{}
a.inputConfig.Filter = filter
a.Add("acctest", float64(101), map[string]string{})
a.Add("acctest", float64(101), map[string]string{"acc": "test"})
a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)
testm := <-a.metrics
testm := <-metrics
actual := testm.String()
assert.Contains(t, actual, "acctest value=101")
assert.Equal(t, testm.Type(), telegraf.Gauge)
testm = <-a.metrics
testm = <-metrics
actual = testm.String()
assert.Contains(t, actual, "acctest value=101")
assert.Contains(t, actual, "acctest,acc=test value=101")
assert.Equal(t, testm.Type(), telegraf.Gauge)
testm = <-a.metrics
testm = <-metrics
actual = testm.String()
assert.Equal(t,
fmt.Sprintf("acctest value=101 %d", now.UnixNano()),
fmt.Sprintf("acctest,acc=test value=101 %d", now.UnixNano()),
actual)
assert.Equal(t, testm.Type(), telegraf.Gauge)
}
func TestAccAddError(t *testing.T) {
errBuf := bytes.NewBuffer(nil)
log.SetOutput(errBuf)
defer log.SetOutput(os.Stderr)
func TestAddCounter(t *testing.T) {
now := time.Now()
metrics := make(chan telegraf.Metric, 10)
defer close(metrics)
a := NewAccumulator(&TestMetricMaker{}, metrics)
a := accumulator{}
a.inputConfig = &models.InputConfig{}
a.inputConfig.Name = "mock_plugin"
a.AddCounter("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{})
a.AddCounter("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{"acc": "test"})
a.AddCounter("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{"acc": "test"}, now)
a.AddError(fmt.Errorf("foo"))
a.AddError(fmt.Errorf("bar"))
a.AddError(fmt.Errorf("baz"))
testm := <-metrics
actual := testm.String()
assert.Contains(t, actual, "acctest value=101")
assert.Equal(t, testm.Type(), telegraf.Counter)
errs := bytes.Split(errBuf.Bytes(), []byte{'\n'})
assert.EqualValues(t, 3, a.errCount)
require.Len(t, errs, 4) // 4 because of trailing newline
assert.Contains(t, string(errs[0]), "mock_plugin")
assert.Contains(t, string(errs[0]), "foo")
assert.Contains(t, string(errs[1]), "mock_plugin")
assert.Contains(t, string(errs[1]), "bar")
assert.Contains(t, string(errs[2]), "mock_plugin")
assert.Contains(t, string(errs[2]), "baz")
testm = <-metrics
actual = testm.String()
assert.Contains(t, actual, "acctest,acc=test value=101")
assert.Equal(t, testm.Type(), telegraf.Counter)
testm = <-metrics
actual = testm.String()
assert.Equal(t,
fmt.Sprintf("acctest,acc=test value=101 %d", now.UnixNano()),
actual)
assert.Equal(t, testm.Type(), telegraf.Counter)
}
type TestMetricMaker struct {
}
func (tm *TestMetricMaker) Name() string {
return "TestPlugin"
}
func (tm *TestMetricMaker) MakeMetric(
measurement string,
fields map[string]interface{},
tags map[string]string,
mType telegraf.ValueType,
t time.Time,
) telegraf.Metric {
switch mType {
case telegraf.Untyped:
if m, err := telegraf.NewMetric(measurement, tags, fields, t); err == nil {
return m
}
case telegraf.Counter:
if m, err := telegraf.NewCounterMetric(measurement, tags, fields, t); err == nil {
return m
}
case telegraf.Gauge:
if m, err := telegraf.NewGaugeMetric(measurement, tags, fields, t); err == nil {
return m
}
}
return nil
}

View File

@@ -49,18 +49,16 @@ func (a *Agent) Connect() error {
switch ot := o.Output.(type) {
case telegraf.ServiceOutput:
if err := ot.Start(); err != nil {
log.Printf("Service for output %s failed to start, exiting\n%s\n",
log.Printf("E! Service for output %s failed to start, exiting\n%s\n",
o.Name, err.Error())
return err
}
}
if a.Config.Agent.Debug {
log.Printf("Attempting connection to output: %s\n", o.Name)
}
log.Printf("D! Attempting connection to output: %s\n", o.Name)
err := o.Output.Connect()
if err != nil {
log.Printf("Failed to connect to output %s, retrying in 15s, "+
log.Printf("E! Failed to connect to output %s, retrying in 15s, "+
"error was '%s' \n", o.Name, err)
time.Sleep(15 * time.Second)
err = o.Output.Connect()
@@ -68,9 +66,7 @@ func (a *Agent) Connect() error {
return err
}
}
if a.Config.Agent.Debug {
log.Printf("Successfully connected to output: %s\n", o.Name)
}
log.Printf("D! Successfully connected to output: %s\n", o.Name)
}
return nil
}
@@ -92,9 +88,9 @@ func panicRecover(input *models.RunningInput) {
if err := recover(); err != nil {
trace := make([]byte, 2048)
runtime.Stack(trace, true)
log.Printf("FATAL: Input [%s] panicked: %s, Stack:\n%s\n",
input.Name, err, trace)
log.Println("PLEASE REPORT THIS PANIC ON GITHUB with " +
log.Printf("E! FATAL: Input [%s] panicked: %s, Stack:\n%s\n",
input.Name(), err, trace)
log.Println("E! PLEASE REPORT THIS PANIC ON GITHUB with " +
"stack trace, configuration, and OS information: " +
"https://github.com/influxdata/telegraf/issues/new")
}
@@ -107,20 +103,18 @@ func (a *Agent) gatherer(
input *models.RunningInput,
interval time.Duration,
metricC chan telegraf.Metric,
) error {
) {
defer panicRecover(input)
ticker := time.NewTicker(interval)
defer ticker.Stop()
for {
var outerr error
acc := NewAccumulator(input.Config, metricC)
acc.SetDebug(a.Config.Agent.Debug)
acc := NewAccumulator(input, metricC)
acc.SetPrecision(a.Config.Agent.Precision.Duration,
a.Config.Agent.Interval.Duration)
acc.setDefaultTags(a.Config.Tags)
input.SetDebug(a.Config.Agent.Debug)
input.SetDefaultTags(a.Config.Tags)
internal.RandomSleep(a.Config.Agent.CollectionJitter.Duration, shutdown)
@@ -128,17 +122,12 @@ func (a *Agent) gatherer(
gatherWithTimeout(shutdown, input, acc, interval)
elapsed := time.Since(start)
if outerr != nil {
return outerr
}
if a.Config.Agent.Debug {
log.Printf("Input [%s] gathered metrics, (%s interval) in %s\n",
input.Name, interval, elapsed)
}
log.Printf("D! Input [%s] gathered metrics, (%s interval) in %s\n",
input.Name(), interval, elapsed)
select {
case <-shutdown:
return nil
return
case <-ticker.C:
continue
}
@@ -167,13 +156,13 @@ func gatherWithTimeout(
select {
case err := <-done:
if err != nil {
log.Printf("ERROR in input [%s]: %s", input.Name, err)
log.Printf("E! ERROR in input [%s]: %s", input.Name(), err)
}
return
case <-ticker.C:
log.Printf("ERROR: input [%s] took longer to collect than "+
log.Printf("E! ERROR: input [%s] took longer to collect than "+
"collection interval (%s)",
input.Name, timeout)
input.Name(), timeout)
continue
case <-shutdown:
return
@@ -201,13 +190,13 @@ func (a *Agent) Test() error {
}()
for _, input := range a.Config.Inputs {
acc := NewAccumulator(input.Config, metricC)
acc.SetTrace(true)
acc := NewAccumulator(input, metricC)
acc.SetPrecision(a.Config.Agent.Precision.Duration,
a.Config.Agent.Interval.Duration)
acc.setDefaultTags(a.Config.Tags)
input.SetTrace(true)
input.SetDefaultTags(a.Config.Tags)
fmt.Printf("* Plugin: %s, Collection 1\n", input.Name)
fmt.Printf("* Plugin: %s, Collection 1\n", input.Name())
if input.Config.Interval != 0 {
fmt.Printf("* Internal: %s\n", input.Config.Interval)
}
@@ -221,10 +210,10 @@ func (a *Agent) Test() error {
// Special instructions for some inputs. cpu, for example, needs to be
// run twice in order to return cpu usage percentages.
switch input.Name {
switch input.Name() {
case "cpu", "mongodb", "procstat":
time.Sleep(500 * time.Millisecond)
fmt.Printf("* Plugin: %s, Collection 2\n", input.Name)
fmt.Printf("* Plugin: %s, Collection 2\n", input.Name())
if err := input.Input.Gather(acc); err != nil {
return err
}
@@ -244,7 +233,7 @@ func (a *Agent) flush() {
defer wg.Done()
err := output.Write()
if err != nil {
log.Printf("Error writing to output [%s]: %s\n",
log.Printf("E! Error writing to output [%s]: %s\n",
output.Name, err.Error())
}
}(o)
@@ -257,31 +246,146 @@ func (a *Agent) flush() {
func (a *Agent) flusher(shutdown chan struct{}, metricC chan telegraf.Metric) error {
// Inelegant, but this sleep is to allow the Gather threads to run, so that
// the flusher will flush after metrics are collected.
time.Sleep(time.Millisecond * 200)
time.Sleep(time.Millisecond * 300)
// create an output metric channel and a goroutine that continuously passes
// each metric onto the output plugins & aggregators.
outMetricC := make(chan telegraf.Metric, 100)
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
for {
select {
case <-shutdown:
if len(outMetricC) > 0 {
// keep going until outMetricC is flushed
continue
}
return
case m := <-outMetricC:
// if dropOriginal is set to true, then we will only send this
// metric to the aggregators, not the outputs.
var dropOriginal bool
if !m.IsAggregate() {
for _, agg := range a.Config.Aggregators {
if ok := agg.Add(copyMetric(m)); ok {
dropOriginal = true
}
}
}
if !dropOriginal {
for i, o := range a.Config.Outputs {
if i == len(a.Config.Outputs)-1 {
o.AddMetric(m)
} else {
o.AddMetric(copyMetric(m))
}
}
}
}
}
}()
ticker := time.NewTicker(a.Config.Agent.FlushInterval.Duration)
for {
select {
case <-shutdown:
log.Println("Hang on, flushing any cached metrics before shutdown")
log.Println("I! Hang on, flushing any cached metrics before shutdown")
// wait for outMetricC to get flushed before flushing outputs
wg.Wait()
a.flush()
return nil
case <-ticker.C:
internal.RandomSleep(a.Config.Agent.FlushJitter.Duration, shutdown)
a.flush()
case m := <-metricC:
for i, o := range a.Config.Outputs {
if i == len(a.Config.Outputs)-1 {
o.AddMetric(m)
} else {
o.AddMetric(copyMetric(m))
}
case metric := <-metricC:
// NOTE potential bottleneck here as we put each metric through the
// processors serially.
mS := []telegraf.Metric{metric}
for _, processor := range a.Config.Processors {
mS = processor.Apply(mS...)
}
for _, m := range mS {
outMetricC <- m
}
}
}
}
// Run runs the agent daemon, gathering every Interval
func (a *Agent) Run(shutdown chan struct{}) error {
var wg sync.WaitGroup
log.Printf("I! Agent Config: Interval:%s, Quiet:%#v, Hostname:%#v, "+
"Flush Interval:%s \n",
a.Config.Agent.Interval.Duration, a.Config.Agent.Quiet,
a.Config.Agent.Hostname, a.Config.Agent.FlushInterval.Duration)
// channel shared between all input threads for accumulating metrics
metricC := make(chan telegraf.Metric, 100)
// Start all ServicePlugins
for _, input := range a.Config.Inputs {
switch p := input.Input.(type) {
case telegraf.ServiceInput:
acc := NewAccumulator(input, metricC)
// Service input plugins should set their own precision of their
// metrics.
acc.SetPrecision(time.Nanosecond, 0)
input.SetDefaultTags(a.Config.Tags)
if err := p.Start(acc); err != nil {
log.Printf("E! Service for input %s failed to start, exiting\n%s\n",
input.Name(), err.Error())
return err
}
defer p.Stop()
}
}
// Round collection to nearest interval by sleeping
if a.Config.Agent.RoundInterval {
i := int64(a.Config.Agent.Interval.Duration)
time.Sleep(time.Duration(i - (time.Now().UnixNano() % i)))
}
wg.Add(1)
go func() {
defer wg.Done()
if err := a.flusher(shutdown, metricC); err != nil {
log.Printf("E! Flusher routine failed, exiting: %s\n", err.Error())
close(shutdown)
}
}()
wg.Add(len(a.Config.Aggregators))
for _, aggregator := range a.Config.Aggregators {
go func(agg *models.RunningAggregator) {
defer wg.Done()
acc := NewAccumulator(agg, metricC)
acc.SetPrecision(a.Config.Agent.Precision.Duration,
a.Config.Agent.Interval.Duration)
agg.Run(acc, shutdown)
}(aggregator)
}
wg.Add(len(a.Config.Inputs))
for _, input := range a.Config.Inputs {
interval := a.Config.Agent.Interval.Duration
// overwrite global interval if this plugin has its own.
if input.Config.Interval != 0 {
interval = input.Config.Interval
}
go func(in *models.RunningInput, interv time.Duration) {
defer wg.Done()
a.gatherer(shutdown, in, interv, metricC)
}(input, interval)
}
wg.Wait()
return nil
}
func copyMetric(m telegraf.Metric) telegraf.Metric {
t := time.Time(m.Time())
@@ -297,68 +401,3 @@ func copyMetric(m telegraf.Metric) telegraf.Metric {
out, _ := telegraf.NewMetric(m.Name(), tags, fields, t)
return out
}
// Run runs the agent daemon, gathering every Interval
func (a *Agent) Run(shutdown chan struct{}) error {
var wg sync.WaitGroup
log.Printf("Agent Config: Interval:%s, Debug:%#v, Quiet:%#v, Hostname:%#v, "+
"Flush Interval:%s \n",
a.Config.Agent.Interval.Duration, a.Config.Agent.Debug, a.Config.Agent.Quiet,
a.Config.Agent.Hostname, a.Config.Agent.FlushInterval.Duration)
// channel shared between all input threads for accumulating metrics
metricC := make(chan telegraf.Metric, 10000)
for _, input := range a.Config.Inputs {
// Start service of any ServicePlugins
switch p := input.Input.(type) {
case telegraf.ServiceInput:
acc := NewAccumulator(input.Config, metricC)
acc.SetDebug(a.Config.Agent.Debug)
// Service input plugins should set their own precision of their
// metrics.
acc.DisablePrecision()
acc.setDefaultTags(a.Config.Tags)
if err := p.Start(acc); err != nil {
log.Printf("Service for input %s failed to start, exiting\n%s\n",
input.Name, err.Error())
return err
}
defer p.Stop()
}
}
// Round collection to nearest interval by sleeping
if a.Config.Agent.RoundInterval {
i := int64(a.Config.Agent.Interval.Duration)
time.Sleep(time.Duration(i - (time.Now().UnixNano() % i)))
}
wg.Add(1)
go func() {
defer wg.Done()
if err := a.flusher(shutdown, metricC); err != nil {
log.Printf("Flusher routine failed, exiting: %s\n", err.Error())
close(shutdown)
}
}()
wg.Add(len(a.Config.Inputs))
for _, input := range a.Config.Inputs {
interval := a.Config.Agent.Interval.Duration
// overwrite global interval if this plugin has it's own.
if input.Config.Interval != 0 {
interval = input.Config.Interval
}
go func(in *models.RunningInput, interv time.Duration) {
defer wg.Done()
if err := a.gatherer(shutdown, in, interv, metricC); err != nil {
log.Printf(err.Error())
}
}(input, interval)
}
wg.Wait()
return nil
}

22
aggregator.go Normal file
View File

@@ -0,0 +1,22 @@
package telegraf
// Aggregator is an interface for implementing an Aggregator plugin.
// The RunningAggregator wraps this interface and guarantees that
// Add, Push, and Reset can not be called concurrently, so locking is not
// required when implementing an Aggregator plugin.
type Aggregator interface {
// SampleConfig returns the default configuration of the Aggregator.
SampleConfig() string
// Description returns a one-sentence description of the Aggregator.
Description() string
// Add the metric to the aggregator.
Add(in Metric)
// Push pushes the current aggregates to the accumulator.
Push(acc Accumulator)
// Reset resets the aggregator's caches and aggregates.
Reset()
}
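// Illustrative sketch (not part of this file): a hypothetical aggregator that
// counts metrics per measurement name, showing how Add, Push, and Reset fit
// together. Because the RunningAggregator serializes these calls, no locking
// is needed.
package exampleaggregator

import "github.com/influxdata/telegraf"

type CountAggregator struct {
	counts map[string]int64
}

func (c *CountAggregator) Description() string  { return "an invented example aggregator" }
func (c *CountAggregator) SampleConfig() string { return "" }

// Add caches one incoming metric.
func (c *CountAggregator) Add(in telegraf.Metric) {
	if c.counts == nil {
		c.counts = make(map[string]int64)
	}
	c.counts[in.Name()]++
}

// Push emits the current aggregates to the accumulator.
func (c *CountAggregator) Push(acc telegraf.Accumulator) {
	for name, n := range c.counts {
		acc.AddFields(name+"_count", map[string]interface{}{"count": n}, map[string]string{})
	}
}

// Reset clears the cache at the end of each period.
func (c *CountAggregator) Reset() {
	c.counts = make(map[string]int64)
}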

View File

@@ -4,17 +4,14 @@ machine:
post:
- sudo service zookeeper stop
- go version
- go version | grep 1.6.2 || sudo rm -rf /usr/local/go
- wget https://storage.googleapis.com/golang/go1.6.2.linux-amd64.tar.gz
- sudo tar -C /usr/local -xzf go1.6.2.linux-amd64.tar.gz
- go version | grep 1.7.3 || sudo rm -rf /usr/local/go
- wget https://storage.googleapis.com/golang/go1.7.3.linux-amd64.tar.gz
- sudo tar -C /usr/local -xzf go1.7.3.linux-amd64.tar.gz
- go version
dependencies:
override:
- docker info
post:
- gem install fpm
- sudo apt-get install -y rpm python-boto
test:
override:

View File

@@ -12,15 +12,18 @@ import (
"github.com/influxdata/telegraf/agent"
"github.com/influxdata/telegraf/internal/config"
"github.com/influxdata/telegraf/logger"
_ "github.com/influxdata/telegraf/plugins/aggregators/all"
"github.com/influxdata/telegraf/plugins/inputs"
_ "github.com/influxdata/telegraf/plugins/inputs/all"
"github.com/influxdata/telegraf/plugins/outputs"
_ "github.com/influxdata/telegraf/plugins/outputs/all"
_ "github.com/influxdata/telegraf/plugins/processors/all"
"github.com/kardianos/service"
)
var fDebug = flag.Bool("debug", false,
"show metrics as they're generated to stdout")
"turn on debug logging")
var fQuiet = flag.Bool("quiet", false,
"run in quiet mode")
var fTest = flag.Bool("test", false, "gather metrics, print them out, and exit")
@@ -39,6 +42,10 @@ var fOutputFilters = flag.String("output-filter", "",
"filter the outputs to enable, separator is :")
var fOutputList = flag.Bool("output-list", false,
"print available output plugins.")
var fAggregatorFilters = flag.String("aggregator-filter", "",
"filter the aggregators to enable, separator is :")
var fProcessorFilters = flag.String("processor-filter", "",
"filter the processors to enable, separator is :")
var fUsage = flag.String("usage", "",
"print usage for a plugin, ie, 'telegraf -usage mysql'")
var fService = flag.String("service", "",
@@ -52,59 +59,57 @@ var (
branch string
)
func init() {
// If commit or branch are not set, make that clear.
if commit == "" {
commit = "unknown"
}
if branch == "" {
branch = "unknown"
}
}
const usage = `Telegraf, The plugin-driven server agent for collecting and reporting metrics.
Usage:
telegraf <flags>
telegraf [commands|flags]
The flags are:
The commands & flags are:
-config <file> configuration file to load
-test gather metrics once, print them to stdout, and exit
-sample-config print out full sample configuration to stdout
-config-directory directory containing additional *.conf files
-input-filter filter the input plugins to enable, separator is :
-input-list print all the plugins inputs
-output-filter filter the output plugins to enable, separator is :
-output-list print all the available outputs
-usage print usage for a plugin, ie, 'telegraf -usage mysql'
-debug print metrics as they're generated to stdout
-quiet run in quiet mode
-version print the version to stdout
-service Control the service, ie, 'telegraf -service install (windows only)'
config print out full sample configuration to stdout
version print the version to stdout
In addition to the -config flag, telegraf will also load the config file from
an environment variable or default location. Precedence is:
1. -config flag
2. $TELEGRAF_CONFIG_PATH environment variable
3. $HOME/.telegraf/telegraf.conf
4. /etc/telegraf/telegraf.conf
--config <file> configuration file to load
--test gather metrics once, print them to stdout, and exit
--config-directory directory containing additional *.conf files
--input-filter filter the input plugins to enable, separator is :
--output-filter filter the output plugins to enable, separator is :
--usage print usage for a plugin, ie, 'telegraf --usage mysql'
--debug print metrics as they're generated to stdout
--quiet run in quiet mode
Examples:
# generate a telegraf config file:
telegraf -sample-config > telegraf.conf
telegraf config > telegraf.conf
# generate config with only cpu input & influxdb output plugins defined
telegraf -sample-config -input-filter cpu -output-filter influxdb
telegraf --input-filter cpu --output-filter influxdb config
# run a single telegraf collection, outputting metrics to stdout
telegraf -config telegraf.conf -test
telegraf --config telegraf.conf -test
# run telegraf with all plugins defined in config file
telegraf -config telegraf.conf
telegraf --config telegraf.conf
# run telegraf, enabling the cpu & memory input, and influxdb output plugins
telegraf -config telegraf.conf -input-filter cpu:mem -output-filter influxdb
telegraf --config telegraf.conf --input-filter cpu:mem --output-filter influxdb
`
var logger service.Logger
var stop chan struct{}
var srvc service.Service
var svcConfig *service.Config
type program struct{}
@@ -119,7 +124,6 @@ func reloadLoop(stop chan struct{}, s service.Service) {
reload <- true
for <-reload {
reload <- false
flag.Usage = func() { usageExit(0) }
flag.Parse()
args := flag.Args()
@@ -133,15 +137,29 @@ func reloadLoop(stop chan struct{}, s service.Service) {
outputFilter := strings.TrimSpace(*fOutputFilters)
outputFilters = strings.Split(":"+outputFilter+":", ":")
}
var aggregatorFilters []string
if *fAggregatorFilters != "" {
aggregatorFilter := strings.TrimSpace(*fAggregatorFilters)
aggregatorFilters = strings.Split(":"+aggregatorFilter+":", ":")
}
var processorFilters []string
if *fProcessorFilters != "" {
processorFilter := strings.TrimSpace(*fProcessorFilters)
processorFilters = strings.Split(":"+processorFilter+":", ":")
}
if len(args) > 0 {
switch args[0] {
case "version":
v := fmt.Sprintf("Telegraf - version %s", version)
fmt.Println(v)
fmt.Printf("Telegraf v%s (git: %s %s)\n", version, branch, commit)
return
case "config":
config.PrintSampleConfig(inputFilters, outputFilters)
config.PrintSampleConfig(
inputFilters,
outputFilters,
aggregatorFilters,
processorFilters,
)
return
}
}
@@ -161,28 +179,23 @@ func reloadLoop(stop chan struct{}, s service.Service) {
}
return
case *fVersion:
v := fmt.Sprintf("Telegraf - version %s", version)
fmt.Println(v)
fmt.Printf("Telegraf v%s (git: %s %s)\n", version, branch, commit)
return
case *fSampleConfig:
config.PrintSampleConfig(inputFilters, outputFilters)
config.PrintSampleConfig(
inputFilters,
outputFilters,
aggregatorFilters,
processorFilters,
)
return
case *fUsage != "":
if err := config.PrintInputConfig(*fUsage); err != nil {
if err2 := config.PrintOutputConfig(*fUsage); err2 != nil {
log.Fatalf("%s and %s", err, err2)
log.Fatalf("E! %s and %s", err, err2)
}
}
return
case *fService != "" && runtime.GOOS == "windows":
if *fConfig != "" {
(*svcConfig).Arguments = []string{"-config", *fConfig}
}
err := service.Control(s, *fService)
if err != nil {
log.Fatal(err)
}
return
}
// If no other options are specified, load the config file and run.
@@ -191,47 +204,45 @@ func reloadLoop(stop chan struct{}, s service.Service) {
c.InputFilters = inputFilters
err := c.LoadConfig(*fConfig)
if err != nil {
fmt.Println(err)
os.Exit(1)
log.Fatal("E! " + err.Error())
}
if *fConfigDirectory != "" {
err = c.LoadDirectory(*fConfigDirectory)
if err != nil {
log.Fatal(err)
log.Fatal("E! " + err.Error())
}
}
if len(c.Outputs) == 0 {
log.Fatalf("Error: no outputs found, did you provide a valid config file?")
log.Fatalf("E! Error: no outputs found, did you provide a valid config file?")
}
if len(c.Inputs) == 0 {
log.Fatalf("Error: no inputs found, did you provide a valid config file?")
log.Fatalf("E! Error: no inputs found, did you provide a valid config file?")
}
ag, err := agent.NewAgent(c)
if err != nil {
log.Fatal(err)
log.Fatal("E! " + err.Error())
}
if *fDebug {
ag.Config.Agent.Debug = true
}
if *fQuiet {
ag.Config.Agent.Quiet = true
}
// Setup logging
logger.SetupLogging(
ag.Config.Agent.Debug || *fDebug,
ag.Config.Agent.Quiet || *fQuiet,
ag.Config.Agent.Logfile,
)
if *fTest {
err = ag.Test()
if err != nil {
log.Fatal(err)
log.Fatal("E! " + err.Error())
}
return
}
err = ag.Connect()
if err != nil {
log.Fatal(err)
log.Fatal("E! " + err.Error())
}
shutdown := make(chan struct{})
@@ -244,7 +255,7 @@ func reloadLoop(stop chan struct{}, s service.Service) {
close(shutdown)
}
if sig == syscall.SIGHUP {
log.Printf("Reloading Telegraf config\n")
log.Printf("I! Reloading Telegraf config\n")
<-reload
reload <- true
close(shutdown)
@@ -254,15 +265,15 @@ func reloadLoop(stop chan struct{}, s service.Service) {
}
}()
log.Printf("Starting Telegraf (version %s)\n", version)
log.Printf("Loaded outputs: %s", strings.Join(c.OutputNames(), " "))
log.Printf("Loaded inputs: %s", strings.Join(c.InputNames(), " "))
log.Printf("Tags enabled: %s", c.ListTags())
log.Printf("I! Starting Telegraf (version %s)\n", version)
log.Printf("I! Loaded outputs: %s", strings.Join(c.OutputNames(), " "))
log.Printf("I! Loaded inputs: %s", strings.Join(c.InputNames(), " "))
log.Printf("I! Tags enabled: %s", c.ListTags())
if *fPidfile != "" {
f, err := os.Create(*fPidfile)
if err != nil {
log.Fatalf("Unable to create pidfile: %s", err)
log.Fatalf("E! Unable to create pidfile: %s", err)
}
fmt.Fprintf(f, "%d\n", os.Getpid())
@@ -294,8 +305,10 @@ func (p *program) Stop(s service.Service) error {
}
func main() {
flag.Usage = func() { usageExit(0) }
flag.Parse()
if runtime.GOOS == "windows" {
svcConfig = &service.Config{
svcConfig := &service.Config{
Name: "telegraf",
DisplayName: "Telegraf Data Collector Service",
Description: "Collects data using a series of plugins and publishes it to" +
@@ -306,15 +319,23 @@ func main() {
prg := &program{}
s, err := service.New(prg, svcConfig)
if err != nil {
log.Fatal(err)
log.Fatal("E! " + err.Error())
}
logger, err = s.Logger(nil)
if err != nil {
log.Fatal(err)
}
err = s.Run()
if err != nil {
logger.Error(err)
// Handle the -service flag here to prevent any issues with tooling that
// may not have an interactive session, e.g. installing from Ansible.
if *fService != "" {
if *fConfig != "" {
(*svcConfig).Arguments = []string{"-config", *fConfig}
}
err := service.Control(s, *fService)
if err != nil {
log.Fatal("E! " + err.Error())
}
} else {
err = s.Run()
if err != nil {
log.Println("E! " + err.Error())
}
}
} else {
stop = make(chan struct{})

View File

@@ -1,38 +1,38 @@
# Telegraf Configuration
## Generating a Configuration File
A default Telegraf config file can be generated using the -sample-config flag:
```
telegraf -sample-config > telegraf.conf
```
To generate a file with specific inputs and outputs, you can use the
-input-filter and -output-filter flags:
```
telegraf -sample-config -input-filter cpu:mem:net:swap -output-filter influxdb:kafka
```
You can see the latest config file with all available plugins here:
[telegraf.conf](https://github.com/influxdata/telegraf/blob/master/etc/telegraf.conf)
## Generating a Configuration File
A default Telegraf config file can be auto-generated by telegraf:
```
telegraf config > telegraf.conf
```
To generate a file with specific inputs and outputs, you can use the
--input-filter and --output-filter flags:
```
telegraf --input-filter cpu:mem:net:swap --output-filter influxdb:kafka config
```
## Environment Variables
Environment variables can be used anywhere in the config file by prefixing
them with $. For strings the variable must be within quotes (e.g., "$STR_VAR");
for numbers and booleans it should be plain (e.g., $INT_VAR, $BOOL_VAR).
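For example, assuming `INFLUX_URL` and `TELEGRAF_DEBUG` are exported in the
environment (the variable names here are placeholders):
```toml
[agent]
  ## numbers and booleans are written without quotes
  debug = $TELEGRAF_DEBUG

[[outputs.influxdb]]
  ## strings (including members of string arrays) keep their quotes
  urls = ["$INFLUX_URL"]
```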
## `[global_tags]` Configuration
# Global Tags
Global tags can be specified in the `[global_tags]` section of the config file
in key="value" format. All metrics being gathered on this host will be tagged
with the tags specified here.
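For example (hypothetical tag values):
```toml
[global_tags]
  dc = "us-east-1"
  rack = "1a"
```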
## `[agent]` Configuration
## Agent Configuration
Telegraf has a few options you can configure under the `agent` section of the
Telegraf has a few options you can configure under the `[agent]` section of the
config.
* **interval**: Default data collection interval for all inputs
@@ -56,13 +56,63 @@ interval. Maximum flush_interval will be flush_interval + flush_jitter
This is primarily to avoid
large write spikes for users running a large number of telegraf instances.
ie, a jitter of 5s and flush_interval 10s means flushes will happen every 10-15s.
* **precision**: By default, precision will be set to the same timestamp order
as the collection interval, with the maximum being 1s. Precision will NOT
be used for service inputs, such as logparser and statsd. Valid values are
"ns", "us" (or "µs"), "ms", "s".
* **logfile**: Specify the log file name. The empty string means to log to stderr.
* **debug**: Run telegraf in debug mode.
* **quiet**: Run telegraf in quiet mode.
* **quiet**: Run telegraf in quiet mode (error messages only).
* **hostname**: Override default hostname, if empty use os.Hostname().
* **omit_hostname**: If true, do not set the "host" tag in the telegraf agent.
## Input Configuration
The following config parameters are available for all inputs; a combined example follows the list:
* **interval**: How often to gather this metric. Normal plugins use a single
global interval, but if one particular input should be run less or more often,
you can configure that here.
* **name_override**: Override the base name of the measurement.
(Default is the name of the input).
* **name_prefix**: Specifies a prefix to attach to the measurement name.
* **name_suffix**: Specifies a suffix to attach to the measurement name.
* **tags**: A map of tags to apply to a specific input's measurements.
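For example, a cpu input combining several of these options (values are illustrative):
```toml
[[inputs.cpu]]
  ## gather this input every 30s instead of the global interval
  interval = "30s"
  ## measurements from this input become e.g. "site_cpu"
  name_prefix = "site_"
  ## extra tags applied only to this input's measurements
  [inputs.cpu.tags]
    role = "web"
```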
## Output Configuration
There are no generic configuration options available for all outputs.
## Aggregator Configuration
The following config parameters are available for all aggregators:
* **period**: The period on which to flush & clear each aggregator. All metrics
that are sent with timestamps outside of this period will be ignored by the
aggregator.
* **delay**: The delay before each aggregator is flushed. This controls how
long aggregators wait to receive metrics from input plugins, in the case that
aggregators are flushing and inputs are gathering on the same interval.
* **drop_original**: If true, the original metric will be dropped by the
aggregator and will not get sent to the output plugins.
* **name_override**: Override the base name of the measurement.
(Default is the name of the input).
* **name_prefix**: Specifies a prefix to attach to the measurement name.
* **name_suffix**: Specifies a suffix to attach to the measurement name.
* **tags**: A map of tags to apply to a specific input's measurements.
## Processor Configuration
The following config parameters are available for all processors:
* **order**: The order in which the processors are executed. If this is not
specified, processor execution order will be random (see the example below).
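For example (a minimal sketch, assuming the printer processor included in this release):
```toml
[[processors.printer]]
  ## run this processor first if several processors are configured
  order = 1
```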
#### Measurement Filtering
Filters can be configured per input or output, see below for examples.
Filters can be configured per input, output, processor, or aggregator;
see below for examples.
* **namepass**: An array of strings that is used to filter metrics generated by the
current input. Each string in the array is tested as a glob match against
@@ -86,18 +136,9 @@ as it is more efficient to filter out tags at the ingestion point.
* **taginclude**: taginclude is the inverse of tagexclude. It will only include
the tag keys in the final measurement.
## Input Configuration
Some configuration options are configurable per input:
* **name_override**: Override the base name of the measurement.
(Default is the name of the input).
* **name_prefix**: Specifies a prefix to attach to the measurement name.
* **name_suffix**: Specifies a suffix to attach to the measurement name.
* **tags**: A map of tags to apply to a specific input's measurements.
* **interval**: How often to gather this metric. Normal plugins use a single
global interval, but if one particular input should be run less or more often,
you can configure that here.
**NOTE** `tagpass` and `tagdrop` parameters must be defined at the _end_ of
the plugin definition, otherwise subsequent plugin config options will be
interpreted as part of the tagpass/tagdrop map.
#### Input Configuration Examples
@@ -129,6 +170,10 @@ fields which begin with `time_`.
#### Input Config: tagpass and tagdrop
**NOTE** `tagpass` and `tagdrop` parameters must be defined at the _end_ of
the plugin definition, otherwise subsequent plugin config options will be
interpreted as part of the tagpass/tagdrop map.
```toml
[[inputs.cpu]]
percpu = true
@@ -246,11 +291,7 @@ to avoid measurement collisions:
fielddrop = ["cpu_time*"]
```
## Output Configuration
Telegraf also supports specifying multiple output sinks to send data to,
configuring each output sink is different, but examples can be
found by running `telegraf -sample-config`.
#### Output Configuration Examples:
```toml
[[outputs.influxdb]]
@@ -275,3 +316,39 @@ found by running `telegraf -sample-config`.
[outputs.influxdb.tagpass]
cpu = ["cpu0"]
```
#### Aggregator Configuration Examples:
This will collect and emit the min/max of the system load1 metric every
30s, dropping the originals.
```toml
[[inputs.system]]
fieldpass = ["load1"] # collects system load1 metric.
[[aggregators.minmax]]
period = "30s" # send & clear the aggregate every 30s.
drop_original = true # drop the original metrics.
[[outputs.file]]
files = ["stdout"]
```
This will collect and emit the min/max of the swap metrics every
30s, dropping the originals. The aggregator will not be applied
to the system load metrics due to the `namepass` parameter.
```toml
[[inputs.swap]]
[[inputs.system]]
fieldpass = ["load1"] # collects system load1 metric.
[[aggregators.minmax]]
period = "30s" # send & clear the aggregate every 30s.
drop_original = true # drop the original metrics.
namepass = ["swap"] # only "pass" swap metrics through the aggregator.
[[outputs.file]]
files = ["stdout"]
```
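The timing relationship between the input `interval`, the aggregator `period`, and the
`delay` can be easier to see with a small sketch. The snippet below is only an
illustration of the scheduling (it is not Telegraf's RunningAggregator implementation);
it assumes the default 10s agent interval, the 30s period from the examples above, and
the 100ms default delay that appears in `buildAggregator` later in this compare.

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	interval := 10 * time.Second      // input gather interval (agent default)
	period := 30 * time.Second        // aggregator period
	delay := 100 * time.Millisecond   // aggregator delay (default)

	// Inputs gather at t = 10s, 20s, 30s, ...; the aggregator flushes at the
	// end of each period plus the delay, so a gather that lands exactly on a
	// period boundary is still included in that period's aggregate.
	for t := interval; t <= 60*time.Second; t += interval {
		fmt.Printf("gather at %v\n", t)
		if t%period == 0 {
			fmt.Printf("aggregate [%v, %v) flushed at %v\n", t-period, t, t+delay)
		}
	}
}
```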

View File

@@ -232,6 +232,16 @@ us.west.cpu.load 100
=> cpu.load,region=us.west value=100
```
Multiple templates can also be specified, but these should be differentiated
using _filters_ (see below for more details).
```toml
templates = [
"*.*.* region.region.measurement", # <- all 3-part measurements will match this one.
"*.*.*.* region.region.host.measurement", # <- all 4-part measurements will match this one.
]
```
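A rough sketch of what a template does to a bucket name may help: each dot-separated
part of the incoming metric is assigned the role in the same position of the template,
and repeated tag names are joined with a dot (as in the `region.region` example above).
The snippet below is only an illustration, not Telegraf's Graphite parser, and uses a
hypothetical 4-part bucket with the template from the second entry above (the `*.*.*.*`
part is the filter; the rest is the template).

```go
package main

import (
	"fmt"
	"strings"
)

// applyTemplate maps each dot-separated part of a Graphite bucket onto the
// role named in the same position of the template ("measurement" or a tag
// name). Parts that share a tag name are joined with a dot.
func applyTemplate(bucket, template string) (string, map[string]string) {
	parts := strings.Split(bucket, ".")
	roles := strings.Split(template, ".")
	tags := make(map[string]string)
	var measurement []string
	for i, part := range parts {
		if i >= len(roles) {
			break
		}
		switch role := roles[i]; role {
		case "measurement":
			measurement = append(measurement, part)
		default:
			if v, ok := tags[role]; ok {
				tags[role] = v + "." + part
			} else {
				tags[role] = part
			}
		}
	}
	return strings.Join(measurement, "."), tags
}

func main() {
	// hypothetical 4-part bucket matched by the second template above
	m, tags := applyTemplate("us.west.localhost.cpu", "region.region.host.measurement")
	fmt.Println(m, tags) // cpu map[host:localhost region:us.west]
}
```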
#### Field Templates:
The field keyword tells Telegraf to give the metric that field name.

View File

@@ -6,19 +6,18 @@ the general steps to set it up.
1. Obtain the telegraf windows distribution
2. Create the directory `C:\Program Files\Telegraf` (if you install in a different
location, simply specify the `-config` parameter with the desired location)
3. Place the telegraf.exe and the config file into `C:\Program Files\Telegraf`
4. To install the service into the Windows Service Manager, run (as an
administrator):
3. Place the telegraf.exe and the telegraf.conf config file into `C:\Program Files\Telegraf`
4. To install the service into the Windows Service Manager, run the following in PowerShell as an administrator (if a file path contains spaces, wrap it in double quotes ""):
```
> C:\Program Files\Telegraf\telegraf.exe --service install
> C:\"Program Files"\Telegraf\telegraf.exe --service install
```
5. Edit the configuration file to meet your needs
6. To check that it works, run:
```
> C:\Program Files\Telegraf\telegraf.exe --config C:\Program Files\Telegraf\telegraf.conf --test
> C:\"Program Files"\Telegraf\telegraf.exe --config C:\"Program Files"\Telegraf\telegraf.conf --test
```
7. To start collecting data, run:

View File

@@ -30,12 +30,15 @@
## ie, if interval="10s" then always collect on :00, :10, :20, etc.
round_interval = true
## Telegraf will send metrics to outputs in batches of at
## most metric_batch_size metrics.
## Telegraf will send metrics to outputs in batches of at most
## metric_batch_size metrics.
## This controls the size of writes that Telegraf sends to output plugins.
metric_batch_size = 1000
## For failed writes, telegraf will cache metric_buffer_limit metrics for each
## output, and will flush this buffer on a successful write. Oldest metrics
## are dropped first when this buffer fills.
## This buffer only fills when writes fail to output plugin(s).
metric_buffer_limit = 10000
## Collection jitter is used to jitter the collection by a random amount.
@@ -57,10 +60,15 @@
## Precision will NOT be used for service inputs, such as logparser and statsd.
## Valid values are "ns", "us" (or "µs"), "ms", "s".
precision = ""
## Run telegraf in debug mode
## Logging configuration:
## Run telegraf with debug log messages.
debug = false
## Run telegraf in quiet mode
## Run telegraf in quiet mode (error log messages only).
quiet = false
## Specify the log file name. The empty string means to log to stderr.
logfile = ""
## Override default hostname, if empty use os.Hostname()
hostname = ""
## If set to true, do not set the "host" tag in the telegraf agent.
@@ -152,7 +160,7 @@
# # Configuration for AWS CloudWatch output.
# [[outputs.cloudwatch]]
# ## Amazon REGION
# region = 'us-east-1'
# region = "us-east-1"
#
# ## Amazon Credentials
# ## Credentials are loaded in the following order
@@ -170,7 +178,7 @@
# #shared_credential_file = ""
#
# ## Namespace for the CloudWatch MetricDatums
# namespace = 'InfluxData/Telegraf'
# namespace = "InfluxData/Telegraf"
# # Configuration for DataDog API to send metrics to.
@@ -357,6 +365,30 @@
# data_format = "influx"
# # Send telegraf measurements to NATS
# [[outputs.nats]]
# ## URLs of NATS servers
# servers = ["nats://localhost:4222"]
# ## Optional credentials
# # username = ""
# # password = ""
# ## NATS subject for producer messages
# subject = "telegraf"
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false
#
# ## Data format to output.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# data_format = "influx"
# # Send telegraf measurements to NSQD
# [[outputs.nsq]]
# ## Location of nsqd instance listening on TCP
@@ -376,13 +408,18 @@
# ## prefix for metrics keys
# prefix = "my.specific.prefix."
#
# ## Telnet Mode ##
# ## DNS name of the OpenTSDB server in telnet mode
# ## DNS name of the OpenTSDB server
# ## Using "opentsdb.example.com" or "tcp://opentsdb.example.com" will use the
# ## telnet API. "http://opentsdb.example.com" will use the HTTP API.
# host = "opentsdb.example.com"
#
# ## Port of the OpenTSDB server in telnet mode
# ## Port of the OpenTSDB server
# port = 4242
#
# ## Number of data points to send to OpenTSDB in HTTP requests.
# ## Not used with telnet API.
# httpBatchSize = 50
#
# ## Debug true - Prints OpenTSDB communication
# debug = false
@@ -404,6 +441,30 @@
###############################################################################
# PROCESSOR PLUGINS #
###############################################################################
# # Print all metrics that pass through this filter.
# [[processors.printer]]
###############################################################################
# AGGREGATOR PLUGINS #
###############################################################################
# # Keep the aggregate min/max of each metric passing through.
# [[aggregators.minmax]]
# ## General Aggregator Arguments:
# ## The period on which to flush & clear the aggregator.
# period = "30s"
# ## If true, the original metric will be dropped by the
# ## aggregator and will not get sent to the output plugins.
# drop_original = false
###############################################################################
# INPUT PLUGINS #
###############################################################################
@@ -414,8 +475,8 @@
percpu = true
## Whether to report total system cpu stats or not
totalcpu = true
## Comment this line if you want the raw CPU time metrics
fielddrop = ["time_*"]
## If true, collect raw CPU time metrics.
collect_cpu_time = false
# Read metrics about disk usage by mount point
@@ -511,6 +572,10 @@
# # Collects performance metrics from the MON and OSD nodes in a Ceph storage cluster.
# [[inputs.ceph]]
# ## This is the recommended interval to poll. Too frequent and you will lose
# ## data points due to timeouts during rebalancing and recovery
# interval = '1m'
#
# ## All configuration values are optional, defaults are shown below
#
# ## location of ceph binary
@@ -525,25 +590,40 @@
#
# ## suffix used to identify socket files
# socket_suffix = "asok"
#
# ## Ceph user to authenticate as
# ceph_user = "client.admin"
#
# ## Ceph configuration to use to locate the cluster
# ceph_config = "/etc/ceph/ceph.conf"
#
# ## Whether to gather statistics via the admin socket
# gather_admin_socket_stats = true
#
# ## Whether to gather statistics via ceph commands
# gather_cluster_stats = true
# # Read specific statistics per cgroup
# [[inputs.cgroup]]
# ## Directories in which to look for files, globs are supported.
# # paths = [
# # "/cgroup/memory",
# # "/cgroup/memory/child1",
# # "/cgroup/memory/child2/*",
# # ]
# ## cgroup stat fields, as file names, globs are supported.
# ## these file names are appended to each path from above.
# # files = ["memory.*usage*", "memory.limit_in_bytes"]
# ## Directories in which to look for files, globs are supported.
# ## Consider restricting paths to the set of cgroups you really
# ## want to monitor if you have a large number of cgroups, to avoid
# ## any cardinality issues.
# # paths = [
# # "/cgroup/memory",
# # "/cgroup/memory/child1",
# # "/cgroup/memory/child2/*",
# # ]
# ## cgroup stat fields, as file names, globs are supported.
# ## these file names are appended to each path from above.
# # files = ["memory.*usage*", "memory.limit_in_bytes"]
# # Pull Metric Statistics from Amazon CloudWatch
# [[inputs.cloudwatch]]
# ## Amazon Region
# region = 'us-east-1'
# region = "us-east-1"
#
# ## Amazon Credentials
# ## Credentials are loaded in the following order
@@ -561,32 +641,37 @@
# #shared_credential_file = ""
#
# ## Requested CloudWatch aggregation Period (required - must be a multiple of 60s)
# period = '1m'
# period = "5m"
#
# ## Collection Delay (required - must account for metrics availability via CloudWatch API)
# delay = '1m'
# delay = "5m"
#
# ## Recommended: use a metric 'interval' that is a multiple of 'period' to avoid
# ## gaps or overlap in pulled data
# interval = '1m'
# interval = "5m"
#
# ## Configure the TTL for the internal cache of metrics.
# ## Defaults to 1 hr if not specified
# #cache_ttl = '10m'
# #cache_ttl = "10m"
#
# ## Metric Statistic Namespace (required)
# namespace = 'AWS/ELB'
# namespace = "AWS/ELB"
#
# ## Maximum requests per second. Note that the global default AWS rate limit is
# ## 10 reqs/sec, so if you define multiple namespaces, these should add up to a
# ## maximum of 10. Optional - default value is 10.
# ratelimit = 10
#
# ## Metrics to Pull (optional)
# ## Defaults to all Metrics in Namespace if nothing is provided
# ## Refreshes Namespace available metrics every 1h
# #[[inputs.cloudwatch.metrics]]
# # names = ['Latency', 'RequestCount']
# # names = ["Latency", "RequestCount"]
# #
# # ## Dimension filters for Metric (optional)
# # [[inputs.cloudwatch.metrics.dimensions]]
# # name = 'LoadBalancerName'
# # value = 'p-example'
# # name = "LoadBalancerName"
# # value = "p-example"
# # Gather health check statuses from services registered in Consul
@@ -694,6 +779,9 @@
# ## specify a list of one or more Elasticsearch servers
# servers = ["http://localhost:9200"]
#
# ## Timeout for HTTP requests to the Elasticsearch server(s)
# http_timeout = "5s"
#
# ## set local to false when you want to read the indices stats from all nodes
# ## within the cluster
# local = true
@@ -789,12 +877,15 @@
# ## An array of addresses to gather stats about. Specify an ip or hostname
# ## with optional port. ie localhost, 10.10.3.33:1936, etc.
# ## Make sure you specify the complete path to the stats endpoint
# ## ie 10.10.3.33:1936/haproxy?stats
# ## including the protocol, ie http://10.10.3.33:1936/haproxy?stats
# #
# ## If no servers are specified, then default to 127.0.0.1:1936/haproxy?stats
# servers = ["http://myhaproxy.com:1936/haproxy?stats"]
# ## Or you can also use local socket
# ## servers = ["socket:/run/haproxy/admin.sock"]
# ##
# ## You can also use local socket with standard wildcard globbing.
# ## Server addresses not starting with 'http' will be treated as possible
# ## sockets, so both examples below are valid.
# ## servers = ["socket:/run/haproxy/admin.sock", "/run/haproxy/*.sock"]
# # HTTP/HTTPS request given an address, a method and a timeout
@@ -836,6 +927,8 @@
# "http://localhost:9999/stats/",
# "http://localhost:9998/stats/",
# ]
# ## Set response_timeout (default 5 seconds)
# response_timeout = "5s"
#
# ## HTTP method to use: GET or POST (case-sensitive)
# method = "GET"
@@ -875,6 +968,9 @@
# urls = [
# "http://localhost:8086/debug/vars"
# ]
#
# ## http request & header timeout
# timeout = "5s"
# # Read metrics from one or many bare metal servers
@@ -890,6 +986,7 @@
# # Read JMX metrics through Jolokia
# [[inputs.jolokia]]
# ## This is the context root used to compose the jolokia url
# ## NOTE that your jolokia security policy must allow for POST requests.
# context = "/jolokia"
#
# ## This specifies the mode used
@@ -933,6 +1030,22 @@
# attribute = "LoadedClassCount,UnloadedClassCount,TotalLoadedClassCount"
# # Read metrics from the kubernetes kubelet api
# [[inputs.kubernetes]]
# ## URL for the kubelet
# url = "http://1.1.1.1:10255"
#
# ## Use bearer token for authorization
# # bearer_token = /path/to/bearer/token
#
# ## Optional SSL Config
# # ssl_ca = /path/to/cafile
# # ssl_cert = /path/to/certfile
# # ssl_key = /path/to/keyfile
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false
# # Read metrics from a LeoFS Server via SNMP
# [[inputs.leofs]]
# ## An array of URIs to gather stats about LeoFS.
@@ -1005,8 +1118,6 @@
# # "tasks",
# # "messages",
# # ]
# ## Include mesos tasks statistics, default is false
# # slave_tasks = true
# # Read metrics from one or many MongoDB servers
@@ -1054,13 +1165,13 @@
# ## gather metrics from SHOW BINARY LOGS command output
# gather_binary_logs = false
# #
# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMART_BY_TABLE
# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE
# gather_table_io_waits = false
# #
# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS
# gather_table_lock_waits = false
# #
# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMART_BY_INDEX_USAGE
# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE
# gather_index_io_waits = false
# #
# ## gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS
@@ -1150,23 +1261,6 @@
# command = "passenger-status -v --show=xml"
# # Read metrics from one or many pgbouncer servers
# [[inputs.pgbouncer]]
# ## specify address via a url matching:
# ## postgres://[pqgotest[:password]]@localhost:port[/dbname]\
# ## ?sslmode=[disable|verify-ca|verify-full]
# ## or a simple string:
# ## host=localhost user=pqotest port=6432 password=... sslmode=... dbname=pgbouncer
# ##
# ## All connection parameters are optional, except for dbname,
# ## you need to set it always as pgbouncer.
# address = "host=localhost user=postgres port=6432 sslmode=disable dbname=pgbouncer"
#
# ## A list of databases to pull metrics about. If not specified, metrics for all
# ## databases are gathered.
# # databases = ["app_production", "testing"]
# # Read metrics of phpfpm, via HTTP status page or socket
# [[inputs.phpfpm]]
# ## An array of addresses to gather stats about. Specify an ip or hostname
@@ -1199,13 +1293,13 @@
# ## urls to ping
# urls = ["www.google.com"] # required
# ## number of pings to send per collection (ping -c <COUNT>)
# count = 1 # required
# # count = 1
# ## interval, in s, at which to ping. 0 == default (ping -i <PING_INTERVAL>)
# ping_interval = 0.0
# # ping_interval = 1.0
# ## per-ping timeout, in s. 0 == no timeout (ping -W <TIMEOUT>)
# timeout = 1.0
# # timeout = 1.0
# ## interface to send ping from (ping -I <INTERFACE>)
# interface = ""
# # interface = ""
# # Read metrics from one or many postgresql servers
@@ -1225,8 +1319,12 @@
# ##
# address = "host=localhost user=postgres sslmode=disable"
#
# ## A list of databases to explicitly ignore. If not specified, metrics for all
# ## databases are gathered. Do NOT use with the 'databases' option.
# # ignored_databases = ["postgres", "template0", "template1"]
#
# ## A list of databases to pull metrics about. If not specified, metrics for all
# ## databases are gathered.
# ## databases are gathered. Do NOT use with the 'ignored_databases' option.
# # databases = ["app_production", "testing"]
@@ -1393,6 +1491,65 @@
# servers = ["http://localhost:8098"]
# # Retrieves SNMP values from remote agents
# [[inputs.snmp]]
# agents = [ "127.0.0.1:161" ]
# ## Timeout for each SNMP query.
# timeout = "5s"
# ## Number of retries to attempt within timeout.
# retries = 3
# ## SNMP version, values can be 1, 2, or 3
# version = 2
#
# ## SNMP community string.
# community = "public"
#
# ## The GETBULK max-repetitions parameter
# max_repetitions = 10
#
# ## SNMPv3 auth parameters
# #sec_name = "myuser"
# #auth_protocol = "md5" # Values: "MD5", "SHA", ""
# #auth_password = "pass"
# #sec_level = "authNoPriv" # Values: "noAuthNoPriv", "authNoPriv", "authPriv"
# #context_name = ""
# #priv_protocol = "" # Values: "DES", "AES", ""
# #priv_password = ""
#
# ## measurement name
# name = "system"
# [[inputs.snmp.field]]
# name = "hostname"
# oid = ".1.0.0.1.1"
# [[inputs.snmp.field]]
# name = "uptime"
# oid = ".1.0.0.1.2"
# [[inputs.snmp.field]]
# name = "load"
# oid = ".1.0.0.1.3"
# [[inputs.snmp.field]]
# oid = "HOST-RESOURCES-MIB::hrMemorySize"
#
# [[inputs.snmp.table]]
# ## measurement name
# name = "remote_servers"
# inherit_tags = [ "hostname" ]
# [[inputs.snmp.table.field]]
# name = "server"
# oid = ".1.0.0.0.1.0"
# is_tag = true
# [[inputs.snmp.table.field]]
# name = "connections"
# oid = ".1.0.0.0.1.1"
# [[inputs.snmp.table.field]]
# name = "latency"
# oid = ".1.0.0.0.1.2"
#
# [[inputs.snmp.table]]
# ## auto populate table's fields using the MIB
# oid = "HOST-RESOURCES-MIB::hrNetworkTable"
# # DEPRECATED! PLEASE USE inputs.snmp INSTEAD.
# [[inputs.snmp_legacy]]
# ## Use 'oids.txt' file to translate oids to names
@@ -1565,6 +1722,25 @@
# SERVICE INPUT PLUGINS #
###############################################################################
# # Influx HTTP write listener
# [[inputs.http_listener]]
# ## Address and port to host HTTP listener on
# service_address = ":8186"
#
# ## maximum duration before timing out read of the request
# read_timeout = "10s"
# ## maximum duration before timing out write of the response
# write_timeout = "10s"
#
# ## Maximum allowed http request body size in bytes.
# ## 0 means to use the default of 536,870,912 bytes (500 mebibytes)
# max_body_size = 0
#
# ## Maximum line size allowed to be sent in bytes.
# ## 0 means to use the default of 65536 bytes (64 kibibytes)
# max_line_size = 0
# # Read metrics from Kafka topic(s)
# [[inputs.kafka_consumer]]
# ## topic(s) to consume
@@ -1657,13 +1833,18 @@
# # Read metrics from NATS subject(s)
# [[inputs.nats_consumer]]
# ## urls of NATS servers
# servers = ["nats://localhost:4222"]
# # servers = ["nats://localhost:4222"]
# ## Use Transport Layer Security
# secure = false
# # secure = false
# ## subject(s) to consume
# subjects = ["telegraf"]
# # subjects = ["telegraf"]
# ## name a queue group
# queue_group = "telegraf_consumers"
# # queue_group = "telegraf_consumers"
#
# ## Sets the limits for pending msgs and bytes for each subscription
# ## These shouldn't need to be adjusted except in very high throughput scenarios
# # pending_message_limit = 65536
# # pending_bytes_limit = 67108864
#
# ## Data format to consume.
# ## Each data format has its own unique set of configuration options, read
@@ -1750,14 +1931,14 @@
# # Generic TCP listener
# [[inputs.tcp_listener]]
# ## Address and port to host TCP listener on
# service_address = ":8094"
# # service_address = ":8094"
#
# ## Number of TCP messages allowed to queue up. Once filled, the
# ## TCP listener will start dropping packets.
# allowed_pending_messages = 10000
# # allowed_pending_messages = 10000
#
# ## Maximum number of concurrent TCP connections to allow
# max_tcp_connections = 250
# # max_tcp_connections = 250
#
# ## Data format to consume.
# ## Each data format has its own unique set of configuration options, read
@@ -1769,11 +1950,11 @@
# # Generic UDP listener
# [[inputs.udp_listener]]
# ## Address and port to host UDP listener on
# service_address = ":8092"
# # service_address = ":8092"
#
# ## Number of UDP messages allowed to queue up. Once filled, the
# ## UDP listener will start dropping packets.
# allowed_pending_messages = 10000
# # allowed_pending_messages = 10000
#
# ## Data format to consume.
# ## Each data format has its own unique set of configuration options, read
@@ -1787,6 +1968,9 @@
# ## Address and port to host Webhook listener on
# service_address = ":1619"
#
# [inputs.webhooks.filestack]
# path = "/filestack"
#
# [inputs.webhooks.github]
# path = "/github"
#
@@ -1795,4 +1979,3 @@
#
# [inputs.webhooks.rollbar]
# path = "/rollbar"

View File

@@ -42,10 +42,14 @@
## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
flush_jitter = "0s"
## Logging configuration:
## Run telegraf in debug mode
debug = false
## Run telegraf in quiet mode
quiet = false
## Specify the log file name. The empty string means to log to stderr.
logfile = "/Program Files/Telegraf/telegraf.log"
## Override default hostname, if empty use os.Hostname()
hostname = ""
@@ -85,7 +89,7 @@
# Windows Performance Counters plugin.
# This is the recommended method of monitoring system metrics on windows,
# as the regular system plugins (inputs.cpu, inputs.mem, etc.) rely on WMI,
# which utilizes a lot of system resources.
# which utilize more system resources.
#
# See more configuration examples at:
# https://github.com/influxdata/telegraf/tree/master/plugins/inputs/win_perf_counters
@@ -95,70 +99,104 @@
# Processor usage, alternative to native; reports on a per-core basis.
ObjectName = "Processor"
Instances = ["*"]
Counters = ["% Idle Time", "% Interrupt Time", "% Privileged Time", "% User Time", "% Processor Time"]
Counters = [
"% Idle Time",
"% Interrupt Time",
"% Privileged Time",
"% User Time",
"% Processor Time",
]
Measurement = "win_cpu"
#IncludeTotal=false #Set to true to include _Total instance when querying for all (*).
# Set to true to include _Total instance when querying for all (*).
#IncludeTotal=false
[[inputs.win_perf_counters.object]]
# Disk times and queues
ObjectName = "LogicalDisk"
Instances = ["*"]
Counters = ["% Idle Time", "% Disk Time","% Disk Read Time", "% Disk Write Time", "% User Time", "Current Disk Queue Length"]
Counters = [
"% Idle Time",
"% Disk Time","% Disk Read Time",
"% Disk Write Time",
"% User Time",
"Current Disk Queue Length",
]
Measurement = "win_disk"
#IncludeTotal=false #Set to true to include _Total instance when querying for all (*).
# Set to true to include _Total instance when querying for all (*).
#IncludeTotal=false
[[inputs.win_perf_counters.object]]
ObjectName = "System"
Counters = ["Context Switches/sec","System Calls/sec"]
Counters = [
"Context Switches/sec",
"System Calls/sec",
]
Instances = ["------"]
Measurement = "win_system"
#IncludeTotal=false #Set to true to include _Total instance when querying for all (*).
# Set to true to include _Total instance when querying for all (*).
#IncludeTotal=false
[[inputs.win_perf_counters.object]]
# Example query where the Instance portion must be removed to get data back, such as from the Memory object.
# Example query where the Instance portion must be removed to get data back,
# such as from the Memory object.
ObjectName = "Memory"
Counters = ["Available Bytes","Cache Faults/sec","Demand Zero Faults/sec","Page Faults/sec","Pages/sec","Transition Faults/sec","Pool Nonpaged Bytes","Pool Paged Bytes"]
Instances = ["------"] # Use 6 x - to remove the Instance bit from the query.
Counters = [
"Available Bytes",
"Cache Faults/sec",
"Demand Zero Faults/sec",
"Page Faults/sec",
"Pages/sec",
"Transition Faults/sec",
"Pool Nonpaged Bytes",
"Pool Paged Bytes",
]
# Use 6 x - to remove the Instance bit from the query.
Instances = ["------"]
Measurement = "win_mem"
#IncludeTotal=false #Set to true to include _Total instance when querying for all (*).
# Set to true to include _Total instance when querying for all (*).
#IncludeTotal=false
# Windows system plugins using WMI (disabled by default, using
# win_perf_counters over WMI is recommended)
# Read metrics about cpu usage
#[[inputs.cpu]]
## Whether to report per-cpu stats or not
#percpu = true
## Whether to report total system cpu stats or not
#totalcpu = true
## Comment this line if you want the raw CPU time metrics
#fielddrop = ["time_*"]
# # Read metrics about cpu usage
# [[inputs.cpu]]
# ## Whether to report per-cpu stats or not
# percpu = true
# ## Whether to report total system cpu stats or not
# totalcpu = true
# ## Comment this line if you want the raw CPU time metrics
# fielddrop = ["time_*"]
# Read metrics about disk usage by mount point
#[[inputs.disk]]
## By default, telegraf gathers stats for all mountpoints.
## Setting mountpoints will restrict the stats to the specified mountpoints.
## mount_points=["/"]
## Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually
## present on /run, /var/run, /dev/shm or /dev).
#ignore_fs = ["tmpfs", "devtmpfs"]
# # Read metrics about disk usage by mount point
# [[inputs.disk]]
# ## By default, telegraf gathers stats for all mountpoints.
# ## Setting mountpoints will restrict the stats to the specified mountpoints.
# ## mount_points=["/"]
#
# ## Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually
# ## present on /run, /var/run, /dev/shm or /dev).
# # ignore_fs = ["tmpfs", "devtmpfs"]
# Read metrics about disk IO by device
#[[inputs.diskio]]
## By default, telegraf will gather stats for all devices including
## disk partitions.
## Setting devices will restrict the stats to the specified devices.
## devices = ["sda", "sdb"]
## Uncomment the following line if you do not need disk serial numbers.
## skip_serial_number = true
# Read metrics about memory usage
#[[inputs.mem]]
# no configuration
# # Read metrics about disk IO by device
# [[inputs.diskio]]
# ## By default, telegraf will gather stats for all devices including
# ## disk partitions.
# ## Setting devices will restrict the stats to the specified devices.
# ## devices = ["sda", "sdb"]
# ## Uncomment the following line if you do not need disk serial numbers.
# ## skip_serial_number = true
# Read metrics about swap memory usage
#[[inputs.swap]]
# no configuration
# # Read metrics about memory usage
# [[inputs.mem]]
# # no configuration
# # Read metrics about swap memory usage
# [[inputs.swap]]
# # no configuration

View File

@@ -10,16 +10,16 @@ type Filter interface {
Match(string) bool
}
// CompileFilter takes a list of string filters and returns a Filter interface
// Compile takes a list of string filters and returns a Filter interface
// for matching a given string against the filter list. The filter list
// supports glob matching too, ie:
//
// f, _ := CompileFilter([]string{"cpu", "mem", "net*"})
// f, _ := Compile([]string{"cpu", "mem", "net*"})
// f.Match("cpu") // true
// f.Match("network") // true
// f.Match("memory") // false
//
func CompileFilter(filters []string) (Filter, error) {
func Compile(filters []string) (Filter, error) {
// return if there is nothing to compile
if len(filters) == 0 {
return nil, nil

View File

@@ -6,30 +6,30 @@ import (
"github.com/stretchr/testify/assert"
)
func TestCompileFilter(t *testing.T) {
f, err := CompileFilter([]string{})
func TestCompile(t *testing.T) {
f, err := Compile([]string{})
assert.NoError(t, err)
assert.Nil(t, f)
f, err = CompileFilter([]string{"cpu"})
f, err = Compile([]string{"cpu"})
assert.NoError(t, err)
assert.True(t, f.Match("cpu"))
assert.False(t, f.Match("cpu0"))
assert.False(t, f.Match("mem"))
f, err = CompileFilter([]string{"cpu*"})
f, err = Compile([]string{"cpu*"})
assert.NoError(t, err)
assert.True(t, f.Match("cpu"))
assert.True(t, f.Match("cpu0"))
assert.False(t, f.Match("mem"))
f, err = CompileFilter([]string{"cpu", "mem"})
f, err = Compile([]string{"cpu", "mem"})
assert.NoError(t, err)
assert.True(t, f.Match("cpu"))
assert.False(t, f.Match("cpu0"))
assert.True(t, f.Match("mem"))
f, err = CompileFilter([]string{"cpu", "mem", "net*"})
f, err = Compile([]string{"cpu", "mem", "net*"})
assert.NoError(t, err)
assert.True(t, f.Match("cpu"))
assert.False(t, f.Match("cpu0"))
@@ -40,7 +40,7 @@ func TestCompileFilter(t *testing.T) {
var benchbool bool
func BenchmarkFilterSingleNoGlobFalse(b *testing.B) {
f, _ := CompileFilter([]string{"cpu"})
f, _ := Compile([]string{"cpu"})
var tmp bool
for n := 0; n < b.N; n++ {
tmp = f.Match("network")
@@ -49,7 +49,7 @@ func BenchmarkFilterSingleNoGlobFalse(b *testing.B) {
}
func BenchmarkFilterSingleNoGlobTrue(b *testing.B) {
f, _ := CompileFilter([]string{"cpu"})
f, _ := Compile([]string{"cpu"})
var tmp bool
for n := 0; n < b.N; n++ {
tmp = f.Match("cpu")
@@ -58,7 +58,7 @@ func BenchmarkFilterSingleNoGlobTrue(b *testing.B) {
}
func BenchmarkFilter(b *testing.B) {
f, _ := CompileFilter([]string{"cpu", "mem", "net*"})
f, _ := Compile([]string{"cpu", "mem", "net*"})
var tmp bool
for n := 0; n < b.N; n++ {
tmp = f.Match("network")
@@ -67,7 +67,7 @@ func BenchmarkFilter(b *testing.B) {
}
func BenchmarkFilterNoGlob(b *testing.B) {
f, _ := CompileFilter([]string{"cpu", "mem", "net"})
f, _ := Compile([]string{"cpu", "mem", "net"})
var tmp bool
for n := 0; n < b.N; n++ {
tmp = f.Match("net")
@@ -76,7 +76,7 @@ func BenchmarkFilterNoGlob(b *testing.B) {
}
func BenchmarkFilter2(b *testing.B) {
f, _ := CompileFilter([]string{"aa", "bb", "c", "ad", "ar", "at", "aq",
f, _ := Compile([]string{"aa", "bb", "c", "ad", "ar", "at", "aq",
"aw", "az", "axxx", "ab", "cpu", "mem", "net*"})
var tmp bool
for n := 0; n < b.N; n++ {
@@ -86,7 +86,7 @@ func BenchmarkFilter2(b *testing.B) {
}
func BenchmarkFilter2NoGlob(b *testing.B) {
f, _ := CompileFilter([]string{"aa", "bb", "c", "ad", "ar", "at", "aq",
f, _ := Compile([]string{"aa", "bb", "c", "ad", "ar", "at", "aq",
"aw", "az", "axxx", "ab", "cpu", "mem", "net"})
var tmp bool
for n := 0; n < b.N; n++ {

View File

@@ -1,6 +1,8 @@
package buffer
import (
"sync"
"github.com/influxdata/telegraf"
)
@@ -11,6 +13,8 @@ type Buffer struct {
drops int
// total metrics added
total int
mu sync.Mutex
}
// NewBuffer returns a Buffer
@@ -61,11 +65,13 @@ func (b *Buffer) Add(metrics ...telegraf.Metric) {
// the batch will be of maximum length batchSize. It can be less than batchSize,
// if the length of Buffer is less than batchSize.
func (b *Buffer) Batch(batchSize int) []telegraf.Metric {
b.mu.Lock()
n := min(len(b.buf), batchSize)
out := make([]telegraf.Metric, n)
for i := 0; i < n; i++ {
out[i] = <-b.buf
}
b.mu.Unlock()
return out
}
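The new mutex serializes Batch against other callers draining the same channel: without
it, two concurrent flushes could each read the buffer length and then block on receives
for metrics the other goroutine already took. A stripped-down sketch of the same pattern
(not the actual Buffer type, and without the drop-oldest behaviour) follows.

```go
package main

import (
	"fmt"
	"sync"
)

// miniBuffer is a stand-in for telegraf's internal buffer: a bounded channel
// of values with a mutex guarding the multi-step Batch operation.
type miniBuffer struct {
	mu  sync.Mutex
	buf chan int
}

func newMiniBuffer(size int) *miniBuffer {
	return &miniBuffer{buf: make(chan int, size)}
}

func (b *miniBuffer) Add(v int) {
	select {
	case b.buf <- v:
	default:
		// buffer full: drop (the real buffer drops the oldest metric instead)
	}
}

// Batch drains up to batchSize values. The mutex makes the length check and
// the receives a single step with respect to other Batch callers.
func (b *miniBuffer) Batch(batchSize int) []int {
	b.mu.Lock()
	defer b.mu.Unlock()
	n := len(b.buf)
	if batchSize < n {
		n = batchSize
	}
	out := make([]int, n)
	for i := 0; i < n; i++ {
		out[i] = <-b.buf
	}
	return out
}

func main() {
	b := newMiniBuffer(10)
	for i := 0; i < 5; i++ {
		b.Add(i)
	}
	fmt.Println(b.Batch(3)) // [0 1 2]
	fmt.Println(b.Batch(3)) // [3 4]
}
```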

View File

@@ -11,15 +11,18 @@ import (
"regexp"
"runtime"
"sort"
"strconv"
"strings"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/internal/models"
"github.com/influxdata/telegraf/plugins/aggregators"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/plugins/outputs"
"github.com/influxdata/telegraf/plugins/parsers"
"github.com/influxdata/telegraf/plugins/processors"
"github.com/influxdata/telegraf/plugins/serializers"
"github.com/influxdata/config"
@@ -47,9 +50,12 @@ type Config struct {
InputFilters []string
OutputFilters []string
Agent *AgentConfig
Inputs []*models.RunningInput
Outputs []*models.RunningOutput
Agent *AgentConfig
Inputs []*models.RunningInput
Outputs []*models.RunningOutput
Aggregators []*models.RunningAggregator
// Processors have a slice wrapper type because they need to be sorted
Processors models.RunningProcessors
}
func NewConfig() *Config {
@@ -64,6 +70,7 @@ func NewConfig() *Config {
Tags: make(map[string]string),
Inputs: make([]*models.RunningInput, 0),
Outputs: make([]*models.RunningOutput, 0),
Processors: make([]*models.RunningProcessor, 0),
InputFilters: make([]string, 0),
OutputFilters: make([]string, 0),
}
@@ -125,6 +132,9 @@ type AgentConfig struct {
// Debug is the option for running in debug mode
Debug bool
// Logfile specifies the file to send logs to
Logfile string
// Quiet is the option for running in quiet mode
Quiet bool
Hostname string
@@ -135,7 +145,7 @@ type AgentConfig struct {
func (c *Config) InputNames() []string {
var name []string
for _, input := range c.Inputs {
name = append(name, input.Name)
name = append(name, input.Name())
}
return name
}
@@ -195,12 +205,15 @@ var header = `# Telegraf Configuration
## ie, if interval="10s" then always collect on :00, :10, :20, etc.
round_interval = true
## Telegraf will send metrics to outputs in batches of at
## most metric_batch_size metrics.
## Telegraf will send metrics to outputs in batches of at most
## metric_batch_size metrics.
## This controls the size of writes that Telegraf sends to output plugins.
metric_batch_size = 1000
## For failed writes, telegraf will cache metric_buffer_limit metrics for each
## output, and will flush this buffer on a successful write. Oldest metrics
## are dropped first when this buffer fills.
## This buffer only fills when writes fail to output plugin(s).
metric_buffer_limit = 10000
## Collection jitter is used to jitter the collection by a random amount.
@@ -222,10 +235,15 @@ var header = `# Telegraf Configuration
## Precision will NOT be used for service inputs, such as logparser and statsd.
## Valid values are "ns", "us" (or "µs"), "ms", "s".
precision = ""
## Run telegraf in debug mode
## Logging configuration:
## Run telegraf with debug log messages.
debug = false
## Run telegraf in quiet mode
## Run telegraf in quiet mode (error log messages only).
quiet = false
## Specify the log file name. The empty string means to log to stderr.
logfile = ""
## Override default hostname, if empty use os.Hostname()
hostname = ""
## If set to true, do no set the "host" tag in the telegraf agent.
@@ -237,6 +255,20 @@ var header = `# Telegraf Configuration
###############################################################################
`
var processorHeader = `
###############################################################################
# PROCESSOR PLUGINS #
###############################################################################
`
var aggregatorHeader = `
###############################################################################
# AGGREGATOR PLUGINS #
###############################################################################
`
var inputHeader = `
###############################################################################
@@ -252,9 +284,15 @@ var serviceInputHeader = `
`
// PrintSampleConfig prints the sample config
func PrintSampleConfig(inputFilters []string, outputFilters []string) {
func PrintSampleConfig(
inputFilters []string,
outputFilters []string,
aggregatorFilters []string,
processorFilters []string,
) {
fmt.Printf(header)
// print output plugins
if len(outputFilters) != 0 {
printFilteredOutputs(outputFilters, false)
} else {
@@ -270,6 +308,33 @@ func PrintSampleConfig(inputFilters []string, outputFilters []string) {
printFilteredOutputs(pnames, true)
}
// print processor plugins
fmt.Printf(processorHeader)
if len(processorFilters) != 0 {
printFilteredProcessors(processorFilters, false)
} else {
pnames := []string{}
for pname := range processors.Processors {
pnames = append(pnames, pname)
}
sort.Strings(pnames)
printFilteredProcessors(pnames, true)
}
// print aggregator plugins
fmt.Printf(aggregatorHeader)
if len(aggregatorFilters) != 0 {
printFilteredAggregators(aggregatorFilters, false)
} else {
pnames := []string{}
for pname := range aggregators.Aggregators {
pnames = append(pnames, pname)
}
sort.Strings(pnames)
printFilteredAggregators(pnames, true)
}
// print input plugins
fmt.Printf(inputHeader)
if len(inputFilters) != 0 {
printFilteredInputs(inputFilters, false)
@@ -287,6 +352,42 @@ func PrintSampleConfig(inputFilters []string, outputFilters []string) {
}
}
func printFilteredProcessors(processorFilters []string, commented bool) {
// Filter processors
var pnames []string
for pname := range processors.Processors {
if sliceContains(pname, processorFilters) {
pnames = append(pnames, pname)
}
}
sort.Strings(pnames)
// Print processors
for _, pname := range pnames {
creator := processors.Processors[pname]
output := creator()
printConfig(pname, output, "processors", commented)
}
}
func printFilteredAggregators(aggregatorFilters []string, commented bool) {
// Filter aggregators
var anames []string
for aname := range aggregators.Aggregators {
if sliceContains(aname, aggregatorFilters) {
anames = append(anames, aname)
}
}
sort.Strings(anames)
// Print aggregators
for _, aname := range anames {
creator := aggregators.Aggregators[aname]
output := creator()
printConfig(aname, output, "aggregators", commented)
}
}
func printFilteredInputs(inputFilters []string, commented bool) {
// Filter inputs
var pnames []string
@@ -404,24 +505,21 @@ func PrintOutputConfig(name string) error {
}
func (c *Config) LoadDirectory(path string) error {
directoryEntries, err := ioutil.ReadDir(path)
if err != nil {
return err
}
for _, entry := range directoryEntries {
if entry.IsDir() {
continue
walkfn := func(thispath string, info os.FileInfo, _ error) error {
if info.IsDir() {
return nil
}
name := entry.Name()
name := info.Name()
if len(name) < 6 || name[len(name)-5:] != ".conf" {
continue
return nil
}
err := c.LoadConfig(filepath.Join(path, name))
err := c.LoadConfig(thispath)
if err != nil {
return err
}
return nil
}
return nil
return filepath.Walk(path, walkfn)
}
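LoadDirectory now walks the directory tree instead of reading only the top level, so
`.conf` files in subdirectories are picked up as well. A standalone sketch of the same
filepath.Walk pattern, with a print statement standing in for `c.LoadConfig` and a
hypothetical directory path:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

func main() {
	dir := "/etc/telegraf/telegraf.d" // hypothetical config directory

	walkfn := func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if info.IsDir() || !strings.HasSuffix(info.Name(), ".conf") {
			// skip directories themselves and non-.conf files,
			// but keep walking into subdirectories
			return nil
		}
		fmt.Println("would load:", path) // stand-in for c.LoadConfig(path)
		return nil
	}

	if err := filepath.Walk(dir, walkfn); err != nil {
		fmt.Fprintln(os.Stderr, "walk error:", err)
	}
}
```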
// Try to find a default config file at these locations (in order):
@@ -438,7 +536,7 @@ func getDefaultConfigPath() (string, error) {
}
for _, path := range []string{envfile, homefile, etcfile} {
if _, err := os.Stat(path); err == nil {
log.Printf("Using config file: %s", path)
log.Printf("I! Using config file: %s", path)
return path, nil
}
}
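The log calls touched in this change pick up a one-letter level prefix ("I!" for info,
"E!" for error) in front of the message; the logger itself is still the standard
library one, and the prefix is what the log-level filtering (debug/quiet settings above)
keys on. A minimal sketch of the convention:

```go
package main

import "log"

func main() {
	// The one-letter prefix marks the level; the rest is an ordinary
	// standard-library log line.
	log.Printf("I! Using config file: %s", "/etc/telegraf/telegraf.conf")
	log.Printf("E! Could not parse [agent] config")
}
```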
@@ -469,7 +567,7 @@ func (c *Config) LoadConfig(path string) error {
return fmt.Errorf("%s: invalid configuration", path)
}
if err = config.UnmarshalTable(subTable, c.Tags); err != nil {
log.Printf("Could not parse [global_tags] config\n")
log.Printf("E! Could not parse [global_tags] config\n")
return fmt.Errorf("Error parsing %s, %s", path, err)
}
}
@@ -482,7 +580,7 @@ func (c *Config) LoadConfig(path string) error {
return fmt.Errorf("%s: invalid configuration", path)
}
if err = config.UnmarshalTable(subTable, c.Agent); err != nil {
log.Printf("Could not parse [agent] config\n")
log.Printf("E! Could not parse [agent] config\n")
return fmt.Errorf("Error parsing %s, %s", path, err)
}
}
@@ -499,6 +597,7 @@ func (c *Config) LoadConfig(path string) error {
case "outputs":
for pluginName, pluginVal := range subTable.Fields {
switch pluginSubTable := pluginVal.(type) {
// legacy [outputs.influxdb] support
case *ast.Table:
if err = c.addOutput(pluginName, pluginSubTable); err != nil {
return fmt.Errorf("Error parsing %s, %s", path, err)
@@ -517,6 +616,7 @@ func (c *Config) LoadConfig(path string) error {
case "inputs", "plugins":
for pluginName, pluginVal := range subTable.Fields {
switch pluginSubTable := pluginVal.(type) {
// legacy [inputs.cpu] support
case *ast.Table:
if err = c.addInput(pluginName, pluginSubTable); err != nil {
return fmt.Errorf("Error parsing %s, %s", path, err)
@@ -532,6 +632,34 @@ func (c *Config) LoadConfig(path string) error {
pluginName, path)
}
}
case "processors":
for pluginName, pluginVal := range subTable.Fields {
switch pluginSubTable := pluginVal.(type) {
case []*ast.Table:
for _, t := range pluginSubTable {
if err = c.addProcessor(pluginName, t); err != nil {
return fmt.Errorf("Error parsing %s, %s", path, err)
}
}
default:
return fmt.Errorf("Unsupported config format: %s, file %s",
pluginName, path)
}
}
case "aggregators":
for pluginName, pluginVal := range subTable.Fields {
switch pluginSubTable := pluginVal.(type) {
case []*ast.Table:
for _, t := range pluginSubTable {
if err = c.addAggregator(pluginName, t); err != nil {
return fmt.Errorf("Error parsing %s, %s", path, err)
}
}
default:
return fmt.Errorf("Unsupported config format: %s, file %s",
pluginName, path)
}
}
// Assume it's an input for legacy config file support if no other
// identifiers are present
default:
@@ -540,6 +668,10 @@ func (c *Config) LoadConfig(path string) error {
}
}
}
if len(c.Processors) > 1 {
sort.Sort(c.Processors)
}
return nil
}
@@ -572,6 +704,52 @@ func parseFile(fpath string) (*ast.Table, error) {
return toml.Parse(contents)
}
func (c *Config) addAggregator(name string, table *ast.Table) error {
creator, ok := aggregators.Aggregators[name]
if !ok {
return fmt.Errorf("Undefined but requested aggregator: %s", name)
}
aggregator := creator()
conf, err := buildAggregator(name, table)
if err != nil {
return err
}
if err := config.UnmarshalTable(table, aggregator); err != nil {
return err
}
c.Aggregators = append(c.Aggregators, models.NewRunningAggregator(aggregator, conf))
return nil
}
func (c *Config) addProcessor(name string, table *ast.Table) error {
creator, ok := processors.Processors[name]
if !ok {
return fmt.Errorf("Undefined but requested processor: %s", name)
}
processor := creator()
processorConfig, err := buildProcessor(name, table)
if err != nil {
return err
}
if err := config.UnmarshalTable(table, processor); err != nil {
return err
}
rf := &models.RunningProcessor{
Name: name,
Processor: processor,
Config: processorConfig,
}
c.Processors = append(c.Processors, rf)
return nil
}
func (c *Config) addOutput(name string, table *ast.Table) error {
if len(c.OutputFilters) > 0 && !sliceContains(name, c.OutputFilters) {
return nil
@@ -644,7 +822,6 @@ func (c *Config) addInput(name string, table *ast.Table) error {
}
rp := &models.RunningInput{
Name: name,
Input: input,
Config: pluginConfig,
}
@@ -652,6 +829,144 @@ func (c *Config) addInput(name string, table *ast.Table) error {
return nil
}
// buildAggregator parses Aggregator specific items from the ast.Table,
// builds the filter and returns a
// models.AggregatorConfig to be inserted into models.RunningAggregator
func buildAggregator(name string, tbl *ast.Table) (*models.AggregatorConfig, error) {
unsupportedFields := []string{"tagexclude", "taginclude"}
for _, field := range unsupportedFields {
if _, ok := tbl.Fields[field]; ok {
return nil, fmt.Errorf("%s is not supported for aggregator plugins (%s).",
field, name)
}
}
conf := &models.AggregatorConfig{
Name: name,
Delay: time.Millisecond * 100,
Period: time.Second * 30,
}
if node, ok := tbl.Fields["period"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
dur, err := time.ParseDuration(str.Value)
if err != nil {
return nil, err
}
conf.Period = dur
}
}
}
if node, ok := tbl.Fields["delay"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
dur, err := time.ParseDuration(str.Value)
if err != nil {
return nil, err
}
conf.Delay = dur
}
}
}
if node, ok := tbl.Fields["drop_original"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if b, ok := kv.Value.(*ast.Boolean); ok {
var err error
conf.DropOriginal, err = strconv.ParseBool(b.Value)
if err != nil {
log.Printf("Error parsing boolean value for %s: %s\n", name, err)
}
}
}
}
if node, ok := tbl.Fields["name_prefix"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
conf.MeasurementPrefix = str.Value
}
}
}
if node, ok := tbl.Fields["name_suffix"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
conf.MeasurementSuffix = str.Value
}
}
}
if node, ok := tbl.Fields["name_override"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
conf.NameOverride = str.Value
}
}
}
conf.Tags = make(map[string]string)
if node, ok := tbl.Fields["tags"]; ok {
if subtbl, ok := node.(*ast.Table); ok {
if err := config.UnmarshalTable(subtbl, conf.Tags); err != nil {
log.Printf("Could not parse tags for input %s\n", name)
}
}
}
delete(tbl.Fields, "period")
delete(tbl.Fields, "delay")
delete(tbl.Fields, "drop_original")
delete(tbl.Fields, "name_prefix")
delete(tbl.Fields, "name_suffix")
delete(tbl.Fields, "name_override")
delete(tbl.Fields, "tags")
var err error
conf.Filter, err = buildFilter(tbl)
if err != nil {
return conf, err
}
return conf, nil
}
// buildProcessor parses Processor specific items from the ast.Table,
// builds the filter and returns a
// models.ProcessorConfig to be inserted into models.RunningProcessor
func buildProcessor(name string, tbl *ast.Table) (*models.ProcessorConfig, error) {
conf := &models.ProcessorConfig{Name: name}
unsupportedFields := []string{"tagexclude", "taginclude", "fielddrop", "fieldpass"}
for _, field := range unsupportedFields {
if _, ok := tbl.Fields[field]; ok {
return nil, fmt.Errorf("%s is not supported for processor plugins (%s).",
field, name)
}
}
if node, ok := tbl.Fields["order"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if b, ok := kv.Value.(*ast.Integer); ok {
var err error
conf.Order, err = strconv.ParseInt(b.Value, 10, 64)
if err != nil {
log.Printf("Error parsing int value for %s: %s\n", name, err)
}
}
}
}
delete(tbl.Fields, "order")
var err error
conf.Filter, err = buildFilter(tbl)
if err != nil {
return conf, err
}
return conf, nil
}
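The integer `order` parsed here is what the sort.Sort(c.Processors) call earlier in this
diff uses to make processor execution deterministic; without it, the order in which
processor tables come out of the parsed config is effectively random. A minimal sketch of
that idea with a stand-in struct (not the actual models.RunningProcessors type):

```go
package main

import (
	"fmt"
	"sort"
)

// runningProcessor stands in for models.RunningProcessor; only the Order
// field matters for this sketch.
type runningProcessor struct {
	Name  string
	Order int64
}

func main() {
	procs := []runningProcessor{
		{Name: "p_second", Order: 2},
		{Name: "p_first", Order: 1},
	}
	// Sort by the configured order so execution is deterministic.
	sort.Slice(procs, func(i, j int) bool { return procs[i].Order < procs[j].Order })
	fmt.Println(procs) // [{p_first 1} {p_second 2}]
}
```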
// buildFilter builds a Filter
// (tagpass/tagdrop/namepass/namedrop/fieldpass/fielddrop) to
// be inserted into the models.OutputConfig/models.InputConfig
@@ -665,7 +980,6 @@ func buildFilter(tbl *ast.Table) (models.Filter, error) {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
f.NamePass = append(f.NamePass, str.Value)
f.IsActive = true
}
}
}
@@ -678,7 +992,6 @@ func buildFilter(tbl *ast.Table) (models.Filter, error) {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
f.NameDrop = append(f.NameDrop, str.Value)
f.IsActive = true
}
}
}
@@ -693,7 +1006,6 @@ func buildFilter(tbl *ast.Table) (models.Filter, error) {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
f.FieldPass = append(f.FieldPass, str.Value)
f.IsActive = true
}
}
}
@@ -709,7 +1021,6 @@ func buildFilter(tbl *ast.Table) (models.Filter, error) {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
f.FieldDrop = append(f.FieldDrop, str.Value)
f.IsActive = true
}
}
}
@@ -730,7 +1041,6 @@ func buildFilter(tbl *ast.Table) (models.Filter, error) {
}
}
f.TagPass = append(f.TagPass, *tagfilter)
f.IsActive = true
}
}
}
@@ -749,7 +1059,6 @@ func buildFilter(tbl *ast.Table) (models.Filter, error) {
}
}
f.TagDrop = append(f.TagDrop, *tagfilter)
f.IsActive = true
}
}
}
@@ -778,7 +1087,7 @@ func buildFilter(tbl *ast.Table) (models.Filter, error) {
}
}
}
if err := f.CompileFilter(); err != nil {
if err := f.Compile(); err != nil {
return f, err
}
@@ -841,7 +1150,7 @@ func buildInput(name string, tbl *ast.Table) (*models.InputConfig, error) {
if node, ok := tbl.Fields["tags"]; ok {
if subtbl, ok := node.(*ast.Table); ok {
if err := config.UnmarshalTable(subtbl, cp.Tags); err != nil {
log.Printf("Could not parse tags for input %s\n", name)
log.Printf("E! Could not parse tags for input %s\n", name)
}
}
}

View File

@@ -43,9 +43,8 @@ func TestConfig_LoadSingleInputWithEnvVars(t *testing.T) {
Filter: []string{"mytag"},
},
},
IsActive: true,
}
assert.NoError(t, filter.CompileFilter())
assert.NoError(t, filter.Compile())
mConfig := &models.InputConfig{
Name: "memcached",
Filter: filter,
@@ -83,9 +82,8 @@ func TestConfig_LoadSingleInput(t *testing.T) {
Filter: []string{"mytag"},
},
},
IsActive: true,
}
assert.NoError(t, filter.CompileFilter())
assert.NoError(t, filter.Compile())
mConfig := &models.InputConfig{
Name: "memcached",
Filter: filter,
@@ -130,9 +128,8 @@ func TestConfig_LoadDirectory(t *testing.T) {
Filter: []string{"mytag"},
},
},
IsActive: true,
}
assert.NoError(t, filter.CompileFilter())
assert.NoError(t, filter.Compile())
mConfig := &models.InputConfig{
Name: "memcached",
Filter: filter,

View File

@@ -12,21 +12,23 @@ import (
var sepStr = fmt.Sprintf("%v", string(os.PathSeparator))
type GlobPath struct {
path string
hasMeta bool
g glob.Glob
root string
path string
hasMeta bool
hasSuperMeta bool
g glob.Glob
root string
}
func Compile(path string) (*GlobPath, error) {
out := GlobPath{
hasMeta: hasMeta(path),
path: path,
hasMeta: hasMeta(path),
hasSuperMeta: hasSuperMeta(path),
path: path,
}
// if there are no glob meta characters in the path, don't bother compiling
// a glob object or finding the root directory. (see short-circuit in Match)
if !out.hasMeta {
if !out.hasMeta || !out.hasSuperMeta {
return &out, nil
}
@@ -48,6 +50,17 @@ func (g *GlobPath) Match() map[string]os.FileInfo {
}
return out
}
if !g.hasSuperMeta {
out := make(map[string]os.FileInfo)
files, _ := filepath.Glob(g.path)
for _, file := range files {
info, err := os.Stat(file)
if !os.IsNotExist(err) {
out[file] = info
}
}
return out
}
return walkFilePath(g.root, g.g)
}
@@ -96,3 +109,8 @@ func findRootDir(path string) string {
func hasMeta(path string) bool {
return strings.IndexAny(path, "*?[") >= 0
}
// hasSuperMeta reports whether path contains any super magic glob characters (**).
func hasSuperMeta(path string) bool {
return strings.Index(path, "**") >= 0
}
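The new hasSuperMeta check lets Match fall back to the standard library's filepath.Glob
when the pattern uses only ordinary wildcards, and reserves the more expensive directory
walk for `**` patterns, which filepath.Glob does not understand. A small sketch of that
distinction (the paths are hypothetical):

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

func hasSuperMeta(path string) bool {
	return strings.Contains(path, "**")
}

func main() {
	patterns := []string{
		"/var/log/*.log",    // plain glob: filepath.Glob can expand it
		"/var/log/**/*.log", // super glob: needs a recursive walk instead
	}
	for _, p := range patterns {
		if hasSuperMeta(p) {
			fmt.Printf("%s -> walk the tree with a glob matcher\n", p)
			continue
		}
		matches, err := filepath.Glob(p)
		if err != nil {
			fmt.Println("bad pattern:", err)
			continue
		}
		fmt.Printf("%s -> %d match(es) via filepath.Glob\n", p, len(matches))
	}
}
```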

View File

@@ -35,12 +35,22 @@ type Duration struct {
// UnmarshalTOML parses the duration from the TOML config file
func (d *Duration) UnmarshalTOML(b []byte) error {
var err error
// Parse string duration, ie, "1s"
d.Duration, err = time.ParseDuration(string(b[1 : len(b)-1]))
b = bytes.Trim(b, `'`)
// see if we can directly convert it
d.Duration, err = time.ParseDuration(string(b))
if err == nil {
return nil
}
// Parse string duration, ie, "1s"
if uq, err := strconv.Unquote(string(b)); err == nil && len(uq) > 0 {
d.Duration, err = time.ParseDuration(uq)
if err == nil {
return nil
}
}
// First try parsing as integer seconds
sI, err := strconv.ParseInt(string(b), 10, 64)
if err == nil {
@@ -198,7 +208,7 @@ func WaitTimeout(c *exec.Cmd, timeout time.Duration) error {
return err
case <-timer.C:
if err := c.Process.Kill(); err != nil {
log.Printf("FATAL error killing process: %s", err)
log.Printf("E! FATAL error killing process: %s", err)
return err
}
// wait for the command to return after killing it

View File

@@ -118,7 +118,7 @@ func TestRandomSleep(t *testing.T) {
s = time.Now()
RandomSleep(time.Millisecond*50, make(chan struct{}))
elapsed = time.Since(s)
assert.True(t, elapsed < time.Millisecond*50)
assert.True(t, elapsed < time.Millisecond*100)
// test that shutdown is respected
s = time.Now()
@@ -131,3 +131,26 @@ func TestRandomSleep(t *testing.T) {
elapsed = time.Since(s)
assert.True(t, elapsed < time.Millisecond*150)
}
func TestDuration(t *testing.T) {
var d Duration
d.UnmarshalTOML([]byte(`"1s"`))
assert.Equal(t, time.Second, d.Duration)
d = Duration{}
d.UnmarshalTOML([]byte(`1s`))
assert.Equal(t, time.Second, d.Duration)
d = Duration{}
d.UnmarshalTOML([]byte(`'1s'`))
assert.Equal(t, time.Second, d.Duration)
d = Duration{}
d.UnmarshalTOML([]byte(`10`))
assert.Equal(t, 10*time.Second, d.Duration)
d = Duration{}
d.UnmarshalTOML([]byte(`1.5`))
assert.Equal(t, time.Second, d.Duration)
}
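The TestDuration cases above exercise the fallback chain added to Duration.UnmarshalTOML
in the previous file: strip single quotes, try a plain duration, then a quoted duration,
then integer seconds, and, as the `1.5` case suggests, float seconds. A standalone sketch
of that chain (not the exact internal.Duration code):

```go
package main

import (
	"bytes"
	"fmt"
	"strconv"
	"time"
)

// parseTOMLDuration mirrors the fallback order used by Duration.UnmarshalTOML.
func parseTOMLDuration(b []byte) (time.Duration, error) {
	b = bytes.Trim(b, `'`)

	if d, err := time.ParseDuration(string(b)); err == nil {
		return d, nil
	}
	if uq, err := strconv.Unquote(string(b)); err == nil && len(uq) > 0 {
		if d, err := time.ParseDuration(uq); err == nil {
			return d, nil
		}
	}
	if i, err := strconv.ParseInt(string(b), 10, 64); err == nil {
		return time.Duration(i) * time.Second, nil
	}
	if f, err := strconv.ParseFloat(string(b), 64); err == nil {
		return time.Duration(f) * time.Second, nil
	}
	return 0, fmt.Errorf("cannot parse %q as a duration", b)
}

func main() {
	for _, in := range []string{`"1s"`, `1s`, `'1s'`, `10`, `1.5`} {
		d, _ := parseTOMLDuration([]byte(in))
		fmt.Printf("%-6s -> %v\n", in, d)
	}
}
```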

View File

@@ -3,7 +3,6 @@ package models
import (
"fmt"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/filter"
)
@@ -34,47 +33,59 @@ type Filter struct {
TagInclude []string
tagInclude filter.Filter
IsActive bool
isActive bool
}
// Compile all Filter lists into filter.Filter objects.
func (f *Filter) CompileFilter() error {
func (f *Filter) Compile() error {
if len(f.NameDrop) == 0 &&
len(f.NamePass) == 0 &&
len(f.FieldDrop) == 0 &&
len(f.FieldPass) == 0 &&
len(f.TagInclude) == 0 &&
len(f.TagExclude) == 0 &&
len(f.TagPass) == 0 &&
len(f.TagDrop) == 0 {
return nil
}
f.isActive = true
var err error
f.nameDrop, err = filter.CompileFilter(f.NameDrop)
f.nameDrop, err = filter.Compile(f.NameDrop)
if err != nil {
return fmt.Errorf("Error compiling 'namedrop', %s", err)
}
f.namePass, err = filter.CompileFilter(f.NamePass)
f.namePass, err = filter.Compile(f.NamePass)
if err != nil {
return fmt.Errorf("Error compiling 'namepass', %s", err)
}
f.fieldDrop, err = filter.CompileFilter(f.FieldDrop)
f.fieldDrop, err = filter.Compile(f.FieldDrop)
if err != nil {
return fmt.Errorf("Error compiling 'fielddrop', %s", err)
}
f.fieldPass, err = filter.CompileFilter(f.FieldPass)
f.fieldPass, err = filter.Compile(f.FieldPass)
if err != nil {
return fmt.Errorf("Error compiling 'fieldpass', %s", err)
}
f.tagExclude, err = filter.CompileFilter(f.TagExclude)
f.tagExclude, err = filter.Compile(f.TagExclude)
if err != nil {
return fmt.Errorf("Error compiling 'tagexclude', %s", err)
}
f.tagInclude, err = filter.CompileFilter(f.TagInclude)
f.tagInclude, err = filter.Compile(f.TagInclude)
if err != nil {
return fmt.Errorf("Error compiling 'taginclude', %s", err)
}
for i, _ := range f.TagDrop {
f.TagDrop[i].filter, err = filter.CompileFilter(f.TagDrop[i].Filter)
f.TagDrop[i].filter, err = filter.Compile(f.TagDrop[i].Filter)
if err != nil {
return fmt.Errorf("Error compiling 'tagdrop', %s", err)
}
}
for i, _ := range f.TagPass {
f.TagPass[i].filter, err = filter.CompileFilter(f.TagPass[i].Filter)
f.TagPass[i].filter, err = filter.Compile(f.TagPass[i].Filter)
if err != nil {
return fmt.Errorf("Error compiling 'tagpass', %s", err)
}
@@ -82,16 +93,52 @@ func (f *Filter) CompileFilter() error {
return nil
}
func (f *Filter) ShouldMetricPass(metric telegraf.Metric) bool {
if f.ShouldNamePass(metric.Name()) && f.ShouldTagsPass(metric.Tags()) {
// Apply applies the filter to the given measurement name, fields map, and
// tags map. It will return false if the metric should be "filtered out", and
// true if the metric should "pass".
// It will modify tags & fields in-place if they need to be deleted.
func (f *Filter) Apply(
measurement string,
fields map[string]interface{},
tags map[string]string,
) bool {
if !f.isActive {
return true
}
return false
// check if the measurement name should pass
if !f.shouldNamePass(measurement) {
return false
}
// check if the tags should pass
if !f.shouldTagsPass(tags) {
return false
}
// filter fields
for fieldkey, _ := range fields {
if !f.shouldFieldPass(fieldkey) {
delete(fields, fieldkey)
}
}
if len(fields) == 0 {
return false
}
// filter tags
f.filterTags(tags)
return true
}
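Apply replaces the older ShouldMetricPass/FilterTags pair: it both decides whether the
metric passes and prunes fields and tags in place, returning false once every field has
been filtered away. A self-contained sketch of that in-place behaviour using plain maps
(not the actual models.Filter type):

```go
package main

import "fmt"

// applyFieldDrop removes any field whose key is in the drop set and reports
// whether the metric still has fields left, mirroring how Filter.Apply
// deletes entries from the fields map in place and returns false when the
// metric has been filtered down to nothing.
func applyFieldDrop(fields map[string]interface{}, drop map[string]bool) bool {
	for k := range fields {
		if drop[k] {
			delete(fields, k)
		}
	}
	return len(fields) > 0
}

func main() {
	fields := map[string]interface{}{"value": int64(1), "value2": int64(2)}
	ok := applyFieldDrop(fields, map[string]bool{"value": true})
	fmt.Println(ok, fields) // true map[value2:2]

	ok = applyFieldDrop(fields, map[string]bool{"value2": true})
	fmt.Println(ok, fields) // false map[]
}
```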
// IsActive returns true if any filter rules are configured.
func (f *Filter) IsActive() bool {
return f.isActive
}
// shouldNamePass returns true if the metric should pass, false if should drop
// based on the drop/pass filter parameters
func (f *Filter) ShouldNamePass(key string) bool {
func (f *Filter) shouldNamePass(key string) bool {
if f.namePass != nil {
if f.namePass.Match(key) {
return true
@@ -107,9 +154,9 @@ func (f *Filter) ShouldNamePass(key string) bool {
return true
}
// ShouldFieldsPass returns true if the metric should pass, false if should drop
// shouldFieldPass returns true if the metric should pass, false if should drop
// based on the drop/pass filter parameters
func (f *Filter) ShouldFieldsPass(key string) bool {
func (f *Filter) shouldFieldPass(key string) bool {
if f.fieldPass != nil {
if f.fieldPass.Match(key) {
return true
@@ -125,9 +172,9 @@ func (f *Filter) ShouldFieldsPass(key string) bool {
return true
}
// ShouldTagsPass returns true if the metric should pass, false if should drop
// shouldTagsPass returns true if the metric should pass, false if should drop
// based on the tagdrop/tagpass filter parameters
func (f *Filter) ShouldTagsPass(tags map[string]string) bool {
func (f *Filter) shouldTagsPass(tags map[string]string) bool {
if f.TagPass != nil {
for _, pat := range f.TagPass {
if pat.filter == nil {
@@ -161,7 +208,7 @@ func (f *Filter) ShouldTagsPass(tags map[string]string) bool {
// Apply TagInclude and TagExclude filters.
// modifies the tags map in-place.
func (f *Filter) FilterTags(tags map[string]string) {
func (f *Filter) filterTags(tags map[string]string) {
if f.tagInclude != nil {
for k, _ := range tags {
if !f.tagInclude.Match(k) {

View File

@@ -3,12 +3,62 @@ package models
import (
"testing"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestFilter_ApplyEmpty(t *testing.T) {
f := Filter{}
require.NoError(t, f.Compile())
assert.False(t, f.IsActive())
assert.True(t, f.Apply("m", map[string]interface{}{"value": int64(1)}, map[string]string{}))
}
func TestFilter_ApplyTagsDontPass(t *testing.T) {
filters := []TagFilter{
TagFilter{
Name: "cpu",
Filter: []string{"cpu-*"},
},
}
f := Filter{
TagDrop: filters,
}
require.NoError(t, f.Compile())
require.NoError(t, f.Compile())
assert.True(t, f.IsActive())
assert.False(t, f.Apply("m",
map[string]interface{}{"value": int64(1)},
map[string]string{"cpu": "cpu-total"}))
}
func TestFilter_ApplyDeleteFields(t *testing.T) {
f := Filter{
FieldDrop: []string{"value"},
}
require.NoError(t, f.Compile())
require.NoError(t, f.Compile())
assert.True(t, f.IsActive())
fields := map[string]interface{}{"value": int64(1), "value2": int64(2)}
assert.True(t, f.Apply("m", fields, nil))
assert.Equal(t, map[string]interface{}{"value2": int64(2)}, fields)
}
func TestFilter_ApplyDeleteAllFields(t *testing.T) {
f := Filter{
FieldDrop: []string{"value*"},
}
require.NoError(t, f.Compile())
require.NoError(t, f.Compile())
assert.True(t, f.IsActive())
fields := map[string]interface{}{"value": int64(1), "value2": int64(2)}
assert.False(t, f.Apply("m", fields, nil))
}
func TestFilter_Empty(t *testing.T) {
f := Filter{}
@@ -23,7 +73,7 @@ func TestFilter_Empty(t *testing.T) {
}
for _, measurement := range measurements {
if !f.ShouldFieldsPass(measurement) {
if !f.shouldFieldPass(measurement) {
t.Errorf("Expected measurement %s to pass", measurement)
}
}
@@ -33,7 +83,7 @@ func TestFilter_NamePass(t *testing.T) {
f := Filter{
NamePass: []string{"foo*", "cpu_usage_idle"},
}
require.NoError(t, f.CompileFilter())
require.NoError(t, f.Compile())
passes := []string{
"foo",
@@ -51,13 +101,13 @@ func TestFilter_NamePass(t *testing.T) {
}
for _, measurement := range passes {
if !f.ShouldNamePass(measurement) {
if !f.shouldNamePass(measurement) {
t.Errorf("Expected measurement %s to pass", measurement)
}
}
for _, measurement := range drops {
if f.ShouldNamePass(measurement) {
if f.shouldNamePass(measurement) {
t.Errorf("Expected measurement %s to drop", measurement)
}
}
@@ -67,7 +117,7 @@ func TestFilter_NameDrop(t *testing.T) {
f := Filter{
NameDrop: []string{"foo*", "cpu_usage_idle"},
}
require.NoError(t, f.CompileFilter())
require.NoError(t, f.Compile())
drops := []string{
"foo",
@@ -85,13 +135,13 @@ func TestFilter_NameDrop(t *testing.T) {
}
for _, measurement := range passes {
if !f.ShouldNamePass(measurement) {
if !f.shouldNamePass(measurement) {
t.Errorf("Expected measurement %s to pass", measurement)
}
}
for _, measurement := range drops {
if f.ShouldNamePass(measurement) {
if f.shouldNamePass(measurement) {
t.Errorf("Expected measurement %s to drop", measurement)
}
}
@@ -101,7 +151,7 @@ func TestFilter_FieldPass(t *testing.T) {
f := Filter{
FieldPass: []string{"foo*", "cpu_usage_idle"},
}
require.NoError(t, f.CompileFilter())
require.NoError(t, f.Compile())
passes := []string{
"foo",
@@ -119,13 +169,13 @@ func TestFilter_FieldPass(t *testing.T) {
}
for _, measurement := range passes {
if !f.ShouldFieldsPass(measurement) {
if !f.shouldFieldPass(measurement) {
t.Errorf("Expected measurement %s to pass", measurement)
}
}
for _, measurement := range drops {
if f.ShouldFieldsPass(measurement) {
if f.shouldFieldPass(measurement) {
t.Errorf("Expected measurement %s to drop", measurement)
}
}
@@ -135,7 +185,7 @@ func TestFilter_FieldDrop(t *testing.T) {
f := Filter{
FieldDrop: []string{"foo*", "cpu_usage_idle"},
}
require.NoError(t, f.CompileFilter())
require.NoError(t, f.Compile())
drops := []string{
"foo",
@@ -153,13 +203,13 @@ func TestFilter_FieldDrop(t *testing.T) {
}
for _, measurement := range passes {
if !f.ShouldFieldsPass(measurement) {
if !f.shouldFieldPass(measurement) {
t.Errorf("Expected measurement %s to pass", measurement)
}
}
for _, measurement := range drops {
if f.ShouldFieldsPass(measurement) {
if f.shouldFieldPass(measurement) {
t.Errorf("Expected measurement %s to drop", measurement)
}
}
@@ -178,7 +228,7 @@ func TestFilter_TagPass(t *testing.T) {
f := Filter{
TagPass: filters,
}
require.NoError(t, f.CompileFilter())
require.NoError(t, f.Compile())
passes := []map[string]string{
{"cpu": "cpu-total"},
@@ -197,13 +247,13 @@ func TestFilter_TagPass(t *testing.T) {
}
for _, tags := range passes {
if !f.ShouldTagsPass(tags) {
if !f.shouldTagsPass(tags) {
t.Errorf("Expected tags %v to pass", tags)
}
}
for _, tags := range drops {
if f.ShouldTagsPass(tags) {
if f.shouldTagsPass(tags) {
t.Errorf("Expected tags %v to drop", tags)
}
}
@@ -222,7 +272,7 @@ func TestFilter_TagDrop(t *testing.T) {
f := Filter{
TagDrop: filters,
}
require.NoError(t, f.CompileFilter())
require.NoError(t, f.Compile())
drops := []map[string]string{
{"cpu": "cpu-total"},
@@ -241,30 +291,18 @@ func TestFilter_TagDrop(t *testing.T) {
}
for _, tags := range passes {
if !f.ShouldTagsPass(tags) {
if !f.shouldTagsPass(tags) {
t.Errorf("Expected tags %v to pass", tags)
}
}
for _, tags := range drops {
if f.ShouldTagsPass(tags) {
if f.shouldTagsPass(tags) {
t.Errorf("Expected tags %v to drop", tags)
}
}
}
func TestFilter_ShouldMetricsPass(t *testing.T) {
m := testutil.TestMetric(1, "testmetric")
f := Filter{
NameDrop: []string{"foobar"},
}
require.NoError(t, f.CompileFilter())
require.True(t, f.ShouldMetricPass(m))
m = testutil.TestMetric(1, "foobar")
require.False(t, f.ShouldMetricPass(m))
}
func TestFilter_FilterTagsNoMatches(t *testing.T) {
pretags := map[string]string{
"host": "localhost",
@@ -273,9 +311,9 @@ func TestFilter_FilterTagsNoMatches(t *testing.T) {
f := Filter{
TagExclude: []string{"nomatch"},
}
require.NoError(t, f.CompileFilter())
require.NoError(t, f.Compile())
f.FilterTags(pretags)
f.filterTags(pretags)
assert.Equal(t, map[string]string{
"host": "localhost",
"mytag": "foobar",
@@ -284,9 +322,9 @@ func TestFilter_FilterTagsNoMatches(t *testing.T) {
f = Filter{
TagInclude: []string{"nomatch"},
}
require.NoError(t, f.CompileFilter())
require.NoError(t, f.Compile())
f.FilterTags(pretags)
f.filterTags(pretags)
assert.Equal(t, map[string]string{}, pretags)
}
@@ -298,9 +336,9 @@ func TestFilter_FilterTagsMatches(t *testing.T) {
f := Filter{
TagExclude: []string{"ho*"},
}
require.NoError(t, f.CompileFilter())
require.NoError(t, f.Compile())
f.FilterTags(pretags)
f.filterTags(pretags)
assert.Equal(t, map[string]string{
"mytag": "foobar",
}, pretags)
@@ -312,9 +350,9 @@ func TestFilter_FilterTagsMatches(t *testing.T) {
f = Filter{
TagInclude: []string{"my*"},
}
require.NoError(t, f.CompileFilter())
require.NoError(t, f.Compile())
f.FilterTags(pretags)
f.filterTags(pretags)
assert.Equal(t, map[string]string{
"mytag": "foobar",
}, pretags)


@@ -0,0 +1,154 @@
package models
import (
"log"
"math"
"time"
"github.com/influxdata/telegraf"
)
// makemetric is used by both RunningAggregator & RunningInput
// to make metrics.
// nameOverride: override the name of the measurement being made.
// namePrefix: add this prefix to each measurement name.
// nameSuffix: add this suffix to each measurement name.
// pluginTags: these are tags that are specific to this plugin.
// daemonTags: these are daemon-wide global tags, and get applied after pluginTags.
// filter: this is a filter to apply to each metric being made.
// applyFilter: if false, the above filter is not applied to each metric.
// This is used by Aggregators, because aggregators use filters
// on incoming metrics instead of on created metrics.
// TODO refactor this to not have such a huge func signature.
func makemetric(
measurement string,
fields map[string]interface{},
tags map[string]string,
nameOverride string,
namePrefix string,
nameSuffix string,
pluginTags map[string]string,
daemonTags map[string]string,
filter Filter,
applyFilter bool,
debug bool,
mType telegraf.ValueType,
t time.Time,
) telegraf.Metric {
if len(fields) == 0 || len(measurement) == 0 {
return nil
}
if tags == nil {
tags = make(map[string]string)
}
// Override measurement name if set
if len(nameOverride) != 0 {
measurement = nameOverride
}
// Apply measurement prefix and suffix if set
if len(namePrefix) != 0 {
measurement = namePrefix + measurement
}
if len(nameSuffix) != 0 {
measurement = measurement + nameSuffix
}
// Apply plugin-wide tags if set
for k, v := range pluginTags {
if _, ok := tags[k]; !ok {
tags[k] = v
}
}
// Apply daemon-wide tags if set
for k, v := range daemonTags {
if _, ok := tags[k]; !ok {
tags[k] = v
}
}
// Apply the metric filter(s)
// for aggregators, the filter does not get applied when the metric is made.
// instead, the filter is applied to metrics coming into the plugin.
// ie, it gets applied in the RunningAggregator.Add function.
if applyFilter {
if ok := filter.Apply(measurement, fields, tags); !ok {
return nil
}
}
for k, v := range fields {
// Validate uint64 and float64 fields
// convert all int & uint types to int64
switch val := v.(type) {
case nil:
// delete nil fields
delete(fields, k)
case uint:
fields[k] = int64(val)
continue
case uint8:
fields[k] = int64(val)
continue
case uint16:
fields[k] = int64(val)
continue
case uint32:
fields[k] = int64(val)
continue
case int:
fields[k] = int64(val)
continue
case int8:
fields[k] = int64(val)
continue
case int16:
fields[k] = int64(val)
continue
case int32:
fields[k] = int64(val)
continue
case uint64:
// InfluxDB does not support writing uint64
if val < uint64(9223372036854775808) {
fields[k] = int64(val)
} else {
fields[k] = int64(9223372036854775807)
}
continue
case float32:
fields[k] = float64(val)
continue
case float64:
// NaN and Inf are invalid values in influxdb, skip the field
if math.IsNaN(val) || math.IsInf(val, 0) {
if debug {
log.Printf("Measurement [%s] field [%s] has a NaN or Inf "+
"field, skipping",
measurement, k)
}
delete(fields, k)
continue
}
default:
fields[k] = v
}
}
var m telegraf.Metric
var err error
switch mType {
case telegraf.Counter:
m, err = telegraf.NewCounterMetric(measurement, tags, fields, t)
case telegraf.Gauge:
m, err = telegraf.NewGaugeMetric(measurement, tags, fields, t)
default:
m, err = telegraf.NewMetric(measurement, tags, fields, t)
}
if err != nil {
log.Printf("Error adding point [%s]: %s\n", measurement, err.Error())
return nil
}
return m
}
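
A minimal sketch (not part of the diff) of how the makemetric helper above normalizes a metric; the measurement, field, and tag names are made up for illustration, and the call skips filtering the way aggregators do:

```
// sketch_test.go — a hypothetical example placed alongside makemetric in package models
package models

import (
	"math"
	"testing"
	"time"

	"github.com/influxdata/telegraf"
)

func TestMakemetricSketch(t *testing.T) {
	m := makemetric(
		"cpu", // measurement
		map[string]interface{}{"usage": uint64(42), "bad": math.NaN()},
		nil,      // tags (nil is replaced with an empty map)
		"",       // nameOverride
		"total_", // namePrefix
		"",       // nameSuffix
		map[string]string{"host": "example"}, // pluginTags
		nil,      // daemonTags
		Filter{}, // filter
		false,    // applyFilter: skipped, as aggregators do
		false,    // debug
		telegraf.Untyped,
		time.Now(),
	)
	if m == nil {
		t.Fatal("expected a metric")
	}
	// uint64(42) becomes the int64 field 42i, the NaN field is dropped,
	// and the prefix and plugin tag are applied, so m.String() looks like:
	//   total_cpu,host=example usage=42i 1479130000000000000
	t.Log(m.String())
}
```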


@@ -0,0 +1,164 @@
package models
import (
"time"
"github.com/influxdata/telegraf"
)
type RunningAggregator struct {
a telegraf.Aggregator
Config *AggregatorConfig
metrics chan telegraf.Metric
periodStart time.Time
periodEnd time.Time
}
func NewRunningAggregator(
a telegraf.Aggregator,
conf *AggregatorConfig,
) *RunningAggregator {
return &RunningAggregator{
a: a,
Config: conf,
metrics: make(chan telegraf.Metric, 100),
}
}
// AggregatorConfig containing configuration parameters for the running
// aggregator plugin.
type AggregatorConfig struct {
Name string
DropOriginal bool
NameOverride string
MeasurementPrefix string
MeasurementSuffix string
Tags map[string]string
Filter Filter
Period time.Duration
Delay time.Duration
}
func (r *RunningAggregator) Name() string {
return "aggregators." + r.Config.Name
}
func (r *RunningAggregator) MakeMetric(
measurement string,
fields map[string]interface{},
tags map[string]string,
mType telegraf.ValueType,
t time.Time,
) telegraf.Metric {
m := makemetric(
measurement,
fields,
tags,
r.Config.NameOverride,
r.Config.MeasurementPrefix,
r.Config.MeasurementSuffix,
r.Config.Tags,
nil,
r.Config.Filter,
false,
false,
mType,
t,
)
m.SetAggregate(true)
return m
}
// Add applies the given metric to the aggregator.
// Before applying to the plugin, it will run any defined filters on the metric.
// Add returns true if the original metric should be dropped.
func (r *RunningAggregator) Add(in telegraf.Metric) bool {
if r.Config.Filter.IsActive() {
// check if the aggregator should apply this metric
name := in.Name()
fields := in.Fields()
tags := in.Tags()
t := in.Time()
if ok := r.Config.Filter.Apply(name, fields, tags); !ok {
// aggregator should not apply this metric
return false
}
in, _ = telegraf.NewMetric(name, tags, fields, t)
}
r.metrics <- in
return r.Config.DropOriginal
}
func (r *RunningAggregator) add(in telegraf.Metric) {
r.a.Add(in)
}
func (r *RunningAggregator) push(acc telegraf.Accumulator) {
r.a.Push(acc)
}
func (r *RunningAggregator) reset() {
r.a.Reset()
}
// Run runs the running aggregator, listens for incoming metrics, and waits
// for period ticks to tell it when to push and reset the aggregator.
func (r *RunningAggregator) Run(
acc telegraf.Accumulator,
shutdown chan struct{},
) {
// The start of the period is truncated to the nearest second.
//
// Every metric then gets its timestamp checked and is dropped if it
// is not within:
//
// start < t < end + truncation + delay
//
// So if we start at now = 00:00.2 with a 10s period and 0.3s delay:
// now = 00:00.2
// start = 00:00
// truncation = 00:00.2
// end = 00:10
// 1st interval: 00:00 - 00:10.5
// 2nd interval: 00:10 - 00:20.5
// etc.
//
now := time.Now()
r.periodStart = now.Truncate(time.Second)
truncation := now.Sub(r.periodStart)
r.periodEnd = r.periodStart.Add(r.Config.Period)
time.Sleep(r.Config.Delay)
periodT := time.NewTicker(r.Config.Period)
defer periodT.Stop()
for {
select {
case <-shutdown:
if len(r.metrics) > 0 {
// wait until metrics are flushed before exiting
continue
}
return
case m := <-r.metrics:
if m.Time().Before(r.periodStart) ||
m.Time().After(r.periodEnd.Add(truncation).Add(r.Config.Delay)) {
// the metric is outside the current aggregation period, so
// skip it.
continue
}
r.add(m)
case <-periodT.C:
r.periodStart = r.periodEnd
r.periodEnd = r.periodStart.Add(r.Config.Period)
r.push(acc)
r.reset()
}
}
}


@@ -0,0 +1,256 @@
package models
import (
"fmt"
"sync"
"sync/atomic"
"testing"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
)
func TestAdd(t *testing.T) {
a := &TestAggregator{}
ra := NewRunningAggregator(a, &AggregatorConfig{
Name: "TestRunningAggregator",
Filter: Filter{
NamePass: []string{"*"},
},
Period: time.Millisecond * 500,
})
assert.NoError(t, ra.Config.Filter.Compile())
acc := testutil.Accumulator{}
go ra.Run(&acc, make(chan struct{}))
m := ra.MakeMetric(
"RITest",
map[string]interface{}{"value": int(101)},
map[string]string{},
telegraf.Untyped,
time.Now().Add(time.Millisecond*150),
)
assert.False(t, ra.Add(m))
for {
time.Sleep(time.Millisecond)
if atomic.LoadInt64(&a.sum) > 0 {
break
}
}
assert.Equal(t, int64(101), atomic.LoadInt64(&a.sum))
}
func TestAddMetricsOutsideCurrentPeriod(t *testing.T) {
a := &TestAggregator{}
ra := NewRunningAggregator(a, &AggregatorConfig{
Name: "TestRunningAggregator",
Filter: Filter{
NamePass: []string{"*"},
},
Period: time.Millisecond * 500,
})
assert.NoError(t, ra.Config.Filter.Compile())
acc := testutil.Accumulator{}
go ra.Run(&acc, make(chan struct{}))
// metric before current period
m := ra.MakeMetric(
"RITest",
map[string]interface{}{"value": int(101)},
map[string]string{},
telegraf.Untyped,
time.Now().Add(-time.Hour),
)
assert.False(t, ra.Add(m))
// metric after current period
m = ra.MakeMetric(
"RITest",
map[string]interface{}{"value": int(101)},
map[string]string{},
telegraf.Untyped,
time.Now().Add(time.Hour),
)
assert.False(t, ra.Add(m))
// "now" metric
m = ra.MakeMetric(
"RITest",
map[string]interface{}{"value": int(101)},
map[string]string{},
telegraf.Untyped,
time.Now().Add(time.Millisecond*50),
)
assert.False(t, ra.Add(m))
for {
time.Sleep(time.Millisecond)
if atomic.LoadInt64(&a.sum) > 0 {
break
}
}
assert.Equal(t, int64(101), atomic.LoadInt64(&a.sum))
}
func TestAddAndPushOnePeriod(t *testing.T) {
a := &TestAggregator{}
ra := NewRunningAggregator(a, &AggregatorConfig{
Name: "TestRunningAggregator",
Filter: Filter{
NamePass: []string{"*"},
},
Period: time.Millisecond * 500,
})
assert.NoError(t, ra.Config.Filter.Compile())
acc := testutil.Accumulator{}
shutdown := make(chan struct{})
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
ra.Run(&acc, shutdown)
}()
m := ra.MakeMetric(
"RITest",
map[string]interface{}{"value": int(101)},
map[string]string{},
telegraf.Untyped,
time.Now().Add(time.Millisecond*100),
)
assert.False(t, ra.Add(m))
for {
time.Sleep(time.Millisecond)
if acc.NMetrics() > 0 {
break
}
}
acc.AssertContainsFields(t, "TestMetric", map[string]interface{}{"sum": int64(101)})
close(shutdown)
wg.Wait()
}
func TestAddDropOriginal(t *testing.T) {
ra := NewRunningAggregator(&TestAggregator{}, &AggregatorConfig{
Name: "TestRunningAggregator",
Filter: Filter{
NamePass: []string{"RI*"},
},
DropOriginal: true,
})
assert.NoError(t, ra.Config.Filter.Compile())
m := ra.MakeMetric(
"RITest",
map[string]interface{}{"value": int(101)},
map[string]string{},
telegraf.Untyped,
time.Now(),
)
assert.True(t, ra.Add(m))
// this metric name doesn't match the filter, so Add will return false
m2 := ra.MakeMetric(
"foobar",
map[string]interface{}{"value": int(101)},
map[string]string{},
telegraf.Untyped,
time.Now(),
)
assert.False(t, ra.Add(m2))
}
// make an untyped, counter, & gauge metric
func TestMakeMetricA(t *testing.T) {
now := time.Now()
ra := NewRunningAggregator(&TestAggregator{}, &AggregatorConfig{
Name: "TestRunningAggregator",
})
assert.Equal(t, "aggregators.TestRunningAggregator", ra.Name())
m := ra.MakeMetric(
"RITest",
map[string]interface{}{"value": int(101)},
map[string]string{},
telegraf.Untyped,
now,
)
assert.Equal(
t,
m.String(),
fmt.Sprintf("RITest value=101i %d", now.UnixNano()),
)
assert.Equal(
t,
m.Type(),
telegraf.Untyped,
)
m = ra.MakeMetric(
"RITest",
map[string]interface{}{"value": int(101)},
map[string]string{},
telegraf.Counter,
now,
)
assert.Equal(
t,
m.String(),
fmt.Sprintf("RITest value=101i %d", now.UnixNano()),
)
assert.Equal(
t,
m.Type(),
telegraf.Counter,
)
m = ra.MakeMetric(
"RITest",
map[string]interface{}{"value": int(101)},
map[string]string{},
telegraf.Gauge,
now,
)
assert.Equal(
t,
m.String(),
fmt.Sprintf("RITest value=101i %d", now.UnixNano()),
)
assert.Equal(
t,
m.Type(),
telegraf.Gauge,
)
}
type TestAggregator struct {
sum int64
}
func (t *TestAggregator) Description() string { return "" }
func (t *TestAggregator) SampleConfig() string { return "" }
func (t *TestAggregator) Reset() {
atomic.StoreInt64(&t.sum, 0)
}
func (t *TestAggregator) Push(acc telegraf.Accumulator) {
acc.AddFields("TestMetric",
map[string]interface{}{"sum": t.sum},
map[string]string{},
)
}
func (t *TestAggregator) Add(in telegraf.Metric) {
for _, v := range in.Fields() {
if vi, ok := v.(int64); ok {
atomic.AddInt64(&t.sum, vi)
}
}
}


@@ -1,15 +1,19 @@
package models
import (
"fmt"
"time"
"github.com/influxdata/telegraf"
)
type RunningInput struct {
Name string
Input telegraf.Input
Config *InputConfig
trace bool
debug bool
defaultTags map[string]string
}
// InputConfig containing a name, interval, and filter
@@ -22,3 +26,59 @@ type InputConfig struct {
Filter Filter
Interval time.Duration
}
func (r *RunningInput) Name() string {
return "inputs." + r.Config.Name
}
// MakeMetric either returns a metric, or returns nil if the metric doesn't
// need to be created (because of filtering, an error, etc.)
func (r *RunningInput) MakeMetric(
measurement string,
fields map[string]interface{},
tags map[string]string,
mType telegraf.ValueType,
t time.Time,
) telegraf.Metric {
m := makemetric(
measurement,
fields,
tags,
r.Config.NameOverride,
r.Config.MeasurementPrefix,
r.Config.MeasurementSuffix,
r.Config.Tags,
r.defaultTags,
r.Config.Filter,
true,
r.debug,
mType,
t,
)
if r.trace && m != nil {
fmt.Println("> " + m.String())
}
return m
}
func (r *RunningInput) Debug() bool {
return r.debug
}
func (r *RunningInput) SetDebug(debug bool) {
r.debug = debug
}
func (r *RunningInput) Trace() bool {
return r.trace
}
func (r *RunningInput) SetTrace(trace bool) {
r.trace = trace
}
func (r *RunningInput) SetDefaultTags(tags map[string]string) {
r.defaultTags = tags
}


@@ -0,0 +1,352 @@
package models
import (
"fmt"
"math"
"testing"
"time"
"github.com/influxdata/telegraf"
"github.com/stretchr/testify/assert"
)
func TestMakeMetricNoFields(t *testing.T) {
now := time.Now()
ri := RunningInput{
Config: &InputConfig{
Name: "TestRunningInput",
},
}
m := ri.MakeMetric(
"RITest",
map[string]interface{}{},
map[string]string{},
telegraf.Untyped,
now,
)
assert.Nil(t, m)
}
// nil fields should get dropped
func TestMakeMetricNilFields(t *testing.T) {
now := time.Now()
ri := RunningInput{
Config: &InputConfig{
Name: "TestRunningInput",
},
}
m := ri.MakeMetric(
"RITest",
map[string]interface{}{
"value": int(101),
"nil": nil,
},
map[string]string{},
telegraf.Untyped,
now,
)
assert.Equal(
t,
fmt.Sprintf("RITest value=101i %d", now.UnixNano()),
m.String(),
)
}
// make an untyped, counter, & gauge metric
func TestMakeMetric(t *testing.T) {
now := time.Now()
ri := RunningInput{
Config: &InputConfig{
Name: "TestRunningInput",
},
}
ri.SetDebug(true)
assert.Equal(t, true, ri.Debug())
ri.SetTrace(true)
assert.Equal(t, true, ri.Trace())
assert.Equal(t, "inputs.TestRunningInput", ri.Name())
m := ri.MakeMetric(
"RITest",
map[string]interface{}{"value": int(101)},
map[string]string{},
telegraf.Untyped,
now,
)
assert.Equal(
t,
m.String(),
fmt.Sprintf("RITest value=101i %d", now.UnixNano()),
)
assert.Equal(
t,
m.Type(),
telegraf.Untyped,
)
m = ri.MakeMetric(
"RITest",
map[string]interface{}{"value": int(101)},
map[string]string{},
telegraf.Counter,
now,
)
assert.Equal(
t,
m.String(),
fmt.Sprintf("RITest value=101i %d", now.UnixNano()),
)
assert.Equal(
t,
m.Type(),
telegraf.Counter,
)
m = ri.MakeMetric(
"RITest",
map[string]interface{}{"value": int(101)},
map[string]string{},
telegraf.Gauge,
now,
)
assert.Equal(
t,
m.String(),
fmt.Sprintf("RITest value=101i %d", now.UnixNano()),
)
assert.Equal(
t,
m.Type(),
telegraf.Gauge,
)
}
func TestMakeMetricWithPluginTags(t *testing.T) {
now := time.Now()
ri := RunningInput{
Config: &InputConfig{
Name: "TestRunningInput",
Tags: map[string]string{
"foo": "bar",
},
},
}
ri.SetDebug(true)
assert.Equal(t, true, ri.Debug())
ri.SetTrace(true)
assert.Equal(t, true, ri.Trace())
m := ri.MakeMetric(
"RITest",
map[string]interface{}{"value": int(101)},
nil,
telegraf.Untyped,
now,
)
assert.Equal(
t,
m.String(),
fmt.Sprintf("RITest,foo=bar value=101i %d", now.UnixNano()),
)
}
func TestMakeMetricFilteredOut(t *testing.T) {
now := time.Now()
ri := RunningInput{
Config: &InputConfig{
Name: "TestRunningInput",
Tags: map[string]string{
"foo": "bar",
},
Filter: Filter{NamePass: []string{"foobar"}},
},
}
ri.SetDebug(true)
assert.Equal(t, true, ri.Debug())
ri.SetTrace(true)
assert.Equal(t, true, ri.Trace())
assert.NoError(t, ri.Config.Filter.Compile())
m := ri.MakeMetric(
"RITest",
map[string]interface{}{"value": int(101)},
nil,
telegraf.Untyped,
now,
)
assert.Nil(t, m)
}
func TestMakeMetricWithDaemonTags(t *testing.T) {
now := time.Now()
ri := RunningInput{
Config: &InputConfig{
Name: "TestRunningInput",
},
}
ri.SetDefaultTags(map[string]string{
"foo": "bar",
})
ri.SetDebug(true)
assert.Equal(t, true, ri.Debug())
ri.SetTrace(true)
assert.Equal(t, true, ri.Trace())
m := ri.MakeMetric(
"RITest",
map[string]interface{}{"value": int(101)},
map[string]string{},
telegraf.Untyped,
now,
)
assert.Equal(
t,
m.String(),
fmt.Sprintf("RITest,foo=bar value=101i %d", now.UnixNano()),
)
}
// make an untyped, counter, & gauge metric
func TestMakeMetricInfFields(t *testing.T) {
inf := math.Inf(1)
ninf := math.Inf(-1)
now := time.Now()
ri := RunningInput{
Config: &InputConfig{
Name: "TestRunningInput",
},
}
ri.SetDebug(true)
assert.Equal(t, true, ri.Debug())
ri.SetTrace(true)
assert.Equal(t, true, ri.Trace())
m := ri.MakeMetric(
"RITest",
map[string]interface{}{
"value": int(101),
"inf": inf,
"ninf": ninf,
},
map[string]string{},
telegraf.Untyped,
now,
)
assert.Equal(
t,
m.String(),
fmt.Sprintf("RITest value=101i %d", now.UnixNano()),
)
}
func TestMakeMetricAllFieldTypes(t *testing.T) {
now := time.Now()
ri := RunningInput{
Config: &InputConfig{
Name: "TestRunningInput",
},
}
ri.SetDebug(true)
assert.Equal(t, true, ri.Debug())
ri.SetTrace(true)
assert.Equal(t, true, ri.Trace())
m := ri.MakeMetric(
"RITest",
map[string]interface{}{
"a": int(10),
"b": int8(10),
"c": int16(10),
"d": int32(10),
"e": uint(10),
"f": uint8(10),
"g": uint16(10),
"h": uint32(10),
"i": uint64(10),
"j": float32(10),
"k": uint64(9223372036854775810),
"l": "foobar",
"m": true,
},
map[string]string{},
telegraf.Untyped,
now,
)
assert.Equal(
t,
fmt.Sprintf("RITest a=10i,b=10i,c=10i,d=10i,e=10i,f=10i,g=10i,h=10i,i=10i,j=10,k=9223372036854775807i,l=\"foobar\",m=true %d", now.UnixNano()),
m.String(),
)
}
func TestMakeMetricNameOverride(t *testing.T) {
now := time.Now()
ri := RunningInput{
Config: &InputConfig{
Name: "TestRunningInput",
NameOverride: "foobar",
},
}
m := ri.MakeMetric(
"RITest",
map[string]interface{}{"value": int(101)},
map[string]string{},
telegraf.Untyped,
now,
)
assert.Equal(
t,
m.String(),
fmt.Sprintf("foobar value=101i %d", now.UnixNano()),
)
}
func TestMakeMetricNamePrefix(t *testing.T) {
now := time.Now()
ri := RunningInput{
Config: &InputConfig{
Name: "TestRunningInput",
MeasurementPrefix: "foobar_",
},
}
m := ri.MakeMetric(
"RITest",
map[string]interface{}{"value": int(101)},
map[string]string{},
telegraf.Untyped,
now,
)
assert.Equal(
t,
m.String(),
fmt.Sprintf("foobar_RITest value=101i %d", now.UnixNano()),
)
}
func TestMakeMetricNameSuffix(t *testing.T) {
now := time.Now()
ri := RunningInput{
Config: &InputConfig{
Name: "TestRunningInput",
MeasurementSuffix: "_foobar",
},
}
m := ri.MakeMetric(
"RITest",
map[string]interface{}{"value": int(101)},
map[string]string{},
telegraf.Untyped,
now,
)
assert.Equal(
t,
m.String(),
fmt.Sprintf("RITest_foobar value=101i %d", now.UnixNano()),
)
}


@@ -57,21 +57,17 @@ func NewRunningOutput(
// AddMetric adds a metric to the output. This function can also write cached
// points if FlushBufferWhenFull is true.
func (ro *RunningOutput) AddMetric(metric telegraf.Metric) {
if ro.Config.Filter.IsActive {
if !ro.Config.Filter.ShouldMetricPass(metric) {
return
}
}
// Filter any tagexclude/taginclude parameters before adding metric
if len(ro.Config.Filter.TagExclude) != 0 || len(ro.Config.Filter.TagInclude) != 0 {
if ro.Config.Filter.IsActive() {
// In order to filter out tags, we need to create a new metric, since
// metrics are immutable once created.
name := metric.Name()
tags := metric.Tags()
fields := metric.Fields()
t := metric.Time()
name := metric.Name()
ro.Config.Filter.FilterTags(tags)
if ok := ro.Config.Filter.Apply(name, fields, tags); !ok {
return
}
// error is not possible if creating from another metric, so ignore.
metric, _ = telegraf.NewMetric(name, tags, fields, t)
}
@@ -89,7 +85,7 @@ func (ro *RunningOutput) AddMetric(metric telegraf.Metric) {
// Write writes all cached points to this output.
func (ro *RunningOutput) Write() error {
if !ro.Quiet {
log.Printf("Output [%s] buffer fullness: %d / %d metrics. "+
log.Printf("I! Output [%s] buffer fullness: %d / %d metrics. "+
"Total gathered metrics: %d. Total dropped metrics: %d.",
ro.Name,
ro.failMetrics.Len()+ro.metrics.Len(),
@@ -146,7 +142,7 @@ func (ro *RunningOutput) write(metrics []telegraf.Metric) error {
elapsed := time.Since(start)
if err == nil {
if !ro.Quiet {
log.Printf("Output [%s] wrote batch of %d metrics in %s\n",
log.Printf("I! Output [%s] wrote batch of %d metrics in %s\n",
ro.Name, len(metrics), elapsed)
}
}


@@ -31,9 +31,7 @@ var next5 = []telegraf.Metric{
// Benchmark adding metrics.
func BenchmarkRunningOutputAddWrite(b *testing.B) {
conf := &OutputConfig{
Filter: Filter{
IsActive: false,
},
Filter: Filter{},
}
m := &perfOutput{}
@@ -49,9 +47,7 @@ func BenchmarkRunningOutputAddWrite(b *testing.B) {
// Benchmark adding metrics.
func BenchmarkRunningOutputAddWriteEvery100(b *testing.B) {
conf := &OutputConfig{
Filter: Filter{
IsActive: false,
},
Filter: Filter{},
}
m := &perfOutput{}
@@ -69,9 +65,7 @@ func BenchmarkRunningOutputAddWriteEvery100(b *testing.B) {
// Benchmark adding metrics.
func BenchmarkRunningOutputAddFailWrites(b *testing.B) {
conf := &OutputConfig{
Filter: Filter{
IsActive: false,
},
Filter: Filter{},
}
m := &perfOutput{}
@@ -88,11 +82,10 @@ func BenchmarkRunningOutputAddFailWrites(b *testing.B) {
func TestRunningOutput_DropFilter(t *testing.T) {
conf := &OutputConfig{
Filter: Filter{
IsActive: true,
NameDrop: []string{"metric1", "metric2"},
},
}
assert.NoError(t, conf.Filter.CompileFilter())
assert.NoError(t, conf.Filter.Compile())
m := &mockOutput{}
ro := NewRunningOutput("test", m, conf, 1000, 10000)
@@ -114,11 +107,10 @@ func TestRunningOutput_DropFilter(t *testing.T) {
func TestRunningOutput_PassFilter(t *testing.T) {
conf := &OutputConfig{
Filter: Filter{
IsActive: true,
NameDrop: []string{"metric1000", "foo*"},
},
}
assert.NoError(t, conf.Filter.CompileFilter())
assert.NoError(t, conf.Filter.Compile())
m := &mockOutput{}
ro := NewRunningOutput("test", m, conf, 1000, 10000)
@@ -140,11 +132,10 @@ func TestRunningOutput_PassFilter(t *testing.T) {
func TestRunningOutput_TagIncludeNoMatch(t *testing.T) {
conf := &OutputConfig{
Filter: Filter{
IsActive: true,
TagInclude: []string{"nothing*"},
},
}
assert.NoError(t, conf.Filter.CompileFilter())
assert.NoError(t, conf.Filter.Compile())
m := &mockOutput{}
ro := NewRunningOutput("test", m, conf, 1000, 10000)
@@ -162,11 +153,10 @@ func TestRunningOutput_TagIncludeNoMatch(t *testing.T) {
func TestRunningOutput_TagExcludeMatch(t *testing.T) {
conf := &OutputConfig{
Filter: Filter{
IsActive: true,
TagExclude: []string{"tag*"},
},
}
assert.NoError(t, conf.Filter.CompileFilter())
assert.NoError(t, conf.Filter.Compile())
m := &mockOutput{}
ro := NewRunningOutput("test", m, conf, 1000, 10000)
@@ -184,11 +174,10 @@ func TestRunningOutput_TagExcludeMatch(t *testing.T) {
func TestRunningOutput_TagExcludeNoMatch(t *testing.T) {
conf := &OutputConfig{
Filter: Filter{
IsActive: true,
TagExclude: []string{"nothing*"},
},
}
assert.NoError(t, conf.Filter.CompileFilter())
assert.NoError(t, conf.Filter.Compile())
m := &mockOutput{}
ro := NewRunningOutput("test", m, conf, 1000, 10000)
@@ -206,11 +195,10 @@ func TestRunningOutput_TagExcludeNoMatch(t *testing.T) {
func TestRunningOutput_TagIncludeMatch(t *testing.T) {
conf := &OutputConfig{
Filter: Filter{
IsActive: true,
TagInclude: []string{"tag*"},
},
}
assert.NoError(t, conf.Filter.CompileFilter())
assert.NoError(t, conf.Filter.Compile())
m := &mockOutput{}
ro := NewRunningOutput("test", m, conf, 1000, 10000)
@@ -227,9 +215,7 @@ func TestRunningOutput_TagIncludeMatch(t *testing.T) {
// Test that we can write metrics with simple default setup.
func TestRunningOutputDefault(t *testing.T) {
conf := &OutputConfig{
Filter: Filter{
IsActive: false,
},
Filter: Filter{},
}
m := &mockOutput{}
@@ -252,9 +238,7 @@ func TestRunningOutputDefault(t *testing.T) {
// FlushBufferWhenFull is set.
func TestRunningOutputFlushWhenFull(t *testing.T) {
conf := &OutputConfig{
Filter: Filter{
IsActive: false,
},
Filter: Filter{},
}
m := &mockOutput{}
@@ -283,9 +267,7 @@ func TestRunningOutputFlushWhenFull(t *testing.T) {
// FlushBufferWhenFull is set, twice.
func TestRunningOutputMultiFlushWhenFull(t *testing.T) {
conf := &OutputConfig{
Filter: Filter{
IsActive: false,
},
Filter: Filter{},
}
m := &mockOutput{}
@@ -304,9 +286,7 @@ func TestRunningOutputMultiFlushWhenFull(t *testing.T) {
func TestRunningOutputWriteFail(t *testing.T) {
conf := &OutputConfig{
Filter: Filter{
IsActive: false,
},
Filter: Filter{},
}
m := &mockOutput{}
@@ -339,9 +319,7 @@ func TestRunningOutputWriteFail(t *testing.T) {
// Verify that the order of points is preserved during a write failure.
func TestRunningOutputWriteFailOrder(t *testing.T) {
conf := &OutputConfig{
Filter: Filter{
IsActive: false,
},
Filter: Filter{},
}
m := &mockOutput{}
@@ -379,9 +357,7 @@ func TestRunningOutputWriteFailOrder(t *testing.T) {
// Verify that the order of points is preserved during many write failures.
func TestRunningOutputWriteFailOrder2(t *testing.T) {
conf := &OutputConfig{
Filter: Filter{
IsActive: false,
},
Filter: Filter{},
}
m := &mockOutput{}
@@ -452,9 +428,7 @@ func TestRunningOutputWriteFailOrder2(t *testing.T) {
//
func TestRunningOutputWriteFailOrder3(t *testing.T) {
conf := &OutputConfig{
Filter: Filter{
IsActive: false,
},
Filter: Filter{},
}
m := &mockOutput{}


@@ -0,0 +1,44 @@
package models
import (
"github.com/influxdata/telegraf"
)
type RunningProcessor struct {
Name string
Processor telegraf.Processor
Config *ProcessorConfig
}
type RunningProcessors []*RunningProcessor
func (rp RunningProcessors) Len() int { return len(rp) }
func (rp RunningProcessors) Swap(i, j int) { rp[i], rp[j] = rp[j], rp[i] }
func (rp RunningProcessors) Less(i, j int) bool { return rp[i].Config.Order < rp[j].Config.Order }
// FilterConfig containing a name and filter
type ProcessorConfig struct {
Name string
Order int64
Filter Filter
}
func (rp *RunningProcessor) Apply(in ...telegraf.Metric) []telegraf.Metric {
ret := []telegraf.Metric{}
for _, metric := range in {
if rp.Config.Filter.IsActive() {
// check if the filter should be applied to this metric
if ok := rp.Config.Filter.Apply(metric.Name(), metric.Fields(), metric.Tags()); !ok {
// the metric did not pass the filter, so pass it through unchanged
ret = append(ret, metric)
continue
}
}
// This metric passed the filter, so call the processor's Apply
// function and append the results to the output slice.
ret = append(ret, rp.Processor.Apply(metric)...)
}
return ret
}


@@ -0,0 +1,117 @@
package models
import (
"testing"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
)
type TestProcessor struct {
}
func (f *TestProcessor) SampleConfig() string { return "" }
func (f *TestProcessor) Description() string { return "" }
// Apply renames:
// "foo" to "fuz"
// "bar" to "baz"
// And it also drops measurements named "dropme"
func (f *TestProcessor) Apply(in ...telegraf.Metric) []telegraf.Metric {
out := make([]telegraf.Metric, 0)
for _, m := range in {
switch m.Name() {
case "foo":
out = append(out, testutil.TestMetric(1, "fuz"))
case "bar":
out = append(out, testutil.TestMetric(1, "baz"))
case "dropme":
// drop the metric!
default:
out = append(out, m)
}
}
return out
}
func NewTestRunningProcessor() *RunningProcessor {
out := &RunningProcessor{
Name: "test",
Processor: &TestProcessor{},
Config: &ProcessorConfig{Filter: Filter{}},
}
return out
}
func TestRunningProcessor(t *testing.T) {
inmetrics := []telegraf.Metric{
testutil.TestMetric(1, "foo"),
testutil.TestMetric(1, "bar"),
testutil.TestMetric(1, "baz"),
}
expectedNames := []string{
"fuz",
"baz",
"baz",
}
rfp := NewTestRunningProcessor()
filteredMetrics := rfp.Apply(inmetrics...)
actualNames := []string{
filteredMetrics[0].Name(),
filteredMetrics[1].Name(),
filteredMetrics[2].Name(),
}
assert.Equal(t, expectedNames, actualNames)
}
func TestRunningProcessor_WithNameDrop(t *testing.T) {
inmetrics := []telegraf.Metric{
testutil.TestMetric(1, "foo"),
testutil.TestMetric(1, "bar"),
testutil.TestMetric(1, "baz"),
}
expectedNames := []string{
"foo",
"baz",
"baz",
}
rfp := NewTestRunningProcessor()
rfp.Config.Filter.NameDrop = []string{"foo"}
assert.NoError(t, rfp.Config.Filter.Compile())
filteredMetrics := rfp.Apply(inmetrics...)
actualNames := []string{
filteredMetrics[0].Name(),
filteredMetrics[1].Name(),
filteredMetrics[2].Name(),
}
assert.Equal(t, expectedNames, actualNames)
}
func TestRunningProcessor_DroppedMetric(t *testing.T) {
inmetrics := []telegraf.Metric{
testutil.TestMetric(1, "dropme"),
testutil.TestMetric(1, "foo"),
testutil.TestMetric(1, "bar"),
}
expectedNames := []string{
"fuz",
"baz",
}
rfp := NewTestRunningProcessor()
filteredMetrics := rfp.Apply(inmetrics...)
actualNames := []string{
filteredMetrics[0].Name(),
filteredMetrics[1].Name(),
}
assert.Equal(t, expectedNames, actualNames)
}

logger/logger.go

@@ -0,0 +1,58 @@
package logger
import (
"io"
"log"
"os"
"github.com/influxdata/wlog"
)
// newTelegrafWriter returns a logging-wrapped writer.
func newTelegrafWriter(w io.Writer) io.Writer {
return &telegrafLog{
writer: wlog.NewWriter(w),
}
}
type telegrafLog struct {
writer io.Writer
}
func (t *telegrafLog) Write(p []byte) (n int, err error) {
return t.writer.Write(p)
}
// SetupLogging configures the logging output.
// debug will set the log level to DEBUG
// quiet will set the log level to ERROR
// logfile will direct the logging output to a file. Empty string is
// interpreted as stderr. If there is an error opening the file, the
// logger will fall back to stderr.
func SetupLogging(debug, quiet bool, logfile string) {
if debug {
wlog.SetLevel(wlog.DEBUG)
}
if quiet {
wlog.SetLevel(wlog.ERROR)
}
var oFile *os.File
if logfile != "" {
if _, err := os.Stat(logfile); os.IsNotExist(err) {
if oFile, err = os.Create(logfile); err != nil {
log.Printf("E! Unable to create %s (%s), using stderr", logfile, err)
oFile = os.Stderr
}
} else {
if oFile, err = os.OpenFile(logfile, os.O_APPEND|os.O_WRONLY, os.ModeAppend); err != nil {
log.Printf("E! Unable to append to %s (%s), using stderr", logfile, err)
oFile = os.Stderr
}
}
} else {
oFile = os.Stderr
}
log.SetOutput(newTelegrafWriter(oFile))
}
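
A minimal sketch (not shown in the diff) of how this logger might be wired up and used. The I! and E! prefixes appear elsewhere in this changeset; the D! debug prefix is assumed from the same wlog convention:

```
package main

import (
	"log"

	"github.com/influxdata/telegraf/logger"
)

func main() {
	// debug=true lowers the level to DEBUG; quiet=true would raise it to ERROR.
	// An empty logfile sends messages to stderr.
	logger.SetupLogging(true, false, "")

	// wlog filters each line by its single-letter prefix.
	log.Printf("D! shown only because debug is enabled") // assumed debug prefix
	log.Printf("I! informational message")
	log.Printf("E! error message, shown even with quiet=true")
}
```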

metric.go

@@ -4,6 +4,18 @@ import (
"time"
"github.com/influxdata/influxdb/client/v2"
"github.com/influxdata/influxdb/models"
)
// ValueType is an enumeration of metric types that represent a simple value.
type ValueType int
// Possible values for the ValueType enum.
const (
_ ValueType = iota
Counter
Gauge
Untyped
)
type Metric interface {
@@ -16,9 +28,16 @@ type Metric interface {
// Time return the timestamp for the metric
Time() time.Time
// Type returns the metric type. Can be either telegraf.Gauge or telegraf.Counter
Type() ValueType
// UnixNano returns the unix nano time of the metric
UnixNano() int64
// HashID returns a non-cryptographic hash of the metric (name + tags)
// NOTE: do not persist & depend on this value to disk.
HashID() uint64
// Fields returns the fields for the metric
Fields() map[string]interface{}
@@ -30,29 +49,82 @@ type Metric interface {
// Point returns a influxdb client.Point object
Point() *client.Point
// SetAggregate sets the metric's aggregate status
// This is so that aggregate metrics don't get re-sent to aggregator plugins
SetAggregate(bool)
// IsAggregate returns true if the metric is an aggregate
IsAggregate() bool
}
// metric is a wrapper of the influxdb client.Point struct
type metric struct {
pt *client.Point
pt models.Point
mType ValueType
isaggregate bool
}
// NewMetric returns a metric with the given timestamp. If a timestamp is not
// given, then data is sent to the database without a timestamp, in which case
// the server will assign local time upon reception. NOTE: it is recommended to
// send data with a timestamp.
func NewMetricFromPoint(pt models.Point) Metric {
return &metric{
pt: pt,
mType: Untyped,
}
}
// NewMetric returns an untyped metric.
func NewMetric(
name string,
tags map[string]string,
fields map[string]interface{},
t time.Time,
) (Metric, error) {
pt, err := client.NewPoint(name, tags, fields, t)
pt, err := models.NewPoint(name, models.NewTags(tags), fields, t)
if err != nil {
return nil, err
}
return &metric{
pt: pt,
pt: pt,
mType: Untyped,
}, nil
}
// NewGaugeMetric returns a gauge metric.
// Gauge metrics should be used when the metric can arbitrarily go up and
// down, e.g. temperature, memory usage, cpu usage, etc.
func NewGaugeMetric(
name string,
tags map[string]string,
fields map[string]interface{},
t time.Time,
) (Metric, error) {
pt, err := models.NewPoint(name, models.NewTags(tags), fields, t)
if err != nil {
return nil, err
}
return &metric{
pt: pt,
mType: Gauge,
}, nil
}
// NewCounterMetric returns a Counter metric.
// Counter metrics should be used when the metric being created is an
// always-increasing counter. ie, net bytes received, requests served, errors, etc.
func NewCounterMetric(
name string,
tags map[string]string,
fields map[string]interface{},
t time.Time,
) (Metric, error) {
pt, err := models.NewPoint(name, models.NewTags(tags), fields, t)
if err != nil {
return nil, err
}
return &metric{
pt: pt,
mType: Counter,
}, nil
}
@@ -61,13 +133,21 @@ func (m *metric) Name() string {
}
func (m *metric) Tags() map[string]string {
return m.pt.Tags()
return m.pt.Tags().Map()
}
func (m *metric) Time() time.Time {
return m.pt.Time()
}
func (m *metric) Type() ValueType {
return m.mType
}
func (m *metric) HashID() uint64 {
return m.pt.HashID()
}
func (m *metric) UnixNano() int64 {
return m.pt.UnixNano()
}
@@ -85,5 +165,13 @@ func (m *metric) PrecisionString(precison string) string {
}
func (m *metric) Point() *client.Point {
return m.pt
return client.NewPointFrom(m.pt)
}
func (m *metric) IsAggregate() bool {
return m.isaggregate
}
func (m *metric) SetAggregate(b bool) {
m.isaggregate = b
}


@@ -23,6 +23,51 @@ func TestNewMetric(t *testing.T) {
m, err := NewMetric("cpu", tags, fields, now)
assert.NoError(t, err)
assert.Equal(t, Untyped, m.Type())
assert.Equal(t, tags, m.Tags())
assert.Equal(t, fields, m.Fields())
assert.Equal(t, "cpu", m.Name())
assert.Equal(t, now, m.Time())
assert.Equal(t, now.UnixNano(), m.UnixNano())
}
func TestNewGaugeMetric(t *testing.T) {
now := time.Now()
tags := map[string]string{
"host": "localhost",
"datacenter": "us-east-1",
}
fields := map[string]interface{}{
"usage_idle": float64(99),
"usage_busy": float64(1),
}
m, err := NewGaugeMetric("cpu", tags, fields, now)
assert.NoError(t, err)
assert.Equal(t, Gauge, m.Type())
assert.Equal(t, tags, m.Tags())
assert.Equal(t, fields, m.Fields())
assert.Equal(t, "cpu", m.Name())
assert.Equal(t, now, m.Time())
assert.Equal(t, now.UnixNano(), m.UnixNano())
}
func TestNewCounterMetric(t *testing.T) {
now := time.Now()
tags := map[string]string{
"host": "localhost",
"datacenter": "us-east-1",
}
fields := map[string]interface{}{
"usage_idle": float64(99),
"usage_busy": float64(1),
}
m, err := NewCounterMetric("cpu", tags, fields, now)
assert.NoError(t, err)
assert.Equal(t, Counter, m.Type())
assert.Equal(t, tags, m.Tags())
assert.Equal(t, fields, m.Fields())
assert.Equal(t, "cpu", m.Name())


@@ -0,0 +1,5 @@
package all
import (
_ "github.com/influxdata/telegraf/plugins/aggregators/minmax"
)


@@ -0,0 +1,119 @@
package minmax
import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/aggregators"
)
type MinMax struct {
cache map[uint64]aggregate
}
func NewMinMax() telegraf.Aggregator {
mm := &MinMax{}
mm.Reset()
return mm
}
type aggregate struct {
fields map[string]minmax
name string
tags map[string]string
}
type minmax struct {
min float64
max float64
}
var sampleConfig = `
## General Aggregator Arguments:
## The period on which to flush & clear the aggregator.
period = "30s"
## If true, the original metric will be dropped by the
## aggregator and will not get sent to the output plugins.
drop_original = false
`
func (m *MinMax) SampleConfig() string {
return sampleConfig
}
func (m *MinMax) Description() string {
return "Keep the aggregate min/max of each metric passing through."
}
func (m *MinMax) Add(in telegraf.Metric) {
id := in.HashID()
if _, ok := m.cache[id]; !ok {
// hit an uncached metric, create caches for first time:
a := aggregate{
name: in.Name(),
tags: in.Tags(),
fields: make(map[string]minmax),
}
for k, v := range in.Fields() {
if fv, ok := convert(v); ok {
a.fields[k] = minmax{
min: fv,
max: fv,
}
}
}
m.cache[id] = a
} else {
for k, v := range in.Fields() {
if fv, ok := convert(v); ok {
if _, ok := m.cache[id].fields[k]; !ok {
// hit an uncached field of a cached metric
m.cache[id].fields[k] = minmax{
min: fv,
max: fv,
}
continue
}
if fv < m.cache[id].fields[k].min {
tmp := m.cache[id].fields[k]
tmp.min = fv
m.cache[id].fields[k] = tmp
} else if fv > m.cache[id].fields[k].max {
tmp := m.cache[id].fields[k]
tmp.max = fv
m.cache[id].fields[k] = tmp
}
}
}
}
}
func (m *MinMax) Push(acc telegraf.Accumulator) {
for _, aggregate := range m.cache {
fields := map[string]interface{}{}
for k, v := range aggregate.fields {
fields[k+"_min"] = v.min
fields[k+"_max"] = v.max
}
acc.AddFields(aggregate.name, fields, aggregate.tags)
}
}
func (m *MinMax) Reset() {
m.cache = make(map[uint64]aggregate)
}
func convert(in interface{}) (float64, bool) {
switch v := in.(type) {
case float64:
return v, true
case int64:
return float64(v), true
default:
return 0, false
}
}
func init() {
aggregators.Add("minmax", func() telegraf.Aggregator {
return NewMinMax()
})
}


@@ -0,0 +1,162 @@
package minmax
import (
"testing"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/testutil"
)
var m1, _ = telegraf.NewMetric("m1",
map[string]string{"foo": "bar"},
map[string]interface{}{
"a": int64(1),
"b": int64(1),
"c": int64(1),
"d": int64(1),
"e": int64(1),
"f": float64(2),
"g": float64(2),
"h": float64(2),
"i": float64(2),
"j": float64(3),
},
time.Now(),
)
var m2, _ = telegraf.NewMetric("m1",
map[string]string{"foo": "bar"},
map[string]interface{}{
"a": int64(1),
"b": int64(3),
"c": int64(3),
"d": int64(3),
"e": int64(3),
"f": float64(1),
"g": float64(1),
"h": float64(1),
"i": float64(1),
"j": float64(1),
"k": float64(200),
"ignoreme": "string",
"andme": true,
},
time.Now(),
)
func BenchmarkApply(b *testing.B) {
minmax := NewMinMax()
for n := 0; n < b.N; n++ {
minmax.Add(m1)
minmax.Add(m2)
}
}
// Test two metrics getting added.
func TestMinMaxWithPeriod(t *testing.T) {
acc := testutil.Accumulator{}
minmax := NewMinMax()
minmax.Add(m1)
minmax.Add(m2)
minmax.Push(&acc)
expectedFields := map[string]interface{}{
"a_max": float64(1),
"a_min": float64(1),
"b_max": float64(3),
"b_min": float64(1),
"c_max": float64(3),
"c_min": float64(1),
"d_max": float64(3),
"d_min": float64(1),
"e_max": float64(3),
"e_min": float64(1),
"f_max": float64(2),
"f_min": float64(1),
"g_max": float64(2),
"g_min": float64(1),
"h_max": float64(2),
"h_min": float64(1),
"i_max": float64(2),
"i_min": float64(1),
"j_max": float64(3),
"j_min": float64(1),
"k_max": float64(200),
"k_min": float64(200),
}
expectedTags := map[string]string{
"foo": "bar",
}
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}
// Test two metrics getting added with a push/reset in between (simulates
// getting added in different periods.)
func TestMinMaxDifferentPeriods(t *testing.T) {
acc := testutil.Accumulator{}
minmax := NewMinMax()
minmax.Add(m1)
minmax.Push(&acc)
expectedFields := map[string]interface{}{
"a_max": float64(1),
"a_min": float64(1),
"b_max": float64(1),
"b_min": float64(1),
"c_max": float64(1),
"c_min": float64(1),
"d_max": float64(1),
"d_min": float64(1),
"e_max": float64(1),
"e_min": float64(1),
"f_max": float64(2),
"f_min": float64(2),
"g_max": float64(2),
"g_min": float64(2),
"h_max": float64(2),
"h_min": float64(2),
"i_max": float64(2),
"i_min": float64(2),
"j_max": float64(3),
"j_min": float64(3),
}
expectedTags := map[string]string{
"foo": "bar",
}
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
acc.ClearMetrics()
minmax.Reset()
minmax.Add(m2)
minmax.Push(&acc)
expectedFields = map[string]interface{}{
"a_max": float64(1),
"a_min": float64(1),
"b_max": float64(3),
"b_min": float64(3),
"c_max": float64(3),
"c_min": float64(3),
"d_max": float64(3),
"d_min": float64(3),
"e_max": float64(3),
"e_min": float64(3),
"f_max": float64(1),
"f_min": float64(1),
"g_max": float64(1),
"g_min": float64(1),
"h_max": float64(1),
"h_min": float64(1),
"i_max": float64(1),
"i_min": float64(1),
"j_max": float64(1),
"j_min": float64(1),
"k_max": float64(200),
"k_min": float64(200),
}
expectedTags = map[string]string{
"foo": "bar",
}
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}


@@ -0,0 +1,11 @@
package aggregators
import "github.com/influxdata/telegraf"
type Creator func() telegraf.Aggregator
var Aggregators = map[string]Creator{}
func Add(name string, creator Creator) {
Aggregators[name] = creator
}
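
A minimal sketch (not part of the diff) of how this registry ties together with the minmax plugin and the `all` package shown earlier: the blank import runs minmax's init(), which calls aggregators.Add, so the creator can then be looked up by name. The import path of the `all` package is assumed from the new file above.

```
package main

import (
	"fmt"
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/aggregators"

	// registers the minmax aggregator via its init() function
	_ "github.com/influxdata/telegraf/plugins/aggregators/all"
)

func main() {
	creator, ok := aggregators.Aggregators["minmax"]
	if !ok {
		panic("minmax aggregator not registered")
	}
	agg := creator()

	m, _ := telegraf.NewMetric("cpu",
		map[string]string{"host": "example"},
		map[string]interface{}{"usage": float64(10)},
		time.Now(),
	)
	agg.Add(m)
	fmt.Println(agg.Description())
}
```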


@@ -1,6 +1,8 @@
package aerospike
import (
"errors"
"log"
"net"
"strconv"
"strings"
@@ -11,7 +13,7 @@ import (
"github.com/influxdata/telegraf/internal/errchan"
"github.com/influxdata/telegraf/plugins/inputs"
as "github.com/sparrc/aerospike-client-go"
as "github.com/aerospike/aerospike-client-go"
)
type Aerospike struct {
@@ -82,7 +84,12 @@ func (a *Aerospike) gatherServer(hostport string, acc telegraf.Accumulator) erro
return err
}
for k, v := range stats {
fields[strings.Replace(k, "-", "_", -1)] = parseValue(v)
val, err := parseValue(v)
if err == nil {
fields[strings.Replace(k, "-", "_", -1)] = val
} else {
log.Printf("I! skipping aerospike field %v with int64 overflow", k)
}
}
acc.AddFields("aerospike_node", fields, tags, time.Now())
@@ -110,7 +117,12 @@ func (a *Aerospike) gatherServer(hostport string, acc telegraf.Accumulator) erro
if len(parts) < 2 {
continue
}
nFields[strings.Replace(parts[0], "-", "_", -1)] = parseValue(parts[1])
val, err := parseValue(parts[1])
if err == nil {
nFields[strings.Replace(parts[0], "-", "_", -1)] = val
} else {
log.Printf("I! skipping aerospike field %v with int64 overflow", parts[0])
}
}
acc.AddFields("aerospike_namespace", nFields, nTags, time.Now())
}
@@ -118,13 +130,16 @@ func (a *Aerospike) gatherServer(hostport string, acc telegraf.Accumulator) erro
return nil
}
func parseValue(v string) interface{} {
func parseValue(v string) (interface{}, error) {
if parsed, err := strconv.ParseInt(v, 10, 64); err == nil {
return parsed
return parsed, nil
} else if _, err := strconv.ParseUint(v, 10, 64); err == nil {
// int64 overflow, yet valid uint64
return nil, errors.New("Number is too large")
} else if parsed, err := strconv.ParseBool(v); err == nil {
return parsed
return parsed, nil
} else {
return v
return v, nil
}
}


@@ -10,7 +10,7 @@ import (
func TestAerospikeStatistics(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
t.Skip("Skipping aerospike integration tests.")
}
a := &Aerospike{
@@ -29,7 +29,7 @@ func TestAerospikeStatistics(t *testing.T) {
func TestAerospikeStatisticsPartialErr(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
t.Skip("Skipping aerospike integration tests.")
}
a := &Aerospike{
@@ -48,3 +48,20 @@ func TestAerospikeStatisticsPartialErr(t *testing.T) {
assert.True(t, acc.HasMeasurement("aerospike_namespace"))
assert.True(t, acc.HasIntField("aerospike_node", "batch_error"))
}
func TestAerospikeParseValue(t *testing.T) {
// uint64 with value bigger than int64 max
val, err := parseValue("18446744041841121751")
assert.Nil(t, val)
assert.Error(t, err)
// int values
val, err = parseValue("42")
assert.NoError(t, err)
assert.Equal(t, val, int64(42), "must be parsed as int")
// string values
val, err = parseValue("BB977942A2CA502")
assert.NoError(t, err)
assert.Equal(t, val, `BB977942A2CA502`, "must be left as string")
}


@@ -23,12 +23,15 @@ import (
_ "github.com/influxdata/telegraf/plugins/inputs/graylog"
_ "github.com/influxdata/telegraf/plugins/inputs/haproxy"
_ "github.com/influxdata/telegraf/plugins/inputs/hddtemp"
_ "github.com/influxdata/telegraf/plugins/inputs/http_listener"
_ "github.com/influxdata/telegraf/plugins/inputs/http_response"
_ "github.com/influxdata/telegraf/plugins/inputs/httpjson"
_ "github.com/influxdata/telegraf/plugins/inputs/influxdb"
_ "github.com/influxdata/telegraf/plugins/inputs/ipmi_sensor"
_ "github.com/influxdata/telegraf/plugins/inputs/iptables"
_ "github.com/influxdata/telegraf/plugins/inputs/jolokia"
_ "github.com/influxdata/telegraf/plugins/inputs/kafka_consumer"
_ "github.com/influxdata/telegraf/plugins/inputs/kubernetes"
_ "github.com/influxdata/telegraf/plugins/inputs/leofs"
_ "github.com/influxdata/telegraf/plugins/inputs/logparser"
_ "github.com/influxdata/telegraf/plugins/inputs/lustre2"


@@ -7,7 +7,7 @@
#### Description
The Cassandra plugin collects Cassandra/JVM metrics exposed as MBean's attributes through jolokia REST endpoint. All metrics are collected for each server configured.
The Cassandra plugin collects Cassandra 3 / JVM metrics exposed as MBean's attributes through jolokia REST endpoint. All metrics are collected for each server configured.
See: https://jolokia.org/ and [Cassandra Documentation](http://docs.datastax.com/en/cassandra/3.x/cassandra/operations/monitoringCassandraTOC.html)
@@ -38,9 +38,9 @@ Here is a list of metrics that might be useful to monitor your cassandra cluster
####measurement = javaGarbageCollector
- /java.lang:type=GarbageCollector,name=ConcurrentMarkSweep/CollectionTime
- /java.lang:type=GarbageCollector,name=ConcurrentMarkSweep/CollectionCount
- /java.lang:type=GarbageCollector,name=ParNew/CollectionTime
- /java.lang:type=GarbageCollector,name=ParNew/CollectionCount
####measurement = javaMemory
@@ -50,13 +50,13 @@ Here is a list of metrics that might be useful to monitor your cassandra cluster
####measurement = cassandraCache
- /org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=Hit
- /org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=Hits
- /org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=Requests
- /org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=Entries
- /org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=Size
- /org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=Capacity
- /org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=Hit
- /org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=Requests
- /org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=Size
- /org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=Capacity
- /org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=Hits
- /org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=Requests
- /org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=Entries
- /org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=Size
- /org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=Capacity
@@ -67,33 +67,33 @@ Here is a list of metrics that might be useful to monitor your cassandra cluster
####measurement = cassandraClientRequest
- /org.apache.cassandra.metrics:type=ClientRequest,scope=Read,name=TotalLatency
- /org.apache.cassandra.metrics:type=ClientRequest,scope=Write,name=TotalLatency
- /org.apache.cassandra.metrics:type=ClientRequest,scope=Read,name=Latency
- /org.apache.cassandra.metrics:type=ClientRequest,scope=Write,name=Latency
- /org.apache.cassandra.metrics:type=ClientRequest,scope=Read,name=Timeouts
- /org.apache.cassandra.metrics:type=ClientRequest,scope=Write,name=Timeouts
- /org.apache.cassandra.metrics:type=ClientRequest,scope=Read,name=Unavailables
- /org.apache.cassandra.metrics:type=ClientRequest,scope=Write,name=Unavailables
- /org.apache.cassandra.metrics:type=ClientRequest,scope=Read,name=Failures
- /org.apache.cassandra.metrics:type=ClientRequest,scope=Write,name=Failures
####measurement = cassandraCommitLog
- /org.apache.cassandra.metrics:type=CommitLog,name=PendingTasks
- /org.apache.cassandra.metrics:type=CommitLog,name=TotalCommitLogSize
####measurement = cassandraCompaction
- /org.apache.cassandra.metrics:type=Compaction,name=CompletedTask
- /org.apache.cassandra.metrics:type=Compaction,name=PendingTasks
- /org.apache.cassandra.metrics:type=Compaction,name=CompletedTasks
- /org.apache.cassandra.metrics:type=Compaction,name=PendingTasks
- /org.apache.cassandra.metrics:type=Compaction,name=TotalCompactionsCompleted
- /org.apache.cassandra.metrics:type=Compaction,name=BytesCompacted
####measurement = cassandraStorage
- /org.apache.cassandra.metrics:type=Storage,name=Load
- /org.apache.cassandra.metrics:type=Storage,name=Exceptions
####measurement = cassandraTable
Using wildcards for "keyspace" and "scope" can create a lot of series as metrics will be reported for every table and keyspace including internal system tables. Specify a keyspace name and/or a table name to limit them.
@@ -101,25 +101,25 @@ Using wildcards for "keyspace" and "scope" can create a lot of series as metrics
- /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=LiveDiskSpaceUsed
- /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=TotalDiskSpaceUsed
- /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=ReadLatency
- /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=CoordinatorReadLatency
- /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=WriteLatency
- /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=ReadTotalLatency
- /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=WriteTotalLatency
####measurement = cassandraThreadPools
- /org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=CompactionExecutor,name=ActiveTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=AntiEntropyStage,name=ActiveTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=CounterMutationStage,name=PendingTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=CounterMutationStage,name=CurrentlyBlockedTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=MutationStage,name=PendingTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=MutationStage,name=CurrentlyBlockedTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=MutationStage,name=CurrentlyBlockedTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=ReadRepairStage,name=PendingTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=ReadRepairStage,name=CurrentlyBlockedTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=ReadRepairStage,name=CurrentlyBlockedTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=ReadStage,name=PendingTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=ReadStage,name=CurrentlyBlockedTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=RequestResponseStage,name=PendingTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=RequestResponseStage,name=CurrentlyBlockedTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=ReadStage,name=CurrentlyBlockedTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=RequestResponseStage,name=PendingTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=RequestResponseStage,name=CurrentlyBlockedTasks

View File

@@ -274,7 +274,7 @@ func (c *Cassandra) Gather(acc telegraf.Accumulator) error {
m = newCassandraMetric(serverTokens["host"], metric, acc)
} else {
// unsupported metric type
log.Printf("Unsupported Cassandra metric [%s], skipping",
log.Printf("I! Unsupported Cassandra metric [%s], skipping",
metric)
continue
}

View File

@@ -2,7 +2,9 @@
Collects performance metrics from the MON and OSD nodes in a Ceph storage cluster.
The plugin works by scanning the configured SocketDir for OSD and MON socket files. When it finds
*Admin Socket Stats*
This gatherer works by scanning the configured SocketDir for OSD and MON socket files. When it finds
a MON socket, it runs **ceph --admin-daemon $file perfcounters_dump**. For OSDs it runs **ceph --admin-daemon $file perf dump**.
The resulting JSON is parsed and grouped into collections, based on top-level key. Top-level keys are
@@ -27,11 +29,26 @@ Would be parsed into the following metrics, all of which would be tagged with co
- refresh_latency.sum: 5378.794002000
*Cluster Stats*
This gatherer works by invoking ceph commands against the cluster and thus requires only the ceph client, a valid
ceph configuration and an access key to function (the ceph_config and ceph_user configuration variables work
in conjunction to specify these prerequisites). It may be run on any server with access to
the cluster. The currently supported commands are listed below; a simplified sketch of how they are invoked follows the list:
* ceph status
* ceph df
* ceph osd pool stats
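A rough Go sketch of how each of these commands is run, a simplified version of the `exec` helper shown later in this diff (the package and helper names here are illustrative only): the configured binary is invoked with the cluster config, the user to authenticate as, and JSON output requested.
```go
package ceph

import (
	"bytes"
	"fmt"
	"os/exec"
	"strings"
)

// runCephCommand invokes a cluster command such as "osd pool stats" against
// the configured binary and returns its JSON output as a string.
func runCephCommand(binary, config, user, command string) (string, error) {
	args := []string{"--conf", config, "--name", user, "--format", "json"}
	args = append(args, strings.Split(command, " ")...)

	var out bytes.Buffer
	cmd := exec.Command(binary, args...)
	cmd.Stdout = &out
	if err := cmd.Run(); err != nil {
		return "", fmt.Errorf("error running ceph %v: %s", command, err)
	}
	return out.String(), nil
}
```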
### Configuration:
```
# Collects performance metrics from the MON and OSD nodes in a Ceph storage cluster.
[[inputs.ceph]]
## This is the recommended interval to poll. Too frequent and you will lose
## data points due to timeouts during rebalancing and recovery
interval = '1m'
## All configuration values are optional, defaults are shown below
## location of ceph binary
@@ -46,15 +63,86 @@ Would be parsed into the following metrics, all of which would be tagged with co
## suffix used to identify socket files
socket_suffix = "asok"
## Ceph user to authenticate as, ceph will search for the corresponding keyring
## e.g. client.admin.keyring in /etc/ceph, or the explicit path defined in the
## client section of ceph.conf for example:
##
## [client.telegraf]
## keyring = /etc/ceph/client.telegraf.keyring
##
## Consult the ceph documentation for more detail on keyring generation.
ceph_user = "client.admin"
## Ceph configuration to use to locate the cluster
ceph_config = "/etc/ceph/ceph.conf"
## Whether to gather statistics via the admin socket
gather_admin_socket_stats = true
## Whether to gather statistics via ceph commands, requires ceph_user and ceph_config
## to be specified
gather_cluster_stats = true
```
### Measurements & Fields:
*Admin Socket Stats*
All fields are collected under the **ceph** measurement and stored as float64s. For a full list of fields, see the sample perf dumps in ceph_test.go.
*Cluster Stats*
* ceph\_osdmap
* epoch (float)
* full (boolean)
* nearfull (boolean)
* num\_in\_osds (float)
* num\_osds (float)
* num\_remapped\_pgs (float)
* num\_up\_osds (float)
* ceph\_pgmap
* bytes\_avail (float)
* bytes\_total (float)
* bytes\_used (float)
* data\_bytes (float)
* num\_pgs (float)
* op\_per\_sec (float)
* read\_bytes\_sec (float)
* version (float)
* write\_bytes\_sec (float)
* recovering\_bytes\_per\_sec (float)
* recovering\_keys\_per\_sec (float)
* recovering\_objects\_per\_sec (float)
* ceph\_pgmap\_state
* state name e.g. active+clean (float)
* ceph\_usage
* bytes\_used (float)
* kb\_used (float)
* max\_avail (float)
* objects (float)
* ceph\_pool\_usage
* bytes\_used (float)
* kb\_used (float)
* max\_avail (float)
* objects (float)
* ceph\_pool\_stats
* op\_per\_sec (float)
* read\_bytes\_sec (float)
* write\_bytes\_sec (float)
* recovering\_object\_per\_sec (float)
* recovering\_bytes\_per\_sec (float)
* recovering\_keys\_per\_sec (float)
### Tags:
*Admin Socket Stats*
All measurements will have the following tags:
- type: either 'osd' or 'mon' to indicate which type of node was queried
@@ -96,9 +184,21 @@ All measurements will have the following tags:
- throttle-osd_client_bytes
- throttle-osd_client_messages
*Cluster Stats*
* ceph\_pg\_state has the following tags:
* state (state for which the value applies e.g. active+clean, active+remapped+backfill)
* ceph\_pool\_usage has the following tags:
* id
* name
* ceph\_pool\_stats has the following tags:
* id
* name
### Example Output:
*Admin Socket Stats*
<pre>
telegraf -test -config /etc/telegraf/telegraf.conf -config-directory /etc/telegraf/telegraf.d -input-filter ceph
* Plugin: ceph, Collection 1
@@ -107,3 +207,16 @@ telegraf -test -config /etc/telegraf/telegraf.conf -config-directory /etc/telegr
> ceph,collection=throttle-mon_daemon_bytes,id=node-2,type=mon get=4058121,get_or_fail_fail=0,get_or_fail_success=0,get_sum=6027348117,max=419430400,put=4058121,put_sum=6027348117,take=0,take_sum=0,val=0,wait.avgcount=0,wait.sum=0 1462821234814815661
> ceph,collection=throttle-msgr_dispatch_throttler-mon,id=node-2,type=mon get=54276277,get_or_fail_fail=0,get_or_fail_success=0,get_sum=370232877040,max=104857600,put=54276277,put_sum=370232877040,take=0,take_sum=0,val=0,wait.avgcount=0,wait.sum=0 1462821234814872064
</pre>
*Cluster Stats*
<pre>
> ceph_osdmap,host=ceph-mon-0 epoch=170772,full=false,nearfull=false,num_in_osds=340,num_osds=340,num_remapped_pgs=0,num_up_osds=340 1468841037000000000
> ceph_pgmap,host=ceph-mon-0 bytes_avail=634895531270144,bytes_total=812117151809536,bytes_used=177221620539392,data_bytes=56979991615058,num_pgs=22952,op_per_sec=15869,read_bytes_sec=43956026,version=39387592,write_bytes_sec=165344818 1468841037000000000
> ceph_pgmap_state,host=ceph-mon-0 active+clean=22952 1468928660000000000
> ceph_usage,host=ceph-mon-0 total_avail_bytes=634895514791936,total_bytes=812117151809536,total_used_bytes=177221637017600 1468841037000000000
> ceph_pool_usage,host=ceph-mon-0,id=150,name=cinder.volumes bytes_used=12648553794802,kb_used=12352103316,max_avail=154342562489244,objects=3026295 1468841037000000000
> ceph_pool_usage,host=ceph-mon-0,id=182,name=cinder.volumes.flash bytes_used=8541308223964,kb_used=8341121313,max_avail=39388593563936,objects=2075066 1468841037000000000
> ceph_pool_stats,host=ceph-mon-0,id=150,name=cinder.volumes op_per_sec=1706,read_bytes_sec=28671674,write_bytes_sec=29994541 1468841037000000000
> ceph_pool_stats,host=ceph-mon-0,id=182,name=cinder.volumes.flash op_per_sec=9748,read_bytes_sec=9605524,write_bytes_sec=45593310 1468841037000000000
</pre>

View File

@@ -23,33 +23,15 @@ const (
)
type Ceph struct {
CephBinary string
OsdPrefix string
MonPrefix string
SocketDir string
SocketSuffix string
}
func (c *Ceph) setDefaults() {
if c.CephBinary == "" {
c.CephBinary = "/usr/bin/ceph"
}
if c.OsdPrefix == "" {
c.OsdPrefix = osdPrefix
}
if c.MonPrefix == "" {
c.MonPrefix = monPrefix
}
if c.SocketDir == "" {
c.SocketDir = "/var/run/ceph"
}
if c.SocketSuffix == "" {
c.SocketSuffix = sockSuffix
}
CephBinary string
OsdPrefix string
MonPrefix string
SocketDir string
SocketSuffix string
CephUser string
CephConfig string
GatherAdminSocketStats bool
GatherClusterStats bool
}
func (c *Ceph) Description() string {
@@ -57,6 +39,10 @@ func (c *Ceph) Description() string {
}
var sampleConfig = `
## This is the recommended interval to poll. Too frequent and you will lose
## data points due to timeouts during rebalancing and recovery
interval = '1m'
## All configuration values are optional, defaults are shown below
## location of ceph binary
@@ -71,6 +57,18 @@ var sampleConfig = `
## suffix used to identify socket files
socket_suffix = "asok"
## Ceph user to authenticate as
ceph_user = "client.admin"
## Ceph configuration to use to locate the cluster
ceph_config = "/etc/ceph/ceph.conf"
## Whether to gather statistics via the admin socket
gather_admin_socket_stats = true
## Whether to gather statistics via ceph commands
gather_cluster_stats = true
`
func (c *Ceph) SampleConfig() string {
@@ -78,7 +76,22 @@ func (c *Ceph) SampleConfig() string {
}
func (c *Ceph) Gather(acc telegraf.Accumulator) error {
c.setDefaults()
if c.GatherAdminSocketStats {
if err := c.gatherAdminSocketStats(acc); err != nil {
return err
}
}
if c.GatherClusterStats {
if err := c.gatherClusterStats(acc); err != nil {
return err
}
}
return nil
}
func (c *Ceph) gatherAdminSocketStats(acc telegraf.Accumulator) error {
sockets, err := findSockets(c)
if err != nil {
return fmt.Errorf("failed to find sockets at path '%s': %v", c.SocketDir, err)
@@ -87,12 +100,12 @@ func (c *Ceph) Gather(acc telegraf.Accumulator) error {
for _, s := range sockets {
dump, err := perfDump(c.CephBinary, s)
if err != nil {
log.Printf("error reading from socket '%s': %v", s.socket, err)
log.Printf("E! error reading from socket '%s': %v", s.socket, err)
continue
}
data, err := parseDump(dump)
if err != nil {
log.Printf("error parsing dump from socket '%s': %v", s.socket, err)
log.Printf("E! error parsing dump from socket '%s': %v", s.socket, err)
continue
}
for tag, metrics := range *data {
@@ -104,8 +117,46 @@ func (c *Ceph) Gather(acc telegraf.Accumulator) error {
return nil
}
func (c *Ceph) gatherClusterStats(acc telegraf.Accumulator) error {
jobs := []struct {
command string
parser func(telegraf.Accumulator, string) error
}{
{"status", decodeStatus},
{"df", decodeDf},
{"osd pool stats", decodeOsdPoolStats},
}
// For each job, execute against the cluster, parse and accumulate the data points
for _, job := range jobs {
output, err := c.exec(job.command)
if err != nil {
return fmt.Errorf("error executing command: %v", err)
}
err = job.parser(acc, output)
if err != nil {
return fmt.Errorf("error parsing output: %v", err)
}
}
return nil
}
func init() {
inputs.Add(measurement, func() telegraf.Input { return &Ceph{} })
c := Ceph{
CephBinary: "/usr/bin/ceph",
OsdPrefix: osdPrefix,
MonPrefix: monPrefix,
SocketDir: "/var/run/ceph",
SocketSuffix: sockSuffix,
CephUser: "client.admin",
CephConfig: "/etc/ceph/ceph.conf",
GatherAdminSocketStats: true,
GatherClusterStats: false,
}
inputs.Add(measurement, func() telegraf.Input { return &c })
}
var perfDump = func(binary string, socket *socket) (string, error) {
@@ -242,8 +293,197 @@ func flatten(data interface{}) []*metric {
}
}
default:
log.Printf("Ignoring unexpected type '%T' for value %v", val, val)
log.Printf("I! Ignoring unexpected type '%T' for value %v", val, val)
}
return metrics
}
func (c *Ceph) exec(command string) (string, error) {
cmdArgs := []string{"--conf", c.CephConfig, "--name", c.CephUser, "--format", "json"}
cmdArgs = append(cmdArgs, strings.Split(command, " ")...)
cmd := exec.Command(c.CephBinary, cmdArgs...)
var out bytes.Buffer
cmd.Stdout = &out
err := cmd.Run()
if err != nil {
return "", fmt.Errorf("error running ceph %v: %s", command, err)
}
output := out.String()
// Ceph doesn't sanitize its output, and may return invalid JSON. Patch this
// up for them, as having some inaccurate data is better than none.
output = strings.Replace(output, "-inf", "0", -1)
output = strings.Replace(output, "inf", "0", -1)
return output, nil
}
func decodeStatus(acc telegraf.Accumulator, input string) error {
data := make(map[string]interface{})
err := json.Unmarshal([]byte(input), &data)
if err != nil {
return fmt.Errorf("failed to parse json: '%s': %v", input, err)
}
err = decodeStatusOsdmap(acc, data)
if err != nil {
return err
}
err = decodeStatusPgmap(acc, data)
if err != nil {
return err
}
err = decodeStatusPgmapState(acc, data)
if err != nil {
return err
}
return nil
}
func decodeStatusOsdmap(acc telegraf.Accumulator, data map[string]interface{}) error {
osdmap, ok := data["osdmap"].(map[string]interface{})
if !ok {
return fmt.Errorf("WARNING %s - unable to decode osdmap", measurement)
}
fields, ok := osdmap["osdmap"].(map[string]interface{})
if !ok {
return fmt.Errorf("WARNING %s - unable to decode osdmap", measurement)
}
acc.AddFields("ceph_osdmap", fields, map[string]string{})
return nil
}
func decodeStatusPgmap(acc telegraf.Accumulator, data map[string]interface{}) error {
pgmap, ok := data["pgmap"].(map[string]interface{})
if !ok {
return fmt.Errorf("WARNING %s - unable to decode pgmap", measurement)
}
fields := make(map[string]interface{})
for key, value := range pgmap {
switch value.(type) {
case float64:
fields[key] = value
}
}
acc.AddFields("ceph_pgmap", fields, map[string]string{})
return nil
}
func decodeStatusPgmapState(acc telegraf.Accumulator, data map[string]interface{}) error {
pgmap, ok := data["pgmap"].(map[string]interface{})
if !ok {
return fmt.Errorf("WARNING %s - unable to decode pgmap", measurement)
}
fields := make(map[string]interface{})
for key, value := range pgmap {
switch value.(type) {
case []interface{}:
if key != "pgs_by_state" {
continue
}
for _, state := range value.([]interface{}) {
state_map, ok := state.(map[string]interface{})
if !ok {
return fmt.Errorf("WARNING %s - unable to decode pg state", measurement)
}
state_name, ok := state_map["state_name"].(string)
if !ok {
return fmt.Errorf("WARNING %s - unable to decode pg state name", measurement)
}
state_count, ok := state_map["count"].(float64)
if !ok {
return fmt.Errorf("WARNING %s - unable to decode pg state count", measurement)
}
fields[state_name] = state_count
}
}
}
acc.AddFields("ceph_pgmap_state", fields, map[string]string{})
return nil
}
func decodeDf(acc telegraf.Accumulator, input string) error {
data := make(map[string]interface{})
err := json.Unmarshal([]byte(input), &data)
if err != nil {
return fmt.Errorf("failed to parse json: '%s': %v", input, err)
}
// ceph.usage: records global utilization and number of objects
stats_fields, ok := data["stats"].(map[string]interface{})
if !ok {
return fmt.Errorf("WARNING %s - unable to decode df stats", measurement)
}
acc.AddFields("ceph_usage", stats_fields, map[string]string{})
// ceph.pool.usage: records per pool utilization and number of objects
pools, ok := data["pools"].([]interface{})
if !ok {
return fmt.Errorf("WARNING %s - unable to decode df pools", measurement)
}
for _, pool := range pools {
pool_map, ok := pool.(map[string]interface{})
if !ok {
return fmt.Errorf("WARNING %s - unable to decode df pool", measurement)
}
pool_name, ok := pool_map["name"].(string)
if !ok {
return fmt.Errorf("WARNING %s - unable to decode df pool name", measurement)
}
fields, ok := pool_map["stats"].(map[string]interface{})
if !ok {
return fmt.Errorf("WARNING %s - unable to decode df pool stats", measurement)
}
tags := map[string]string{
"name": pool_name,
}
acc.AddFields("ceph_pool_usage", fields, tags)
}
return nil
}
func decodeOsdPoolStats(acc telegraf.Accumulator, input string) error {
data := make([]map[string]interface{}, 0)
err := json.Unmarshal([]byte(input), &data)
if err != nil {
return fmt.Errorf("failed to parse json: '%s': %v", input, err)
}
// ceph.pool.stats: records per pool IO and recovery throughput
for _, pool := range data {
pool_name, ok := pool["pool_name"].(string)
if !ok {
return fmt.Errorf("WARNING %s - unable to decode osd pool stats name", measurement)
}
// Note: the 'recovery' object looks broken (in hammer), so it's omitted
objects := []string{
"client_io_rate",
"recovery_rate",
}
fields := make(map[string]interface{})
for _, object := range objects {
perfdata, ok := pool[object].(map[string]interface{})
if !ok {
return fmt.Errorf("WARNING %s - unable to decode osd pool stats", measurement)
}
for key, value := range perfdata {
fields[key] = value
}
}
tags := map[string]string{
"name": pool_name,
}
acc.AddFields("ceph_pool_stats", fields, tags)
}
return nil
}

View File

@@ -65,12 +65,17 @@ func TestFindSockets(t *testing.T) {
assert.NoError(t, err)
}()
c := &Ceph{
CephBinary: "foo",
SocketDir: tmpdir,
CephBinary: "foo",
OsdPrefix: "ceph-osd",
MonPrefix: "ceph-mon",
SocketDir: tmpdir,
SocketSuffix: "asok",
CephUser: "client.admin",
CephConfig: "/etc/ceph/ceph.conf",
GatherAdminSocketStats: true,
GatherClusterStats: false,
}
c.setDefaults()
for _, st := range sockTestParams {
createTestFiles(tmpdir, st)

View File

@@ -2,6 +2,10 @@
This input plugin will capture specific statistics per cgroup.
Consider restricting paths to the set of cgroups you really
want to monitor if you have a large number of cgroups, to avoid
any cardinality issues.
Following file formats are supported:
* Single value
@@ -33,9 +37,8 @@ KEY1 VAL1\n
### Tags:
Measurements don't have any specific tags unless you define them at the telegraf level (defaults). We
used to have the path listed as a tag, but to keep cardinality in check it's easier to move this
value to a field. Thanks @sebito91!
All measurements have the following tags:
- path
### Configuration:

View File

@@ -11,15 +11,18 @@ type CGroup struct {
}
var sampleConfig = `
## Directories in which to look for files, globs are supported.
# paths = [
# "/cgroup/memory",
# "/cgroup/memory/child1",
# "/cgroup/memory/child2/*",
# ]
## cgroup stat fields, as file names, globs are supported.
## these file names are appended to each path from above.
# files = ["memory.*usage*", "memory.limit_in_bytes"]
## Directories in which to look for files, globs are supported.
## Consider restricting paths to the set of cgroups you really
## want to monitor if you have a large number of cgroups, to avoid
## any cardinality issues.
# paths = [
# "/cgroup/memory",
# "/cgroup/memory/child1",
# "/cgroup/memory/child2/*",
# ]
## cgroup stat fields, as file names, globs are supported.
## these file names are appended to each path from above.
# files = ["memory.*usage*", "memory.limit_in_bytes"]
`
func (g *CGroup) SampleConfig() string {

View File

@@ -56,9 +56,10 @@ func (g *CGroup) gatherDir(dir string, acc telegraf.Accumulator) error {
return err
}
}
fields["path"] = dir
acc.AddFields(metricName, fields, nil)
tags := map[string]string{"path": dir}
acc.AddFields(metricName, fields, tags)
return nil
}

View File

@@ -3,13 +3,10 @@
package cgroup
import (
"fmt"
"testing"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"reflect"
)
var cg1 = &CGroup{
@@ -24,32 +21,15 @@ var cg1 = &CGroup{
},
}
func assertContainsFields(a *testutil.Accumulator, t *testing.T, measurement string, fieldSet []map[string]interface{}) {
a.Lock()
defer a.Unlock()
numEquals := 0
for _, p := range a.Metrics {
if p.Measurement == measurement {
for _, fields := range fieldSet {
if reflect.DeepEqual(fields, p.Fields) {
numEquals++
}
}
}
}
if numEquals != len(fieldSet) {
assert.Fail(t, fmt.Sprintf("only %d of %d are equal", numEquals, len(fieldSet)))
}
}
func TestCgroupStatistics_1(t *testing.T) {
var acc testutil.Accumulator
err := cg1.Gather(&acc)
require.NoError(t, err)
tags := map[string]string{
"path": "testdata/memory",
}
fields := map[string]interface{}{
"memory.stat.cache": 1739362304123123123,
"memory.stat.rss": 1775325184,
@@ -62,9 +42,8 @@ func TestCgroupStatistics_1(t *testing.T) {
"memory.limit_in_bytes": 223372036854771712,
"memory.use_hierarchy": "12-781",
"notify_on_release": 0,
"path": "testdata/memory",
}
assertContainsFields(&acc, t, "cgroup", []map[string]interface{}{fields})
acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
}
// ======================================================================
@@ -80,14 +59,16 @@ func TestCgroupStatistics_2(t *testing.T) {
err := cg2.Gather(&acc)
require.NoError(t, err)
tags := map[string]string{
"path": "testdata/cpu",
}
fields := map[string]interface{}{
"cpuacct.usage_percpu.0": -1452543795404,
"cpuacct.usage_percpu.1": 1376681271659,
"cpuacct.usage_percpu.2": 1450950799997,
"cpuacct.usage_percpu.3": -1473113374257,
"path": "testdata/cpu",
}
assertContainsFields(&acc, t, "cgroup", []map[string]interface{}{fields})
acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
}
// ======================================================================
@@ -103,16 +84,18 @@ func TestCgroupStatistics_3(t *testing.T) {
err := cg3.Gather(&acc)
require.NoError(t, err)
tags := map[string]string{
"path": "testdata/memory/group_1",
}
fields := map[string]interface{}{
"memory.limit_in_bytes": 223372036854771712,
"path": "testdata/memory/group_1",
}
acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
fieldsTwo := map[string]interface{}{
"memory.limit_in_bytes": 223372036854771712,
"path": "testdata/memory/group_2",
tags = map[string]string{
"path": "testdata/memory/group_2",
}
assertContainsFields(&acc, t, "cgroup", []map[string]interface{}{fields, fieldsTwo})
acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
}
// ======================================================================
@@ -128,22 +111,23 @@ func TestCgroupStatistics_4(t *testing.T) {
err := cg4.Gather(&acc)
require.NoError(t, err)
tags := map[string]string{
"path": "testdata/memory/group_1/group_1_1",
}
fields := map[string]interface{}{
"memory.limit_in_bytes": 223372036854771712,
"path": "testdata/memory/group_1/group_1_1",
}
acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
fieldsTwo := map[string]interface{}{
"memory.limit_in_bytes": 223372036854771712,
"path": "testdata/memory/group_1/group_1_2",
tags = map[string]string{
"path": "testdata/memory/group_1/group_1_2",
}
acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
fieldsThree := map[string]interface{}{
"memory.limit_in_bytes": 223372036854771712,
"path": "testdata/memory/group_2",
tags = map[string]string{
"path": "testdata/memory/group_2",
}
assertContainsFields(&acc, t, "cgroup", []map[string]interface{}{fields, fieldsTwo, fieldsThree})
acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
}
// ======================================================================
@@ -159,16 +143,18 @@ func TestCgroupStatistics_5(t *testing.T) {
err := cg5.Gather(&acc)
require.NoError(t, err)
tags := map[string]string{
"path": "testdata/memory/group_1/group_1_1",
}
fields := map[string]interface{}{
"memory.limit_in_bytes": 223372036854771712,
"path": "testdata/memory/group_1/group_1_1",
}
acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
fieldsTwo := map[string]interface{}{
"memory.limit_in_bytes": 223372036854771712,
"path": "testdata/memory/group_2/group_1_1",
tags = map[string]string{
"path": "testdata/memory/group_2/group_1_1",
}
assertContainsFields(&acc, t, "cgroup", []map[string]interface{}{fields, fieldsTwo})
acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
}
// ======================================================================
@@ -184,11 +170,13 @@ func TestCgroupStatistics_6(t *testing.T) {
err := cg6.Gather(&acc)
require.NoError(t, err)
tags := map[string]string{
"path": "testdata/memory",
}
fields := map[string]interface{}{
"memory.usage_in_bytes": 3513667584,
"memory.use_hierarchy": "12-781",
"memory.kmem.limit_in_bytes": 9223372036854771712,
"path": "testdata/memory",
}
assertContainsFields(&acc, t, "cgroup", []map[string]interface{}{fields})
acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
}

View File

@@ -103,9 +103,13 @@ func processChronycOutput(out string) (map[string]interface{}, map[string]string
tags["stratum"] = valueFields[0]
continue
}
if strings.Contains(strings.ToLower(name), "reference_id") {
tags["reference_id"] = valueFields[0]
continue
}
value, err := strconv.ParseFloat(valueFields[0], 64)
if err != nil {
tags[name] = strings.ToLower(valueFields[0])
tags[name] = strings.ToLower(strings.Join(valueFields, " "))
continue
}
if strings.Contains(stats[1], "slow") {

View File

@@ -27,7 +27,7 @@ func TestGather(t *testing.T) {
tags := map[string]string{
"reference_id": "192.168.1.22",
"leap_status": "normal",
"leap_status": "not synchronized",
"stratum": "3",
}
fields := map[string]interface{}{
@@ -85,7 +85,7 @@ Skew : 0.006 ppm
Root delay : 0.001655 seconds
Root dispersion : 0.003307 seconds
Update interval : 507.2 seconds
Leap status : Normal
Leap status : Not synchronized
`
args := os.Args

View File

@@ -18,36 +18,48 @@ API endpoint. In the following order the plugin will attempt to authenticate.
```toml
[[inputs.cloudwatch]]
## Amazon Region (required)
region = 'us-east-1'
region = "us-east-1"
# The minimum period for Cloudwatch metrics is 1 minute (60s). However not all
# metrics are made available to the 1 minute period. Some are collected at
# 3 minute and 5 minute intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring.
# Note that if a period is configured that is smaller than the minimum for a
# particular metric, that metric will not be returned by the Cloudwatch API
# and will not be collected by Telegraf.
#
## Requested CloudWatch aggregation Period (required - must be a multiple of 60s)
period = '1m'
period = "5m"
## Collection Delay (required - must account for metrics availability via CloudWatch API)
delay = '1m'
delay = "5m"
## Override global run interval (optional - defaults to global interval)
## Recommended: use a metric 'interval' that is a multiple of 'period' to avoid
## gaps or overlap in pulled data
interval = '1m'
interval = "5m"
## Metric Statistic Namespace (required)
namespace = 'AWS/ELB'
namespace = "AWS/ELB"
## Maximum requests per second. Note that the global default AWS rate limit is
## 10 reqs/sec, so if you define multiple namespaces, these should add up to a
## maximum of 10. Optional - default value is 10.
ratelimit = 10
## Metrics to Pull (optional)
## Defaults to all Metrics in Namespace if nothing is provided
## Refreshes Namespace available metrics every 1h
[[inputs.cloudwatch.metrics]]
names = ['Latency', 'RequestCount']
names = ["Latency", "RequestCount"]
## Dimension filters for Metric (optional)
[[inputs.cloudwatch.metrics.dimensions]]
name = 'LoadBalancerName'
value = 'p-example'
name = "LoadBalancerName"
value = "p-example"
[[inputs.cloudwatch.metrics.dimensions]]
name = 'AvailabilityZone'
value = '*'
name = "AvailabilityZone"
value = "*"
```
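The `ratelimit` setting above simply gates how many CloudWatch API requests the plugin issues per second. A self-contained sketch of the idea (this is not the plugin's internal limiter, just an illustration of the pacing):
```go
package main

import (
	"fmt"
	"time"
)

func main() {
	const ratelimit = 10 // requests per second, matching the config value above

	// One tick every 1/ratelimit of a second gates each API call.
	ticker := time.NewTicker(time.Second / ratelimit)
	defer ticker.Stop()

	for i := 0; i < 3; i++ {
		<-ticker.C // wait for a slot before issuing the next request
		fmt.Println("GetMetricStatistics request", i)
	}
}
```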
#### Requirements and Terminology
@@ -66,16 +78,16 @@ wildcard dimension is ignored.
Example:
```
[[inputs.cloudwatch.metrics]]
names = ['Latency']
names = ["Latency"]
## Dimension filters for Metric (optional)
[[inputs.cloudwatch.metrics.dimensions]]
name = 'LoadBalancerName'
value = 'p-example'
name = "LoadBalancerName"
value = "p-example"
[[inputs.cloudwatch.metrics.dimensions]]
name = 'AvailabilityZone'
value = '*'
name = "AvailabilityZone"
value = "*"
```
If the following ELBs are available:

View File

@@ -33,6 +33,7 @@ type (
Namespace string `toml:"namespace"`
Metrics []*Metric `toml:"metrics"`
CacheTTL internal.Duration `toml:"cache_ttl"`
RateLimit int `toml:"ratelimit"`
client cloudwatchClient
metricCache *MetricCache
}
@@ -62,7 +63,7 @@ type (
func (c *CloudWatch) SampleConfig() string {
return `
## Amazon Region
region = 'us-east-1'
region = "us-east-1"
## Amazon Credentials
## Credentials are loaded in the following order
@@ -79,33 +80,45 @@ func (c *CloudWatch) SampleConfig() string {
#profile = ""
#shared_credential_file = ""
# The minimum period for Cloudwatch metrics is 1 minute (60s). However not all
# metrics are made available to the 1 minute period. Some are collected at
# 3 minute and 5 minute intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring.
# Note that if a period is configured that is smaller than the minimum for a
# particular metric, that metric will not be returned by the Cloudwatch API
# and will not be collected by Telegraf.
#
## Requested CloudWatch aggregation Period (required - must be a multiple of 60s)
period = '1m'
period = "5m"
## Collection Delay (required - must account for metrics availability via CloudWatch API)
delay = '1m'
delay = "5m"
## Recommended: use a metric 'interval' that is a multiple of 'period' to avoid
## gaps or overlap in pulled data
interval = '1m'
interval = "5m"
## Configure the TTL for the internal cache of metrics.
## Defaults to 1 hr if not specified
#cache_ttl = '10m'
#cache_ttl = "10m"
## Metric Statistic Namespace (required)
namespace = 'AWS/ELB'
namespace = "AWS/ELB"
## Maximum requests per second. Note that the global default AWS rate limit is
## 10 reqs/sec, so if you define multiple namespaces, these should add up to a
## maximum of 10. Optional - default value is 10.
ratelimit = 10
## Metrics to Pull (optional)
## Defaults to all Metrics in Namespace if nothing is provided
## Refreshes Namespace available metrics every 1h
#[[inputs.cloudwatch.metrics]]
# names = ['Latency', 'RequestCount']
# names = ["Latency", "RequestCount"]
#
# ## Dimension filters for Metric (optional)
# [[inputs.cloudwatch.metrics.dimensions]]
# name = 'LoadBalancerName'
# value = 'p-example'
# name = "LoadBalancerName"
# value = "p-example"
`
}
@@ -127,7 +140,6 @@ func (c *CloudWatch) Gather(acc telegraf.Accumulator) error {
if !hasWilcard(m.Dimensions) {
dimensions := make([]*cloudwatch.Dimension, len(m.Dimensions))
for k, d := range m.Dimensions {
fmt.Printf("Dimension [%s]:[%s]\n", d.Name, d.Value)
dimensions[k] = &cloudwatch.Dimension{
Name: aws.String(d.Name),
Value: aws.String(d.Value),
@@ -175,7 +187,7 @@ func (c *CloudWatch) Gather(acc telegraf.Accumulator) error {
// limit concurrency or we can easily exhaust user connection limit
// see cloudwatch API request limits:
// http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_limits.html
lmtr := limiter.NewRateLimiter(10, time.Second)
lmtr := limiter.NewRateLimiter(c.RateLimit, time.Second)
defer lmtr.Stop()
var wg sync.WaitGroup
wg.Add(len(metrics))
@@ -195,7 +207,8 @@ func init() {
inputs.Add("cloudwatch", func() telegraf.Input {
ttl, _ := time.ParseDuration("1h")
return &CloudWatch{
CacheTTL: internal.Duration{Duration: ttl},
CacheTTL: internal.Duration{Duration: ttl},
RateLimit: 10,
}
})
}
@@ -222,13 +235,12 @@ func (c *CloudWatch) initializeCloudWatch() error {
/*
* Fetch available metrics for given CloudWatch Namespace
*/
func (c *CloudWatch) fetchNamespaceMetrics() (metrics []*cloudwatch.Metric, err error) {
func (c *CloudWatch) fetchNamespaceMetrics() ([]*cloudwatch.Metric, error) {
if c.metricCache != nil && c.metricCache.IsValid() {
metrics = c.metricCache.Metrics
return
return c.metricCache.Metrics, nil
}
metrics = []*cloudwatch.Metric{}
metrics := []*cloudwatch.Metric{}
var token *string
for more := true; more; {
@@ -256,7 +268,7 @@ func (c *CloudWatch) fetchNamespaceMetrics() (metrics []*cloudwatch.Metric, err
TTL: c.CacheTTL.Duration,
}
return
return metrics, nil
}
/*

View File

@@ -58,6 +58,7 @@ func TestGather(t *testing.T) {
Namespace: "AWS/ELB",
Delay: internalDuration,
Period: internalDuration,
RateLimit: 10,
}
var acc testutil.Accumulator

View File

@@ -93,13 +93,14 @@ func (c *Conntrack) Gather(acc telegraf.Accumulator) error {
contents, err := ioutil.ReadFile(fName)
if err != nil {
log.Printf("failed to read file '%s': %v", fName, err)
log.Printf("E! failed to read file '%s': %v", fName, err)
continue
}
v := strings.TrimSpace(string(contents))
fields[metricKey], err = strconv.ParseFloat(v, 64)
if err != nil {
log.Printf("failed to parse metric, expected number but "+
log.Printf("E! failed to parse metric, expected number but "+
" found '%s': %v", v, err)
}
}

File diff suppressed because one or more lines are too long

View File

@@ -103,6 +103,9 @@ based on the availability of per-cpu stats on your system.
- n_used_file_descriptors
- n_cpus
- n_containers
- n_containers_running
- n_containers_stopped
- n_containers_paused
- n_images
- n_goroutines
- n_listener_events
@@ -153,6 +156,9 @@ based on the availability of per-cpu stats on your system.
> docker n_cpus=8i 1456926671065383978
> docker n_used_file_descriptors=15i 1456926671065383978
> docker n_containers=7i 1456926671065383978
> docker n_containers_running=7i 1456926671065383978
> docker n_containers_stopped=3i 1456926671065383978
> docker n_containers_paused=0i 1456926671065383978
> docker n_images=152i 1456926671065383978
> docker n_goroutines=36i 1456926671065383978
> docker n_listener_events=0i 1456926671065383978

View File

@@ -28,7 +28,8 @@ type Docker struct {
PerDevice bool `toml:"perdevice"`
Total bool `toml:"total"`
client DockerClient
client DockerClient
engine_host string
}
// DockerClient interface, useful for testing
@@ -125,7 +126,7 @@ func (d *Docker) Gather(acc telegraf.Accumulator) error {
defer wg.Done()
err := d.gatherContainer(c, acc)
if err != nil {
log.Printf("Error gathering container %s stats: %s\n",
log.Printf("E! Error gathering container %s stats: %s\n",
c.Names, err.Error())
}
}(container)
@@ -147,11 +148,15 @@ func (d *Docker) gatherInfo(acc telegraf.Accumulator) error {
if err != nil {
return err
}
d.engine_host = info.Name
fields := map[string]interface{}{
"n_cpus": info.NCPU,
"n_used_file_descriptors": info.NFd,
"n_containers": info.Containers,
"n_containers_running": info.ContainersRunning,
"n_containers_stopped": info.ContainersStopped,
"n_containers_paused": info.ContainersPaused,
"n_images": info.Images,
"n_goroutines": info.NGoroutines,
"n_listener_events": info.NEventsListener,
@@ -159,11 +164,11 @@ func (d *Docker) gatherInfo(acc telegraf.Accumulator) error {
// Add metrics
acc.AddFields("docker",
fields,
nil,
map[string]string{"engine_host": d.engine_host},
now)
acc.AddFields("docker",
map[string]interface{}{"memory_total": info.MemTotal},
map[string]string{"unit": "bytes"},
map[string]string{"unit": "bytes", "engine_host": d.engine_host},
now)
// Get storage metrics
for _, rawData := range info.DriverStatus {
@@ -177,7 +182,7 @@ func (d *Docker) gatherInfo(acc telegraf.Accumulator) error {
// pool blocksize
acc.AddFields("docker",
map[string]interface{}{"pool_blocksize": value},
map[string]string{"unit": "bytes"},
map[string]string{"unit": "bytes", "engine_host": d.engine_host},
now)
} else if strings.HasPrefix(name, "data_space_") {
// data space
@@ -192,13 +197,13 @@ func (d *Docker) gatherInfo(acc telegraf.Accumulator) error {
if len(dataFields) > 0 {
acc.AddFields("docker_data",
dataFields,
map[string]string{"unit": "bytes"},
map[string]string{"unit": "bytes", "engine_host": d.engine_host},
now)
}
if len(metadataFields) > 0 {
acc.AddFields("docker_metadata",
metadataFields,
map[string]string{"unit": "bytes"},
map[string]string{"unit": "bytes", "engine_host": d.engine_host},
now)
}
return nil
@@ -225,6 +230,7 @@ func (d *Docker) gatherContainer(
imageVersion = imageParts[1]
}
tags := map[string]string{
"engine_host": d.engine_host,
"container_name": cname,
"container_image": imageName,
"container_version": imageVersion,

View File

@@ -256,6 +256,9 @@ type FakeDockerClient struct {
func (d FakeDockerClient) Info(ctx context.Context) (types.Info, error) {
env := types.Info{
Containers: 108,
ContainersRunning: 98,
ContainersStopped: 6,
ContainersPaused: 3,
OomKillDisable: false,
SystemTime: "2016-02-24T00:55:09.15073105-05:00",
NEventsListener: 0,
@@ -397,10 +400,13 @@ func TestDockerGatherInfo(t *testing.T) {
"n_cpus": int(4),
"n_used_file_descriptors": int(19),
"n_containers": int(108),
"n_containers_running": int(98),
"n_containers_stopped": int(6),
"n_containers_paused": int(3),
"n_images": int(199),
"n_goroutines": int(39),
},
map[string]string{},
map[string]string{"engine_host": "absol"},
)
acc.AssertContainsTaggedFields(t,
@@ -411,7 +417,8 @@ func TestDockerGatherInfo(t *testing.T) {
"available": int64(36530000000),
},
map[string]string{
"unit": "bytes",
"unit": "bytes",
"engine_host": "absol",
},
)
acc.AssertContainsTaggedFields(t,
@@ -425,6 +432,7 @@ func TestDockerGatherInfo(t *testing.T) {
"container_image": "quay.io/coreos/etcd",
"cpu": "cpu3",
"container_version": "v2.2.2",
"engine_host": "absol",
},
)
acc.AssertContainsTaggedFields(t,
@@ -467,6 +475,7 @@ func TestDockerGatherInfo(t *testing.T) {
"container_id": "b7dfbb9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296e2173",
},
map[string]string{
"engine_host": "absol",
"container_name": "etcd2",
"container_image": "quay.io/coreos/etcd",
"container_version": "v2.2.2",

View File

@@ -8,9 +8,18 @@ and optionally [cluster](https://www.elastic.co/guide/en/elasticsearch/reference
```
[[inputs.elasticsearch]]
## specify a list of one or more Elasticsearch servers
servers = ["http://localhost:9200"]
## Timeout for HTTP requests to the Elasticsearch server(s)
http_timeout = "5s"
## set local to false when you want to read the indices stats from all nodes
## within the cluster
local = true
cluster_health = true
## set cluster_health to true when you want to also obtain cluster level stats
cluster_health = false
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"

View File

@@ -62,6 +62,9 @@ const sampleConfig = `
## specify a list of one or more Elasticsearch servers
servers = ["http://localhost:9200"]
## Timeout for HTTP requests to the Elasticsearch server(s)
http_timeout = "5s"
## set local to false when you want to read the indices stats from all nodes
## within the cluster
local = true
@@ -82,6 +85,7 @@ const sampleConfig = `
type Elasticsearch struct {
Local bool
Servers []string
HttpTimeout internal.Duration
ClusterHealth bool
SSLCA string `toml:"ssl_ca"` // Path to CA file
SSLCert string `toml:"ssl_cert"` // Path to host cert file
@@ -92,7 +96,9 @@ type Elasticsearch struct {
// NewElasticsearch return a new instance of Elasticsearch
func NewElasticsearch() *Elasticsearch {
return &Elasticsearch{}
return &Elasticsearch{
HttpTimeout: internal.Duration{Duration: time.Second * 5},
}
}
// SampleConfig returns sample configuration for this plugin.
@@ -150,12 +156,12 @@ func (e *Elasticsearch) createHttpClient() (*http.Client, error) {
return nil, err
}
tr := &http.Transport{
ResponseHeaderTimeout: time.Duration(3 * time.Second),
ResponseHeaderTimeout: e.HttpTimeout.Duration,
TLSClientConfig: tlsCfg,
}
client := &http.Client{
Transport: tr,
Timeout: time.Duration(4 * time.Second),
Timeout: e.HttpTimeout.Duration,
}
return client, nil

View File

@@ -5,6 +5,7 @@ import (
"fmt"
"os/exec"
"path/filepath"
"runtime"
"strings"
"sync"
"syscall"
@@ -114,9 +115,36 @@ func (c CommandRunner) Run(
}
}
out = removeCarriageReturns(out)
return out.Bytes(), nil
}
// removeCarriageReturns removes all carriage returns from the input if the
// OS is Windows. It does not return any errors.
func removeCarriageReturns(b bytes.Buffer) bytes.Buffer {
if runtime.GOOS == "windows" {
var buf bytes.Buffer
for {
byt, er := b.ReadBytes(0x0D)
end := len(byt)
if nil == er {
end -= 1
}
if nil != byt {
buf.Write(byt[:end])
} else {
break
}
if nil != er {
break
}
}
b = buf
}
return b
}
func (e *Exec) ProcessCommand(command string, acc telegraf.Accumulator, wg *sync.WaitGroup) {
defer wg.Done()

View File

@@ -1,7 +1,9 @@
package exec
import (
"bytes"
"fmt"
"runtime"
"testing"
"github.com/influxdata/telegraf"
@@ -46,6 +48,29 @@ cpu,cpu=cpu5,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu6,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
`
type CarriageReturnTest struct {
input []byte
output []byte
}
var crTests = []CarriageReturnTest{
{[]byte{0x4c, 0x69, 0x6e, 0x65, 0x20, 0x31, 0x0d, 0x0a, 0x4c, 0x69,
0x6e, 0x65, 0x20, 0x32, 0x0d, 0x0a, 0x4c, 0x69, 0x6e, 0x65,
0x20, 0x33},
[]byte{0x4c, 0x69, 0x6e, 0x65, 0x20, 0x31, 0x0a, 0x4c, 0x69, 0x6e,
0x65, 0x20, 0x32, 0x0a, 0x4c, 0x69, 0x6e, 0x65, 0x20, 0x33}},
{[]byte{0x4c, 0x69, 0x6e, 0x65, 0x20, 0x31, 0x0a, 0x4c, 0x69, 0x6e,
0x65, 0x20, 0x32, 0x0a, 0x4c, 0x69, 0x6e, 0x65, 0x20, 0x33},
[]byte{0x4c, 0x69, 0x6e, 0x65, 0x20, 0x31, 0x0a, 0x4c, 0x69, 0x6e,
0x65, 0x20, 0x32, 0x0a, 0x4c, 0x69, 0x6e, 0x65, 0x20, 0x33}},
{[]byte{0x54, 0x68, 0x69, 0x73, 0x20, 0x69, 0x73, 0x20, 0x61, 0x6c,
0x6c, 0x20, 0x6f, 0x6e, 0x65, 0x20, 0x62, 0x69, 0x67, 0x20,
0x6c, 0x69, 0x6e, 0x65},
[]byte{0x54, 0x68, 0x69, 0x73, 0x20, 0x69, 0x73, 0x20, 0x61, 0x6c,
0x6c, 0x20, 0x6f, 0x6e, 0x65, 0x20, 0x62, 0x69, 0x67, 0x20,
0x6c, 0x69, 0x6e, 0x65}},
}
type runnerMock struct {
out []byte
err error
@@ -217,3 +242,21 @@ func TestExecCommandWithoutGlobAndPath(t *testing.T) {
}
acc.AssertContainsFields(t, "metric", fields)
}
func TestRemoveCarriageReturns(t *testing.T) {
if runtime.GOOS == "windows" {
// Test that all carriage returns are removed
for _, test := range crTests {
b := bytes.NewBuffer(test.input)
out := removeCarriageReturns(*b)
assert.True(t, bytes.Equal(test.output, out.Bytes()))
}
} else {
// Test that the buffer is returned unaltered
for _, test := range crTests {
b := bytes.NewBuffer(test.input)
out := removeCarriageReturns(*b)
assert.True(t, bytes.Equal(test.input, out.Bytes()))
}
}
}

View File

@@ -0,0 +1,37 @@
# HAproxy Input Plugin
The [HAproxy](http://www.haproxy.org/) input plugin gathers metrics directly from any running HAproxy instance, either from the CSV generated by the HAproxy status page or from admin socket(s).
### Configuration:
```toml
# SampleConfig
[[inputs.haproxy]]
servers = ["http://1.2.3.4/haproxy?stats", "/var/run/haproxy*.sock"]
```
Server addresses must explicitly start with 'http' if you wish to use the HAproxy status page. Otherwise, the address is assumed to be a UNIX socket and any protocol prefix (if present) is discarded.
The following examples all resolve to the same socket:
```
socket:/var/run/haproxy.sock
unix:/var/run/haproxy.sock
foo:/var/run/haproxy.sock
/var/run/haproxy.sock
```
When using socket names, wildcard expansion is supported, so the plugin can gather stats from multiple sockets at once (see the sketch below).
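A rough Go sketch of how a server entry is reduced to a socket path and expanded with globbing, mirroring the `getSocketAddr` helper and glob handling shown later in this diff (the `main` wrapper is illustrative only):
```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// getSocketAddr drops an optional "proto:" prefix, so "socket:/run/x.sock",
// "unix:/run/x.sock" and "/run/x.sock" all resolve to the same path.
func getSocketAddr(sock string) string {
	parts := strings.Split(sock, ":")
	if len(parts) >= 2 {
		return parts[1]
	}
	return parts[0]
}

func main() {
	server := "socket:/var/run/haproxy*.sock"
	path := getSocketAddr(server)

	// Wildcards are expanded so a single entry can cover several sockets.
	matches, err := filepath.Glob(path)
	if err != nil {
		fmt.Println("bad glob pattern:", err)
		return
	}
	fmt.Println(path, matches)
}
```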
If no servers are specified, then the default address of `http://127.0.0.1:1936/haproxy?stats` will be used.
### Measurements & Fields:
The plugin will gather the measurements outlined in the [HAproxy CSV format documentation](https://cbonte.github.io/haproxy-dconv/1.5/configuration.html#9.1).
### Tags:
- All measurements have the following tags:
- server - address of server data is gathered from
- proxy - proxy name as reported in `pxname`
- sv - service name as reported in `svname`

View File

@@ -7,6 +7,7 @@ import (
"net"
"net/http"
"net/url"
"path/filepath"
"strconv"
"strings"
"sync"
@@ -17,7 +18,7 @@ import (
"github.com/influxdata/telegraf/plugins/inputs"
)
//CSV format: https://cbonte.github.io/haproxy-dconv/configuration-1.5.html#9.1
//CSV format: https://cbonte.github.io/haproxy-dconv/1.5/configuration.html#9.1
const (
HF_PXNAME = 0 // 0. pxname [LFBS]: proxy name
HF_SVNAME = 1 // 1. svname [LFBS]: service name (FRONTEND for frontend, BACKEND for backend, any name for server/listener)
@@ -93,12 +94,15 @@ var sampleConfig = `
## An array of addresses to gather stats about. Specify an ip or hostname
## with optional port. ie localhost, 10.10.3.33:1936, etc.
## Make sure you specify the complete path to the stats endpoint
## ie 10.10.3.33:1936/haproxy?stats
## including the protocol, ie http://10.10.3.33:1936/haproxy?stats
#
## If no servers are specified, then default to 127.0.0.1:1936/haproxy?stats
servers = ["http://myhaproxy.com:1936/haproxy?stats"]
## Or you can also use local socket
## servers = ["socket:/run/haproxy/admin.sock"]
##
## You can also use a local socket with standard wildcard globbing.
## Server addresses not starting with 'http' will be treated as a possible
## socket, so both examples below are valid.
## servers = ["socket:/run/haproxy/admin.sock", "/run/haproxy/*.sock"]
`
func (r *haproxy) SampleConfig() string {
@@ -116,10 +120,36 @@ func (g *haproxy) Gather(acc telegraf.Accumulator) error {
return g.gatherServer("http://127.0.0.1:1936/haproxy?stats", acc)
}
endpoints := make([]string, 0, len(g.Servers))
for _, endpoint := range g.Servers {
if strings.HasPrefix(endpoint, "http") {
endpoints = append(endpoints, endpoint)
continue
}
socketPath := getSocketAddr(endpoint)
matches, err := filepath.Glob(socketPath)
if err != nil {
return err
}
if len(matches) == 0 {
endpoints = append(endpoints, socketPath)
} else {
for _, match := range matches {
endpoints = append(endpoints, match)
}
}
}
var wg sync.WaitGroup
errChan := errchan.New(len(g.Servers))
wg.Add(len(g.Servers))
for _, server := range g.Servers {
errChan := errchan.New(len(endpoints))
wg.Add(len(endpoints))
for _, server := range endpoints {
go func(serv string) {
defer wg.Done()
errChan.C <- g.gatherServer(serv, acc)
@@ -131,14 +161,7 @@ func (g *haproxy) Gather(acc telegraf.Accumulator) error {
}
func (g *haproxy) gatherServerSocket(addr string, acc telegraf.Accumulator) error {
var socketPath string
socketAddr := strings.Split(addr, ":")
if len(socketAddr) >= 2 {
socketPath = socketAddr[1]
} else {
socketPath = socketAddr[0]
}
socketPath := getSocketAddr(addr)
c, err := net.Dial("unix", socketPath)
@@ -196,6 +219,16 @@ func (g *haproxy) gatherServer(addr string, acc telegraf.Accumulator) error {
return importCsvResult(res.Body, acc, u.Host)
}
func getSocketAddr(sock string) string {
socketAddr := strings.Split(sock, ":")
if len(socketAddr) >= 2 {
return socketAddr[1]
} else {
return socketAddr[0]
}
}
func importCsvResult(r io.Reader, acc telegraf.Accumulator, host string) error {
csv := csv.NewReader(r)
result, err := csv.ReadAll()

View File

@@ -72,38 +72,7 @@ func TestHaproxyGeneratesMetricsWithAuthentication(t *testing.T) {
"sv": "host0",
}
fields := map[string]interface{}{
"active_servers": uint64(1),
"backup_servers": uint64(0),
"bin": uint64(510913516),
"bout": uint64(2193856571),
"check_duration": uint64(10),
"cli_abort": uint64(73),
"ctime": uint64(2),
"downtime": uint64(0),
"dresp": uint64(0),
"econ": uint64(0),
"eresp": uint64(1),
"http_response.1xx": uint64(0),
"http_response.2xx": uint64(119534),
"http_response.3xx": uint64(48051),
"http_response.4xx": uint64(2345),
"http_response.5xx": uint64(1056),
"lbtot": uint64(171013),
"qcur": uint64(0),
"qmax": uint64(0),
"qtime": uint64(0),
"rate": uint64(3),
"rate_max": uint64(12),
"rtime": uint64(312),
"scur": uint64(1),
"smax": uint64(32),
"srv_abort": uint64(1),
"stot": uint64(171014),
"ttime": uint64(2341),
"wredis": uint64(0),
"wretr": uint64(1),
}
fields := HaproxyGetFieldValues()
acc.AssertContainsTaggedFields(t, "haproxy", fields, tags)
//Here, we should get error because we don't pass authentication data
@@ -136,102 +105,58 @@ func TestHaproxyGeneratesMetricsWithoutAuthentication(t *testing.T) {
"sv": "host0",
}
fields := map[string]interface{}{
"active_servers": uint64(1),
"backup_servers": uint64(0),
"bin": uint64(510913516),
"bout": uint64(2193856571),
"check_duration": uint64(10),
"cli_abort": uint64(73),
"ctime": uint64(2),
"downtime": uint64(0),
"dresp": uint64(0),
"econ": uint64(0),
"eresp": uint64(1),
"http_response.1xx": uint64(0),
"http_response.2xx": uint64(119534),
"http_response.3xx": uint64(48051),
"http_response.4xx": uint64(2345),
"http_response.5xx": uint64(1056),
"lbtot": uint64(171013),
"qcur": uint64(0),
"qmax": uint64(0),
"qtime": uint64(0),
"rate": uint64(3),
"rate_max": uint64(12),
"rtime": uint64(312),
"scur": uint64(1),
"smax": uint64(32),
"srv_abort": uint64(1),
"stot": uint64(171014),
"ttime": uint64(2341),
"wredis": uint64(0),
"wretr": uint64(1),
}
fields := HaproxyGetFieldValues()
acc.AssertContainsTaggedFields(t, "haproxy", fields, tags)
}
func TestHaproxyGeneratesMetricsUsingSocket(t *testing.T) {
var randomNumber int64
binary.Read(rand.Reader, binary.LittleEndian, &randomNumber)
sock, err := net.Listen("unix", fmt.Sprintf("/tmp/test-haproxy%d.sock", randomNumber))
if err != nil {
t.Fatal("Cannot initialize socket ")
var sockets [5]net.Listener
_globmask := "/tmp/test-haproxy*.sock"
_badmask := "/tmp/test-fail-haproxy*.sock"
for i := 0; i < 5; i++ {
binary.Read(rand.Reader, binary.LittleEndian, &randomNumber)
sockname := fmt.Sprintf("/tmp/test-haproxy%d.sock", randomNumber)
sock, err := net.Listen("unix", sockname)
if err != nil {
t.Fatal("Cannot initialize socket ")
}
sockets[i] = sock
defer sock.Close()
s := statServer{}
go s.serverSocket(sock)
}
defer sock.Close()
s := statServer{}
go s.serverSocket(sock)
r := &haproxy{
Servers: []string{sock.Addr().String()},
Servers: []string{_globmask},
}
var acc testutil.Accumulator
err = r.Gather(&acc)
err := r.Gather(&acc)
require.NoError(t, err)
tags := map[string]string{
"proxy": "be_app",
"server": sock.Addr().String(),
"sv": "host0",
fields := HaproxyGetFieldValues()
for _, sock := range sockets {
tags := map[string]string{
"proxy": "be_app",
"server": sock.Addr().String(),
"sv": "host0",
}
acc.AssertContainsTaggedFields(t, "haproxy", fields, tags)
}
fields := map[string]interface{}{
"active_servers": uint64(1),
"backup_servers": uint64(0),
"bin": uint64(510913516),
"bout": uint64(2193856571),
"check_duration": uint64(10),
"cli_abort": uint64(73),
"ctime": uint64(2),
"downtime": uint64(0),
"dresp": uint64(0),
"econ": uint64(0),
"eresp": uint64(1),
"http_response.1xx": uint64(0),
"http_response.2xx": uint64(119534),
"http_response.3xx": uint64(48051),
"http_response.4xx": uint64(2345),
"http_response.5xx": uint64(1056),
"lbtot": uint64(171013),
"qcur": uint64(0),
"qmax": uint64(0),
"qtime": uint64(0),
"rate": uint64(3),
"rate_max": uint64(12),
"rtime": uint64(312),
"scur": uint64(1),
"smax": uint64(32),
"srv_abort": uint64(1),
"stot": uint64(171014),
"ttime": uint64(2341),
"wredis": uint64(0),
"wretr": uint64(1),
}
acc.AssertContainsTaggedFields(t, "haproxy", fields, tags)
// This mask should not match any socket
r.Servers = []string{_badmask}
err = r.Gather(&acc)
require.Error(t, err)
}
//When not passing server config, we default to localhost
@@ -246,6 +171,42 @@ func TestHaproxyDefaultGetFromLocalhost(t *testing.T) {
assert.Contains(t, err.Error(), "127.0.0.1:1936/haproxy?stats/;csv")
}
func HaproxyGetFieldValues() map[string]interface{} {
fields := map[string]interface{}{
"active_servers": uint64(1),
"backup_servers": uint64(0),
"bin": uint64(510913516),
"bout": uint64(2193856571),
"check_duration": uint64(10),
"cli_abort": uint64(73),
"ctime": uint64(2),
"downtime": uint64(0),
"dresp": uint64(0),
"econ": uint64(0),
"eresp": uint64(1),
"http_response.1xx": uint64(0),
"http_response.2xx": uint64(119534),
"http_response.3xx": uint64(48051),
"http_response.4xx": uint64(2345),
"http_response.5xx": uint64(1056),
"lbtot": uint64(171013),
"qcur": uint64(0),
"qmax": uint64(0),
"qtime": uint64(0),
"rate": uint64(3),
"rate_max": uint64(12),
"rtime": uint64(312),
"scur": uint64(1),
"smax": uint64(32),
"srv_abort": uint64(1),
"stot": uint64(171014),
"ttime": uint64(2341),
"wredis": uint64(0),
"wretr": uint64(1),
}
return fields
}
const csvOutputSample = `
# pxname,svname,qcur,qmax,scur,smax,slim,stot,bin,bout,dreq,dresp,ereq,econ,eresp,wretr,wredis,status,weight,act,bck,chkfail,chkdown,lastchg,downtime,qlimit,pid,iid,sid,throttle,lbtot,tracked,type,rate,rate_lim,rate_max,check_status,check_code,check_duration,hrsp_1xx,hrsp_2xx,hrsp_3xx,hrsp_4xx,hrsp_5xx,hrsp_other,hanafail,req_rate,req_rate_max,req_tot,cli_abrt,srv_abrt,comp_in,comp_out,comp_byp,comp_rsp,lastsess,last_chk,last_agt,qtime,ctime,rtime,ttime,
fe_app,FRONTEND,,81,288,713,2000,1094063,5557055817,24096715169,1102,80,95740,,,17,19,OPEN,,,,,,,,,2,16,113,13,114,,0,18,0,102,,,,0,1314093,537036,123452,11966,1360,,35,140,1987928,,,0,0,0,0,,,,,,,,

View File

@@ -0,0 +1,24 @@
# HTTP listener service input plugin
The HTTP listener is a service input plugin that listens for messages sent via HTTP POST.
The plugin expects messages in the InfluxDB line protocol ONLY; other Telegraf input data formats are not supported.
The intent of the plugin is to allow Telegraf to serve as a proxy/router for the /write endpoint of the InfluxDB HTTP API.
When chaining Telegraf instances using this plugin, CREATE DATABASE requests receive a 200 OK response with message body `{"results":[]}` but they are not relayed. The output configuration of the Telegraf instance which ultimately submits data to InfluxDB determines the destination database.
See: [Telegraf Input Data Formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#influx).
Example: curl -i -XPOST 'http://localhost:8186/write' --data-binary 'cpu_load_short,host=server01,region=us-west value=0.64 1434055562000000000'
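The listener also accepts gzip-compressed bodies when the request carries a `Content-Encoding: gzip` header (see the handler later in this diff). A minimal Go sketch of such a write, assuming a listener on the default `:8186` address:
```go
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"net/http"
)

func main() {
	line := "cpu_load_short,host=server01,region=us-west value=0.64 1434055562000000000\n"

	// Compress the line-protocol body before sending it.
	var body bytes.Buffer
	zw := gzip.NewWriter(&body)
	zw.Write([]byte(line))
	zw.Close()

	req, err := http.NewRequest("POST", "http://localhost:8186/write", &body)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Encoding", "gzip")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // a successful write returns 204 No Content
}
```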
### Configuration:
This is a sample configuration for the plugin.
```toml
# # Influx HTTP write listener
[[inputs.http_listener]]
## Address and port to host HTTP listener on
service_address = ":8186"
## timeouts
read_timeout = "10s"
write_timeout = "10s"
```

View File

@@ -0,0 +1,43 @@
package http_listener
import (
"sync/atomic"
)
type pool struct {
buffers chan []byte
size int
created int64
}
// NewPool returns a new pool object.
// n is the number of buffers
// bufSize is the size (in bytes) of each buffer
func NewPool(n, bufSize int) *pool {
return &pool{
buffers: make(chan []byte, n),
size: bufSize,
}
}
func (p *pool) get() []byte {
select {
case b := <-p.buffers:
return b
default:
atomic.AddInt64(&p.created, 1)
return make([]byte, p.size)
}
}
func (p *pool) put(b []byte) {
select {
case p.buffers <- b:
default:
}
}
func (p *pool) ncreated() int64 {
return atomic.LoadInt64(&p.created)
}
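// Illustrative usage sketch (not part of the plugin): take a buffer from the
// pool, use it, and return it when done, so steady-state traffic reuses a
// bounded set of buffers instead of allocating one per request.
func examplePoolUse() {
	p := NewPool(200, 64*1024) // 200 buffers of 64 KB, as the listener does
	buf := p.get()
	defer p.put(buf)
	// ... fill buf from a request body and parse it ...
}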

View File

@@ -0,0 +1,294 @@
package http_listener
import (
"bytes"
"compress/gzip"
"io"
"log"
"net"
"net/http"
"sync"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/plugins/parsers/influx"
)
const (
// DEFAULT_MAX_BODY_SIZE is the default maximum request body size, in bytes.
// if the request body is over this size, we will return an HTTP 413 error.
// 500 MB
DEFAULT_MAX_BODY_SIZE = 500 * 1024 * 1024
// DEFAULT_MAX_LINE_SIZE is the maximum size, in bytes, that can be allocated for
// a single InfluxDB point.
// 64 KB
DEFAULT_MAX_LINE_SIZE = 64 * 1024
)
type HTTPListener struct {
ServiceAddress string
ReadTimeout internal.Duration
WriteTimeout internal.Duration
MaxBodySize int64
MaxLineSize int
mu sync.Mutex
wg sync.WaitGroup
listener net.Listener
parser influx.InfluxParser
acc telegraf.Accumulator
pool *pool
}
const sampleConfig = `
## Address and port to host HTTP listener on
service_address = ":8186"
## maximum duration before timing out read of the request
read_timeout = "10s"
## maximum duration before timing out write of the response
write_timeout = "10s"
## Maximum allowed http request body size in bytes.
## 0 means to use the default of 524,288,000 bytes (500 mebibytes)
max_body_size = 0
## Maximum line size allowed to be sent in bytes.
## 0 means to use the default of 65536 bytes (64 kibibytes)
max_line_size = 0
`
func (h *HTTPListener) SampleConfig() string {
return sampleConfig
}
func (h *HTTPListener) Description() string {
return "Influx HTTP write listener"
}
func (h *HTTPListener) Gather(_ telegraf.Accumulator) error {
log.Printf("D! The http_listener has created %d buffers", h.pool.ncreated())
return nil
}
// Start starts the http listener service.
func (h *HTTPListener) Start(acc telegraf.Accumulator) error {
h.mu.Lock()
defer h.mu.Unlock()
if h.MaxBodySize == 0 {
h.MaxBodySize = DEFAULT_MAX_BODY_SIZE
}
if h.MaxLineSize == 0 {
h.MaxLineSize = DEFAULT_MAX_LINE_SIZE
}
h.acc = acc
h.pool = NewPool(200, h.MaxLineSize)
var listener, err = net.Listen("tcp", h.ServiceAddress)
if err != nil {
return err
}
h.listener = listener
h.wg.Add(1)
go func() {
defer h.wg.Done()
h.httpListen()
}()
log.Printf("I! Started HTTP listener service on %s\n", h.ServiceAddress)
return nil
}
// Stop cleans up all resources
func (h *HTTPListener) Stop() {
h.mu.Lock()
defer h.mu.Unlock()
h.listener.Close()
h.wg.Wait()
log.Println("I! Stopped HTTP listener service on ", h.ServiceAddress)
}
// httpListen sets up an http.Server and calls server.Serve.
// Like server.Serve, httpListen always returns a non-nil error; for this
// reason, the error returned should probably be ignored.
// see https://golang.org/pkg/net/http/#Server.Serve
func (h *HTTPListener) httpListen() error {
if h.ReadTimeout.Duration < time.Second {
h.ReadTimeout.Duration = time.Second * 10
}
if h.WriteTimeout.Duration < time.Second {
h.WriteTimeout.Duration = time.Second * 10
}
var server = http.Server{
Handler: h,
ReadTimeout: h.ReadTimeout.Duration,
WriteTimeout: h.WriteTimeout.Duration,
}
return server.Serve(h.listener)
}
func (h *HTTPListener) ServeHTTP(res http.ResponseWriter, req *http.Request) {
switch req.URL.Path {
case "/write":
h.serveWrite(res, req)
case "/query":
// Deliver a dummy response to the query endpoint, as some InfluxDB
// clients test endpoint availability with a query
res.Header().Set("Content-Type", "application/json")
res.Header().Set("X-Influxdb-Version", "1.0")
res.WriteHeader(http.StatusOK)
res.Write([]byte("{\"results\":[]}"))
case "/ping":
// respond to ping requests
res.WriteHeader(http.StatusNoContent)
default:
// Don't know how to respond to calls to other endpoints
http.NotFound(res, req)
}
}
func (h *HTTPListener) serveWrite(res http.ResponseWriter, req *http.Request) {
// Check that the content length is not too large for us to handle.
if req.ContentLength > h.MaxBodySize {
tooLarge(res)
return
}
now := time.Now()
// Handle gzip request bodies
body := req.Body
var err error
if req.Header.Get("Content-Encoding") == "gzip" {
body, err = gzip.NewReader(req.Body)
defer body.Close()
if err != nil {
log.Println("E! " + err.Error())
badRequest(res)
return
}
}
body = http.MaxBytesReader(res, body, h.MaxBodySize)
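// return400 records that part of the body failed to parse; hangingBytes means
// we are discarding the remainder of a line that exceeded the buffer size and
// are scanning for the next newline.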
var return400 bool
var hangingBytes bool
buf := h.pool.get()
defer h.pool.put(buf)
bufStart := 0
for {
n, err := io.ReadFull(body, buf[bufStart:])
if err != nil && err != io.ErrUnexpectedEOF && err != io.EOF {
log.Println("E! " + err.Error())
// problem reading the request body
badRequest(res)
return
}
if err == io.EOF {
if return400 {
badRequest(res)
} else {
res.WriteHeader(http.StatusNoContent)
}
return
}
if hangingBytes {
i := bytes.IndexByte(buf, '\n')
if i == -1 {
// still didn't find a newline, keep scanning
continue
}
// rotate the bit remaining after the first newline to the front of the buffer
i++ // start copying after the newline
bufStart = len(buf) - i
if bufStart > 0 {
copy(buf, buf[i:])
}
hangingBytes = false
continue
}
if err == io.ErrUnexpectedEOF {
// finished reading the request body
if err := h.parse(buf[:n+bufStart], now); err != nil {
log.Println("E! " + err.Error())
return400 = true
}
if return400 {
badRequest(res)
} else {
res.WriteHeader(http.StatusNoContent)
}
return
}
// if we got down here it means that we filled our buffer, and there
// are still bytes remaining to be read. So we will parse up until the
// final newline, then push the rest of the bytes into the next buffer.
i := bytes.LastIndexByte(buf, '\n')
if i == -1 {
// drop any line longer than the max buffer size
log.Printf("E! http_listener received a single line longer than the maximum of %d bytes",
len(buf))
hangingBytes = true
return400 = true
bufStart = 0
continue
}
if err := h.parse(buf[:i], now); err != nil {
log.Println("E! " + err.Error())
return400 = true
}
// rotate the bit remaining after the last newline to the front of the buffer
i++ // start copying after the newline
bufStart = len(buf) - i
if bufStart > 0 {
copy(buf, buf[i:])
}
}
}
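// parse converts a chunk of line-protocol bytes into metrics, using t as the
// default timestamp; any metrics returned by the parser are added to the
// accumulator even when an error is also returned.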
func (h *HTTPListener) parse(b []byte, t time.Time) error {
metrics, err := h.parser.ParseWithDefaultTime(b, t)
for _, m := range metrics {
h.acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time())
}
return err
}
func tooLarge(res http.ResponseWriter) {
res.Header().Set("Content-Type", "application/json")
res.Header().Set("X-Influxdb-Version", "1.0")
res.WriteHeader(http.StatusRequestEntityTooLarge)
res.Write([]byte(`{"error":"http: request body too large"}`))
}
func badRequest(res http.ResponseWriter) {
res.Header().Set("Content-Type", "application/json")
res.Header().Set("X-Influxdb-Version", "1.0")
res.WriteHeader(http.StatusBadRequest)
res.Write([]byte(`{"error":"http: bad request"}`))
}
func init() {
inputs.Add("http_listener", func() telegraf.Input {
return &HTTPListener{
ServiceAddress: ":8186",
}
})
}

File diff suppressed because one or more lines are too long

Binary file not shown.

View File

@@ -2,8 +2,7 @@
The httpjson plugin can collect data from remote URLs which respond with JSON. Then it flattens JSON and finds all numeric values, treating them as floats.
For example, if you have a service called _mycollector_, which has HTTP endpoint for gathering stats at http://my.service.com/_stats, you would configure the HTTP JSON
plugin like this:
For example, if you have a service called _mycollector_, which has HTTP endpoint for gathering stats at http://my.service.com/_stats, you would configure the HTTP JSON plugin like this:
```
[[inputs.httpjson]]
@@ -15,12 +14,17 @@ plugin like this:
# HTTP method to use (case-sensitive)
method = "GET"
# Set response_timeout (default 5 seconds)
response_timeout = "5s"
```
`name` is used as a prefix for the measurements.
`method` specifies the HTTP method to use for requests.
`response_timeout` specifies how long to wait for a response.
You can also specify which keys from server response should be considered tags:
```
@@ -94,8 +98,7 @@ httpjson_mycollector_b_e,service='service01',server='http://my.service.com/_stat
# Example 2, Multiple Services:
There is also the option to collect JSON from multiple services, here is an
example doing that.
There is also the option to collect JSON from multiple services, here is an example doing that.
```
[[inputs.httpjson]]

View File

@@ -16,13 +16,15 @@ import (
"github.com/influxdata/telegraf/plugins/parsers"
)
// HttpJson struct
type HttpJson struct {
Name string
Servers []string
Method string
TagKeys []string
Parameters map[string]string
Headers map[string]string
Name string
Servers []string
Method string
TagKeys []string
ResponseTimeout internal.Duration
Parameters map[string]string
Headers map[string]string
// Path to CA file
SSLCA string `toml:"ssl_ca"`
@@ -79,6 +81,8 @@ var sampleConfig = `
"http://localhost:9999/stats/",
"http://localhost:9998/stats/",
]
## Set response_timeout (default 5 seconds)
response_timeout = "5s"
## HTTP method to use: GET or POST (case-sensitive)
method = "GET"
@@ -126,12 +130,12 @@ func (h *HttpJson) Gather(acc telegraf.Accumulator) error {
return err
}
tr := &http.Transport{
ResponseHeaderTimeout: time.Duration(3 * time.Second),
ResponseHeaderTimeout: h.ResponseTimeout.Duration,
TLSClientConfig: tlsCfg,
}
client := &http.Client{
Transport: tr,
Timeout: time.Duration(4 * time.Second),
Timeout: h.ResponseTimeout.Duration,
}
h.client.SetHTTPClient(client)
}
@@ -291,6 +295,9 @@ func init() {
inputs.Add("httpjson", func() telegraf.Input {
return &HttpJson{
client: &RealHTTPClient{},
ResponseTimeout: internal.Duration{
Duration: 5 * time.Second,
},
}
})
}

View File

@@ -10,11 +10,16 @@ import (
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/inputs"
)
type InfluxDB struct {
URLs []string `toml:"urls"`
Timeout internal.Duration
client *http.Client
}
func (*InfluxDB) Description() string {
@@ -32,6 +37,9 @@ func (*InfluxDB) SampleConfig() string {
urls = [
"http://localhost:8086/debug/vars"
]
## http request & header timeout
timeout = "5s"
`
}
@@ -39,6 +47,16 @@ func (i *InfluxDB) Gather(acc telegraf.Accumulator) error {
if len(i.URLs) == 0 {
i.URLs = []string{"http://localhost:8086/debug/vars"}
}
if i.client == nil {
i.client = &http.Client{
Transport: &http.Transport{
ResponseHeaderTimeout: i.Timeout.Duration,
},
Timeout: i.Timeout.Duration,
}
}
errorChannel := make(chan error, len(i.URLs))
var wg sync.WaitGroup
@@ -104,15 +122,6 @@ type memstats struct {
GCCPUFraction float64 `json:"GCCPUFraction"`
}
var tr = &http.Transport{
ResponseHeaderTimeout: time.Duration(3 * time.Second),
}
var client = &http.Client{
Transport: tr,
Timeout: time.Duration(4 * time.Second),
}
// Gathers data from a particular URL
// Parameters:
// acc : The telegraf Accumulator to use
@@ -127,7 +136,7 @@ func (i *InfluxDB) gatherURL(
shardCounter := 0
now := time.Now()
resp, err := client.Get(url)
resp, err := i.client.Get(url)
if err != nil {
return err
}
@@ -210,9 +219,13 @@ func (i *InfluxDB) gatherURL(
continue
}
if p.Tags == nil {
p.Tags = make(map[string]string)
}
// If the object was a point, but was not fully initialized,
// ignore it and move on.
if p.Name == "" || p.Tags == nil || p.Values == nil || len(p.Values) == 0 {
if p.Name == "" || p.Values == nil || len(p.Values) == 0 {
continue
}
@@ -244,6 +257,8 @@ func (i *InfluxDB) gatherURL(
func init() {
inputs.Add("influxdb", func() telegraf.Input {
return &InfluxDB{}
return &InfluxDB{
Timeout: internal.Duration{Duration: time.Second * 5},
}
})
}

View File

@@ -116,6 +116,31 @@ func TestInfluxDB(t *testing.T) {
}, map[string]string{})
}
func TestInfluxDB2(t *testing.T) {
fakeInfluxServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.URL.Path == "/endpoint" {
_, _ = w.Write([]byte(influxReturn2))
} else {
w.WriteHeader(http.StatusNotFound)
}
}))
defer fakeInfluxServer.Close()
plugin := &influxdb.InfluxDB{
URLs: []string{fakeInfluxServer.URL + "/endpoint"},
}
var acc testutil.Accumulator
require.NoError(t, plugin.Gather(&acc))
require.Len(t, acc.Metrics, 34)
acc.AssertContainsTaggedFields(t, "influxdb",
map[string]interface{}{
"n_shards": 1,
}, map[string]string{})
}
func TestErrorHandling(t *testing.T) {
badServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.URL.Path == "/endpoint" {
@@ -241,3 +266,49 @@ const influxReturn = `
"tsm1_wal:/Users/csparr/.influxdb/wal/udp/default/1": {"name": "tsm1_wal", "tags": {"database": "udp", "path": "/Users/csparr/.influxdb/wal/udp/default/1", "retentionPolicy": "default"}, "values": {"currentSegmentDiskBytes": 193728, "oldSegmentsDiskBytes": 1008330}},
"write": {"name": "write", "tags": {}, "values": {"pointReq": 3613, "pointReqLocal": 3613, "req": 110, "subWriteOk": 110, "writeOk": 110}}
}`
// InfluxDB 1.0+ with tags: null instead of tags: {}.
const influxReturn2 = `
{
"cluster": {"name": "cluster", "tags": null, "values": {}},
"cmdline": ["influxd"],
"cq": {"name": "cq", "tags": null, "values": {}},
"database:_internal": {"name": "database", "tags": {"database": "_internal"}, "values": {"numMeasurements": 8, "numSeries": 12}},
"database:udp": {"name": "database", "tags": {"database": "udp"}, "values": {"numMeasurements": 14, "numSeries": 38}},
"hh:/Users/csparr/.influxdb/hh": {"name": "hh", "tags": {"path": "/Users/csparr/.influxdb/hh"}, "values": {}},
"httpd::8086": {"name": "httpd", "tags": {"bind": ":8086"}, "values": {"req": 7, "reqActive": 1, "reqDurationNs": 4488799}},
"measurement:cpu_idle.udp": {"name": "measurement", "tags": {"database": "udp", "measurement": "cpu_idle"}, "values": {"numSeries": 1}},
"measurement:cpu_usage.udp": {"name": "measurement", "tags": {"database": "udp", "measurement": "cpu_usage"}, "values": {"numSeries": 1}},
"measurement:database._internal": {"name": "measurement", "tags": {"database": "_internal", "measurement": "database"}, "values": {"numSeries": 2}},
"measurement:database.udp": {"name": "measurement", "tags": {"database": "udp", "measurement": "database"}, "values": {"numSeries": 2}},
"measurement:httpd.udp": {"name": "measurement", "tags": {"database": "udp", "measurement": "httpd"}, "values": {"numSeries": 1}},
"measurement:measurement.udp": {"name": "measurement", "tags": {"database": "udp", "measurement": "measurement"}, "values": {"numSeries": 22}},
"measurement:mem.udp": {"name": "measurement", "tags": {"database": "udp", "measurement": "mem"}, "values": {"numSeries": 1}},
"measurement:net.udp": {"name": "measurement", "tags": {"database": "udp", "measurement": "net"}, "values": {"numSeries": 1}},
"measurement:runtime._internal": {"name": "measurement", "tags": {"database": "_internal", "measurement": "runtime"}, "values": {"numSeries": 1}},
"measurement:runtime.udp": {"name": "measurement", "tags": {"database": "udp", "measurement": "runtime"}, "values": {"numSeries": 1}},
"measurement:shard._internal": {"name": "measurement", "tags": {"database": "_internal", "measurement": "shard"}, "values": {"numSeries": 2}},
"measurement:shard.udp": {"name": "measurement", "tags": {"database": "udp", "measurement": "shard"}, "values": {"numSeries": 1}},
"measurement:subscriber._internal": {"name": "measurement", "tags": {"database": "_internal", "measurement": "subscriber"}, "values": {"numSeries": 1}},
"measurement:subscriber.udp": {"name": "measurement", "tags": {"database": "udp", "measurement": "subscriber"}, "values": {"numSeries": 1}},
"measurement:swap_used.udp": {"name": "measurement", "tags": {"database": "udp", "measurement": "swap_used"}, "values": {"numSeries": 1}},
"measurement:tsm1_cache._internal": {"name": "measurement", "tags": {"database": "_internal", "measurement": "tsm1_cache"}, "values": {"numSeries": 2}},
"measurement:tsm1_cache.udp": {"name": "measurement", "tags": {"database": "udp", "measurement": "tsm1_cache"}, "values": {"numSeries": 2}},
"measurement:tsm1_wal._internal": {"name": "measurement", "tags": {"database": "_internal", "measurement": "tsm1_wal"}, "values": {"numSeries": 2}},
"measurement:tsm1_wal.udp": {"name": "measurement", "tags": {"database": "udp", "measurement": "tsm1_wal"}, "values": {"numSeries": 2}},
"measurement:udp._internal": {"name": "measurement", "tags": {"database": "_internal", "measurement": "udp"}, "values": {"numSeries": 1}},
"measurement:write._internal": {"name": "measurement", "tags": {"database": "_internal", "measurement": "write"}, "values": {"numSeries": 1}},
"measurement:write.udp": {"name": "measurement", "tags": {"database": "udp", "measurement": "write"}, "values": {"numSeries": 1}},
"memstats": {"Alloc":17034016,"TotalAlloc":201739016,"Sys":38537464,"Lookups":77,"Mallocs":570251,"Frees":381008,"HeapAlloc":17034016,"HeapSys":33849344,"HeapIdle":15802368,"HeapInuse":18046976,"HeapReleased":3473408,"HeapObjects":189243,"StackInuse":753664,"StackSys":753664,"MSpanInuse":97440,"MSpanSys":114688,"MCacheInuse":4800,"MCacheSys":16384,"BuckHashSys":1461583,"GCSys":1112064,"OtherSys":1229737,"NextGC":20843042,"LastGC":1460434886475114239,"PauseTotalNs":5132914,"PauseNs":[195052,117751,139370,156933,263089,165249,713747,103904,122015,294408,213753,170864,175845,114221,121563,122409,113098,162219,229257,126726,250774,254235,117206,293588,144279,124306,127053,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"PauseEnd":[1460433856394860455,1460433856398162739,1460433856405888337,1460433856411784017,1460433856417924684,1460433856428385687,1460433856443782908,1460433856456522851,1460433857392743223,1460433866484394564,1460433866494076235,1460433896472438632,1460433957839825106,1460433976473440328,1460434016473413006,1460434096471892794,1460434126470792929,1460434246480428250,1460434366554468369,1460434396471249528,1460434456471205885,1460434476479487292,1460434536471435965,1460434616469784776,1460434736482078216,1460434856544251733,1460434886475114239,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"NumGC":27,"GCCPUFraction":4.287178819113636e-05,"EnableGC":true,"DebugGC":false,"BySize":[{"Size":0,"Mallocs":0,"Frees":0},{"Size":8,"Mallocs":1031,"Frees":955},{"Size":16,"Mallocs":308485,"Frees":142064},{"Size":32,"Mallocs":64937,"Frees":54321},{"Size":48,"Mallocs":33012,"Frees":29754},{"Size":64,"Mallocs":20299,"Frees":18173},{"Size":80,"Mallocs":8186,"Frees":7597},{"Size":96,"Mallocs":9806,"Frees":8982},{"Size":112,"Mallocs":5671,"Frees":4850},{"Size":128,"Mallocs":2972,"Frees":2684},{"Size":144,"Mallocs":4106,"Frees":3719},{"Size":160,"Mallocs":1324,"Frees":911},{"Size":176,"Mallocs":2574,"Frees":2391},{"Size":192,"Mallocs":4053,"Frees":3863},{"Size":208,"Mallocs":442,"Frees":307},{"Size":224,"Mallocs":336,"Frees":172},{"Size":240,"Mallocs":143,"Frees":125},{"Size":256,"Mallocs":542,"Frees":497},{"Size":288,"Mallocs":15971,"Frees":14761},{"Size":320,"Mallocs":245,"Frees":30},{"Size":352,"Mallocs":1299,"Frees":1065},{"Size":384,"Mallocs":138,"Frees":2},{"Size":416,"Mallocs":54,"Frees":47},{"Size":448,"Mallocs":75,"Frees":29},{"Size":480,"Mallocs":6,"Frees":4},{"Size":512,"Mallocs":452,"Frees":422},{"Size":576,"Mallocs":486,"Frees":395},{"Size":640,"Mallocs":81,"Frees":67},{"Size":704,"Mallocs":421,"Frees":397},{"Size":768,"Mallocs":469,"Frees":468},{"Size":896,"Mallocs":1049,"Frees":1010},{"Size":1024,"Mallocs":1078,"Frees":960},{"Size":1152,"Mallocs":750,"Frees":498},{"Size":1280
,"Mallocs":84,"Frees":72},{"Size":1408,"Mallocs":218,"Frees":187},{"Size":1536,"Mallocs":73,"Frees":48},{"Size":1664,"Mallocs":43,"Frees":30},{"Size":2048,"Mallocs":153,"Frees":57},{"Size":2304,"Mallocs":41,"Frees":30},{"Size":2560,"Mallocs":18,"Frees":15},{"Size":2816,"Mallocs":164,"Frees":157},{"Size":3072,"Mallocs":0,"Frees":0},{"Size":3328,"Mallocs":13,"Frees":6},{"Size":4096,"Mallocs":101,"Frees":82},{"Size":4608,"Mallocs":32,"Frees":26},{"Size":5376,"Mallocs":165,"Frees":151},{"Size":6144,"Mallocs":15,"Frees":9},{"Size":6400,"Mallocs":1,"Frees":1},{"Size":6656,"Mallocs":1,"Frees":0},{"Size":6912,"Mallocs":0,"Frees":0},{"Size":8192,"Mallocs":13,"Frees":13},{"Size":8448,"Mallocs":0,"Frees":0},{"Size":8704,"Mallocs":1,"Frees":1},{"Size":9472,"Mallocs":6,"Frees":4},{"Size":10496,"Mallocs":0,"Frees":0},{"Size":12288,"Mallocs":41,"Frees":35},{"Size":13568,"Mallocs":0,"Frees":0},{"Size":14080,"Mallocs":0,"Frees":0},{"Size":16384,"Mallocs":4,"Frees":4},{"Size":16640,"Mallocs":0,"Frees":0},{"Size":17664,"Mallocs":0,"Frees":0}]},
"queryExecutor": {"name": "queryExecutor", "tags": null, "values": {}},
"shard:/Users/csparr/.influxdb/data/_internal/monitor/2:2": {"name": "shard", "tags": {"database": "_internal", "engine": "tsm1", "id": "2", "path": "/Users/csparr/.influxdb/data/_internal/monitor/2", "retentionPolicy": "monitor"}, "values": {}},
"shard:/Users/csparr/.influxdb/data/udp/default/1:1": {"name": "shard", "tags": {"database": "udp", "engine": "tsm1", "id": "1", "path": "/Users/csparr/.influxdb/data/udp/default/1", "retentionPolicy": "default"}, "values": {"fieldsCreate": 61, "seriesCreate": 33, "writePointsOk": 3613, "writeReq": 110}},
"subscriber": {"name": "subscriber", "tags": null, "values": {"pointsWritten": 3613}},
"tsm1_cache:/Users/csparr/.influxdb/data/_internal/monitor/2": {"name": "tsm1_cache", "tags": {"database": "_internal", "path": "/Users/csparr/.influxdb/data/_internal/monitor/2", "retentionPolicy": "monitor"}, "values": {"WALCompactionTimeMs": 0, "cacheAgeMs": 1103932, "cachedBytes": 0, "diskBytes": 0, "memBytes": 40480, "snapshotCount": 0}},
"tsm1_cache:/Users/csparr/.influxdb/data/udp/default/1": {"name": "tsm1_cache", "tags": {"database": "udp", "path": "/Users/csparr/.influxdb/data/udp/default/1", "retentionPolicy": "default"}, "values": {"WALCompactionTimeMs": 0, "cacheAgeMs": 1103029, "cachedBytes": 0, "diskBytes": 0, "memBytes": 2359472, "snapshotCount": 0}},
"tsm1_filestore:/Users/csparr/.influxdb/data/_internal/monitor/2": {"name": "tsm1_filestore", "tags": {"database": "_internal", "path": "/Users/csparr/.influxdb/data/_internal/monitor/2", "retentionPolicy": "monitor"}, "values": {}},
"tsm1_filestore:/Users/csparr/.influxdb/data/udp/default/1": {"name": "tsm1_filestore", "tags": {"database": "udp", "path": "/Users/csparr/.influxdb/data/udp/default/1", "retentionPolicy": "default"}, "values": {}},
"tsm1_wal:/Users/csparr/.influxdb/wal/_internal/monitor/2": {"name": "tsm1_wal", "tags": {"database": "_internal", "path": "/Users/csparr/.influxdb/wal/_internal/monitor/2", "retentionPolicy": "monitor"}, "values": {"currentSegmentDiskBytes": 0, "oldSegmentsDiskBytes": 69532}},
"tsm1_wal:/Users/csparr/.influxdb/wal/udp/default/1": {"name": "tsm1_wal", "tags": {"database": "udp", "path": "/Users/csparr/.influxdb/wal/udp/default/1", "retentionPolicy": "default"}, "values": {"currentSegmentDiskBytes": 193728, "oldSegmentsDiskBytes": 1008330}},
"write": {"name": "write", "tags": null, "values": {"pointReq": 3613, "pointReqLocal": 3613, "req": 110, "subWriteOk": 110, "writeOk": 110}}
}`

View File

@@ -0,0 +1,74 @@
# Iptables Plugin
The iptables plugin gathers packet and byte counters for rules within a set of tables and chains from Linux's iptables firewall.
Rules are identified through their associated comment. Rules without a comment are ignored.
The iptables command requires the CAP_NET_ADMIN and CAP_NET_RAW capabilities. You have several options for granting telegraf permission to run iptables:
* Run telegraf as root. This is strongly discouraged.
* Configure systemd to run telegraf with CAP_NET_ADMIN and CAP_NET_RAW. This is the simplest and recommended option.
* Configure sudo to allow telegraf to run iptables. This is the most restrictive option, but requires a sudo setup.
### Using systemd capabilities
You may run `systemctl edit telegraf.service` and add the following:
```
[Service]
CapabilityBoundingSet=CAP_NET_RAW CAP_NET_ADMIN
AmbientCapabilities=CAP_NET_RAW CAP_NET_ADMIN
```
Since telegraf will fork a process to run iptables, `AmbientCapabilities` is required to transmit the capabilities bounding set to the forked process.
### Using sudo
You may edit your sudo configuration with the following:
```sudo
telegraf ALL=(root) NOPASSWD: /usr/bin/iptables -nvL *
```
### Configuration:
```toml
# use sudo to run iptables
use_sudo = false
# defines the table to monitor:
table = "filter"
# defines the chains to monitor:
chains = [ "INPUT" ]
```
### Measurements & Fields:
- iptables
- pkts (integer, count)
- bytes (integer, bytes)
### Tags:
- All measurements have the following tags:
- table
- chain
- ruleid
The `ruleid` is the comment associated with the rule.
### Example Output:
```
$ iptables -nvL INPUT
Chain INPUT (policy DROP 0 packets, 0 bytes)
pkts bytes target prot opt in out source destination
100 1024 ACCEPT tcp -- * * 192.168.0.0/24 0.0.0.0/0 tcp dpt:22 /* ssh */
42 2048 ACCEPT tcp -- * * 192.168.0.0/24 0.0.0.0/0 tcp dpt:80 /* httpd */
```
```
$ ./telegraf -config telegraf.conf -input-filter iptables -test
iptables,table=filter,chain=INPUT,ruleid=ssh pkts=100i,bytes=1024i 1453831884664956455
iptables,table=filter,chain=INPUT,ruleid=httpd pkts=42i,bytes=2048i 1453831884664956455
```

View File

@@ -0,0 +1,128 @@
// +build linux
package iptables
import (
"errors"
"os/exec"
"regexp"
"strconv"
"strings"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
)
// Iptables is a telegraf plugin to gather packets and bytes throughput from Linux's iptables packet filter.
type Iptables struct {
UseSudo bool
Table string
Chains []string
lister chainLister
}
// Description returns a short description of the plugin.
func (ipt *Iptables) Description() string {
return "Gather packets and bytes throughput from iptables"
}
// SampleConfig returns sample configuration options.
func (ipt *Iptables) SampleConfig() string {
return `
## iptables requires root access on most systems.
## Setting 'use_sudo' to true will make use of sudo to run iptables.
## Users must configure sudo to allow the telegraf user to run iptables with no password.
## iptables can be restricted to only the list command "iptables -nvL".
use_sudo = false
## defines the table to monitor:
table = "filter"
## defines the chains to monitor:
chains = [ "INPUT" ]
`
}
// Gather gathers iptables packets and bytes throughput from the configured tables and chains.
func (ipt *Iptables) Gather(acc telegraf.Accumulator) error {
if ipt.Table == "" || len(ipt.Chains) == 0 {
return nil
}
// best effort : we continue through the chains even if an error is encountered,
// but we keep track of the last error.
var err error
for _, chain := range ipt.Chains {
data, e := ipt.lister(ipt.Table, chain)
if e != nil {
err = e
continue
}
e = ipt.parseAndGather(data, acc)
if e != nil {
err = e
continue
}
}
return err
}
func (ipt *Iptables) chainList(table, chain string) (string, error) {
iptablePath, err := exec.LookPath("iptables")
if err != nil {
return "", err
}
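// When use_sudo is enabled, invoke "sudo <iptables path> ..." so the rules
// can be listed without running telegraf itself as root.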
var args []string
name := iptablePath
if ipt.UseSudo {
name = "sudo"
args = append(args, iptablePath)
}
args = append(args, "-nvL", chain, "-t", table, "-x")
c := exec.Command(name, args...)
out, err := c.Output()
return string(out), err
}
const measurement = "iptables"
var errParse = errors.New("Cannot parse iptables list information")
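// chainNameRe extracts the chain name from the first line of "iptables -nvL" output, e.g. "Chain INPUT (policy ...)".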
var chainNameRe = regexp.MustCompile(`^Chain\s+(\S+)`)
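// fieldsHeaderRe matches the column header line that must precede the per-rule counters.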
var fieldsHeaderRe = regexp.MustCompile(`^\s*pkts\s+bytes\s+`)
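// valuesRe captures the pkts and bytes counters and, when present, the trailing "/* comment */" used as the ruleid.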
var valuesRe = regexp.MustCompile(`^\s*([0-9]+)\s+([0-9]+)\s+.*?(/\*\s(.*)\s\*/)?$`)
func (ipt *Iptables) parseAndGather(data string, acc telegraf.Accumulator) error {
lines := strings.Split(data, "\n")
if len(lines) < 3 {
return nil
}
mchain := chainNameRe.FindStringSubmatch(lines[0])
if mchain == nil {
return errParse
}
if !fieldsHeaderRe.MatchString(lines[1]) {
return errParse
}
for _, line := range lines[2:] {
mv := valuesRe.FindAllStringSubmatch(line, -1)
// best effort: if the line does not match or the rule has no comment, skip it
if len(mv) == 0 || len(mv[0]) != 5 || mv[0][4] == "" {
continue
}
tags := map[string]string{"table": ipt.Table, "chain": mchain[1], "ruleid": mv[0][4]}
fields := make(map[string]interface{})
// since a parse error is already caught by the regexp,
// we never enter the error case here => no error check (but still need a test to cover the case)
fields["pkts"], _ = strconv.ParseUint(mv[0][1], 10, 64)
fields["bytes"], _ = strconv.ParseUint(mv[0][2], 10, 64)
acc.AddFields(measurement, fields, tags)
}
return nil
}
type chainLister func(table, chain string) (string, error)
func init() {
inputs.Add("iptables", func() telegraf.Input {
ipt := new(Iptables)
ipt.lister = ipt.chainList
return ipt
})
}

View File

@@ -0,0 +1,3 @@
// +build !linux
package iptables

View File

@@ -0,0 +1,206 @@
// +build linux
package iptables
import (
"errors"
"reflect"
"testing"
"github.com/influxdata/telegraf/testutil"
)
func TestIptables_Gather(t *testing.T) {
tests := []struct {
table string
chains []string
values []string
tags []map[string]string
fields [][]map[string]interface{}
err error
}{
{ // 1 - no configured table => no results
values: []string{
`Chain INPUT (policy ACCEPT 58 packets, 5096 bytes)
pkts bytes target prot opt in out source destination
57 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0
`},
},
{ // 2 - no configured chains => no results
table: "filter",
values: []string{
`Chain INPUT (policy ACCEPT 58 packets, 5096 bytes)
pkts bytes target prot opt in out source destination
57 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0
`},
},
{ // 3 - pkts and bytes are gathered as integers
table: "filter",
chains: []string{"INPUT"},
values: []string{
`Chain INPUT (policy ACCEPT 58 packets, 5096 bytes)
pkts bytes target prot opt in out source destination
57 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0 /* foobar */
`},
tags: []map[string]string{map[string]string{"table": "filter", "chain": "INPUT", "ruleid": "foobar"}},
fields: [][]map[string]interface{}{
{map[string]interface{}{"pkts": uint64(57), "bytes": uint64(4520)}},
},
},
{ // 4 - missing fields header => no results
table: "filter",
chains: []string{"INPUT"},
values: []string{`Chain INPUT (policy ACCEPT 58 packets, 5096 bytes)`},
},
{ // 5 - invalid chain header => error
table: "filter",
chains: []string{"INPUT"},
values: []string{
`INPUT (policy ACCEPT 58 packets, 5096 bytes)
pkts bytes target prot opt in out source destination
57 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0
`},
err: errParse,
},
{ // 6 - invalid fields header => error
table: "filter",
chains: []string{"INPUT"},
values: []string{
`Chain INPUT (policy ACCEPT 58 packets, 5096 bytes)
57 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0
`},
err: errParse,
},
{ // 7 - invalid integer value => best effort, no error
table: "filter",
chains: []string{"INPUT"},
values: []string{
`Chain INPUT (policy ACCEPT 58 packets, 5096 bytes)
pkts bytes target prot opt in out source destination
K 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0
`},
},
{ // 8 - Multiple rows, multiple chains => no error
table: "filter",
chains: []string{"INPUT", "FORWARD"},
values: []string{
`Chain INPUT (policy ACCEPT 58 packets, 5096 bytes)
pkts bytes target prot opt in out source destination
100 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0
200 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0 /* foo */
`,
`Chain FORWARD (policy ACCEPT 58 packets, 5096 bytes)
pkts bytes target prot opt in out source destination
300 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0 /* bar */
400 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0
500 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0 /* foobar */
`,
},
tags: []map[string]string{
map[string]string{"table": "filter", "chain": "INPUT", "ruleid": "foo"},
map[string]string{"table": "filter", "chain": "FORWARD", "ruleid": "bar"},
map[string]string{"table": "filter", "chain": "FORWARD", "ruleid": "foobar"},
},
fields: [][]map[string]interface{}{
{map[string]interface{}{"pkts": uint64(200), "bytes": uint64(4520)}},
{map[string]interface{}{"pkts": uint64(300), "bytes": uint64(4520)}},
{map[string]interface{}{"pkts": uint64(500), "bytes": uint64(4520)}},
},
},
{ // 9 - comments are used as ruleid if any
table: "filter",
chains: []string{"INPUT"},
values: []string{
`Chain INPUT (policy ACCEPT 58 packets, 5096 bytes)
pkts bytes target prot opt in out source destination
57 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0 tcp dpt:22 /* foobar */
100 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0 tcp dpt:80
`},
tags: []map[string]string{
map[string]string{"table": "filter", "chain": "INPUT", "ruleid": "foobar"},
},
fields: [][]map[string]interface{}{
{map[string]interface{}{"pkts": uint64(57), "bytes": uint64(4520)}},
},
},
}
for i, tt := range tests {
i++
ipt := &Iptables{
Table: tt.table,
Chains: tt.chains,
lister: func(table, chain string) (string, error) {
if len(tt.values) > 0 {
v := tt.values[0]
tt.values = tt.values[1:]
return v, nil
}
return "", nil
},
}
acc := new(testutil.Accumulator)
err := ipt.Gather(acc)
if !reflect.DeepEqual(tt.err, err) {
t.Errorf("%d: expected error '%#v' got '%#v'", i, tt.err, err)
}
if tt.table == "" {
n := acc.NFields()
if n != 0 {
t.Errorf("%d: expected 0 fields if empty table got %d", i, n)
}
continue
}
if len(tt.chains) == 0 {
n := acc.NFields()
if n != 0 {
t.Errorf("%d: expected 0 fields if empty chains got %d", i, n)
}
continue
}
if len(tt.tags) == 0 {
n := acc.NFields()
if n != 0 {
t.Errorf("%d: expected 0 values got %d", i, n)
}
continue
}
n := 0
for j, tags := range tt.tags {
for k, fields := range tt.fields[j] {
if len(acc.Metrics) < n+1 {
t.Errorf("%d: expected at least %d values got %d", i, n+1, len(acc.Metrics))
break
}
m := acc.Metrics[n]
if !reflect.DeepEqual(m.Measurement, measurement) {
t.Errorf("%d %d %d: expected measurement '%#v' got '%#v'\n", i, j, k, measurement, m.Measurement)
}
if !reflect.DeepEqual(m.Tags, tags) {
t.Errorf("%d %d %d: expected tags\n%#v got\n%#v\n", i, j, k, tags, m.Tags)
}
if !reflect.DeepEqual(m.Fields, fields) {
t.Errorf("%d %d %d: expected fields\n%#v got\n%#v\n", i, j, k, fields, m.Fields)
}
n++
}
}
}
}
func TestIptables_Gather_listerError(t *testing.T) {
errFoo := errors.New("error foobar")
ipt := &Iptables{
Table: "nat",
Chains: []string{"foo", "bar"},
lister: func(table, chain string) (string, error) {
return "", errFoo
},
}
acc := new(testutil.Accumulator)
err := ipt.Gather(acc)
if !reflect.DeepEqual(err, errFoo) {
t.Errorf("Expected error %#v got\n%#v\n", errFoo, err)
}
}

View File

@@ -52,6 +52,7 @@ type Jolokia struct {
const sampleConfig = `
## This is the context root used to compose the jolokia url
## NOTE that your jolokia security policy must allow for POST requests.
context = "/jolokia"
## This specifies the mode used
@@ -104,7 +105,6 @@ func (j *Jolokia) Description() string {
}
func (j *Jolokia) doRequest(req *http.Request) (map[string]interface{}, error) {
resp, err := j.jClient.MakeRequest(req)
if err != nil {
return nil, err

View File

@@ -90,7 +90,7 @@ func (k *Kafka) Start(acc telegraf.Accumulator) error {
case "newest":
config.Offsets.Initial = sarama.OffsetNewest
default:
log.Printf("WARNING: Kafka consumer invalid offset '%s', using 'oldest'\n",
log.Printf("I! WARNING: Kafka consumer invalid offset '%s', using 'oldest'\n",
k.Offset)
config.Offsets.Initial = sarama.OffsetOldest
}
@@ -115,7 +115,7 @@ func (k *Kafka) Start(acc telegraf.Accumulator) error {
// Start the kafka message reader
go k.receiver()
log.Printf("Started the kafka consumer service, peers: %v, topics: %v\n",
log.Printf("I! Started the kafka consumer service, peers: %v, topics: %v\n",
k.ZookeeperPeers, k.Topics)
return nil
}
@@ -128,11 +128,13 @@ func (k *Kafka) receiver() {
case <-k.done:
return
case err := <-k.errs:
log.Printf("Kafka Consumer Error: %s\n", err.Error())
if err != nil {
log.Printf("E! Kafka Consumer Error: %s\n", err)
}
case msg := <-k.in:
metrics, err := k.parser.Parse(msg.Value)
if err != nil {
log.Printf("KAFKA PARSE ERROR\nmessage: %s\nerror: %s",
log.Printf("E! Kafka Message Parse Error\nmessage: %s\nerror: %s",
string(msg.Value), err.Error())
}
@@ -156,7 +158,7 @@ func (k *Kafka) Stop() {
defer k.Unlock()
close(k.done)
if err := k.Consumer.Close(); err != nil {
log.Printf("Error closing kafka consumer: %s\n", err.Error())
log.Printf("E! Error closing kafka consumer: %s\n", err.Error())
}
}

View File

@@ -43,7 +43,7 @@ func TestRunParser(t *testing.T) {
k.parser, _ = parsers.NewInfluxParser()
go k.receiver()
in <- saramaMsg(testMsg)
time.Sleep(time.Millisecond)
time.Sleep(time.Millisecond * 5)
assert.Equal(t, acc.NFields(), 1)
}
@@ -58,7 +58,7 @@ func TestRunParserInvalidMsg(t *testing.T) {
k.parser, _ = parsers.NewInfluxParser()
go k.receiver()
in <- saramaMsg(invalidMsg)
time.Sleep(time.Millisecond)
time.Sleep(time.Millisecond * 5)
assert.Equal(t, acc.NFields(), 0)
}
@@ -73,7 +73,7 @@ func TestRunParserAndGather(t *testing.T) {
k.parser, _ = parsers.NewInfluxParser()
go k.receiver()
in <- saramaMsg(testMsg)
time.Sleep(time.Millisecond)
time.Sleep(time.Millisecond * 5)
k.Gather(&acc)
@@ -92,7 +92,7 @@ func TestRunParserAndGatherGraphite(t *testing.T) {
k.parser, _ = parsers.NewGraphiteParser("_", []string{}, nil)
go k.receiver()
in <- saramaMsg(testMsgGraphite)
time.Sleep(time.Millisecond)
time.Sleep(time.Millisecond * 5)
k.Gather(&acc)
@@ -111,7 +111,7 @@ func TestRunParserAndGatherJSON(t *testing.T) {
k.parser, _ = parsers.NewJSONParser("kafka_json_test", []string{}, nil)
go k.receiver()
in <- saramaMsg(testMsgJSON)
time.Sleep(time.Millisecond)
time.Sleep(time.Millisecond * 5)
k.Gather(&acc)

View File

@@ -0,0 +1,265 @@
# Kubernetes Input Plugin
**This plugin is experimental and may cause high cardinality issues with moderate to large Kubernetes deployments**
This input plugin talks to the kubelet api using the `/stats/summary` endpoint to gather metrics about the running pods and containers for a single host. It is assumed that this plugin is running as part of a `daemonset` within a kubernetes installation. This means that telegraf is running on every node within the cluster. Therefore, you should configure this plugin to talk to its locally running kubelet.
To find the ip address of the host you are running on you can issue a command like the following:
```
$ curl -s $API_URL/api/v1/namespaces/$POD_NAMESPACE/pods/$HOSTNAME --header "Authorization: Bearer $TOKEN" --insecure | jq -r '.status.hostIP'
```
In this case we used the downward API to pass in `$POD_NAMESPACE`; `$HOSTNAME` is the hostname of the pod, which is set by the kubernetes API.
## Summary Data
```json
{
"node": {
"nodeName": "node1",
"systemContainers": [
{
"name": "kubelet",
"startTime": "2016-08-25T18:46:52Z",
"cpu": {
"time": "2016-09-27T16:57:31Z",
"usageNanoCores": 56652446,
"usageCoreNanoSeconds": 101437561712262
},
"memory": {
"time": "2016-09-27T16:57:31Z",
"usageBytes": 62529536,
"workingSetBytes": 62349312,
"rssBytes": 47509504,
"pageFaults": 4769397409,
"majorPageFaults": 13
},
"rootfs": {
"availableBytes": 84379979776,
"capacityBytes": 105553100800
},
"logs": {
"availableBytes": 84379979776,
"capacityBytes": 105553100800
},
"userDefinedMetrics": null
},
{
"name": "bar",
"startTime": "2016-08-25T18:46:52Z",
"cpu": {
"time": "2016-09-27T16:57:31Z",
"usageNanoCores": 56652446,
"usageCoreNanoSeconds": 101437561712262
},
"memory": {
"time": "2016-09-27T16:57:31Z",
"usageBytes": 62529536,
"workingSetBytes": 62349312,
"rssBytes": 47509504,
"pageFaults": 4769397409,
"majorPageFaults": 13
},
"rootfs": {
"availableBytes": 84379979776,
"capacityBytes": 105553100800
},
"logs": {
"availableBytes": 84379979776,
"capacityBytes": 105553100800
},
"userDefinedMetrics": null
}
],
"startTime": "2016-08-25T18:46:52Z",
"cpu": {
"time": "2016-09-27T16:57:41Z",
"usageNanoCores": 576996212,
"usageCoreNanoSeconds": 774129887054161
},
"memory": {
"time": "2016-09-27T16:57:41Z",
"availableBytes": 10726387712,
"usageBytes": 12313182208,
"workingSetBytes": 5081538560,
"rssBytes": 35586048,
"pageFaults": 351742,
"majorPageFaults": 1236
},
"network": {
"time": "2016-09-27T16:57:41Z",
"rxBytes": 213281337459,
"rxErrors": 0,
"txBytes": 292869995684,
"txErrors": 0
},
"fs": {
"availableBytes": 84379979776,
"capacityBytes": 105553100800,
"usedBytes": 16754286592
},
"runtime": {
"imageFs": {
"availableBytes": 84379979776,
"capacityBytes": 105553100800,
"usedBytes": 5809371475
}
}
},
"pods": [
{
"podRef": {
"name": "foopod",
"namespace": "foons",
"uid": "6d305b06-8419-11e6-825c-42010af000ae"
},
"startTime": "2016-09-26T18:45:42Z",
"containers": [
{
"name": "foocontainer",
"startTime": "2016-09-26T18:46:43Z",
"cpu": {
"time": "2016-09-27T16:57:32Z",
"usageNanoCores": 846503,
"usageCoreNanoSeconds": 56507553554
},
"memory": {
"time": "2016-09-27T16:57:32Z",
"usageBytes": 30789632,
"workingSetBytes": 30789632,
"rssBytes": 30695424,
"pageFaults": 10761,
"majorPageFaults": 0
},
"rootfs": {
"availableBytes": 84379979776,
"capacityBytes": 105553100800,
"usedBytes": 57344
},
"logs": {
"availableBytes": 84379979776,
"capacityBytes": 105553100800,
"usedBytes": 24576
},
"userDefinedMetrics": null
}
],
"network": {
"time": "2016-09-27T16:57:34Z",
"rxBytes": 70749124,
"rxErrors": 0,
"txBytes": 47813506,
"txErrors": 0
},
"volume": [
{
"availableBytes": 7903948800,
"capacityBytes": 7903961088,
"usedBytes": 12288,
"name": "volume1"
},
{
"availableBytes": 7903956992,
"capacityBytes": 7903961088,
"usedBytes": 4096,
"name": "volume2"
},
{
"availableBytes": 7903948800,
"capacityBytes": 7903961088,
"usedBytes": 12288,
"name": "volume3"
},
{
"availableBytes": 7903952896,
"capacityBytes": 7903961088,
"usedBytes": 8192,
"name": "volume4"
}
]
}
]
}
```
### Daemonset YAML
```yaml
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: telegraf
namespace: telegraf
spec:
template:
metadata:
labels:
app: telegraf
spec:
serviceAccount: telegraf
containers:
- name: telegraf
image: quay.io/org/image:latest
imagePullPolicy: IfNotPresent
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: "HOST_PROC"
value: "/rootfs/proc"
- name: "HOST_SYS"
value: "/rootfs/sys"
volumeMounts:
- name: sysro
mountPath: /rootfs/sys
readOnly: true
- name: procro
mountPath: /rootfs/proc
readOnly: true
- name: varrunutmpro
mountPath: /var/run/utmp
readOnly: true
- name: logger-redis-creds
mountPath: /var/run/secrets/deis/redis/creds
volumes:
- name: sysro
hostPath:
path: /sys
- name: procro
hostPath:
path: /proc
- name: varrunutmpro
hostPath:
path: /var/run/utmp
```
### Line Protocol
#### kubernetes_pod_container
```
kubernetes_pod_container,host=ip-10-0-0-0.ec2.internal,
container_name=deis-controller,namespace=deis,
node_name=ip-10-0-0-0.ec2.internal, pod_name=deis-controller-3058870187-xazsr, cpu_usage_core_nanoseconds=2432835i,cpu_usage_nanocores=0i,
logsfs_avaialble_bytes=121128271872i,logsfs_capacity_bytes=153567944704i,
logsfs_used_bytes=20787200i,memory_major_page_faults=0i,
memory_page_faults=175i,memory_rss_bytes=0i,
memory_usage_bytes=0i,memory_working_set_bytes=0i,
rootfs_available_bytes=121128271872i,rootfs_capacity_bytes=153567944704i,
rootfs_used_bytes=1110016i 1476477530000000000
```
#### kubernetes_pod_volume
```
kubernetes_pod_volume,host=ip-10-0-0-0.ec2.internal,name=default-token-f7wts,
namespace=kube-system,node_name=ip-10-0-0-0.ec2.internal,
pod_name=kubernetes-dashboard-v1.1.1-t4x4t, available_bytes=8415240192i,
capacity_bytes=8415252480i,used_bytes=12288i 1476477530000000000
```
#### kubernetes_pod_network
```
kubernetes_pod_network,host=ip-10-0-0-0.ec2.internal,namespace=deis,
node_name=ip-10-0-0-0.ec2.internal,pod_name=deis-controller-3058870187-xazsr,
rx_bytes=120671099i,rx_errors=0i,
tx_bytes=102451983i,tx_errors=0i 1476477530000000000
```

View File

@@ -0,0 +1,242 @@
package kubernetes
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"sync"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/internal/errchan"
"github.com/influxdata/telegraf/plugins/inputs"
)
// Kubernetes represents the config object for the plugin
type Kubernetes struct {
URL string
// Bearer Token authorization file path
BearerToken string `toml:"bearer_token"`
// Path to CA file
SSLCA string `toml:"ssl_ca"`
// Path to host cert file
SSLCert string `toml:"ssl_cert"`
// Path to cert key file
SSLKey string `toml:"ssl_key"`
// Use SSL but skip chain & host verification
InsecureSkipVerify bool
RoundTripper http.RoundTripper
}
var sampleConfig = `
## URL for the kubelet
url = "http://1.1.1.1:10255"
## Use bearer token for authorization
# bearer_token = /path/to/bearer/token
## Optional SSL Config
# ssl_ca = /path/to/cafile
# ssl_cert = /path/to/certfile
# ssl_key = /path/to/keyfile
## Use SSL but skip chain & host verification
# insecure_skip_verify = false
`
const (
summaryEndpoint = `%s/stats/summary`
)
func init() {
inputs.Add("kubernetes", func() telegraf.Input {
return &Kubernetes{}
})
}
//SampleConfig returns a sample config
func (k *Kubernetes) SampleConfig() string {
return sampleConfig
}
//Description returns the description of this plugin
func (k *Kubernetes) Description() string {
return "Read metrics from the kubernetes kubelet api"
}
//Gather collects kubernetes metrics from a given URL
func (k *Kubernetes) Gather(acc telegraf.Accumulator) error {
var wg sync.WaitGroup
errChan := errchan.New(1)
wg.Add(1)
go func(k *Kubernetes) {
defer wg.Done()
errChan.C <- k.gatherSummary(k.URL, acc)
}(k)
wg.Wait()
return errChan.Error()
}
func buildURL(endpoint string, base string) (*url.URL, error) {
u := fmt.Sprintf(endpoint, base)
addr, err := url.Parse(u)
if err != nil {
return nil, fmt.Errorf("Unable to parse address '%s': %s", u, err)
}
return addr, nil
}
func (k *Kubernetes) gatherSummary(baseURL string, acc telegraf.Accumulator) error {
url := fmt.Sprintf("%s/stats/summary", baseURL)
var req, err = http.NewRequest("GET", url, nil)
var token []byte
var resp *http.Response
tlsCfg, err := internal.GetTLSConfig(k.SSLCert, k.SSLKey, k.SSLCA, k.InsecureSkipVerify)
if err != nil {
return err
}
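// Build the transport only once and reuse it for subsequent Gather calls.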
if k.RoundTripper == nil {
k.RoundTripper = &http.Transport{
TLSHandshakeTimeout: 5 * time.Second,
TLSClientConfig: tlsCfg,
ResponseHeaderTimeout: time.Duration(3 * time.Second),
}
}
if k.BearerToken != "" {
token, err = ioutil.ReadFile(k.BearerToken)
if err != nil {
return err
}
req.Header.Set("Authorization", "Bearer "+string(token))
}
resp, err = k.RoundTripper.RoundTrip(req)
if err != nil {
return fmt.Errorf("error making HTTP request to %s: %s", url, err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("%s returned HTTP status %s", url, resp.Status)
}
summaryMetrics := &SummaryMetrics{}
err = json.NewDecoder(resp.Body).Decode(summaryMetrics)
if err != nil {
return fmt.Errorf(`Error parsing response: %s`, err)
}
buildSystemContainerMetrics(summaryMetrics, acc)
buildNodeMetrics(summaryMetrics, acc)
buildPodMetrics(summaryMetrics, acc)
return nil
}
func buildSystemContainerMetrics(summaryMetrics *SummaryMetrics, acc telegraf.Accumulator) {
for _, container := range summaryMetrics.Node.SystemContainers {
tags := map[string]string{
"node_name": summaryMetrics.Node.NodeName,
"container_name": container.Name,
}
fields := make(map[string]interface{})
fields["cpu_usage_nanocores"] = container.CPU.UsageNanoCores
fields["cpu_usage_core_nanoseconds"] = container.CPU.UsageCoreNanoSeconds
fields["memory_usage_bytes"] = container.Memory.UsageBytes
fields["memory_working_set_bytes"] = container.Memory.WorkingSetBytes
fields["memory_rss_bytes"] = container.Memory.RSSBytes
fields["memory_page_faults"] = container.Memory.PageFaults
fields["memory_major_page_faults"] = container.Memory.MajorPageFaults
fields["rootfs_available_bytes"] = container.RootFS.AvailableBytes
fields["rootfs_capacity_bytes"] = container.RootFS.CapacityBytes
fields["logsfs_avaialble_bytes"] = container.LogsFS.AvailableBytes
fields["logsfs_capacity_bytes"] = container.LogsFS.CapacityBytes
acc.AddFields("kubernetes_system_container", fields, tags)
}
}
func buildNodeMetrics(summaryMetrics *SummaryMetrics, acc telegraf.Accumulator) {
tags := map[string]string{
"node_name": summaryMetrics.Node.NodeName,
}
fields := make(map[string]interface{})
fields["cpu_usage_nanocores"] = summaryMetrics.Node.CPU.UsageNanoCores
fields["cpu_usage_core_nanoseconds"] = summaryMetrics.Node.CPU.UsageCoreNanoSeconds
fields["memory_available_bytes"] = summaryMetrics.Node.Memory.AvailableBytes
fields["memory_usage_bytes"] = summaryMetrics.Node.Memory.UsageBytes
fields["memory_working_set_bytes"] = summaryMetrics.Node.Memory.WorkingSetBytes
fields["memory_rss_bytes"] = summaryMetrics.Node.Memory.RSSBytes
fields["memory_page_faults"] = summaryMetrics.Node.Memory.PageFaults
fields["memory_major_page_faults"] = summaryMetrics.Node.Memory.MajorPageFaults
fields["network_rx_bytes"] = summaryMetrics.Node.Network.RXBytes
fields["network_rx_errors"] = summaryMetrics.Node.Network.RXErrors
fields["network_tx_bytes"] = summaryMetrics.Node.Network.TXBytes
fields["network_tx_errors"] = summaryMetrics.Node.Network.TXErrors
fields["fs_available_bytes"] = summaryMetrics.Node.FileSystem.AvailableBytes
fields["fs_capacity_bytes"] = summaryMetrics.Node.FileSystem.CapacityBytes
fields["fs_used_bytes"] = summaryMetrics.Node.FileSystem.UsedBytes
fields["runtime_image_fs_available_bytes"] = summaryMetrics.Node.Runtime.ImageFileSystem.AvailableBytes
fields["runtime_image_fs_capacity_bytes"] = summaryMetrics.Node.Runtime.ImageFileSystem.CapacityBytes
fields["runtime_image_fs_used_bytes"] = summaryMetrics.Node.Runtime.ImageFileSystem.UsedBytes
acc.AddFields("kubernetes_node", fields, tags)
}
func buildPodMetrics(summaryMetrics *SummaryMetrics, acc telegraf.Accumulator) {
for _, pod := range summaryMetrics.Pods {
for _, container := range pod.Containers {
tags := map[string]string{
"node_name": summaryMetrics.Node.NodeName,
"namespace": pod.PodRef.Namespace,
"container_name": container.Name,
"pod_name": pod.PodRef.Name,
}
fields := make(map[string]interface{})
fields["cpu_usage_nanocores"] = container.CPU.UsageNanoCores
fields["cpu_usage_core_nanoseconds"] = container.CPU.UsageCoreNanoSeconds
fields["memory_usage_bytes"] = container.Memory.UsageBytes
fields["memory_working_set_bytes"] = container.Memory.WorkingSetBytes
fields["memory_rss_bytes"] = container.Memory.RSSBytes
fields["memory_page_faults"] = container.Memory.PageFaults
fields["memory_major_page_faults"] = container.Memory.MajorPageFaults
fields["rootfs_available_bytes"] = container.RootFS.AvailableBytes
fields["rootfs_capacity_bytes"] = container.RootFS.CapacityBytes
fields["rootfs_used_bytes"] = container.RootFS.UsedBytes
fields["logsfs_avaialble_bytes"] = container.LogsFS.AvailableBytes
fields["logsfs_capacity_bytes"] = container.LogsFS.CapacityBytes
fields["logsfs_used_bytes"] = container.LogsFS.UsedBytes
acc.AddFields("kubernetes_pod_container", fields, tags)
}
for _, volume := range pod.Volumes {
tags := map[string]string{
"node_name": summaryMetrics.Node.NodeName,
"pod_name": pod.PodRef.Name,
"namespace": pod.PodRef.Namespace,
"volume_name": volume.Name,
}
fields := make(map[string]interface{})
fields["available_bytes"] = volume.AvailableBytes
fields["capacity_bytes"] = volume.CapacityBytes
fields["used_bytes"] = volume.UsedBytes
acc.AddFields("kubernetes_pod_volume", fields, tags)
}
tags := map[string]string{
"node_name": summaryMetrics.Node.NodeName,
"pod_name": pod.PodRef.Name,
"namespace": pod.PodRef.Namespace,
}
fields := make(map[string]interface{})
fields["rx_bytes"] = pod.Network.RXBytes
fields["rx_errors"] = pod.Network.RXErrors
fields["tx_bytes"] = pod.Network.TXBytes
fields["tx_errors"] = pod.Network.TXErrors
acc.AddFields("kubernetes_pod_network", fields, tags)
}
}

View File

@@ -0,0 +1,93 @@
package kubernetes
import "time"
// SummaryMetrics represents all the summary data about a particular node retrieved from a kubelet
type SummaryMetrics struct {
Node NodeMetrics `json:"node"`
Pods []PodMetrics `json:"pods"`
}
// NodeMetrics represents detailed information about a node
type NodeMetrics struct {
NodeName string `json:"nodeName"`
SystemContainers []ContainerMetrics `json:"systemContainers"`
StartTime time.Time `json:"startTime"`
CPU CPUMetrics `json:"cpu"`
Memory MemoryMetrics `json:"memory"`
Network NetworkMetrics `json:"network"`
FileSystem FileSystemMetrics `json:"fs"`
Runtime RuntimeMetrics `json:"runtime"`
}
// ContainerMetrics represents the metric data collected about a container from the kubelet
type ContainerMetrics struct {
Name string `json:"name"`
StartTime time.Time `json:"startTime"`
CPU CPUMetrics `json:"cpu"`
Memory MemoryMetrics `json:"memory"`
RootFS FileSystemMetrics `json:"rootfs"`
LogsFS FileSystemMetrics `json:"logs"`
}
// RuntimeMetrics contains metric data on the runtime of the system
type RuntimeMetrics struct {
ImageFileSystem FileSystemMetrics `json:"imageFs"`
}
// CPUMetrics represents the cpu usage data of a pod or node
type CPUMetrics struct {
Time time.Time `json:"time"`
UsageNanoCores int64 `json:"usageNanoCores"`
UsageCoreNanoSeconds int64 `json:"usageCoreNanoSeconds"`
}
// PodMetrics contains metric data on a given pod
type PodMetrics struct {
PodRef PodReference `json:"podRef"`
StartTime time.Time `json:"startTime"`
Containers []ContainerMetrics `json:"containers"`
Network NetworkMetrics `json:"network"`
Volumes []VolumeMetrics `json:"volume"`
}
// PodReference is how a pod is identified
type PodReference struct {
Name string `json:"name"`
Namespace string `json:"namespace"`
}
// MemoryMetrics represents the memory metrics for a pod or node
type MemoryMetrics struct {
Time time.Time `json:"time"`
AvailableBytes int64 `json:"availableBytes"`
UsageBytes int64 `json:"usageBytes"`
WorkingSetBytes int64 `json:"workingSetBytes"`
RSSBytes int64 `json:"rssBytes"`
PageFaults int64 `json:"pageFaults"`
MajorPageFaults int64 `json:"majorPageFaults"`
}
// FileSystemMetrics represents disk usage metrics for a pod or node
type FileSystemMetrics struct {
AvailableBytes int64 `json:"availableBytes"`
CapacityBytes int64 `json:"capacityBytes"`
UsedBytes int64 `json:"usedBytes"`
}
// NetworkMetrics represents network usage data for a pod or node
type NetworkMetrics struct {
Time time.Time `json:"time"`
RXBytes int64 `json:"rxBytes"`
RXErrors int64 `json:"rxErrors"`
TXBytes int64 `json:"txBytes"`
TXErrors int64 `json:"txErrors"`
}
// VolumeMetrics represents the disk usage data for a given volume
type VolumeMetrics struct {
Name string `json:"name"`
AvailableBytes int64 `json:"availableBytes"`
CapacityBytes int64 `json:"capacityBytes"`
UsedBytes int64 `json:"usedBytes"`
}

View File

@@ -0,0 +1,289 @@
package kubernetes
import (
"fmt"
"net/http"
"net/http/httptest"
"testing"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/require"
)
func TestKubernetesStats(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
fmt.Fprintln(w, response)
}))
defer ts.Close()
k := &Kubernetes{
URL: ts.URL,
}
var acc testutil.Accumulator
err := k.Gather(&acc)
require.NoError(t, err)
fields := map[string]interface{}{
"cpu_usage_nanocores": int64(56652446),
"cpu_usage_core_nanoseconds": int64(101437561712262),
"memory_usage_bytes": int64(62529536),
"memory_working_set_bytes": int64(62349312),
"memory_rss_bytes": int64(47509504),
"memory_page_faults": int64(4769397409),
"memory_major_page_faults": int64(13),
"rootfs_available_bytes": int64(84379979776),
"rootfs_capacity_bytes": int64(105553100800),
"logsfs_avaialble_bytes": int64(84379979776),
"logsfs_capacity_bytes": int64(105553100800),
}
tags := map[string]string{
"node_name": "node1",
"container_name": "kubelet",
}
acc.AssertContainsTaggedFields(t, "kubernetes_system_container", fields, tags)
fields = map[string]interface{}{
"cpu_usage_nanocores": int64(576996212),
"cpu_usage_core_nanoseconds": int64(774129887054161),
"memory_usage_bytes": int64(12313182208),
"memory_working_set_bytes": int64(5081538560),
"memory_rss_bytes": int64(35586048),
"memory_page_faults": int64(351742),
"memory_major_page_faults": int64(1236),
"memory_available_bytes": int64(10726387712),
"network_rx_bytes": int64(213281337459),
"network_rx_errors": int64(0),
"network_tx_bytes": int64(292869995684),
"network_tx_errors": int64(0),
"fs_available_bytes": int64(84379979776),
"fs_capacity_bytes": int64(105553100800),
"fs_used_bytes": int64(16754286592),
"runtime_image_fs_available_bytes": int64(84379979776),
"runtime_image_fs_capacity_bytes": int64(105553100800),
"runtime_image_fs_used_bytes": int64(5809371475),
}
tags = map[string]string{
"node_name": "node1",
}
acc.AssertContainsTaggedFields(t, "kubernetes_node", fields, tags)
fields = map[string]interface{}{
"cpu_usage_nanocores": int64(846503),
"cpu_usage_core_nanoseconds": int64(56507553554),
"memory_usage_bytes": int64(30789632),
"memory_working_set_bytes": int64(30789632),
"memory_rss_bytes": int64(30695424),
"memory_page_faults": int64(10761),
"memory_major_page_faults": int64(0),
"rootfs_available_bytes": int64(84379979776),
"rootfs_capacity_bytes": int64(105553100800),
"rootfs_used_bytes": int64(57344),
"logsfs_avaialble_bytes": int64(84379979776),
"logsfs_capacity_bytes": int64(105553100800),
"logsfs_used_bytes": int64(24576),
}
tags = map[string]string{
"node_name": "node1",
"container_name": "foocontainer",
"namespace": "foons",
"pod_name": "foopod",
}
acc.AssertContainsTaggedFields(t, "kubernetes_pod_container", fields, tags)
fields = map[string]interface{}{
"available_bytes": int64(7903948800),
"capacity_bytes": int64(7903961088),
"used_bytes": int64(12288),
}
tags = map[string]string{
"node_name": "node1",
"volume_name": "volume1",
"namespace": "foons",
"pod_name": "foopod",
}
acc.AssertContainsTaggedFields(t, "kubernetes_pod_volume", fields, tags)
fields = map[string]interface{}{
"rx_bytes": int64(70749124),
"rx_errors": int64(0),
"tx_bytes": int64(47813506),
"tx_errors": int64(0),
}
tags = map[string]string{
"node_name": "node1",
"namespace": "foons",
"pod_name": "foopod",
}
acc.AssertContainsTaggedFields(t, "kubernetes_pod_network", fields, tags)
}
var response = `
{
"node": {
"nodeName": "node1",
"systemContainers": [
{
"name": "kubelet",
"startTime": "2016-08-25T18:46:52Z",
"cpu": {
"time": "2016-09-27T16:57:31Z",
"usageNanoCores": 56652446,
"usageCoreNanoSeconds": 101437561712262
},
"memory": {
"time": "2016-09-27T16:57:31Z",
"usageBytes": 62529536,
"workingSetBytes": 62349312,
"rssBytes": 47509504,
"pageFaults": 4769397409,
"majorPageFaults": 13
},
"rootfs": {
"availableBytes": 84379979776,
"capacityBytes": 105553100800
},
"logs": {
"availableBytes": 84379979776,
"capacityBytes": 105553100800
},
"userDefinedMetrics": null
},
{
"name": "bar",
"startTime": "2016-08-25T18:46:52Z",
"cpu": {
"time": "2016-09-27T16:57:31Z",
"usageNanoCores": 56652446,
"usageCoreNanoSeconds": 101437561712262
},
"memory": {
"time": "2016-09-27T16:57:31Z",
"usageBytes": 62529536,
"workingSetBytes": 62349312,
"rssBytes": 47509504,
"pageFaults": 4769397409,
"majorPageFaults": 13
},
"rootfs": {
"availableBytes": 84379979776,
"capacityBytes": 105553100800
},
"logs": {
"availableBytes": 84379979776,
"capacityBytes": 105553100800
},
"userDefinedMetrics": null
}
],
"startTime": "2016-08-25T18:46:52Z",
"cpu": {
"time": "2016-09-27T16:57:41Z",
"usageNanoCores": 576996212,
"usageCoreNanoSeconds": 774129887054161
},
"memory": {
"time": "2016-09-27T16:57:41Z",
"availableBytes": 10726387712,
"usageBytes": 12313182208,
"workingSetBytes": 5081538560,
"rssBytes": 35586048,
"pageFaults": 351742,
"majorPageFaults": 1236
},
"network": {
"time": "2016-09-27T16:57:41Z",
"rxBytes": 213281337459,
"rxErrors": 0,
"txBytes": 292869995684,
"txErrors": 0
},
"fs": {
"availableBytes": 84379979776,
"capacityBytes": 105553100800,
"usedBytes": 16754286592
},
"runtime": {
"imageFs": {
"availableBytes": 84379979776,
"capacityBytes": 105553100800,
"usedBytes": 5809371475
}
}
},
"pods": [
{
"podRef": {
"name": "foopod",
"namespace": "foons",
"uid": "6d305b06-8419-11e6-825c-42010af000ae"
},
"startTime": "2016-09-26T18:45:42Z",
"containers": [
{
"name": "foocontainer",
"startTime": "2016-09-26T18:46:43Z",
"cpu": {
"time": "2016-09-27T16:57:32Z",
"usageNanoCores": 846503,
"usageCoreNanoSeconds": 56507553554
},
"memory": {
"time": "2016-09-27T16:57:32Z",
"usageBytes": 30789632,
"workingSetBytes": 30789632,
"rssBytes": 30695424,
"pageFaults": 10761,
"majorPageFaults": 0
},
"rootfs": {
"availableBytes": 84379979776,
"capacityBytes": 105553100800,
"usedBytes": 57344
},
"logs": {
"availableBytes": 84379979776,
"capacityBytes": 105553100800,
"usedBytes": 24576
},
"userDefinedMetrics": null
}
],
"network": {
"time": "2016-09-27T16:57:34Z",
"rxBytes": 70749124,
"rxErrors": 0,
"txBytes": 47813506,
"txErrors": 0
},
"volume": [
{
"availableBytes": 7903948800,
"capacityBytes": 7903961088,
"usedBytes": 12288,
"name": "volume1"
},
{
"availableBytes": 7903956992,
"capacityBytes": 7903961088,
"usedBytes": 4096,
"name": "volume2"
},
{
"availableBytes": 7903948800,
"capacityBytes": 7903961088,
"usedBytes": 12288,
"name": "volume3"
},
{
"availableBytes": 7903952896,
"capacityBytes": 7903961088,
"usedBytes": 8192,
"name": "volume4"
}
]
}
]
}`
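The listing above begins partway through the kubernetes input's test file, so the setup that feeds this `response` fixture to the plugin is not shown. For orientation, here is a minimal editor's sketch of such a harness, not part of the diff; it assumes the plugin type is `Kubernetes` with a `URL` field and a `Gather` method, and uses the repository's `testutil.Accumulator` and `require` helpers.

```go
package kubernetes

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"testing"

	"github.com/influxdata/telegraf/testutil"
	"github.com/stretchr/testify/require"
)

// Sketch: serve the canned /stats/summary payload and run one Gather cycle,
// after which assertions like AssertContainsTaggedFields above can be made.
func TestKubernetesStatsSketch(t *testing.T) {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, response) // `response` is the JSON fixture defined above
	}))
	defer ts.Close()

	k := &Kubernetes{URL: ts.URL} // assumed field name

	var acc testutil.Accumulator
	require.NoError(t, k.Gather(&acc))
}
```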


@@ -202,21 +202,21 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) {
case INT:
iv, err := strconv.ParseInt(v, 10, 64)
if err != nil {
log.Printf("ERROR parsing %s to int: %s", v, err)
log.Printf("E! Error parsing %s to int: %s", v, err)
} else {
fields[k] = iv
}
case FLOAT:
fv, err := strconv.ParseFloat(v, 64)
if err != nil {
log.Printf("ERROR parsing %s to float: %s", v, err)
log.Printf("E! Error parsing %s to float: %s", v, err)
} else {
fields[k] = fv
}
case DURATION:
d, err := time.ParseDuration(v)
if err != nil {
log.Printf("ERROR parsing %s to duration: %s", v, err)
log.Printf("E! Error parsing %s to duration: %s", v, err)
} else {
fields[k] = int64(d)
}
@@ -227,14 +227,14 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) {
case EPOCH:
iv, err := strconv.ParseInt(v, 10, 64)
if err != nil {
log.Printf("ERROR parsing %s to int: %s", v, err)
log.Printf("E! Error parsing %s to int: %s", v, err)
} else {
timestamp = time.Unix(iv, 0)
}
case EPOCH_NANO:
iv, err := strconv.ParseInt(v, 10, 64)
if err != nil {
log.Printf("ERROR parsing %s to int: %s", v, err)
log.Printf("E! Error parsing %s to int: %s", v, err)
} else {
timestamp = time.Unix(0, iv)
}
@@ -265,7 +265,7 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) {
// if we still haven't found a timestamp layout, log it and we will
// just use time.Now()
if !foundTs {
log.Printf("ERROR parsing timestamp [%s], could not find any "+
log.Printf("E! Error parsing timestamp [%s], could not find any "+
"suitable time layouts.", v)
}
case DROP:
@@ -275,7 +275,7 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) {
if err == nil {
timestamp = ts
} else {
log.Printf("ERROR parsing %s to time layout [%s]: %s", v, t, err)
log.Printf("E! Error parsing %s to time layout [%s]: %s", v, t, err)
}
}
}
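Several hunks in this compare, including the one above, replace bare "ERROR" and debug prints with level-prefixed log messages. As a hedged illustration of the convention (assumption: Telegraf's agent treats the leading "E!", "W!", "I!", "D!" markers as log levels when filtering output), a small standalone sketch:

```go
package main

import "log"

// Sketch of the log-level prefix convention these diffs migrate to:
// "E!" error, "W!" warning, "I!" info, "D!" debug. Debug-prefixed lines are
// assumed to be shown only when the agent runs with debug logging enabled.
func main() {
	log.Printf("E! Error parsing %q to int: not a number", "abc")
	log.Printf("I! [mesos] Missing timeout value, setting default value (100ms)")
	log.Printf("D! Request URL: %s", "https://example.invalid/3.0/reports") // placeholder URL
}
```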


@@ -152,6 +152,31 @@ func TestBuiltinCommonLogFormat(t *testing.T) {
assert.Equal(t, map[string]string{"verb": "GET", "resp_code": "200"}, m.Tags())
}
// common log format
// 127.0.0.1 user1234 frank1234 [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326
func TestBuiltinCommonLogFormatWithNumbers(t *testing.T) {
p := &Parser{
Patterns: []string{"%{COMMON_LOG_FORMAT}"},
}
assert.NoError(t, p.Compile())
// Parse an influxdb POST request
m, err := p.ParseLine(`127.0.0.1 user1234 frank1234 [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326`)
require.NotNil(t, m)
assert.NoError(t, err)
assert.Equal(t,
map[string]interface{}{
"resp_bytes": int64(2326),
"auth": "frank1234",
"client_ip": "127.0.0.1",
"http_version": float64(1.0),
"ident": "user1234",
"request": "/apache_pb.gif",
},
m.Fields())
assert.Equal(t, map[string]string{"verb": "GET", "resp_code": "200"}, m.Tags())
}
// combined log format
// 127.0.0.1 user-identifier frank [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326 "-" "Mozilla"
func TestBuiltinCombinedLogFormat(t *testing.T) {


@@ -53,7 +53,7 @@ RESPONSE_TIME %{DURATION:response_time_ns:duration}
EXAMPLE_LOG \[%{HTTPDATE:ts:ts-httpd}\] %{NUMBER:myfloat:float} %{RESPONSE_CODE} %{IPORHOST:clientip} %{RESPONSE_TIME}
# Wider-ranging username matching vs. logstash built-in %{USER}
NGUSERNAME [a-zA-Z\.\@\-\+_%]+
NGUSERNAME [a-zA-Z0-9\.\@\-\+_%]+
NGUSER %{NGUSERNAME}
# Wider-ranging client IP matching
CLIENT (?:%{IPORHOST}|%{HOSTPORT}|::1)
@@ -64,7 +64,7 @@ CLIENT (?:%{IPORHOST}|%{HOSTPORT}|::1)
# apache & nginx logs, this is also known as the "common log format"
# see https://en.wikipedia.org/wiki/Common_Log_Format
COMMON_LOG_FORMAT %{CLIENT:client_ip} %{NGUSER:ident} %{NGUSER:auth} \[%{HTTPDATE:ts:ts-httpd}\] "(?:%{WORD:verb:tag} %{NOTSPACE:request}(?: HTTP/%{NUMBER:http_version:float})?|%{DATA})" %{NUMBER:resp_code:tag} (?:%{NUMBER:resp_bytes:int}|-)
COMMON_LOG_FORMAT %{CLIENT:client_ip} %{NOTSPACE:ident} %{NOTSPACE:auth} \[%{HTTPDATE:ts:ts-httpd}\] "(?:%{WORD:verb:tag} %{NOTSPACE:request}(?: HTTP/%{NUMBER:http_version:float})?|%{DATA})" %{NUMBER:resp_code:tag} (?:%{NUMBER:resp_bytes:int}|-)
# Combined log format is the same as the common log format but with the addition
# of two quoted strings at the end for "referrer" and "agent"


@@ -49,7 +49,7 @@ RESPONSE_TIME %{DURATION:response_time_ns:duration}
EXAMPLE_LOG \[%{HTTPDATE:ts:ts-httpd}\] %{NUMBER:myfloat:float} %{RESPONSE_CODE} %{IPORHOST:clientip} %{RESPONSE_TIME}
# Wider-ranging username matching vs. logstash built-in %{USER}
NGUSERNAME [a-zA-Z\.\@\-\+_%]+
NGUSERNAME [a-zA-Z0-9\.\@\-\+_%]+
NGUSER %{NGUSERNAME}
# Wider-ranging client IP matching
CLIENT (?:%{IPORHOST}|%{HOSTPORT}|::1)
@@ -60,7 +60,7 @@ CLIENT (?:%{IPORHOST}|%{HOSTPORT}|::1)
# apache & nginx logs, this is also known as the "common log format"
# see https://en.wikipedia.org/wiki/Common_Log_Format
COMMON_LOG_FORMAT %{CLIENT:client_ip} %{NGUSER:ident} %{NGUSER:auth} \[%{HTTPDATE:ts:ts-httpd}\] "(?:%{WORD:verb:tag} %{NOTSPACE:request}(?: HTTP/%{NUMBER:http_version:float})?|%{DATA})" %{NUMBER:resp_code:tag} (?:%{NUMBER:resp_bytes:int}|-)
COMMON_LOG_FORMAT %{CLIENT:client_ip} %{NOTSPACE:ident} %{NOTSPACE:auth} \[%{HTTPDATE:ts:ts-httpd}\] "(?:%{WORD:verb:tag} %{NOTSPACE:request}(?: HTTP/%{NUMBER:http_version:float})?|%{DATA})" %{NUMBER:resp_code:tag} (?:%{NUMBER:resp_bytes:int}|-)
# Combined log format is the same as the common log format but with the addition
# of two quoted strings at the end for "referrer" and "agent"


@@ -134,7 +134,7 @@ func (l *LogParserPlugin) Start(acc telegraf.Accumulator) error {
for _, filepath := range l.Files {
g, err := globpath.Compile(filepath)
if err != nil {
log.Printf("ERROR Glob %s failed to compile, %s", filepath, err)
log.Printf("E! Error Glob %s failed to compile, %s", filepath, err)
continue
}
files := g.Match()
@@ -167,7 +167,7 @@ func (l *LogParserPlugin) receiver(tailer *tail.Tail) {
var line *tail.Line
for line = range tailer.Lines {
if line.Err != nil {
log.Printf("ERROR tailing file %s, Error: %s\n",
log.Printf("E! Error tailing file %s, Error: %s\n",
tailer.Filename, line.Err)
continue
}
@@ -216,7 +216,7 @@ func (l *LogParserPlugin) Stop() {
for _, t := range l.tailers {
err := t.Stop()
if err != nil {
log.Printf("ERROR stopping tail on file %s\n", t.Filename)
log.Printf("E! Error stopping tail on file %s\n", t.Filename)
}
t.Cleanup()
}


@@ -134,7 +134,7 @@ func runChimp(api *ChimpAPI, params ReportsParams) ([]byte, error) {
req.URL.RawQuery = params.String()
req.Header.Set("User-Agent", "Telegraf-MailChimp-Plugin")
if api.Debug {
log.Printf("Request URL: %s", req.URL.String())
log.Printf("D! Request URL: %s", req.URL.String())
}
resp, err := client.Do(req)
@@ -148,7 +148,7 @@ func runChimp(api *ChimpAPI, params ReportsParams) ([]byte, error) {
return nil, err
}
if api.Debug {
log.Printf("Response Body:%s", string(body))
log.Printf("D! Response Body:%s", string(body))
}
if err = chimpErrorCheck(body); err != nil {


@@ -35,13 +35,10 @@ For more information, please check the [Mesos Observability Metrics](http://meso
# "tasks",
# "messages",
# ]
## Include mesos tasks statistics, default is false
# slave_tasks = true
```
By default this plugin is not configured to gather metrics from mesos. Since a mesos cluster can be deployed in numerous ways it does not provide any default
values. User needs to specify master/slave nodes this plugin will gather metrics from. Additionally, enabling `slave_tasks` will allow
gathering metrics from tasks running on specified slaves (this option is disabled by default).
values. User needs to specify master/slave nodes this plugin will gather metrics from.
### Measurements & Fields:
@@ -235,45 +232,21 @@ Mesos slave metric groups
- slave/valid_framework_messages
- slave/valid_status_updates
Mesos tasks metric groups
- executor_id
- executor_name
- framework_id
- source
- statistics (all metrics below will have `statistics_` prefix included in their names
- cpus_limit
- cpus_system_time_secs
- cpus_user_time_secs
- mem_anon_bytes
- mem_cache_bytes
- mem_critical_pressure_counter
- mem_file_bytes
- mem_limit_bytes
- mem_low_pressure_counter
- mem_mapped_file_bytes
- mem_medium_pressure_counter
- mem_rss_bytes
- mem_swap_bytes
- mem_total_bytes
- mem_total_memsw_bytes
- mem_unevictable_bytes
- timestamp
### Tags:
- All master/slave measurements have the following tags:
- server
- role (master/slave)
- Tasks measurements have the following tags:
- server
- All master measurements have the extra tags:
- state (leader/follower)
### Example Output:
```
$ telegraf -config ~/mesos.conf -input-filter mesos -test
* Plugin: mesos, Collection 1
mesos,host=172.17.8.102,server=172.17.8.101 allocator/event_queue_dispatches=0,master/cpus_percent=0,
mesos,role=master,state=leader,host=172.17.8.102,server=172.17.8.101
allocator/event_queue_dispatches=0,master/cpus_percent=0,
master/cpus_revocable_percent=0,master/cpus_revocable_total=0,
master/cpus_revocable_used=0,master/cpus_total=2,
master/cpus_used=0,master/disk_percent=0,master/disk_revocable_percent=0,
@@ -291,15 +264,3 @@ master/mem_used=0,master/messages_authenticate=0,
master/messages_deactivate_framework=0 ...
```
Meoso tasks metrics (if enabled):
```
mesos-tasks,host=172.17.8.102,server=172.17.8.101,task_id=hello-world.e4b5b497-2ccd-11e6-a659-0242fb222ce2
statistics_cpus_limit=0.2,statistics_cpus_system_time_secs=142.49,statistics_cpus_user_time_secs=388.14,
statistics_mem_anon_bytes=359129088,statistics_mem_cache_bytes=3964928,
statistics_mem_critical_pressure_counter=0,statistics_mem_file_bytes=3964928,
statistics_mem_limit_bytes=767557632,statistics_mem_low_pressure_counter=0,
statistics_mem_mapped_file_bytes=114688,statistics_mem_medium_pressure_counter=0,
statistics_mem_rss_bytes=359129088,statistics_mem_swap_bytes=0,statistics_mem_total_bytes=363094016,
statistics_mem_total_memsw_bytes=363094016,statistics_mem_unevictable_bytes=0,
statistics_timestamp=1465486052.70525 1465486053052811792...
```


@@ -30,7 +30,7 @@ type Mesos struct {
MasterCols []string `toml:"master_collections"`
Slaves []string
SlaveCols []string `toml:"slave_collections"`
SlaveTasks bool
//SlaveTasks bool
}
var allMetrics = map[Role][]string{
@@ -66,8 +66,6 @@ var sampleConfig = `
# "tasks",
# "messages",
# ]
## Include mesos tasks statistics, default is false
# slave_tasks = true
`
// SampleConfig returns a sample configuration block
@@ -90,7 +88,7 @@ func (m *Mesos) SetDefaults() {
}
if m.Timeout == 0 {
log.Println("[mesos] Missing timeout value, setting default value (100ms)")
log.Println("I! [mesos] Missing timeout value, setting default value (100ms)")
m.Timeout = 100
}
}
@@ -116,21 +114,21 @@ func (m *Mesos) Gather(acc telegraf.Accumulator) error {
for _, v := range m.Slaves {
wg.Add(1)
go func(c string) {
errorChannel <- m.gatherMainMetrics(c, ":5051", MASTER, acc)
errorChannel <- m.gatherMainMetrics(c, ":5051", SLAVE, acc)
wg.Done()
return
}(v)
if !m.SlaveTasks {
continue
}
// if !m.SlaveTasks {
// continue
// }
wg.Add(1)
go func(c string) {
errorChannel <- m.gatherSlaveTaskMetrics(c, ":5051", acc)
wg.Done()
return
}(v)
// wg.Add(1)
// go func(c string) {
// errorChannel <- m.gatherSlaveTaskMetrics(c, ":5051", acc)
// wg.Done()
// return
// }(v)
}
wg.Wait()
@@ -385,7 +383,7 @@ func getMetrics(role Role, group string) []string {
ret, ok := m[group]
if !ok {
log.Printf("[mesos] Unkown %s metrics group: %s\n", role, group)
log.Printf("I! [mesos] Unkown %s metrics group: %s\n", role, group)
return []string{}
}
@@ -420,8 +418,15 @@ var client = &http.Client{
Timeout: time.Duration(4 * time.Second),
}
// TaskStats struct for JSON API output /monitor/statistics
type TaskStats struct {
ExecutorID string `json:"executor_id"`
FrameworkID string `json:"framework_id"`
Statistics map[string]interface{} `json:"statistics"`
}
func (m *Mesos) gatherSlaveTaskMetrics(address string, defaultPort string, acc telegraf.Accumulator) error {
var metrics []map[string]interface{}
var metrics []TaskStats
host, _, err := net.SplitHostPort(address)
if err != nil {
@@ -452,16 +457,19 @@ func (m *Mesos) gatherSlaveTaskMetrics(address string, defaultPort string, acc telegraf.Accumulator) error {
}
for _, task := range metrics {
tags["task_id"] = task["executor_id"].(string)
tags["framework_id"] = task.FrameworkID
jf := jsonparser.JSONFlattener{}
err = jf.FlattenJSON("", task)
err = jf.FlattenJSON("", task.Statistics)
if err != nil {
return err
}
acc.AddFields("mesos-tasks", jf.Fields, tags)
timestamp := time.Unix(int64(jf.Fields["timestamp"].(float64)), 0)
jf.Fields["executor_id"] = task.ExecutorID
acc.AddFields("mesos_tasks", jf.Fields, tags, timestamp)
}
return nil
@@ -510,6 +518,14 @@ func (m *Mesos) gatherMainMetrics(a string, defaultPort string, role Role, acc telegraf.Accumulator) error {
return err
}
if role == MASTER {
if jf.Fields["master/elected"] != 0.0 {
tags["state"] = "leader"
} else {
tags["state"] = "standby"
}
}
acc.AddFields("mesos", jf.Fields, tags)
return nil
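The mesos.go hunks above introduce a typed `TaskStats` struct for the agent's /monitor/statistics response and flatten only its `statistics` object. A minimal standalone sketch of that decode path follows; the sample body is hypothetical and trimmed, the struct tags match the diff, and the loop body is stand-in code rather than telegraf's jsonparser.JSONFlattener.

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// TaskStats mirrors the struct added in the diff above for the Mesos agent's
// /monitor/statistics endpoint, which returns an array of per-executor objects.
type TaskStats struct {
	ExecutorID  string                 `json:"executor_id"`
	FrameworkID string                 `json:"framework_id"`
	Statistics  map[string]interface{} `json:"statistics"`
}

func main() {
	// Hypothetical response body, trimmed to the fields the plugin reads.
	body := []byte(`[{
		"executor_id": "hello-world.e4b5b497-2ccd-11e6-a659-0242fb222ce2",
		"framework_id": "example-framework-0000",
		"statistics": {"cpus_limit": 0.2, "mem_rss_bytes": 359129088, "timestamp": 1465486052.70525}
	}]`)

	var metrics []TaskStats
	if err := json.Unmarshal(body, &metrics); err != nil {
		panic(err)
	}

	for _, task := range metrics {
		// As in the diff: tag by framework_id, keep executor_id as a field,
		// and take the point's timestamp from the statistics payload.
		ts := time.Unix(int64(task.Statistics["timestamp"].(float64)), 0)
		fmt.Println(task.FrameworkID, task.ExecutorID, task.Statistics["cpus_limit"], ts)
	}
}
```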

Some files were not shown because too many files have changed in this diff.