Compare commits


314 Commits
1.3.4 ... 1.4.2

Author SHA1 Message Date
Daniel Nelson
0cc5fc0ce4 Set 1.4.2 release date
(cherry picked from commit 4e0c8e6026)
2017-10-10 13:31:06 -07:00
Daniel Nelson
8011109466 Remove InfluxDB path prefix test
This tests a feature that is not yet on this branch and the test was
mistakenly backported.
2017-10-05 16:37:58 -07:00
Daniel Nelson
588f0c77f8 Update changelog
(cherry picked from commit 13c7802b84)
2017-10-05 16:17:06 -07:00
Daniel Nelson
4301b8e32a Use chunked transfer encoding in InfluxDB output (#3307)
(cherry picked from commit cce40c515a)
2017-10-05 16:17:05 -07:00
Daniel Nelson
3c9d7db0a0 Update changelog
(cherry picked from commit 6e1fa559a3)
2017-10-05 16:06:11 -07:00
Daniel Nelson
f7b3eb1ebd Fix panic in cpu input if number of cpus changes (#3306)
(cherry picked from commit f56dda0ac8)
2017-10-05 16:06:11 -07:00
Daniel Nelson
b8ab827629 Update changelog
(cherry picked from commit 002ccf3295)
2017-10-03 15:27:49 -07:00
Daniel Nelson
d03e2fca32 Add support for proxy environment variables to http_response (#3302)
(cherry picked from commit a163effa6d)
2017-10-03 15:26:55 -07:00
Daniel Nelson
eca00c10e0 Add support for standard proxy env vars in outputs. (#3212)
(cherry picked from commit 7b08f9d099)
2017-10-03 15:26:44 -07:00
Daniel Nelson
9cf19df04e Update changelog
(cherry picked from commit f67350107d)
2017-10-02 17:17:10 -07:00
Daniel Nelson
e77c2b76e7 Fix case sensitivity error in sqlserver input (#3287)
(cherry picked from commit 8e3ed96d6f)
2017-10-02 17:17:10 -07:00
Daniel Nelson
c749c43dab Fix mqtt_consumer connection_timeout test
(cherry picked from commit cdca81c999)
2017-10-02 12:32:05 -07:00
Daniel Nelson
1be17ea5af Update example config 2017-09-29 16:04:02 -07:00
Daniel Nelson
e1155bec20 Update changelog
(cherry picked from commit 29b6f4168c)
2017-09-29 16:01:11 -07:00
Daniel Nelson
cfac750469 Fix format of connection_timeout in mqtt_consumer (#3286)
(cherry picked from commit 3d62e045af)
2017-09-29 16:01:11 -07:00
Daniel Nelson
f10d5b43c4 Update changelog
(cherry picked from commit cadafa6405)
2017-09-26 16:03:30 -07:00
Daniel Nelson
47b2d04d5b Allow JSON data format to contain zero metrics (#3268)
(cherry picked from commit 22a9ffbb9d)
2017-09-26 16:03:30 -07:00
Daniel Nelson
0e0da57b9a Update changelog
(cherry picked from commit 2e1457a496)
2017-09-26 15:38:41 -07:00
Daniel Nelson
8e7cf0109e Fix parsing of JSON with a UTF8 BOM in httpjson (#3267)
(cherry picked from commit 8614445235)
2017-09-26 15:38:41 -07:00
Daniel Nelson
5b791fd2e5 Update changelog
(cherry picked from commit f23d1eb078)
2017-09-26 15:29:19 -07:00
Daniel Nelson
293b1a0093 Fix dmcache tests with 32bit int
(cherry picked from commit ef5c12bd86)
2017-09-26 15:29:01 -07:00
Daniel Nelson
761ea06d6a Fix cgroup tests with 32bit int
(cherry picked from commit c013cc1497)
2017-09-26 15:29:01 -07:00
Daniel Nelson
8fafe9878b Fix ceph tests with 32bit int
(cherry picked from commit bb665cf013)
2017-09-26 15:29:01 -07:00
Daniel Nelson
5da3eef38b Allow 64bit integers in kernel_vmstat
(cherry picked from commit f823fc73f6)
2017-09-26 15:29:00 -07:00
Daniel Nelson
2de7aa23d7 Set 1.4.1 release date in changelog
(cherry picked from commit fd702e6bb8)
2017-09-26 14:19:51 -07:00
Daniel Nelson
52cd38150c Update changelog
(cherry picked from commit 0048bf2120)
2017-09-18 14:25:57 -07:00
Daniel Nelson
c08f492f78 Fix arm64 packages contain 32-bit executable (#3246)
(cherry picked from commit b8e134cd37)
2017-09-18 14:25:57 -07:00
Daniel Nelson
66cfe80e37 Update changelog
(cherry picked from commit b94cda6b46)
2017-09-14 15:30:51 -07:00
Trevor Pounds
ba5e5ec283 Fix panic in statsd p100 calculation (#3230)
(cherry picked from commit 73372872c2)
2017-09-14 15:30:51 -07:00
Daniel Nelson
259f8e4002 Update changelog
(cherry picked from commit 875ab3c4b7)
2017-09-14 15:05:38 -07:00
Mark Wilkinson - m82labs
558ab0c730 Fix duplicate keys in perf counters sqlserver query (#3175)
(cherry picked from commit 1c5ebd4be3)
2017-09-14 15:05:38 -07:00
Daniel Nelson
8d4fbe29e7 Update changelog
(cherry picked from commit 103d24bfba)
2017-09-14 15:01:28 -07:00
Daniel Nelson
72337a1c97 Fix skipped line with empty target in iptables (#3235)
(cherry picked from commit d5f48e3e96)
2017-09-14 15:01:21 -07:00
Daniel Nelson
86537899b2 Update changelog
(cherry picked from commit 7a41d2c586)
2017-09-14 13:07:30 -07:00
Trevor Pounds
a727d5d1f0 Fix counter and gauge metric types. (#3232)
(cherry picked from commit fa1982323a)
2017-09-14 13:07:30 -07:00
Daniel Nelson
7ec194a482 Update changelog
(cherry picked from commit cdf63c5776)
2017-09-13 17:32:03 -07:00
Daniel Nelson
5a77d28837 Whitelist allowed char classes for opentsdb output. (#3227)
(cherry picked from commit 0a8c2e0b3b)
2017-09-13 17:32:03 -07:00
Daniel Nelson
47927c353d Fix fluentd test
(cherry picked from commit eebee9759f)
2017-09-12 17:58:29 -07:00
Daniel Nelson
b9e7fa27aa Update changelog
(cherry picked from commit c5cfde667a)
2017-09-12 17:18:29 -07:00
Daniel Nelson
0d437140bd Fix optional field types in fluentd input
(cherry picked from commit 8a68e7424c)
2017-09-12 17:18:29 -07:00
Daniel Nelson
36969a63c2 Update changelog
(cherry picked from commit cc63b3b667)
2017-09-11 12:28:37 -07:00
DanKans
e9a12bb694 Fix MQTT input exits if Broker is not available on startup (#3202)
(cherry picked from commit 5488f4b3ac)
2017-09-11 12:28:12 -07:00
Daniel Nelson
34b7a4c361 Add 1.4.0 release date
(cherry picked from commit ab1c11b06d)
2017-09-05 17:15:06 -07:00
Daniel Nelson
f46370d982 Sort metrics before comparing in graphite test
(cherry picked from commit 98e784faf3)
2017-09-05 12:50:55 -07:00
Daniel Nelson
07b7e09749 Update changelog
(cherry picked from commit f43af72785)
2017-08-31 13:44:05 -07:00
Daniel Nelson
e54795795d Fix panic when handling string fields with escapes (#3188)
(cherry picked from commit 28d16188b3)
2017-08-30 21:17:10 -07:00
Daniel Nelson
b2b2bd8a27 Update changelog 2017-08-29 16:30:25 -07:00
Daniel Nelson
f96cbb48c7 Convert bool fields to int in graphite serializer 2017-08-29 16:30:25 -07:00
Seua Polyakov
9077cb83bc Skip non-numerical values in graphite format (#3179) 2017-08-29 16:30:25 -07:00
Daniel Nelson
0f188f280f Update changelog 2017-08-28 17:18:00 -07:00
Dylan Meissner
b9420e73bd HTTP headers can be added to InfluxDB output (#3182)
(cherry picked from commit a9a40cbf87)
2017-08-28 17:15:43 -07:00
Daniel Nelson
1e43e5e7ae Update changelog
(cherry picked from commit 5fd8ab36d3)
2017-08-28 17:09:08 -07:00
Jeff Nickoloff
5e104ad974 Added CloudWatch metric constraint validation (#3183)
(cherry picked from commit ac1fa05672)
2017-08-28 17:09:08 -07:00
Daniel Nelson
cc9d8c700c Update changelog
(cherry picked from commit a98496591a)
2017-08-25 18:08:55 -07:00
Ashton Kinslow
b15ec21ba7 Fix NSQ input plugin when used with version 1.0.0-compat
(cherry picked from commit 0a6541dfa8)
2017-08-25 18:08:55 -07:00
Daniel Nelson
a9abfe8f08 Update changelog
(cherry picked from commit 6abecd0ac7)
2017-08-25 12:59:51 -07:00
Rickard von Essen
307210242c Don't fail parsing of zpool stats if pool health is UNAVAIL on FreeBSD (#3149)
(cherry picked from commit 0502b65316)
2017-08-25 12:59:38 -07:00
Daniel Nelson
0a41db16f1 Update changelog
(cherry picked from commit e400fcf5da)
2017-08-25 11:56:30 -07:00
Jan Willem Janssen
7480267fd2 Fix parsing of SHM remotes in ntpq input (#3163)
(cherry picked from commit d449833de9)
2017-08-25 11:56:27 -07:00
Daniel Nelson
30949c4596 Update fail2ban documentation
(cherry picked from commit 58751fa4df)
2017-08-25 11:43:49 -07:00
Daniel Nelson
47264bc860 Fix amqp_consumer data_format documentation
closes #3164

(cherry picked from commit 656ce31d98)
2017-08-24 13:18:23 -07:00
Daniel Nelson
67e693e9a8 Update changelog
(cherry picked from commit f95c239a3f)
2017-08-23 15:22:29 -07:00
Daniel Nelson
851352bc8a Escape backslash within string fields (#3161)
(cherry picked from commit ae24a0754b)
2017-08-23 15:22:25 -07:00
Daniel Nelson
c807452c14 Update changelog
(cherry picked from commit f253623231)
2017-08-23 15:16:40 -07:00
Rickard von Essen
48e00f7ea0 Enable hddtemp on all platforms (#3153)
Also disables dmcache tests on non-linux.

(cherry picked from commit f0db4fd901)
2017-08-23 15:16:27 -07:00
Daniel Nelson
8ce901aaa4 Update changelog
(cherry picked from commit 8c68bd9ddb)
2017-08-22 17:03:28 -07:00
Daniel Nelson
78d1715601 Don't start Telegraf on install in Amazon Linux (#3156)
(cherry picked from commit 9fc7220c2e)
2017-08-22 17:03:17 -07:00
Daniel Nelson
1b0a18897d Update changelog
(cherry picked from commit 6597b55477)
2017-08-22 16:55:37 -07:00
Daniel Nelson
257b6a09d9 Don't retry points beyond retention policy (#3155)
(cherry picked from commit 1f4a997164)
2017-08-22 16:55:33 -07:00
Rickard von Essen
e6feac735c Enable fail2ban on all platforms (#3151)
(cherry picked from commit 371638ce56)
2017-08-22 12:59:54 -07:00
Rickard von Essen
6616065acf Enable chrony for all platforms (#3152)
(cherry picked from commit 53c5d3a290)
2017-08-22 11:50:16 -07:00
Daniel Nelson
98774d60e2 Cache intermediate objects during build
(cherry picked from commit ccf17a9f93)
2017-08-21 17:28:20 -07:00
Chris Goller
d4cd1b7eb4 Add JSON input support to zipkin plugin (#3150)
(cherry picked from commit 13a6b917c3)
2017-08-21 17:28:14 -07:00
Daniel Nelson
7254111d37 Add win_services to the readme
(cherry picked from commit 1f1e9cc49f)
2017-08-18 17:58:18 -07:00
Daniel Nelson
4551efb459 Update histogram aggregator documentation (#3133)
(cherry picked from commit 70c2b83f00)
2017-08-18 13:25:22 -07:00
Daniel Nelson
2610eba0e3 Remove version test
(cherry picked from commit 4de264ffc8)
2017-08-18 11:09:34 -07:00
Daniel Nelson
c277dc27a6 Update example config
(cherry picked from commit 36c2c88fd2)
2017-08-17 18:54:44 -07:00
Daniel Nelson
a4f5c6fbc3 Update sample config 2017-08-16 16:48:10 -07:00
Daniel Nelson
7608251633 Add tomcat input to changelog and readme 2017-08-16 15:36:56 -07:00
Daniel Nelson
1e9d7cd6e9 Add error status handle to tomcat input 2017-08-16 15:33:47 -07:00
mlindes
a91457e001 Add tomcat input plugin (#3112) 2017-08-16 15:33:20 -07:00
Daniel Nelson
fd3a9bf46a Update changelog 2017-08-16 12:26:00 -07:00
Daniel Nelson
ca394fcfb2 Discard logging from tail library (#3128) 2017-08-16 12:06:07 -07:00
Daniel Nelson
3819607511 Allow using system plugin in Windows (#3127) 2017-08-16 12:05:46 -07:00
Daniel Nelson
eb0215c382 Remove log message on ping timeout (#3126) 2017-08-16 11:59:41 -07:00
Daniel Nelson
09153c815c Move http_response headers to end of configuration.
If the subtable comes before other options, those options will be parsed as
part of the subtable.
2017-08-15 11:50:08 -07:00
Daniel Nelson
9bc13f143e Test for nil metric before reading tags in logparser 2017-08-15 11:43:16 -07:00
Daniel Nelson
032348c7a5 Update changelog 2017-08-14 14:51:28 -07:00
Bob Shannon
5fbdd09aaf Add gzip content-encoding support to influxdb output (#2978) 2017-08-14 14:50:15 -07:00
Daniel Nelson
7d5dae5a08 Improve apache input docs (#3120) 2017-08-11 17:50:51 -07:00
Daniel Nelson
54be037911 Use double hyphen in cli examples 2017-08-11 16:26:54 -07:00
Daniel Nelson
5003809e97 Merge LDFLAGS from env into build 2017-08-11 16:26:54 -07:00
G-Research
1b50f14d55 Build NTPQ input on Windows (#3117) 2017-08-11 13:36:25 -07:00
Patrick Hemmer
b0109b3550 Add weighted_io_time to diskio input (#3119) 2017-08-11 11:49:42 -07:00
Daniel Nelson
257b460f61 Update changelog 2017-08-10 12:41:09 -07:00
Daniel Nelson
287a44de5e Skip compilation of logparser and tail on solaris (#3113)
Allows compilation for solaris
2017-08-10 12:36:11 -07:00
Daniel Nelson
73897d1f1c Update changelog 2017-08-10 10:22:11 -07:00
Daniel Nelson
1e2d594af0 Converge to typed value in prometheus output (#3104) 2017-08-10 10:19:28 -07:00
Daniel Nelson
83c003e594 Update changelog 2017-08-09 11:48:36 -07:00
Daniel Nelson
84ce9629a8 Tweak formatting of varnish README 2017-08-09 11:48:12 -07:00
Daniel Nelson
3c14b46f6f Fix ordering of all target 2017-08-09 11:47:55 -07:00
Benjamin Stromski
8a2373e8c8 Add option to run varnish under sudo (#3097) 2017-08-09 11:38:54 -07:00
Daniel Nelson
cb04fa1e9c Add diskio %util sample query 2017-08-09 11:28:27 -07:00
Seva Poliakov
92af42a847 Remove tag_env duplicate from docker README (#3109) 2017-08-09 10:21:22 -07:00
Daniel Nelson
bceb020d72 Update changelog and readme 2017-08-08 11:50:16 -07:00
Rodolphe Blancho
d9deb266df Add salesforce input plugin (#3075) 2017-08-08 11:48:01 -07:00
Slawomir Skowron
f3435f1c59 Add TCP listener for statsd input (#2293) 2017-08-08 11:41:26 -07:00
Daniel Nelson
f9573ad969 Remove Godeps_windows from build.py 2017-08-07 17:43:06 -07:00
Daniel Nelson
40aacd9046 Fix artifact redirection 2017-08-07 17:41:52 -07:00
Daniel Nelson
5e73f3e816 Only upload nightly if on master branch 2017-08-07 17:24:35 -07:00
Daniel Nelson
a1e7a5f474 Upload as nightly builds if PACKAGE set 2017-08-07 17:16:34 -07:00
Daniel Nelson
828c5817f9 Update changelog 2017-08-07 16:18:01 -07:00
Daniel Nelson
3e27134872 Add path tag to logparser containing path of logfile (#3098) 2017-08-07 16:16:31 -07:00
Daniel Nelson
1fb5373962 Build releases with -w -s ldflags 2017-08-07 15:47:20 -07:00
Daniel Nelson
75e6ebcf93 Update changelog 2017-08-07 14:39:22 -07:00
Vlasta Hajek
e21f2de8b8 Add Windows Services input plugin (#3023) 2017-08-07 14:36:15 -07:00
Daniel Nelson
795f02ab88 Cleanup Makefile (#3089) 2017-08-03 11:54:05 -07:00
Daniel Nelson
360d03e301 Update changelog and readme 2017-08-02 18:02:41 -07:00
Daniel Nelson
137b312fa9 Add Zipkin input plugin (#3080) 2017-08-02 17:58:26 -07:00
Daniel Nelson
ce12913bc2 Update precision documentation and examples
Precision is no longer used by the InfluxDB output.

closes #3079
2017-08-01 15:02:36 -07:00
Daniel Nelson
d82c5062b8 Add Appveyor continuous integration (#3074) 2017-07-31 16:12:09 -07:00
Daniel Nelson
6666e6a5a7 Update changelog 2017-07-31 11:37:32 -07:00
Vladislav Mugultyanov
9c0aadf445 Add histogram aggregator plugin (#2387) 2017-07-31 11:33:51 -07:00
Daniel Nelson
3bd14ed229 Update changelog 2017-07-31 11:30:27 -07:00
DanKans
5e95367f6c Sanitize password from couchbase metric (#3033) 2017-07-31 11:29:14 -07:00
Jeff Ashton
c31e7d0b91 Fix win_perf_counters tests (#3068) 2017-07-31 11:03:26 -07:00
Oscar Sironi
f8c84302a4 Add config file path troubleshooting advice for Windows (#3071) 2017-07-31 10:58:12 -07:00
Daniel Nelson
9143670d6e Update changelog 2017-07-27 17:19:33 -07:00
Daniel Nelson
f0bd69d904 Add tls options to docker input (#3063) 2017-07-27 17:18:44 -07:00
Daniel Nelson
7179290dea Update changelog 2017-07-27 15:21:52 -07:00
Daniel Nelson
c4297f40ad Allow iptable entries with trailing text (#3060) 2017-07-27 15:21:06 -07:00
Daniel Nelson
0d4c954e01 Update changelog 2017-07-27 15:15:11 -07:00
Daniel Nelson
d6cf9f4f30 Fix docker memory and cpu reporting in Windows (#3043) 2017-07-27 15:12:29 -07:00
Daniel Nelson
5f88be022c Add circleci parameter to build packages 2017-07-26 17:13:50 -07:00
Daniel Nelson
284ab79a37 Set 1.3.5 release date 2017-07-26 15:53:49 -07:00
Daniel Nelson
2bd6c80506 Update changelog 2017-07-25 17:12:45 -07:00
Daniel Nelson
0ca936a12e Default to localhost if zookeeper has no servers set (#3056) 2017-07-25 17:08:32 -07:00
Daniel Nelson
a26fc52181 Fix panic in logparser if file cannot be opened (#3055) 2017-07-25 17:08:03 -07:00
Daniel Nelson
83f575fcea Add redis_version field to redis input (#3054) 2017-07-25 17:07:43 -07:00
Daniel Nelson
ffd1f25b75 Update changelog 2017-07-25 16:09:48 -07:00
Daniel Nelson
1658404cea Update changelog 2017-07-25 15:43:13 -07:00
Daniel Nelson
82ea04f188 Fix prometheus output cannot be reloaded (#3053) 2017-07-25 15:41:18 -07:00
xin053
273d0b85b0 Correct spelling of toml field in mysql input (#3051) 2017-07-25 10:57:27 -07:00
Théophile Helleboid - chtitux
f3917ec5ff Fix typo in postgresql_extensible/README.md (#3052) 2017-07-25 10:39:14 -07:00
Daniel Nelson
428455e032 Update changelog 2017-07-24 18:26:29 -07:00
Daniel Nelson
573bd4aa32 Start first aggregator period at startup time (#3050)
Fixes issue where metrics collected immediately after startup would not
be aggregated.
2017-07-24 18:25:05 -07:00
Oskar
ab5205f8c3 Fix go vet under windows (#3046) 2017-07-24 12:36:33 -07:00
Daniel Nelson
85aa212467 Update changelog 2017-07-21 16:57:28 -07:00
Daniel Nelson
840d19db35 Add network option to dns_query (#3042) 2017-07-21 16:56:08 -07:00
Daniel Nelson
1c267e9b16 Update changelog 2017-07-21 15:46:22 -07:00
Andy Cobaugh
1ff6e92193 Add input plugin for OpenLDAP (#2612) 2017-07-21 15:44:20 -07:00
Daniel Nelson
c82c0e596b Update changelog 2017-07-21 14:31:25 -07:00
Daniel Nelson
31ce98fa91 Don't match pattern on any error (#3040)
This prevents a pattern with no wildcards from matching when permission is
denied.
2017-07-21 14:28:14 -07:00
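A hedged sketch of the rule this commit describes; `literalPatternMatches` is a hypothetical helper, not the plugin's actual code:

```go
package finder // illustrative package, not part of Telegraf

import "os"

// literalPatternMatches treats a wildcard-free pattern as a match only when
// the file demonstrably exists; a permission error is not a match.
func literalPatternMatches(path string) bool {
	_, err := os.Stat(path)
	return err == nil // EACCES and ENOENT both mean "no match"
}
```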
Daniel Nelson
4d66db1603 Update changelog 2017-07-21 14:26:39 -07:00
Yann Cézard
681d20083a Only report cpu usage for online cpus in docker input (#3035) 2017-07-21 14:25:17 -07:00
Daniel Nelson
4ee74ff54b Document GNU make requirement 2017-07-21 11:15:00 -07:00
Daniel Nelson
16073e4172 Update changelog 2017-07-21 10:57:39 -07:00
Daniel Nelson
3c204d409d Line wrap documentation 2017-07-21 10:57:12 -07:00
DanKans
d903a9142d Fix filtering when both pass and drop match an item (#3036)
Adjust the logic in the functions responsible for passing metrics so that
they are processed correctly when pass and drop are defined together.
2017-07-21 10:53:57 -07:00
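A short sketch of the combined semantics described above (assumed for illustration, not Telegraf's exact filter code): when both filters are configured, an item must be selected by pass and must not be selected by drop.

```go
package filtering // illustrative package, not part of Telegraf

// shouldPass applies the assumed combined pass/drop semantics.
func shouldPass(item string, pass, drop map[string]bool) bool {
	if len(pass) > 0 && !pass[item] {
		return false // pass is configured and did not select the item
	}
	if len(drop) > 0 && drop[item] {
		return false // drop is configured and selected the item
	}
	return true
}
```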
Daniel Nelson
a2d4453269 Update changelog 2017-07-19 13:09:49 -07:00
DanKans
34c042c7dc Fix combined tagdrop/tagpass filtering (#3031) 2017-07-19 13:08:40 -07:00
Daniel Nelson
4dfe2312d0 Switch skipped kafka test 2017-07-18 18:18:57 -07:00
Daniel Nelson
c740dce36d Update download information in readme 2017-07-18 13:54:38 -07:00
Daniel Nelson
475a926d43 Update changelog 2017-07-18 11:03:07 -07:00
DanKans
d2626f1da6 Fix ntpq parse issue when using dns_lookup (#3026) 2017-07-18 11:01:08 -07:00
soldierkam
f5a8415c78 Add read timeout to socket_listener 2017-07-17 18:34:36 -07:00
Daniel Nelson
1d416a4213 Remove command in example output 2017-07-17 15:08:17 -07:00
Daniel Nelson
731ab9773d Update changelog 2017-07-17 12:01:35 -07:00
Daniel Nelson
d8f7b76253 Prevent startup if intervals are 0 2017-07-17 11:58:47 -07:00
Daniel Nelson
dbe2f79019 Update changelog 2017-07-14 10:45:32 -07:00
Bob Shannon
ef63908541 Add result_type field to net_response input plugin (#2990) 2017-07-14 10:43:36 -07:00
Daniel Nelson
27e47614c6 Add credits for new plugins to changelog 2017-07-13 16:14:18 -07:00
Daniel Nelson
dc4a133b11 Update changelog 2017-07-13 16:00:09 -07:00
DanKans
f4d67d8c3c Add fluentd input plugin (#2661) 2017-07-13 15:58:20 -07:00
Daniel Nelson
785798611e Update changelog 2017-07-13 15:39:45 -07:00
Daniel Nelson
b165ce4cd5 Prevent possible deadlock when using aggregators (#3016)
Looping the metrics back through the same channel could result in a
deadlock; by using a new channel and locking the processor we can ensure
that all stages can make continual progress.
2017-07-13 15:34:21 -07:00
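A minimal Go sketch of the channel topology this commit describes (illustrative only, not Telegraf's pipeline code): the aggregating stage reads from one bounded channel and emits into a distinct one, so a full input channel can never block its own consumer.

```go
package main

import "fmt"

type metric struct{ name string }

func main() {
	in := make(chan metric, 2)  // bounded stage input, as in a real pipeline
	out := make(chan metric, 2) // separate output channel; writing back into
	// `in` could fill it and stall every stage

	go func() {
		for m := range in {
			out <- metric{name: m.name + "_aggregated"}
		}
		close(out)
	}()

	in <- metric{name: "cpu"}
	in <- metric{name: "mem"}
	close(in)

	for m := range out {
		fmt.Println(m.name)
	}
}
```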
Daniel Nelson
d9d1ca5a46 Add release date for 1.3.4 2017-07-12 17:15:38 -07:00
Daniel Nelson
2c10806fef Update changelog 2017-07-12 12:04:43 -07:00
Daniel Nelson
5d2c093105 Prevent Write from being called concurrently (#3011) 2017-07-12 12:03:23 -07:00
Daniel Nelson
f68bab1667 Update changelog 2017-07-11 15:55:44 -07:00
Daniel Nelson
1388e2cf92 Do not allow metrics with trailing slashes (#3007)
It is not possible to encode a measurement, tag, or field whose last
character is a backslash, since a trailing backslash cannot be escaped.
Because of the tight coupling between line protocol and the internal metric
model, prevent metrics like this from being created.

Measurements with a trailing backslash are not allowed and the point will be
dropped.  Tags and fields with a trailing backslash will be dropped from
the point.
2017-07-11 15:54:38 -07:00
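A sketch of the stated rule; `hasTrailingBackslash` is a hypothetical helper, not Telegraf's API:

```go
// hasTrailingBackslash reports whether a measurement, tag, or field name
// ends in a backslash and therefore cannot round-trip through line protocol.
func hasTrailingBackslash(s string) bool {
	return len(s) != 0 && s[len(s)-1] == '\\'
}
```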
Daniel Nelson
af318f4959 Update changelog 2017-07-11 14:10:09 -07:00
JSH
9f244cf1ac Fix chrony plugin does not track system time offset (#2989) 2017-07-11 14:08:40 -07:00
Daniel Nelson
885aa8e6e1 Update changelog 2017-07-10 19:07:28 -07:00
Daniel Nelson
945446b36f Fix handling of escapes within fieldset (#3003)
Line protocol does not require or allow escaping of a backslash; the only
requirement for a byte to be escaped is that it is an escapable char
preceded immediately by a backslash.
2017-07-10 19:05:18 -07:00
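A sketch of the unescaping rule described above, with an assumed escapable set for a tag value (comma, space, equals); illustrative only, not Telegraf's parser:

```go
package lineprotocol // illustrative package, not part of Telegraf

import "strings"

// unescapeTagValue consumes a backslash only when the byte after it is
// escapable; any other backslash is a literal byte and passes through.
func unescapeTagValue(s string) string {
	var b strings.Builder
	for i := 0; i < len(s); i++ {
		if s[i] == '\\' && i+1 < len(s) && strings.IndexByte(", =", s[i+1]) >= 0 {
			continue // skip the escape; the escaped byte is written next pass
		}
		b.WriteByte(s[i])
	}
	return b.String()
}
```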
Daniel Nelson
4209ebfa6e Update changelog 2017-07-10 12:23:16 -07:00
Daniel Nelson
79f8ed874a Update elastic version to 5.0.41 (#2999) 2017-07-10 12:18:56 -07:00
Daniel Nelson
739d97639a Update dependencies 2017-07-10 12:01:22 -07:00
Wesley Merkel
ac8e28f436 Add link to Graylog input to README.md (#2995) 2017-07-10 11:22:37 -07:00
Daniel Nelson
2740a3ba44 Update changelog 2017-07-05 14:29:59 -07:00
Song Wenhao
0f850400f2 Display error message if prometheus output fails to listen (#2984) 2017-07-05 14:28:44 -07:00
Daniel Nelson
74a764d549 Update changelog 2017-06-29 16:17:08 -07:00
Aleksey Shirokih
a8a637809e Change default prometheus_client port (#2973) 2017-06-29 14:03:42 -07:00
Daniel Nelson
75dbf2b0f8 Set release date for 1.3.3 2017-06-28 13:05:06 -07:00
Daniel Nelson
90909ae708 Fix build on Windows (#2972) 2017-06-27 16:31:28 -07:00
Daniel Nelson
d40e441240 Use git sha1 as version if not tagged (#2969) 2017-06-27 13:24:06 -07:00
Adam Perlin
cc3d420551 Fix several bugs in minecraft input (#2970) 2017-06-27 13:14:07 -07:00
Daniel Nelson
f2bb4acd4a Update changelog 2017-06-26 15:25:06 -07:00
Bob Shannon
a7595c918a Fix panic in elasticsearch input if cannot determine master (#2954) 2017-06-26 15:23:53 -07:00
Daniel Nelson
a52f90122b Update changelog 2017-06-26 15:15:31 -07:00
Bob Shannon
d217cdc1a6 Add optional usage_active and time_active CPU metrics (#2943) 2017-06-26 15:13:38 -07:00
Daniel Nelson
d5b6f92f3f Log aerospike field value on error 2017-06-26 14:48:22 -07:00
Daniel Nelson
1a636abaaf Update changelog 2017-06-26 14:31:17 -07:00
vodolaz095
1fdbfa4719 Add support for RethinkDB 1.0 handshake protocol (#2963)
Allow the rethinkdb input plugin to work with RethinkDB 2.3.5+ databases that require username/password authorization and the v1.0 handshake protocol

* remove top level header not required in sample config
2017-06-26 14:29:48 -07:00
Daniel Nelson
22fc130e97 Update changelog 2017-06-23 16:56:36 -07:00
Ayrdrie
a726579d50 Add Minecraft input plugin (#2960) 2017-06-23 16:54:12 -07:00
Daniel Nelson
d774c2a170 Update changelog 2017-06-23 11:13:00 -07:00
MatthewCh
6d5bb35f84 Support HOST_PROC in processes and linux_sysctl_fs inputs (#2924) 2017-06-23 11:11:33 -07:00
Daniel Nelson
e028f10586 Update changelog 2017-06-23 11:04:13 -07:00
Daniel Nelson
9276318faf Fix bug parsing default timestamps with modified precision (#2949) 2017-06-23 10:59:04 -07:00
Daniel Nelson
82a04d904d Use strings.Join in statsd input (#2947) 2017-06-21 16:24:23 -07:00
Daniel Nelson
364da9a83d Update changelog 2017-06-21 12:46:57 -07:00
grugrut
ca9cec2c84 Add input plugin for Fail2ban (#2875) 2017-06-21 12:42:13 -07:00
Daniel Nelson
9211985c63 Update changelog 2017-06-21 12:39:09 -07:00
Daniel Nelson
929ba0a637 Remove label value sanitization in prometheus output (#2939) 2017-06-21 12:36:29 -07:00
Daniel Nelson
dcdcb70cb1 Update changelog 2017-06-19 11:52:53 -07:00
Eugene Shilin
cb5a12de3d Add standard SSL options to mysql input (#2933) 2017-06-19 11:42:43 -07:00
Artem Kovardin
193e8fa5ad More explicit 404 error in cassandra input (#2936) 2017-06-19 11:06:49 -07:00
trastle
00b37a7c0d Update README for Prometheus Client Output (#2452) 2017-06-19 11:04:08 -07:00
Daniel Nelson
736322dfc9 Set default ping count in Windows
fixes #2934
2017-06-16 13:39:55 -07:00
Daniel Nelson
ba364988de Document that ping_interval is non-linux only 2017-06-16 13:32:04 -07:00
Daniel Nelson
a729a44284 Update changelog 2017-06-16 13:18:27 -07:00
Daniel Nelson
3ecfd32df5 Allow dos line endings in tail and logparser (#2920)
Parsing line protocol delimited by dos line endings is still illegal in
most cases.
2017-06-16 13:16:48 -07:00
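The mechanical part of the change can be sketched as a one-line normalization (hypothetical helper, for illustration):

```go
package tailer // illustrative package, not part of Telegraf

import "strings"

// trimEOL strips a trailing CRLF (or bare LF) so dos-delimited lines reach
// the parser in the same form as unix lines.
func trimEOL(line string) string {
	return strings.TrimRight(line, "\r\n")
}
```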
Daniel Nelson
ea1888bd26 Update changelog 2017-06-16 12:06:40 -07:00
Simone Rotondo
674c24f987 Add HTTP Proxy support to influxdb output (#2929) 2017-06-16 12:05:08 -07:00
Daniel Nelson
ca72df5868 Update 1.3.2 release date 2017-06-14 12:16:47 -07:00
Daniel Nelson
ea787b83bf Update changelog 2017-06-13 18:07:12 -07:00
Daniel Nelson
949072e8dc Ensure prometheus metrics have same set of labels (#2857) 2017-06-13 18:04:26 -07:00
Daniel Nelson
246f342e6a Update changelog 2017-06-13 17:19:33 -07:00
Daniel Nelson
619b5d4c14 Change node_name to be a tag in aerospike input (#2918) 2017-06-13 17:09:38 -07:00
Daniel Nelson
b0efc22140 Update changelog 2017-06-13 14:10:33 -07:00
Heston Kan
5d1efdbfda Add min/max response time on linux/darwin to ping (#2908) 2017-06-13 14:09:17 -07:00
Daniel Nelson
e3ccd473d2 Update changelog 2017-06-13 13:44:07 -07:00
Dheeraj Dwivedi
f0cbfe4d67 Add secure connection support to graphite output (#2602) 2017-06-13 13:42:11 -07:00
Daniel Nelson
40d8e582ee Update changelog 2017-06-12 18:32:50 -07:00
Daniel Nelson
02b55fe77f Update aws-sdk-go dependency to latest release. (#2912) 2017-06-12 18:31:27 -07:00
Daniel Nelson
0c53de6700 Update changelog 2017-06-08 16:55:27 -07:00
Daniel Nelson
b277e6e2d7 Fix support for mongodb/leofs urls without scheme (#2900)
This was broken by changes to url.Parse in go 1.8.  This change still
accepts the string but prompts the user to move to the correct URL format.
2017-06-08 16:52:01 -07:00
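A sketch of the compatibility shim described (the scheme name is an assumption for illustration; not the plugin's actual code):

```go
package urlfix // illustrative package, not part of Telegraf

import "strings"

// ensureScheme prepends a scheme when none is present, since go 1.8's
// url.Parse no longer treats a bare "host:port" string as a host.
func ensureScheme(rawURL string) string {
	if !strings.Contains(rawURL, "://") {
		return "mongodb://" + rawURL // assumed scheme, for illustration
	}
	return rawURL
}
```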
Daniel Nelson
de4a312eba Update changelog 2017-06-08 13:20:44 -07:00
Matteo Cerutti
4b3b16ef1a Add wildcard support for container inclusion/exclusion (#2793) 2017-06-08 13:17:31 -07:00
Daniel Nelson
4c534433aa Skip kafka_consumer_integration_test due to issue on CircleCI 2017-06-07 18:31:52 -07:00
Daniel Nelson
f9447d01d4 Add release note to changelog regarding kafka_consumer 2017-06-07 18:27:12 -07:00
Seuf
2092443cd7 Add Kafka 0.9+ consumer support (#2487) 2017-06-07 18:22:28 -07:00
Bob Shannon
1c73caba04 Add SSL/TLS support to nginx input plugin (#2883) 2017-06-07 17:52:10 -07:00
Daniel Nelson
84dbf8bb25 Update changelog 2017-06-07 13:46:06 -07:00
Daniel Nelson
a275e6792a Fix metric splitting edge cases (#2896)
Metrics needing one extra byte to fit the output buffer would not be split, so we would emit lines without a line ending. Metrics which overflowed by exactly one field length would be split one field too late, causing truncated fields.
2017-06-07 13:37:54 -07:00
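The boundary condition implied above can be sketched as a fit check that reserves one byte for the line ending (illustrative only, not the actual serializer):

```go
// fits reports whether a serialized metric still fits the output buffer,
// reserving one byte for the trailing '\n' so the off-by-one case is split
// instead of being emitted without a line ending.
func fits(used, lineLen, max int) bool {
	return used+lineLen+1 <= max
}
```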
Daniel Nelson
de7fb2acfe Update changelog 2017-06-06 13:55:11 -07:00
Frederick Roth
91f2764cd5 Add result_type field for http_response input (#2814) 2017-06-06 13:39:07 -07:00
Daniel Nelson
4e91b18bbe Update changelog 2017-06-06 11:56:19 -07:00
Mariusz Brzeski
56a7ffe0e4 Fix timeout option in Windows ping input sample configuration (#2885) 2017-06-06 11:55:01 -07:00
Daniel Nelson
f9462d4fff Update changelog 2017-06-05 14:47:34 -07:00
Sebastian Borza
035905d65e Add timezone support to logparser timestamps (#2882) 2017-06-05 14:45:11 -07:00
Daniel Nelson
a47e6e6efe Update changelog 2017-06-05 12:46:50 -07:00
Daniel Nelson
5bab4616ff Fix udp metric splitting (#2880) 2017-06-05 12:44:29 -07:00
Daniel Nelson
37e01808b5 Set 1.3.1 release date 2017-05-31 15:00:31 -07:00
Daniel Nelson
0b6db905ff Generate sha256 hashes when packaging 2017-05-31 12:29:39 -07:00
Daniel Nelson
9529199a44 Update changelog 2017-05-30 17:40:37 -07:00
Daniel Nelson
be03abd464 Fix length calculation of split metric buffer (#2869) 2017-05-30 17:38:32 -07:00
Daniel Nelson
04aa732e94 Update changelog 2017-05-30 11:04:39 -07:00
Steve Nardone
e7f9db297e Fix panic in mongo input (#2848) 2017-05-30 11:02:26 -07:00
Daniel Nelson
24ea9fdc4d Update changelog 2017-05-26 12:12:18 -07:00
Matteo Cerutti
02d168705c MySQL input: log and continue on field parse error (#2855) 2017-05-26 12:09:43 -07:00
Daniel Nelson
7d7206b3e2 Update changelog 2017-05-25 16:20:29 -07:00
Daniel Nelson
03ca3975b5 Update gopsutil version
fixes #2856
2017-05-25 16:11:49 -07:00
Daniel Nelson
e1088b9eee Update changelog 2017-05-25 13:39:16 -07:00
Daniel Nelson
f47924ffc5 Fix influxdb output database quoting (#2851) 2017-05-25 13:25:52 -07:00
Olivier Lambert
a96f85c847 Add documentation for fetching metrics on Caddy HTTP and Prometheus (#2853) 2017-05-25 13:07:49 -07:00
Sylvain Boily
9148871608 Documentation privilege requirements for specific procstat metrics (#2787) 2017-05-25 13:06:27 -07:00
Matteo Cerutti
7d198f0a68 Add timeout option to ipmi_sensor plugin - solves #2817 (#2818) 2017-05-22 13:41:34 -07:00
Daniel Nelson
1459fab4d6 Remove changelog item from pull request template
The person who merges the PR is now expected to update the CHANGELOG.
2017-05-22 12:06:48 -07:00
Daniel Nelson
b0bd4d55f5 Update CHANGELOG with fixed issue #1137 2017-05-22 12:01:22 -07:00
Steven Burgart
9ab688d62c Fix multiple plugin loading in win_perf_counters (#2800) 2017-05-22 11:58:00 -07:00
Daniel Nelson
8fdc2aec80 Update dependency license file 2017-05-19 18:03:49 -07:00
Lukasz Jagiello
91690b1d3e Consul plugin README typo (#2829) 2017-05-19 11:37:31 -07:00
Daniel Nelson
c61cd73eff Update changelog 2017-05-18 18:11:49 -07:00
rsingh2411
93e638d63e Add Docker container environment variables as tags. Only whitelisted #2580 (#2581) 2017-05-18 16:58:34 -07:00
mced
501c22478e [enh] set db_version at 0 if query version fails (#2819) 2017-05-18 13:52:56 -07:00
Daniel Nelson
7155e90f66 Update changelog for #2815 2017-05-16 17:37:51 -07:00
Timo Mihaljov
c53d9fa9b7 Handle process termination during read from /proc (#2816)
Fixes #2815.
2017-05-16 17:33:35 -07:00
Frederick Roth
ac5ac3161f Fixed inconsistency between HasIntField and IntField (#2813) 2017-05-16 15:25:30 -07:00
Daniel Nelson
bfeb3020a3 Add release date for 1.3.0 2017-05-15 19:52:35 -07:00
Daniel Nelson
b01ecdccff Add back the changelog entry for 2141 2017-05-15 12:54:03 -07:00
Daniel Nelson
da99777f6f Only split metrics if there is an udp output (#2799) 2017-05-12 15:34:05 -07:00
Zack Zatkin-Gold
dd537b3382 Fix telegraf example arguments (#2788)
Many of the examples provided in the documentation use a single dash for
the command line arguments, but the telegraf executable expects two dashes.

There are also some inconsistencies in the ordering of the command line
argument examples.  I've ordered them so that the examples show: config,
config-directory, input-filter, test
2017-05-12 15:22:29 -07:00
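For reference, an invocation using the double-dash form and the ordering described above might look like:

```
telegraf --config telegraf.conf --config-directory /etc/telegraf/telegraf.d --input-filter cpu --test
```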
Sebastian Borza
f74687dcc0 split metrics based on UDPPayload size (#2795) 2017-05-12 14:45:50 -07:00
Daniel Nelson
a47aa0dcc2 Merge branch 'reuse-transport' 2017-05-10 18:19:21 -07:00
Daniel Nelson
17d883c602 Ensure keep-alive is not used in http_response input.
Using Keep-Alive would change the timing for already established
connections.  Prior to this commit, Keep-Alive worked only when using
a response_string_match, due to a failure to close the response body.
2017-05-10 14:40:55 -07:00
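A minimal sketch of the two points above using net/http directly (illustrative, not the plugin's code): disable keep-alive so each probe measures a fresh connection, and always close the response body so connections are not left open.

```go
package probe // illustrative package, not part of Telegraf

import (
	"io"
	"net/http"
	"time"
)

// checkOnce performs a single request without keep-alive and drains the body.
func checkOnce(url string) error {
	client := &http.Client{
		Transport: &http.Transport{DisableKeepAlives: true},
		Timeout:   5 * time.Second,
	}
	resp, err := client.Get(url)
	if err != nil {
		return err
	}
	defer resp.Body.Close() // must close even when the body is unused
	_, err = io.Copy(io.Discard, resp.Body)
	return err
}
```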
Daniel Nelson
a1446a60f7 Update changelog 2017-05-10 13:11:33 -07:00
Daniel Nelson
1931aac284 Fix http_response input creation of transport on every gather 2017-05-09 16:23:38 -07:00
Daniel Nelson
b88eb0f59d Fix prometheus input creation of transport on every gather 2017-05-09 16:21:49 -07:00
Daniel Nelson
e7ad2d0463 Fix apache input creation of transport on every gather. 2017-05-09 16:19:56 -07:00
Daniel Nelson
c28ffb11cb Merge branch 'update-readme' 2017-05-09 13:50:19 -07:00
Daniel Nelson
018fd5ce5b Add missing plugins to README 2017-05-09 13:50:12 -07:00
Daniel Nelson
cd0ec0185a Update contributing section
Hoping this will encourage more non-plugin contributions.
2017-05-09 13:50:12 -07:00
Adrian Sadłocha
8124cfa3ed Improve PostgreSQL plugin documentation (#2777) 2017-05-09 12:58:43 -07:00
Lukasz Jagiello
5af985ef5f Add support for self-signed certs to InfluxDB input plugin (#2773) 2017-05-08 15:20:24 -07:00
Sylvain Boily
1ebd1aaa41 Systemd does not see all shutdowns as failures (#2716) 2017-05-08 11:48:29 -07:00
Daniel Nelson
de3f52b990 Update cloudwatch documentation
Mention that some metrics are available only at intervals larger than 5
minutes.  Update dead links to their new locations and the example config.

closes #1907
2017-05-08 11:31:20 -07:00
Daniel Nelson
4200018a0b Enable s390x builds
closes #2766
2017-05-05 14:39:56 -07:00
Daniel Nelson
67cd1669cc Add SLES11 support to rpm package (#2768) 2017-05-05 14:29:40 -07:00
Sébastien
a8cfe03ba8 fix systemd path in order to add compatibility with SuSe (#2499) 2017-05-05 14:04:33 -07:00
ceseuron
e2983383e4 Fixed sqlserver input to work with case sensitive server collation. (#2749)
Fixed a problem with the sqlserver input where database properties are not returned by Telegraf when SQL Server has been set up with a case-sensitive server-level collation.

* Added bugfix entry to CHANGELOG.md for sqlserver collation input fix.
2017-05-04 10:47:03 -07:00
Daniel Nelson
8cf0dc769b Add 1.4 section to changelog 2017-05-03 17:29:34 -07:00
Daniel Nelson
613de8a80d Remove documentation in kafka_consumer for metric_buffer 2017-05-03 11:51:49 -07:00
Damien Krotkine
f5c890cc1d reflect zookeeper chroot config in readme (#2759) 2017-05-03 11:50:08 -07:00
Daniel Nelson
f7f1eaef65 Return an error if no valid patterns. (#2753) 2017-05-02 14:54:38 -07:00
Alexander Blagoev
188703e204 Improve redis input documentation (#2708) 2017-05-02 11:43:07 -07:00
Patrick Hemmer
52c19af0ba fix close on closed socket_writer (#2748) 2017-05-02 11:06:49 -07:00
Daniel Nelson
5c88965084 Add initial documentation for rabbitmq input. (#2745) 2017-05-01 18:55:48 -07:00
Daniel Nelson
6e76731b7e Don't log error creating database on connect (#2740)
closes #2739
2017-04-28 15:58:46 -07:00
Daniel Nelson
c7a0e40c87 Update telegraf.conf 2017-04-28 13:47:32 -07:00
Daniel Nelson
086a2f5f12 Clarify retention policy option for influxdb output
closes #2696
2017-04-28 13:46:23 -07:00
Daniel Nelson
1da1c4753e Clarify retention policy option for influxdb output
closes #2696
2017-04-28 13:40:58 -07:00
Daniel Nelson
a083e1af7d Use go 1.8.1 for CI and Release builds (#2732) 2017-04-27 16:18:11 -07:00
269 changed files with 15361 additions and 1897 deletions

.github/PULL_REQUEST_TEMPLATE.md

@@ -1,5 +1,5 @@
 ### Required for all PRs:
 
-- [ ] CHANGELOG.md updated (we recommend not updating this until the PR has been approved by a maintainer)
-- [ ] Sign [CLA](https://influxdata.com/community/cla/) (if not already signed)
-- [ ] README.md updated (if adding a new plugin)
+- [ ] Signed [CLA](https://influxdata.com/community/cla/).
+- [ ] Associated README.md updated.
+- [ ] Has appropriate unit tests.

CHANGELOG.md

@@ -1,3 +1,146 @@
## v1.4.2 [2017-10-10]
### Bugfixes
- [#3259](https://github.com/influxdata/telegraf/issues/3259): Fix error if int larger than 32-bit in /proc/vmstat.
- [#3265](https://github.com/influxdata/telegraf/issues/3265): Fix parsing of JSON with a UTF8 BOM in httpjson.
- [#2887](https://github.com/influxdata/telegraf/issues/2887): Allow JSON data format to contain zero metrics.
- [#3284](https://github.com/influxdata/telegraf/issues/3284): Fix format of connection_timeout in mqtt_consumer.
- [#3081](https://github.com/influxdata/telegraf/issues/3081): Fix case sensitivity error in sqlserver input.
- [#3297](https://github.com/influxdata/telegraf/issues/3297): Add support for proxy environment variables to http_response.
- [#1588](https://github.com/influxdata/telegraf/issues/1588): Add support for standard proxy env vars in outputs.
- [#3282](https://github.com/influxdata/telegraf/issues/3282): Fix panic in cpu input if number of cpus changes.
- [#2854](https://github.com/influxdata/telegraf/issues/2854): Use chunked transfer encoding in InfluxDB output.
## v1.4.1 [2017-09-26]
### Bugfixes
- [#3167](https://github.com/influxdata/telegraf/issues/3167): Fix MQTT input exits if Broker is not available on startup.
- [#3217](https://github.com/influxdata/telegraf/issues/3217): Fix optional field value conversions in fluentd input.
- [#3227](https://github.com/influxdata/telegraf/issues/3227): Whitelist allowed char classes for opentsdb output.
- [#3232](https://github.com/influxdata/telegraf/issues/3232): Fix counter and gauge metric types.
- [#3235](https://github.com/influxdata/telegraf/issues/3235): Fix skipped line with empty target in iptables.
- [#3175](https://github.com/influxdata/telegraf/issues/3175): Fix duplicate keys in perf counters sqlserver query.
- [#3230](https://github.com/influxdata/telegraf/issues/3230): Fix panic in statsd p100 calculation.
- [#3242](https://github.com/influxdata/telegraf/issues/3242): Fix arm64 packages contain 32-bit executable.
## v1.4 [2017-09-05]
### Release Notes
- The `kafka_consumer` input has been updated to support Kafka 0.9 and
above style consumer offset handling. The previous version of this plugin
supporting Kafka 0.8 and below is available as the `kafka_consumer_legacy`
plugin.
- In the `aerospike` input the `node_name` field has been changed to be a tag
for both the `aerospike_node` and `aerospike_namespace` measurements.
- The default prometheus_client port has been changed to 9273.
### New Plugins
- [fail2ban](./plugins/inputs/fail2ban/README.md) - Thanks to @grugrut
- [fluentd](./plugins/inputs/fluentd/README.md) - Thanks to @DanKans
- [histogram](./plugins/aggregators/histogram/README.md) - Thanks to @vlamug
- [minecraft](./plugins/inputs/minecraft/README.md) - Thanks to @adamperlin & @Ayrdrie
- [openldap](./plugins/inputs/openldap/README.md) - Thanks to @cobaugh
- [salesforce](./plugins/inputs/salesforce/README.md) - Thanks to @rody
- [tomcat](./plugins/inputs/tomcat/README.md) - Thanks to @mlindes
- [win_services](./plugins/inputs/win_services/README.md) - Thanks to @vlastahajek
- [zipkin](./plugins/inputs/zipkin/README.md) - Thanks to @adamperlin & @Ayrdrie
### Features
- [#2487](https://github.com/influxdata/telegraf/pull/2487): Add Kafka 0.9+ consumer support
- [#2773](https://github.com/influxdata/telegraf/pull/2773): Add support for self-signed certs to InfluxDB input plugin
- [#2293](https://github.com/influxdata/telegraf/pull/2293): Add TCP listener for statsd input
- [#2581](https://github.com/influxdata/telegraf/pull/2581): Add Docker container environment variables as tags. Only whitelisted
- [#2817](https://github.com/influxdata/telegraf/pull/2817): Add timeout option to IPMI sensor plugin
- [#2883](https://github.com/influxdata/telegraf/pull/2883): Add support for an optional SSL/TLS configuration to nginx input plugin
- [#2882](https://github.com/influxdata/telegraf/pull/2882): Add timezone support for logparser timestamps.
- [#2814](https://github.com/influxdata/telegraf/pull/2814): Add result_type field for http_response input.
- [#2734](https://github.com/influxdata/telegraf/pull/2734): Add include/exclude filters for docker containers.
- [#2602](https://github.com/influxdata/telegraf/pull/2602): Add secure connection support to graphite output.
- [#2908](https://github.com/influxdata/telegraf/pull/2908): Add min/max response time on linux/darwin to ping.
- [#2929](https://github.com/influxdata/telegraf/pull/2929): Add HTTP Proxy support to influxdb output.
- [#2933](https://github.com/influxdata/telegraf/pull/2933): Add standard SSL options to mysql input.
- [#2875](https://github.com/influxdata/telegraf/pull/2875): Add input plugin for fail2ban.
- [#2924](https://github.com/influxdata/telegraf/pull/2924): Support HOST_PROC in processes and linux_sysctl_fs inputs.
- [#2960](https://github.com/influxdata/telegraf/pull/2960): Add Minecraft input plugin.
- [#2963](https://github.com/influxdata/telegraf/pull/2963): Add support for RethinkDB 1.0 handshake protocol.
- [#2943](https://github.com/influxdata/telegraf/pull/2943): Add optional usage_active and time_active CPU metrics.
- [#2973](https://github.com/influxdata/telegraf/pull/2973): Change default prometheus_client port.
- [#2661](https://github.com/influxdata/telegraf/pull/2661): Add fluentd input plugin.
- [#2990](https://github.com/influxdata/telegraf/pull/2990): Add result_type field to net_response input plugin.
- [#2571](https://github.com/influxdata/telegraf/pull/2571): Add read timeout to socket_listener
- [#2612](https://github.com/influxdata/telegraf/pull/2612): Add input plugin for OpenLDAP.
- [#3042](https://github.com/influxdata/telegraf/pull/3042): Add network option to dns_query.
- [#3054](https://github.com/influxdata/telegraf/pull/3054): Add redis_version field to redis input.
- [#3063](https://github.com/influxdata/telegraf/pull/3063): Add tls options to docker input.
- [#2387](https://github.com/influxdata/telegraf/pull/2387): Add histogram aggregator plugin.
- [#3080](https://github.com/influxdata/telegraf/pull/3080): Add zipkin input plugin.
- [#3023](https://github.com/influxdata/telegraf/pull/3023): Add Windows Services input plugin.
- [#3098](https://github.com/influxdata/telegraf/pull/3098): Add path tag to logparser containing path of logfile.
- [#3075](https://github.com/influxdata/telegraf/pull/3075): Add salesforce input plugin.
- [#3097](https://github.com/influxdata/telegraf/pull/3097): Add option to run varnish under sudo.
- [#3119](https://github.com/influxdata/telegraf/pull/3119): Add weighted_io_time to diskio input.
- [#2978](https://github.com/influxdata/telegraf/pull/2978): Add gzip content-encoding support to influxdb output.
- [#3127](https://github.com/influxdata/telegraf/pull/3127): Allow using system plugin in Windows.
- [#3112](https://github.com/influxdata/telegraf/pull/3112): Add tomcat input plugin.
- [#3182](https://github.com/influxdata/telegraf/pull/3182): HTTP headers can be added to InfluxDB output.
### Bugfixes
- [#2607](https://github.com/influxdata/telegraf/issues/2607): Improve logging of errors in Cassandra input.
- [#2819](https://github.com/influxdata/telegraf/pull/2819): [enh] set db_version at 0 if query version fails
- [#2749](https://github.com/influxdata/telegraf/pull/2749): Fixed sqlserver input to work with case sensitive server collation.
- [#2716](https://github.com/influxdata/telegraf/pull/2716): Systemd does not see all shutdowns as failures
- [#2782](https://github.com/influxdata/telegraf/pull/2782): Reuse transports in input plugins
- [#2815](https://github.com/influxdata/telegraf/issues/2815): Inputs processes fails with "no such process".
- [#1137](https://github.com/influxdata/telegraf/issues/1137): Fix multiple plugin loading in win_perf_counters.
- [#2855](https://github.com/influxdata/telegraf/pull/2855): MySQL input: log and continue on field parse error.
- [#2885](https://github.com/influxdata/telegraf/pull/2885): Fix timeout option in Windows ping input sample configuration.
- [#2911](https://github.com/influxdata/telegraf/issues/2911): Fix Kinesis output plugin in govcloud.
- [#2917](https://github.com/influxdata/telegraf/issues/2917): Fix Aerospike input adds all nodes to a single series.
- [#2452](https://github.com/influxdata/telegraf/pull/2452): Improve Prometheus Client output documentation.
- [#2984](https://github.com/influxdata/telegraf/pull/2984): Display error message if prometheus output fails to listen.
- [#2997](https://github.com/influxdata/telegraf/issues/2997): Fix elasticsearch output content type detection warning.
- [#2914](https://github.com/influxdata/telegraf/issues/2914): Prevent possible deadlock when using aggregators.
- [#2860](https://github.com/influxdata/telegraf/issues/2860): Fix combined tagdrop/tagpass filtering.
- [#3036](https://github.com/influxdata/telegraf/pull/3036): Fix filtering when both pass and drop match an item.
- [#2964](https://github.com/influxdata/telegraf/issues/2964): Only report cpu usage for online cpus in docker input.
- [#3050](https://github.com/influxdata/telegraf/pull/3050): Start first aggregator period at startup time.
- [#2906](https://github.com/influxdata/telegraf/issues/2906): Fix panic in logparser if file cannot be opened.
- [#2886](https://github.com/influxdata/telegraf/issues/2886): Default to localhost if zookeeper has no servers set.
- [#2457](https://github.com/influxdata/telegraf/issues/2457): Fix docker memory and cpu reporting in Windows.
- [#3058](https://github.com/influxdata/telegraf/issues/3058): Allow iptable entries with trailing text.
- [#1680](https://github.com/influxdata/telegraf/issues/1680): Sanitize password from couchbase metric.
- [#3104](https://github.com/influxdata/telegraf/issues/3104): Converge to typed value in prometheus output.
- [#2899](https://github.com/influxdata/telegraf/issues/2899): Skip compilation of logparser and tail on solaris.
- [#2951](https://github.com/influxdata/telegraf/issues/2951): Discard logging from tail library.
- [#3126](https://github.com/influxdata/telegraf/pull/3126): Remove log message on ping timeout.
- [#3144](https://github.com/influxdata/telegraf/issues/3144): Don't retry points beyond retention policy.
- [#3015](https://github.com/influxdata/telegraf/issues/3015): Don't start Telegraf on install in Amazon Linux.
- [#3153](https://github.com/influxdata/telegraf/issues/3153): Enable hddtemp input on all platforms.
- [#3142](https://github.com/influxdata/telegraf/issues/3142): Escape backslash within string fields.
- [#3162](https://github.com/influxdata/telegraf/issues/3162): Fix parsing of SHM remotes in ntpq input.
- [#3149](https://github.com/influxdata/telegraf/issues/3149): Don't fail parsing zpool stats if pool health is UNAVAIL on FreeBSD.
- [#2672](https://github.com/influxdata/telegraf/issues/2672): Fix NSQ input plugin when used with version 1.0.0-compat.
- [#2523](https://github.com/influxdata/telegraf/issues/2523): Added CloudWatch metric constraint validation.
- [#3179](https://github.com/influxdata/telegraf/issues/3179): Skip non-numerical values in graphite format.
- [#3187](https://github.com/influxdata/telegraf/issues/3187): Fix panic when handling string fields with escapes.
## v1.3.5 [2017-07-26]
### Bugfixes
- [#3049](https://github.com/influxdata/telegraf/issues/3049): Fix prometheus output cannot be reloaded.
- [#3037](https://github.com/influxdata/telegraf/issues/3037): Fix filestat reporting exists when cannot list directory.
- [#2386](https://github.com/influxdata/telegraf/issues/2386): Fix ntpq parse issue when using dns_lookup.
- [#2554](https://github.com/influxdata/telegraf/issues/2554): Fix panic when agent.interval = "0s".
## v1.3.4 [2017-07-12]
### Bugfixes

Godeps (38 changes)

@@ -1,44 +1,58 @@
collectd.org 2ce144541b8903101fb8f1483cc0497a68798122
github.com/Shopify/sarama 574d3147eee384229bf96a5d12c207fe7b5234f3
github.com/Sirupsen/logrus 61e43dc76f7ee59a82bdf3d71033dc12bea4c77d
github.com/aerospike/aerospike-client-go 95e1ad7791bdbca44707fedbb29be42024900d9c
github.com/amir/raidman c74861fe6a7bb8ede0a010ce4485bdbb4fc4c985
github.com/aws/aws-sdk-go 7524cb911daddd6e5c9195def8e59ae892bef8d9
github.com/apache/thrift 4aaa92ece8503a6da9bc6701604f69acf2b99d07
github.com/aws/aws-sdk-go c861d27d0304a79f727e9a8a4e2ac1e74602fdc0
github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
github.com/bsm/sarama-cluster ccdc0803695fbce22f1706d04ded46cd518fd832
github.com/cenkalti/backoff b02f2bbce11d7ea6b97f282ef1771b0fe2f65ef3
github.com/couchbase/go-couchbase bfe555a140d53dc1adf390f1a1d4b0fd4ceadb28
github.com/couchbase/gomemcached 4a25d2f4e1dea9ea7dd76dfd943407abf9b07d29
github.com/couchbase/goutils 5823a0cbaaa9008406021dc5daf80125ea30bba6
github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
github.com/docker/docker b89aff1afa1f61993ab2ba18fd62d9375a195f5d
github.com/docker/docker f5ec1e2936dcbe7b5001c2b817188b095c700c27
github.com/docker/go-connections 990a1a1a70b0da4c4cb70e117971a4f0babfbf1a
github.com/eapache/go-resiliency b86b1ec0dd4209a588dc1285cdd471e73525c0b3
github.com/eapache/go-xerial-snappy bb955e01b9346ac19dc29eb16586c90ded99a98c
github.com/eapache/queue 44cc805cf13205b55f69e14bcb69867d1ae92f98
github.com/eclipse/paho.mqtt.golang d4f545eb108a2d19f9b1a735689dbfb719bc21fb
github.com/go-logfmt/logfmt 390ab7935ee28ec6b286364bba9b4dd6410cb3d5
github.com/go-sql-driver/mysql 2e00b5cd70399450106cec6431c2e2ce3cae5034
github.com/gobwas/glob bea32b9cd2d6f55753d94a28e959b13f0244797a
github.com/go-ini/ini 9144852efba7c4daf409943ee90767da62d55438
github.com/gogo/protobuf 7b6c6391c4ff245962047fc1e2c6e08b1cdfa0e8
github.com/golang/protobuf 8ee79997227bf9b34611aee7946ae64735e6fd93
github.com/golang/snappy 7db9049039a047d955fe8c19b83c8ff5abd765c7
github.com/go-ole/go-ole be49f7c07711fcb603cff39e1de7c67926dc0ba7
github.com/google/go-cmp f94e52cad91c65a63acc1e75d4be223ea22e99bc
github.com/gorilla/mux 392c28fe23e1c45ddba891b0320b3b5df220beea
github.com/go-sql-driver/mysql 2e00b5cd70399450106cec6431c2e2ce3cae5034
github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478
github.com/hashicorp/consul 63d2fc68239b996096a1c55a0d4b400ea4c2583f
github.com/influxdata/tail a395bf99fe07c233f41fba0735fa2b13b58588ea
github.com/influxdata/toml 5d1d907f22ead1cd47adde17ceec5bda9cacaf8f
github.com/influxdata/wlog 7c63b0a71ef8300adc255344d275e10e5c3a71ec
github.com/jackc/pgx b84338d7d62598f75859b2b146d830b22f1b9ec8
github.com/jmespath/go-jmespath bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d
github.com/kardianos/osext c2c54e542fb797ad986b31721e1baedf214ca413
github.com/kardianos/service 6d3a0ee7d3425d9d835debc51a0ca1ffa28f4893
github.com/kballard/go-shellquote d8ec1a69a250a17bb0e419c386eac1f3711dc142
github.com/klauspost/crc32 cb6bfca970f6908083f26f39a79009d608efd5cd
github.com/matttproud/golang_protobuf_extensions c12348ce28de40eed0136aa2b644d0ee0650e56c
github.com/Microsoft/go-winio ce2922f643c8fd76b46cadc7f404a06282678b34
github.com/miekg/dns 99f84ae56e75126dd77e5de4fae2ea034a468ca1
github.com/naoina/go-stringutil 6b638e95a32d0c1131db0e7fe83775cbea4a0d0b
github.com/nats-io/go-nats ea9585611a4ab58a205b9b125ebd74c389a6b898
github.com/nats-io/nats ea9585611a4ab58a205b9b125ebd74c389a6b898
github.com/nats-io/nuid 289cccf02c178dc782430d534e3c1f5b72af807f
github.com/nsqio/go-nsq a53d495e81424aaf7a7665a9d32a97715c40e953
github.com/opencontainers/runc 89ab7f2ccc1e45ddf6485eaa802c35dcf321dfc8
github.com/opentracing-contrib/go-observer a52f2342449246d5bcc273e65cbdcfa5f7d6c63c
github.com/opentracing/opentracing-go 06f47b42c792fef2796e9681353e1d908c417827
github.com/openzipkin/zipkin-go-opentracing 1cafbdfde94fbf2b373534764e0863aa3bd0bf7b
github.com/pierrec/lz4 5c9560bfa9ace2bf86080bf40d46b34ae44604df
github.com/pierrec/xxHash 5a004441f897722c627870a981d02b29924215fa
github.com/pkg/errors 645ef00459ed84a119197bfb8d8205042c6df63d
github.com/pmezard/go-difflib/difflib 792786c7400a136282c1664665ae0a8db921c6c2
github.com/prometheus/client_golang c317fb74746eac4fc65fe3909195f4cf67c5562a
github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
github.com/prometheus/common dd2f054febf4a6c00f2343686efb775948a8bff4
@@ -47,8 +61,13 @@ github.com/rcrowley/go-metrics 1f30fe9094a513ce4c700b9a54458bbb0c96996c
github.com/samuel/go-zookeeper 1d7be4effb13d2d908342d349d71a284a7542693
github.com/satori/go.uuid 5bf94b69c6b68ee1b541973bb8e1144db23a194b
github.com/shirou/gopsutil 9a4a9167ad3b4355dbf1c2c7a0f5f0d3fb1e9ab9
github.com/shirou/w32 3c9377fc6748f222729a8270fe2775d149a249ad
github.com/Shopify/sarama c01858abb625b73a3af51d0798e4ad42c8147093
github.com/Sirupsen/logrus 61e43dc76f7ee59a82bdf3d71033dc12bea4c77d
github.com/soniah/gosnmp 5ad50dc75ab389f8a1c9f8a67d3a1cd85f67ed15
github.com/StackExchange/wmi f3e2bae1e0cb5aef83e319133eabfee30013a4a5
github.com/streadway/amqp 63795daa9a446c920826655f26ba31c81c860fd6
github.com/stretchr/objx 1a9d0bb9f541897e62256577b352fdbc1fb4fd94
github.com/stretchr/testify 4d4bfba8f1d1027c4fdbe371823030df51419987
github.com/vjeantet/grok d73e972b60935c7fec0b4ffbc904ed39ecaf7efe
github.com/wvanbergen/kafka bc265fedb9ff5b5c5d3c0fdcef4a819b3523d3ee
@@ -57,9 +76,14 @@ github.com/yuin/gopher-lua 66c871e454fcf10251c61bf8eff02d0978cae75a
github.com/zensqlmonitor/go-mssqldb ffe5510c6fa5e15e6d983210ab501c815b56b363
golang.org/x/crypto dc137beb6cce2043eb6b5f223ab8bf51c32459f4
golang.org/x/net f2499483f923065a842d38eb4c7f1927e6fc6e6d
golang.org/x/sys 739734461d1c916b6c72a63d7efda2b27edb369f
golang.org/x/text 506f9d5c962f284575e88337e7d9296d27e729d3
gopkg.in/dancannon/gorethink.v1 edc7a6a68e2d8015f5ffe1b2560eed989f8a45be
gopkg.in/asn1-ber.v1 4e86f4367175e39f69d9358a5f17b4dda270378d
gopkg.in/fatih/pool.v2 6e328e67893eb46323ad06f0e92cb9536babbabc
gopkg.in/fsnotify.v1 a8a77c9133d2d6fd8334f3260d06f60e8d80a5fb
gopkg.in/gorethink/gorethink.v3 7ab832f7b65573104a555d84a27992ae9ea1f659
gopkg.in/ldap.v2 8168ee085ee43257585e50c6441aadf54ecb2c9f
gopkg.in/mgo.v2 3f83fa5005286a7fe593b055f0d7771a7dce4655
gopkg.in/olivere/elastic.v5 ee3ebceab960cf68ab9a89ee6d78c031ef5b4a4e
gopkg.in/olivere/elastic.v5 3113f9b9ad37509fe5f8a0e5e91c96fdc4435e26
gopkg.in/tomb.v1 dd632973f1e7218eb1089048e0798ec9ae7dceb8
gopkg.in/yaml.v2 4c78c975fe7c825c6d1466c42be594d1d6f3aba6

Godeps_windows (deleted)

@@ -1,11 +0,0 @@
github.com/Microsoft/go-winio ce2922f643c8fd76b46cadc7f404a06282678b34
github.com/StackExchange/wmi f3e2bae1e0cb5aef83e319133eabfee30013a4a5
github.com/go-ole/go-ole be49f7c07711fcb603cff39e1de7c67926dc0ba7
github.com/shirou/w32 3c9377fc6748f222729a8270fe2775d149a249ad
golang.org/x/sys a646d33e2ee3172a661fc09bca23bb4889a41bc8
github.com/go-ini/ini 9144852efba7c4daf409943ee90767da62d55438
github.com/jmespath/go-jmespath bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d
github.com/pmezard/go-difflib/difflib 792786c7400a136282c1664665ae0a8db921c6c2
github.com/stretchr/objx 1a9d0bb9f541897e62256577b352fdbc1fb4fd94
gopkg.in/fsnotify.v1 a8a77c9133d2d6fd8334f3260d06f60e8d80a5fb
gopkg.in/tomb.v1 dd632973f1e7218eb1089048e0798ec9ae7dceb8

Makefile (135 changes)

@@ -1,56 +1,72 @@
VERSION := $(shell sh -c 'git describe --always --tags')
BRANCH := $(shell sh -c 'git rev-parse --abbrev-ref HEAD')
COMMIT := $(shell sh -c 'git rev-parse --short HEAD')
PREFIX := /usr/local
VERSION := $(shell git describe --exact-match --tags 2>/dev/null)
BRANCH := $(shell git rev-parse --abbrev-ref HEAD)
COMMIT := $(shell git rev-parse --short HEAD)
ifdef GOBIN
PATH := $(GOBIN):$(PATH)
else
PATH := $(subst :,/bin:,$(GOPATH))/bin:$(PATH)
endif
# Standard Telegraf build
default: prepare build
TELEGRAF := telegraf$(shell go tool dist env | grep -q 'GOOS=.windows.' && echo .exe)
# Windows build
windows: prepare-windows build-windows
LDFLAGS := $(LDFLAGS) -X main.commit=$(COMMIT) -X main.branch=$(BRANCH)
ifdef VERSION
LDFLAGS += -X main.version=$(VERSION)
endif
# Only run the build (no dependency grabbing)
build:
go install -ldflags \
"-X main.version=$(VERSION) -X main.commit=$(COMMIT) -X main.branch=$(BRANCH)" ./...
build-windows:
GOOS=windows GOARCH=amd64 go build -o telegraf.exe -ldflags \
"-X main.version=$(VERSION) -X main.commit=$(COMMIT) -X main.branch=$(BRANCH)" \
./cmd/telegraf/telegraf.go
all:
$(MAKE) deps
$(MAKE) telegraf
build-for-docker:
CGO_ENABLED=0 GOOS=linux go build -installsuffix cgo -o telegraf -ldflags \
"-s -X main.version=$(VERSION) -X main.commit=$(COMMIT) -X main.branch=$(BRANCH)" \
./cmd/telegraf/telegraf.go
deps:
go get github.com/sparrc/gdm
gdm restore
telegraf:
go build -i -o $(TELEGRAF) -ldflags "$(LDFLAGS)" ./cmd/telegraf/telegraf.go
go-install:
go install -ldflags "-w -s $(LDFLAGS)" ./cmd/telegraf
install: telegraf
mkdir -p $(DESTDIR)$(PREFIX)/bin/
cp $(TELEGRAF) $(DESTDIR)$(PREFIX)/bin/
test:
go test -short ./...
test-windows:
go test ./plugins/inputs/ping/...
go test ./plugins/inputs/win_perf_counters/...
go test ./plugins/inputs/win_services/...
lint:
go vet ./...
test-all: lint
go test ./...
# run package script
package:
./scripts/build.py --package --version="$(VERSION)" --platform=linux --arch=all --upload
# Get dependencies and use gdm to checkout changesets
prepare:
go get github.com/sparrc/gdm
gdm restore
clean:
-rm -f telegraf
-rm -f telegraf.exe
# Use the windows godeps file to prepare dependencies
prepare-windows:
go get github.com/sparrc/gdm
gdm restore
gdm restore -f Godeps_windows
# Run all docker containers necessary for unit tests
# Run all docker containers necessary for integration tests
docker-run:
docker run --name aerospike -p "3000:3000" -d aerospike/aerospike-server:3.9.0
docker run --name zookeeper -p "2181:2181" -d wurstmeister/zookeeper
docker run --name kafka \
-e ADVERTISED_HOST=localhost \
-e ADVERTISED_PORT=9092 \
-p "2181:2181" -p "9092:9092" \
-d spotify/kafka
--link zookeeper:zookeeper \
-e KAFKA_ADVERTISED_HOST_NAME=localhost \
-e KAFKA_ADVERTISED_PORT=9092 \
-e KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181 \
-e KAFKA_CREATE_TOPICS="test:1:1" \
-p "9092:9092" \
-d wurstmeister/kafka
docker run --name elasticsearch -p "9200:9200" -p "9300:9300" -d elasticsearch:5
docker run --name mysql -p "3306:3306" -e MYSQL_ALLOW_EMPTY_PASSWORD=yes -d mysql
docker run --name memcached -p "11211:11211" -d memcached
@@ -61,38 +77,41 @@ docker-run:
docker run --name mqtt -p "1883:1883" -d ncarlier/mqtt
docker run --name riemann -p "5555:5555" -d stealthly/docker-riemann
docker run --name nats -p "4222:4222" -d nats
docker run --name openldap \
-e SLAPD_CONFIG_ROOTDN="cn=manager,cn=config" \
-e SLAPD_CONFIG_ROOTPW="secret" \
-p "389:389" -p "636:636" \
-d cobaugh/openldap-alpine
# Run docker containers necessary for CircleCI unit tests
# Run docker containers necessary for integration tests; skipping services provided
# by CircleCI
docker-run-circle:
docker run --name aerospike -p "3000:3000" -d aerospike/aerospike-server:3.9.0
docker run --name zookeeper -p "2181:2181" -d wurstmeister/zookeeper
docker run --name kafka \
-e ADVERTISED_HOST=localhost \
-e ADVERTISED_PORT=9092 \
-p "2181:2181" -p "9092:9092" \
-d spotify/kafka
--link zookeeper:zookeeper \
-e KAFKA_ADVERTISED_HOST_NAME=localhost \
-e KAFKA_ADVERTISED_PORT=9092 \
-e KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181 \
-e KAFKA_CREATE_TOPICS="test:1:1" \
-p "9092:9092" \
-d wurstmeister/kafka
docker run --name elasticsearch -p "9200:9200" -p "9300:9300" -d elasticsearch:5
docker run --name nsq -p "4150:4150" -d nsqio/nsq /nsqd
docker run --name mqtt -p "1883:1883" -d ncarlier/mqtt
docker run --name riemann -p "5555:5555" -d stealthly/docker-riemann
docker run --name nats -p "4222:4222" -d nats
docker run --name openldap \
-e SLAPD_CONFIG_ROOTDN="cn=manager,cn=config" \
-e SLAPD_CONFIG_ROOTPW="secret" \
-p "389:389" -p "636:636" \
-d cobaugh/openldap-alpine
# Kill all docker containers, ignore errors
docker-kill:
-docker kill nsq aerospike redis rabbitmq postgres memcached mysql kafka mqtt riemann nats elasticsearch
-docker rm nsq aerospike redis rabbitmq postgres memcached mysql kafka mqtt riemann nats elasticsearch
-docker kill aerospike elasticsearch kafka memcached mqtt mysql nats nsq \
openldap postgres rabbitmq redis riemann zookeeper
-docker rm aerospike elasticsearch kafka memcached mqtt mysql nats nsq \
openldap postgres rabbitmq redis riemann zookeeper
# Run full unit tests using docker containers (includes setup and teardown)
test: vet docker-kill docker-run
# Sleeping for kafka leadership election, TSDB setup, etc.
sleep 60
# SUCCESS, running tests
go test -race ./...
# Run "short" unit tests
test-short: vet
go test -short ./...
vet:
go vet ./...
.PHONY: test test-short vet build default
.PHONY: deps telegraf telegraf.exe install test test-windows lint test-all \
package clean docker-run docker-run-circle docker-kill
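For readers unfamiliar with the `-X` linker flags the Makefile relies on, here is a minimal, self-contained sketch (a hypothetical `main.go`, not Telegraf's actual source) of how `go build -ldflags "-X main.version=..."` overwrites package-level variables at link time:

```go
package main

import "fmt"

// These are overwritten at link time, e.g.:
//   go build -ldflags "-X main.version=1.4.2 -X main.commit=abc1234 -X main.branch=master"
// (the values here are placeholders, not real release identifiers)
var (
	version string
	commit  string
	branch  string
)

func main() {
	// Built without -ldflags, all three print as empty strings.
	fmt.Printf("version=%q commit=%q branch=%q\n", version, commit, branch)
}
```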

View File

@@ -20,20 +20,20 @@ For more information on Processor and Aggregator plugins please [read this](./do
New plugins are designed to be easy to contribute; we'll eagerly accept pull
requests and will manage the set of plugins that Telegraf supports.
See the [contributing guide](CONTRIBUTING.md) for instructions on writing
new plugins.
## Contributing
There are many ways to contribute:
- Fix and [report bugs](https://github.com/influxdata/telegraf/issues/new)
- [Improve documentation](https://github.com/influxdata/telegraf/issues?q=is%3Aopen+label%3Adocumentation)
- [Review code and feature proposals](https://github.com/influxdata/telegraf/pulls)
- Answer questions on github and on the [Community Site](https://community.influxdata.com/)
- [Contribute plugins](CONTRIBUTING.md)
## Installation:
You can download the binaries directly from the
[downloads](https://www.influxdata.com/downloads) page.
A few alternate installs are available here as well:
### FreeBSD tarball:
Latest:
* https://dl.influxdata.com/telegraf/releases/telegraf-VERSION_freebsd_amd64.tar.gz
You can download the binaries directly from the [downloads](https://www.influxdata.com/downloads) page
or from the [releases](https://github.com/influxdata/telegraf/releases) section.
### Ansible Role:
@@ -41,13 +41,14 @@ Ansible role: https://github.com/rossmcdonald/telegraf
### From Source:
Telegraf manages dependencies via [gdm](https://github.com/sparrc/gdm),
which gets installed via the Makefile
if you don't have it already. You also must build with golang version 1.8+.
Telegraf requires golang version 1.8+; the Makefile requires GNU make.
Dependencies are managed with [gdm](https://github.com/sparrc/gdm),
which is installed by the Makefile if you don't have it already.
1. [Install Go](https://golang.org/doc/install)
2. [Setup your GOPATH](https://golang.org/doc/code.html#GOPATH)
3. Run `go get github.com/influxdata/telegraf`
3. Run `go get -d github.com/influxdata/telegraf`
4. Run `cd $GOPATH/src/github.com/influxdata/telegraf`
5. Run `make`
@@ -56,37 +57,37 @@ if you don't have it already. You also must build with golang version 1.8+.
See usage with:
```
telegraf --help
./telegraf --help
```
#### Generate a telegraf config file:
```
telegraf config > telegraf.conf
./telegraf config > telegraf.conf
```
#### Generate config with only cpu input & influxdb output plugins defined
#### Generate config with only cpu input & influxdb output plugins defined:
```
telegraf --input-filter cpu --output-filter influxdb config
./telegraf --input-filter cpu --output-filter influxdb config
```
#### Run a single telegraf collection, outputting metrics to stdout
#### Run a single telegraf collection, outputting metrics to stdout:
```
telegraf --config telegraf.conf -test
./telegraf --config telegraf.conf --test
```
#### Run telegraf with all plugins defined in config file
#### Run telegraf with all plugins defined in config file:
```
telegraf --config telegraf.conf
./telegraf --config telegraf.conf
```
#### Run telegraf, enabling the cpu & memory input, and influxdb output plugins
#### Run telegraf, enabling the cpu & memory input, and influxdb output plugins:
```
telegraf --config telegraf.conf -input-filter cpu:mem -output-filter influxdb
./telegraf --config telegraf.conf --input-filter cpu:mem --output-filter influxdb
```
@@ -117,7 +118,10 @@ configuration options.
* [dovecot](./plugins/inputs/dovecot)
* [elasticsearch](./plugins/inputs/elasticsearch)
* [exec](./plugins/inputs/exec) (generic executable plugin, supports JSON, influx, graphite and nagios)
* [fail2ban](./plugins/inputs/fail2ban)
* [filestat](./plugins/inputs/filestat)
* [fluentd](./plugins/inputs/fluentd)
* [graylog](./plugins/inputs/graylog)
* [haproxy](./plugins/inputs/haproxy)
* [hddtemp](./plugins/inputs/hddtemp)
* [http_response](./plugins/inputs/http_response)
@@ -135,6 +139,7 @@ configuration options.
* [mailchimp](./plugins/inputs/mailchimp)
* [memcached](./plugins/inputs/memcached)
* [mesos](./plugins/inputs/mesos)
* [minecraft](./plugins/inputs/minecraft)
* [mongodb](./plugins/inputs/mongodb)
* [mysql](./plugins/inputs/mysql)
* [net_response](./plugins/inputs/net_response)
@@ -142,6 +147,7 @@ configuration options.
* [nsq](./plugins/inputs/nsq)
* [nstat](./plugins/inputs/nstat)
* [ntpq](./plugins/inputs/ntpq)
* [openldap](./plugins/inputs/openldap)
* [phpfpm](./plugins/inputs/phpfpm)
* [phusion passenger](./plugins/inputs/passenger)
* [ping](./plugins/inputs/ping)
@@ -149,22 +155,25 @@ configuration options.
* [postgresql_extensible](./plugins/inputs/postgresql_extensible)
* [powerdns](./plugins/inputs/powerdns)
* [procstat](./plugins/inputs/procstat)
* [prometheus](./plugins/inputs/prometheus)
* [prometheus](./plugins/inputs/prometheus) (can be used for [Caddy server](./plugins/inputs/prometheus/README.md#usage-for-caddy-http-server))
* [puppetagent](./plugins/inputs/puppetagent)
* [rabbitmq](./plugins/inputs/rabbitmq)
* [raindrops](./plugins/inputs/raindrops)
* [redis](./plugins/inputs/redis)
* [rethinkdb](./plugins/inputs/rethinkdb)
* [riak](./plugins/inputs/riak)
* [salesforce](./plugins/inputs/salesforce)
* [sensors](./plugins/inputs/sensors)
* [snmp](./plugins/inputs/snmp)
* [snmp_legacy](./plugins/inputs/snmp_legacy)
* [sql server](./plugins/inputs/sqlserver) (microsoft)
* [tomcat](./plugins/inputs/tomcat)
* [twemproxy](./plugins/inputs/twemproxy)
* [varnish](./plugins/inputs/varnish)
* [zfs](./plugins/inputs/zfs)
* [zookeeper](./plugins/inputs/zookeeper)
* [win_perf_counters ](./plugins/inputs/win_perf_counters) (windows performance counters)
* [win_perf_counters](./plugins/inputs/win_perf_counters) (windows performance counters)
* [win_services](./plugins/inputs/win_services)
* [sysstat](./plugins/inputs/sysstat)
* [system](./plugins/inputs/system)
* cpu
@@ -198,6 +207,7 @@ Telegraf can also collect metrics via the following service plugins:
* [mandrill](./plugins/inputs/webhooks/mandrill)
* [rollbar](./plugins/inputs/webhooks/rollbar)
* [papertrail](./plugins/inputs/webhooks/papertrail)
* [zipkin](./plugins/inputs/zipkin)
Telegraf is able to parse the following input data formats into metrics; these
formats may be used with input plugins supporting the `data_format` option:
@@ -216,6 +226,7 @@ formats may be used with input plugins supporting the `data_format` option:
## Aggregator Plugins
* [minmax](./plugins/aggregators/minmax)
* [histogram](./plugins/aggregators/histogram)
## Output Plugins
@@ -243,9 +254,3 @@ formats may be used with input plugins supporting the `data_format` option:
* [socket_writer](./plugins/outputs/socket_writer)
* [tcp](./plugins/outputs/socket_writer)
* [udp](./plugins/outputs/socket_writer)
## Contributing
Please see the
[contributing guide](CONTRIBUTING.md)
for details on contributing a plugin to Telegraf.

View File

@@ -247,7 +247,7 @@ func (a *Agent) flush() {
}
// flusher monitors the metrics input channel and flushes on the minimum interval
func (a *Agent) flusher(shutdown chan struct{}, metricC chan telegraf.Metric) error {
func (a *Agent) flusher(shutdown chan struct{}, metricC chan telegraf.Metric, aggC chan telegraf.Metric) error {
// Inelegant, but this sleep is to allow the Gather threads to run, so that
// the flusher will flush after metrics are collected.
time.Sleep(time.Millisecond * 300)
@@ -291,6 +291,29 @@ func (a *Agent) flusher(shutdown chan struct{}, metricC chan telegraf.Metric) er
}
}()
wg.Add(1)
go func() {
defer wg.Done()
for {
select {
case <-shutdown:
if len(aggC) > 0 {
// keep going until aggC is flushed
continue
}
return
case metric := <-aggC:
metrics := []telegraf.Metric{metric}
for _, processor := range a.Config.Processors {
metrics = processor.Apply(metrics...)
}
for _, m := range metrics {
outMetricC <- m
}
}
}
}()
ticker := time.NewTicker(a.Config.Agent.FlushInterval.Duration)
semaphore := make(chan struct{}, 1)
for {
@@ -339,6 +362,9 @@ func (a *Agent) Run(shutdown chan struct{}) error {
// channel shared between all input threads for accumulating metrics
metricC := make(chan telegraf.Metric, 100)
aggC := make(chan telegraf.Metric, 100)
now := time.Now()
// Start all ServicePlugins
for _, input := range a.Config.Inputs {
@@ -367,7 +393,7 @@ func (a *Agent) Run(shutdown chan struct{}) error {
wg.Add(1)
go func() {
defer wg.Done()
if err := a.flusher(shutdown, metricC); err != nil {
if err := a.flusher(shutdown, metricC, aggC); err != nil {
log.Printf("E! Flusher routine failed, exiting: %s\n", err.Error())
close(shutdown)
}
@@ -377,10 +403,10 @@ func (a *Agent) Run(shutdown chan struct{}) error {
for _, aggregator := range a.Config.Aggregators {
go func(agg *models.RunningAggregator) {
defer wg.Done()
acc := NewAccumulator(agg, metricC)
acc := NewAccumulator(agg, aggC)
acc.SetPrecision(a.Config.Agent.Precision.Duration,
a.Config.Agent.Interval.Duration)
agg.Run(acc, shutdown)
agg.Run(acc, now, shutdown)
}(aggregator)
}
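The new goroutine above uses a drain-on-shutdown idiom: once shutdown is signaled, it keeps reading from the buffered `aggC` channel until the channel is empty, so no aggregated metrics are lost. A minimal stand-alone sketch of that idiom (simplified types, not the agent code itself):

```go
package main

import "fmt"

// drain keeps consuming from the buffered channel after shutdown is
// signaled, returning only once the channel is empty.
func drain(shutdown chan struct{}, ch chan int) {
	for {
		select {
		case <-shutdown:
			if len(ch) > 0 {
				continue // keep going until ch is flushed
			}
			return
		case v := <-ch:
			fmt.Println("flushed", v)
		}
	}
}

func main() {
	ch := make(chan int, 100)
	shutdown := make(chan struct{})
	ch <- 1
	ch <- 2
	ch <- 3
	close(shutdown)
	drain(shutdown, ch) // prints all three buffered values, then returns
}
```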

appveyor.yml Normal file
View File

@@ -0,0 +1,32 @@
version: "{build}"
cache:
- C:\Cache
clone_folder: C:\gopath\src\github.com\influxdata\telegraf
environment:
GOPATH: C:\gopath
platform: x64
install:
- IF NOT EXIST "C:\Cache" mkdir C:\Cache
- IF NOT EXIST "C:\Cache\go1.8.1.msi" curl -o "C:\Cache\go1.8.1.msi" https://storage.googleapis.com/golang/go1.8.1.windows-amd64.msi
- IF NOT EXIST "C:\Cache\gnuwin32-bin.zip" curl -o "C:\Cache\gnuwin32-bin.zip" https://dl.influxdata.com/telegraf/ci/make-3.81-bin.zip
- IF NOT EXIST "C:\Cache\gnuwin32-dep.zip" curl -o "C:\Cache\gnuwin32-dep.zip" https://dl.influxdata.com/telegraf/ci/make-3.81-dep.zip
- IF EXIST "C:\Go" rmdir /S /Q C:\Go
- msiexec.exe /i "C:\Cache\go1.8.1.msi" /quiet
- 7z x "C:\Cache\gnuwin32-bin.zip" -oC:\GnuWin32 -y
- 7z x "C:\Cache\gnuwin32-dep.zip" -oC:\GnuWin32 -y
- go version
- go env
build_script:
- cmd: C:\GnuWin32\bin\make
test_script:
- cmd: C:\GnuWin32\bin\make test-windows
artifacts:
- path: telegraf.exe

View File

@@ -51,16 +51,18 @@ var fAggregatorFilters = flag.String("aggregator-filter", "",
var fProcessorFilters = flag.String("processor-filter", "",
"filter the processors to enable, separator is :")
var fUsage = flag.String("usage", "",
"print usage for a plugin, ie, 'telegraf -usage mysql'")
"print usage for a plugin, ie, 'telegraf --usage mysql'")
var fService = flag.String("service", "",
"operate on the service")
// Telegraf version, populated by the linker.
// ie, -ldflags "-X main.version=`git describe --always --tags`"
var (
version string
commit string
branch string
nextVersion = "1.4.0"
version string
commit string
branch string
)
func init() {
@@ -81,8 +83,8 @@ Usage:
The commands & flags are:
config print out full sample configuration to stdout
version print the version to stdout
config print out full sample configuration to stdout
version print the version to stdout
--config <file> configuration file to load
--test gather metrics once, print them to stdout, and exit
@@ -103,7 +105,7 @@ Examples:
telegraf --input-filter cpu --output-filter influxdb config
# run a single telegraf collection, outputting metrics to stdout
telegraf --config telegraf.conf -test
telegraf --config telegraf.conf --test
# run telegraf with all plugins defined in config file
telegraf --config telegraf.conf
@@ -151,6 +153,16 @@ func reloadLoop(
log.Fatalf("E! Error: no inputs found, did you provide a valid config file?")
}
if int64(c.Agent.Interval.Duration) <= 0 {
log.Fatalf("E! Agent interval must be positive, found %s",
c.Agent.Interval.Duration)
}
if int64(c.Agent.FlushInterval.Duration) <= 0 {
log.Fatalf("E! Agent flush_interval must be positive; found %s",
c.Agent.FlushInterval.Duration)
}
ag, err := agent.NewAgent(c)
if err != nil {
log.Fatal("E! " + err.Error())
@@ -196,7 +208,7 @@ func reloadLoop(
}
}()
log.Printf("I! Starting Telegraf (version %s)\n", version)
log.Printf("I! Starting Telegraf %s\n", displayVersion())
log.Printf("I! Loaded outputs: %s", strings.Join(c.OutputNames(), " "))
log.Printf("I! Loaded inputs: %s", strings.Join(c.InputNames(), " "))
log.Printf("I! Tags enabled: %s", c.ListTags())
@@ -254,6 +266,13 @@ func (p *program) Stop(s service.Service) error {
return nil
}
func displayVersion() string {
if version == "" {
return fmt.Sprintf("v%s~pre%s", nextVersion, commit)
}
return "v" + version
}
func main() {
flag.Usage = func() { usageExit(0) }
flag.Parse()
@@ -295,7 +314,7 @@ func main() {
if len(args) > 0 {
switch args[0] {
case "version":
fmt.Printf("Telegraf v%s (git: %s %s)\n", version, branch, commit)
fmt.Printf("Telegraf %s (git: %s %s)\n", displayVersion(), branch, commit)
return
case "config":
config.PrintSampleConfig(
@@ -323,7 +342,7 @@ func main() {
}
return
case *fVersion:
fmt.Printf("Telegraf v%s (git: %s %s)\n", version, branch, commit)
fmt.Printf("Telegraf %s (git: %s %s)\n", displayVersion(), branch, commit)
return
case *fSampleConfig:
config.PrintSampleConfig(
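To illustrate the new version fallback (using a hypothetical short SHA for `commit`): builds from an exact tag print the tag, while untagged development builds report the upcoming version plus the commit. A minimal sketch of the logic shown above:

```go
package main

import "fmt"

var (
	nextVersion = "1.4.0"
	version     string      // set via -ldflags only for exact-tag builds
	commit      = "abc1234" // hypothetical short SHA
)

func displayVersion() string {
	if version == "" {
		return fmt.Sprintf("v%s~pre%s", nextVersion, commit)
	}
	return "v" + version
}

func main() {
	fmt.Println(displayVersion()) // v1.4.0~preabc1234
	version = "1.4.2"
	fmt.Println(displayVersion()) // v1.4.2
}
```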

View File

@@ -66,10 +66,13 @@ interval. Maximum flush_interval will be flush_interval + flush_jitter
This is primarily to avoid
large write spikes for users running a large number of telegraf instances.
ie, a jitter of 5s and flush_interval 10s means flushes will happen every 10-15s.
* **precision**: By default, precision will be set to the same timestamp order
as the collection interval, with the maximum being 1s. Precision will NOT
be used for service inputs, such as logparser and statsd. Valid values are
"ns", "us" (or "µs"), "ms", "s".
* **precision**:
By default or when set to "0s", precision will be set to the same
timestamp order as the collection interval, with the maximum being 1s.
Precision will NOT be used for service inputs. It is up to each individual
service input to set the timestamp at the appropriate precision.
Valid time units are "ns", "us" (or "µs"), "ms", "s".
* **logfile**: Specify the log file name. The empty string means to log to stderr.
* **debug**: Run telegraf in debug mode.
* **quiet**: Run telegraf in quiet mode (error messages only).
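A quick sketch (plain Go, not Telegraf code) of what this precision rounding means for a collected timestamp; each configured unit simply truncates the time value:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	ts := time.Date(2017, 10, 10, 13, 31, 6, 123456789, time.UTC)
	fmt.Println(ts.Truncate(time.Second))      // precision = "s"
	fmt.Println(ts.Truncate(time.Millisecond)) // precision = "ms"
	fmt.Println(ts.Truncate(time.Microsecond)) // precision = "us"
}
```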
@@ -134,8 +137,9 @@ is tested on points after they have passed the `namepass` test.
An array of glob pattern strings. Only fields whose field key matches a
pattern in this list are emitted. Not available for outputs.
* **fielddrop**:
The inverse of `fieldpass`. Fields with a field key matching one of the
patterns will be discarded from the point. Not available for outputs.
The inverse of `fieldpass`. Fields with a field key matching one of the
patterns will be discarded from the point. This is tested on points after
they have passed the `fieldpass` test. Not available for outputs.
* **tagpass**:
A table mapping tag keys to arrays of glob pattern strings. Only points
that contain a tag key in the table and a tag value matching one of its
@@ -177,7 +181,6 @@ fields which begin with `time_`.
[[outputs.influxdb]]
url = "http://192.168.59.103:8086" # required.
database = "telegraf" # required.
precision = "s"
# INPUTS
[[inputs.cpu]]
@@ -316,21 +319,18 @@ to avoid measurement collisions:
[[outputs.influxdb]]
urls = [ "http://localhost:8086" ]
database = "telegraf"
precision = "s"
# Drop all measurements that start with "aerospike"
namedrop = ["aerospike*"]
[[outputs.influxdb]]
urls = [ "http://localhost:8086" ]
database = "telegraf-aerospike-data"
precision = "s"
# Only accept aerospike data:
namepass = ["aerospike*"]
[[outputs.influxdb]]
urls = [ "http://localhost:8086" ]
database = "telegraf-cpu0-data"
precision = "s"
# Only store measurements where the tag "cpu" matches the value "cpu0"
[outputs.influxdb.tagpass]
cpu = ["cpu0"]

View File

@@ -96,6 +96,9 @@ tars.cpu-total.us-east-1.cpu.usage_user 0.89 1455320690
tars.cpu-total.us-east-1.cpu.usage_idle 98.09 1455320690
```
Fields with string values will be skipped. Boolean fields will be converted
to 1 (true) or 0 (false).
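A minimal sketch of that documented conversion, using a hypothetical helper rather than the serializer's actual code:

```go
package main

import "fmt"

// graphiteValue mirrors the documented rules: booleans become 1 or 0,
// numbers pass through, strings are skipped.
func graphiteValue(v interface{}) (float64, bool) {
	switch x := v.(type) {
	case bool:
		if x {
			return 1, true
		}
		return 0, true
	case int64:
		return float64(x), true
	case float64:
		return x, true
	case string:
		return 0, false // skipped
	}
	return 0, false
}

func main() {
	for _, v := range []interface{}{true, false, int64(42), "up"} {
		if f, ok := graphiteValue(v); ok {
			fmt.Println(f) // 1, 0, 42
		}
	}
}
```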
### Graphite Configuration:
```toml

View File

@@ -1,33 +1,102 @@
# List
- collectd.org [MIT LICENSE](https://github.com/collectd/go-collectd/blob/master/LICENSE)
- github.com/Shopify/sarama [MIT LICENSE](https://github.com/Shopify/sarama/blob/master/MIT-LICENSE)
- github.com/Sirupsen/logrus [MIT LICENSE](https://github.com/Sirupsen/logrus/blob/master/LICENSE)
- github.com/armon/go-metrics [MIT LICENSE](https://github.com/armon/go-metrics/blob/master/LICENSE)
- github.com/boltdb/bolt [MIT LICENSE](https://github.com/boltdb/bolt/blob/master/LICENSE)
- github.com/cenkalti/backoff [MIT LICENSE](https://github.com/cenkalti/backoff/blob/master/LICENSE)
- github.com/dancannon/gorethink [APACHE LICENSE](https://github.com/dancannon/gorethink/blob/master/LICENSE)
- github.com/eapache/go-resiliency [MIT LICENSE](https://github.com/eapache/go-resiliency/blob/master/LICENSE)
- github.com/eapache/queue [MIT LICENSE](https://github.com/eapache/queue/blob/master/LICENSE)
- github.com/fsouza/go-dockerclient [BSD LICENSE](https://github.com/fsouza/go-dockerclient/blob/master/LICENSE)
- github.com/go-sql-driver/mysql [MPL LICENSE](https://github.com/go-sql-driver/mysql/blob/master/LICENSE)
- github.com/gogo/protobuf [BSD LICENSE](https://github.com/gogo/protobuf/blob/master/LICENSE)
- github.com/golang/protobuf [BSD LICENSE](https://github.com/golang/protobuf/blob/master/LICENSE)
- github.com/golang/snappy [BSD LICENSE](https://github.com/golang/snappy/blob/master/LICENSE)
- github.com/hashicorp/go-msgpack [BSD LICENSE](https://github.com/hashicorp/go-msgpack/blob/master/LICENSE)
- github.com/hashicorp/raft [MPL LICENSE](https://github.com/hashicorp/raft/blob/master/LICENSE)
- github.com/hashicorp/raft-boltdb [MPL LICENSE](https://github.com/hashicorp/raft-boltdb/blob/master/LICENSE)
- github.com/kardianos/service [ZLIB LICENSE](https://github.com/kardianos/service/blob/master/LICENSE) (License not named but matches word for word with ZLib)
- github.com/kballard/go-shellquote [MIT LICENSE](https://github.com/kballard/go-shellquote/blob/master/LICENSE)
- github.com/lib/pq [MIT LICENSE](https://github.com/lib/pq/blob/master/LICENSE.md)
- github.com/matttproud/golang_protobuf_extensions [APACHE LICENSE](https://github.com/matttproud/golang_protobuf_extensions/blob/master/LICENSE)
- github.com/naoina/go-stringutil [MIT LICENSE](https://github.com/naoina/go-stringutil/blob/master/LICENSE)
- github.com/naoina/toml [MIT LICENSE](https://github.com/naoina/toml/blob/master/LICENSE)
- github.com/prometheus/client_golang [APACHE LICENSE](https://github.com/prometheus/client_golang/blob/master/LICENSE)
- github.com/samuel/go-zookeeper [BSD LICENSE](https://github.com/samuel/go-zookeeper/blob/master/LICENSE)
- github.com/stretchr/objx [MIT LICENSE](https://github.com/stretchr/objx/blob/master/LICENSE.md)
- github.com/stretchr/testify [MIT LICENSE](https://github.com/stretchr/testify/blob/master/LICENCE.txt)
- github.com/wvanbergen/kafka [MIT LICENSE](https://github.com/wvanbergen/kafka/blob/master/LICENSE)
- github.com/wvanbergen/kazoo-go [MIT LICENSE](https://github.com/wvanbergen/kazoo-go/blob/master/MIT-LICENSE)
- gopkg.in/dancannon/gorethink.v1 [APACHE LICENSE](https://github.com/dancannon/gorethink/blob/v1.1.2/LICENSE)
- gopkg.in/mgo.v2 [BSD LICENSE](https://github.com/go-mgo/mgo/blob/v2/LICENSE)
- golang.org/x/crypto/ [BSD LICENSE](https://github.com/golang/crypto/blob/master/LICENSE)
# Licenses of dependencies
When distributed in a binary form, Telegraf may contain portions of the
following works:
- collectd.org [MIT](https://github.com/collectd/go-collectd/blob/master/LICENSE)
- github.com/aerospike/aerospike-client-go [APACHE](https://github.com/aerospike/aerospike-client-go/blob/master/LICENSE)
- github.com/amir/raidman [PUBLIC DOMAIN](https://github.com/amir/raidman/blob/master/UNLICENSE)
- github.com/armon/go-metrics [MIT](https://github.com/armon/go-metrics/blob/master/LICENSE)
- github.com/aws/aws-sdk-go [APACHE](https://github.com/aws/aws-sdk-go/blob/master/LICENSE.txt)
- github.com/beorn7/perks [MIT](https://github.com/beorn7/perks/blob/master/LICENSE)
- github.com/boltdb/bolt [MIT](https://github.com/boltdb/bolt/blob/master/LICENSE)
- github.com/bsm/sarama-cluster [MIT](https://github.com/bsm/sarama-cluster/blob/master/LICENSE)
- github.com/cenkalti/backoff [MIT](https://github.com/cenkalti/backoff/blob/master/LICENSE)
- github.com/chuckpreslar/rcon [MIT](https://github.com/chuckpreslar/rcon#license)
- github.com/couchbase/go-couchbase [MIT](https://github.com/couchbase/go-couchbase/blob/master/LICENSE)
- github.com/couchbase/gomemcached [MIT](https://github.com/couchbase/gomemcached/blob/master/LICENSE)
- github.com/couchbase/goutils [MIT](https://github.com/couchbase/go-couchbase/blob/master/LICENSE)
- github.com/dancannon/gorethink [APACHE](https://github.com/dancannon/gorethink/blob/master/LICENSE)
- github.com/davecgh/go-spew [ISC](https://github.com/davecgh/go-spew/blob/master/LICENSE)
- github.com/docker/docker [APACHE](https://github.com/docker/docker/blob/master/LICENSE)
- github.com/docker/cli [APACHE](https://github.com/docker/cli/blob/master/LICENSE)
- github.com/eapache/go-resiliency [MIT](https://github.com/eapache/go-resiliency/blob/master/LICENSE)
- github.com/eapache/go-xerial-snappy [MIT](https://github.com/eapache/go-xerial-snappy/blob/master/LICENSE)
- github.com/eapache/queue [MIT](https://github.com/eapache/queue/blob/master/LICENSE)
- github.com/eclipse/paho.mqtt.golang [ECLIPSE](https://github.com/eclipse/paho.mqtt.golang/blob/master/LICENSE)
- github.com/fsouza/go-dockerclient [BSD](https://github.com/fsouza/go-dockerclient/blob/master/LICENSE)
- github.com/gobwas/glob [MIT](https://github.com/gobwas/glob/blob/master/LICENSE)
- github.com/google/go-cmp [BSD](https://github.com/google/go-cmp/blob/master/LICENSE)
- github.com/gogo/protobuf [BSD](https://github.com/gogo/protobuf/blob/master/LICENSE)
- github.com/golang/protobuf [BSD](https://github.com/golang/protobuf/blob/master/LICENSE)
- github.com/golang/snappy [BSD](https://github.com/golang/snappy/blob/master/LICENSE)
- github.com/go-logfmt/logfmt [MIT](https://github.com/go-logfmt/logfmt/blob/master/LICENSE)
- github.com/gorilla/mux [BSD](https://github.com/gorilla/mux/blob/master/LICENSE)
- github.com/go-ini/ini [APACHE](https://github.com/go-ini/ini/blob/master/LICENSE)
- github.com/go-ole/go-ole [MPL](http://mattn.mit-license.org/2013)
- github.com/go-sql-driver/mysql [MPL](https://github.com/go-sql-driver/mysql/blob/master/LICENSE)
- github.com/hailocab/go-hostpool [MIT](https://github.com/hailocab/go-hostpool/blob/master/LICENSE)
- github.com/hashicorp/consul [MPL](https://github.com/hashicorp/consul/blob/master/LICENSE)
- github.com/hashicorp/go-msgpack [BSD](https://github.com/hashicorp/go-msgpack/blob/master/LICENSE)
- github.com/hashicorp/raft-boltdb [MPL](https://github.com/hashicorp/raft-boltdb/blob/master/LICENSE)
- github.com/hashicorp/raft [MPL](https://github.com/hashicorp/raft/blob/master/LICENSE)
- github.com/influxdata/tail [MIT](https://github.com/influxdata/tail/blob/master/LICENSE.txt)
- github.com/influxdata/toml [MIT](https://github.com/influxdata/toml/blob/master/LICENSE)
- github.com/influxdata/wlog [MIT](https://github.com/influxdata/wlog/blob/master/LICENSE)
- github.com/jackc/pgx [MIT](https://github.com/jackc/pgx/blob/master/LICENSE)
- github.com/jmespath/go-jmespath [APACHE](https://github.com/jmespath/go-jmespath/blob/master/LICENSE)
- github.com/kardianos/osext [BSD](https://github.com/kardianos/osext/blob/master/LICENSE)
- github.com/kardianos/service [ZLIB](https://github.com/kardianos/service/blob/master/LICENSE) (License not named but matches word for word with ZLib)
- github.com/kballard/go-shellquote [MIT](https://github.com/kballard/go-shellquote/blob/master/LICENSE)
- github.com/lib/pq [MIT](https://github.com/lib/pq/blob/master/LICENSE.md)
- github.com/matttproud/golang_protobuf_extensions [APACHE](https://github.com/matttproud/golang_protobuf_extensions/blob/master/LICENSE)
- github.com/Microsoft/go-winio [MIT](https://github.com/Microsoft/go-winio/blob/master/LICENSE)
- github.com/miekg/dns [BSD](https://github.com/miekg/dns/blob/master/LICENSE)
- github.com/naoina/go-stringutil [MIT](https://github.com/naoina/go-stringutil/blob/master/LICENSE)
- github.com/naoina/toml [MIT](https://github.com/naoina/toml/blob/master/LICENSE)
- github.com/nats-io/go-nats [MIT](https://github.com/nats-io/go-nats/blob/master/LICENSE)
- github.com/nats-io/nats [MIT](https://github.com/nats-io/nats/blob/master/LICENSE)
- github.com/nats-io/nuid [MIT](https://github.com/nats-io/nuid/blob/master/LICENSE)
- github.com/nsqio/go-nsq [MIT](https://github.com/nsqio/go-nsq/blob/master/LICENSE)
- github.com/opentracing-contrib/go-observer [APACHE](https://github.com/opentracing-contrib/go-observer/blob/master/LICENSE)
- github.com/opentracing/opentracing-go [MIT](https://github.com/opentracing/opentracing-go/blob/master/LICENSE)
- github.com/openzipkin/zipkin-go-opentracing [MIT](https://github.com/openzipkin/zipkin-go-opentracing/blob/master/LICENSE)
- github.com/pierrec/lz4 [BSD](https://github.com/pierrec/lz4/blob/master/LICENSE)
- github.com/pierrec/xxHash [BSD](https://github.com/pierrec/xxHash/blob/master/LICENSE)
- github.com/pkg/errors [BSD](https://github.com/pkg/errors/blob/master/LICENSE)
- github.com/pmezard/go-difflib [BSD](https://github.com/pmezard/go-difflib/blob/master/LICENSE)
- github.com/prometheus/client_golang [APACHE](https://github.com/prometheus/client_golang/blob/master/LICENSE)
- github.com/prometheus/client_model [APACHE](https://github.com/prometheus/client_model/blob/master/LICENSE)
- github.com/prometheus/common [APACHE](https://github.com/prometheus/common/blob/master/LICENSE)
- github.com/prometheus/procfs [APACHE](https://github.com/prometheus/procfs/blob/master/LICENSE)
- github.com/rcrowley/go-metrics [BSD](https://github.com/rcrowley/go-metrics/blob/master/LICENSE)
- github.com/samuel/go-zookeeper [BSD](https://github.com/samuel/go-zookeeper/blob/master/LICENSE)
- github.com/satori/go.uuid [MIT](https://github.com/satori/go.uuid/blob/master/LICENSE)
- github.com/shirou/gopsutil [BSD](https://github.com/shirou/gopsutil/blob/master/LICENSE)
- github.com/shirou/w32 [BSD](https://github.com/shirou/w32/blob/master/LICENSE)
- github.com/Shopify/sarama [MIT](https://github.com/Shopify/sarama/blob/master/MIT-LICENSE)
- github.com/Sirupsen/logrus [MIT](https://github.com/Sirupsen/logrus/blob/master/LICENSE)
- github.com/StackExchange/wmi [MIT](https://github.com/StackExchange/wmi/blob/master/LICENSE)
- github.com/soniah/gosnmp [BSD](https://github.com/soniah/gosnmp/blob/master/LICENSE)
- github.com/streadway/amqp [BSD](https://github.com/streadway/amqp/blob/master/LICENSE)
- github.com/stretchr/objx [MIT](https://github.com/stretchr/objx/blob/master/LICENSE.md)
- github.com/stretchr/testify [MIT](https://github.com/stretchr/testify/blob/master/LICENCE.txt)
- github.com/vjeantet/grok [APACHE](https://github.com/vjeantet/grok/blob/master/LICENSE)
- github.com/wvanbergen/kafka [MIT](https://github.com/wvanbergen/kafka/blob/master/LICENSE)
- github.com/wvanbergen/kazoo-go [MIT](https://github.com/wvanbergen/kazoo-go/blob/master/MIT-LICENSE)
- github.com/yuin/gopher-lua [MIT](https://github.com/yuin/gopher-lua/blob/master/LICENSE)
- github.com/zensqlmonitor/go-mssqldb [BSD](https://github.com/zensqlmonitor/go-mssqldb/blob/master/LICENSE.txt)
- golang.org/x/crypto [BSD](https://github.com/golang/crypto/blob/master/LICENSE)
- golang.org/x/net [BSD](https://go.googlesource.com/net/+/master/LICENSE)
- golang.org/x/text [BSD](https://go.googlesource.com/text/+/master/LICENSE)
- golang.org/x/sys [BSD](https://go.googlesource.com/sys/+/master/LICENSE)
- gopkg.in/asn1-ber.v1 [MIT](https://github.com/go-asn1-ber/asn1-ber/blob/v1.2/LICENSE)
- gopkg.in/dancannon/gorethink.v1 [APACHE](https://github.com/dancannon/gorethink/blob/v1.1.2/LICENSE)
- gopkg.in/fatih/pool.v2 [MIT](https://github.com/fatih/pool/blob/v2.0.0/LICENSE)
- gopkg.in/fsnotify.v1 [BSD](https://github.com/fsnotify/fsnotify/blob/v1.4.2/LICENSE)
- gopkg.in/ldap.v2 [MIT](https://github.com/go-ldap/ldap/blob/v2.5.0/LICENSE)
- gopkg.in/mgo.v2 [BSD](https://github.com/go-mgo/mgo/blob/v2/LICENSE)
- gopkg.in/olivere/elastic.v5 [MIT](https://github.com/olivere/elastic/blob/v5.0.38/LICENSE)
- gopkg.in/tomb.v1 [BSD](https://github.com/go-tomb/tomb/blob/v1/LICENSE)
- gopkg.in/yaml.v2 [APACHE](https://github.com/go-yaml/yaml/blob/v2/LICENSE)

View File

@@ -37,3 +37,9 @@ Telegraf can manage its own service through the --service flag:
| `telegraf.exe --service start` | Start the telegraf service |
| `telegraf.exe --service stop` | Stop the telegraf service |
Troubleshooting common error #1067
When installing Telegraf as a Windows service, always double-check that you specify the full path to the config file; otherwise the Windows service will fail to start:
--config C:\"Program Files"\Telegraf\telegraf.conf

View File

@@ -118,6 +118,15 @@
## Use SSL but skip chain & host verification
# insecure_skip_verify = false
## HTTP Proxy Config
# http_proxy = "http://corporate.proxy:3128"
## Optional HTTP headers
# http_headers = {"X-Special-Header" = "Special-Value"}
## Compress each HTTP request payload using GZIP.
# content_encoding = "gzip"
# # Configuration for Amon Server to send metrics to.
# [[outputs.amon]]
@@ -270,6 +279,13 @@
# template = "host.tags.measurement.field"
# ## timeout in seconds for the write connection to graphite
# timeout = 2
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false
# # Send telegraf metrics to graylog(s)
@@ -498,7 +514,7 @@
# # Configuration for the Prometheus client to spawn
# [[outputs.prometheus_client]]
# ## Address to listen on
# # listen = ":9126"
# # listen = ":9273"
#
# ## Interval to expire metrics and not deliver to prometheus, 0 == no expiration
# # expiration_interval = "60s"
@@ -589,6 +605,32 @@
# AGGREGATOR PLUGINS #
###############################################################################
# # Create aggregate histograms.
# [[aggregators.histogram]]
# ## The period in which to flush the aggregator.
# period = "30s"
#
# ## If true, the original metric will be dropped by the
# ## aggregator and will not get sent to the output plugins.
# drop_original = false
#
# ## Example config that aggregates all fields of the metric.
# # [[aggregators.histogram.config]]
# # ## The set of buckets.
# # buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0]
# # ## The name of metric.
# # measurement_name = "cpu"
#
# ## Example config that aggregates only specific fields of the metric.
# # [[aggregators.histogram.config]]
# # ## The set of buckets.
# # buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
# # ## The name of metric.
# # measurement_name = "diskio"
# # ## The concrete fields of metric
# # fields = ["io_time", "read_time", "write_time"]
# # Keep the aggregate min/max of each metric passing through.
# [[aggregators.minmax]]
# ## General Aggregator Arguments:
@@ -612,6 +654,8 @@
totalcpu = true
## If true, collect raw CPU time metrics.
collect_cpu_time = false
## If true, compute and report the sum of all non-idle CPU states.
report_active = false
# Read metrics about disk usage by mount point
@@ -687,15 +731,17 @@
# # Read Apache status information (mod_status)
# [[inputs.apache]]
# ## An array of Apache status URI to gather stats.
# ## An array of URLs to gather from, must be directed at the machine
# ## readable version of the mod_status page including the auto query string.
# ## Default is "http://localhost/server-status?auto".
# urls = ["http://localhost/server-status?auto"]
# ## user credentials for basic HTTP authentication
# username = "myuser"
# password = "mypassword"
#
# ## Timeout for the complete connection and response time in seconds
# response_timeout = "25s" ## defaults to 5 seconds
# ## Credentials for basic HTTP authentication.
# # username = "myuser"
# # password = "mypassword"
#
# ## Maximum time to receive response.
# # response_timeout = "5s"
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
@@ -813,7 +859,7 @@
#
# # The minimum period for Cloudwatch metrics is 1 minute (60s). However not all
# # metrics are made available to the 1 minute period. Some are collected at
# # 3 minute and 5 minutes intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring.
# # 3 minute, 5 minute, or larger intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring.
# # Note that if a period is configured that is smaller than the minimum for a
# # particular metric, that metric will not be returned by the Cloudwatch API
# # and will not be collected by Telegraf.
@@ -925,20 +971,23 @@
# # Query given DNS server and gives statistics
# [[inputs.dns_query]]
# ## servers to query
# servers = ["8.8.8.8"] # required
# servers = ["8.8.8.8"]
#
# ## Domains or subdomains to query. "."(root) is default
# domains = ["."] # optional
# ## Network is the network protocol name.
# # network = "udp"
#
# ## Query record type. Default is "A"
# ## Domains or subdomains to query.
# # domains = ["."]
#
# ## Query record type.
# ## Possible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV.
# record_type = "A" # optional
# # record_type = "A"
#
# ## Dns server port. 53 is default
# port = 53 # optional
# ## Dns server port.
# # port = 53
#
# ## Query timeout in seconds. Default is 2 seconds
# timeout = 2 # optional
# ## Query timeout in seconds.
# # timeout = 2
# # Read metrics about docker containers
@@ -947,8 +996,15 @@
# ## To use TCP, set endpoint = "tcp://[ip]:[port]"
# ## To use environment variables (ie, docker-machine), set endpoint = "ENV"
# endpoint = "unix:///var/run/docker.sock"
#
# ## Only collect metrics for these containers, collect all if empty
# container_names = []
#
# ## Containers to include and exclude. Globs accepted.
# ## Note that an empty array for both will include all containers
# container_name_include = []
# container_name_exclude = []
#
# ## Timeout for docker list, info, and stats commands
# timeout = "5s"
#
@@ -957,11 +1013,20 @@
# perdevice = true
# ## Whether to report for each container total blkio and network stats or not
# total = false
# ## Which environment variables should we use as a tag
# # tag_env = ["JAVA_HOME", "HEAP_SIZE"]
#
# ## docker labels to include and exclude as tags. Globs accepted.
# ## Note that an empty array for both will include all labels as tags
# docker_label_include = []
# docker_label_exclude = []
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false
# # Read statistics from one or many dovecot servers
@@ -1031,6 +1096,12 @@
# data_format = "influx"
# # Read metrics from fail2ban.
# [[inputs.fail2ban]]
# ## Use sudo to run fail2ban-client
# use_sudo = false
# # Read stats about given file(s)
# [[inputs.filestat]]
# ## Files to gather stats about.
@@ -1047,6 +1118,22 @@
# md5 = false
# # Read metrics exposed by fluentd in_monitor plugin
# [[inputs.fluentd]]
# ## This plugin reads information exposed by fluentd (using /api/plugins.json endpoint).
# ##
# ## Endpoint:
# ## - only one URI is allowed
# ## - https is not supported
# endpoint = "http://localhost:24220/api/plugins.json"
#
# ## Define which plugins have to be excluded (based on "type" field - e.g. monitor_agent)
# exclude = [
# "monitor_agent",
# "dummy",
# ]
# # Read flattened metrics from one or more GrayLog HTTP endpoints
# [[inputs.graylog]]
# ## API endpoint, currently supported API:
@@ -1128,25 +1215,26 @@
# # HTTP/HTTPS request given an address a method and a timeout
# [[inputs.http_response]]
# ## Server address (default http://localhost)
# address = "http://github.com"
# # address = "http://localhost"
#
# ## Set response_timeout (default 5 seconds)
# response_timeout = "5s"
# # response_timeout = "5s"
#
# ## HTTP Request Method
# method = "GET"
# # method = "GET"
#
# ## Whether to follow redirects from the server (defaults to false)
# follow_redirects = true
# ## HTTP Request Headers (all values must be strings)
# # [inputs.http_response.headers]
# # Host = "github.com"
# # follow_redirects = false
#
# ## Optional HTTP Request Body
# # body = '''
# # {'fake':'data'}
# # '''
#
# ## Optional substring or regex match in body of the response
# ## response_string_match = "\"service_status\": \"up\""
# ## response_string_match = "ok"
# ## response_string_match = "\".*_status\".?:.?\"up\""
# # response_string_match = "\"service_status\": \"up\""
# # response_string_match = "ok"
# # response_string_match = "\".*_status\".?:.?\"up\""
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
@@ -1154,6 +1242,10 @@
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false
#
# ## HTTP Request Headers (all values must be strings)
# # [inputs.http_response.headers]
# # Host = "github.com"
# # Read flattened metrics from one or more JSON HTTP endpoints
@@ -1216,6 +1308,13 @@
# "http://localhost:8086/debug/vars"
# ]
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false
#
# ## http request & header timeout
# timeout = "5s"
@@ -1246,6 +1345,13 @@
# ## if no servers are specified, local machine sensor stats will be queried
# ##
# # servers = ["USERID:PASSW0RD@lan(192.168.1.1)"]
#
# ## Recommended: use a metric 'interval' that is a multiple of 'timeout' to avoid
# ## gaps or overlap in pulled data
# interval = "30s"
#
# ## Timeout for the ipmitool command to complete
# timeout = "20s"
# # Gather packets and bytes throughput from iptables
@@ -1365,9 +1471,9 @@
# # Read metrics from a LeoFS Server via SNMP
# [[inputs.leofs]]
# ## An array of URI to gather stats about LeoFS.
# ## Specify an ip or hostname with port. ie 127.0.0.1:4020
# servers = ["127.0.0.1:4021"]
# ## An array of URLs of the form:
# ## "udp://" host [ ":" port]
# servers = ["udp://127.0.0.1:4020"]
# # Provides Linux sysctl fs metrics
@@ -1442,14 +1548,24 @@
# # ]
# # Collects scores from a minecraft server's scoreboard using the RCON protocol
# [[inputs.minecraft]]
# ## server address for minecraft
# # server = "localhost"
# ## port for RCON
# # port = "25575"
# ## RCON password for the minecraft server
# # password = ""
# # Read metrics from one or many MongoDB servers
# [[inputs.mongodb]]
# ## An array of URI to gather stats about. Specify an ip or hostname
# ## with optional port and password. ie,
# ## An array of URLs of the form:
# ## "mongodb://" [user ":" pass "@"] host [ ":" port]
# ## For example:
# ## mongodb://user:auth_key@10.10.3.30:27017,
# ## mongodb://10.10.3.33:18832,
# ## 10.0.0.1:10000, etc.
# servers = ["127.0.0.1:27017"]
# servers = ["mongodb://127.0.0.1:27017"]
# gather_perdb_stats = false
#
# ## Optional SSL Config
@@ -1463,7 +1579,7 @@
# # Read metrics from one or many mysql servers
# [[inputs.mysql]]
# ## specify servers via a url matching:
# ## [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify]]
# ## [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify|custom]]
# ## see https://github.com/go-sql-driver/mysql#dsn-data-source-name
# ## e.g.
# ## servers = ["user:passwd@tcp(127.0.0.1:3306)/?tls=false"]
@@ -1520,6 +1636,11 @@
# #
# ## Some queries we may want to run less often (such as SHOW GLOBAL VARIABLES)
# interval_slow = "30m"
#
# ## Optional SSL Config (will be used if tls=custom parameter specified in server uri)
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
# # Read metrics about network interface usage
@@ -1561,8 +1682,17 @@
# # Read Nginx's basic status information (ngx_http_stub_status_module)
# [[inputs.nginx]]
# ## An array of Nginx stub_status URI to gather stats.
# urls = ["http://localhost/status"]
# # An array of Nginx stub_status URI to gather stats.
# urls = ["http://localhost/server_status"]
#
# # TLS/SSL configuration
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.cer"
# ssl_key = "/etc/telegraf/key.key"
# insecure_skip_verify = false
#
# # HTTP response timeout (default: 5s)
# response_timeout = "5s"
# # Read NSQ topic and channel statistics.
@@ -1589,6 +1719,27 @@
# dns_lookup = true
# # OpenLDAP cn=Monitor plugin
# [[inputs.openldap]]
# host = "localhost"
# port = 389
#
# # ldaps, starttls, or no encryption. default is an empty string, disabling all encryption.
# # note that port will likely need to be changed to 636 for ldaps
# # valid options: "" | "starttls" | "ldaps"
# ssl = ""
#
# # skip peer certificate verification. Default is false.
# insecure_skip_verify = false
#
# # Path to PEM-encoded Root certificate to use to verify server certificate
# ssl_ca = "/etc/ssl/certs.pem"
#
# # dn/password to bind with. If bind_dn is empty, an anonymous bind is performed.
# bind_dn = ""
# bind_password = ""
# # Read metrics of passenger using passenger-status
# [[inputs.passenger]]
# ## Path of passenger-status.
@@ -1782,10 +1933,13 @@
# location = "/var/lib/puppet/state/last_run_summary.yaml"
# # Read metrics from one or many RabbitMQ servers via the management API
# # Reads metrics from RabbitMQ servers via the Management Plugin
# [[inputs.rabbitmq]]
# ## Management Plugin url. (default: http://localhost:15672)
# # url = "http://localhost:15672"
# # name = "rmq-server-1" # optional tag
# ## Tag added to rabbitmq_overview series; deprecated: use tags
# # name = "rmq-server-1"
# ## Credentials
# # username = "guest"
# # password = "guest"
#
@@ -1839,6 +1993,14 @@
# ## rethinkdb://10.10.3.33:18832,
# ## 10.0.0.1:10000, etc.
# servers = ["127.0.0.1:28015"]
# ##
# ## If you use rethinkdb > 2.3.0 with username/password authorization, the
# ## protocol has to be named "rethinkdb2" - it will use the 1_0 handshake.
# # servers = ["rethinkdb2://username:password@127.0.0.1:28015"]
# ##
# ## If you use older versions of rethinkdb (<2.2) with auth_key, the protocol
# ## has to be named "rethinkdb".
# # servers = ["rethinkdb://username:auth_key@127.0.0.1:28015"]
# # Read metrics from one or many Riak servers
@@ -1847,6 +2009,26 @@
# servers = ["http://localhost:8098"]
# # Read API usage and limits for a Salesforce organisation
# [[inputs.salesforce]]
# ## specify your credentials
# ##
# username = "your_username"
# password = "your_password"
# ##
# ## (optional) security token
# # security_token = "your_security_token"
# ##
# ## (optional) environment type (sandbox or production)
# ## default is: production
# ##
# # environment = "production"
# ##
# ## (optional) API version (default: "39.0")
# ##
# # version = "39.0"
# # Monitor sensors, requires lm-sensors package
# [[inputs.sensors]]
# ## Remove numbers from field names.
@@ -2092,6 +2274,26 @@
# # vg = "rootvg"
# # Gather metrics from the Tomcat server status page.
# [[inputs.tomcat]]
# ## URL of the Tomcat server status
# # url = "http://127.0.0.1:8080/manager/status/all?XML=true"
#
# ## HTTP Basic Auth Credentials
# # username = "tomcat"
# # password = "s3cret"
#
# ## Request timeout
# # timeout = "5s"
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false
# # Inserts sine and cosine waves for demonstration purposes
# [[inputs.trig]]
# ## Set the amplitude
@@ -2108,6 +2310,9 @@
# # A plugin to collect stats from Varnish HTTP Cache
# [[inputs.varnish]]
# ## If running as a restricted user you can prepend sudo for additional access:
# # use_sudo = false
#
# ## The default location of the varnishstat binary can be overridden with:
# binary = "/usr/bin/varnishstat"
#
@@ -2173,10 +2378,10 @@
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false
#
# ## Data format to output.
# ## Data format to consume.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
# data_format = "influx"
@@ -2201,6 +2406,40 @@
# # Read metrics from Kafka topic(s)
# [[inputs.kafka_consumer]]
# ## kafka servers
# brokers = ["localhost:9092"]
# ## topic(s) to consume
# topics = ["telegraf"]
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false
#
# ## Optional SASL Config
# # sasl_username = "kafka"
# # sasl_password = "secret"
#
# ## the name of the consumer group
# consumer_group = "telegraf_metrics_consumers"
# ## Offset (must be either "oldest" or "newest")
# offset = "oldest"
#
# ## Data format to consume.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
# data_format = "influx"
#
# ## Maximum length of a message to consume, in bytes (default 0/unlimited);
# ## larger messages are dropped
# max_message_len = 65536
# # Read metrics from Kafka topic(s)
# [[inputs.kafka_consumer_legacy]]
# ## topic(s) to consume
# topics = ["telegraf"]
# ## an array of Zookeeper connection strings
@@ -2232,6 +2471,7 @@
# ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log
# ## /var/log/apache.log -> only tail the apache log file
# files = ["/var/log/apache/access.log"]
#
# ## Read files that currently exist from the beginning. Files that are created
# ## while telegraf is running (and that match the "files" globs) will always
# ## be read from the beginning.
@@ -2247,12 +2487,26 @@
# ## %{COMMON_LOG_FORMAT} (plain apache & nginx access logs)
# ## %{COMBINED_LOG_FORMAT} (access logs + referrer & agent)
# patterns = ["%{COMBINED_LOG_FORMAT}"]
#
# ## Name of the output measurement.
# measurement = "apache_access_log"
#
# ## Full path(s) to custom pattern files.
# custom_pattern_files = []
#
# ## Custom patterns can also be defined here. Put one pattern per line.
# custom_patterns = '''
#
# ## Timezone allows you to provide an override for timestamps that
# ## don't already include an offset
# ## e.g. 04/06/2016 12:41:45 data one two 5.43µs
# ##
# ## Default: "" which renders UTC
# ## Options are as follows:
# ## 1. Local -- interpret based on machine localtime
# ## 2. "Canada/Eastern" -- Unix TZ values like those found in https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
# ## 3. UTC -- or blank/unspecified, will return timestamp in UTC
# timezone = "Canada/Eastern"
# '''
@@ -2261,6 +2515,8 @@
# servers = ["localhost:1883"]
# ## MQTT QoS, must be 0, 1, or 2
# qos = 0
# ## Connection timeout for initial connection in seconds
# connection_timeout = "30s"
#
# ## Topics to subscribe to
# topics = [
@@ -2351,6 +2607,11 @@
# ## 0 (default) is unlimited.
# # max_connections = 1024
#
# ## Read timeout.
# ## Only applies to stream sockets (e.g. TCP).
# ## 0 (default) is unlimited.
# # read_timeout = "30s"
#
# ## Maximum socket buffer size in bytes.
# ## For stream sockets, once the buffer fills up, the sender will start backing up.
# ## For datagram sockets, once the buffer fills up, metrics will start dropping.
@@ -2370,8 +2631,14 @@
# # data_format = "influx"
# # Statsd Server
# # Statsd UDP/TCP Server
# [[inputs.statsd]]
# ## Protocol, must be "tcp" or "udp" (default=udp)
# protocol = "udp"
#
# ## MaxTCPConnection - applicable when protocol is set to tcp (default=250)
# max_tcp_connections = 250
#
# ## Address and port to host UDP listener on
# service_address = ":8125"
#
@@ -2472,3 +2739,9 @@
# [inputs.webhooks.papertrail]
# path = "/papertrail"
# # This plugin implements the Zipkin http server to gather trace and timing data needed to troubleshoot latency problems in microservice architectures.
# [[inputs.zipkin]]
# # path = "/api/v1/spans" # URL path for span data
# # port = 9411 # Port on which Telegraf listens

View File

@@ -143,19 +143,31 @@
[[inputs.diskio]]
# no configuration
# read metrics from a Kafka topic
# read metrics from a Kafka 0.9+ topic
[[inputs.kafka_consumer]]
# topic(s) to consume
## kafka brokers
brokers = ["localhost:9092"]
## topic(s) to consume
topics = ["telegraf"]
## the name of the consumer group
consumer_group = "telegraf_metrics_consumers"
## Offset (must be either "oldest" or "newest")
offset = "oldest"
# read metrics from a Kafka legacy topic
[[inputs.kafka_consumer_legacy]]
## topic(s) to consume
topics = ["telegraf"]
# an array of Zookeeper connection strings
zookeeper_peers = ["localhost:2181"]
# the name of the consumer group
## the name of the consumer group
consumer_group = "telegraf_metrics_consumers"
# Maximum number of points to buffer between collection intervals
point_buffer = 100000
# Offset (must be either "oldest" or "newest")
## Offset (must be either "oldest" or "newest")
offset = "oldest"
# Read metrics from a LeoFS Server via SNMP
[[inputs.leofs]]
# An array of URI to gather stats about LeoFS.

View File

@@ -45,7 +45,7 @@ func (g *GlobPath) Match() map[string]os.FileInfo {
if !g.hasMeta {
out := make(map[string]os.FileInfo)
info, err := os.Stat(g.path)
if !os.IsNotExist(err) {
if err == nil {
out[g.path] = info
}
return out
@@ -55,7 +55,7 @@ func (g *GlobPath) Match() map[string]os.FileInfo {
files, _ := filepath.Glob(g.path)
for _, file := range files {
info, err := os.Stat(file)
if !os.IsNotExist(err) {
if err == nil {
out[file] = info
}
}
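The switch from `!os.IsNotExist(err)` to `err == nil` matters because a permission error is not a "does not exist" error, so the old check admitted paths whose `FileInfo` was nil. A small stand-alone demonstration:

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	err := os.ErrPermission
	fmt.Println(os.IsNotExist(err))  // false: permission != not-exist
	fmt.Println(!os.IsNotExist(err)) // true:  the old check kept the entry
	fmt.Println(err == nil)          // false: the new check drops it
}
```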

View File

@@ -1,6 +1,7 @@
package globpath
import (
"os"
"runtime"
"strings"
"testing"
@@ -70,3 +71,20 @@ func getTestdataDir() string {
_, filename, _, _ := runtime.Caller(1)
return strings.Replace(filename, "globpath_test.go", "testdata", 1)
}
func TestMatch_ErrPermission(t *testing.T) {
tests := []struct {
input string
expected map[string]os.FileInfo
}{
{"/root/foo", map[string]os.FileInfo{}},
{"/root/f*", map[string]os.FileInfo{}},
}
for _, test := range tests {
glob, err := Compile(test.input)
require.NoError(t, err)
actual := glob.Match()
require.Equal(t, test.expected, actual)
}
}

View File

@@ -132,6 +132,7 @@ func (f *Filter) Apply(
return true
}
// IsActive checking if filter is active
func (f *Filter) IsActive() bool {
return f.isActive
}
@@ -139,43 +140,66 @@ func (f *Filter) IsActive() bool {
// shouldNamePass returns true if the metric should pass, false if should drop
// based on the drop/pass filter parameters
func (f *Filter) shouldNamePass(key string) bool {
if f.namePass != nil {
pass := func(f *Filter) bool {
if f.namePass.Match(key) {
return true
}
return false
}
if f.nameDrop != nil {
drop := func(f *Filter) bool {
if f.nameDrop.Match(key) {
return false
}
return true
}
if f.namePass != nil && f.nameDrop != nil {
return pass(f) && drop(f)
} else if f.namePass != nil {
return pass(f)
} else if f.nameDrop != nil {
return drop(f)
}
return true
}
// shouldFieldPass returns true if the metric should pass, false if should drop
// based on the drop/pass filter parameters
func (f *Filter) shouldFieldPass(key string) bool {
if f.fieldPass != nil {
pass := func(f *Filter) bool {
if f.fieldPass.Match(key) {
return true
}
return false
}
if f.fieldDrop != nil {
drop := func(f *Filter) bool {
if f.fieldDrop.Match(key) {
return false
}
return true
}
if f.fieldPass != nil && f.fieldDrop != nil {
return pass(f) && drop(f)
} else if f.fieldPass != nil {
return pass(f)
} else if f.fieldDrop != nil {
return drop(f)
}
return true
}
// shouldTagsPass returns true if the metric should pass, false if should drop
// based on the tagdrop/tagpass filter parameters
func (f *Filter) shouldTagsPass(tags map[string]string) bool {
if f.TagPass != nil {
pass := func(f *Filter) bool {
for _, pat := range f.TagPass {
if pat.filter == nil {
continue
@@ -189,7 +213,7 @@ func (f *Filter) shouldTagsPass(tags map[string]string) bool {
return false
}
if f.TagDrop != nil {
drop := func(f *Filter) bool {
for _, pat := range f.TagDrop {
if pat.filter == nil {
continue
@@ -203,6 +227,18 @@ func (f *Filter) shouldTagsPass(tags map[string]string) bool {
return true
}
// Add additional logic in case where both parameters are set.
// see: https://github.com/influxdata/telegraf/issues/2860
if f.TagPass != nil && f.TagDrop != nil {
// return true only in case when tag pass and won't be dropped (true, true).
// in case when the same tag should be passed and dropped it will be dropped (true, false).
return pass(f) && drop(f)
} else if f.TagPass != nil {
return pass(f)
} else if f.TagDrop != nil {
return drop(f)
}
return true
}
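In effect, when both pass and drop lists are configured, an item survives only if it matches the pass list and does not match the drop list. A stand-alone sketch of that combined rule (a hypothetical helper, not the `Filter` type itself), using the same inputs as the tests that follow:

```go
package main

import "fmt"

// shouldPass applies the combined semantics: pass must match (if set)
// AND drop must not match (if set).
func shouldPass(name string, pass, drop map[string]bool) bool {
	ok := true
	if pass != nil {
		ok = ok && pass[name]
	}
	if drop != nil {
		ok = ok && !drop[name]
	}
	return ok
}

func main() {
	pass := map[string]bool{"name1": true, "name2": true}
	drop := map[string]bool{"name1": true, "name3": true}
	for _, n := range []string{"name1", "name2", "name3", "name4"} {
		fmt.Println(n, shouldPass(n, pass, drop)) // only name2 passes
	}
}
```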

View File

@@ -357,3 +357,88 @@ func TestFilter_FilterTagsMatches(t *testing.T) {
"mytag": "foobar",
}, pretags)
}
// TestFilter_FilterNamePassAndDrop used for check case when
// both parameters were defined
// see: https://github.com/influxdata/telegraf/issues/2860
func TestFilter_FilterNamePassAndDrop(t *testing.T) {
inputData := []string{"name1", "name2", "name3", "name4"}
expectedResult := []bool{false, true, false, false}
f := Filter{
NamePass: []string{"name1", "name2"},
NameDrop: []string{"name1", "name3"},
}
require.NoError(t, f.Compile())
for i, name := range inputData {
assert.Equal(t, f.shouldNamePass(name), expectedResult[i])
}
}
// TestFilter_FilterFieldPassAndDrop used for check case when
// both parameters were defined
// see: https://github.com/influxdata/telegraf/issues/2860
func TestFilter_FilterFieldPassAndDrop(t *testing.T) {
inputData := []string{"field1", "field2", "field3", "field4"}
expectedResult := []bool{false, true, false, false}
f := Filter{
FieldPass: []string{"field1", "field2"},
FieldDrop: []string{"field1", "field3"},
}
require.NoError(t, f.Compile())
for i, field := range inputData {
assert.Equal(t, f.shouldFieldPass(field), expectedResult[i])
}
}
// TestFilter_FilterTagsPassAndDrop used for check case when
// both parameters were defined
// see: https://github.com/influxdata/telegraf/issues/2860
func TestFilter_FilterTagsPassAndDrop(t *testing.T) {
inputData := []map[string]string{
{"tag1": "1", "tag2": "3"},
{"tag1": "1", "tag2": "2"},
{"tag1": "2", "tag2": "1"},
{"tag1": "4", "tag2": "1"},
}
expectedResult := []bool{false, true, false, false}
filterPass := []TagFilter{
TagFilter{
Name: "tag1",
Filter: []string{"1", "4"},
},
}
filterDrop := []TagFilter{
TagFilter{
Name: "tag1",
Filter: []string{"4"},
},
TagFilter{
Name: "tag2",
Filter: []string{"3"},
},
}
f := Filter{
TagDrop: filterDrop,
TagPass: filterPass,
}
require.NoError(t, f.Compile())
for i, tag := range inputData {
assert.Equal(t, f.shouldTagsPass(tag), expectedResult[i])
}
}

View File

@@ -150,12 +150,6 @@ func makemetric(
continue
}
case string:
if strings.HasSuffix(val, `\`) {
log.Printf("D! Measurement [%s] field [%s] has a value "+
"ending with a backslash, skipping", measurement, k)
delete(fields, k)
continue
}
fields[k] = v
default:
fields[k] = v

View File

@@ -114,6 +114,7 @@ func (r *RunningAggregator) reset() {
// for period ticks to tell it when to push and reset the aggregator.
func (r *RunningAggregator) Run(
acc telegraf.Accumulator,
now time.Time,
shutdown chan struct{},
) {
// The start of the period is truncated to the nearest second.
@@ -132,7 +133,6 @@ func (r *RunningAggregator) Run(
// 2nd interval: 00:10 - 00:20.5
// etc.
//
now := time.Now()
r.periodStart = now.Truncate(time.Second)
truncation := now.Sub(r.periodStart)
r.periodEnd = r.periodStart.Add(r.Config.Period)
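The alignment described in the comment can be sketched standalone; the concrete start time and the 10s period here are assumptions for illustration, not values taken from the aggregator:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// The period start is truncated to the nearest second; the
	// truncated remainder is carried over to the end of the first
	// interval, which is why it can be up to a second longer.
	now := time.Date(2017, 10, 1, 0, 0, 0, 5e8, time.UTC) // 00:00:00.5
	periodStart := now.Truncate(time.Second)              // 00:00:00
	truncation := now.Sub(periodStart)                    // 500ms
	periodEnd := periodStart.Add(10 * time.Second)
	fmt.Println(periodStart.Format("15:04:05"), "to",
		periodEnd.Add(truncation).Format("15:04:05.0")) // 00:00:00 to 00:00:10.5
}
```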

View File

@@ -24,7 +24,7 @@ func TestAdd(t *testing.T) {
})
assert.NoError(t, ra.Config.Filter.Compile())
acc := testutil.Accumulator{}
go ra.Run(&acc, make(chan struct{}))
go ra.Run(&acc, time.Now(), make(chan struct{}))
m := ra.MakeMetric(
"RITest",
@@ -55,7 +55,7 @@ func TestAddMetricsOutsideCurrentPeriod(t *testing.T) {
})
assert.NoError(t, ra.Config.Filter.Compile())
acc := testutil.Accumulator{}
go ra.Run(&acc, make(chan struct{}))
go ra.Run(&acc, time.Now(), make(chan struct{}))
// metric before current period
m := ra.MakeMetric(
@@ -113,7 +113,7 @@ func TestAddAndPushOnePeriod(t *testing.T) {
wg.Add(1)
go func() {
defer wg.Done()
ra.Run(&acc, shutdown)
ra.Run(&acc, time.Now(), shutdown)
}()
m := ra.MakeMetric(

View File

@@ -370,16 +370,17 @@ func TestMakeMetric_TrailingSlash(t *testing.T) {
expectedTags: map[string]string{},
},
{
name: "Field value with trailing slash dropped",
name: "Field value with trailing slash okay",
measurement: `cpu`,
fields: map[string]interface{}{
"value": int64(42),
"bad": `xyzzy\`,
"ok": `xyzzy\`,
},
tags: map[string]string{},
expectedMeasurement: `cpu`,
expectedFields: map[string]interface{}{
"value": int64(42),
"ok": `xyzzy\`,
},
expectedTags: map[string]string{},
},
@@ -387,7 +388,7 @@ func TestMakeMetric_TrailingSlash(t *testing.T) {
name: "Must have one field after dropped",
measurement: `cpu`,
fields: map[string]interface{}{
"bad": `xyzzy\`,
"bad": math.NaN(),
},
tags: map[string]string{},
expectedNil: true,

View File

@@ -1,11 +1,15 @@
package models
import (
"sync"
"github.com/influxdata/telegraf"
)
type RunningProcessor struct {
Name string
Name string
sync.Mutex
Processor telegraf.Processor
Config *ProcessorConfig
}
@@ -24,6 +28,9 @@ type ProcessorConfig struct {
}
func (rp *RunningProcessor) Apply(in ...telegraf.Metric) []telegraf.Metric {
rp.Lock()
defer rp.Unlock()
ret := []telegraf.Metric{}
for _, metric := range in {
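The newly added mutex serializes concurrent calls to Apply. A minimal sketch of the same embedding pattern follows; the type and its Apply body are illustrative assumptions, not the telegraf types:

```go
package main

import (
	"fmt"
	"sync"
)

// guardedProcessor embeds sync.Mutex the way RunningProcessor now
// does, so Lock/Unlock can be called directly on the receiver.
type guardedProcessor struct {
	sync.Mutex
}

func (p *guardedProcessor) Apply(in ...int) []int {
	p.Lock()
	defer p.Unlock()
	return append([]int(nil), in...)
}

func main() {
	p := &guardedProcessor{}
	fmt.Println(p.Apply(1, 2, 3)) // [1 2 3]
}
```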

View File

@@ -20,8 +20,14 @@ var (
// stringFieldEscaper is for escaping string field values only.
// see https://docs.influxdata.com/influxdb/v1.0/write_protocols/line_protocol_tutorial/#special-characters-and-keywords
stringFieldEscaper = strings.NewReplacer(`"`, `\"`)
stringFieldUnEscaper = strings.NewReplacer(`\"`, `"`)
stringFieldEscaper = strings.NewReplacer(
`"`, `\"`,
`\`, `\\`,
)
stringFieldUnEscaper = strings.NewReplacer(
`\"`, `"`,
`\\`, `\`,
)
)
func escape(s string, t string) string {
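As a quick check of the new escaper pair, here is a standalone round-trip sketch using plain `strings.NewReplacer`, mirroring the definitions above:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	esc := strings.NewReplacer(`"`, `\"`, `\`, `\\`)
	unesc := strings.NewReplacer(`\"`, `"`, `\\`, `\`)

	s := `test\"`
	escaped := esc.Replace(s)
	fmt.Println(escaped)                // test\\\"
	fmt.Println(unesc.Replace(escaped)) // test\" (round-trips cleanly)
}
```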

View File

@@ -21,14 +21,14 @@ func New(
t time.Time,
mType ...telegraf.ValueType,
) (telegraf.Metric, error) {
if len(fields) == 0 {
return nil, fmt.Errorf("Metric cannot be made without any fields")
}
if len(name) == 0 {
return nil, fmt.Errorf("Metric cannot be made with an empty name")
return nil, fmt.Errorf("missing measurement name")
}
if len(fields) == 0 {
return nil, fmt.Errorf("%s: must have one or more fields", name)
}
if strings.HasSuffix(name, `\`) {
return nil, fmt.Errorf("Metric cannot have measurement name ending with a backslash")
return nil, fmt.Errorf("%s: measurement name cannot end with a backslash", name)
}
var thisType telegraf.ValueType
@@ -49,10 +49,10 @@ func New(
taglen := 0
for k, v := range tags {
if strings.HasSuffix(k, `\`) {
return nil, fmt.Errorf("Metric cannot have tag key ending with a backslash")
return nil, fmt.Errorf("%s: tag key cannot end with a backslash: %s", name, k)
}
if strings.HasSuffix(v, `\`) {
return nil, fmt.Errorf("Metric cannot have tag value ending with a backslash")
return nil, fmt.Errorf("%s: tag value cannot end with a backslash: %s", name, v)
}
if len(k) == 0 || len(v) == 0 {
@@ -77,15 +77,9 @@ func New(
// pre-allocate capacity of the fields slice
fieldlen := 0
for k, v := range fields {
for k, _ := range fields {
if strings.HasSuffix(k, `\`) {
return nil, fmt.Errorf("Metric cannot have field key ending with a backslash")
}
switch val := v.(type) {
case string:
if strings.HasSuffix(val, `\`) {
return nil, fmt.Errorf("Metric cannot have field value ending with a backslash")
}
return nil, fmt.Errorf("%s: field key cannot end with a backslash: %s", name, k)
}
// 10 bytes is completely arbitrary, but will at least prevent some
@@ -108,7 +102,8 @@ func New(
}
// indexUnescapedByte finds the index of the first byte equal to b in buf that
// is not escaped. Returns -1 if not found.
// is not escaped. Does not allow the escape char to be escaped. Returns -1 if
// not found.
func indexUnescapedByte(buf []byte, b byte) int {
var keyi int
for {
@@ -128,6 +123,46 @@ func indexUnescapedByte(buf []byte, b byte) int {
return keyi
}
// indexUnescapedByteBackslashEscaping finds the index of the first byte equal
// to b in buf that is not escaped. Allows for the escape char `\` to be
// escaped. Returns -1 if not found.
func indexUnescapedByteBackslashEscaping(buf []byte, b byte) int {
var keyi int
for {
i := bytes.IndexByte(buf[keyi:], b)
if i == -1 {
return -1
} else if i == 0 {
break
}
keyi += i
if countBackslashes(buf, keyi-1)%2 == 0 {
break
} else {
keyi++
}
}
return keyi
}
// countBackslashes counts the number of preceding backslashes starting at
// the 'start' index.
func countBackslashes(buf []byte, index int) int {
var count int
for {
if index < 0 {
return count
}
if buf[index] == '\\' {
count++
index--
} else {
break
}
}
return count
}
type metric struct {
name []byte
tags []byte
@@ -289,7 +324,7 @@ func (m *metric) Fields() map[string]interface{} {
// end index of field value
var i3 int
if m.fields[i:][i2] == '"' {
i3 = indexUnescapedByte(m.fields[i:][i2+1:], '"')
i3 = indexUnescapedByteBackslashEscaping(m.fields[i:][i2+1:], '"')
if i3 == -1 {
i3 = len(m.fields[i:])
}
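The parity rule is the heart of the new function: a byte is escaped only when preceded by an odd number of backslashes, since `\\` escapes the backslash itself. A minimal sketch reusing the countBackslashes logic shown above:

```go
package main

import (
	"bytes"
	"fmt"
)

// countBackslashes, as above: counts the backslashes immediately
// preceding buf[index], walking left.
func countBackslashes(buf []byte, index int) int {
	count := 0
	for index >= 0 && buf[index] == '\\' {
		count++
		index--
	}
	return count
}

func main() {
	buf := []byte(`v="test\\" 0`)
	i := bytes.LastIndexByte(buf, '"')
	// two backslashes precede the closing quote, so they escape each
	// other and the quote itself counts as unescaped
	fmt.Println(countBackslashes(buf, i-1)%2 == 0) // true
}
```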

View File

@@ -257,6 +257,8 @@ func TestNewMetric_Fields(t *testing.T) {
"string": "test",
"quote_string": `x"y`,
"backslash_quote_string": `x\"y`,
"backslash": `x\y`,
"ends_with_backslash": `x\`,
}
m, err := New("cpu", tags, fields, now)
assert.NoError(t, err)
@@ -708,12 +710,6 @@ func TestNewMetric_TrailingSlash(t *testing.T) {
`value\`: "x",
},
},
{
name: "cpu",
fields: map[string]interface{}{
"value": `x\`,
},
},
{
name: "cpu",
tags: map[string]string{

View File

@@ -4,6 +4,7 @@ import (
"io"
"io/ioutil"
"regexp"
"strings"
"testing"
"time"
@@ -620,6 +621,83 @@ func TestMetricReader_SplitMetricChangingBuffer2(t *testing.T) {
}
}
func TestReader_Read(t *testing.T) {
epoch := time.Unix(0, 0)
type args struct {
name string
tags map[string]string
fields map[string]interface{}
t time.Time
mType []telegraf.ValueType
}
tests := []struct {
name string
args args
expected []byte
}{
{
name: "escape backslashes in string field",
args: args{
name: "cpu",
tags: map[string]string{},
fields: map[string]interface{}{"value": `test\`},
t: epoch,
},
expected: []byte(`cpu value="test\\" 0`),
},
{
name: "escape quote in string field",
args: args{
name: "cpu",
tags: map[string]string{},
fields: map[string]interface{}{"value": `test"`},
t: epoch,
},
expected: []byte(`cpu value="test\"" 0`),
},
{
name: "escape quote and backslash in string field",
args: args{
name: "cpu",
tags: map[string]string{},
fields: map[string]interface{}{"value": `test\"`},
t: epoch,
},
expected: []byte(`cpu value="test\\\"" 0`),
},
{
name: "escape multiple backslash in string field",
args: args{
name: "cpu",
tags: map[string]string{},
fields: map[string]interface{}{"value": `test\\`},
t: epoch,
},
expected: []byte(`cpu value="test\\\\" 0`),
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
buf := make([]byte, 512)
m, err := New(tt.args.name, tt.args.tags, tt.args.fields, tt.args.t, tt.args.mType...)
require.NoError(t, err)
r := NewReader([]telegraf.Metric{m})
num, err := r.Read(buf)
if err != io.EOF {
require.NoError(t, err)
}
line := string(buf[:num])
// This is done so that we can use raw strings in the test spec
noeol := strings.TrimRight(line, "\n")
require.Equal(t, string(tt.expected), noeol)
require.Equal(t, len(tt.expected)+1, num)
})
}
}
func TestMetricRoundtrip(t *testing.T) {
const lp = `nstat,bu=linux,cls=server,dc=cer,env=production,host=hostname,name=netstat,sr=database IpExtInBcastOctets=12570626154i,IpExtInBcastPkts=95541226i,IpExtInCEPkts=0i,IpExtInCsumErrors=0i,IpExtInECT0Pkts=55674i,IpExtInECT1Pkts=0i,IpExtInMcastOctets=5928296i,IpExtInMcastPkts=174365i,IpExtInNoECTPkts=17965863529i,IpExtInNoRoutes=20i,IpExtInOctets=3334866321815i,IpExtInTruncatedPkts=0i,IpExtOutBcastOctets=0i,IpExtOutBcastPkts=0i,IpExtOutMcastOctets=0i,IpExtOutMcastPkts=0i,IpExtOutOctets=31397892391399i,TcpExtArpFilter=0i,TcpExtBusyPollRxPackets=0i,TcpExtDelayedACKLocked=14094i,TcpExtDelayedACKLost=302083i,TcpExtDelayedACKs=55486507i,TcpExtEmbryonicRsts=11879i,TcpExtIPReversePathFilter=0i,TcpExtListenDrops=1736i,TcpExtListenOverflows=0i,TcpExtLockDroppedIcmps=0i,TcpExtOfoPruned=0i,TcpExtOutOfWindowIcmps=8i,TcpExtPAWSActive=0i,TcpExtPAWSEstab=974i,TcpExtPAWSPassive=0i,TcpExtPruneCalled=0i,TcpExtRcvPruned=0i,TcpExtSyncookiesFailed=12593i,TcpExtSyncookiesRecv=0i,TcpExtSyncookiesSent=0i,TcpExtTCPACKSkippedChallenge=0i,TcpExtTCPACKSkippedFinWait2=0i,TcpExtTCPACKSkippedPAWS=806i,TcpExtTCPACKSkippedSeq=519i,TcpExtTCPACKSkippedSynRecv=0i,TcpExtTCPACKSkippedTimeWait=0i,TcpExtTCPAbortFailed=0i,TcpExtTCPAbortOnClose=22i,TcpExtTCPAbortOnData=36593i,TcpExtTCPAbortOnLinger=0i,TcpExtTCPAbortOnMemory=0i,TcpExtTCPAbortOnTimeout=674i,TcpExtTCPAutoCorking=494253233i,TcpExtTCPBacklogDrop=0i,TcpExtTCPChallengeACK=281i,TcpExtTCPDSACKIgnoredNoUndo=93354i,TcpExtTCPDSACKIgnoredOld=336i,TcpExtTCPDSACKOfoRecv=0i,TcpExtTCPDSACKOfoSent=7i,TcpExtTCPDSACKOldSent=302073i,TcpExtTCPDSACKRecv=215884i,TcpExtTCPDSACKUndo=7633i,TcpExtTCPDeferAcceptDrop=0i,TcpExtTCPDirectCopyFromBacklog=0i,TcpExtTCPDirectCopyFromPrequeue=0i,TcpExtTCPFACKReorder=1320i,TcpExtTCPFastOpenActive=0i,TcpExtTCPFastOpenActiveFail=0i,TcpExtTCPFastOpenCookieReqd=0i,TcpExtTCPFastOpenListenOverflow=0i,TcpExtTCPFastOpenPassive=0i,TcpExtTCPFastOpenPassiveFail=0i,TcpExtTCPFastRetrans=350681i,TcpExtTCPForwardRetrans=142168i,TcpExtTCPFromZeroWindowAdv=4317i,TcpExtTCPFullUndo=29502i,TcpExtTCPHPAcks=10267073000i,TcpExtTCPHPHits=5629837098i,TcpExtTCPHPHitsToUser=0i,TcpExtTCPHystartDelayCwnd=285127i,TcpExtTCPHystartDelayDetect=12318i,TcpExtTCPHystartTrainCwnd=69160570i,TcpExtTCPHystartTrainDetect=3315799i,TcpExtTCPLossFailures=109i,TcpExtTCPLossProbeRecovery=110819i,TcpExtTCPLossProbes=233995i,TcpExtTCPLossUndo=5276i,TcpExtTCPLostRetransmit=397i,TcpExtTCPMD5NotFound=0i,TcpExtTCPMD5Unexpected=0i,TcpExtTCPMemoryPressures=0i,TcpExtTCPMinTTLDrop=0i,TcpExtTCPOFODrop=0i,TcpExtTCPOFOMerge=7i,TcpExtTCPOFOQueue=15196i,TcpExtTCPOrigDataSent=29055119435i,TcpExtTCPPartialUndo=21320i,TcpExtTCPPrequeueDropped=0i,TcpExtTCPPrequeued=0i,TcpExtTCPPureAcks=1236441827i,TcpExtTCPRcvCoalesce=225590473i,TcpExtTCPRcvCollapsed=0i,TcpExtTCPRenoFailures=0i,TcpExtTCPRenoRecovery=0i,TcpExtTCPRenoRecoveryFail=0i,TcpExtTCPRenoReorder=0i,TcpExtTCPReqQFullDoCookies=0i,TcpExtTCPReqQFullDrop=0i,TcpExtTCPRetransFail=41i,TcpExtTCPSACKDiscard=0i,TcpExtTCPSACKReneging=0i,TcpExtTCPSACKReorder=4307i,TcpExtTCPSYNChallenge=244i,TcpExtTCPSackFailures=1698i,TcpExtTCPSackMerged=184668i,TcpExtTCPSackRecovery=97369i,TcpExtTCPSackRecoveryFail=381i,TcpExtTCPSackShiftFallback=2697079i,TcpExtTCPSackShifted=760299i,TcpExtTCPSchedulerFailed=0i,TcpExtTCPSlowStartRetrans=9276i,TcpExtTCPSpuriousRTOs=959i,TcpExtTCPSpuriousRtxHostQueues=2973i,TcpExtTCPSynRetrans=200970i,TcpExtTCPTSReorder=15221i,TcpExtTCPTimeWaitOverflow=0i,TcpExtTCPTimeouts=70127i,TcpExtTCPToZeroWindowAdv=4317i,TcpExtTCPWantZeroWindowAdv=2133i,TcpExtTW=24809813i,TcpExtTWKilled=0i,TcpExtTWRecycled=0i 1496460785000000000
nstat,bu=linux,cls=server,dc=cer,env=production,host=hostname,name=snmp,sr=database IcmpInAddrMaskReps=0i,IcmpInAddrMasks=90i,IcmpInCsumErrors=0i,IcmpInDestUnreachs=284401i,IcmpInEchoReps=9i,IcmpInEchos=1761912i,IcmpInErrors=407i,IcmpInMsgs=2047767i,IcmpInParmProbs=0i,IcmpInRedirects=0i,IcmpInSrcQuenchs=0i,IcmpInTimeExcds=46i,IcmpInTimestampReps=0i,IcmpInTimestamps=1309i,IcmpMsgInType0=9i,IcmpMsgInType11=46i,IcmpMsgInType13=1309i,IcmpMsgInType17=90i,IcmpMsgInType3=284401i,IcmpMsgInType8=1761912i,IcmpMsgOutType0=1761912i,IcmpMsgOutType14=1248i,IcmpMsgOutType3=108709i,IcmpMsgOutType8=9i,IcmpOutAddrMaskReps=0i,IcmpOutAddrMasks=0i,IcmpOutDestUnreachs=108709i,IcmpOutEchoReps=1761912i,IcmpOutEchos=9i,IcmpOutErrors=0i,IcmpOutMsgs=1871878i,IcmpOutParmProbs=0i,IcmpOutRedirects=0i,IcmpOutSrcQuenchs=0i,IcmpOutTimeExcds=0i,IcmpOutTimestampReps=1248i,IcmpOutTimestamps=0i,IpDefaultTTL=64i,IpForwDatagrams=0i,IpForwarding=2i,IpFragCreates=0i,IpFragFails=0i,IpFragOKs=0i,IpInAddrErrors=0i,IpInDelivers=17658795773i,IpInDiscards=0i,IpInHdrErrors=0i,IpInReceives=17659269339i,IpInUnknownProtos=0i,IpOutDiscards=236976i,IpOutNoRoutes=1009i,IpOutRequests=23466783734i,IpReasmFails=0i,IpReasmOKs=0i,IpReasmReqds=0i,IpReasmTimeout=0i,TcpActiveOpens=23308977i,TcpAttemptFails=3757543i,TcpCurrEstab=280i,TcpEstabResets=184792i,TcpInCsumErrors=0i,TcpInErrs=232i,TcpInSegs=17536573089i,TcpMaxConn=-1i,TcpOutRsts=4051451i,TcpOutSegs=29836254873i,TcpPassiveOpens=176546974i,TcpRetransSegs=878085i,TcpRtoAlgorithm=1i,TcpRtoMax=120000i,TcpRtoMin=200i,UdpInCsumErrors=0i,UdpInDatagrams=24441661i,UdpInErrors=0i,UdpLiteInCsumErrors=0i,UdpLiteInDatagrams=0i,UdpLiteInErrors=0i,UdpLiteNoPorts=0i,UdpLiteOutDatagrams=0i,UdpLiteRcvbufErrors=0i,UdpLiteSndbufErrors=0i,UdpNoPorts=17660i,UdpOutDatagrams=51807896i,UdpRcvbufErrors=0i,UdpSndbufErrors=236922i 1496460785000000000

View File

@@ -1,5 +1,6 @@
package all
import (
_ "github.com/influxdata/telegraf/plugins/aggregators/histogram"
_ "github.com/influxdata/telegraf/plugins/aggregators/minmax"
)

View File

@@ -0,0 +1,97 @@
# Histogram Aggregator Plugin
The histogram aggregator plugin creates histograms containing the counts of
field values within a range.
Values added to a bucket are also added to the larger buckets in the
distribution. This creates a [cumulative histogram](https://en.wikipedia.org/wiki/Histogram#/media/File:Cumulative_vs_normal_histogram.svg).
Like other Telegraf aggregators, the metric is emitted every `period` seconds.
Bucket counts, however, are not reset between periods and are monotonically
non-decreasing while Telegraf is running.
#### Design
Each metric is passed to the aggregator, which looks up the histogram buckets
configured for its fields. If buckets are found, the count of the matching
bucket is incremented by one; values larger than the highest bucket border are
counted in the `+Inf` bucket. Every `period` seconds this data is forwarded to
the outputs.
The bucket hit-counting algorithm is based on the implementation in the
Prometheus
[client](https://github.com/prometheus/client_golang/blob/master/prometheus/histogram.go).
### Configuration
```toml
# Configuration for aggregate histogram metrics
[[aggregators.histogram]]
## The period in which to flush the aggregator.
period = "30s"
## If true, the original metric will be dropped by the
## aggregator and will not get sent to the output plugins.
drop_original = false
## Example config that aggregates all fields of the metric.
# [[aggregators.histogram.config]]
# ## The set of buckets.
# buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0]
# ## The name of the metric.
# measurement_name = "cpu"
## Example config that aggregates only specific fields of the metric.
# [[aggregators.histogram.config]]
# ## The set of buckets.
# buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
# ## The name of the metric.
# measurement_name = "diskio"
# ## The specific fields of the metric to aggregate.
# fields = ["io_time", "read_time", "write_time"]
```
The user is responsible for defining the borders of the histogram buckets as
well as the measurement name and fields to aggregate.
Each histogram config section must contain a `buckets` and `measurement_name`
option. Optionally, if `fields` is set, only the listed fields are aggregated;
if `fields` is not set, all fields are aggregated.
The `buckets` option contains a list of floats which specify the bucket
boundaries. Each float value defines the inclusive upper bound of the bucket.
The `+Inf` bucket is added automatically and does not need to be defined.
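The inclusive-upper-bound behavior falls out of a binary search over the sorted borders. A small sketch follows; the bucket and sample values are assumptions for illustration:

```go
package main

import (
	"fmt"
	"sort"
)

func main() {
	// sort.SearchFloat64s returns the first index whose border is >=
	// the value, which makes every border an inclusive upper limit;
	// an index equal to len(buckets) means the implicit +Inf bucket.
	buckets := []float64{0.0, 10.0, 20.0, 30.0, 40.0}
	for _, v := range []float64{10.0, 15.3, 105.0} {
		fmt.Println(v, sort.SearchFloat64s(buckets, v)) // 1, 2, 5 (+Inf)
	}
}
```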
### Measurements & Fields:
The suffix `_bucket` is appended to each field key.
- measurement1
- field1_bucket
- field2_bucket
### Tags:
All measurements are given the tag `le` ("less than or equal"). Its value is
the right border of a bucket: a count tagged `le=10` covers every observed
value less than or equal to 10. For example, given a metric value of 10 and
the buckets [5, 10, 30, 70, 100], the value is counted in the bucket with the
right border `10` and, since the histogram is cumulative, in every larger
bucket as well.
### Example Output:
```
cpu,cpu=cpu1,host=localhost,le=0.0 usage_idle_bucket=0i 1486998330000000000
cpu,cpu=cpu1,host=localhost,le=10.0 usage_idle_bucket=0i 1486998330000000000
cpu,cpu=cpu1,host=localhost,le=20.0 usage_idle_bucket=1i 1486998330000000000
cpu,cpu=cpu1,host=localhost,le=30.0 usage_idle_bucket=2i 1486998330000000000
cpu,cpu=cpu1,host=localhost,le=40.0 usage_idle_bucket=2i 1486998330000000000
cpu,cpu=cpu1,host=localhost,le=50.0 usage_idle_bucket=2i 1486998330000000000
cpu,cpu=cpu1,host=localhost,le=60.0 usage_idle_bucket=2i 1486998330000000000
cpu,cpu=cpu1,host=localhost,le=70.0 usage_idle_bucket=2i 1486998330000000000
cpu,cpu=cpu1,host=localhost,le=80.0 usage_idle_bucket=2i 1486998330000000000
cpu,cpu=cpu1,host=localhost,le=90.0 usage_idle_bucket=2i 1486998330000000000
cpu,cpu=cpu1,host=localhost,le=100.0 usage_idle_bucket=2i 1486998330000000000
cpu,cpu=cpu1,host=localhost,le=+Inf usage_idle_bucket=2i 1486998330000000000
```

View File

@@ -0,0 +1,315 @@
package histogram
import (
"sort"
"strconv"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/aggregators"
)
// bucketTag is the tag that contains the right border of the bucket
const bucketTag = "le"
// bucketInf is the right bucket border for infinite values
const bucketInf = "+Inf"
// HistogramAggregator is an aggregator that holds the histogram configs and the histograms for the configured metrics
type HistogramAggregator struct {
Configs []config `toml:"config"`
buckets bucketsByMetrics
cache map[uint64]metricHistogramCollection
}
// config holds the name and fields of a metric together with its histogram buckets.
type config struct {
Metric string `toml:"measurement_name"`
Fields []string `toml:"fields"`
Buckets buckets `toml:"buckets"`
}
// bucketsByMetrics contains the buckets grouped by metric and field name
type bucketsByMetrics map[string]bucketsByFields
// bucketsByFields contains the buckets grouped by field name
type bucketsByFields map[string]buckets
// buckets contains the right borders of the buckets
type buckets []float64
// metricHistogramCollection aggregates the histogram data
type metricHistogramCollection struct {
histogramCollection map[string]counts
name string
tags map[string]string
}
// counts is the number of hits in each bucket
type counts []int64
// groupedByCountFields contains fields grouped by their count values
type groupedByCountFields struct {
name string
tags map[string]string
fieldsWithCount map[string]int64
}
// NewHistogramAggregator creates new histogram aggregator
func NewHistogramAggregator() telegraf.Aggregator {
h := &HistogramAggregator{}
h.buckets = make(bucketsByMetrics)
h.resetCache()
return h
}
var sampleConfig = `
## The period in which to flush the aggregator.
period = "30s"
## If true, the original metric will be dropped by the
## aggregator and will not get sent to the output plugins.
drop_original = false
## Example config that aggregates all fields of the metric.
# [[aggregators.histogram.config]]
# ## The set of buckets.
# buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0]
# ## The name of the metric.
# measurement_name = "cpu"
## Example config that aggregates only specific fields of the metric.
# [[aggregators.histogram.config]]
# ## The set of buckets.
# buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
# ## The name of the metric.
# measurement_name = "diskio"
# ## The specific fields of the metric to aggregate.
# fields = ["io_time", "read_time", "write_time"]
`
// SampleConfig returns sample of config
func (h *HistogramAggregator) SampleConfig() string {
return sampleConfig
}
// Description returns description of aggregator plugin
func (h *HistogramAggregator) Description() string {
return "Create aggregate histograms."
}
// Add adds new hit to the buckets
func (h *HistogramAggregator) Add(in telegraf.Metric) {
bucketsByField := make(map[string][]float64)
for field := range in.Fields() {
buckets := h.getBuckets(in.Name(), field)
if buckets != nil {
bucketsByField[field] = buckets
}
}
if len(bucketsByField) == 0 {
return
}
id := in.HashID()
agr, ok := h.cache[id]
if !ok {
agr = metricHistogramCollection{
name: in.Name(),
tags: in.Tags(),
histogramCollection: make(map[string]counts),
}
}
for field, value := range in.Fields() {
if buckets, ok := bucketsByField[field]; ok {
if agr.histogramCollection[field] == nil {
agr.histogramCollection[field] = make(counts, len(buckets)+1)
}
if value, ok := convert(value); ok {
index := sort.SearchFloat64s(buckets, value)
agr.histogramCollection[field][index]++
}
}
}
h.cache[id] = agr
}
// Push returns histogram values for metrics
func (h *HistogramAggregator) Push(acc telegraf.Accumulator) {
metricsWithGroupedFields := []groupedByCountFields{}
for _, aggregate := range h.cache {
for field, counts := range aggregate.histogramCollection {
h.groupFieldsByBuckets(&metricsWithGroupedFields, aggregate.name, field, copyTags(aggregate.tags), counts)
}
}
for _, metric := range metricsWithGroupedFields {
acc.AddFields(metric.name, makeFieldsWithCount(metric.fieldsWithCount), metric.tags)
}
}
// groupFieldsByBuckets groups fields by metric buckets which are represented as tags
func (h *HistogramAggregator) groupFieldsByBuckets(
metricsWithGroupedFields *[]groupedByCountFields,
name string,
field string,
tags map[string]string,
counts []int64,
) {
count := int64(0)
for index, bucket := range h.getBuckets(name, field) {
count += counts[index]
tags[bucketTag] = strconv.FormatFloat(bucket, 'f', -1, 64)
h.groupField(metricsWithGroupedFields, name, field, count, copyTags(tags))
}
count += counts[len(counts)-1]
tags[bucketTag] = bucketInf
h.groupField(metricsWithGroupedFields, name, field, count, tags)
}
// groupField groups field by count value
func (h *HistogramAggregator) groupField(
metricsWithGroupedFields *[]groupedByCountFields,
name string,
field string,
count int64,
tags map[string]string,
) {
for key, metric := range *metricsWithGroupedFields {
if name == metric.name && isTagsIdentical(tags, metric.tags) {
(*metricsWithGroupedFields)[key].fieldsWithCount[field] = count
return
}
}
fieldsWithCount := map[string]int64{
field: count,
}
*metricsWithGroupedFields = append(
*metricsWithGroupedFields,
groupedByCountFields{name: name, tags: tags, fieldsWithCount: fieldsWithCount},
)
}
// Reset does nothing, because counts must accumulate over many periods; resetting
// each period would leave the histogram with only a small part of the distribution.
func (h *HistogramAggregator) Reset() {}
// resetCache resets cached counts(hits) in the buckets
func (h *HistogramAggregator) resetCache() {
h.cache = make(map[uint64]metricHistogramCollection)
}
// getBuckets finds buckets and returns them
func (h *HistogramAggregator) getBuckets(metric string, field string) []float64 {
if buckets, ok := h.buckets[metric][field]; ok {
return buckets
}
for _, config := range h.Configs {
if config.Metric == metric {
if !isBucketExists(field, config) {
continue
}
if _, ok := h.buckets[metric]; !ok {
h.buckets[metric] = make(bucketsByFields)
}
h.buckets[metric][field] = sortBuckets(config.Buckets)
}
}
return h.buckets[metric][field]
}
// isBucketExists reports whether buckets exist for the passed field
func isBucketExists(field string, cfg config) bool {
if len(cfg.Fields) == 0 {
return true
}
for _, fl := range cfg.Fields {
if fl == field {
return true
}
}
return false
}
// sortBuckets sorts the buckets if needed
func sortBuckets(buckets []float64) []float64 {
for i, bucket := range buckets {
if i < len(buckets)-1 && bucket >= buckets[i+1] {
sort.Float64s(buckets)
break
}
}
return buckets
}
// convert converts interface to concrete type
func convert(in interface{}) (float64, bool) {
switch v := in.(type) {
case float64:
return v, true
case int64:
return float64(v), true
default:
return 0, false
}
}
// copyTags copies tags
func copyTags(tags map[string]string) map[string]string {
copiedTags := map[string]string{}
for key, val := range tags {
copiedTags[key] = val
}
return copiedTags
}
// isTagsIdentical checks whether two sets of tags are identical
func isTagsIdentical(originalTags, checkedTags map[string]string) bool {
if len(originalTags) != len(checkedTags) {
return false
}
for tagName, tagValue := range originalTags {
if tagValue != checkedTags[tagName] {
return false
}
}
return true
}
// makeFieldsWithCount maps each field name, suffixed with `_bucket`, to its count value
func makeFieldsWithCount(fieldsWithCountIn map[string]int64) map[string]interface{} {
fieldsWithCountOut := map[string]interface{}{}
for field, count := range fieldsWithCountIn {
fieldsWithCountOut[field+"_bucket"] = count
}
return fieldsWithCountOut
}
// init initializes histogram aggregator plugin
func init() {
aggregators.Add("histogram", func() telegraf.Aggregator {
return NewHistogramAggregator()
})
}
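The cumulative accounting in groupFieldsByBuckets above reduces to plain slices. A sketch with assumed hit counts, matching a single value landing in the third bucket:

```go
package main

import "fmt"

func main() {
	// Each reported bucket count is the running sum of all smaller
	// buckets plus its own hits; the trailing element of hits is the
	// +Inf bucket and absorbs the remainder.
	buckets := []float64{0, 10, 20, 30, 40}
	hits := []int64{0, 0, 2, 0, 0, 0} // len(buckets)+1, last is +Inf
	var count int64
	for i, b := range buckets {
		count += hits[i]
		fmt.Printf("le=%v count=%d\n", b, count)
	}
	count += hits[len(hits)-1]
	fmt.Printf("le=+Inf count=%d\n", count)
}
```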

View File

@@ -0,0 +1,210 @@
package histogram
import (
"fmt"
"testing"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
)
// NewTestHistogram creates a new test histogram aggregator with the specified config
func NewTestHistogram(cfg []config) telegraf.Aggregator {
htm := &HistogramAggregator{Configs: cfg}
htm.buckets = make(bucketsByMetrics)
htm.resetCache()
return htm
}
// firstMetric1 is the first test metric
var firstMetric1, _ = metric.New(
"first_metric_name",
map[string]string{"tag_name": "tag_value"},
map[string]interface{}{
"a": float64(15.3),
"b": float64(40),
},
time.Now(),
)
// firstMetric2 is the first test metric with different field values
var firstMetric2, _ = metric.New(
"first_metric_name",
map[string]string{"tag_name": "tag_value"},
map[string]interface{}{
"a": float64(15.9),
"c": float64(40),
},
time.Now(),
)
// secondMetric is the second metric
var secondMetric, _ = metric.New(
"second_metric_name",
map[string]string{"tag_name": "tag_value"},
map[string]interface{}{
"a": float64(105),
"ignoreme": "string",
"andme": true,
},
time.Now(),
)
// BenchmarkApply runs benchmarks
func BenchmarkApply(b *testing.B) {
histogram := NewHistogramAggregator()
for n := 0; n < b.N; n++ {
histogram.Add(firstMetric1)
histogram.Add(firstMetric2)
histogram.Add(secondMetric)
}
}
// TestHistogramWithPeriodAndOneField tests metrics for one period and for one field
func TestHistogramWithPeriodAndOneField(t *testing.T) {
var cfg []config
cfg = append(cfg, config{Metric: "first_metric_name", Fields: []string{"a"}, Buckets: []float64{0.0, 10.0, 20.0, 30.0, 40.0}})
histogram := NewTestHistogram(cfg)
acc := &testutil.Accumulator{}
histogram.Add(firstMetric1)
histogram.Add(firstMetric2)
histogram.Push(acc)
if len(acc.Metrics) != 6 {
assert.Fail(t, "Incorrect number of metrics")
}
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(0)}, "0")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(0)}, "10")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2)}, "20")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2)}, "30")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2)}, "40")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2)}, bucketInf)
}
// TestHistogramWithPeriodAndAllFields tests two metrics for one period and for all fields
func TestHistogramWithPeriodAndAllFields(t *testing.T) {
var cfg []config
cfg = append(cfg, config{Metric: "first_metric_name", Buckets: []float64{0.0, 15.5, 20.0, 30.0, 40.0}})
cfg = append(cfg, config{Metric: "second_metric_name", Buckets: []float64{0.0, 4.0, 10.0, 23.0, 30.0}})
histogram := NewTestHistogram(cfg)
acc := &testutil.Accumulator{}
histogram.Add(firstMetric1)
histogram.Add(firstMetric2)
histogram.Add(secondMetric)
histogram.Push(acc)
if len(acc.Metrics) != 12 {
assert.Fail(t, "Incorrect number of metrics")
}
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(0), "b_bucket": int64(0), "c_bucket": int64(0)}, "0")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(1), "b_bucket": int64(0), "c_bucket": int64(0)}, "15.5")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2), "b_bucket": int64(0), "c_bucket": int64(0)}, "20")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2), "b_bucket": int64(0), "c_bucket": int64(0)}, "30")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2), "b_bucket": int64(1), "c_bucket": int64(1)}, "40")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2), "b_bucket": int64(1), "c_bucket": int64(1)}, bucketInf)
assertContainsTaggedField(t, acc, "second_metric_name", map[string]interface{}{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, "0")
assertContainsTaggedField(t, acc, "second_metric_name", map[string]interface{}{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, "4")
assertContainsTaggedField(t, acc, "second_metric_name", map[string]interface{}{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, "10")
assertContainsTaggedField(t, acc, "second_metric_name", map[string]interface{}{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, "23")
assertContainsTaggedField(t, acc, "second_metric_name", map[string]interface{}{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, "30")
assertContainsTaggedField(t, acc, "second_metric_name", map[string]interface{}{"a_bucket": int64(1), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, bucketInf)
}
// TestHistogramDifferentPeriodsAndAllFields tests two metrics getting added with a push/reset in between (simulates
// getting added in different periods) for all fields
func TestHistogramDifferentPeriodsAndAllFields(t *testing.T) {
var cfg []config
cfg = append(cfg, config{Metric: "first_metric_name", Buckets: []float64{0.0, 10.0, 20.0, 30.0, 40.0}})
histogram := NewTestHistogram(cfg)
acc := &testutil.Accumulator{}
histogram.Add(firstMetric1)
histogram.Push(acc)
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(0), "b_bucket": int64(0)}, "0")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(0), "b_bucket": int64(0)}, "10")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(1), "b_bucket": int64(0)}, "20")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(1), "b_bucket": int64(0)}, "30")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(1), "b_bucket": int64(1)}, "40")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(1), "b_bucket": int64(1)}, bucketInf)
acc.ClearMetrics()
histogram.Add(firstMetric2)
histogram.Push(acc)
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(0), "b_bucket": int64(0), "c_bucket": int64(0)}, "0")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(0), "b_bucket": int64(0), "c_bucket": int64(0)}, "10")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2), "b_bucket": int64(0), "c_bucket": int64(0)}, "20")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2), "b_bucket": int64(0), "c_bucket": int64(0)}, "30")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2), "b_bucket": int64(1), "c_bucket": int64(1)}, "40")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2), "b_bucket": int64(1), "c_bucket": int64(1)}, bucketInf)
}
// TestWrongBucketsOrder tests the panic raised when buckets are in an incorrect order
func TestWrongBucketsOrder(t *testing.T) {
defer func() {
if r := recover(); r != nil {
assert.Equal(
t,
"histogram buckets must be in increasing order: 90.00 >= 20.00, metrics: first_metric_name, field: a",
fmt.Sprint(r),
)
}
}()
var cfg []config
cfg = append(cfg, config{Metric: "first_metric_name", Buckets: []float64{0.0, 90.0, 20.0, 30.0, 40.0}})
histogram := NewTestHistogram(cfg)
histogram.Add(firstMetric2)
}
// assertContainsTaggedField is a helper function for checking histogram data
func assertContainsTaggedField(t *testing.T, acc *testutil.Accumulator, metricName string, fields map[string]interface{}, le string) {
acc.Lock()
defer acc.Unlock()
for _, checkedMetric := range acc.Metrics {
// check metric name
if checkedMetric.Measurement != metricName {
continue
}
// check "le" tag
if checkedMetric.Tags[bucketTag] != le {
continue
}
// check fields
isFieldsIdentical := true
for field := range fields {
if _, ok := checkedMetric.Fields[field]; !ok {
isFieldsIdentical = false
break
}
}
if !isFieldsIdentical {
continue
}
// check fields with their counts
if assert.Equal(t, fields, checkedMetric.Fields) {
return
}
assert.Fail(t, fmt.Sprintf("incorrect fields %v of metric %s", fields, metricName))
}
assert.Fail(t, fmt.Sprintf("unknown measurement '%s' with tags: %v, fields: %v", metricName, map[string]string{"le": le}, fields))
}

View File

@@ -45,7 +45,6 @@ SELECT max(field1), mean(field1), min(field1) FROM measurement1 WHERE tag1=bar A
### Example Output:
```
$ telegraf -input-filter example -test
measurement1,tag1=foo,tag2=bar field1=1i,field2=2.1 1453831884664956455
measurement2,tag1=foo,tag2=bar,tag3=baz field3=1i 1453831884664956455
```

File diff suppressed because one or more lines are too long

View File

@@ -73,10 +73,9 @@ func (a *Aerospike) gatherServer(hostport string, acc telegraf.Accumulator) erro
for _, n := range nodes {
tags := map[string]string{
"aerospike_host": hostport,
"node_name": n.GetName(),
}
fields := map[string]interface{}{
"node_name": n.GetName(),
}
fields := make(map[string]interface{})
stats, err := as.RequestNodeStats(n)
if err != nil {
return err
@@ -86,7 +85,7 @@ func (a *Aerospike) gatherServer(hostport string, acc telegraf.Accumulator) erro
if err == nil {
fields[strings.Replace(k, "-", "_", -1)] = val
} else {
log.Printf("I! skipping aerospike field %v with int64 overflow", k)
log.Printf("I! skipping aerospike field %v with int64 overflow: %q", k, v)
}
}
acc.AddFields("aerospike_node", fields, tags, time.Now())
@@ -100,11 +99,10 @@ func (a *Aerospike) gatherServer(hostport string, acc telegraf.Accumulator) erro
for _, namespace := range namespaces {
nTags := map[string]string{
"aerospike_host": hostport,
"node_name": n.GetName(),
}
nTags["namespace"] = namespace
nFields := map[string]interface{}{
"node_name": n.GetName(),
}
nFields := make(map[string]interface{})
info, err := as.RequestNodeInfo(n, "namespace/"+namespace)
if err != nil {
continue
@@ -119,7 +117,7 @@ func (a *Aerospike) gatherServer(hostport string, acc telegraf.Accumulator) erro
if err == nil {
nFields[strings.Replace(parts[0], "-", "_", -1)] = val
} else {
log.Printf("I! skipping aerospike field %v with int64 overflow", parts[0])
log.Printf("I! skipping aerospike field %v with int64 overflow: %q", parts[0], parts[1])
}
}
acc.AddFields("aerospike_namespace", nFields, nTags, time.Now())

View File

@@ -23,8 +23,10 @@ func TestAerospikeStatistics(t *testing.T) {
require.NoError(t, err)
assert.True(t, acc.HasMeasurement("aerospike_node"))
assert.True(t, acc.HasTag("aerospike_node", "node_name"))
assert.True(t, acc.HasMeasurement("aerospike_namespace"))
assert.True(t, acc.HasIntField("aerospike_node", "batch_error"))
assert.True(t, acc.HasTag("aerospike_namespace", "node_name"))
assert.True(t, acc.HasInt64Field("aerospike_node", "batch_error"))
}
func TestAerospikeStatisticsPartialErr(t *testing.T) {
@@ -45,7 +47,7 @@ func TestAerospikeStatisticsPartialErr(t *testing.T) {
assert.True(t, acc.HasMeasurement("aerospike_node"))
assert.True(t, acc.HasMeasurement("aerospike_namespace"))
assert.True(t, acc.HasIntField("aerospike_node", "batch_error"))
assert.True(t, acc.HasInt64Field("aerospike_node", "batch_error"))
}
func TestAerospikeParseValue(t *testing.T) {

View File

@@ -21,7 +21,9 @@ import (
_ "github.com/influxdata/telegraf/plugins/inputs/dovecot"
_ "github.com/influxdata/telegraf/plugins/inputs/elasticsearch"
_ "github.com/influxdata/telegraf/plugins/inputs/exec"
_ "github.com/influxdata/telegraf/plugins/inputs/fail2ban"
_ "github.com/influxdata/telegraf/plugins/inputs/filestat"
_ "github.com/influxdata/telegraf/plugins/inputs/fluentd"
_ "github.com/influxdata/telegraf/plugins/inputs/graylog"
_ "github.com/influxdata/telegraf/plugins/inputs/haproxy"
_ "github.com/influxdata/telegraf/plugins/inputs/hddtemp"
@@ -35,6 +37,7 @@ import (
_ "github.com/influxdata/telegraf/plugins/inputs/iptables"
_ "github.com/influxdata/telegraf/plugins/inputs/jolokia"
_ "github.com/influxdata/telegraf/plugins/inputs/kafka_consumer"
_ "github.com/influxdata/telegraf/plugins/inputs/kafka_consumer_legacy"
_ "github.com/influxdata/telegraf/plugins/inputs/kapacitor"
_ "github.com/influxdata/telegraf/plugins/inputs/kubernetes"
_ "github.com/influxdata/telegraf/plugins/inputs/leofs"
@@ -43,6 +46,7 @@ import (
_ "github.com/influxdata/telegraf/plugins/inputs/mailchimp"
_ "github.com/influxdata/telegraf/plugins/inputs/memcached"
_ "github.com/influxdata/telegraf/plugins/inputs/mesos"
_ "github.com/influxdata/telegraf/plugins/inputs/minecraft"
_ "github.com/influxdata/telegraf/plugins/inputs/mongodb"
_ "github.com/influxdata/telegraf/plugins/inputs/mqtt_consumer"
_ "github.com/influxdata/telegraf/plugins/inputs/mysql"
@@ -53,6 +57,7 @@ import (
_ "github.com/influxdata/telegraf/plugins/inputs/nsq_consumer"
_ "github.com/influxdata/telegraf/plugins/inputs/nstat"
_ "github.com/influxdata/telegraf/plugins/inputs/ntpq"
_ "github.com/influxdata/telegraf/plugins/inputs/openldap"
_ "github.com/influxdata/telegraf/plugins/inputs/passenger"
_ "github.com/influxdata/telegraf/plugins/inputs/phpfpm"
_ "github.com/influxdata/telegraf/plugins/inputs/ping"
@@ -67,6 +72,7 @@ import (
_ "github.com/influxdata/telegraf/plugins/inputs/redis"
_ "github.com/influxdata/telegraf/plugins/inputs/rethinkdb"
_ "github.com/influxdata/telegraf/plugins/inputs/riak"
_ "github.com/influxdata/telegraf/plugins/inputs/salesforce"
_ "github.com/influxdata/telegraf/plugins/inputs/sensors"
_ "github.com/influxdata/telegraf/plugins/inputs/snmp"
_ "github.com/influxdata/telegraf/plugins/inputs/snmp_legacy"
@@ -77,12 +83,15 @@ import (
_ "github.com/influxdata/telegraf/plugins/inputs/system"
_ "github.com/influxdata/telegraf/plugins/inputs/tail"
_ "github.com/influxdata/telegraf/plugins/inputs/tcp_listener"
_ "github.com/influxdata/telegraf/plugins/inputs/tomcat"
_ "github.com/influxdata/telegraf/plugins/inputs/trig"
_ "github.com/influxdata/telegraf/plugins/inputs/twemproxy"
_ "github.com/influxdata/telegraf/plugins/inputs/udp_listener"
_ "github.com/influxdata/telegraf/plugins/inputs/varnish"
_ "github.com/influxdata/telegraf/plugins/inputs/webhooks"
_ "github.com/influxdata/telegraf/plugins/inputs/win_perf_counters"
_ "github.com/influxdata/telegraf/plugins/inputs/win_services"
_ "github.com/influxdata/telegraf/plugins/inputs/zfs"
_ "github.com/influxdata/telegraf/plugins/inputs/zipkin"
_ "github.com/influxdata/telegraf/plugins/inputs/zookeeper"
)

View File

@@ -39,9 +39,9 @@ The following defaults are known to work with RabbitMQ:
## Use SSL but skip chain & host verification
# insecure_skip_verify = false
## Data format to output.
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "influx"
```

View File

@@ -85,10 +85,10 @@ func (a *AMQPConsumer) SampleConfig() string {
## Use SSL but skip chain & host verification
# insecure_skip_verify = false
## Data format to output.
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "influx"
`
}

View File

@@ -1,55 +1,84 @@
# Telegraf plugin: Apache
# Apache Input Plugin
#### Plugin arguments:
- **urls** []string: List of apache-status URLs to collect from. Default is "http://localhost/server-status?auto".
- **username** string: Username for HTTP basic authentication
- **password** string: Password for HTTP basic authentication
- **timeout** duration: time that the HTTP connection will remain waiting for response. Default 4 seconds ("4s")
The Apache plugin collects server performance information using the [`mod_status`](https://httpd.apache.org/docs/2.4/mod/mod_status.html) module of the [Apache HTTP Server](https://httpd.apache.org/).
##### Optional SSL Config
Typically, the `mod_status` module is configured to expose a page at the `/server-status?auto` location of the Apache server. The [ExtendedStatus](https://httpd.apache.org/docs/2.4/mod/core.html#extendedstatus) option must be enabled in order to collect all available fields. For information about how to configure your server, see the [module documentation](https://httpd.apache.org/docs/2.4/mod/mod_status.html#enable).
- **ssl_ca** string: the full path for the SSL CA certicate
- **ssl_cert** string: the full path for the SSL certificate
- **ssl_key** string: the full path for the key file
- **insecure_skip_verify** bool: if true HTTP client will skip all SSL verifications related to peer and host. Default to false
### Configuration:
#### Description
```toml
# Read Apache status information (mod_status)
[[inputs.apache]]
## An array of URLs to gather from, must be directed at the machine
## readable version of the mod_status page including the auto query string.
## Default is "http://localhost/server-status?auto".
urls = ["http://localhost/server-status?auto"]
The Apache plugin collects from the /server-status?auto URL. See
[apache.org/server-status?auto](http://www.apache.org/server-status?auto) for an
example. And
[here](http://httpd.apache.org/docs/2.2/mod/mod_status.html) for the apache
mod_status documentation.
## Credentials for basic HTTP authentication.
# username = "myuser"
# password = "mypassword"
# Measurements:
## Maximum time to receive response.
# response_timeout = "5s"
Meta:
- tags: `port=<port>`, `server=url`
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
## Use SSL but skip chain & host verification
# insecure_skip_verify = false
```
- apache_TotalAccesses
- apache_TotalkBytes
- apache_CPULoad
- apache_Uptime
- apache_ReqPerSec
- apache_BytesPerSec
- apache_BytesPerReq
- apache_BusyWorkers
- apache_IdleWorkers
- apache_ConnsTotal
- apache_ConnsAsyncWriting
- apache_ConnsAsyncKeepAlive
- apache_ConnsAsyncClosing
### Measurements & Fields:
### Scoreboard measurements
- apache
- BusyWorkers (float)
- BytesPerReq (float)
- BytesPerSec (float)
- ConnsAsyncClosing (float)
- ConnsAsyncKeepAlive (float)
- ConnsAsyncWriting (float)
- ConnsTotal (float)
- CPUChildrenSystem (float)
- CPUChildrenUser (float)
- CPULoad (float)
- CPUSystem (float)
- CPUUser (float)
- IdleWorkers (float)
- Load1 (float)
- Load5 (float)
- Load15 (float)
- ParentServerConfigGeneration (float)
- ParentServerMPMGeneration (float)
- ReqPerSec (float)
- ServerUptimeSeconds (float)
- TotalAccesses (float)
- TotalkBytes (float)
- Uptime (float)
- apache_scboard_waiting
- apache_scboard_starting
- apache_scboard_reading
- apache_scboard_sending
- apache_scboard_keepalive
- apache_scboard_dnslookup
- apache_scboard_closing
- apache_scboard_logging
- apache_scboard_finishing
- apache_scboard_idle_cleanup
- apache_scboard_open
The following fields are collected from the `Scoreboard`, and represent the number of requests in the given state:
- apache
- scboard_closing (float)
- scboard_dnslookup (float)
- scboard_finishing (float)
- scboard_idle_cleanup (float)
- scboard_keepalive (float)
- scboard_logging (float)
- scboard_open (float)
- scboard_reading (float)
- scboard_sending (float)
- scboard_starting (float)
- scboard_waiting (float)
### Tags:
- All measurements have the following tags:
- port
- server
### Example Output:
```
apache,port=80,server=debian-stretch-apache BusyWorkers=1,BytesPerReq=0,BytesPerSec=0,CPUChildrenSystem=0,CPUChildrenUser=0,CPULoad=0.00995025,CPUSystem=0.01,CPUUser=0.01,ConnsAsyncClosing=0,ConnsAsyncKeepAlive=0,ConnsAsyncWriting=0,ConnsTotal=0,IdleWorkers=49,Load1=0.01,Load15=0,Load5=0,ParentServerConfigGeneration=3,ParentServerMPMGeneration=2,ReqPerSec=0.00497512,ServerUptimeSeconds=201,TotalAccesses=1,TotalkBytes=0,Uptime=201,scboard_closing=0,scboard_dnslookup=0,scboard_finishing=0,scboard_idle_cleanup=0,scboard_keepalive=0,scboard_logging=0,scboard_open=100,scboard_reading=0,scboard_sending=1,scboard_starting=0,scboard_waiting=49 1502489900000000000
```

View File

@@ -34,15 +34,17 @@ type Apache struct {
}
var sampleConfig = `
## An array of Apache status URI to gather stats.
## An array of URLs to gather from, must be directed at the machine
## readable version of the mod_status page including the auto query string.
## Default is "http://localhost/server-status?auto".
urls = ["http://localhost/server-status?auto"]
## user credentials for basic HTTP authentication
username = "myuser"
password = "mypassword"
## Timeout to the complete conection and reponse time in seconds
response_timeout = "25s" ## default to 5 seconds
## Credentials for basic HTTP authentication.
# username = "myuser"
# password = "mypassword"
## Maximum time to receive response.
# response_timeout = "5s"
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"

View File

@@ -70,7 +70,7 @@ Using this configuration:
When run with:
```
./telegraf -config telegraf.conf -input-filter bcache -test
./telegraf --config telegraf.conf --input-filter bcache --test
```
It produces:

View File

@@ -296,7 +296,7 @@ func (c *Cassandra) Gather(acc telegraf.Accumulator) error {
continue
}
if out["status"] != 200.0 {
acc.AddError(fmt.Errorf("URL returned with status %v\n", out["status"]))
acc.AddError(fmt.Errorf("URL returned with status %v - %s\n", out["status"], requestUrl))
continue
}
m.addTagsFields(out)

View File

@@ -200,7 +200,7 @@ All measurements will have the following tags:
*Admin Socket Stats*
<pre>
telegraf -test -config /etc/telegraf/telegraf.conf -config-directory /etc/telegraf/telegraf.d -input-filter ceph
telegraf --config /etc/telegraf/telegraf.conf --config-directory /etc/telegraf/telegraf.d --input-filter ceph --test
* Plugin: ceph, Collection 1
> ceph,collection=paxos, id=node-2,role=openstack,type=mon accept_timeout=0,begin=14931264,begin_bytes.avgcount=14931264,begin_bytes.sum=180309683362,begin_keys.avgcount=0,begin_keys.sum=0,begin_latency.avgcount=14931264,begin_latency.sum=9293.29589,collect=1,collect_bytes.avgcount=1,collect_bytes.sum=24,collect_keys.avgcount=1,collect_keys.sum=1,collect_latency.avgcount=1,collect_latency.sum=0.00028,collect_timeout=0,collect_uncommitted=0,commit=14931264,commit_bytes.avgcount=0,commit_bytes.sum=0,commit_keys.avgcount=0,commit_keys.sum=0,commit_latency.avgcount=0,commit_latency.sum=0,lease_ack_timeout=0,lease_timeout=0,new_pn=0,new_pn_latency.avgcount=0,new_pn_latency.sum=0,refresh=14931264,refresh_latency.avgcount=14931264,refresh_latency.sum=8706.98498,restart=4,share_state=0,share_state_bytes.avgcount=0,share_state_bytes.sum=0,share_state_keys.avgcount=0,share_state_keys.sum=0,start_leader=0,start_peon=1,store_state=14931264,store_state_bytes.avgcount=14931264,store_state_bytes.sum=353119959211,store_state_keys.avgcount=14931264,store_state_keys.sum=289807523,store_state_latency.avgcount=14931264,store_state_latency.sum=10952.835724 1462821234814535148
> ceph,collection=throttle-mon_client_bytes,id=node-2,type=mon get=1413017,get_or_fail_fail=0,get_or_fail_success=0,get_sum=71211705,max=104857600,put=1413013,put_sum=71211459,take=0,take_sum=0,val=246,wait.avgcount=0,wait.sum=0 1462821234814737219

View File

@@ -26,7 +26,7 @@ func TestParseSockId(t *testing.T) {
func TestParseMonDump(t *testing.T) {
dump, err := parseDump(monPerfDump)
assert.NoError(t, err)
assert.InEpsilon(t, 5678670180, dump["cluster"]["osd_kb_used"], epsilon)
assert.InEpsilon(t, int64(5678670180), dump["cluster"]["osd_kb_used"], epsilon)
assert.InEpsilon(t, 6866.540527000, dump["paxos"]["store_state_latency.sum"], epsilon)
}

View File

@@ -225,7 +225,7 @@ var fileFormats = [...]fileFormat{
}
func numberOrString(s string) interface{} {
i, err := strconv.Atoi(s)
i, err := strconv.ParseInt(s, 10, 64)
if err == nil {
return i
}
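This switch matters on 32-bit platforms, where `strconv.Atoi` is limited to the native `int` width and returns a range error for large counter values; `strconv.ParseInt` with an explicit 64-bit size always yields an `int64`. A quick sketch; the sample value is taken from the cgroup test data elsewhere in this diff:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	// ParseInt with bitSize 64 parses into int64 regardless of the
	// platform's native int width; Atoi would fail here on 32-bit.
	v, err := strconv.ParseInt("223372036854771712", 10, 64)
	fmt.Println(v, err) // 223372036854771712 <nil>
}
```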

View File

@@ -31,17 +31,17 @@ func TestCgroupStatistics_1(t *testing.T) {
"path": "testdata/memory",
}
fields := map[string]interface{}{
"memory.stat.cache": 1739362304123123123,
"memory.stat.rss": 1775325184,
"memory.stat.rss_huge": 778043392,
"memory.stat.mapped_file": 421036032,
"memory.stat.dirty": -307200,
"memory.max_usage_in_bytes.0": 0,
"memory.max_usage_in_bytes.1": -1,
"memory.max_usage_in_bytes.2": 2,
"memory.limit_in_bytes": 223372036854771712,
"memory.stat.cache": int64(1739362304123123123),
"memory.stat.rss": int64(1775325184),
"memory.stat.rss_huge": int64(778043392),
"memory.stat.mapped_file": int64(421036032),
"memory.stat.dirty": int64(-307200),
"memory.max_usage_in_bytes.0": int64(0),
"memory.max_usage_in_bytes.1": int64(-1),
"memory.max_usage_in_bytes.2": int64(2),
"memory.limit_in_bytes": int64(223372036854771712),
"memory.use_hierarchy": "12-781",
"notify_on_release": 0,
"notify_on_release": int64(0),
}
acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
}
@@ -63,10 +63,10 @@ func TestCgroupStatistics_2(t *testing.T) {
"path": "testdata/cpu",
}
fields := map[string]interface{}{
"cpuacct.usage_percpu.0": -1452543795404,
"cpuacct.usage_percpu.1": 1376681271659,
"cpuacct.usage_percpu.2": 1450950799997,
"cpuacct.usage_percpu.3": -1473113374257,
"cpuacct.usage_percpu.0": int64(-1452543795404),
"cpuacct.usage_percpu.1": int64(1376681271659),
"cpuacct.usage_percpu.2": int64(1450950799997),
"cpuacct.usage_percpu.3": int64(-1473113374257),
}
acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
}
@@ -88,7 +88,7 @@ func TestCgroupStatistics_3(t *testing.T) {
"path": "testdata/memory/group_1",
}
fields := map[string]interface{}{
"memory.limit_in_bytes": 223372036854771712,
"memory.limit_in_bytes": int64(223372036854771712),
}
acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
@@ -115,7 +115,7 @@ func TestCgroupStatistics_4(t *testing.T) {
"path": "testdata/memory/group_1/group_1_1",
}
fields := map[string]interface{}{
"memory.limit_in_bytes": 223372036854771712,
"memory.limit_in_bytes": int64(223372036854771712),
}
acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
@@ -147,7 +147,7 @@ func TestCgroupStatistics_5(t *testing.T) {
"path": "testdata/memory/group_1/group_1_1",
}
fields := map[string]interface{}{
"memory.limit_in_bytes": 223372036854771712,
"memory.limit_in_bytes": int64(223372036854771712),
}
acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
@@ -174,9 +174,9 @@ func TestCgroupStatistics_6(t *testing.T) {
"path": "testdata/memory",
}
fields := map[string]interface{}{
"memory.usage_in_bytes": 3513667584,
"memory.usage_in_bytes": int64(3513667584),
"memory.use_hierarchy": "12-781",
"memory.kmem.limit_in_bytes": 9223372036854771712,
"memory.kmem.limit_in_bytes": int64(9223372036854771712),
}
acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
}

View File

@@ -83,7 +83,7 @@ Delete second or Not synchronised.
### Example Output:
```
$ telegraf -config telegraf.conf -input-filter chrony -test
$ telegraf --config telegraf.conf --input-filter chrony --test
* Plugin: chrony, Collection 1
> chrony,leap_status=normal,reference_id=192.168.1.1,stratum=3 frequency=-35.657,system_time=0.000027073,last_offset=-0.000013616,residual_freq=-0,rms_offset=0.000027073,root_delay=0.000644,root_dispersion=0.003444,skew=0.001,update_interval=1031.2 1463750789687639161
```

View File

@@ -1,5 +1,3 @@
// +build linux
package chrony
import (

View File

@@ -1,3 +0,0 @@
// +build !linux
package chrony

View File

@@ -1,5 +1,3 @@
// +build linux
package chrony
import (

View File

@@ -9,8 +9,8 @@ API endpoint. In the following order the plugin will attempt to authenticate.
1. Assumed credentials via STS if `role_arn` attribute is specified (source credentials are evaluated from subsequent rules)
2. Explicit credentials from `access_key`, `secret_key`, and `token` attributes
3. Shared profile from `profile` attribute
4. [Environment Variables](https://github.com/aws/aws-sdk-go/wiki/configuring-sdk#environment-variables)
5. [Shared Credentials](https://github.com/aws/aws-sdk-go/wiki/configuring-sdk#shared-credentials-file)
4. [Environment Variables](https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#environment-variables)
5. [Shared Credentials](https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#shared-credentials-file)
6. [EC2 Instance Profile](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html)
### Configuration:
@@ -20,9 +20,24 @@ API endpoint. In the following order the plugin will attempt to authenticate.
## Amazon Region (required)
region = "us-east-1"
## Amazon Credentials
## Credentials are loaded in the following order
## 1) Assumed credentials via STS if role_arn is specified
## 2) explicit credentials from 'access_key' and 'secret_key'
## 3) shared profile from 'profile'
## 4) environment variables
## 5) shared credentials file
## 6) EC2 Instance Profile
#access_key = ""
#secret_key = ""
#token = ""
#role_arn = ""
#profile = ""
#shared_credential_file = ""
# The minimum period for Cloudwatch metrics is 1 minute (60s). However not all
# metrics are made available to the 1 minute period. Some are collected at
# 3 minute and 5 minutes intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring.
# 3 minute, 5 minute, or larger intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring.
# Note that if a period is configured that is smaller than the minimum for a
# particular metric, that metric will not be returned by the Cloudwatch API
# and will not be collected by Telegraf.
@@ -57,10 +72,6 @@ API endpoint. In the following order the plugin will attempt to authenticate.
[[inputs.cloudwatch.metrics.dimensions]]
name = "LoadBalancerName"
value = "p-example"
[[inputs.cloudwatch.metrics.dimensions]]
name = "AvailabilityZone"
value = "*"
```
#### Requirements and Terminology
@@ -134,6 +145,6 @@ Tag Dimension names are represented in [snake case](https://en.wikipedia.org/wik
### Example Output:
```
$ ./telegraf -config telegraf.conf -input-filter cloudwatch -test
$ ./telegraf --config telegraf.conf --input-filter cloudwatch --test
> cloudwatch_aws_elb,load_balancer_name=p-example,region=us-east-1,unit=seconds latency_average=0.004810798017284538,latency_maximum=0.1100282669067383,latency_minimum=0.0006084442138671875,latency_sample_count=4029,latency_sum=19.382705211639404 1459542420000000000
```

View File

@@ -81,7 +81,7 @@ func (c *CloudWatch) SampleConfig() string {
# The minimum period for Cloudwatch metrics is 1 minute (60s). However not all
# metrics are made available to the 1 minute period. Some are collected at
# 3 minute and 5 minutes intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring.
# 3 minute, 5 minute, or larger intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring.
# Note that if a period is configured that is smaller than the minimum for a
# particular metric, that metric will not be returned by the Cloudwatch API
# and will not be collected by Telegraf.

View File

@@ -51,6 +51,6 @@ This input does not use tags.
### Example Output:
```
$ ./telegraf -config telegraf.conf -input-filter conntrack -test
$ ./telegraf --config telegraf.conf --input-filter conntrack --test
conntrack,host=myhost ip_conntrack_count=2,ip_conntrack_max=262144 1461620427667995735
```

View File

@@ -1,6 +1,6 @@
# Telegraf Input Plugin: Consul
This plugin will collect statistics about all helath checks registered in the Consul. It uses [Consul API](https://www.consul.io/docs/agent/http/health.html#health_state)
This plugin will collect statistics about all health checks registered in the Consul. It uses [Consul API](https://www.consul.io/docs/agent/http/health.html#health_state)
to query the data. It will not report [telemetry](https://www.consul.io/docs/agent/telemetry.html), but Consul can already report those stats itself over the StatsD protocol if needed.
## Configuration:
@@ -46,7 +46,7 @@ the health check at this sample.
## Example output
```
$ telegraf --config ./telegraf.conf -input-filter consul -test
$ telegraf --config ./telegraf.conf --input-filter consul --test
* Plugin: consul, Collection 1
> consul_health_checks,host=wolfpit,node=consul-server-node,check_id="serfHealth" check_name="Serf Health Status",service_id="",status="passing",passing=1i,critical=0i,warning=0i 1464698464486439902
> consul_health_checks,host=wolfpit,node=consul-server-node,service_name=www.example.com,check_id="service:www-example-com.test01" check_name="Service 'www.example.com' check",service_id="www-example-com.test01",status="critical",passing=0i,critical=1i,warning=0i 1464698464486519036

View File

@@ -22,7 +22,7 @@
### couchbase_node
Tags:
- cluster: whatever you called it in `servers` in the configuration, e.g.: `http://couchbase-0.example.com/`
- cluster: sanitized string from `servers` configuration field e.g.: `http://user:password@couchbase-0.example.com:8091/endpoint` -> `http://couchbase-0.example.com:8091/endpoint`
- hostname: Couchbase's name for the node and port, e.g., `172.16.10.187:8091`
Fields:
@@ -48,7 +48,7 @@ Fields:
## Example output
```
$ telegraf -config telegraf.conf -input-filter couchbase -test
$ telegraf --config telegraf.conf --input-filter couchbase --test
* Plugin: couchbase, Collection 1
> couchbase_node,cluster=https://couchbase-0.example.com/,hostname=172.16.10.187:8091 memory_free=22927384576,memory_total=64424656896 1458381183695864929
> couchbase_node,cluster=https://couchbase-0.example.com/,hostname=172.16.10.65:8091 memory_free=23520161792,memory_total=64424656896 1458381183695972112

View File

@@ -1,10 +1,12 @@
package couchbase
import (
"regexp"
"sync"
couchbase "github.com/couchbase/go-couchbase"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
"sync"
)
type Couchbase struct {
@@ -24,6 +26,8 @@ var sampleConfig = `
servers = ["http://localhost:8091"]
`
var regexpURI = regexp.MustCompile(`(\S+://)?(\S+\:\S+@)`)
func (r *Couchbase) SampleConfig() string {
return sampleConfig
}
@@ -71,15 +75,17 @@ func (r *Couchbase) gatherServer(addr string, acc telegraf.Accumulator, pool *co
}
pool = &p
}
for i := 0; i < len(pool.Nodes); i++ {
node := pool.Nodes[i]
tags := map[string]string{"cluster": addr, "hostname": node.Hostname}
tags := map[string]string{"cluster": regexpURI.ReplaceAllString(addr, "${1}"), "hostname": node.Hostname}
fields := make(map[string]interface{})
fields["memory_free"] = node.MemoryFree
fields["memory_total"] = node.MemoryTotal
acc.AddFields("couchbase_node", fields, tags)
}
for bucketName, _ := range pool.BucketMap {
for bucketName := range pool.BucketMap {
tags := map[string]string{"cluster": addr, "bucket": bucketName}
bs := pool.BucketMap[bucketName].BasicStats
fields := make(map[string]interface{})

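The new `regexpURI` is what produces the sanitized `cluster` tag described in the README above: replacing the match with `${1}` keeps the optional scheme while discarding the `user:password@` block. A runnable sketch of exactly that pattern:

```go
package main

import (
	"fmt"
	"regexp"
)

// Same pattern as the plugin: optional scheme (kept) + credentials (dropped).
var regexpURI = regexp.MustCompile(`(\S+://)?(\S+\:\S+@)`)

func main() {
	addr := "http://user:password@couchbase-0.example.com:8091/endpoint"
	// "${1}" re-inserts only the first capture group, so credentials no
	// longer leak into the cluster tag of every emitted metric.
	fmt.Println(regexpURI.ReplaceAllString(addr, "${1}"))
	// Output: http://couchbase-0.example.com:8091/endpoint
}
```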
File diff suppressed because one or more lines are too long

View File

@@ -63,7 +63,7 @@ httpd statistics:
### Example output:
```
➜ telegraf git:(master) ✗ ./telegraf -config ./config.conf -input-filter couchdb -test
➜ telegraf git:(master) ✗ ./telegraf --config ./config.conf --input-filter couchdb --test
* Plugin: couchdb,
Collection 1
> couchdb,server=http://localhost:5984/_stats couchdb_auth_cache_hits_current=0,

View File

@@ -16,21 +16,21 @@ const metricName = "dmcache"
type cacheStatus struct {
device string
length int
length int64
target string
metadataBlocksize int
metadataUsed int
metadataTotal int
cacheBlocksize int
cacheUsed int
cacheTotal int
readHits int
readMisses int
writeHits int
writeMisses int
demotions int
promotions int
dirty int
metadataBlocksize int64
metadataUsed int64
metadataTotal int64
cacheBlocksize int64
cacheUsed int64
cacheTotal int64
readHits int64
readMisses int64
writeHits int64
writeMisses int64
demotions int64
promotions int64
dirty int64
}
func (c *DMCache) Gather(acc telegraf.Accumulator) error {
@@ -69,12 +69,12 @@ func parseDMSetupStatus(line string) (cacheStatus, error) {
}
status.device = strings.TrimRight(values[0], ":")
status.length, err = strconv.Atoi(values[2])
status.length, err = strconv.ParseInt(values[2], 10, 64)
if err != nil {
return cacheStatus{}, err
}
status.target = values[3]
status.metadataBlocksize, err = strconv.Atoi(values[4])
status.metadataBlocksize, err = strconv.ParseInt(values[4], 10, 64)
if err != nil {
return cacheStatus{}, err
}
@@ -82,15 +82,15 @@ func parseDMSetupStatus(line string) (cacheStatus, error) {
if len(metadata) != 2 {
return cacheStatus{}, parseError
}
status.metadataUsed, err = strconv.Atoi(metadata[0])
status.metadataUsed, err = strconv.ParseInt(metadata[0], 10, 64)
if err != nil {
return cacheStatus{}, err
}
status.metadataTotal, err = strconv.Atoi(metadata[1])
status.metadataTotal, err = strconv.ParseInt(metadata[1], 10, 64)
if err != nil {
return cacheStatus{}, err
}
status.cacheBlocksize, err = strconv.Atoi(values[6])
status.cacheBlocksize, err = strconv.ParseInt(values[6], 10, 64)
if err != nil {
return cacheStatus{}, err
}
@@ -98,39 +98,39 @@ func parseDMSetupStatus(line string) (cacheStatus, error) {
if len(cache) != 2 {
return cacheStatus{}, parseError
}
status.cacheUsed, err = strconv.Atoi(cache[0])
status.cacheUsed, err = strconv.ParseInt(cache[0], 10, 64)
if err != nil {
return cacheStatus{}, err
}
status.cacheTotal, err = strconv.Atoi(cache[1])
status.cacheTotal, err = strconv.ParseInt(cache[1], 10, 64)
if err != nil {
return cacheStatus{}, err
}
status.readHits, err = strconv.Atoi(values[8])
status.readHits, err = strconv.ParseInt(values[8], 10, 64)
if err != nil {
return cacheStatus{}, err
}
status.readMisses, err = strconv.Atoi(values[9])
status.readMisses, err = strconv.ParseInt(values[9], 10, 64)
if err != nil {
return cacheStatus{}, err
}
status.writeHits, err = strconv.Atoi(values[10])
status.writeHits, err = strconv.ParseInt(values[10], 10, 64)
if err != nil {
return cacheStatus{}, err
}
status.writeMisses, err = strconv.Atoi(values[11])
status.writeMisses, err = strconv.ParseInt(values[11], 10, 64)
if err != nil {
return cacheStatus{}, err
}
status.demotions, err = strconv.Atoi(values[12])
status.demotions, err = strconv.ParseInt(values[12], 10, 64)
if err != nil {
return cacheStatus{}, err
}
status.promotions, err = strconv.Atoi(values[13])
status.promotions, err = strconv.ParseInt(values[13], 10, 64)
if err != nil {
return cacheStatus{}, err
}
status.dirty, err = strconv.Atoi(values[14])
status.dirty, err = strconv.ParseInt(values[14], 10, 64)
if err != nil {
return cacheStatus{}, err
}

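The switch from `strconv.Atoi` to `strconv.ParseInt(..., 10, 64)` matters because `Atoi` returns the platform-sized `int`, and the test fixtures below use device lengths like `4883791872` that overflow a 32-bit `int`. A small sketch of the difference:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	v := "4883791872" // fits in 64 bits, not in 32

	// strconv.Atoi returns int, so on GOARCH=386/arm this yields a
	// *strconv.NumError ("value out of range") despite well-formed input.
	if _, err := strconv.Atoi(v); err != nil {
		fmt.Println("Atoi failed:", err)
	}

	// ParseInt with bitSize 64 always produces an int64, so the dmcache
	// parser behaves identically on every architecture.
	n, err := strconv.ParseInt(v, 10, 64)
	fmt.Println(n, err) // 4883791872 <nil>
}
```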
View File

@@ -1,3 +1,5 @@
// +build linux
package dmcache
import (
@@ -33,20 +35,20 @@ func TestPerDeviceGoodOutput(t *testing.T) {
"device": "cs-1",
}
fields1 := map[string]interface{}{
"length": 4883791872,
"metadata_blocksize": 8,
"metadata_used": 1018,
"metadata_total": 1501122,
"cache_blocksize": 512,
"cache_used": 7,
"cache_total": 464962,
"read_hits": 139,
"read_misses": 352643,
"write_hits": 15,
"write_misses": 46,
"demotions": 0,
"promotions": 7,
"dirty": 0,
"length": int64(4883791872),
"metadata_blocksize": int64(8),
"metadata_used": int64(1018),
"metadata_total": int64(1501122),
"cache_blocksize": int64(512),
"cache_used": int64(7),
"cache_total": int64(464962),
"read_hits": int64(139),
"read_misses": int64(352643),
"write_hits": int64(15),
"write_misses": int64(46),
"demotions": int64(0),
"promotions": int64(7),
"dirty": int64(0),
}
acc.AssertContainsTaggedFields(t, measurement, fields1, tags1)
@@ -54,20 +56,20 @@ func TestPerDeviceGoodOutput(t *testing.T) {
"device": "cs-2",
}
fields2 := map[string]interface{}{
"length": 4294967296,
"metadata_blocksize": 8,
"metadata_used": 72352,
"metadata_total": 1310720,
"cache_blocksize": 128,
"cache_used": 26,
"cache_total": 24327168,
"read_hits": 2409,
"read_misses": 286,
"write_hits": 265,
"write_misses": 524682,
"demotions": 0,
"promotions": 0,
"dirty": 0,
"length": int64(4294967296),
"metadata_blocksize": int64(8),
"metadata_used": int64(72352),
"metadata_total": int64(1310720),
"cache_blocksize": int64(128),
"cache_used": int64(26),
"cache_total": int64(24327168),
"read_hits": int64(2409),
"read_misses": int64(286),
"write_hits": int64(265),
"write_misses": int64(524682),
"demotions": int64(0),
"promotions": int64(0),
"dirty": int64(0),
}
acc.AssertContainsTaggedFields(t, measurement, fields2, tags2)
@@ -76,20 +78,20 @@ func TestPerDeviceGoodOutput(t *testing.T) {
}
fields3 := map[string]interface{}{
"length": 9178759168,
"metadata_blocksize": 16,
"metadata_used": 73370,
"metadata_total": 2811842,
"cache_blocksize": 640,
"cache_used": 33,
"cache_total": 24792130,
"read_hits": 2548,
"read_misses": 352929,
"write_hits": 280,
"write_misses": 524728,
"demotions": 0,
"promotions": 7,
"dirty": 0,
"length": int64(9178759168),
"metadata_blocksize": int64(16),
"metadata_used": int64(73370),
"metadata_total": int64(2811842),
"cache_blocksize": int64(640),
"cache_used": int64(33),
"cache_total": int64(24792130),
"read_hits": int64(2548),
"read_misses": int64(352929),
"write_hits": int64(280),
"write_misses": int64(524728),
"demotions": int64(0),
"promotions": int64(7),
"dirty": int64(0),
}
acc.AssertContainsTaggedFields(t, measurement, fields3, tags3)
}
@@ -111,20 +113,20 @@ func TestNotPerDeviceGoodOutput(t *testing.T) {
}
fields := map[string]interface{}{
"length": 9178759168,
"metadata_blocksize": 16,
"metadata_used": 73370,
"metadata_total": 2811842,
"cache_blocksize": 640,
"cache_used": 33,
"cache_total": 24792130,
"read_hits": 2548,
"read_misses": 352929,
"write_hits": 280,
"write_misses": 524728,
"demotions": 0,
"promotions": 7,
"dirty": 0,
"length": int64(9178759168),
"metadata_blocksize": int64(16),
"metadata_used": int64(73370),
"metadata_total": int64(2811842),
"cache_blocksize": int64(640),
"cache_used": int64(33),
"cache_total": int64(24792130),
"read_hits": int64(2548),
"read_misses": int64(352929),
"write_hits": int64(280),
"write_misses": int64(524728),
"demotions": int64(0),
"promotions": int64(7),
"dirty": int64(0),
}
acc.AssertContainsTaggedFields(t, measurement, fields, tags)
}

View File

@@ -8,19 +8,23 @@ The DNS plugin gathers dns query times in miliseconds - like [Dig](https://en.wi
# Sample Config:
[[inputs.dns_query]]
## servers to query
servers = ["8.8.8.8"] # required
servers = ["8.8.8.8"]
## Domains or subdomains to query. "." (root) is default
domains = ["."] # optional
## Network is the network protocol name.
# network = "udp"
## Query record type. Posible values: A, AAAA, ANY, CNAME, MX, NS, PTR, SOA, SPF, SRV, TXT. Default is "NS"
record_type = "A" # optional
## Domains or subdomains to query.
# domains = ["."]
## Dns server port. 53 is default
port = 53 # optional
## Query record type.
## Possible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV.
# record_type = "A"
## Query timeout in seconds. Default is 2 seconds
timeout = 2 # optional
## Dns server port.
# port = 53
## Query timeout in seconds.
# timeout = 2
```
To query more than one record type, configure:
@@ -46,6 +50,6 @@ For querying more than one record type make:
### Example output:
```
./telegraf -config telegraf.conf -test -input-filter dns_query -test
telegraf --input-filter dns_query --test
> dns_query,domain=mjasion.pl,record_type=A,server=8.8.8.8 query_time_ms=67.189842 1456082743585760680
```

View File

@@ -3,11 +3,12 @@ package dns_query
import (
"errors"
"fmt"
"github.com/miekg/dns"
"net"
"strconv"
"time"
"github.com/miekg/dns"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
)
@@ -16,6 +17,9 @@ type DnsQuery struct {
// Domains or subdomains to query
Domains []string
// Network protocol name
Network string
// Server to query
Servers []string
@@ -31,20 +35,23 @@ type DnsQuery struct {
var sampleConfig = `
## servers to query
servers = ["8.8.8.8"] # required
servers = ["8.8.8.8"]
## Domains or subdomains to query. "."(root) is default
domains = ["."] # optional
## Network is the network protocol name.
# network = "udp"
## Query record type. Default is "A"
## Domains or subdomains to query.
# domains = ["."]
## Query record type.
## Possible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV.
record_type = "A" # optional
# record_type = "A"
## Dns server port. 53 is default
port = 53 # optional
## Dns server port.
# port = 53
## Query timeout in seconds. Default is 2 seconds
timeout = 2 # optional
## Query timeout in seconds.
# timeout = 2
`
func (d *DnsQuery) SampleConfig() string {
@@ -76,6 +83,10 @@ func (d *DnsQuery) Gather(acc telegraf.Accumulator) error {
}
func (d *DnsQuery) setDefaultValues() {
if d.Network == "" {
d.Network = "udp"
}
if len(d.RecordType) == 0 {
d.RecordType = "NS"
}
@@ -99,6 +110,7 @@ func (d *DnsQuery) getDnsQueryTime(domain string, server string) (float64, error
c := new(dns.Client)
c.ReadTimeout = time.Duration(d.Timeout) * time.Second
c.Net = d.Network
m := new(dns.Msg)
recordType, err := d.parseRecordType()

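The new `Network` field feeds straight into the miekg/dns client (`c.Net = d.Network`), selecting the transport the query is sent over. A minimal sketch of what that option controls, using the same library:

```go
package main

import (
	"fmt"
	"time"

	"github.com/miekg/dns"
)

func main() {
	c := new(dns.Client)
	c.Net = "tcp" // the plugin defaults to "udp"; tcp helps with large answers or filtered UDP
	c.ReadTimeout = 2 * time.Second

	m := new(dns.Msg)
	m.SetQuestion(dns.Fqdn("example.com"), dns.TypeA)

	// Exchange returns the response, the round-trip time, and any error;
	// the plugin reports the round-trip time as query_time_ms.
	_, rtt, err := c.Exchange(m, "8.8.8.8:53")
	if err != nil {
		fmt.Println("query failed:", err)
		return
	}
	fmt.Println("query time:", rtt)
}
```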
View File

@@ -1,15 +1,11 @@
# Docker Input Plugin
The docker plugin uses the docker remote API to gather metrics on running
docker containers. You can read Docker's documentation for their remote API
[here](https://docs.docker.com/engine/reference/api/docker_remote_api_v1.20/#get-container-stats-based-on-resource-usage)
The docker plugin uses the Docker Engine API to gather metrics on running
docker containers.
The docker plugin uses the excellent
[docker engine-api](https://github.com/docker/engine-api) library to
gather stats. Documentation for the library can be found
[here](https://godoc.org/github.com/docker/engine-api) and documentation
for the stat structure can be found
[here](https://godoc.org/github.com/docker/engine-api/types#Stats)
The docker plugin uses the [Official Docker Client](https://github.com/moby/moby/tree/master/client)
to gather stats from the [Engine API](https://docs.docker.com/engine/api/v1.20/).
[Library Documentation](https://godoc.org/github.com/moby/moby/client)
### Configuration:
@@ -20,24 +16,47 @@ for the stat structure can be found
## To use TCP, set endpoint = "tcp://[ip]:[port]"
## To use environment variables (ie, docker-machine), set endpoint = "ENV"
endpoint = "unix:///var/run/docker.sock"
## Only collect metrics for these containers, collect all if empty
## Only collect metrics for these containers. Values will be appended to
## container_name_include.
## Deprecated (1.4.0), use container_name_include
container_names = []
## Containers to include and exclude. Collect all if empty. Globs accepted.
container_name_include = []
container_name_exclude = []
## Timeout for docker list, info, and stats commands
timeout = "5s"
## Whether to report for each container per-device blkio (8:0, 8:1...) and
## network (eth0, eth1, ...) stats or not
perdevice = true
## Whether to report for each container total blkio and network stats or not
total = false
## docker labels to include and exclude as tags. Globs accepted.
## Note that an empty array for both will include all labels as tags
docker_label_include = []
docker_label_exclude = []
## Which environment variables should we use as a tag
tag_env = ["JAVA_HOME", "HEAP_SIZE"]
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
## Use SSL but skip chain & host verification
# insecure_skip_verify = false
```
#### Environment Configuration
When using the `"ENV"` endpoint, the connection is configured using the
[Docker CLI environment variables](https://godoc.org/github.com/moby/moby/client#NewEnvClient).
### Measurements & Fields:
Every effort was made to preserve the names based on the JSON response from the
@@ -167,7 +186,7 @@ based on the availability of per-cpu stats on your system.
### Example Output:
```
% ./telegraf -config ~/ws/telegraf.conf -input-filter docker -test
% ./telegraf --config ~/ws/telegraf.conf --input-filter docker --test
* Plugin: docker, Collection 1
> docker n_cpus=8i 1456926671065383978
> docker n_used_file_descriptors=15i 1456926671065383978

View File

@@ -0,0 +1,67 @@
package docker
import (
"context"
"crypto/tls"
"net/http"
"github.com/docker/docker/api/types"
docker "github.com/docker/docker/client"
"github.com/docker/go-connections/sockets"
)
var (
version string
defaultHeaders = map[string]string{"User-Agent": "engine-api-cli-1.0"}
)
type Client interface {
Info(ctx context.Context) (types.Info, error)
ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error)
ContainerStats(ctx context.Context, containerID string, stream bool) (types.ContainerStats, error)
ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error)
}
func NewEnvClient() (Client, error) {
client, err := docker.NewEnvClient()
if err != nil {
return nil, err
}
return &SocketClient{client}, nil
}
func NewClient(host string, tlsConfig *tls.Config) (Client, error) {
proto, addr, _, err := docker.ParseHost(host)
if err != nil {
return nil, err
}
transport := &http.Transport{
TLSClientConfig: tlsConfig,
}
sockets.ConfigureTransport(transport, proto, addr)
httpClient := &http.Client{Transport: transport}
client, err := docker.NewClient(host, version, httpClient, defaultHeaders)
if err != nil {
return nil, err
}
return &SocketClient{client}, nil
}
type SocketClient struct {
client *docker.Client
}
func (c *SocketClient) Info(ctx context.Context) (types.Info, error) {
return c.client.Info(ctx)
}
func (c *SocketClient) ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) {
return c.client.ContainerList(ctx, options)
}
func (c *SocketClient) ContainerStats(ctx context.Context, containerID string, stream bool) (types.ContainerStats, error) {
return c.client.ContainerStats(ctx, containerID, stream)
}
func (c *SocketClient) ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error) {
return c.client.ContainerInspect(ctx, containerID)
}

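This new file is the heart of the refactor: a narrow `Client` interface plus two constructors, so the plugin depends on four methods rather than the concrete `*docker.Client`. A self-contained sketch of the pattern (the `Stat` and `Gatherer` names are stand-ins, not plugin types):

```go
package main

import (
	"context"
	"fmt"
)

// Stat stands in for the Docker API types the plugin consumes.
type Stat struct{ NCPU int }

// Client mirrors the diff: a minimal interface over the SDK so tests can
// substitute a fake without a running daemon.
type Client interface {
	Info(ctx context.Context) (Stat, error)
}

type fakeClient struct{ info Stat }

func (f *fakeClient) Info(ctx context.Context) (Stat, error) { return f.info, nil }

// Gatherer holds a constructor rather than a concrete client, just like
// the new `newClient func(...)` field on the Docker struct.
type Gatherer struct {
	newClient func() (Client, error)
	client    Client
}

func (g *Gatherer) Gather(ctx context.Context) error {
	// Lazily build the client on first use, as Gather does in the diff.
	if g.client == nil {
		c, err := g.newClient()
		if err != nil {
			return err
		}
		g.client = c
	}
	info, err := g.client.Info(ctx)
	if err != nil {
		return err
	}
	fmt.Println("cpus:", info.NCPU)
	return nil
}

func main() {
	g := &Gatherer{newClient: func() (Client, error) {
		return &fakeClient{info: Stat{NCPU: 4}}, nil
	}}
	_ = g.Gather(context.Background())
}
```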
View File

@@ -2,9 +2,11 @@ package docker
import (
"context"
"crypto/tls"
"encoding/json"
"fmt"
"io"
"net/http"
"regexp"
"strconv"
"strings"
@@ -12,7 +14,6 @@ import (
"time"
"github.com/docker/docker/api/types"
"github.com/docker/docker/client"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/filter"
"github.com/influxdata/telegraf/internal"
@@ -24,59 +25,40 @@ type DockerLabelFilter struct {
labelExclude filter.Filter
}
type DockerContainerFilter struct {
containerInclude filter.Filter
containerExclude filter.Filter
}
// Docker object
type Docker struct {
Endpoint string
ContainerNames []string
Timeout internal.Duration
PerDevice bool `toml:"perdevice"`
Total bool `toml:"total"`
TagEnvironment []string `toml:"tag_env"`
LabelInclude []string `toml:"docker_label_include"`
LabelExclude []string `toml:"docker_label_exclude"`
LabelFilter DockerLabelFilter
LabelFilter DockerLabelFilter
ContainerInclude []string `toml:"container_name_include"`
ContainerExclude []string `toml:"container_name_exclude"`
ContainerFilter DockerContainerFilter
client *client.Client
engine_host string
SSLCA string `toml:"ssl_ca"`
SSLCert string `toml:"ssl_cert"`
SSLKey string `toml:"ssl_key"`
InsecureSkipVerify bool
testing bool
labelFiltersCreated bool
}
newEnvClient func() (Client, error)
newClient func(string, *tls.Config) (Client, error)
// infoWrapper wraps client.Client.List for testing.
func infoWrapper(c *client.Client, ctx context.Context) (types.Info, error) {
if c != nil {
return c.Info(ctx)
}
fc := FakeDockerClient{}
return fc.Info(ctx)
}
// listWrapper wraps client.Client.ContainerList for testing.
func listWrapper(
c *client.Client,
ctx context.Context,
options types.ContainerListOptions,
) ([]types.Container, error) {
if c != nil {
return c.ContainerList(ctx, options)
}
fc := FakeDockerClient{}
return fc.ContainerList(ctx, options)
}
// statsWrapper wraps client.Client.ContainerStats for testing.
func statsWrapper(
c *client.Client,
ctx context.Context,
containerID string,
stream bool,
) (types.ContainerStats, error) {
if c != nil {
return c.ContainerStats(ctx, containerID, stream)
}
fc := FakeDockerClient{}
return fc.ContainerStats(ctx, containerID, stream)
client Client
httpClient *http.Client
engine_host string
filtersCreated bool
}
// KB, MB, GB, TB, PB...human friendly
@@ -86,6 +68,8 @@ const (
GB = 1000 * MB
TB = 1000 * GB
PB = 1000 * TB
defaultEndpoint = "unix:///var/run/docker.sock"
)
var (
@@ -97,8 +81,15 @@ var sampleConfig = `
## To use TCP, set endpoint = "tcp://[ip]:[port]"
## To use environment variables (ie, docker-machine), set endpoint = "ENV"
endpoint = "unix:///var/run/docker.sock"
## Only collect metrics for these containers, collect all if empty
container_names = []
## Containers to include and exclude. Globs accepted.
## Note that an empty array for both will include all containers
container_name_include = []
container_name_exclude = []
## Timeout for docker list, info, and stats commands
timeout = "5s"
@@ -107,52 +98,60 @@ var sampleConfig = `
perdevice = true
## Whether to report for each container total blkio and network stats or not
total = false
## Which environment variables should we use as a tag
##tag_env = ["JAVA_HOME", "HEAP_SIZE"]
## docker labels to include and exclude as tags. Globs accepted.
## Note that an empty array for both will include all labels as tags
docker_label_include = []
docker_label_exclude = []
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
## Use SSL but skip chain & host verification
# insecure_skip_verify = false
`
// Description returns input description
func (d *Docker) Description() string {
return "Read metrics about docker containers"
}
// SampleConfig prints sampleConfig
func (d *Docker) SampleConfig() string { return sampleConfig }
// Gather starts stats collection
func (d *Docker) Gather(acc telegraf.Accumulator) error {
if d.client == nil && !d.testing {
var c *client.Client
if d.client == nil {
var c Client
var err error
defaultHeaders := map[string]string{"User-Agent": "engine-api-cli-1.0"}
if d.Endpoint == "ENV" {
c, err = client.NewEnvClient()
if err != nil {
return err
}
} else if d.Endpoint == "" {
c, err = client.NewClient("unix:///var/run/docker.sock", "", nil, defaultHeaders)
if err != nil {
return err
}
c, err = d.newEnvClient()
} else {
c, err = client.NewClient(d.Endpoint, "", nil, defaultHeaders)
tlsConfig, err := internal.GetTLSConfig(
d.SSLCert, d.SSLKey, d.SSLCA, d.InsecureSkipVerify)
if err != nil {
return err
}
c, err = d.newClient(d.Endpoint, tlsConfig)
}
if err != nil {
return err
}
d.client = c
}
// Create label filters if not already created
if !d.labelFiltersCreated {
if !d.filtersCreated {
err := d.createLabelFilters()
if err != nil {
return err
}
d.labelFiltersCreated = true
err = d.createContainerFilters()
if err != nil {
return err
}
d.filtersCreated = true
}
// Get daemon info
@@ -165,7 +164,7 @@ func (d *Docker) Gather(acc telegraf.Accumulator) error {
opts := types.ContainerListOptions{}
ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration)
defer cancel()
containers, err := listWrapper(d.client, ctx, opts)
containers, err := d.client.ContainerList(ctx, opts)
if err != nil {
return err
}
@@ -196,7 +195,7 @@ func (d *Docker) gatherInfo(acc telegraf.Accumulator) error {
// Get info from docker daemon
ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration)
defer cancel()
info, err := infoWrapper(d.client, ctx)
info, err := d.client.Info(ctx)
if err != nil {
return err
}
@@ -291,15 +290,18 @@ func (d *Docker) gatherContainer(
"container_image": imageName,
"container_version": imageVersion,
}
if len(d.ContainerNames) > 0 {
if !sliceContains(cname, d.ContainerNames) {
return nil
if len(d.ContainerInclude) > 0 || len(d.ContainerExclude) > 0 {
if len(d.ContainerInclude) == 0 || !d.ContainerFilter.containerInclude.Match(cname) {
if len(d.ContainerExclude) == 0 || d.ContainerFilter.containerExclude.Match(cname) {
return nil
}
}
}
ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration)
defer cancel()
r, err := statsWrapper(d.client, ctx, container.ID, false)
r, err := d.client.ContainerStats(ctx, container.ID, false)
if err != nil {
return fmt.Errorf("Error getting docker stats: %s", err.Error())
}
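
The nested conditions a few lines up implement include/exclude semantics: a container is skipped unless it matches the include list (when one is set) and does not match the exclude list. Using the same `filter` package the file already imports, the logic reduces to roughly:

```go
package main

import (
	"fmt"

	"github.com/influxdata/telegraf/filter"
)

func main() {
	include, _ := filter.Compile([]string{"etcd*"})
	exclude, _ := filter.Compile([]string{"etcd2"})

	for _, name := range []string{"etcd", "etcd2", "redis"} {
		// Skip when the name misses the include globs or hits the
		// exclude globs; empty lists mean "no restriction".
		skip := (include != nil && !include.Match(name)) ||
			(exclude != nil && exclude.Match(name))
		fmt.Println(name, "skipped:", skip)
	}
}
```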
@@ -311,6 +313,7 @@ func (d *Docker) gatherContainer(
}
return fmt.Errorf("Error decoding: %s", err.Error())
}
daemonOSType := r.OSType
// Add labels to tags
for k, label := range container.Labels {
@@ -321,7 +324,24 @@ func (d *Docker) gatherContainer(
}
}
gatherContainerStats(v, acc, tags, container.ID, d.PerDevice, d.Total)
// Add whitelisted environment variables to tags
if len(d.TagEnvironment) > 0 {
info, err := d.client.ContainerInspect(ctx, container.ID)
if err != nil {
return fmt.Errorf("Error inspecting docker container: %s", err.Error())
}
for _, envvar := range info.Config.Env {
for _, configvar := range d.TagEnvironment {
dock_env := strings.SplitN(envvar, "=", 2)
// check for presence of tag in whitelist
if len(dock_env) == 2 && len(strings.TrimSpace(dock_env[1])) != 0 && configvar == dock_env[0] {
tags[dock_env[0]] = dock_env[1]
}
}
}
}
gatherContainerStats(v, acc, tags, container.ID, d.PerDevice, d.Total, daemonOSType)
return nil
}
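
`strings.SplitN(envvar, "=", 2)` splits only on the first `=`, which explains the fixtures further down: `ENVVAR7=ENVVAR8=ENVVAR9` becomes the tag `ENVVAR7` with value `ENVVAR8=ENVVAR9`, while unset or whitespace-only variables are dropped. A stripped-down sketch:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	whitelist := []string{"ENVVAR1", "ENVVAR7"}
	env := []string{
		"ENVVAR1=loremipsum",
		"ENVVAR4",                 // no value: skipped
		"ENVVAR6= ",               // whitespace-only value: skipped
		"ENVVAR7=ENVVAR8=ENVVAR9", // split once, remainder kept verbatim
	}

	tags := map[string]string{}
	for _, envvar := range env {
		for _, want := range whitelist {
			kv := strings.SplitN(envvar, "=", 2)
			if len(kv) == 2 && len(strings.TrimSpace(kv[1])) != 0 && want == kv[0] {
				tags[kv[0]] = kv[1]
			}
		}
	}
	fmt.Println(tags["ENVVAR7"]) // ENVVAR8=ENVVAR9
}
```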
@@ -333,46 +353,68 @@ func gatherContainerStats(
id string,
perDevice bool,
total bool,
daemonOSType string,
) {
now := stat.Read
memfields := map[string]interface{}{
"max_usage": stat.MemoryStats.MaxUsage,
"usage": stat.MemoryStats.Usage,
"fail_count": stat.MemoryStats.Failcnt,
"limit": stat.MemoryStats.Limit,
"total_pgmafault": stat.MemoryStats.Stats["total_pgmajfault"],
"cache": stat.MemoryStats.Stats["cache"],
"mapped_file": stat.MemoryStats.Stats["mapped_file"],
"total_inactive_file": stat.MemoryStats.Stats["total_inactive_file"],
"pgpgout": stat.MemoryStats.Stats["pagpgout"],
"rss": stat.MemoryStats.Stats["rss"],
"total_mapped_file": stat.MemoryStats.Stats["total_mapped_file"],
"writeback": stat.MemoryStats.Stats["writeback"],
"unevictable": stat.MemoryStats.Stats["unevictable"],
"pgpgin": stat.MemoryStats.Stats["pgpgin"],
"total_unevictable": stat.MemoryStats.Stats["total_unevictable"],
"pgmajfault": stat.MemoryStats.Stats["pgmajfault"],
"total_rss": stat.MemoryStats.Stats["total_rss"],
"total_rss_huge": stat.MemoryStats.Stats["total_rss_huge"],
"total_writeback": stat.MemoryStats.Stats["total_write_back"],
"total_inactive_anon": stat.MemoryStats.Stats["total_inactive_anon"],
"rss_huge": stat.MemoryStats.Stats["rss_huge"],
"hierarchical_memory_limit": stat.MemoryStats.Stats["hierarchical_memory_limit"],
"total_pgfault": stat.MemoryStats.Stats["total_pgfault"],
"total_active_file": stat.MemoryStats.Stats["total_active_file"],
"active_anon": stat.MemoryStats.Stats["active_anon"],
"total_active_anon": stat.MemoryStats.Stats["total_active_anon"],
"total_pgpgout": stat.MemoryStats.Stats["total_pgpgout"],
"total_cache": stat.MemoryStats.Stats["total_cache"],
"inactive_anon": stat.MemoryStats.Stats["inactive_anon"],
"active_file": stat.MemoryStats.Stats["active_file"],
"pgfault": stat.MemoryStats.Stats["pgfault"],
"inactive_file": stat.MemoryStats.Stats["inactive_file"],
"total_pgpgin": stat.MemoryStats.Stats["total_pgpgin"],
"usage_percent": calculateMemPercent(stat),
"container_id": id,
"container_id": id,
}
memstats := []string{
"active_anon",
"active_file",
"cache",
"hierarchical_memory_limit",
"inactive_anon",
"inactive_file",
"mapped_file",
"pgfault",
"pgmajfault",
"pgpgin",
"pgpgout",
"rss",
"rss_huge",
"total_active_anon",
"total_active_file",
"total_cache",
"total_inactive_anon",
"total_inactive_file",
"total_mapped_file",
"total_pgfault",
"total_pgmajfault",
"total_pgpgin",
"total_pgpgout",
"total_rss",
"total_rss_huge",
"total_unevictable",
"total_writeback",
"unevictable",
"writeback",
}
for _, field := range memstats {
if value, ok := stat.MemoryStats.Stats[field]; ok {
memfields[field] = value
}
}
if stat.MemoryStats.Failcnt != 0 {
memfields["fail_count"] = stat.MemoryStats.Failcnt
}
if daemonOSType != "windows" {
memfields["limit"] = stat.MemoryStats.Limit
memfields["usage"] = stat.MemoryStats.Usage
memfields["max_usage"] = stat.MemoryStats.MaxUsage
mem := calculateMemUsageUnixNoCache(stat.MemoryStats)
memLimit := float64(stat.MemoryStats.Limit)
memfields["usage_percent"] = calculateMemPercentUnixNoCache(memLimit, mem)
} else {
memfields["commit_bytes"] = stat.MemoryStats.Commit
memfields["commit_peak_bytes"] = stat.MemoryStats.CommitPeak
memfields["private_working_set"] = stat.MemoryStats.PrivateWorkingSet
}
acc.AddFields("docker_container_mem", memfields, tags, now)
cpufields := map[string]interface{}{
@@ -383,14 +425,33 @@ func gatherContainerStats(
"throttling_periods": stat.CPUStats.ThrottlingData.Periods,
"throttling_throttled_periods": stat.CPUStats.ThrottlingData.ThrottledPeriods,
"throttling_throttled_time": stat.CPUStats.ThrottlingData.ThrottledTime,
"usage_percent": calculateCPUPercent(stat),
"container_id": id,
}
if daemonOSType != "windows" {
previousCPU := stat.PreCPUStats.CPUUsage.TotalUsage
previousSystem := stat.PreCPUStats.SystemUsage
cpuPercent := calculateCPUPercentUnix(previousCPU, previousSystem, stat)
cpufields["usage_percent"] = cpuPercent
} else {
cpuPercent := calculateCPUPercentWindows(stat)
cpufields["usage_percent"] = cpuPercent
}
cputags := copyTags(tags)
cputags["cpu"] = "cpu-total"
acc.AddFields("docker_container_cpu", cpufields, cputags, now)
for i, percpu := range stat.CPUStats.CPUUsage.PercpuUsage {
// If we have OnlineCPUs field, then use it to restrict stats gathering to only Online CPUs
// (https://github.com/moby/moby/commit/115f91d7575d6de6c7781a96a082f144fd17e400)
var percpuusage []uint64
if stat.CPUStats.OnlineCPUs > 0 {
percpuusage = stat.CPUStats.CPUUsage.PercpuUsage[:stat.CPUStats.OnlineCPUs]
} else {
percpuusage = stat.CPUStats.CPUUsage.PercpuUsage
}
for i, percpu := range percpuusage {
percputags := copyTags(tags)
percputags["cpu"] = fmt.Sprintf("cpu%d", i)
fields := map[string]interface{}{
@@ -456,26 +517,6 @@ func gatherContainerStats(
gatherBlockIOMetrics(stat, acc, tags, now, id, perDevice, total)
}
func calculateMemPercent(stat *types.StatsJSON) float64 {
var memPercent = 0.0
if stat.MemoryStats.Limit > 0 {
memPercent = float64(stat.MemoryStats.Usage) / float64(stat.MemoryStats.Limit) * 100.0
}
return memPercent
}
func calculateCPUPercent(stat *types.StatsJSON) float64 {
var cpuPercent = 0.0
// calculate the change for the cpu and system usage of the container in between readings
cpuDelta := float64(stat.CPUStats.CPUUsage.TotalUsage) - float64(stat.PreCPUStats.CPUUsage.TotalUsage)
systemDelta := float64(stat.CPUStats.SystemUsage) - float64(stat.PreCPUStats.SystemUsage)
if systemDelta > 0.0 && cpuDelta > 0.0 {
cpuPercent = (cpuDelta / systemDelta) * float64(len(stat.CPUStats.CPUUsage.PercpuUsage)) * 100.0
}
return cpuPercent
}
func gatherBlockIOMetrics(
stat *types.StatsJSON,
acc telegraf.Accumulator,
@@ -624,8 +665,32 @@ func parseSize(sizeStr string) (int64, error) {
return int64(size), nil
}
func (d *Docker) createContainerFilters() error {
if len(d.ContainerNames) > 0 {
d.ContainerInclude = append(d.ContainerInclude, d.ContainerNames...)
}
if len(d.ContainerInclude) != 0 {
var err error
d.ContainerFilter.containerInclude, err = filter.Compile(d.ContainerInclude)
if err != nil {
return err
}
}
if len(d.ContainerExclude) != 0 {
var err error
d.ContainerFilter.containerExclude, err = filter.Compile(d.ContainerExclude)
if err != nil {
return err
}
}
return nil
}
func (d *Docker) createLabelFilters() error {
if len(d.LabelInclude) != 0 && d.LabelFilter.labelInclude == nil {
if len(d.LabelInclude) != 0 {
var err error
d.LabelFilter.labelInclude, err = filter.Compile(d.LabelInclude)
if err != nil {
@@ -633,7 +698,7 @@ func (d *Docker) createLabelFilters() error {
}
}
if len(d.LabelExclude) != 0 && d.LabelFilter.labelExclude == nil {
if len(d.LabelExclude) != 0 {
var err error
d.LabelFilter.labelExclude, err = filter.Compile(d.LabelExclude)
if err != nil {
@@ -647,9 +712,12 @@ func (d *Docker) createLabelFilters() error {
func init() {
inputs.Add("docker", func() telegraf.Input {
return &Docker{
PerDevice: true,
Timeout: internal.Duration{Duration: time.Second * 5},
labelFiltersCreated: false,
PerDevice: true,
Timeout: internal.Duration{Duration: time.Second * 5},
Endpoint: defaultEndpoint,
newEnvClient: NewEnvClient,
newClient: NewClient,
filtersCreated: false,
}
})
}

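Worth calling out in the diff above: per-CPU gathering is now bounded by `stat.CPUStats.OnlineCPUs` when the daemon reports it, so Telegraf no longer emits all-zero `cpuN` series for CPUs that appear in the stats payload but are offline (the test file below asserts that `cpu2` and `cpu3` are absent). The truncation in isolation:

```go
package main

import "fmt"

func main() {
	// The stats payload can report eight per-CPU slots while only two
	// CPUs are online; slicing to OnlineCPUs drops the idle zero slots.
	percpu := []uint64{1, 1002, 0, 0, 0, 0, 0, 0}
	onlineCPUs := 2

	usage := percpu
	if onlineCPUs > 0 {
		usage = percpu[:onlineCPUs]
	}
	for i, u := range usage {
		fmt.Printf("cpu%d usage_total=%d\n", i, u)
	}
}
```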
View File

@@ -1,8 +1,9 @@
package docker
import (
"context"
"crypto/tls"
"testing"
"time"
"github.com/influxdata/telegraf/testutil"
@@ -10,6 +11,56 @@ import (
"github.com/stretchr/testify/require"
)
type MockClient struct {
InfoF func(ctx context.Context) (types.Info, error)
ContainerListF func(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error)
ContainerStatsF func(ctx context.Context, containerID string, stream bool) (types.ContainerStats, error)
ContainerInspectF func(ctx context.Context, containerID string) (types.ContainerJSON, error)
}
func (c *MockClient) Info(ctx context.Context) (types.Info, error) {
return c.InfoF(ctx)
}
func (c *MockClient) ContainerList(
ctx context.Context,
options types.ContainerListOptions,
) ([]types.Container, error) {
return c.ContainerListF(ctx, options)
}
func (c *MockClient) ContainerStats(
ctx context.Context,
containerID string,
stream bool,
) (types.ContainerStats, error) {
return c.ContainerStatsF(ctx, containerID, stream)
}
func (c *MockClient) ContainerInspect(
ctx context.Context,
containerID string,
) (types.ContainerJSON, error) {
return c.ContainerInspectF(ctx, containerID)
}
func newClient(host string, tlsConfig *tls.Config) (Client, error) {
return &MockClient{
InfoF: func(context.Context) (types.Info, error) {
return info, nil
},
ContainerListF: func(context.Context, types.ContainerListOptions) ([]types.Container, error) {
return containerList, nil
},
ContainerStatsF: func(context.Context, string, bool) (types.ContainerStats, error) {
return containerStats(), nil
},
ContainerInspectF: func(context.Context, string) (types.ContainerJSON, error) {
return containerInspect, nil
},
}, nil
}
func TestDockerGatherContainerStats(t *testing.T) {
var acc testutil.Accumulator
stats := testStats()
@@ -18,7 +69,8 @@ func TestDockerGatherContainerStats(t *testing.T) {
"container_name": "redis",
"container_image": "redis/image",
}
gatherContainerStats(stats, &acc, tags, "123456789", true, true)
gatherContainerStats(stats, &acc, tags, "123456789", true, true, "linux")
// test docker_container_net measurement
netfields := map[string]interface{}{
@@ -72,41 +124,41 @@ func TestDockerGatherContainerStats(t *testing.T) {
// test docker_container_mem measurement
memfields := map[string]interface{}{
"max_usage": uint64(1001),
"usage": uint64(1111),
"fail_count": uint64(1),
"limit": uint64(2000),
"total_pgmafault": uint64(0),
"cache": uint64(0),
"mapped_file": uint64(0),
"total_inactive_file": uint64(0),
"pgpgout": uint64(0),
"rss": uint64(0),
"total_mapped_file": uint64(0),
"writeback": uint64(0),
"unevictable": uint64(0),
"pgpgin": uint64(0),
"total_unevictable": uint64(0),
"pgmajfault": uint64(0),
"total_rss": uint64(44),
"total_rss_huge": uint64(444),
"total_writeback": uint64(55),
"total_inactive_anon": uint64(0),
"rss_huge": uint64(0),
"hierarchical_memory_limit": uint64(0),
"total_pgfault": uint64(0),
"total_active_file": uint64(0),
"active_anon": uint64(0),
"total_active_anon": uint64(0),
"total_pgpgout": uint64(0),
"total_cache": uint64(0),
"inactive_anon": uint64(0),
"active_file": uint64(1),
"pgfault": uint64(2),
"inactive_file": uint64(3),
"total_pgpgin": uint64(4),
"usage_percent": float64(55.55),
"cache": uint64(0),
"container_id": "123456789",
"fail_count": uint64(1),
"hierarchical_memory_limit": uint64(0),
"inactive_anon": uint64(0),
"inactive_file": uint64(3),
"limit": uint64(2000),
"mapped_file": uint64(0),
"max_usage": uint64(1001),
"pgfault": uint64(2),
"pgmajfault": uint64(0),
"pgpgin": uint64(0),
"pgpgout": uint64(0),
"rss_huge": uint64(0),
"rss": uint64(0),
"total_active_anon": uint64(0),
"total_active_file": uint64(0),
"total_cache": uint64(0),
"total_inactive_anon": uint64(0),
"total_inactive_file": uint64(0),
"total_mapped_file": uint64(0),
"total_pgfault": uint64(0),
"total_pgmajfault": uint64(0),
"total_pgpgin": uint64(4),
"total_pgpgout": uint64(0),
"total_rss_huge": uint64(444),
"total_rss": uint64(44),
"total_unevictable": uint64(0),
"total_writeback": uint64(55),
"unevictable": uint64(0),
"usage_percent": float64(55.55),
"usage": uint64(1111),
"writeback": uint64(0),
}
acc.AssertContainsTaggedFields(t, "docker_container_mem", memfields, tags)
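
The block above is the same set of expectations reordered alphabetically; it keeps passing because `gatherContainerStats` now copies only the memory stats actually present in the payload, so the fixtures control exactly which fields appear. The presence check in isolation:

```go
package main

import "fmt"

func main() {
	// Copy a key only when the daemon reported it, instead of indexing
	// every name and emitting spurious zero values for keys the kernel
	// never produced.
	stats := map[string]uint64{"rss": 44, "cache": 0}

	fields := map[string]interface{}{}
	for _, k := range []string{"rss", "cache", "writeback"} {
		if v, ok := stats[k]; ok {
			fields[k] = v
		}
	}
	_, present := fields["writeback"]
	fmt.Println(len(fields), present) // 2 false
}
```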
@@ -140,166 +192,174 @@ func TestDockerGatherContainerStats(t *testing.T) {
"container_id": "123456789",
}
acc.AssertContainsTaggedFields(t, "docker_container_cpu", cpu1fields, cputags)
// These tagged fields should not be present because of offline CPUs
cputags["cpu"] = "cpu2"
cpu2fields := map[string]interface{}{
"usage_total": uint64(0),
"container_id": "123456789",
}
acc.AssertDoesNotContainsTaggedFields(t, "docker_container_cpu", cpu2fields, cputags)
cputags["cpu"] = "cpu3"
cpu3fields := map[string]interface{}{
"usage_total": uint64(0),
"container_id": "123456789",
}
acc.AssertDoesNotContainsTaggedFields(t, "docker_container_cpu", cpu3fields, cputags)
}
func testStats() *types.StatsJSON {
stats := &types.StatsJSON{}
stats.Read = time.Now()
stats.Networks = make(map[string]types.NetworkStats)
func TestDocker_WindowsMemoryContainerStats(t *testing.T) {
var acc testutil.Accumulator
stats.CPUStats.CPUUsage.PercpuUsage = []uint64{1, 1002}
stats.CPUStats.CPUUsage.UsageInUsermode = 100
stats.CPUStats.CPUUsage.TotalUsage = 500
stats.CPUStats.CPUUsage.UsageInKernelmode = 200
stats.CPUStats.SystemUsage = 100
stats.CPUStats.ThrottlingData.Periods = 1
stats.PreCPUStats.CPUUsage.TotalUsage = 400
stats.PreCPUStats.SystemUsage = 50
stats.MemoryStats.Stats = make(map[string]uint64)
stats.MemoryStats.Stats["total_pgmajfault"] = 0
stats.MemoryStats.Stats["cache"] = 0
stats.MemoryStats.Stats["mapped_file"] = 0
stats.MemoryStats.Stats["total_inactive_file"] = 0
stats.MemoryStats.Stats["pagpgout"] = 0
stats.MemoryStats.Stats["rss"] = 0
stats.MemoryStats.Stats["total_mapped_file"] = 0
stats.MemoryStats.Stats["writeback"] = 0
stats.MemoryStats.Stats["unevictable"] = 0
stats.MemoryStats.Stats["pgpgin"] = 0
stats.MemoryStats.Stats["total_unevictable"] = 0
stats.MemoryStats.Stats["pgmajfault"] = 0
stats.MemoryStats.Stats["total_rss"] = 44
stats.MemoryStats.Stats["total_rss_huge"] = 444
stats.MemoryStats.Stats["total_write_back"] = 55
stats.MemoryStats.Stats["total_inactive_anon"] = 0
stats.MemoryStats.Stats["rss_huge"] = 0
stats.MemoryStats.Stats["hierarchical_memory_limit"] = 0
stats.MemoryStats.Stats["total_pgfault"] = 0
stats.MemoryStats.Stats["total_active_file"] = 0
stats.MemoryStats.Stats["active_anon"] = 0
stats.MemoryStats.Stats["total_active_anon"] = 0
stats.MemoryStats.Stats["total_pgpgout"] = 0
stats.MemoryStats.Stats["total_cache"] = 0
stats.MemoryStats.Stats["inactive_anon"] = 0
stats.MemoryStats.Stats["active_file"] = 1
stats.MemoryStats.Stats["pgfault"] = 2
stats.MemoryStats.Stats["inactive_file"] = 3
stats.MemoryStats.Stats["total_pgpgin"] = 4
stats.MemoryStats.MaxUsage = 1001
stats.MemoryStats.Usage = 1111
stats.MemoryStats.Failcnt = 1
stats.MemoryStats.Limit = 2000
stats.Networks["eth0"] = types.NetworkStats{
RxDropped: 1,
RxBytes: 2,
RxErrors: 3,
TxPackets: 4,
TxDropped: 1,
RxPackets: 2,
TxErrors: 3,
TxBytes: 4,
d := Docker{
newClient: func(string, *tls.Config) (Client, error) {
return &MockClient{
InfoF: func(ctx context.Context) (types.Info, error) {
return info, nil
},
ContainerListF: func(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) {
return containerList, nil
},
ContainerStatsF: func(ctx context.Context, containerID string, stream bool) (types.ContainerStats, error) {
return containerStatsWindows(), nil
},
ContainerInspectF: func(ctx context.Context, containerID string) (types.ContainerJSON, error) {
return containerInspect, nil
},
}, nil
},
}
stats.Networks["eth1"] = types.NetworkStats{
RxDropped: 5,
RxBytes: 6,
RxErrors: 7,
TxPackets: 8,
TxDropped: 5,
RxPackets: 6,
TxErrors: 7,
TxBytes: 8,
}
sbr := types.BlkioStatEntry{
Major: 6,
Minor: 0,
Op: "read",
Value: 100,
}
sr := types.BlkioStatEntry{
Major: 6,
Minor: 0,
Op: "write",
Value: 101,
}
sr2 := types.BlkioStatEntry{
Major: 6,
Minor: 1,
Op: "write",
Value: 201,
}
stats.BlkioStats.IoServiceBytesRecursive = append(
stats.BlkioStats.IoServiceBytesRecursive, sbr)
stats.BlkioStats.IoServicedRecursive = append(
stats.BlkioStats.IoServicedRecursive, sr)
stats.BlkioStats.IoServicedRecursive = append(
stats.BlkioStats.IoServicedRecursive, sr2)
return stats
}
var gatherLabelsTests = []struct {
include []string
exclude []string
expected []string
notexpected []string
}{
{[]string{}, []string{}, []string{"label1", "label2"}, []string{}},
{[]string{"*"}, []string{}, []string{"label1", "label2"}, []string{}},
{[]string{"lab*"}, []string{}, []string{"label1", "label2"}, []string{}},
{[]string{"label1"}, []string{}, []string{"label1"}, []string{"label2"}},
{[]string{"label1*"}, []string{}, []string{"label1"}, []string{"label2"}},
{[]string{}, []string{"*"}, []string{}, []string{"label1", "label2"}},
{[]string{}, []string{"lab*"}, []string{}, []string{"label1", "label2"}},
{[]string{}, []string{"label1"}, []string{"label2"}, []string{"label1"}},
{[]string{"*"}, []string{"*"}, []string{}, []string{"label1", "label2"}},
err := d.Gather(&acc)
require.NoError(t, err)
}
func TestDockerGatherLabels(t *testing.T) {
var gatherLabelsTests = []struct {
include []string
exclude []string
expected []string
notexpected []string
}{
{[]string{}, []string{}, []string{"label1", "label2"}, []string{}},
{[]string{"*"}, []string{}, []string{"label1", "label2"}, []string{}},
{[]string{"lab*"}, []string{}, []string{"label1", "label2"}, []string{}},
{[]string{"label1"}, []string{}, []string{"label1"}, []string{"label2"}},
{[]string{"label1*"}, []string{}, []string{"label1"}, []string{"label2"}},
{[]string{}, []string{"*"}, []string{}, []string{"label1", "label2"}},
{[]string{}, []string{"lab*"}, []string{}, []string{"label1", "label2"}},
{[]string{}, []string{"label1"}, []string{"label2"}, []string{"label1"}},
{[]string{"*"}, []string{"*"}, []string{}, []string{"label1", "label2"}},
}
for _, tt := range gatherLabelsTests {
var acc testutil.Accumulator
d := Docker{
client: nil,
testing: true,
}
for _, label := range tt.include {
d.LabelInclude = append(d.LabelInclude, label)
}
for _, label := range tt.exclude {
d.LabelExclude = append(d.LabelExclude, label)
}
err := d.Gather(&acc)
require.NoError(t, err)
for _, label := range tt.expected {
if !acc.HasTag("docker_container_cpu", label) {
t.Errorf("Didn't get expected label of %s. Test was: Include: %s Exclude %s",
label, tt.include, tt.exclude)
t.Run("", func(t *testing.T) {
var acc testutil.Accumulator
d := Docker{
newClient: newClient,
}
}
for _, label := range tt.notexpected {
if acc.HasTag("docker_container_cpu", label) {
t.Errorf("Got unexpected label of %s. Test was: Include: %s Exclude %s",
label, tt.include, tt.exclude)
for _, label := range tt.include {
d.LabelInclude = append(d.LabelInclude, label)
}
}
for _, label := range tt.exclude {
d.LabelExclude = append(d.LabelExclude, label)
}
err := d.Gather(&acc)
require.NoError(t, err)
for _, label := range tt.expected {
if !acc.HasTag("docker_container_cpu", label) {
t.Errorf("Didn't get expected label of %s. Test was: Include: %s Exclude %s",
label, tt.include, tt.exclude)
}
}
for _, label := range tt.notexpected {
if acc.HasTag("docker_container_cpu", label) {
t.Errorf("Got unexpected label of %s. Test was: Include: %s Exclude %s",
label, tt.include, tt.exclude)
}
}
})
}
}
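
Both the label and container-name tables now run through `t.Run`, making each case an independent subtest: one failing combination no longer masks the rest, and `go test -run` can target a single case. The shape of the pattern, as a generic example rather than plugin code:

```go
package example

import "testing"

func classify(n int) string {
	switch {
	case n > 0:
		return "positive"
	case n < 0:
		return "negative"
	}
	return "zero"
}

func TestClassify(t *testing.T) {
	tests := []struct {
		in   int
		want string
	}{
		{0, "zero"},
		{1, "positive"},
		{-1, "negative"},
	}
	for _, tt := range tests {
		tt := tt // pin the loop variable (needed if subtests run in parallel)
		t.Run("", func(t *testing.T) {
			if got := classify(tt.in); got != tt.want {
				t.Errorf("classify(%d) = %q, want %q", tt.in, got, tt.want)
			}
		})
	}
}
```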
func TestContainerNames(t *testing.T) {
var gatherContainerNames = []struct {
include []string
exclude []string
expected []string
notexpected []string
}{
{[]string{}, []string{}, []string{"etcd", "etcd2"}, []string{}},
{[]string{"*"}, []string{}, []string{"etcd", "etcd2"}, []string{}},
{[]string{"etc*"}, []string{}, []string{"etcd", "etcd2"}, []string{}},
{[]string{"etcd"}, []string{}, []string{"etcd"}, []string{"etcd2"}},
{[]string{"etcd2*"}, []string{}, []string{"etcd2"}, []string{"etcd"}},
{[]string{}, []string{"etc*"}, []string{}, []string{"etcd", "etcd2"}},
{[]string{}, []string{"etcd"}, []string{"etcd2"}, []string{"etcd"}},
{[]string{"*"}, []string{"*"}, []string{"etcd", "etcd2"}, []string{}},
{[]string{}, []string{"*"}, []string{""}, []string{"etcd", "etcd2"}},
}
for _, tt := range gatherContainerNames {
t.Run("", func(t *testing.T) {
var acc testutil.Accumulator
d := Docker{
newClient: newClient,
ContainerInclude: tt.include,
ContainerExclude: tt.exclude,
}
err := d.Gather(&acc)
require.NoError(t, err)
for _, metric := range acc.Metrics {
if metric.Measurement == "docker_container_cpu" {
if val, ok := metric.Tags["container_name"]; ok {
var found bool = false
for _, cname := range tt.expected {
if val == cname {
found = true
break
}
}
if !found {
t.Errorf("Got unexpected container of %s. Test was -> Include: %s, Exclude: %s", val, tt.include, tt.exclude)
}
}
}
}
for _, metric := range acc.Metrics {
if metric.Measurement == "docker_container_cpu" {
if val, ok := metric.Tags["container_name"]; ok {
var found bool = false
for _, cname := range tt.notexpected {
if val == cname {
found = true
break
}
}
if found {
t.Errorf("Got unexpected container of %s. Test was -> Include: %s, Exclude: %s", val, tt.include, tt.exclude)
}
}
}
}
})
}
}
func TestDockerGatherInfo(t *testing.T) {
var acc testutil.Accumulator
d := Docker{
client: nil,
testing: true,
newClient: newClient,
TagEnvironment: []string{"ENVVAR1", "ENVVAR2", "ENVVAR3", "ENVVAR5",
"ENVVAR6", "ENVVAR7", "ENVVAR8", "ENVVAR9"},
}
err := acc.GatherError(d.Gather)
@@ -345,6 +405,10 @@ func TestDockerGatherInfo(t *testing.T) {
"cpu": "cpu3",
"container_version": "v2.2.2",
"engine_host": "absol",
"ENVVAR1": "loremipsum",
"ENVVAR2": "dolorsitamet",
"ENVVAR3": "=ubuntu:10.04",
"ENVVAR7": "ENVVAR8=ENVVAR9",
"label1": "test_value_1",
"label2": "test_value_2",
},
@@ -352,51 +416,23 @@ func TestDockerGatherInfo(t *testing.T) {
acc.AssertContainsTaggedFields(t,
"docker_container_mem",
map[string]interface{}{
"total_pgpgout": uint64(0),
"usage_percent": float64(0),
"rss": uint64(0),
"total_writeback": uint64(0),
"active_anon": uint64(0),
"total_pgmafault": uint64(0),
"total_rss": uint64(0),
"total_unevictable": uint64(0),
"active_file": uint64(0),
"total_mapped_file": uint64(0),
"pgpgin": uint64(0),
"total_active_file": uint64(0),
"total_active_anon": uint64(0),
"total_cache": uint64(0),
"inactive_anon": uint64(0),
"pgmajfault": uint64(0),
"total_inactive_anon": uint64(0),
"total_rss_huge": uint64(0),
"rss_huge": uint64(0),
"hierarchical_memory_limit": uint64(0),
"pgpgout": uint64(0),
"unevictable": uint64(0),
"total_inactive_file": uint64(0),
"writeback": uint64(0),
"total_pgfault": uint64(0),
"total_pgpgin": uint64(0),
"cache": uint64(0),
"mapped_file": uint64(0),
"inactive_file": uint64(0),
"max_usage": uint64(0),
"fail_count": uint64(0),
"pgfault": uint64(0),
"usage": uint64(0),
"limit": uint64(18935443456),
"container_id": "b7dfbb9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296e2173",
"container_id": "b7dfbb9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296e2173",
"limit": uint64(18935443456),
"max_usage": uint64(0),
"usage": uint64(0),
"usage_percent": float64(0),
},
map[string]string{
"engine_host": "absol",
"container_name": "etcd2",
"container_image": "quay.io:4443/coreos/etcd",
"container_version": "v2.2.2",
"ENVVAR1": "loremipsum",
"ENVVAR2": "dolorsitamet",
"ENVVAR3": "=ubuntu:10.04",
"ENVVAR7": "ENVVAR8=ENVVAR9",
"label1": "test_value_1",
"label2": "test_value_2",
},
)
//fmt.Print(info)
}

View File

@@ -0,0 +1,406 @@
package docker
import (
"io/ioutil"
"strings"
"time"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/registry"
)
var info = types.Info{
Containers: 108,
ContainersRunning: 98,
ContainersStopped: 6,
ContainersPaused: 3,
OomKillDisable: false,
SystemTime: "2016-02-24T00:55:09.15073105-05:00",
NEventsListener: 0,
ID: "5WQQ:TFWR:FDNG:OKQ3:37Y4:FJWG:QIKK:623T:R3ME:QTKB:A7F7:OLHD",
Debug: false,
LoggingDriver: "json-file",
KernelVersion: "4.3.0-1-amd64",
IndexServerAddress: "https://index.docker.io/v1/",
MemTotal: 3840757760,
Images: 199,
CPUCfsQuota: true,
Name: "absol",
SwapLimit: false,
IPv4Forwarding: true,
ExperimentalBuild: false,
CPUCfsPeriod: true,
RegistryConfig: &registry.ServiceConfig{
IndexConfigs: map[string]*registry.IndexInfo{
"docker.io": {
Name: "docker.io",
Mirrors: []string{},
Official: true,
Secure: true,
},
}, InsecureRegistryCIDRs: []*registry.NetIPNet{{IP: []byte{127, 0, 0, 0}, Mask: []byte{255, 0, 0, 0}}}, Mirrors: []string{}},
OperatingSystem: "Linux Mint LMDE (containerized)",
BridgeNfIptables: true,
HTTPSProxy: "",
Labels: []string{},
MemoryLimit: false,
DriverStatus: [][2]string{{"Pool Name", "docker-8:1-1182287-pool"}, {"Pool Blocksize", "65.54 kB"}, {"Backing Filesystem", "extfs"}, {"Data file", "/dev/loop0"}, {"Metadata file", "/dev/loop1"}, {"Data Space Used", "17.3 GB"}, {"Data Space Total", "107.4 GB"}, {"Data Space Available", "36.53 GB"}, {"Metadata Space Used", "20.97 MB"}, {"Metadata Space Total", "2.147 GB"}, {"Metadata Space Available", "2.127 GB"}, {"Udev Sync Supported", "true"}, {"Deferred Removal Enabled", "false"}, {"Data loop file", "/var/lib/docker/devicemapper/devicemapper/data"}, {"Metadata loop file", "/var/lib/docker/devicemapper/devicemapper/metadata"}, {"Library Version", "1.02.115 (2016-01-25)"}},
NFd: 19,
HTTPProxy: "",
Driver: "devicemapper",
NGoroutines: 39,
NCPU: 4,
DockerRootDir: "/var/lib/docker",
NoProxy: "",
BridgeNfIP6tables: true,
}
var containerList = []types.Container{
types.Container{
ID: "e2173b9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296b7dfb",
Names: []string{"/etcd"},
Image: "quay.io/coreos/etcd:v2.2.2",
Command: "/etcd -name etcd0 -advertise-client-urls http://localhost:2379 -listen-client-urls http://0.0.0.0:2379",
Created: 1455941930,
Status: "Up 4 hours",
Ports: []types.Port{
types.Port{
PrivatePort: 7001,
PublicPort: 0,
Type: "tcp",
},
types.Port{
PrivatePort: 4001,
PublicPort: 0,
Type: "tcp",
},
types.Port{
PrivatePort: 2380,
PublicPort: 0,
Type: "tcp",
},
types.Port{
PrivatePort: 2379,
PublicPort: 2379,
Type: "tcp",
IP: "0.0.0.0",
},
},
Labels: map[string]string{
"label1": "test_value_1",
"label2": "test_value_2",
},
SizeRw: 0,
SizeRootFs: 0,
},
types.Container{
ID: "b7dfbb9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296e2173",
Names: []string{"/etcd2"},
Image: "quay.io:4443/coreos/etcd:v2.2.2",
Command: "/etcd -name etcd2 -advertise-client-urls http://localhost:2379 -listen-client-urls http://0.0.0.0:2379",
Created: 1455941933,
Status: "Up 4 hours",
Ports: []types.Port{
types.Port{
PrivatePort: 7002,
PublicPort: 0,
Type: "tcp",
},
types.Port{
PrivatePort: 4002,
PublicPort: 0,
Type: "tcp",
},
types.Port{
PrivatePort: 2381,
PublicPort: 0,
Type: "tcp",
},
types.Port{
PrivatePort: 2382,
PublicPort: 2382,
Type: "tcp",
IP: "0.0.0.0",
},
},
Labels: map[string]string{
"label1": "test_value_1",
"label2": "test_value_2",
},
SizeRw: 0,
SizeRootFs: 0,
},
}
func containerStats() types.ContainerStats {
var stat types.ContainerStats
jsonStat := `
{
"blkio_stats": {
"io_service_bytes_recursive": [
{
"major": 252,
"minor": 1,
"op": "Read",
"value": 753664
},
{
"major": 252,
"minor": 1,
"op": "Write"
},
{
"major": 252,
"minor": 1,
"op": "Sync"
},
{
"major": 252,
"minor": 1,
"op": "Async",
"value": 753664
},
{
"major": 252,
"minor": 1,
"op": "Total",
"value": 753664
}
],
"io_serviced_recursive": [
{
"major": 252,
"minor": 1,
"op": "Read",
"value": 26
},
{
"major": 252,
"minor": 1,
"op": "Write"
},
{
"major": 252,
"minor": 1,
"op": "Sync"
},
{
"major": 252,
"minor": 1,
"op": "Async",
"value": 26
},
{
"major": 252,
"minor": 1,
"op": "Total",
"value": 26
}
]
},
"cpu_stats": {
"cpu_usage": {
"percpu_usage": [
17871,
4959158,
1646137,
1231652,
11829401,
244656,
369972,
0
],
"total_usage": 20298847,
"usage_in_usermode": 10000000
},
"system_cpu_usage": 24052607520000000,
"throttling_data": {}
},
"memory_stats": {
"limit": 18935443456,
"stats": {}
},
"precpu_stats": {
"cpu_usage": {
"percpu_usage": [
17871,
4959158,
1646137,
1231652,
11829401,
244656,
369972,
0
],
"total_usage": 20298847,
"usage_in_usermode": 10000000
},
"system_cpu_usage": 24052599550000000,
"throttling_data": {}
},
"read": "2016-02-24T11:42:27.472459608-05:00"
}`
stat.Body = ioutil.NopCloser(strings.NewReader(jsonStat))
return stat
}
func testStats() *types.StatsJSON {
stats := &types.StatsJSON{}
stats.Read = time.Now()
stats.Networks = make(map[string]types.NetworkStats)
stats.CPUStats.OnlineCPUs = 2
stats.CPUStats.CPUUsage.PercpuUsage = []uint64{1, 1002, 0, 0}
stats.CPUStats.CPUUsage.UsageInUsermode = 100
stats.CPUStats.CPUUsage.TotalUsage = 500
stats.CPUStats.CPUUsage.UsageInKernelmode = 200
stats.CPUStats.SystemUsage = 100
stats.CPUStats.ThrottlingData.Periods = 1
stats.PreCPUStats.CPUUsage.TotalUsage = 400
stats.PreCPUStats.SystemUsage = 50
stats.MemoryStats.Stats = make(map[string]uint64)
stats.MemoryStats.Stats["active_anon"] = 0
stats.MemoryStats.Stats["active_file"] = 1
stats.MemoryStats.Stats["cache"] = 0
stats.MemoryStats.Stats["hierarchical_memory_limit"] = 0
stats.MemoryStats.Stats["inactive_anon"] = 0
stats.MemoryStats.Stats["inactive_file"] = 3
stats.MemoryStats.Stats["mapped_file"] = 0
stats.MemoryStats.Stats["pgfault"] = 2
stats.MemoryStats.Stats["pgmajfault"] = 0
stats.MemoryStats.Stats["pgpgin"] = 0
stats.MemoryStats.Stats["pgpgout"] = 0
stats.MemoryStats.Stats["rss"] = 0
stats.MemoryStats.Stats["rss_huge"] = 0
stats.MemoryStats.Stats["total_active_anon"] = 0
stats.MemoryStats.Stats["total_active_file"] = 0
stats.MemoryStats.Stats["total_cache"] = 0
stats.MemoryStats.Stats["total_inactive_anon"] = 0
stats.MemoryStats.Stats["total_inactive_file"] = 0
stats.MemoryStats.Stats["total_mapped_file"] = 0
stats.MemoryStats.Stats["total_pgfault"] = 0
stats.MemoryStats.Stats["total_pgmajfault"] = 0
stats.MemoryStats.Stats["total_pgpgin"] = 4
stats.MemoryStats.Stats["total_pgpgout"] = 0
stats.MemoryStats.Stats["total_rss"] = 44
stats.MemoryStats.Stats["total_rss_huge"] = 444
stats.MemoryStats.Stats["total_unevictable"] = 0
stats.MemoryStats.Stats["total_writeback"] = 55
stats.MemoryStats.Stats["unevictable"] = 0
stats.MemoryStats.Stats["writeback"] = 0
stats.MemoryStats.MaxUsage = 1001
stats.MemoryStats.Usage = 1111
stats.MemoryStats.Failcnt = 1
stats.MemoryStats.Limit = 2000
stats.Networks["eth0"] = types.NetworkStats{
RxDropped: 1,
RxBytes: 2,
RxErrors: 3,
TxPackets: 4,
TxDropped: 1,
RxPackets: 2,
TxErrors: 3,
TxBytes: 4,
}
stats.Networks["eth1"] = types.NetworkStats{
RxDropped: 5,
RxBytes: 6,
RxErrors: 7,
TxPackets: 8,
TxDropped: 5,
RxPackets: 6,
TxErrors: 7,
TxBytes: 8,
}
sbr := types.BlkioStatEntry{
Major: 6,
Minor: 0,
Op: "read",
Value: 100,
}
sr := types.BlkioStatEntry{
Major: 6,
Minor: 0,
Op: "write",
Value: 101,
}
sr2 := types.BlkioStatEntry{
Major: 6,
Minor: 1,
Op: "write",
Value: 201,
}
stats.BlkioStats.IoServiceBytesRecursive = append(
stats.BlkioStats.IoServiceBytesRecursive, sbr)
stats.BlkioStats.IoServicedRecursive = append(
stats.BlkioStats.IoServicedRecursive, sr)
stats.BlkioStats.IoServicedRecursive = append(
stats.BlkioStats.IoServicedRecursive, sr2)
return stats
}
func containerStatsWindows() types.ContainerStats {
var stat types.ContainerStats
jsonStat := `
{
"read":"2017-01-11T08:32:46.2413794Z",
"preread":"0001-01-01T00:00:00Z",
"num_procs":64,
"cpu_stats":{
"cpu_usage":{
"total_usage":536718750,
"usage_in_kernelmode":390468750,
"usage_in_usermode":390468750
},
"throttling_data":{
"periods":0,
"throttled_periods":0,
"throttled_time":0
}
},
"precpu_stats":{
"cpu_usage":{
"total_usage":0,
"usage_in_kernelmode":0,
"usage_in_usermode":0
},
"throttling_data":{
"periods":0,
"throttled_periods":0,
"throttled_time":0
}
},
"memory_stats":{
"commitbytes":77160448,
"commitpeakbytes":105000960,
"privateworkingset":59961344
},
"name":"/gt_test_iis",
}`
stat.Body = ioutil.NopCloser(strings.NewReader(jsonStat))
return stat
}
var containerInspect = types.ContainerJSON{
Config: &container.Config{
Env: []string{
"ENVVAR1=loremipsum",
"ENVVAR1FOO=loremipsum",
"ENVVAR2=dolorsitamet",
"ENVVAR3==ubuntu:10.04",
"ENVVAR4",
"ENVVAR5=",
"ENVVAR6= ",
"ENVVAR7=ENVVAR8=ENVVAR9",
"PATH=/bin:/sbin",
},
},
}

View File

@@ -1,151 +0,0 @@
package docker
import (
"context"
"io/ioutil"
"strings"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/registry"
)
type FakeDockerClient struct {
}
func (d FakeDockerClient) Info(ctx context.Context) (types.Info, error) {
env := types.Info{
Containers: 108,
ContainersRunning: 98,
ContainersStopped: 6,
ContainersPaused: 3,
OomKillDisable: false,
SystemTime: "2016-02-24T00:55:09.15073105-05:00",
NEventsListener: 0,
ID: "5WQQ:TFWR:FDNG:OKQ3:37Y4:FJWG:QIKK:623T:R3ME:QTKB:A7F7:OLHD",
Debug: false,
LoggingDriver: "json-file",
KernelVersion: "4.3.0-1-amd64",
IndexServerAddress: "https://index.docker.io/v1/",
MemTotal: 3840757760,
Images: 199,
CPUCfsQuota: true,
Name: "absol",
SwapLimit: false,
IPv4Forwarding: true,
ExperimentalBuild: false,
CPUCfsPeriod: true,
RegistryConfig: &registry.ServiceConfig{
IndexConfigs: map[string]*registry.IndexInfo{
"docker.io": {
Name: "docker.io",
Mirrors: []string{},
Official: true,
Secure: true,
},
}, InsecureRegistryCIDRs: []*registry.NetIPNet{{IP: []byte{127, 0, 0, 0}, Mask: []byte{255, 0, 0, 0}}}, Mirrors: []string{}},
OperatingSystem: "Linux Mint LMDE (containerized)",
BridgeNfIptables: true,
HTTPSProxy: "",
Labels: []string{},
MemoryLimit: false,
DriverStatus: [][2]string{{"Pool Name", "docker-8:1-1182287-pool"}, {"Pool Blocksize", "65.54 kB"}, {"Backing Filesystem", "extfs"}, {"Data file", "/dev/loop0"}, {"Metadata file", "/dev/loop1"}, {"Data Space Used", "17.3 GB"}, {"Data Space Total", "107.4 GB"}, {"Data Space Available", "36.53 GB"}, {"Metadata Space Used", "20.97 MB"}, {"Metadata Space Total", "2.147 GB"}, {"Metadata Space Available", "2.127 GB"}, {"Udev Sync Supported", "true"}, {"Deferred Removal Enabled", "false"}, {"Data loop file", "/var/lib/docker/devicemapper/devicemapper/data"}, {"Metadata loop file", "/var/lib/docker/devicemapper/devicemapper/metadata"}, {"Library Version", "1.02.115 (2016-01-25)"}},
NFd: 19,
HTTPProxy: "",
Driver: "devicemapper",
NGoroutines: 39,
NCPU: 4,
DockerRootDir: "/var/lib/docker",
NoProxy: "",
BridgeNfIP6tables: true,
}
return env, nil
}
func (d FakeDockerClient) ContainerList(octx context.Context, options types.ContainerListOptions) ([]types.Container, error) {
container1 := types.Container{
ID: "e2173b9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296b7dfb",
Names: []string{"/etcd"},
Image: "quay.io/coreos/etcd:v2.2.2",
Command: "/etcd -name etcd0 -advertise-client-urls http://localhost:2379 -listen-client-urls http://0.0.0.0:2379",
Created: 1455941930,
Status: "Up 4 hours",
Ports: []types.Port{
types.Port{
PrivatePort: 7001,
PublicPort: 0,
Type: "tcp",
},
types.Port{
PrivatePort: 4001,
PublicPort: 0,
Type: "tcp",
},
types.Port{
PrivatePort: 2380,
PublicPort: 0,
Type: "tcp",
},
types.Port{
PrivatePort: 2379,
PublicPort: 2379,
Type: "tcp",
IP: "0.0.0.0",
},
},
Labels: map[string]string{
"label1": "test_value_1",
"label2": "test_value_2",
},
SizeRw: 0,
SizeRootFs: 0,
}
container2 := types.Container{
ID: "b7dfbb9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296e2173",
Names: []string{"/etcd2"},
Image: "quay.io:4443/coreos/etcd:v2.2.2",
Command: "/etcd -name etcd2 -advertise-client-urls http://localhost:2379 -listen-client-urls http://0.0.0.0:2379",
Created: 1455941933,
Status: "Up 4 hours",
Ports: []types.Port{
types.Port{
PrivatePort: 7002,
PublicPort: 0,
Type: "tcp",
},
types.Port{
PrivatePort: 4002,
PublicPort: 0,
Type: "tcp",
},
types.Port{
PrivatePort: 2381,
PublicPort: 0,
Type: "tcp",
},
types.Port{
PrivatePort: 2382,
PublicPort: 2382,
Type: "tcp",
IP: "0.0.0.0",
},
},
Labels: map[string]string{
"label1": "test_value_1",
"label2": "test_value_2",
},
SizeRw: 0,
SizeRootFs: 0,
}
containers := []types.Container{container1, container2}
return containers, nil
//#{e6a96c84ca91a5258b7cb752579fb68826b68b49ff957487695cd4d13c343b44 titilambert/snmpsim /bin/sh -c 'snmpsimd --agent-udpv4-endpoint=0.0.0.0:31161 --process-user=root --process-group=user' 1455724831 Up 4 hours [{31161 31161 udp 0.0.0.0}] 0 0 [/snmp] map[]}]2016/02/24 01:05:01 Gathered metrics, (3s interval), from 1 inputs in 1.233836656s
}
func (d FakeDockerClient) ContainerStats(ctx context.Context, containerID string, stream bool) (types.ContainerStats, error) {
var stat types.ContainerStats
jsonStat := `{"read":"2016-02-24T11:42:27.472459608-05:00","memory_stats":{"stats":{},"limit":18935443456},"blkio_stats":{"io_service_bytes_recursive":[{"major":252,"minor":1,"op":"Read","value":753664},{"major":252,"minor":1,"op":"Write"},{"major":252,"minor":1,"op":"Sync"},{"major":252,"minor":1,"op":"Async","value":753664},{"major":252,"minor":1,"op":"Total","value":753664}],"io_serviced_recursive":[{"major":252,"minor":1,"op":"Read","value":26},{"major":252,"minor":1,"op":"Write"},{"major":252,"minor":1,"op":"Sync"},{"major":252,"minor":1,"op":"Async","value":26},{"major":252,"minor":1,"op":"Total","value":26}]},"cpu_stats":{"cpu_usage":{"percpu_usage":[17871,4959158,1646137,1231652,11829401,244656,369972,0],"usage_in_usermode":10000000,"total_usage":20298847},"system_cpu_usage":24052607520000000,"throttling_data":{}},"precpu_stats":{"cpu_usage":{"percpu_usage":[17871,4959158,1646137,1231652,11829401,244656,369972,0],"usage_in_usermode":10000000,"total_usage":20298847},"system_cpu_usage":24052599550000000,"throttling_data":{}}}`
stat.Body = ioutil.NopCloser(strings.NewReader(jsonStat))
return stat, nil
}

View File

@@ -0,0 +1,55 @@
// Helper functions copied from
// https://github.com/docker/cli/blob/master/cli/command/container/stats_helpers.go
package docker
import "github.com/docker/docker/api/types"
func calculateCPUPercentUnix(previousCPU, previousSystem uint64, v *types.StatsJSON) float64 {
var (
cpuPercent = 0.0
// calculate the change for the cpu usage of the container in between readings
cpuDelta = float64(v.CPUStats.CPUUsage.TotalUsage) - float64(previousCPU)
// calculate the change for the entire system between readings
systemDelta = float64(v.CPUStats.SystemUsage) - float64(previousSystem)
onlineCPUs = float64(v.CPUStats.OnlineCPUs)
)
if onlineCPUs == 0.0 {
onlineCPUs = float64(len(v.CPUStats.CPUUsage.PercpuUsage))
}
if systemDelta > 0.0 && cpuDelta > 0.0 {
cpuPercent = (cpuDelta / systemDelta) * onlineCPUs * 100.0
}
return cpuPercent
}
func calculateCPUPercentWindows(v *types.StatsJSON) float64 {
// Max number of 100ns intervals between the previous time read and now
possIntervals := uint64(v.Read.Sub(v.PreRead).Nanoseconds()) // Start with number of ns intervals
possIntervals /= 100 // Convert to number of 100ns intervals
possIntervals *= uint64(v.NumProcs) // Multiply by the number of processors
// Intervals used
intervalsUsed := v.CPUStats.CPUUsage.TotalUsage - v.PreCPUStats.CPUUsage.TotalUsage
// Percentage avoiding divide-by-zero
if possIntervals > 0 {
return float64(intervalsUsed) / float64(possIntervals) * 100.0
}
return 0.00
}
// calculateMemUsageUnixNoCache calculates memory usage of the container.
// Page cache is intentionally excluded to avoid misinterpretation of the output.
func calculateMemUsageUnixNoCache(mem types.MemoryStats) float64 {
return float64(mem.Usage - mem.Stats["cache"])
}
func calculateMemPercentUnixNoCache(limit float64, usedNoCache float64) float64 {
// MemoryStats.Limit will never be 0 unless the container is not running and we haven't
// got any data from cgroup
if limit != 0 {
return usedNoCache / limit * 100.0
}
return 0
}
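
For orientation, here is a minimal sketch of how these helpers fit together when processing one decoded `types.StatsJSON` sample on the Unix code path; the wrapper function name `usagePercentages` is ours, not part of the plugin.

```go
// Sketch: combine the helpers above to derive CPU and memory
// percentages from a single stats sample (Unix code path).
func usagePercentages(s *types.StatsJSON) (cpuPct, memPct float64) {
	// The previous sample's counters live in PreCPUStats.
	cpuPct = calculateCPUPercentUnix(
		s.PreCPUStats.CPUUsage.TotalUsage,
		s.PreCPUStats.SystemUsage,
		s)
	// Memory usage excludes page cache, then is scaled by the limit.
	used := calculateMemUsageUnixNoCache(s.MemoryStats)
	memPct = calculateMemPercentUnixNoCache(float64(s.MemoryStats.Limit), used)
	return cpuPct, memPct
}
```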

View File

@@ -62,7 +62,7 @@ domains. You can read Dovecot's documentation
### Example Output:
```
telegraf -config t.cfg -input-filter dovecot -test
telegraf --config t.cfg --input-filter dovecot --test
* Plugin: dovecot, Collection 1
> dovecot,ip=192.168.0.1,server=dovecot-1.domain.test,type=ip clock_time=0,disk_input=0i,disk_output=0i,invol_cs=0i,last_update="2016-04-08 10:59:47.000208479 +0200 CEST",mail_cache_hits=0i,mail_lookup_attr=0i,mail_lookup_path=0i,mail_read_bytes=0i,mail_read_count=0i,maj_faults=0i,min_faults=0i,num_cmds=12i,num_connected_sessions=0i,num_logins=6i,read_bytes=0i,read_count=0i,reset_timestamp="2016-04-08 10:33:34 +0200 CEST",sys_cpu=0,user_cpu=0,vol_cs=0i,write_bytes=0i,write_count=0i 1460106251633824223
* Plugin: dovecot, Collection 1
@@ -71,4 +71,4 @@ telegraf -config t.cfg -input-filter dovecot -test
> dovecot,domain=domain.test,server=dovecot-1.domain.test,type=domain clock_time=100896189179847.7,disk_input=6467588263936i,disk_output=17933680439296i,invol_cs=1194808498i,last_update="2016-04-08 11:04:08.000377367 +0200 CEST",mail_cache_hits=46455781i,mail_lookup_attr=0i,mail_lookup_path=571490i,mail_read_bytes=79287033067i,mail_read_count=491243i,maj_faults=16992i,min_faults=1278442541i,num_cmds=606005i,num_connected_sessions=6597i,num_logins=166381i,read_bytes=30231409780721i,read_count=1624912080i,reset_timestamp="2016-04-08 10:28:45 +0200 CEST",sys_cpu=156440.372,user_cpu=216676.476,vol_cs=2749291157i,write_bytes=17097106707594i,write_count=944448998i 1460106261639672622
* Plugin: dovecot, Collection 1
> dovecot,server=dovecot-1.domain.test,type=global clock_time=101196971074203.94,disk_input=6493168218112i,disk_output=17978638815232i,invol_cs=1198855447i,last_update="2016-04-08 11:04:13.000379245 +0200 CEST",mail_cache_hits=68192209i,mail_lookup_attr=0i,mail_lookup_path=653861i,mail_read_bytes=86705151847i,mail_read_count=566125i,maj_faults=17208i,min_faults=1286179702i,num_cmds=917469i,num_connected_sessions=8896i,num_logins=174827i,read_bytes=30327690466186i,read_count=1772396430i,reset_timestamp="2016-04-08 10:28:45 +0200 CEST",sys_cpu=157965.692,user_cpu=219337.48,vol_cs=2827615787i,write_bytes=17150837661940i,write_count=992653220i 1460106266642153907
```

View File

@@ -0,0 +1,56 @@
# Fail2ban Input Plugin
The fail2ban plugin gathers the count of failed and banned IP addresses using [fail2ban](https://www.fail2ban.org).
This plugin runs the `fail2ban-client` command, which generally requires root access.
Acquiring the required permissions can be done using several methods:
- Use sudo to run fail2ban-client.
- Run telegraf as root (not recommended).
### Using sudo
You may edit your sudo configuration with the following:
``` sudo
telegraf ALL=(root) NOEXEC: NOPASSWD: /usr/bin/fail2ban-client status, /usr/bin/fail2ban-client status *
```
### Configuration:
``` toml
# Read metrics from fail2ban.
[[inputs.fail2ban]]
## Use sudo to run fail2ban-client
use_sudo = false
```
### Measurements & Fields:
- fail2ban
- failed (integer, count)
- banned (integer, count)
### Tags:
- All measurements have the following tags:
- jail
### Example Output:
```
# fail2ban-client status sshd
Status for the jail: sshd
|- Filter
| |- Currently failed: 5
| |- Total failed: 20
| `- File list: /var/log/secure
`- Actions
|- Currently banned: 2
|- Total banned: 10
`- Banned IP list: 192.168.0.1 192.168.0.2
```
```
fail2ban,jail=sshd failed=5i,banned=2i 1495868667000000000
```

View File

@@ -0,0 +1,131 @@
package fail2ban
import (
"errors"
"fmt"
"os/exec"
"strings"
"strconv"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
)
var (
execCommand = exec.Command // execCommand is used to mock commands in tests.
)
type Fail2ban struct {
path string
UseSudo bool
}
var sampleConfig = `
## Use sudo to run fail2ban-client
use_sudo = false
`
var metricsTargets = []struct {
target string
field string
}{
{
target: "Currently failed:",
field: "failed",
},
{
target: "Currently banned:",
field: "banned",
},
}
func (f *Fail2ban) Description() string {
return "Read metrics from fail2ban."
}
func (f *Fail2ban) SampleConfig() string {
return sampleConfig
}
func (f *Fail2ban) Gather(acc telegraf.Accumulator) error {
if len(f.path) == 0 {
return errors.New("fail2ban-client not found: verify that fail2ban is installed and that fail2ban-client is in your PATH")
}
name := f.path
var arg []string
if f.UseSudo {
name = "sudo"
arg = append(arg, f.path)
}
args := append(arg, "status")
cmd := execCommand(name, args...)
out, err := cmd.Output()
if err != nil {
return fmt.Errorf("failed to run command %s: %s - %s", strings.Join(cmd.Args, " "), err, string(out))
}
lines := strings.Split(string(out), "\n")
const targetString = "Jail list:"
var jails []string
for _, line := range lines {
idx := strings.LastIndex(line, targetString)
if idx < 0 {
// not target line, skip.
continue
}
jails = strings.Split(strings.TrimSpace(line[idx+len(targetString):]), ", ")
break
}
for _, jail := range jails {
fields := make(map[string]interface{})
args := append(arg, "status", jail)
cmd := execCommand(name, args...)
out, err := cmd.Output()
if err != nil {
return fmt.Errorf("failed to run command %s: %s - %s", strings.Join(cmd.Args, " "), err, string(out))
}
lines := strings.Split(string(out), "\n")
for _, line := range lines {
key, value := extractCount(line)
if key != "" {
fields[key] = value
}
}
acc.AddFields("fail2ban", fields, map[string]string{"jail": jail})
}
return nil
}
func extractCount(line string) (string, int) {
for _, metricsTarget := range metricsTargets {
idx := strings.LastIndex(line, metricsTarget.target)
if idx < 0 {
continue
}
ban := strings.TrimSpace(line[idx+len(metricsTarget.target):])
banCount, err := strconv.Atoi(ban)
if err != nil {
return "", -1
}
return metricsTarget.field, banCount
}
return "", -1
}
func init() {
f := Fail2ban{}
path, _ := exec.LookPath("fail2ban-client")
if len(path) > 0 {
f.path = path
}
inputs.Add("fail2ban", func() telegraf.Input {
f := f
return &f
})
}
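
To make the parsing concrete, here is a short worked sketch of `extractCount` applied to lines from the sample `fail2ban-client status sshd` output in the README above; it is an illustration, not part of the plugin.

```go
// Sketch: extractCount pulls a field name and count out of one
// status line, or returns ("", -1) when the line has no target metric.
key, value := extractCount("|  |- Currently failed:\t5")
// key == "failed", value == 5
key, value = extractCount("   |- Currently banned:\t2")
// key == "banned", value == 2
key, value = extractCount("`- Banned IP list: 192.168.0.1 192.168.0.2")
// no metric on this line: key == "", value == -1
```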

View File

@@ -0,0 +1,125 @@
package fail2ban
import (
"fmt"
"os"
"os/exec"
"strings"
"testing"
"github.com/influxdata/telegraf/testutil"
)
// By all rights we should use a raw string literal here, but the string contains a backquote.
var execStatusOutput = "Status\n" +
"|- Number of jail:\t3\n" +
"`- Jail list:\tdovecot, postfix, sshd"
var execStatusDovecotOutput = "Status for the jail: dovecot\n" +
"|- Filter\n" +
"| |- Currently failed:\t11\n" +
"| |- Total failed:\t22\n" +
"| `- File list:\t/var/log/maillog\n" +
"`- Actions\n" +
" |- Currently banned:\t0\n" +
" |- Total banned:\t100\n" +
" `- Banned IP list:"
var execStatusPostfixOutput = "Status for the jail: postfix\n" +
"|- Filter\n" +
"| |- Currently failed:\t4\n" +
"| |- Total failed:\t10\n" +
"| `- File list:\t/var/log/maillog\n" +
"`- Actions\n" +
" |- Currently banned:\t3\n" +
" |- Total banned:\t60\n" +
" `- Banned IP list:\t192.168.10.1 192.168.10.3"
var execStatusSshdOutput = "Status for the jail: sshd\n" +
"|- Filter\n" +
"| |- Currently failed:\t0\n" +
"| |- Total failed:\t5\n" +
"| `- File list:\t/var/log/secure\n" +
"`- Actions\n" +
" |- Currently banned:\t2\n" +
" |- Total banned:\t50\n" +
" `- Banned IP list:\t192.168.0.1 192.168.1.1"
func TestGather(t *testing.T) {
f := Fail2ban{
path: "/usr/bin/fail2ban-client",
}
execCommand = fakeExecCommand
defer func() { execCommand = exec.Command }()
var acc testutil.Accumulator
err := f.Gather(&acc)
if err != nil {
t.Fatal(err)
}
fields1 := map[string]interface{}{
"banned": 2,
"failed": 0,
}
tags1 := map[string]string{
"jail": "sshd",
}
fields2 := map[string]interface{}{
"banned": 3,
"failed": 4,
}
tags2 := map[string]string{
"jail": "postfix",
}
fields3 := map[string]interface{}{
"banned": 0,
"failed": 11,
}
tags3 := map[string]string{
"jail": "dovecot",
}
acc.AssertContainsTaggedFields(t, "fail2ban", fields1, tags1)
acc.AssertContainsTaggedFields(t, "fail2ban", fields2, tags2)
acc.AssertContainsTaggedFields(t, "fail2ban", fields3, tags3)
}
func fakeExecCommand(command string, args ...string) *exec.Cmd {
cs := []string{"-test.run=TestHelperProcess", "--", command}
cs = append(cs, args...)
cmd := exec.Command(os.Args[0], cs...)
cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"}
return cmd
}
func TestHelperProcess(t *testing.T) {
if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
return
}
args := os.Args
cmd, args := args[3], args[4:]
if !strings.HasSuffix(cmd, "fail2ban-client") {
fmt.Fprint(os.Stdout, "command not found")
os.Exit(1)
}
if len(args) == 1 && args[0] == "status" {
fmt.Fprint(os.Stdout, execStatusOutput)
os.Exit(0)
} else if len(args) == 2 && args[0] == "status" {
if args[1] == "sshd" {
fmt.Fprint(os.Stdout, execStatusSshdOutput)
os.Exit(0)
} else if args[1] == "postfix" {
fmt.Fprint(os.Stdout, execStatusPostfixOutput)
os.Exit(0)
} else if args[1] == "dovecot" {
fmt.Fprint(os.Stdout, execStatusDovecotOutput)
os.Exit(0)
}
}
fmt.Fprint(os.Stdout, "invalid argument")
os.Exit(1)
}

View File

@@ -30,7 +30,7 @@ The filestat plugin gathers metrics about file existence, size, and other stats.
### Example Output:
```
$ telegraf -config /etc/telegraf/telegraf.conf -input-filter filestat -test
$ telegraf --config /etc/telegraf/telegraf.conf --input-filter filestat --test
* Plugin: filestat, Collection 1
> filestat,file=/tmp/foo/bar,host=tyrion exists=0i 1461203374493128216
> filestat,file=/Users/sparrc/ws/telegraf.conf,host=tyrion exists=1i,size=47894i 1461203374493199335

View File

@@ -0,0 +1,64 @@
# Fluentd Input Plugin
The fluentd plugin gathers metrics from the plugin endpoint provided by the [in_monitor plugin](http://docs.fluentd.org/v0.12/articles/monitoring).
This plugin understands data provided by the /api/plugins.json resource (/api/config.json is not covered).
You might need to adjust your fluentd configuration to reduce series cardinality if your fluentd restarts frequently: every time fluentd starts, the `plugin_id` value is assigned a new random value.
According to the [fluentd documentation](http://docs.fluentd.org/v0.12/articles/config-file), you can add an `@id` parameter to each plugin to avoid this behaviour and define a custom `plugin_id`.
Example configuration with the `@id` parameter for the http plugin:
```
<source>
@type http
@id http
port 8888
</source>
```
### Configuration:
```toml
# Read metrics exposed by fluentd in_monitor plugin
[[inputs.fluentd]]
## This plugin reads information exposed by fluentd (using /api/plugins.json endpoint).
##
## Endpoint:
## - only one URI is allowed
## - https is not supported
endpoint = "http://localhost:24220/api/plugins.json"
## Define which plugins have to be excluded (based on "type" field - e.g. monitor_agent)
exclude = [
"monitor_agent",
"dummy",
]
```
### Measurements & Fields:
Fields may vary depending on the type of the plugin
- fluentd
- retry_count (float, unit)
- buffer_queue_length (float, unit)
- buffer_total_queued_size (float, unit)
### Tags:
- All measurements have the following tags:
- plugin_id (unique plugin id)
- plugin_type (type of the plugin e.g. s3)
- plugin_category (plugin category e.g. output)
### Example Output:
```
$ telegraf --config fluentd.conf --input-filter fluentd --test
* Plugin: inputs.fluentd, Collection 1
> fluentd,host=T440s,plugin_id=object:9f748c,plugin_category=input,plugin_type=dummy buffer_total_queued_size=0,buffer_queue_length=0,retry_count=0 1492006105000000000
> fluentd,plugin_category=input,plugin_type=dummy,host=T440s,plugin_id=object:8da98c buffer_queue_length=0,retry_count=0,buffer_total_queued_size=0 1492006105000000000
> fluentd,plugin_id=object:820190,plugin_category=input,plugin_type=monitor_agent,host=T440s retry_count=0,buffer_total_queued_size=0,buffer_queue_length=0 1492006105000000000
> fluentd,plugin_id=object:c5e054,plugin_category=output,plugin_type=stdout,host=T440s buffer_queue_length=0,retry_count=0,buffer_total_queued_size=0 1492006105000000000
> fluentd,plugin_type=s3,host=T440s,plugin_id=object:bd7a90,plugin_category=output buffer_queue_length=0,retry_count=0,buffer_total_queued_size=0 1492006105000000000
```

View File

@@ -0,0 +1,173 @@
package fluentd
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
)
const (
measurement = "fluentd"
description = "Read metrics exposed by fluentd in_monitor plugin"
sampleConfig = `
## This plugin reads information exposed by fluentd (using /api/plugins.json endpoint).
##
## Endpoint:
## - only one URI is allowed
## - https is not supported
endpoint = "http://localhost:24220/api/plugins.json"
## Define which plugins have to be excluded (based on "type" field - e.g. monitor_agent)
exclude = [
"monitor_agent",
"dummy",
]
`
)
// Fluentd - plugin main structure
type Fluentd struct {
Endpoint string
Exclude []string
client *http.Client
}
type endpointInfo struct {
Payload []pluginData `json:"plugins"`
}
type pluginData struct {
PluginID string `json:"plugin_id"`
PluginType string `json:"type"`
PluginCategory string `json:"plugin_category"`
RetryCount *float64 `json:"retry_count"`
BufferQueueLength *float64 `json:"buffer_queue_length"`
BufferTotalQueuedSize *float64 `json:"buffer_total_queued_size"`
}
// parse JSON from fluentd Endpoint
// Parameters:
// data: unprocessed json received from endpoint
//
// Returns:
// pluginData: slice that contains parsed plugins
// error: error that may have occurred
func parse(data []byte) (datapointArray []pluginData, err error) {
var endpointData endpointInfo
if err = json.Unmarshal(data, &endpointData); err != nil {
err = fmt.Errorf("Processing JSON structure")
return
}
for _, point := range endpointData.Payload {
datapointArray = append(datapointArray, point)
}
return
}
// Description - display description
func (h *Fluentd) Description() string { return description }
// SampleConfig - generate configuration
func (h *Fluentd) SampleConfig() string { return sampleConfig }
// Gather - Main code responsible for gathering, processing and creating metrics
func (h *Fluentd) Gather(acc telegraf.Accumulator) error {
_, err := url.Parse(h.Endpoint)
if err != nil {
return fmt.Errorf("Invalid URL \"%s\"", h.Endpoint)
}
if h.client == nil {
tr := &http.Transport{
ResponseHeaderTimeout: time.Duration(3 * time.Second),
}
client := &http.Client{
Transport: tr,
Timeout: time.Duration(4 * time.Second),
}
h.client = client
}
resp, err := h.client.Get(h.Endpoint)
if err != nil {
return fmt.Errorf("Unable to perform HTTP client GET on \"%s\": %s", h.Endpoint, err)
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return fmt.Errorf("Unable to read the HTTP body \"%s\": %s", string(body), err)
}
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("http status ok not met")
}
dataPoints, err := parse(body)
if err != nil {
return fmt.Errorf("Problem with parsing")
}
// Go through all plugins one by one
for _, p := range dataPoints {
skip := false
// Check if this specific type was excluded in configuration
for _, exclude := range h.Exclude {
if exclude == p.PluginType {
skip = true
}
}
// If not, create new metric and add it to Accumulator
if !skip {
tmpFields := make(map[string]interface{})
tmpTags := map[string]string{
"plugin_id": p.PluginID,
"plugin_category": p.PluginCategory,
"plugin_type": p.PluginType,
}
if p.BufferQueueLength != nil {
tmpFields["buffer_queue_length"] = *p.BufferQueueLength
}
if p.RetryCount != nil {
tmpFields["retry_count"] = *p.RetryCount
}
if p.BufferTotalQueuedSize != nil {
tmpFields["buffer_total_queued_size"] = *p.BufferTotalQueuedSize
}
if !((p.BufferQueueLength == nil) && (p.RetryCount == nil) && (p.BufferTotalQueuedSize == nil)) {
acc.AddFields(measurement, tmpFields, tmpTags)
}
}
}
return nil
}
func init() {
inputs.Add("fluentd", func() telegraf.Input { return &Fluentd{} })
}
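
The pointer-typed fields on `pluginData` are what let `Gather` distinguish a key that is absent from the endpoint JSON from one that is present with value 0. A minimal sketch, assuming the package context above (the sample JSON is ours):

```go
// Sketch: nil pointer == key absent, non-nil pointer == key present.
var p pluginData
_ = json.Unmarshal([]byte(
	`{"plugin_id":"object:abc","type":"s3","plugin_category":"output","retry_count":0}`,
), &p)
// p.RetryCount != nil and *p.RetryCount == 0: the field is emitted.
// p.BufferQueueLength == nil: the field is skipped by Gather.
```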

View File

@@ -0,0 +1,163 @@
package fluentd
import (
"fmt"
"net"
"net/http"
"net/http/httptest"
"net/url"
"testing"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
)
// sampleJSON from fluentd version '0.14.9'
const sampleJSON = `
{
"plugins": [
{
"plugin_id": "object:f48698",
"plugin_category": "input",
"type": "dummy",
"config": {
"@type": "dummy",
"@log_level": "info",
"tag": "stdout.page.node",
"rate": "",
"dummy": "{\"hello\":\"world_from_first_dummy\"}",
"auto_increment_key": "id1"
},
"output_plugin": false,
"retry_count": null
},
{
"plugin_id": "object:e27138",
"plugin_category": "input",
"type": "dummy",
"config": {
"@type": "dummy",
"@log_level": "info",
"tag": "stdout.superproject.supercontainer",
"rate": "",
"dummy": "{\"hello\":\"world_from_second_dummy\"}",
"auto_increment_key": "id1"
},
"output_plugin": false,
"retry_count": null
},
{
"plugin_id": "object:d74060",
"plugin_category": "input",
"type": "monitor_agent",
"config": {
"@type": "monitor_agent",
"@log_level": "error",
"bind": "0.0.0.0",
"port": "24220"
},
"output_plugin": false,
"retry_count": null
},
{
"plugin_id": "object:11a5e2c",
"plugin_category": "output",
"type": "stdout",
"config": {
"@type": "stdout"
},
"output_plugin": true,
"retry_count": 0
},
{
"plugin_id": "object:11237ec",
"plugin_category": "output",
"type": "s3",
"config": {
"@type": "s3",
"@log_level": "info",
"aws_key_id": "xxxxxx",
"aws_sec_key": "xxxxxx",
"s3_bucket": "bucket",
"s3_endpoint": "http://mock:4567",
"path": "logs/%Y%m%d_%H/${tag[1]}/",
"time_slice_format": "%M",
"s3_object_key_format": "%{path}%{time_slice}_%{hostname}_%{index}_%{hex_random}.%{file_extension}",
"store_as": "gzip"
},
"output_plugin": true,
"buffer_queue_length": 0,
"buffer_total_queued_size": 0,
"retry_count": 0
}
]
}
`
var (
zero float64
err error
pluginOutput []pluginData
expectedOutput = []pluginData{
// {"object:f48698", "dummy", "input", nil, nil, nil},
// {"object:e27138", "dummy", "input", nil, nil, nil},
// {"object:d74060", "monitor_agent", "input", nil, nil, nil},
{"object:11a5e2c", "stdout", "output", (*float64)(&zero), nil, nil},
{"object:11237ec", "s3", "output", (*float64)(&zero), (*float64)(&zero), (*float64)(&zero)},
}
fluentdTest = &Fluentd{
Endpoint: "http://localhost:8081",
}
)
func Test_parse(t *testing.T) {
t.Log("Testing parser function")
_, err := parse([]byte(sampleJSON))
if err != nil {
t.Error(err)
}
}
func Test_Gather(t *testing.T) {
t.Logf("Start HTTP mock (%s) with sampleJSON", fluentdTest.Endpoint)
ts := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
fmt.Fprintf(w, "%s", string(sampleJSON))
}))
requestURL, err := url.Parse(fluentdTest.Endpoint)
ts.Listener, _ = net.Listen("tcp", fmt.Sprintf("%s:%s", requestURL.Hostname(), requestURL.Port()))
ts.Start()
defer ts.Close()
var acc testutil.Accumulator
err = fluentdTest.Gather(&acc)
if err != nil {
t.Error(err)
}
if !acc.HasMeasurement("fluentd") {
t.Errorf("acc.HasMeasurement: expected fluentd")
}
assert.Equal(t, expectedOutput[0].PluginID, acc.Metrics[0].Tags["plugin_id"])
assert.Equal(t, expectedOutput[0].PluginType, acc.Metrics[0].Tags["plugin_type"])
assert.Equal(t, expectedOutput[0].PluginCategory, acc.Metrics[0].Tags["plugin_category"])
assert.Equal(t, *expectedOutput[0].RetryCount, acc.Metrics[0].Fields["retry_count"])
assert.Equal(t, expectedOutput[1].PluginID, acc.Metrics[1].Tags["plugin_id"])
assert.Equal(t, expectedOutput[1].PluginType, acc.Metrics[1].Tags["plugin_type"])
assert.Equal(t, expectedOutput[1].PluginCategory, acc.Metrics[1].Tags["plugin_category"])
assert.Equal(t, *expectedOutput[1].RetryCount, acc.Metrics[1].Fields["retry_count"])
assert.Equal(t, *expectedOutput[1].BufferQueueLength, acc.Metrics[1].Fields["buffer_queue_length"])
assert.Equal(t, *expectedOutput[1].BufferTotalQueuedSize, acc.Metrics[1].Fields["buffer_total_queued_size"])
}

View File

@@ -1,5 +1,3 @@
// +build linux
package hddtemp
import (

View File

@@ -1,3 +0,0 @@
// +build !linux
package hddtemp

View File

@@ -8,25 +8,26 @@ This input plugin will test HTTP/HTTPS connections.
# HTTP/HTTPS request given an address a method and a timeout
[[inputs.http_response]]
## Server address (default http://localhost)
address = "http://github.com"
# address = "http://localhost"
## Set response_timeout (default 5 seconds)
response_timeout = "5s"
# response_timeout = "5s"
## HTTP Request Method
method = "GET"
# method = "GET"
## Whether to follow redirects from the server (defaults to false)
follow_redirects = true
## HTTP Request Headers (all values must be strings)
# [inputs.http_response.headers]
# Host = "github.com"
# follow_redirects = false
## Optional HTTP Request Body
# body = '''
# {'fake':'data'}
# '''
## Optional substring or regex match in body of the response
## response_string_match = "\"service_status\": \"up\""
## response_string_match = "ok"
## response_string_match = "\".*_status\".?:.?\"up\""
# response_string_match = "\"service_status\": \"up\""
# response_string_match = "ok"
# response_string_match = "\".*_status\".?:.?\"up\""
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
@@ -34,6 +35,10 @@ This input plugin will test HTTP/HTTPS connections.
# ssl_key = "/etc/telegraf/key.pem"
## Use SSL but skip chain & host verification
# insecure_skip_verify = false
## HTTP Request Headers (all values must be strings)
# [inputs.http_response.headers]
# Host = "github.com"
```
### Measurements & Fields:
@@ -41,6 +46,7 @@ This input plugin will test HTTP/HTTPS connections.
- http_response
- response_time (float, seconds)
- http_response_code (int) #The code received
- result_type (string) # success, timeout, response_string_mismatch, connection_failed
### Tags:
@@ -51,6 +57,5 @@ This input plugin will test HTTP/HTTPS connections.
### Example Output:
```
$ ./telegraf -config telegraf.conf -input-filter http_response -test
http_response,method=GET,server=http://www.github.com http_response_code=200i,response_time=6.223266528 1459419354977857955
```

View File

@@ -5,6 +5,7 @@ import (
"io"
"io/ioutil"
"log"
"net"
"net/http"
"net/url"
"regexp"
@@ -46,25 +47,26 @@ func (h *HTTPResponse) Description() string {
var sampleConfig = `
## Server address (default http://localhost)
address = "http://github.com"
# address = "http://localhost"
## Set response_timeout (default 5 seconds)
response_timeout = "5s"
# response_timeout = "5s"
## HTTP Request Method
method = "GET"
# method = "GET"
## Whether to follow redirects from the server (defaults to false)
follow_redirects = true
## HTTP Request Headers (all values must be strings)
# [inputs.http_response.headers]
# Host = "github.com"
# follow_redirects = false
## Optional HTTP Request Body
# body = '''
# {'fake':'data'}
# '''
## Optional substring or regex match in body of the response
## response_string_match = "\"service_status\": \"up\""
## response_string_match = "ok"
## response_string_match = "\".*_status\".?:.?\"up\""
# response_string_match = "\"service_status\": \"up\""
# response_string_match = "ok"
# response_string_match = "\".*_status\".?:.?\"up\""
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
@@ -72,6 +74,10 @@ var sampleConfig = `
# ssl_key = "/etc/telegraf/key.pem"
## Use SSL but skip chain & host verification
# insecure_skip_verify = false
## HTTP Request Headers (all values must be strings)
# [inputs.http_response.headers]
# Host = "github.com"
`
// SampleConfig returns the plugin SampleConfig
@@ -92,6 +98,7 @@ func (h *HTTPResponse) createHttpClient() (*http.Client, error) {
}
client := &http.Client{
Transport: &http.Transport{
Proxy: http.ProxyFromEnvironment,
DisableKeepAlives: true,
TLSClientConfig: tlsCfg,
},
@@ -130,15 +137,21 @@ func (h *HTTPResponse) httpGather() (map[string]interface{}, error) {
// Start Timer
start := time.Now()
resp, err := h.client.Do(request)
if err != nil {
if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
fields["result_type"] = "timeout"
return fields, nil
}
fields["result_type"] = "connection_failed"
if h.FollowRedirects {
return nil, err
return fields, nil
}
if urlError, ok := err.(*url.Error); ok &&
urlError.Err == ErrRedirectAttempted {
err = nil
} else {
return nil, err
return fields, nil
}
}
defer func() {
@@ -157,7 +170,7 @@ func (h *HTTPResponse) httpGather() (map[string]interface{}, error) {
h.compiledStringMatch = regexp.MustCompile(h.ResponseStringMatch)
if err != nil {
log.Printf("E! Failed to compile regular expression %s : %s", h.ResponseStringMatch, err)
fields["response_string_match"] = 0
fields["result_type"] = "response_string_mismatch"
return fields, nil
}
}
@@ -165,16 +178,20 @@ func (h *HTTPResponse) httpGather() (map[string]interface{}, error) {
bodyBytes, err := ioutil.ReadAll(resp.Body)
if err != nil {
log.Printf("E! Failed to read body of HTTP Response : %s", err)
fields["result_type"] = "response_string_mismatch"
fields["response_string_match"] = 0
return fields, nil
}
if h.compiledStringMatch.Match(bodyBytes) {
fields["result_type"] = "success"
fields["response_string_match"] = 1
} else {
fields["result_type"] = "response_string_mismatch"
fields["response_string_match"] = 0
}
} else {
fields["result_type"] = "success"
}
return fields, nil
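
For reference, the error classification introduced above boils down to this pattern; a standalone sketch using only the standard library, with a helper name of our own choosing:

```go
// Sketch: map a failed client request to the result_type values
// recorded by httpGather.
func classifyError(err error) string {
	if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
		return "timeout"
	}
	return "connection_failed"
}
```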

View File

@@ -106,6 +106,9 @@ func TestFields(t *testing.T) {
value, ok := acc.IntField("http_response", "http_response_code")
require.True(t, ok)
require.Equal(t, http.StatusOK, value)
response_value, ok := acc.StringField("http_response", "result_type")
require.True(t, ok)
require.Equal(t, "success", response_value)
}
func TestRedirects(t *testing.T) {
@@ -143,10 +146,13 @@ func TestRedirects(t *testing.T) {
}
acc = testutil.Accumulator{}
err = h.Gather(&acc)
require.Error(t, err)
require.NoError(t, err)
value, ok = acc.IntField("http_response", "http_response_code")
require.False(t, ok)
response_value, ok := acc.StringField("http_response", "result_type")
require.True(t, ok)
require.Equal(t, "connection_failed", response_value)
}
func TestMethod(t *testing.T) {
@@ -277,6 +283,9 @@ func TestStringMatch(t *testing.T) {
value, ok = acc.IntField("http_response", "response_string_match")
require.True(t, ok)
require.Equal(t, 1, value)
response_value, ok := acc.StringField("http_response", "result_type")
require.True(t, ok)
require.Equal(t, "success", response_value)
_, ok = acc.FloatField("http_response", "response_time")
require.True(t, ok)
}
@@ -307,6 +316,9 @@ func TestStringMatchJson(t *testing.T) {
value, ok = acc.IntField("http_response", "response_string_match")
require.True(t, ok)
require.Equal(t, 1, value)
response_value, ok := acc.StringField("http_response", "result_type")
require.True(t, ok)
require.Equal(t, "success", response_value)
_, ok = acc.FloatField("http_response", "response_time")
require.True(t, ok)
}
@@ -338,6 +350,9 @@ func TestStringMatchFail(t *testing.T) {
value, ok = acc.IntField("http_response", "response_string_match")
require.True(t, ok)
require.Equal(t, 0, value)
response_value, ok := acc.StringField("http_response", "result_type")
require.True(t, ok)
require.Equal(t, "response_string_mismatch", response_value)
_, ok = acc.FloatField("http_response", "response_time")
require.True(t, ok)
}
@@ -355,7 +370,7 @@ func TestTimeout(t *testing.T) {
Address: ts.URL + "/twosecondnap",
Body: "{ 'test': 'data'}",
Method: "GET",
ResponseTimeout: internal.Duration{Duration: time.Millisecond},
ResponseTimeout: internal.Duration{Duration: time.Second},
Headers: map[string]string{
"Content-Type": "application/json",
},
@@ -365,6 +380,11 @@ func TestTimeout(t *testing.T) {
err := h.Gather(&acc)
require.NoError(t, err)
ok := acc.HasIntField("http_response", "http_response_code")
_, ok := acc.IntField("http_response", "http_response_code")
require.False(t, ok)
response_value, ok := acc.StringField("http_response", "result_type")
require.True(t, ok)
require.Equal(t, "timeout", response_value)
_, ok = acc.FloatField("http_response", "response_time")
require.False(t, ok)
}

View File

@@ -1,6 +1,7 @@
package httpjson
import (
"bytes"
"fmt"
"io/ioutil"
"net/http"
@@ -15,6 +16,10 @@ import (
"github.com/influxdata/telegraf/plugins/parsers"
)
var (
utf8BOM = []byte("\xef\xbb\xbf")
)
// HttpJson struct
type HttpJson struct {
Name string
@@ -170,7 +175,6 @@ func (h *HttpJson) gatherServer(
serverURL string,
) error {
resp, responseTime, err := h.sendRequest(serverURL)
if err != nil {
return err
}
@@ -266,6 +270,7 @@ func (h *HttpJson) sendRequest(serverURL string) (string, float64, error) {
if err != nil {
return string(body), responseTime, err
}
body = bytes.TrimPrefix(body, utf8BOM)
// Process response
if resp.StatusCode != http.StatusOK {
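
The BOM handling is a simple prefix strip; a tiny self-contained sketch, using the same payload as the `jsonBOM` test fixture below:

```go
// Sketch: remove a UTF-8 byte-order mark before JSON parsing.
utf8BOM := []byte("\xef\xbb\xbf")
body := []byte("\xef\xbb\xbf[{\"value\":17}]")
body = bytes.TrimPrefix(body, utf8BOM)
// body now begins with '[' and unmarshals as ordinary JSON.
```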

View File

@@ -477,15 +477,13 @@ func TestHttpJsonBadJson(t *testing.T) {
assert.Equal(t, 0, acc.NFields())
}
// Test response to empty string as response objectgT
// Test response to empty string as response object
func TestHttpJsonEmptyResponse(t *testing.T) {
httpjson := genMockHttpJson(empty, 200)
var acc testutil.Accumulator
err := acc.GatherError(httpjson[0].Gather)
assert.Error(t, err)
assert.Equal(t, 0, acc.NFields())
assert.NoError(t, err)
}
// Test that the proper values are ignored or collected
@@ -560,3 +558,18 @@ func TestHttpJsonArray200Tags(t *testing.T) {
}
}
}
var jsonBOM = []byte("\xef\xbb\xbf[{\"value\":17}]")
// TestHttpJsonBOM tests that UTF-8 JSON with a BOM can be parsed
func TestHttpJsonBOM(t *testing.T) {
httpjson := genMockHttpJson(string(jsonBOM), 200)
for _, service := range httpjson {
if service.Name == "other_webapp" {
var acc testutil.Accumulator
err := acc.GatherError(service.Gather)
require.NoError(t, err)
}
}
}

View File

@@ -19,6 +19,16 @@ InfluxDB-formatted endpoints. See below for more information.
urls = [
"http://localhost:8086/debug/vars"
]
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
## Use SSL but skip chain & host verification
# insecure_skip_verify = false
## http request & header timeout
timeout = "5s"
```
### Measurements & Fields
@@ -64,7 +74,7 @@ InfluxDB-formatted endpoints. See below for more information.
### Example Output:
```
telegraf -config ~/ws/telegraf.conf -input-filter influxdb -test
telegraf --config ~/ws/telegraf.conf --input-filter influxdb --test
* Plugin: influxdb, Collection 1
> influxdb_database,database=_internal,host=tyrion,url=http://localhost:8086/debug/vars numMeasurements=10,numSeries=29 1463590500247354636
> influxdb_httpd,bind=:8086,host=tyrion,url=http://localhost:8086/debug/vars req=7,reqActive=1,reqDurationNs=14227734 1463590500247354636

View File

@@ -15,6 +15,14 @@ import (
type InfluxDB struct {
URLs []string `toml:"urls"`
// Path to CA file
SSLCA string `toml:"ssl_ca"`
// Path to host cert file
SSLCert string `toml:"ssl_cert"`
// Path to cert key file
SSLKey string `toml:"ssl_key"`
// Use SSL but skip chain & host verification
InsecureSkipVerify bool
Timeout internal.Duration
@@ -37,6 +45,13 @@ func (*InfluxDB) SampleConfig() string {
"http://localhost:8086/debug/vars"
]
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
## Use SSL but skip chain & host verification
# insecure_skip_verify = false
## http request & header timeout
timeout = "5s"
`
@@ -48,9 +63,15 @@ func (i *InfluxDB) Gather(acc telegraf.Accumulator) error {
}
if i.client == nil {
tlsCfg, err := internal.GetTLSConfig(
i.SSLCert, i.SSLKey, i.SSLCA, i.InsecureSkipVerify)
if err != nil {
return err
}
i.client = &http.Client{
Transport: &http.Transport{
ResponseHeaderTimeout: i.Timeout.Duration,
TLSClientConfig: tlsCfg,
},
Timeout: i.Timeout.Duration,
}

View File

@@ -26,7 +26,7 @@ There are two measurements reported by this plugin.
### Example Output
```
./telegraf -config ~/interrupts_config.conf -test
./telegraf --config ~/interrupts_config.conf --test
* Plugin: inputs.interrupts, Collection 1
> interrupts,irq=0,type=IO-APIC,device=2-edge\ timer,host=hostname CPU0=23i,total=23i 1489346531000000000
> interrupts,irq=1,host=hostname,type=IO-APIC,device=1-edge\ i8042 CPU0=9i,total=9i 1489346531000000000

View File

@@ -43,6 +43,13 @@ The `server` tag will be made available when retrieving stats from remote server
## if no servers are specified, local machine sensor stats will be queried
##
# servers = ["USERID:PASSW0RD@lan(192.168.1.1)"]
## Recommended: use metric 'interval' that is a multiple of 'timeout' to avoid
## gaps or overlap in pulled data
interval = "30s"
## Timeout for the ipmitool command to complete. Default is 20 seconds.
timeout = "20s"
```
## Output

View File

@@ -19,6 +19,7 @@ var (
type Ipmi struct {
Path string
Servers []string
Timeout internal.Duration
}
var sampleConfig = `
@@ -33,6 +34,13 @@ var sampleConfig = `
## if no servers are specified, local machine sensor stats will be queried
##
# servers = ["USERID:PASSW0RD@lan(192.168.1.1)"]
## Recommended: use metric 'interval' that is a multiple of 'timeout' to avoid
## gaps or overlap in pulled data
interval = "30s"
## Timeout for the ipmitool command to complete
timeout = "20s"
`
func (m *Ipmi) SampleConfig() string {
@@ -78,7 +86,7 @@ func (m *Ipmi) parse(acc telegraf.Accumulator, server string) error {
opts = append(opts, "sdr")
cmd := execCommand(m.Path, opts...)
out, err := internal.CombinedOutputTimeout(cmd, time.Second*5)
out, err := internal.CombinedOutputTimeout(cmd, m.Timeout.Duration)
if err != nil {
return fmt.Errorf("failed to run command %s: %s - %s", strings.Join(cmd.Args, " "), err, string(out))
}
@@ -152,6 +160,7 @@ func init() {
if len(path) > 0 {
m.Path = path
}
m.Timeout = internal.Duration{Duration: time.Second * 20}
inputs.Add("ipmi_sensor", func() telegraf.Input {
m := m
return &m
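
A minimal sketch of the bounded command execution the plugin now performs, using the configurable timeout instead of the old hard-coded 5 seconds (the surrounding Gather-style function is omitted):

```go
// Sketch: run ipmitool with the configured timeout rather than
// a fixed 5-second limit.
cmd := execCommand(m.Path, "sdr")
out, err := internal.CombinedOutputTimeout(cmd, m.Timeout.Duration)
if err != nil {
	return fmt.Errorf("failed to run command %s: %s - %s",
		strings.Join(cmd.Args, " "), err, string(out))
}
```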

View File

@@ -5,7 +5,9 @@ import (
"os"
"os/exec"
"testing"
"time"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -15,6 +17,7 @@ func TestGather(t *testing.T) {
i := &Ipmi{
Servers: []string{"USERID:PASSW0RD@lan(192.168.1.1)"},
Path: "ipmitool",
Timeout: internal.Duration{Duration: time.Second * 5},
}
// overwriting exec commands with mock commands
execCommand = fakeExecCommand
@@ -118,7 +121,8 @@ func TestGather(t *testing.T) {
}
i = &Ipmi{
Path: "ipmitool",
Path: "ipmitool",
Timeout: internal.Duration{Duration: time.Second * 5},
}
err = acc.GatherError(i.Gather)

View File

@@ -78,7 +78,7 @@ pkts bytes target prot opt in out source destination
```
```
$ ./telegraf -config telegraf.conf -input-filter iptables -test
$ ./telegraf --config telegraf.conf --input-filter iptables --test
iptables,table=filter,chain=INPUT,ruleid=ssh pkts=100i,bytes=1024i 1453831884664956455
iptables,table=filter,chain=INPUT,ruleid=httpd pkts=42i,bytes=2048i 1453831884664956455
```

View File

@@ -95,7 +95,7 @@ const measurement = "iptables"
var errParse = errors.New("Cannot parse iptables list information")
var chainNameRe = regexp.MustCompile(`^Chain\s+(\S+)`)
var fieldsHeaderRe = regexp.MustCompile(`^\s*pkts\s+bytes\s+`)
var valuesRe = regexp.MustCompile(`^\s*([0-9]+)\s+([0-9]+)\s+.*?(/\*\s(.*)\s\*/)?$`)
var valuesRe = regexp.MustCompile(`^\s*(\d+)\s+(\d+)\s+.*?/\*\s*(.+?)\s*\*/\s*`)
func (ipt *Iptables) parseAndGather(data string, acc telegraf.Accumulator) error {
lines := strings.Split(data, "\n")
@@ -110,17 +110,27 @@ func (ipt *Iptables) parseAndGather(data string, acc telegraf.Accumulator) error
return errParse
}
for _, line := range lines[2:] {
mv := valuesRe.FindAllStringSubmatch(line, -1)
// best effort : if line does not match or rule is not commented forget about it
if len(mv) == 0 || len(mv[0]) != 5 || mv[0][4] == "" {
matches := valuesRe.FindStringSubmatch(line)
if len(matches) != 4 {
continue
}
tags := map[string]string{"table": ipt.Table, "chain": mchain[1], "ruleid": mv[0][4]}
pkts := matches[1]
bytes := matches[2]
comment := matches[3]
tags := map[string]string{"table": ipt.Table, "chain": mchain[1], "ruleid": comment}
fields := make(map[string]interface{})
// since parse error is already caught by the regexp,
// we never enter the error case here => no error check (but still need a test to cover the case)
fields["pkts"], _ = strconv.ParseUint(mv[0][1], 10, 64)
fields["bytes"], _ = strconv.ParseUint(mv[0][2], 10, 64)
var err error
fields["pkts"], err = strconv.ParseUint(pkts, 10, 64)
if err != nil {
continue
}
fields["bytes"], err = strconv.ParseUint(bytes, 10, 64)
if err != nil {
continue
}
acc.AddFields(measurement, fields, tags)
}
return nil
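
To illustrate the rewritten `valuesRe`, here is how it decomposes a sample rule line matching the README output above; the submatch-count check mirrors `parseAndGather`.

```go
// Sketch: the new regexp yields pkts, bytes, and the rule comment
// in a single FindStringSubmatch call.
m := valuesRe.FindStringSubmatch(
	"  100  1024 ACCEPT tcp -- * * 0.0.0.0/0 0.0.0.0/0 /* ssh */")
// len(m) == 4: m[1] == "100", m[2] == "1024", m[3] == "ssh"
```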

View File

@@ -124,68 +124,115 @@ func TestIptables_Gather(t *testing.T) {
{map[string]interface{}{"pkts": uint64(57), "bytes": uint64(4520)}},
},
},
{ // 10 - allow trailing text
table: "mangle",
chains: []string{"SHAPER"},
values: []string{
`Chain SHAPER (policy ACCEPT 58 packets, 5096 bytes)
pkts bytes target prot opt in out source destination
0 0 ACCEPT all -- * * 1.3.5.7 0.0.0.0/0 /* test */
0 0 CLASSIFY all -- * * 1.3.5.7 0.0.0.0/0 /* test2 */ CLASSIFY set 1:4
`},
tags: []map[string]string{
map[string]string{"table": "mangle", "chain": "SHAPER", "ruleid": "test"},
map[string]string{"table": "mangle", "chain": "SHAPER", "ruleid": "test2"},
},
fields: [][]map[string]interface{}{
{map[string]interface{}{"pkts": uint64(0), "bytes": uint64(0)}},
{map[string]interface{}{"pkts": uint64(0), "bytes": uint64(0)}},
},
},
{ // 11 - invalid pkts/bytes
table: "mangle",
chains: []string{"SHAPER"},
values: []string{
`Chain SHAPER (policy ACCEPT 58 packets, 5096 bytes)
pkts bytes target prot opt in out source destination
a a ACCEPT all -- * * 1.3.5.7 0.0.0.0/0 /* test */
a a CLASSIFY all -- * * 1.3.5.7 0.0.0.0/0 /* test2 */ CLASSIFY set 1:4
`},
tags: []map[string]string{},
fields: [][]map[string]interface{}{},
},
{ // 12 - all target and ports
table: "all_recv",
chains: []string{"accountfwd"},
values: []string{
`Chain accountfwd (1 references)
pkts bytes target prot opt in out source destination
123 456 all -- eth0 * 0.0.0.0/0 0.0.0.0/0 /* all_recv */
`},
tags: []map[string]string{
map[string]string{"table": "all_recv", "chain": "accountfwd", "ruleid": "all_recv"},
},
fields: [][]map[string]interface{}{
{map[string]interface{}{"pkts": uint64(123), "bytes": uint64(456)}},
},
},
}
for i, tt := range tests {
i++
ipt := &Iptables{
Table: tt.table,
Chains: tt.chains,
lister: func(table, chain string) (string, error) {
if len(tt.values) > 0 {
v := tt.values[0]
tt.values = tt.values[1:]
return v, nil
}
return "", nil
},
}
acc := new(testutil.Accumulator)
err := acc.GatherError(ipt.Gather)
if !reflect.DeepEqual(tt.err, err) {
t.Errorf("%d: expected error '%#v' got '%#v'", i, tt.err, err)
}
if tt.table == "" {
n := acc.NFields()
if n != 0 {
t.Errorf("%d: expected 0 fields if empty table got %d", i, n)
t.Run(tt.table, func(t *testing.T) {
i++
ipt := &Iptables{
Table: tt.table,
Chains: tt.chains,
lister: func(table, chain string) (string, error) {
if len(tt.values) > 0 {
v := tt.values[0]
tt.values = tt.values[1:]
return v, nil
}
return "", nil
},
}
continue
}
if len(tt.chains) == 0 {
n := acc.NFields()
if n != 0 {
t.Errorf("%d: expected 0 fields if empty chains got %d", i, n)
acc := new(testutil.Accumulator)
err := acc.GatherError(ipt.Gather)
if !reflect.DeepEqual(tt.err, err) {
t.Errorf("%d: expected error '%#v' got '%#v'", i, tt.err, err)
}
continue
}
if len(tt.tags) == 0 {
n := acc.NFields()
if n != 0 {
t.Errorf("%d: expected 0 values got %d", i, n)
if tt.table == "" {
n := acc.NFields()
if n != 0 {
t.Errorf("%d: expected 0 fields if empty table got %d", i, n)
}
return
}
continue
}
n := 0
for j, tags := range tt.tags {
for k, fields := range tt.fields[j] {
if len(acc.Metrics) < n+1 {
t.Errorf("%d: expected at least %d values got %d", i, n+1, len(acc.Metrics))
break
if len(tt.chains) == 0 {
n := acc.NFields()
if n != 0 {
t.Errorf("%d: expected 0 fields if empty chains got %d", i, n)
}
m := acc.Metrics[n]
if !reflect.DeepEqual(m.Measurement, measurement) {
t.Errorf("%d %d %d: expected measurement '%#v' got '%#v'\n", i, j, k, measurement, m.Measurement)
}
if !reflect.DeepEqual(m.Tags, tags) {
t.Errorf("%d %d %d: expected tags\n%#v got\n%#v\n", i, j, k, tags, m.Tags)
}
if !reflect.DeepEqual(m.Fields, fields) {
t.Errorf("%d %d %d: expected fields\n%#v got\n%#v\n", i, j, k, fields, m.Fields)
}
n++
return
}
}
if len(tt.tags) == 0 {
n := acc.NFields()
if n != 0 {
t.Errorf("%d: expected 0 values got %d", i, n)
}
return
}
n := 0
for j, tags := range tt.tags {
for k, fields := range tt.fields[j] {
if len(acc.Metrics) < n+1 {
t.Errorf("%d: expected at least %d values got %d", i, n+1, len(acc.Metrics))
break
}
m := acc.Metrics[n]
if !reflect.DeepEqual(m.Measurement, measurement) {
t.Errorf("%d %d %d: expected measurement '%#v' got '%#v'\n", i, j, k, measurement, m.Measurement)
}
if !reflect.DeepEqual(m.Tags, tags) {
t.Errorf("%d %d %d: expected tags\n%#v got\n%#v\n", i, j, k, tags, m.Tags)
}
if !reflect.DeepEqual(m.Fields, fields) {
t.Errorf("%d %d %d: expected fields\n%#v got\n%#v\n", i, j, k, fields, m.Fields)
}
n++
}
}
})
}
}

View File

@@ -6,6 +6,9 @@ line protocol. [Consumer Group](http://godoc.org/github.com/wvanbergen/kafka/con
is used to talk to the Kafka cluster so multiple instances of telegraf can read
from the same topic in parallel.
For old Kafka versions (< 0.8), please use the kafka_consumer_legacy input plugin
and use the old zookeeper connection method.
## Configuration
```toml
@@ -13,17 +16,24 @@ from the same topic in parallel.
[[inputs.kafka_consumer]]
## topic(s) to consume
topics = ["telegraf"]
## an array of Zookeeper connection strings
zookeeper_peers = ["localhost:2181"]
brokers = ["localhost:9092"]
## the name of the consumer group
consumer_group = "telegraf_metrics_consumers"
## Maximum number of metrics to buffer between collection intervals
metric_buffer = 100000
## Offset (must be either "oldest" or "newest")
offset = "oldest"
## Data format to consume.
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
## Use SSL but skip chain & host verification
# insecure_skip_verify = false
## Optional SASL Config
# sasl_username = "kafka"
# sasl_password = "secret"
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md

View File

@@ -7,20 +7,35 @@ import (
"sync"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/plugins/parsers"
"github.com/Shopify/sarama"
"github.com/wvanbergen/kafka/consumergroup"
cluster "github.com/bsm/sarama-cluster"
)
type Kafka struct {
ConsumerGroup string
Topics []string
MaxMessageLen int
ZookeeperPeers []string
ZookeeperChroot string
Consumer *consumergroup.ConsumerGroup
ConsumerGroup string
Topics []string
Brokers []string
MaxMessageLen int
Cluster *cluster.Consumer
// Verify Kafka SSL Certificate
InsecureSkipVerify bool
// Path to CA file
SSLCA string `toml:"ssl_ca"`
// Path to host cert file
SSLCert string `toml:"ssl_cert"`
// Path to cert key file
SSLKey string `toml:"ssl_key"`
// SASL Username
SASLUsername string `toml:"sasl_username"`
// SASL Password
SASLPassword string `toml:"sasl_password"`
// Legacy metric buffer support
MetricBuffer int
@@ -47,12 +62,22 @@ type Kafka struct {
}
var sampleConfig = `
## kafka servers
brokers = ["localhost:9092"]
## topic(s) to consume
topics = ["telegraf"]
## an array of Zookeeper connection strings
zookeeper_peers = ["localhost:2181"]
## Zookeeper Chroot
zookeeper_chroot = ""
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
## Use SSL but skip chain & host verification
# insecure_skip_verify = false
## Optional SASL Config
# sasl_username = "kafka"
# sasl_password = "secret"
## the name of the consumer group
consumer_group = "telegraf_metrics_consumers"
## Offset (must be either "oldest" or "newest")
@@ -84,45 +109,67 @@ func (k *Kafka) SetParser(parser parsers.Parser) {
func (k *Kafka) Start(acc telegraf.Accumulator) error {
k.Lock()
defer k.Unlock()
var consumerErr error
var clusterErr error
k.acc = acc
config := consumergroup.NewConfig()
config.Zookeeper.Chroot = k.ZookeeperChroot
config := cluster.NewConfig()
config.Consumer.Return.Errors = true
tlsConfig, err := internal.GetTLSConfig(
k.SSLCert, k.SSLKey, k.SSLCA, k.InsecureSkipVerify)
if err != nil {
return err
}
if tlsConfig != nil {
log.Printf("D! TLS Enabled")
config.Net.TLS.Config = tlsConfig
config.Net.TLS.Enable = true
}
if k.SASLUsername != "" && k.SASLPassword != "" {
log.Printf("D! Using SASL auth with username '%s',",
k.SASLUsername)
config.Net.SASL.User = k.SASLUsername
config.Net.SASL.Password = k.SASLPassword
config.Net.SASL.Enable = true
}
switch strings.ToLower(k.Offset) {
case "oldest", "":
config.Offsets.Initial = sarama.OffsetOldest
config.Consumer.Offsets.Initial = sarama.OffsetOldest
case "newest":
config.Offsets.Initial = sarama.OffsetNewest
config.Consumer.Offsets.Initial = sarama.OffsetNewest
default:
log.Printf("I! WARNING: Kafka consumer invalid offset '%s', using 'oldest'\n",
k.Offset)
config.Offsets.Initial = sarama.OffsetOldest
config.Consumer.Offsets.Initial = sarama.OffsetOldest
}
if k.Consumer == nil || k.Consumer.Closed() {
k.Consumer, consumerErr = consumergroup.JoinConsumerGroup(
if k.Cluster == nil {
k.Cluster, clusterErr = cluster.NewConsumer(
k.Brokers,
k.ConsumerGroup,
k.Topics,
k.ZookeeperPeers,
config,
)
if consumerErr != nil {
return consumerErr
if clusterErr != nil {
log.Printf("E! Error when creating Kafka Consumer, brokers: %v, topics: %v\n",
k.Brokers, k.Topics)
return clusterErr
}
// Setup message and error channels
k.in = k.Consumer.Messages()
k.errs = k.Consumer.Errors()
k.in = k.Cluster.Messages()
k.errs = k.Cluster.Errors()
}
k.done = make(chan struct{})
// Start the kafka message reader
go k.receiver()
log.Printf("I! Started the kafka consumer service, peers: %v, topics: %v\n",
k.ZookeeperPeers, k.Topics)
log.Printf("I! Started the kafka consumer service, brokers: %v, topics: %v\n",
k.Brokers, k.Topics)
return nil
}
@@ -156,7 +203,7 @@ func (k *Kafka) receiver() {
// TODO(cam) this locking can be removed if this PR gets merged:
// https://github.com/wvanbergen/kafka/pull/84
k.Lock()
k.Consumer.CommitUpto(msg)
k.Cluster.MarkOffset(msg, "")
k.Unlock()
}
}
@@ -167,7 +214,7 @@ func (k *Kafka) Stop() {
k.Lock()
defer k.Unlock()
close(k.done)
if err := k.Consumer.Close(); err != nil {
if err := k.Cluster.Close(); err != nil {
k.acc.AddError(fmt.Errorf("Error closing consumer: %s\n", err.Error()))
}
}
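
For orientation, the bsm/sarama-cluster pattern the plugin switched to can be reduced to this standalone sketch; broker, group, and topic values are illustrative, and the usual imports (`cluster "github.com/bsm/sarama-cluster"`, `sarama`, `log`) are assumed.

```go
// Sketch: minimal sarama-cluster consumer, mirroring Start()/receiver().
config := cluster.NewConfig()
config.Consumer.Return.Errors = true
config.Consumer.Offsets.Initial = sarama.OffsetOldest
consumer, err := cluster.NewConsumer(
	[]string{"localhost:9092"},   // brokers
	"telegraf_metrics_consumers", // consumer group
	[]string{"telegraf"},         // topics
	config)
if err != nil {
	log.Fatal(err)
}
defer consumer.Close()
for msg := range consumer.Messages() {
	// handle msg.Value, then record the offset as processed
	consumer.MarkOffset(msg, "")
}
```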

View File

@@ -19,7 +19,6 @@ func TestReadsMetricsFromKafka(t *testing.T) {
}
brokerPeers := []string{testutil.GetLocalHost() + ":9092"}
zkPeers := []string{testutil.GetLocalHost() + ":2181"}
testTopic := fmt.Sprintf("telegraf_test_topic_%d", time.Now().Unix())
// Send a Kafka message to the kafka host
@@ -36,11 +35,11 @@ func TestReadsMetricsFromKafka(t *testing.T) {
// Start the Kafka Consumer
k := &Kafka{
ConsumerGroup: "telegraf_test_consumers",
Topics: []string{testTopic},
ZookeeperPeers: zkPeers,
PointBuffer: 100000,
Offset: "oldest",
ConsumerGroup: "telegraf_test_consumers",
Topics: []string{testTopic},
Brokers: brokerPeers,
PointBuffer: 100000,
Offset: "oldest",
}
p, _ := parsers.NewInfluxParser()
k.SetParser(p)

View File

@@ -23,7 +23,7 @@ func newTestKafka() (*Kafka, chan *sarama.ConsumerMessage) {
k := Kafka{
ConsumerGroup: "test",
Topics: []string{"telegraf"},
ZookeeperPeers: []string{"localhost:2181"},
Brokers: []string{"localhost:9092"},
Offset: "oldest",
in: in,
doNotCommitMsgs: true,

View File

@@ -0,0 +1,39 @@
# Kafka Consumer Input Plugin
The [Kafka](http://kafka.apache.org/) consumer plugin polls a specified Kafka
topic and adds messages to InfluxDB. The plugin assumes messages follow the
line protocol. [Consumer Group](http://godoc.org/github.com/wvanbergen/kafka/consumergroup)
is used to talk to the Kafka cluster so multiple instances of telegraf can read
from the same topic in parallel.
## Configuration
```toml
# Read metrics from Kafka topic(s)
[[inputs.kafka_consumer]]
  ## topic(s) to consume
  topics = ["telegraf"]
  ## an array of Zookeeper connection strings
  zookeeper_peers = ["localhost:2181"]
  ## Zookeeper Chroot
  zookeeper_chroot = ""
  ## the name of the consumer group
  consumer_group = "telegraf_metrics_consumers"
  ## Offset (must be either "oldest" or "newest")
  offset = "oldest"

  ## Data format to consume.
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "influx"

  ## Maximum length of a message to consume, in bytes (default 0/unlimited);
  ## larger messages are dropped
  max_message_len = 65536
```
## Testing
Running the integration tests requires a running Zookeeper and Kafka instance.
See the Makefile for the Kafka container commands.


@@ -0,0 +1,183 @@
package kafka_consumer_legacy

import (
	"fmt"
	"log"
	"strings"
	"sync"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/inputs"
	"github.com/influxdata/telegraf/plugins/parsers"

	"github.com/Shopify/sarama"
	"github.com/wvanbergen/kafka/consumergroup"
)

type Kafka struct {
	ConsumerGroup   string
	Topics          []string
	MaxMessageLen   int
	ZookeeperPeers  []string
	ZookeeperChroot string
	Consumer        *consumergroup.ConsumerGroup

	// Legacy metric buffer support
	MetricBuffer int
	// TODO remove PointBuffer, legacy support
	PointBuffer int

	Offset string

	parser parsers.Parser

	sync.Mutex

	// channel for all incoming kafka messages
	in <-chan *sarama.ConsumerMessage
	// channel for all kafka consumer errors
	errs <-chan error
	done chan struct{}

	// keep the accumulator internally:
	acc telegraf.Accumulator

	// doNotCommitMsgs tells the parser not to call CommitUpTo on the consumer
	// this is mostly for test purposes, but there may be a use-case for it later.
	doNotCommitMsgs bool
}

var sampleConfig = `
  ## topic(s) to consume
  topics = ["telegraf"]
  ## an array of Zookeeper connection strings
  zookeeper_peers = ["localhost:2181"]
  ## Zookeeper Chroot
  zookeeper_chroot = ""
  ## the name of the consumer group
  consumer_group = "telegraf_metrics_consumers"
  ## Offset (must be either "oldest" or "newest")
  offset = "oldest"

  ## Data format to consume.
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "influx"

  ## Maximum length of a message to consume, in bytes (default 0/unlimited);
  ## larger messages are dropped
  max_message_len = 65536
`

func (k *Kafka) SampleConfig() string {
	return sampleConfig
}

func (k *Kafka) Description() string {
	return "Read metrics from Kafka topic(s)"
}

func (k *Kafka) SetParser(parser parsers.Parser) {
	k.parser = parser
}

func (k *Kafka) Start(acc telegraf.Accumulator) error {
	k.Lock()
	defer k.Unlock()
	var consumerErr error

	k.acc = acc

	config := consumergroup.NewConfig()
	config.Zookeeper.Chroot = k.ZookeeperChroot
	switch strings.ToLower(k.Offset) {
	case "oldest", "":
		config.Offsets.Initial = sarama.OffsetOldest
	case "newest":
		config.Offsets.Initial = sarama.OffsetNewest
	default:
		log.Printf("I! WARNING: Kafka consumer invalid offset '%s', using 'oldest'\n",
			k.Offset)
		config.Offsets.Initial = sarama.OffsetOldest
	}

	if k.Consumer == nil || k.Consumer.Closed() {
		k.Consumer, consumerErr = consumergroup.JoinConsumerGroup(
			k.ConsumerGroup,
			k.Topics,
			k.ZookeeperPeers,
			config,
		)
		if consumerErr != nil {
			return consumerErr
		}

		// Setup message and error channels
		k.in = k.Consumer.Messages()
		k.errs = k.Consumer.Errors()
	}

	k.done = make(chan struct{})

	// Start the kafka message reader
	go k.receiver()
	log.Printf("I! Started the kafka consumer service, peers: %v, topics: %v\n",
		k.ZookeeperPeers, k.Topics)
	return nil
}

// receiver() reads all incoming messages from the consumer, and parses them into
// influxdb metric points.
func (k *Kafka) receiver() {
	for {
		select {
		case <-k.done:
			return
		case err := <-k.errs:
			if err != nil {
				k.acc.AddError(fmt.Errorf("Consumer Error: %s\n", err))
			}
		case msg := <-k.in:
			if k.MaxMessageLen != 0 && len(msg.Value) > k.MaxMessageLen {
				k.acc.AddError(fmt.Errorf("Message longer than max_message_len (%d > %d)",
					len(msg.Value), k.MaxMessageLen))
			} else {
				metrics, err := k.parser.Parse(msg.Value)
				if err != nil {
					k.acc.AddError(fmt.Errorf("Message Parse Error\nmessage: %s\nerror: %s",
						string(msg.Value), err.Error()))
				}
				for _, metric := range metrics {
					k.acc.AddFields(metric.Name(), metric.Fields(), metric.Tags(), metric.Time())
				}
			}

			if !k.doNotCommitMsgs {
				// TODO(cam) this locking can be removed if this PR gets merged:
				// https://github.com/wvanbergen/kafka/pull/84
				k.Lock()
				k.Consumer.CommitUpto(msg)
				k.Unlock()
			}
		}
	}
}

func (k *Kafka) Stop() {
	k.Lock()
	defer k.Unlock()
	close(k.done)
	if err := k.Consumer.Close(); err != nil {
		k.acc.AddError(fmt.Errorf("Error closing consumer: %s\n", err.Error()))
	}
}

func (k *Kafka) Gather(acc telegraf.Accumulator) error {
	return nil
}

func init() {
	inputs.Add("kafka_consumer_legacy", func() telegraf.Input {
		return &Kafka{}
	})
}

Some files were not shown because too many files have changed in this diff.