Compare commits: release-1. ... 1.3.1

306 commits
| SHA1 |
|---|
| f93615672b |
| 444f1ba09c |
| 5417a12ee3 |
| f6c986b5ca |
| 7f67bf593f |
| 7ca902f987 |
| dc69cacaa1 |
| 29671154bc |
| 4f8341670e |
| 99edca80ef |
| 44654e011c |
| 389439111b |
| 2bc5594b44 |
| 99b53c8745 |
| 27b89dff48 |
| b16eb6eae6 |
| feaf76913b |
| ff704fbe0d |
| ebef47f56a |
| 18fd2d987d |
| 5e70cb3e44 |
| ce203dc687 |
| b0a2e8e1bd |
| 499495f844 |
| 20ab8fb2c3 |
| bc474d3a53 |
| 547be87d79 |
| 619d4d5d29 |
| 052e88ad5e |
| b9ce455bba |
| cd103c85db |
| a3feacbd2f |
| e1a734c525 |
| 53ab56de72 |
| 4e2fe598ac |
| 5fe5c46c6d |
| 153304d92b |
| cb9aecbf04 |
| c66e2896c6 |
| 9b874dff8d |
| b243faa22b |
| 8f5cd6c2ae |
| 3c28b93514 |
| 06baf7cf78 |
| 801f6cb8a0 |
| 3684ec6315 |
| da0773151b |
| 38e1c1de77 |
| 799c8bed29 |
| a237301932 |
| b03d78d00f |
| 748ca7d503 |
| bf30ef89ee |
| 3690e1b9bf |
| 2542ef6d62 |
| eb7ef5392e |
| 70b3e763e7 |
| 58ee962679 |
| dc5779e2a7 |
| b968759d10 |
| b90a5b48a1 |
| a12e082dbe |
| cadd845b36 |
| dff216c44d |
| 45c9b867f6 |
| 9388fff1f7 |
| 3e0c55bff9 |
| 49ab4e26f8 |
| 360b10c4de |
| 2c98e5ae66 |
| 0193cbee51 |
| f55af7d21f |
| 516dffa4c4 |
| 62b5c1f7e7 |
| 07c428ef89 |
| aa722fac9b |
| 7cc4ca2341 |
| 92fa20cef2 |
| c9f8308f27 |
| 5ffc9fd379 |
| 8bf193dc06 |
| f2805fd4aa |
| 35e4390168 |
| 51c99d5b67 |
| 540f98e228 |
| c980c92cd5 |
| 9495b615f5 |
| fb1c7d0154 |
| 03ee6022f3 |
| cc5b2f68b6 |
| 2d7f612bd7 |
| 9e036b2d65 |
| 1100a98f11 |
| 37689f4df6 |
| 78c7f4e4af |
| 84a9f91f5c |
| 5612df48f9 |
| 0fa9001453 |
| 995546e7c6 |
| 1402c158b7 |
| 616b66f5cb |
| 70a0a84882 |
| 5c33c760c7 |
| bb28fb256b |
| a962e958eb |
| 8514acdc3c |
| 426182b81a |
| 7a5d857846 |
| 13f314a507 |
| ea6e0b8259 |
| e811e2600d |
| 49c212337f |
| d243d69a09 |
| ae6a5d2255 |
| 56aa89e5c8 |
| 7513fcac4e |
| 9df2974a0f |
| ceb36adac7 |
| 7a8e821731 |
| 76bcdecd21 |
| 10744646db |
| 1873abd248 |
| 9618515926 |
| a251adb838 |
| 9e810ac463 |
| b9457a1092 |
| 6f2eeae498 |
| 42a41d33cc |
| 81408f9da7 |
| c4212d69c9 |
| e17164d3f0 |
| e5349393f8 |
| 06176ef410 |
| 2a3448c8f3 |
| 5da40d56ad |
| 54c9a385d5 |
| 25c55419df |
| c19fb1535e |
| 45a168e425 |
| 22243a8354 |
| ff9369f1a1 |
| 21cf79163c |
| f05fac74cb |
| c8cc01ba6a |
| 694955c87b |
| b1945c0493 |
| 1c4673e900 |
| dfb4038654 |
| b3537ef2a8 |
| 0ce44648cf |
| 55d3f70771 |
| a610f8bd03 |
| dfba3ff37a |
| 285be648c4 |
| f7d551a807 |
| 3f224a15d5 |
| c0bbde03ea |
| 97050e9669 |
| eafd1dcc7c |
| c528c53e5b |
| 07a6223932 |
| aeb849d744 |
| 9003efc3fa |
| 32e06a489d |
| 2932db8480 |
| 19dee32287 |
| 4dad723088 |
| 54cfbb5b87 |
| 3e37dda7b0 |
| fb7931591d |
| e87ce22af9 |
| 738cbbdbb6 |
| 074e6d177c |
| 1d864ebd40 |
| e9decadf75 |
| 3fa37a9212 |
| c9e87a39f8 |
| 4a5d313693 |
| 168270ea5f |
| c4d4185fb5 |
| 822333690f |
| d7a8bb2214 |
| a505123e60 |
| be10b19760 |
| b9ae3d6a57 |
| c882570983 |
| 80411f99f0 |
| 6df3f0fdae |
| 22340ad984 |
| c15504c509 |
| 20bf90ee52 |
| 3de6bfbcb8 |
| e0c6262e0b |
| 9b2f6499e7 |
| 9262712f0a |
| 0c9da0985a |
| b89c45b858 |
| b60b360f13 |
| 734988d732 |
| 95bad9e55b |
| e812a2efc6 |
| 411853fc74 |
| b7d29ca0e9 |
| 947e1909ff |
| 31a4f03031 |
| 81f95e7a29 |
| 2aa2c796e5 |
| a658e6c509 |
| 5f6766f6e1 |
| 7279018cfe |
| 4b08d127e0 |
| fd1feff7b4 |
| 37bc9cf795 |
| b762546fa7 |
| bf5f2659a1 |
| d2787e8ef5 |
| a9f03a72f5 |
| 7fc57812a7 |
| 8a982ca68f |
| 200237a515 |
| 0ae1e0611c |
| 1392e73125 |
| a90afd95c6 |
| 9866146545 |
| 8df325a68c |
| 48ae105a11 |
| 4e808c5c20 |
| eb96443a34 |
| e36c354ff5 |
| f09c08d1f3 |
| 0e8122a2fc |
| 6723ea5fe6 |
| e8bf968c78 |
| 9c8f24601f |
| 4957717df5 |
| 21fac3ebec |
| ecbc634221 |
| 90cec20d1d |
| bcbf82f8e8 |
| 3a45d8851d |
| 4a83c8c518 |
| bc13d32d53 |
| e6fc32bdf0 |
| a970b9c62c |
| 17b307a7bc |
| 393f5044bb |
| c630212dde |
| f39db08c6d |
| b4f9bc8745 |
| 5f06bd2566 |
| 8a4ab3654d |
| e2f9617228 |
| e097ae9632 |
| 07684fb030 |
| 17fa6f9b17 |
| 8e3fbaa9dd |
| dede3e70ad |
| 7558081873 |
| 6e241611be |
| fc9f921b62 |
| 12db3b9120 |
| b58926dd26 |
| 91143dda1a |
| efb64a049f |
| 4f6087a99d |
| 6b0e863556 |
| 11bc82379c |
| a093ec1eaa |
| d71a42cd1b |
| d518d7d806 |
| 1d1afe6481 |
| 5a3f2e61f3 |
| 504f4e69db |
| 9f6666beb3 |
| af6e7b9531 |
| 6fd7361364 |
| e5c7a71d8e |
| db7a4b24b6 |
| 332f678afb |
| 04a2b36a52 |
| f862c6585d |
| 5c32521a07 |
| 9db30250c3 |
| 2b0cd2037b |
| 536dbfb724 |
| b77398c4d3 |
| fbf5bee051 |
| 81004c808f |
| 196509cc53 |
| 94ce67cc67 |
| 33ed528afe |
| 2435e47926 |
| ff67a4b96c |
| f816b952cf |
| b905bc1b5d |
| 0ecbf9e349 |
| 1c7715780e |
| 5d3850c44e |
| e84b356a12 |
| b349800f7a |
| 47de43abf3 |
| 7a9fef80f5 |
| dc28875437 |
| a6ed4d4c3a |
| fe6162b2a1 |
| 34182d9c9f |
.github/ISSUE_TEMPLATE.md (2 changed lines)
````
@@ -1,7 +1,7 @@
## Directions

GitHub Issues are reserved for actionable bug reports and feature requests.
General questions should be sent to the [InfluxDB mailing list](https://groups.google.com/forum/#!forum/influxdb).
General questions should be asked at the [InfluxData Community](https://community.influxdata.com) site.

Before opening an issue, search for similar bug reports or feature requests on GitHub Issues.
If no similar issue can be found, fill out either the "Bug Report" or the "Feature Request" section below.
````
CHANGELOG.md (230 changed lines)
````
@@ -1,15 +1,225 @@
## v1.2 [unreleased]

### Release Notes

### Features

## v1.3.1 [unreleased]

### Bugfixes

## v1.1 [unreleased]

- [#1949](https://github.com/influxdata/telegraf/issues/1949): Fix windows `net` plugin.
- [#2749](https://github.com/influxdata/telegraf/pull/2749): Fixed sqlserver input to work with case sensitive server collation.
- [#2782](https://github.com/influxdata/telegraf/pull/2782): Reuse transports in input plugins
- [#2815](https://github.com/influxdata/telegraf/issues/2815): Inputs processes fails with "no such process".
- [#2851](https://github.com/influxdata/telegraf/pull/2851): Fix InfluxDB output database quoting.
- [#2856](https://github.com/influxdata/telegraf/issues/2856): Fix net input on older Linux kernels.
- [#2848](https://github.com/influxdata/telegraf/pull/2848): Fix panic in mongo input.
- [#2869](https://github.com/influxdata/telegraf/pull/2869): Fix length calculation of split metric buffer.

## v1.3 [2017-05-15]

### Release Notes

- Users of the windows `ping` plugin will need to drop or migrate their
measurements in order to continue using the plugin. The reason for this is that
the windows plugin was outputting a different type than the linux plugin. This
made it impossible to use the `ping` plugin for both windows and linux
machines.

- Ceph: the `ceph_pgmap_state` metric content has been modified to use a unique field `count`, with each state expressed as a `state` tag.

Telegraf < 1.3:

```
# field_name             value
active+clean             123
active+clean+scrubbing   3
```

Telegraf >= 1.3:

```
# field_name             value         tag
count                    123           state=active+clean
count                    3             state=active+clean+scrubbing
```

- The [Riemann output plugin](./plugins/outputs/riemann) has been rewritten
and the previous riemann plugin is _incompatible_ with the new one. The reasons
for this are outlined in issue [#1878](https://github.com/influxdata/telegraf/issues/1878).
The previous riemann output will still be available using
`outputs.riemann_legacy` if needed, but that will eventually be deprecated.
It is highly recommended that all users migrate to the new riemann output plugin.

- Generic [socket_listener](./plugins/inputs/socket_listener) and
[socket_writer](./plugins/outputs/socket_writer) plugins have been implemented
for receiving and sending UDP, TCP, unix, & unix-datagram data. These plugins
will replace udp_listener and tcp_listener, which are still available but will
be deprecated eventually.
````
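The Riemann and socket entries in the release notes above both involve configuration changes. Here is a minimal migration sketch for the Riemann rewrite, assuming the legacy plugin keeps its old `url`/`transport` options and the rewritten plugin takes a scheme-qualified `url` (option names are assumptions, not verified against the 1.3 sample configs):

```toml
# Old plugin, kept temporarily under a new table name; eventually deprecated.
[[outputs.riemann_legacy]]
  url = "localhost:5555"   # assumed option name
  transport = "tcp"        # assumed option name

# Rewritten plugin (see #1878); note the scheme in the URL.
[[outputs.riemann]]
  url = "tcp://localhost:5555"   # assumed option name
```

And a sketch of the new generic socket plugins that replace `tcp_listener`/`udp_listener`, again assuming the standard `service_address`/`address` option names:

```toml
# Receive line-protocol metrics over TCP (replaces tcp_listener).
[[inputs.socket_listener]]
  service_address = "tcp://:8094"   # assumed option name

# Forward metrics to another agent over UDP (the counterpart output plugin).
[[outputs.socket_writer]]
  address = "udp://127.0.0.1:8094"  # assumed option name
```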
````
### Features

- [#2721](https://github.com/influxdata/telegraf/pull/2721): Added SASL options for kafka output plugin.
- [#2723](https://github.com/influxdata/telegraf/pull/2723): Added SSL configuration for input haproxy.
- [#2494](https://github.com/influxdata/telegraf/pull/2494): Add interrupts input plugin.
- [#2094](https://github.com/influxdata/telegraf/pull/2094): Add generic socket listener & writer.
- [#2204](https://github.com/influxdata/telegraf/pull/2204): Extend http_response to support searching for a substring in response. Return 1 if found, else 0.
- [#2137](https://github.com/influxdata/telegraf/pull/2137): Added userstats to mysql input plugin.
- [#2179](https://github.com/influxdata/telegraf/pull/2179): Added more InnoDB metric to MySQL plugin.
- [#2229](https://github.com/influxdata/telegraf/pull/2229): `ceph_pgmap_state` metric now uses a single field `count`, with PG state published as `state` tag.
- [#2251](https://github.com/influxdata/telegraf/pull/2251): InfluxDB output: use own client for improved through-put and less allocations.
- [#2330](https://github.com/influxdata/telegraf/pull/2330): Keep -config-directory when running as Windows service.
- [#1900](https://github.com/influxdata/telegraf/pull/1900): Riemann plugin rewrite.
- [#1453](https://github.com/influxdata/telegraf/pull/1453): diskio: add support for name templates and udev tags.
- [#2277](https://github.com/influxdata/telegraf/pull/2277): add integer metrics for Consul check health state.
- [#2201](https://github.com/influxdata/telegraf/pull/2201): Add lock option to the IPtables input plugin.
- [#2244](https://github.com/influxdata/telegraf/pull/2244): Support ipmi_sensor plugin querying local ipmi sensors.
- [#2339](https://github.com/influxdata/telegraf/pull/2339): Increment gather_errors for all errors emitted by inputs.
- [#2071](https://github.com/influxdata/telegraf/issues/2071): Use official docker SDK.
- [#1678](https://github.com/influxdata/telegraf/pull/1678): Add AMQP consumer input plugin
- [#2512](https://github.com/influxdata/telegraf/pull/2512): Added pprof tool.
- [#2501](https://github.com/influxdata/telegraf/pull/2501): Support DEAD(X) state in system input plugin.
- [#2522](https://github.com/influxdata/telegraf/pull/2522): Add support for mongodb client certificates.
- [#1948](https://github.com/influxdata/telegraf/pull/1948): Support adding SNMP table indexes as tags.
- [#2332](https://github.com/influxdata/telegraf/pull/2332): Add Elasticsearch 5.x output
- [#2587](https://github.com/influxdata/telegraf/pull/2587): Add json timestamp units configurability
- [#2597](https://github.com/influxdata/telegraf/issues/2597): Add support for Linux sysctl-fs metrics.
- [#2425](https://github.com/influxdata/telegraf/pull/2425): Support to include/exclude docker container labels as tags
- [#1667](https://github.com/influxdata/telegraf/pull/1667): dmcache input plugin
- [#2637](https://github.com/influxdata/telegraf/issues/2637): Add support for precision in http_listener
- [#2636](https://github.com/influxdata/telegraf/pull/2636): Add `message_len_max` option to `kafka_consumer` input
- [#1100](https://github.com/influxdata/telegraf/issues/1100): Add collectd parser
- [#1820](https://github.com/influxdata/telegraf/issues/1820): easier plugin testing without outputs
- [#2493](https://github.com/influxdata/telegraf/pull/2493): Check signature in the GitHub webhook plugin
- [#2038](https://github.com/influxdata/telegraf/issues/2038): Add papertrail support to webhooks
- [#2253](https://github.com/influxdata/telegraf/pull/2253): Change jolokia plugin to use bulk requests.
- [#2575](https://github.com/influxdata/telegraf/issues/2575) Add diskio input for Darwin
- [#2705](https://github.com/influxdata/telegraf/pull/2705): Kinesis output: add use_random_partitionkey option
- [#2635](https://github.com/influxdata/telegraf/issues/2635): add tcp keep-alive to socket_listener & socket_writer
- [#2031](https://github.com/influxdata/telegraf/pull/2031): Add Kapacitor input plugin
- [#2732](https://github.com/influxdata/telegraf/pull/2732): Use go 1.8.1
- [#2712](https://github.com/influxdata/telegraf/issues/2712): Documentation for rabbitmq input plugin
- [#2141](https://github.com/influxdata/telegraf/pull/2141): Logparser handles newly-created files.
````
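Of the entries above, #1453 (diskio name templates and udev tags) is the easiest to picture as configuration. A hedged sketch, with the `device_tags`/`name_templates` option names and udev variables assumed from the plugin's README rather than confirmed here:

```toml
[[inputs.diskio]]
  ## Tag metrics with these udev properties when available (assumed option name).
  device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"]
  ## Derive the reported device name from udev variables (assumed option name).
  name_templates = ["$ID_FS_LABEL", "$DM_VG_NAME/$DM_LV_NAME"]
```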
````
### Bugfixes

- [#2633](https://github.com/influxdata/telegraf/pull/2633): ipmi_sensor: allow @ symbol in password
- [#2077](https://github.com/influxdata/telegraf/issues/2077): SQL Server Input - Arithmetic overflow error converting numeric to data type int.
- [#2262](https://github.com/influxdata/telegraf/issues/2262): Flush jitter can inhibit metric collection.
- [#2318](https://github.com/influxdata/telegraf/issues/2318): haproxy input - Add missing fields.
- [#2287](https://github.com/influxdata/telegraf/issues/2287): Kubernetes input: Handle null startTime for stopped pods.
- [#2356](https://github.com/influxdata/telegraf/issues/2356): cpu input panic when /proc/stat is empty.
- [#2341](https://github.com/influxdata/telegraf/issues/2341): telegraf swallowing panics in --test mode.
- [#2358](https://github.com/influxdata/telegraf/pull/2358): Create pidfile with 644 permissions & defer file deletion.
- [#2360](https://github.com/influxdata/telegraf/pull/2360): Fixed install/remove of telegraf on non-systemd Debian/Ubuntu systems
- [#2282](https://github.com/influxdata/telegraf/issues/2282): Reloading telegraf freezes prometheus output.
- [#2390](https://github.com/influxdata/telegraf/issues/2390): Empty tag value causes error on InfluxDB output.
- [#2380](https://github.com/influxdata/telegraf/issues/2380): buffer_size field value is negative number from "internal" plugin.
- [#2414](https://github.com/influxdata/telegraf/issues/2414): Missing error handling in the MySQL plugin leads to segmentation violation.
- [#2462](https://github.com/influxdata/telegraf/pull/2462): Fix type conflict in windows ping plugin.
- [#2178](https://github.com/influxdata/telegraf/issues/2178): logparser: regexp with lookahead.
- [#2466](https://github.com/influxdata/telegraf/issues/2466): Telegraf can crash in LoadDirectory on 0600 files.
- [#2215](https://github.com/influxdata/telegraf/issues/2215): Iptables input: document better that rules without a comment are ignored.
- [#2483](https://github.com/influxdata/telegraf/pull/2483): Fix win_perf_counters capping values at 100.
- [#2498](https://github.com/influxdata/telegraf/pull/2498): Exporting Ipmi.Path to be set by config.
- [#2500](https://github.com/influxdata/telegraf/pull/2500): Remove warning if parse empty content
- [#2520](https://github.com/influxdata/telegraf/pull/2520): Update default value for Cloudwatch rate limit
- [#2513](https://github.com/influxdata/telegraf/issues/2513): create /etc/telegraf/telegraf.d directory in tarball.
- [#2541](https://github.com/influxdata/telegraf/issues/2541): Return error on unsupported serializer data format.
- [#1827](https://github.com/influxdata/telegraf/issues/1827): Fix Windows Performance Counters multi instance identifier
- [#2576](https://github.com/influxdata/telegraf/pull/2576): Add write timeout to Riemann output
- [#2596](https://github.com/influxdata/telegraf/pull/2596): fix timestamp parsing on prometheus plugin
- [#2610](https://github.com/influxdata/telegraf/pull/2610): Fix deadlock when output cannot write
- [#2410](https://github.com/influxdata/telegraf/issues/2410): Fix connection leak in postgresql.
- [#2628](https://github.com/influxdata/telegraf/issues/2628): Set default measurement name for snmp input.
- [#2649](https://github.com/influxdata/telegraf/pull/2649): Improve performance of diskio with many disks
- [#2671](https://github.com/influxdata/telegraf/issues/2671): The internal input plugin uses the wrong units for `heap_objects`
- [#2684](https://github.com/influxdata/telegraf/pull/2684): Fix ipmi_sensor config is shared between all plugin instances
- [#2450](https://github.com/influxdata/telegraf/issues/2450): Network statistics not collected when system has alias interfaces
- [#1911](https://github.com/influxdata/telegraf/issues/1911): Sysstat plugin needs LANG=C or similar locale
- [#2528](https://github.com/influxdata/telegraf/issues/2528): File output closes standard streams on reload.
- [#2603](https://github.com/influxdata/telegraf/issues/2603): AMQP output disconnect blocks all outputs
- [#2706](https://github.com/influxdata/telegraf/issues/2706): Improve documentation for redis input plugin

## v1.2.1 [2017-02-01]

### Bugfixes

- [#2317](https://github.com/influxdata/telegraf/issues/2317): Fix segfault on nil metrics with influxdb output.
- [#2324](https://github.com/influxdata/telegraf/issues/2324): Fix negative number handling.

### Features

- [#2348](https://github.com/influxdata/telegraf/pull/2348): Go version 1.7.4 -> 1.7.5

## v1.2 [2017-01-00]

### Release Notes

- The StatsD plugin will now default all "delete_" config options to "true". This
will change te default behavior for users who were not specifying these parameters
in their config file.

- The StatsD plugin will also no longer save it's state on a service reload.
Essentially we have reverted PR [#887](https://github.com/influxdata/telegraf/pull/887).
The reason for this is that saving the state in a global variable is not
thread-safe (see [#1975](https://github.com/influxdata/telegraf/issues/1975) & [#2102](https://github.com/influxdata/telegraf/issues/2102)),
and this creates issues if users want to define multiple instances
of the statsd plugin. Saving state on reload may be considered in the future,
but this would need to be implemented at a higher level and applied to all
plugins, not just statsd.
````
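For users affected by the `delete_` default change described in the release notes above, keeping the old behavior means setting each option explicitly. A sketch, assuming the four standard `delete_*` options of the statsd plugin:

```toml
[[inputs.statsd]]
  service_address = ":8125"
  ## As of 1.2 these are stated to default to true; set them to false to keep
  ## values across flush intervals, as before.
  delete_gauges = false
  delete_counters = false
  delete_sets = false
  delete_timings = false
```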
````
### Features

- [#2123](https://github.com/influxdata/telegraf/pull/2123): Fix improper calculation of CPU percentages
- [#1564](https://github.com/influxdata/telegraf/issues/1564): Use RFC3339 timestamps in log output.
- [#1997](https://github.com/influxdata/telegraf/issues/1997): Non-default HTTP timeouts for RabbitMQ plugin.
- [#2074](https://github.com/influxdata/telegraf/pull/2074): "discard" output plugin added, primarily for testing purposes.
- [#1965](https://github.com/influxdata/telegraf/pull/1965): The JSON parser can now parse an array of objects using the same configuration.
- [#1807](https://github.com/influxdata/telegraf/pull/1807): Option to use device name rather than path for reporting disk stats.
- [#1348](https://github.com/influxdata/telegraf/issues/1348): Telegraf "internal" plugin for collecting stats on itself.
- [#2127](https://github.com/influxdata/telegraf/pull/2127): Update Go version to 1.7.4.
- [#2126](https://github.com/influxdata/telegraf/pull/2126): Support a metric.Split function.
- [#2026](https://github.com/influxdata/telegraf/pull/2065): elasticsearch "shield" (basic auth) support doc.
- [#1885](https://github.com/influxdata/telegraf/pull/1885): Fix over-querying of cloudwatch metrics
- [#1913](https://github.com/influxdata/telegraf/pull/1913): OpenTSDB basic auth support.
- [#1908](https://github.com/influxdata/telegraf/pull/1908): RabbitMQ Connection metrics.
- [#1937](https://github.com/influxdata/telegraf/pull/1937): HAProxy session limit metric.
- [#2068](https://github.com/influxdata/telegraf/issues/2068): Accept strings for StatsD sets.
- [#1893](https://github.com/influxdata/telegraf/issues/1893): Change StatsD default "reset" behavior.
- [#2079](https://github.com/influxdata/telegraf/pull/2079): Enable setting ClientID in MQTT output.
- [#2001](https://github.com/influxdata/telegraf/pull/2001): MongoDB input plugin: Improve state data.
- [#2078](https://github.com/influxdata/telegraf/pull/2078): Ping input: add standard deviation field.
- [#2121](https://github.com/influxdata/telegraf/pull/2121): Add GC pause metric to InfluxDB input plugin.
- [#2006](https://github.com/influxdata/telegraf/pull/2006): Added response_timeout property to prometheus input plugin.
- [#1763](https://github.com/influxdata/telegraf/issues/1763): Pulling github.com/lxn/win's pdh wrapper into telegraf.
- [#1898](https://github.com/influxdata/telegraf/issues/1898): Support negative statsd counters.
- [#1921](https://github.com/influxdata/telegraf/issues/1921): Elasticsearch cluster stats support.
- [#1942](https://github.com/influxdata/telegraf/pull/1942): Change Amazon Kinesis output plugin to use the built-in serializer plugins.
- [#1980](https://github.com/influxdata/telegraf/issues/1980): Hide username/password from elasticsearch error log messages.
- [#2097](https://github.com/influxdata/telegraf/issues/2097): Configurable HTTP timeouts in Jolokia plugin
- [#2255](https://github.com/influxdata/telegraf/pull/2255): Allow changing jolokia attribute delimiter
````
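The `internal` plugin from #1348 above needs essentially no configuration. A minimal sketch, with the memstats flag name assumed from the plugin's README:

```toml
[[inputs.internal]]
  ## Also report Go runtime memory statistics for the agent itself
  ## (assumed option name).
  collect_memstats = true
```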
````
### Bugfixes

- [#2049](https://github.com/influxdata/telegraf/pull/2049): Fix the Value data format not trimming null characters from input.
- [#1949](https://github.com/influxdata/telegraf/issues/1949): Fix windows `net` plugin.
- [#1775](https://github.com/influxdata/telegraf/issues/1775): Cache & expire metrics for delivery to prometheus
- [#1775](https://github.com/influxdata/telegraf/issues/1775): Cache & expire metrics for delivery to prometheus.
- [#2146](https://github.com/influxdata/telegraf/issues/2146): Fix potential panic in aggregator plugin metric maker.
- [#1843](https://github.com/influxdata/telegraf/pull/1843) & [#1668](https://github.com/influxdata/telegraf/issues/1668): Add optional ability to define PID as a tag.
- [#1730](https://github.com/influxdata/telegraf/issues/1730) & [#2261](https://github.com/influxdata/telegraf/pull/2261): Fix win_perf_counters not gathering non-English counters.
- [#2061](https://github.com/influxdata/telegraf/issues/2061): Fix panic when file stat info cannot be collected due to permissions or other issue(s).
- [#2045](https://github.com/influxdata/telegraf/issues/2045): Graylog output should set short_message field.
- [#1904](https://github.com/influxdata/telegraf/issues/1904): Hddtemp always put the value in the field temperature.
- [#1693](https://github.com/influxdata/telegraf/issues/1693): Properly collect nested jolokia struct data.
- [#1917](https://github.com/influxdata/telegraf/pull/1917): fix puppetagent inputs plugin to support string for config variable.
- [#1987](https://github.com/influxdata/telegraf/issues/1987): fix docker input plugin tags when registry has port.
- [#2089](https://github.com/influxdata/telegraf/issues/2089): Fix tail input when reading from a pipe.
- [#1449](https://github.com/influxdata/telegraf/issues/1449): MongoDB plugin always shows 0 replication lag.
- [#1825](https://github.com/influxdata/telegraf/issues/1825): Consul plugin: add check_id as a tag in metrics to avoid overwrites.
- [#1973](https://github.com/influxdata/telegraf/issues/1973): Partial fix: logparser CLF pattern with IPv6 addresses.
- [#1975](https://github.com/influxdata/telegraf/issues/1975) & [#2102](https://github.com/influxdata/telegraf/issues/2102): Fix thread-safety when using multiple instances of the statsd input plugin.
- [#2027](https://github.com/influxdata/telegraf/issues/2027): docker input: interface conversion panic fix.
- [#1814](https://github.com/influxdata/telegraf/issues/1814): snmp: ensure proper context is present on error messages.
- [#2299](https://github.com/influxdata/telegraf/issues/2299): opentsdb: add tcp:// prefix if no scheme provided.
- [#2297](https://github.com/influxdata/telegraf/issues/2297): influx parser: parse line-protocol without newlines.
- [#2245](https://github.com/influxdata/telegraf/issues/2245): influxdb output: fix field type conflict blocking output buffer.

## v1.1.1 [unreleased]
## v1.1.2 [2016-12-12]

### Bugfixes

@@ -98,6 +308,7 @@ continue sending logs to /var/log/telegraf/telegraf.log.
- [#1771](https://github.com/influxdata/telegraf/issues/1771): Delete nil fields in the metric maker.
- [#870](https://github.com/influxdata/telegraf/issues/870): Fix MySQL special characters in DSN parsing.
- [#1742](https://github.com/influxdata/telegraf/issues/1742): Ping input odd timeout behavior.
- [#1950](https://github.com/influxdata/telegraf/pull/1950): Switch to github.com/kballard/go-shellquote.

## v1.0.1 [2016-09-26]

@@ -158,8 +369,11 @@ which can be installed via
evaluated at every flush interval, rather than once at startup. This makes it
consistent with the behavior of `collection_jitter`.

- postgresql plugins now handle oid and name typed columns seamlessly, previously they were ignored/skipped.

### Features

- [#1617](https://github.com/influxdata/telegraf/pull/1617): postgresql_extensible now handles name and oid types correctly.
- [#1413](https://github.com/influxdata/telegraf/issues/1413): Separate container_version from container_image tag.
- [#1525](https://github.com/influxdata/telegraf/pull/1525): Support setting per-device and total metrics for Docker network and blockio.
- [#1466](https://github.com/influxdata/telegraf/pull/1466): MongoDB input plugin: adding per DB stats from db.stats()
````
CONTRIBUTING.md

````
@@ -124,7 +124,7 @@ You should also add the following to your SampleConfig() return:

```toml
  ## Data format to consume.
  ## Each data format has it's own unique set of configuration options, read
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "influx"
```

@@ -254,7 +254,7 @@ You should also add the following to your SampleConfig() return:

```toml
  ## Data format to output.
  ## Each data format has it's own unique set of configuration options, read
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
  data_format = "influx"
```
````
Godeps (116 changed lines)
````
@@ -1,65 +1,65 @@
github.com/Shopify/sarama 8aadb476e66ca998f2f6bb3c993e9a2daa3666b9
github.com/Sirupsen/logrus 219c8cb75c258c552e999735be6df753ffc7afdc
github.com/aerospike/aerospike-client-go 7f3a312c3b2a60ac083ec6da296091c52c795c63
github.com/amir/raidman 53c1b967405155bfc8758557863bf2e14f814687
github.com/aws/aws-sdk-go 13a12060f716145019378a10e2806c174356b857
github.com/beorn7/perks 3ac7bf7a47d159a033b107610db8a1b6575507a4
github.com/cenkalti/backoff 4dc77674aceaabba2c7e3da25d4c823edfb73f99
github.com/couchbase/go-couchbase cb664315a324d87d19c879d9cc67fda6be8c2ac1
github.com/couchbase/gomemcached a5ea6356f648fec6ab89add00edd09151455b4b2
collectd.org 2ce144541b8903101fb8f1483cc0497a68798122
github.com/Shopify/sarama 574d3147eee384229bf96a5d12c207fe7b5234f3
github.com/Sirupsen/logrus 61e43dc76f7ee59a82bdf3d71033dc12bea4c77d
github.com/aerospike/aerospike-client-go 95e1ad7791bdbca44707fedbb29be42024900d9c
github.com/amir/raidman c74861fe6a7bb8ede0a010ce4485bdbb4fc4c985
github.com/aws/aws-sdk-go 7524cb911daddd6e5c9195def8e59ae892bef8d9
github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
github.com/cenkalti/backoff b02f2bbce11d7ea6b97f282ef1771b0fe2f65ef3
github.com/couchbase/go-couchbase bfe555a140d53dc1adf390f1a1d4b0fd4ceadb28
github.com/couchbase/gomemcached 4a25d2f4e1dea9ea7dd76dfd943407abf9b07d29
github.com/couchbase/goutils 5823a0cbaaa9008406021dc5daf80125ea30bba6
github.com/dancannon/gorethink e7cac92ea2bc52638791a021f212145acfedb1fc
github.com/davecgh/go-spew 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d
github.com/docker/engine-api 8924d6900370b4c7e7984be5adc61f50a80d7537
github.com/docker/go-connections f549a9393d05688dff0992ef3efd8bbe6c628aeb
github.com/docker/go-units 5d2041e26a699eaca682e2ea41c8f891e1060444
github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
github.com/docker/docker b89aff1afa1f61993ab2ba18fd62d9375a195f5d
github.com/eapache/go-resiliency b86b1ec0dd4209a588dc1285cdd471e73525c0b3
github.com/eapache/queue ded5959c0d4e360646dc9e9908cff48666781367
github.com/eclipse/paho.mqtt.golang 0f7a459f04f13a41b7ed752d47944528d4bf9a86
github.com/go-sql-driver/mysql 1fca743146605a172a266e1654e01e5cd5669bee
github.com/gobwas/glob 49571a1557cd20e6a2410adc6421f85b66c730b5
github.com/golang/protobuf 552c7b9542c194800fd493123b3798ef0a832032
github.com/golang/snappy d9eb7a3d35ec988b8585d4a0068e462c27d28380
github.com/gonuts/go-shellquote e842a11b24c6abfb3dd27af69a17f482e4b483c2
github.com/gorilla/context 1ea25387ff6f684839d82767c1733ff4d4d15d0a
github.com/gorilla/mux c9e326e2bdec29039a3761c07bece13133863e1e
github.com/eapache/go-xerial-snappy bb955e01b9346ac19dc29eb16586c90ded99a98c
github.com/eapache/queue 44cc805cf13205b55f69e14bcb69867d1ae92f98
github.com/eclipse/paho.mqtt.golang d4f545eb108a2d19f9b1a735689dbfb719bc21fb
github.com/go-sql-driver/mysql 2e00b5cd70399450106cec6431c2e2ce3cae5034
github.com/gobwas/glob bea32b9cd2d6f55753d94a28e959b13f0244797a
github.com/golang/protobuf 8ee79997227bf9b34611aee7946ae64735e6fd93
github.com/golang/snappy 7db9049039a047d955fe8c19b83c8ff5abd765c7
github.com/gorilla/mux 392c28fe23e1c45ddba891b0320b3b5df220beea
github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478
github.com/hashicorp/consul 5aa90455ce78d4d41578bafc86305e6e6b28d7d2
github.com/hpcloud/tail b2940955ab8b26e19d43a43c4da0475dd81bdb56
github.com/influxdata/config b79f6829346b8d6e78ba73544b1e1038f1f1c9da
github.com/influxdata/influxdb fc57c0f7c635df3873f3d64f0ed2100ddc94d5ae
github.com/influxdata/toml af4df43894b16e3fd2b788d01bd27ad0776ef2d0
github.com/hashicorp/consul 63d2fc68239b996096a1c55a0d4b400ea4c2583f
github.com/influxdata/tail a395bf99fe07c233f41fba0735fa2b13b58588ea
github.com/influxdata/toml 5d1d907f22ead1cd47adde17ceec5bda9cacaf8f
github.com/influxdata/wlog 7c63b0a71ef8300adc255344d275e10e5c3a71ec
github.com/kardianos/osext 29ae4ffbc9a6fe9fb2bc5029050ce6996ea1d3bc
github.com/kardianos/service 5e335590050d6d00f3aa270217d288dda1c94d0a
github.com/klauspost/crc32 19b0b332c9e4516a6370a0456e6182c3b5036720
github.com/lib/pq e182dc4027e2ded4b19396d638610f2653295f36
github.com/matttproud/golang_protobuf_extensions d0c3fe89de86839aecf2e0579c40ba3bb336a453
github.com/miekg/dns cce6c130cdb92c752850880fd285bea1d64439dd
github.com/mreiferson/go-snappystream 028eae7ab5c4c9e2d1cb4c4ca1e53259bbe7e504
github.com/jackc/pgx b84338d7d62598f75859b2b146d830b22f1b9ec8
github.com/kardianos/osext c2c54e542fb797ad986b31721e1baedf214ca413
github.com/kardianos/service 6d3a0ee7d3425d9d835debc51a0ca1ffa28f4893
github.com/kballard/go-shellquote d8ec1a69a250a17bb0e419c386eac1f3711dc142
github.com/klauspost/crc32 cb6bfca970f6908083f26f39a79009d608efd5cd
github.com/matttproud/golang_protobuf_extensions c12348ce28de40eed0136aa2b644d0ee0650e56c
github.com/miekg/dns 99f84ae56e75126dd77e5de4fae2ea034a468ca1
github.com/naoina/go-stringutil 6b638e95a32d0c1131db0e7fe83775cbea4a0d0b
github.com/nats-io/nats ea8b4fd12ebb823073c0004b9f09ac8748f4f165
github.com/nats-io/nuid a5152d67cf63cbfb5d992a395458722a45194715
github.com/nsqio/go-nsq 0b80d6f05e15ca1930e0c5e1d540ed627e299980
github.com/opencontainers/runc 89ab7f2ccc1e45ddf6485eaa802c35dcf321dfc8
github.com/prometheus/client_golang 18acf9993a863f4c4b40612e19cdd243e7c86831
github.com/nats-io/go-nats ea9585611a4ab58a205b9b125ebd74c389a6b898
github.com/nats-io/nats ea9585611a4ab58a205b9b125ebd74c389a6b898
github.com/nats-io/nuid 289cccf02c178dc782430d534e3c1f5b72af807f
github.com/nsqio/go-nsq a53d495e81424aaf7a7665a9d32a97715c40e953
github.com/pierrec/lz4 5c9560bfa9ace2bf86080bf40d46b34ae44604df
github.com/pierrec/xxHash 5a004441f897722c627870a981d02b29924215fa
github.com/prometheus/client_golang c317fb74746eac4fc65fe3909195f4cf67c5562a
github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
github.com/prometheus/common e8eabff8812b05acf522b45fdcd725a785188e37
github.com/prometheus/procfs 406e5b7bfd8201a36e2bb5f7bdae0b03380c2ce8
github.com/samuel/go-zookeeper 218e9c81c0dd8b3b18172b2bbfad92cc7d6db55f
github.com/shirou/gopsutil 4d0c402af66c78735c5ccf820dc2ca7de5e4ff08
github.com/soniah/gosnmp 3fe3beb30fa9700988893c56a63b1df8e1b68c26
github.com/streadway/amqp b4f3ceab0337f013208d31348b578d83c0064744
github.com/stretchr/testify 1f4a1643a57e798696635ea4c126e9127adb7d3c
github.com/vjeantet/grok 83bfdfdfd1a8146795b28e547a8e3c8b28a466c2
github.com/wvanbergen/kafka 46f9a1cf3f670edec492029fadded9c2d9e18866
github.com/wvanbergen/kazoo-go 0f768712ae6f76454f987c3356177e138df258f8
github.com/yuin/gopher-lua bf3808abd44b1e55143a2d7f08571aaa80db1808
github.com/prometheus/common dd2f054febf4a6c00f2343686efb775948a8bff4
github.com/prometheus/procfs 1878d9fbb537119d24b21ca07effd591627cd160
github.com/rcrowley/go-metrics 1f30fe9094a513ce4c700b9a54458bbb0c96996c
github.com/samuel/go-zookeeper 1d7be4effb13d2d908342d349d71a284a7542693
github.com/satori/go.uuid 5bf94b69c6b68ee1b541973bb8e1144db23a194b
github.com/shirou/gopsutil 9a4a9167ad3b4355dbf1c2c7a0f5f0d3fb1e9ab9
github.com/soniah/gosnmp 5ad50dc75ab389f8a1c9f8a67d3a1cd85f67ed15
github.com/streadway/amqp 63795daa9a446c920826655f26ba31c81c860fd6
github.com/stretchr/testify 4d4bfba8f1d1027c4fdbe371823030df51419987
github.com/vjeantet/grok d73e972b60935c7fec0b4ffbc904ed39ecaf7efe
github.com/wvanbergen/kafka bc265fedb9ff5b5c5d3c0fdcef4a819b3523d3ee
github.com/wvanbergen/kazoo-go 968957352185472eacb69215fa3dbfcfdbac1096
github.com/yuin/gopher-lua 66c871e454fcf10251c61bf8eff02d0978cae75a
github.com/zensqlmonitor/go-mssqldb ffe5510c6fa5e15e6d983210ab501c815b56b363
golang.org/x/crypto c197bcf24cde29d3f73c7b4ac6fd41f4384e8af6
golang.org/x/net 6acef71eb69611914f7a30939ea9f6e194c78172
golang.org/x/text a71fd10341b064c10f4a81ceac72bcf70f26ea34
gopkg.in/dancannon/gorethink.v1 7d1af5be49cb5ecc7b177bf387d232050299d6ef
gopkg.in/fatih/pool.v2 cba550ebf9bce999a02e963296d4bc7a486cb715
gopkg.in/mgo.v2 d90005c5262a3463800497ea5a89aed5fe22c886
gopkg.in/yaml.v2 a83829b6f1293c91addabc89d0571c246397bbf4
golang.org/x/crypto dc137beb6cce2043eb6b5f223ab8bf51c32459f4
golang.org/x/net f2499483f923065a842d38eb4c7f1927e6fc6e6d
golang.org/x/text 506f9d5c962f284575e88337e7d9296d27e729d3
gopkg.in/dancannon/gorethink.v1 edc7a6a68e2d8015f5ffe1b2560eed989f8a45be
gopkg.in/fatih/pool.v2 6e328e67893eb46323ad06f0e92cb9536babbabc
gopkg.in/mgo.v2 3f83fa5005286a7fe593b055f0d7771a7dce4655
gopkg.in/olivere/elastic.v5 ee3ebceab960cf68ab9a89ee6d78c031ef5b4a4e
gopkg.in/yaml.v2 4c78c975fe7c825c6d1466c42be594d1d6f3aba6
````
Godeps_windows

````
@@ -1,7 +1,6 @@
github.com/Microsoft/go-winio ce2922f643c8fd76b46cadc7f404a06282678b34
github.com/StackExchange/wmi f3e2bae1e0cb5aef83e319133eabfee30013a4a5
github.com/go-ole/go-ole be49f7c07711fcb603cff39e1de7c67926dc0ba7
github.com/lxn/win 950a0e81e7678e63d8e6cd32412bdecb325ccd88
github.com/shirou/w32 3c9377fc6748f222729a8270fe2775d149a249ad
golang.org/x/sys a646d33e2ee3172a661fc09bca23bb4889a41bc8
github.com/go-ini/ini 9144852efba7c4daf409943ee90767da62d55438
````
Makefile (12 changed lines)
````
@@ -1,6 +1,6 @@
VERSION := $(shell sh -c 'git describe --always --tags')
BRANCH := $(shell sh -c 'git rev-parse --abbrev-ref HEAD')
COMMIT := $(shell sh -c 'git rev-parse HEAD')
COMMIT := $(shell sh -c 'git rev-parse --short HEAD')
ifdef GOBIN
PATH := $(GOBIN):$(PATH)
else
@@ -51,6 +51,7 @@ docker-run:
	-e ADVERTISED_PORT=9092 \
	-p "2181:2181" -p "9092:9092" \
	-d spotify/kafka
	docker run --name elasticsearch -p "9200:9200" -p "9300:9300" -d elasticsearch:5
	docker run --name mysql -p "3306:3306" -e MYSQL_ALLOW_EMPTY_PASSWORD=yes -d mysql
	docker run --name memcached -p "11211:11211" -d memcached
	docker run --name postgres -p "5432:5432" -d postgres
@@ -58,7 +59,7 @@ docker-run:
	docker run --name redis -p "6379:6379" -d redis
	docker run --name nsq -p "4150:4150" -d nsqio/nsq /nsqd
	docker run --name mqtt -p "1883:1883" -d ncarlier/mqtt
	docker run --name riemann -p "5555:5555" -d blalor/riemann
	docker run --name riemann -p "5555:5555" -d stealthly/docker-riemann
	docker run --name nats -p "4222:4222" -d nats

# Run docker containers necessary for CircleCI unit tests
@@ -69,15 +70,16 @@ docker-run-circle:
	-e ADVERTISED_PORT=9092 \
	-p "2181:2181" -p "9092:9092" \
	-d spotify/kafka
	docker run --name elasticsearch -p "9200:9200" -p "9300:9300" -d elasticsearch:5
	docker run --name nsq -p "4150:4150" -d nsqio/nsq /nsqd
	docker run --name mqtt -p "1883:1883" -d ncarlier/mqtt
	docker run --name riemann -p "5555:5555" -d blalor/riemann
	docker run --name riemann -p "5555:5555" -d stealthly/docker-riemann
	docker run --name nats -p "4222:4222" -d nats

# Kill all docker containers, ignore errors
docker-kill:
	-docker kill nsq aerospike redis rabbitmq postgres memcached mysql kafka mqtt riemann nats
	-docker rm nsq aerospike redis rabbitmq postgres memcached mysql kafka mqtt riemann nats
	-docker kill nsq aerospike redis rabbitmq postgres memcached mysql kafka mqtt riemann nats elasticsearch
	-docker rm nsq aerospike redis rabbitmq postgres memcached mysql kafka mqtt riemann nats elasticsearch

# Run full unit tests using docker containers (includes setup and teardown)
test: vet docker-kill docker-run
````
README.md (313 changed lines)
````
@@ -1,15 +1,23 @@
# Telegraf [](https://circleci.com/gh/influxdata/telegraf) [](https://hub.docker.com/_/telegraf/)

Telegraf is an agent written in Go for collecting metrics from the system it's
running on, or from other services, and writing them into InfluxDB or other
[outputs](https://github.com/influxdata/telegraf#supported-output-plugins).
Telegraf is an agent written in Go for collecting, processing, aggregating,
and writing metrics.

Design goals are to have a minimal memory footprint with a plugin system so
that developers in the community can easily add support for collecting metrics
from well known services (like Hadoop, Postgres, or Redis) and third party
APIs (like Mailchimp, AWS CloudWatch, or Google Analytics).

New input and output plugins are designed to be easy to contribute,
Telegraf is plugin-driven and has the concept of 4 distinct plugins:

1. [Input Plugins](#input-plugins) collect metrics from the system, services, or 3rd party APIs
2. [Processor Plugins](#processor-plugins) transform, decorate, and/or filter metrics
3. [Aggregator Plugins](#aggregator-plugins) create aggregate metrics (e.g. mean, min, max, quantiles, etc.)
4. [Output Plugins](#output-plugins) write metrics to various destinations

For more information on Processor and Aggregator plugins please [read this](./docs/AGGREGATORS_AND_PROCESSORS.md).
````
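The four plugin types listed in the README excerpt above compose within a single config file. A minimal sketch, assuming the `printer` processor and `minmax` aggregator that ship with the 1.3 plugin directories (adjust names to the plugins actually present in your build):

```toml
# One plugin of each type in a single telegraf.conf.
[[inputs.cpu]]                 # input: collect CPU metrics

[[processors.printer]]         # processor: print each metric as it passes through

[[aggregators.minmax]]         # aggregator: emit min/max over each period
  period = "30s"
  drop_original = false

[[outputs.influxdb]]           # output: write to InfluxDB
  urls = ["http://127.0.0.1:8086"]
  database = "telegraf"
```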
New plugins are designed to be easy to contribute,
|
||||
we'll eagerly accept pull
|
||||
requests and will manage the set of plugins that Telegraf supports.
|
||||
See the [contributing guide](CONTRIBUTING.md) for instructions on writing
|
||||
@@ -17,65 +25,25 @@ new plugins.
|
||||
|
||||
## Installation:
|
||||
|
||||
### Linux deb and rpm Packages:
|
||||
You can either download the binaries directly from the
|
||||
[downloads](https://www.influxdata.com/downloads) page.
|
||||
|
||||
Latest:
|
||||
* https://dl.influxdata.com/telegraf/releases/telegraf_1.0.1_amd64.deb
|
||||
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.1.x86_64.rpm
|
||||
|
||||
Latest (arm):
|
||||
* https://dl.influxdata.com/telegraf/releases/telegraf_1.0.1_armhf.deb
|
||||
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.1.armhf.rpm
|
||||
|
||||
##### Package Instructions:
|
||||
|
||||
* Telegraf binary is installed in `/usr/bin/telegraf`
|
||||
* Telegraf daemon configuration file is in `/etc/telegraf/telegraf.conf`
|
||||
* On sysv systems, the telegraf daemon can be controlled via
|
||||
`service telegraf [action]`
|
||||
* On systemd systems (such as Ubuntu 15+), the telegraf daemon can be
|
||||
controlled via `systemctl [action] telegraf`
|
||||
|
||||
### yum/apt Repositories:
|
||||
|
||||
There is a yum/apt repo available for the whole InfluxData stack, see
|
||||
[here](https://docs.influxdata.com/influxdb/v0.10/introduction/installation/#installation)
|
||||
for instructions on setting up the repo. Once it is configured, you will be able
|
||||
to use this repo to install & update telegraf.
|
||||
|
||||
### Linux tarballs:
|
||||
|
||||
Latest:
|
||||
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.1_linux_amd64.tar.gz
|
||||
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.1_linux_i386.tar.gz
|
||||
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.1_linux_armhf.tar.gz
|
||||
A few alternate installs are available here as well:
|
||||
|
||||
### FreeBSD tarball:
|
||||
|
||||
Latest:
|
||||
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.1_freebsd_amd64.tar.gz
|
||||
* https://dl.influxdata.com/telegraf/releases/telegraf-VERSION_freebsd_amd64.tar.gz
|
||||
|
||||
### Ansible Role:
|
||||
|
||||
Ansible role: https://github.com/rossmcdonald/telegraf
|
||||
|
||||
### OSX via Homebrew:
|
||||
|
||||
```
|
||||
brew update
|
||||
brew install telegraf
|
||||
```
|
||||
|
||||
### Windows Binaries (EXPERIMENTAL)
|
||||
|
||||
Latest:
|
||||
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.1_windows_amd64.zip
|
||||
|
||||
### From Source:
|
||||
|
||||
Telegraf manages dependencies via [gdm](https://github.com/sparrc/gdm),
|
||||
which gets installed via the Makefile
|
||||
if you don't have it already. You also must build with golang version 1.5+.
|
||||
if you don't have it already. You also must build with golang version 1.8+.
|
||||
|
||||
1. [Install Go](https://golang.org/doc/install)
|
||||
2. [Setup your GOPATH](https://golang.org/doc/code.html#GOPATH)
|
||||
@@ -91,31 +59,31 @@ See usage with:
|
||||
telegraf --help
|
||||
```
|
||||
|
||||
### Generate a telegraf config file:
|
||||
#### Generate a telegraf config file:
|
||||
|
||||
```
|
||||
telegraf config > telegraf.conf
|
||||
```
|
||||
|
||||
### Generate config with only cpu input & influxdb output plugins defined
|
||||
#### Generate config with only cpu input & influxdb output plugins defined
|
||||
|
||||
```
|
||||
telegraf --input-filter cpu --output-filter influxdb config
|
||||
```
|
||||
|
||||
### Run a single telegraf collection, outputing metrics to stdout
|
||||
#### Run a single telegraf collection, outputing metrics to stdout
|
||||
|
||||
```
|
||||
telegraf --config telegraf.conf -test
|
||||
```
|
||||
|
||||
### Run telegraf with all plugins defined in config file
|
||||
#### Run telegraf with all plugins defined in config file
|
||||
|
||||
```
|
||||
telegraf --config telegraf.conf
|
||||
```
|
||||
|
||||
### Run telegraf, enabling the cpu & memory input, and influxdb output plugins
|
||||
#### Run telegraf, enabling the cpu & memory input, and influxdb output plugins
|
||||
|
||||
```
|
||||
telegraf --config telegraf.conf -input-filter cpu:mem -output-filter influxdb
|
||||
@@ -127,77 +95,78 @@ telegraf --config telegraf.conf -input-filter cpu:mem -output-filter influxdb
|
||||
See the [configuration guide](docs/CONFIGURATION.md) for a rundown of the more advanced
|
||||
configuration options.
|
||||
|
||||
## Supported Input Plugins
|
||||
## Input Plugins
|
||||
|
||||
Telegraf currently has support for collecting metrics from many sources. For
|
||||
more information on each, please look at the directory of the same name in
|
||||
`plugins/inputs`.
|
||||
|
||||
Currently implemented sources:
|
||||
|
||||
* [aws cloudwatch](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/cloudwatch)
|
||||
* [aerospike](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/aerospike)
|
||||
* [apache](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/apache)
|
||||
* [bcache](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/bcache)
|
||||
* [cassandra](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/cassandra)
|
||||
* [ceph](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/ceph)
|
||||
* [chrony](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/chrony)
|
||||
* [consul](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/consul)
|
||||
* [conntrack](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/conntrack)
|
||||
* [couchbase](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/couchbase)
|
||||
* [couchdb](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/couchdb)
|
||||
* [disque](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/disque)
|
||||
* [dns query time](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/dns_query)
|
||||
* [docker](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/docker)
|
||||
* [dovecot](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/dovecot)
|
||||
* [elasticsearch](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/elasticsearch)
|
||||
* [exec](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/exec) (generic executable plugin, support JSON, influx, graphite and nagios)
|
||||
* [filestat](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/filestat)
|
||||
* [haproxy](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/haproxy)
|
||||
* [hddtemp](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/hddtemp)
|
||||
* [http_response](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/http_response)
|
||||
* [httpjson](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/httpjson) (generic JSON-emitting http service plugin)
|
||||
* [influxdb](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/influxdb)
|
||||
* [ipmi_sensor](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/ipmi_sensor)
|
||||
* [iptables](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/iptables)
|
||||
* [jolokia](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia)
|
||||
* [leofs](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/leofs)
|
||||
* [lustre2](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/lustre2)
|
||||
* [mailchimp](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/mailchimp)
|
||||
* [memcached](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/memcached)
|
||||
* [mesos](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/mesos)
|
||||
* [mongodb](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/mongodb)
|
||||
* [mysql](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/mysql)
|
||||
* [net_response](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/net_response)
|
||||
* [nginx](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nginx)
|
||||
* [nsq](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nsq)
|
||||
* [nstat](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nstat)
|
||||
* [ntpq](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/ntpq)
|
||||
* [phpfpm](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/phpfpm)
|
||||
* [phusion passenger](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/passenger)
|
||||
* [ping](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/ping)
|
||||
* [postgresql](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/postgresql)
|
||||
* [postgresql_extensible](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/postgresql_extensible)
|
||||
* [powerdns](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/powerdns)
|
||||
* [procstat](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/procstat)
|
||||
* [prometheus](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/prometheus)
|
||||
* [puppetagent](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/puppetagent)
|
||||
* [rabbitmq](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/rabbitmq)
|
||||
* [raindrops](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/raindrops)
|
||||
* [redis](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/redis)
|
||||
* [rethinkdb](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/rethinkdb)
|
||||
* [riak](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/riak)
|
||||
* [sensors](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/sensors)
|
||||
* [snmp](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/snmp)
|
||||
* [snmp_legacy](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/snmp_legacy)
|
||||
* [sql server](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/sqlserver) (microsoft)
|
||||
* [twemproxy](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/twemproxy)
* [varnish](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/varnish)
* [zfs](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/zfs)
* [zookeeper](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/zookeeper)
* [win_perf_counters](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/win_perf_counters) (windows performance counters)
* [sysstat](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/sysstat)
* [system](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/system)
* [aerospike](./plugins/inputs/aerospike)
* [amqp_consumer](./plugins/inputs/amqp_consumer) (rabbitmq)
* [apache](./plugins/inputs/apache)
* [aws cloudwatch](./plugins/inputs/cloudwatch)
* [bcache](./plugins/inputs/bcache)
* [cassandra](./plugins/inputs/cassandra)
* [ceph](./plugins/inputs/ceph)
* [cgroup](./plugins/inputs/cgroup)
* [chrony](./plugins/inputs/chrony)
* [consul](./plugins/inputs/consul)
* [conntrack](./plugins/inputs/conntrack)
* [couchbase](./plugins/inputs/couchbase)
* [couchdb](./plugins/inputs/couchdb)
* [disque](./plugins/inputs/disque)
* [dmcache](./plugins/inputs/dmcache)
* [dns query time](./plugins/inputs/dns_query)
* [docker](./plugins/inputs/docker)
* [dovecot](./plugins/inputs/dovecot)
* [elasticsearch](./plugins/inputs/elasticsearch)
* [exec](./plugins/inputs/exec) (generic executable plugin, supports JSON, influx, graphite and nagios)
* [filestat](./plugins/inputs/filestat)
* [haproxy](./plugins/inputs/haproxy)
* [hddtemp](./plugins/inputs/hddtemp)
* [http_response](./plugins/inputs/http_response)
* [httpjson](./plugins/inputs/httpjson) (generic JSON-emitting http service plugin)
* [internal](./plugins/inputs/internal)
* [influxdb](./plugins/inputs/influxdb)
* [interrupts](./plugins/inputs/interrupts)
* [ipmi_sensor](./plugins/inputs/ipmi_sensor)
* [iptables](./plugins/inputs/iptables)
* [jolokia](./plugins/inputs/jolokia)
* [kapacitor](./plugins/inputs/kapacitor)
* [kubernetes](./plugins/inputs/kubernetes)
* [leofs](./plugins/inputs/leofs)
* [lustre2](./plugins/inputs/lustre2)
* [mailchimp](./plugins/inputs/mailchimp)
* [memcached](./plugins/inputs/memcached)
* [mesos](./plugins/inputs/mesos)
* [mongodb](./plugins/inputs/mongodb)
* [mysql](./plugins/inputs/mysql)
* [net_response](./plugins/inputs/net_response)
* [nginx](./plugins/inputs/nginx)
* [nsq](./plugins/inputs/nsq)
* [nstat](./plugins/inputs/nstat)
* [ntpq](./plugins/inputs/ntpq)
* [phpfpm](./plugins/inputs/phpfpm)
* [phusion passenger](./plugins/inputs/passenger)
* [ping](./plugins/inputs/ping)
* [postgresql](./plugins/inputs/postgresql)
* [postgresql_extensible](./plugins/inputs/postgresql_extensible)
* [powerdns](./plugins/inputs/powerdns)
* [procstat](./plugins/inputs/procstat)
* [prometheus](./plugins/inputs/prometheus)
* [puppetagent](./plugins/inputs/puppetagent)
* [rabbitmq](./plugins/inputs/rabbitmq)
* [raindrops](./plugins/inputs/raindrops)
* [redis](./plugins/inputs/redis)
* [rethinkdb](./plugins/inputs/rethinkdb)
* [riak](./plugins/inputs/riak)
* [sensors](./plugins/inputs/sensors)
* [snmp](./plugins/inputs/snmp)
* [snmp_legacy](./plugins/inputs/snmp_legacy)
* [sql server](./plugins/inputs/sqlserver) (microsoft)
* [twemproxy](./plugins/inputs/twemproxy)
* [varnish](./plugins/inputs/varnish)
* [zfs](./plugins/inputs/zfs)
* [zookeeper](./plugins/inputs/zookeeper)
* [win_perf_counters](./plugins/inputs/win_perf_counters) (windows performance counters)
* [sysstat](./plugins/inputs/sysstat)
* [system](./plugins/inputs/system)
    * cpu
    * mem
    * net

@@ -208,48 +177,72 @@ Currently implemented sources:

    * processes
    * kernel (/proc/stat)
    * kernel (/proc/vmstat)
    * linux_sysctl_fs (/proc/sys/fs)

Telegraf can also collect metrics via the following service plugins:

* [http_listener](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/http_listener)
* [kafka_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/kafka_consumer)
* [mqtt_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/mqtt_consumer)
* [nats_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nats_consumer)
* [nsq_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nsq_consumer)
* [logparser](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/logparser)
* [statsd](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/statsd)
* [tail](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/tail)
* [tcp_listener](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/tcp_listener)
* [udp_listener](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/udp_listener)
* [webhooks](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/webhooks)
  * [filestack](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/webhooks/filestack)
  * [github](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/webhooks/github)
  * [mandrill](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/webhooks/mandrill)
  * [rollbar](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/webhooks/rollbar)
* [http_listener](./plugins/inputs/http_listener)
* [kafka_consumer](./plugins/inputs/kafka_consumer)
* [mqtt_consumer](./plugins/inputs/mqtt_consumer)
* [nats_consumer](./plugins/inputs/nats_consumer)
* [nsq_consumer](./plugins/inputs/nsq_consumer)
* [logparser](./plugins/inputs/logparser)
* [statsd](./plugins/inputs/statsd)
* [socket_listener](./plugins/inputs/socket_listener)
* [tail](./plugins/inputs/tail)
* [tcp_listener](./plugins/inputs/socket_listener)
* [udp_listener](./plugins/inputs/socket_listener)
* [webhooks](./plugins/inputs/webhooks)
  * [filestack](./plugins/inputs/webhooks/filestack)
  * [github](./plugins/inputs/webhooks/github)
  * [mandrill](./plugins/inputs/webhooks/mandrill)
  * [rollbar](./plugins/inputs/webhooks/rollbar)
  * [papertrail](./plugins/inputs/webhooks/papertrail)

We'll be adding support for many more over the coming months. Read on if you
want to add support for another service or third-party API.
Telegraf is able to parse the following input data formats into metrics; these
formats may be used with input plugins supporting the `data_format` option:

## Supported Output Plugins

* [InfluxDB Line Protocol](./docs/DATA_FORMATS_INPUT.md#influx)
* [JSON](./docs/DATA_FORMATS_INPUT.md#json)
* [Graphite](./docs/DATA_FORMATS_INPUT.md#graphite)
* [Value](./docs/DATA_FORMATS_INPUT.md#value)
* [Nagios](./docs/DATA_FORMATS_INPUT.md#nagios)
* [Collectd](./docs/DATA_FORMATS_INPUT.md#collectd)

* [influxdb](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/influxdb)
* [amon](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/amon)
* [amqp](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/amqp)
* [aws kinesis](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/kinesis)
* [aws cloudwatch](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/cloudwatch)
* [datadog](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/datadog)
* [file](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/file)
* [graphite](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/graphite)
* [graylog](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/graylog)
* [instrumental](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/instrumental)
* [kafka](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/kafka)
* [librato](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/librato)
* [mqtt](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/mqtt)
* [nats](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/nats)
* [nsq](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/nsq)
* [opentsdb](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/opentsdb)
* [prometheus](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/prometheus_client)
* [riemann](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/riemann)

## Processor Plugins

* [printer](./plugins/processors/printer)

## Aggregator Plugins

* [minmax](./plugins/aggregators/minmax)

## Output Plugins

* [influxdb](./plugins/outputs/influxdb)
* [amon](./plugins/outputs/amon)
* [amqp](./plugins/outputs/amqp) (rabbitmq)
* [aws kinesis](./plugins/outputs/kinesis)
* [aws cloudwatch](./plugins/outputs/cloudwatch)
* [datadog](./plugins/outputs/datadog)
* [discard](./plugins/outputs/discard)
* [elasticsearch](./plugins/outputs/elasticsearch)
* [file](./plugins/outputs/file)
* [graphite](./plugins/outputs/graphite)
* [graylog](./plugins/outputs/graylog)
* [instrumental](./plugins/outputs/instrumental)
* [kafka](./plugins/outputs/kafka)
* [librato](./plugins/outputs/librato)
* [mqtt](./plugins/outputs/mqtt)
* [nats](./plugins/outputs/nats)
* [nsq](./plugins/outputs/nsq)
* [opentsdb](./plugins/outputs/opentsdb)
* [prometheus](./plugins/outputs/prometheus_client)
* [riemann](./plugins/outputs/riemann)
* [riemann_legacy](./plugins/outputs/riemann_legacy)
* [socket_writer](./plugins/outputs/socket_writer)
* [tcp](./plugins/outputs/socket_writer)
* [udp](./plugins/outputs/socket_writer)

## Contributing

@@ -2,10 +2,14 @@ package agent

import (
	"log"
	"sync/atomic"
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/selfstat"
)

var (
	NErrors = selfstat.Register("agent", "gather_errors", map[string]string{})
)

type MetricMaker interface {

@@ -37,8 +41,6 @@ type accumulator struct {
	maker MetricMaker

	precision time.Duration

	errCount uint64
}

func (ac *accumulator) AddFields(

@@ -80,7 +82,7 @@ func (ac *accumulator) AddError(err error) {
	if err == nil {
		return
	}
	atomic.AddUint64(&ac.errCount, 1)
	NErrors.Incr(1)
	//TODO suppress/throttle consecutive duplicate errors?
	log.Printf("E! Error in plugin [%s]: %s", ac.maker.Name(), err)
}

@@ -9,6 +9,7 @@ import (
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/metric"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

@@ -41,7 +42,7 @@ func TestAdd(t *testing.T) {
	testm = <-metrics
	actual = testm.String()
	assert.Equal(t,
		fmt.Sprintf("acctest,acc=test value=101 %d", now.UnixNano()),
		fmt.Sprintf("acctest,acc=test value=101 %d\n", now.UnixNano()),
		actual)
}

@@ -69,7 +70,7 @@ func TestAddFields(t *testing.T) {
	testm = <-metrics
	actual = testm.String()
	assert.Equal(t,
		fmt.Sprintf("acctest,acc=test usage=99 %d", now.UnixNano()),
		fmt.Sprintf("acctest,acc=test usage=99 %d\n", now.UnixNano()),
		actual)
}

@@ -87,7 +88,7 @@ func TestAccAddError(t *testing.T) {
	a.AddError(fmt.Errorf("baz"))

	errs := bytes.Split(errBuf.Bytes(), []byte{'\n'})
	assert.EqualValues(t, 3, a.errCount)
	assert.EqualValues(t, int64(3), NErrors.Get())
	require.Len(t, errs, 4) // 4 because of trailing newline
	assert.Contains(t, string(errs[0]), "TestPlugin")
	assert.Contains(t, string(errs[0]), "foo")

@@ -125,7 +126,7 @@ func TestAddNoIntervalWithPrecision(t *testing.T) {
	testm = <-a.metrics
	actual = testm.String()
	assert.Equal(t,
		fmt.Sprintf("acctest,acc=test value=101 %d", int64(1139572800000000000)),
		fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800000000000)),
		actual)
}

@@ -157,7 +158,7 @@ func TestAddDisablePrecision(t *testing.T) {
	testm = <-a.metrics
	actual = testm.String()
	assert.Equal(t,
		fmt.Sprintf("acctest,acc=test value=101 %d", int64(1139572800082912748)),
		fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800082912748)),
		actual)
}

@@ -189,7 +190,7 @@ func TestAddNoPrecisionWithInterval(t *testing.T) {
	testm = <-a.metrics
	actual = testm.String()
	assert.Equal(t,
		fmt.Sprintf("acctest,acc=test value=101 %d", int64(1139572800000000000)),
		fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800000000000)),
		actual)
}

@@ -206,7 +207,7 @@ func TestDifferentPrecisions(t *testing.T) {
	testm := <-a.metrics
	actual := testm.String()
	assert.Equal(t,
		fmt.Sprintf("acctest,acc=test value=101 %d", int64(1139572800000000000)),
		fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800000000000)),
		actual)

	a.SetPrecision(0, time.Millisecond)

@@ -216,7 +217,7 @@ func TestDifferentPrecisions(t *testing.T) {
	testm = <-a.metrics
	actual = testm.String()
	assert.Equal(t,
		fmt.Sprintf("acctest,acc=test value=101 %d", int64(1139572800083000000)),
		fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800083000000)),
		actual)

	a.SetPrecision(0, time.Microsecond)

@@ -226,7 +227,7 @@ func TestDifferentPrecisions(t *testing.T) {
	testm = <-a.metrics
	actual = testm.String()
	assert.Equal(t,
		fmt.Sprintf("acctest,acc=test value=101 %d", int64(1139572800082913000)),
		fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800082913000)),
		actual)

	a.SetPrecision(0, time.Nanosecond)

@@ -236,7 +237,7 @@ func TestDifferentPrecisions(t *testing.T) {
	testm = <-a.metrics
	actual = testm.String()
	assert.Equal(t,
		fmt.Sprintf("acctest,acc=test value=101 %d", int64(1139572800082912748)),
		fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800082912748)),
		actual)
}

@@ -269,7 +270,7 @@ func TestAddGauge(t *testing.T) {
	testm = <-metrics
	actual = testm.String()
	assert.Equal(t,
		fmt.Sprintf("acctest,acc=test value=101 %d", now.UnixNano()),
		fmt.Sprintf("acctest,acc=test value=101 %d\n", now.UnixNano()),
		actual)
	assert.Equal(t, testm.Type(), telegraf.Gauge)
}

@@ -303,7 +304,7 @@ func TestAddCounter(t *testing.T) {
	testm = <-metrics
	actual = testm.String()
	assert.Equal(t,
		fmt.Sprintf("acctest,acc=test value=101 %d", now.UnixNano()),
		fmt.Sprintf("acctest,acc=test value=101 %d\n", now.UnixNano()),
		actual)
	assert.Equal(t, testm.Type(), telegraf.Counter)
}

@@ -323,15 +324,15 @@ func (tm *TestMetricMaker) MakeMetric(
) telegraf.Metric {
	switch mType {
	case telegraf.Untyped:
		if m, err := telegraf.NewMetric(measurement, tags, fields, t); err == nil {
		if m, err := metric.New(measurement, tags, fields, t); err == nil {
			return m
		}
	case telegraf.Counter:
		if m, err := telegraf.NewCounterMetric(measurement, tags, fields, t); err == nil {
		if m, err := metric.New(measurement, tags, fields, t, telegraf.Counter); err == nil {
			return m
		}
	case telegraf.Gauge:
		if m, err := telegraf.NewGaugeMetric(measurement, tags, fields, t); err == nil {
		if m, err := metric.New(measurement, tags, fields, t, telegraf.Gauge); err == nil {
			return m
		}
	}

@@ -12,6 +12,7 @@ import (
	"github.com/influxdata/telegraf/internal"
	"github.com/influxdata/telegraf/internal/config"
	"github.com/influxdata/telegraf/internal/models"
	"github.com/influxdata/telegraf/selfstat"
)

// Agent runs telegraf and collects data based on the given config

@@ -44,8 +45,6 @@ func NewAgent(config *config.Config) (*Agent, error) {
// Connect connects to all configured outputs
func (a *Agent) Connect() error {
	for _, o := range a.Config.Outputs {
		o.Quiet = a.Config.Agent.Quiet

		switch ot := o.Output.(type) {
		case telegraf.ServiceOutput:
			if err := ot.Start(); err != nil {

@@ -106,24 +105,26 @@ func (a *Agent) gatherer(
) {
	defer panicRecover(input)

	GatherTime := selfstat.RegisterTiming("gather",
		"gather_time_ns",
		map[string]string{"input": input.Config.Name},
	)

	acc := NewAccumulator(input, metricC)
	acc.SetPrecision(a.Config.Agent.Precision.Duration,
		a.Config.Agent.Interval.Duration)

	ticker := time.NewTicker(interval)
	defer ticker.Stop()

	for {
		acc := NewAccumulator(input, metricC)
		acc.SetPrecision(a.Config.Agent.Precision.Duration,
			a.Config.Agent.Interval.Duration)
		input.SetDebug(a.Config.Agent.Debug)
		input.SetDefaultTags(a.Config.Tags)

		internal.RandomSleep(a.Config.Agent.CollectionJitter.Duration, shutdown)

		start := time.Now()
		gatherWithTimeout(shutdown, input, acc, interval)
		elapsed := time.Since(start)

		log.Printf("D! Input [%s] gathered metrics, (%s interval) in %s\n",
			input.Name(), interval, elapsed)
		GatherTime.Incr(elapsed.Nanoseconds())

		select {
		case <-shutdown:

@@ -156,13 +157,13 @@ func gatherWithTimeout(
	select {
	case err := <-done:
		if err != nil {
			log.Printf("E! ERROR in input [%s]: %s", input.Name(), err)
			acc.AddError(err)
		}
		return
	case <-ticker.C:
		log.Printf("E! ERROR: input [%s] took longer to collect than "+
			"collection interval (%s)",
			input.Name(), timeout)
		err := fmt.Errorf("took longer to collect than collection interval (%s)",
			timeout)
		acc.AddError(err)
		continue
	case <-shutdown:
		return

@@ -190,6 +191,12 @@ func (a *Agent) Test() error {
	}()

	for _, input := range a.Config.Inputs {
		if _, ok := input.Input.(telegraf.ServiceInput); ok {
			fmt.Printf("\nWARNING: skipping plugin [[%s]]: service inputs not supported in --test mode\n",
				input.Name())
			continue
		}

		acc := NewAccumulator(input, metricC)
		acc.SetPrecision(a.Config.Agent.Precision.Duration,
			a.Config.Agent.Interval.Duration)

@@ -204,14 +211,11 @@ func (a *Agent) Test() error {
		if err := input.Input.Gather(acc); err != nil {
			return err
		}
		if acc.errCount > 0 {
			return fmt.Errorf("Errors encountered during processing")
		}

		// Special instructions for some inputs. cpu, for example, needs to be
		// run twice in order to return cpu usage percentages.
		switch input.Name() {
		case "cpu", "mongodb", "procstat":
		case "inputs.cpu", "inputs.mongodb", "inputs.procstat":
			time.Sleep(500 * time.Millisecond)
			fmt.Printf("* Plugin: %s, Collection 2\n", input.Name())
			if err := input.Input.Gather(acc); err != nil {

@@ -269,7 +273,7 @@ func (a *Agent) flusher(shutdown chan struct{}, metricC chan telegraf.Metric) er
	var dropOriginal bool
	if !m.IsAggregate() {
		for _, agg := range a.Config.Aggregators {
			if ok := agg.Add(copyMetric(m)); ok {
			if ok := agg.Add(m.Copy()); ok {
				dropOriginal = true
			}
		}

@@ -279,7 +283,7 @@ func (a *Agent) flusher(shutdown chan struct{}, metricC chan telegraf.Metric) er
	if i == len(a.Config.Outputs)-1 {
		o.AddMetric(m)
	} else {
		o.AddMetric(copyMetric(m))
		o.AddMetric(m.Copy())
	}

@@ -288,6 +292,7 @@ func (a *Agent) flusher(shutdown chan struct{}, metricC chan telegraf.Metric) er
	}()

	ticker := time.NewTicker(a.Config.Agent.FlushInterval.Duration)
	semaphore := make(chan struct{}, 1)
	for {
		select {
		case <-shutdown:

@@ -297,8 +302,18 @@ func (a *Agent) flusher(shutdown chan struct{}, metricC chan telegraf.Metric) er
			a.flush()
			return nil
		case <-ticker.C:
			internal.RandomSleep(a.Config.Agent.FlushJitter.Duration, shutdown)
			a.flush()
			go func() {
				select {
				case semaphore <- struct{}{}:
					internal.RandomSleep(a.Config.Agent.FlushJitter.Duration, shutdown)
					a.flush()
					<-semaphore
				default:
					// skipping this flush because one is already happening
					log.Println("W! Skipping a scheduled flush because there is" +
						" already a flush ongoing.")
				}
			}()
		case metric := <-metricC:
			// NOTE potential bottleneck here as we put each metric through the
			// processors serially.

@@ -327,13 +342,13 @@ func (a *Agent) Run(shutdown chan struct{}) error {

	// Start all ServicePlugins
	for _, input := range a.Config.Inputs {
		input.SetDefaultTags(a.Config.Tags)
		switch p := input.Input.(type) {
		case telegraf.ServiceInput:
			acc := NewAccumulator(input, metricC)
			// Service input plugins should set their own precision of their
			// metrics.
			acc.SetPrecision(time.Nanosecond, 0)
			input.SetDefaultTags(a.Config.Tags)
			if err := p.Start(acc); err != nil {
				log.Printf("E! Service for input %s failed to start, exiting\n%s\n",
					input.Name(), err.Error())

@@ -383,21 +398,6 @@ func (a *Agent) Run(shutdown chan struct{}) error {
	}

	wg.Wait()
	a.Close()
	return nil
}

func copyMetric(m telegraf.Metric) telegraf.Metric {
	t := time.Time(m.Time())

	tags := make(map[string]string)
	fields := make(map[string]interface{})
	for k, v := range m.Tags() {
		tags[k] = v
	}
	for k, v := range m.Fields() {
		fields[k] = v
	}

	out, _ := telegraf.NewMetric(m.Name(), tags, fields, t)
	return out
}

circle.yml (12 changes)
@@ -1,13 +1,11 @@
machine:
  go:
    version: 1.8.1
  services:
    - docker
  post:
    - sudo service zookeeper stop
    - go version
    - go version | grep 1.7.3 || sudo rm -rf /usr/local/go
    - wget https://storage.googleapis.com/golang/go1.7.3.linux-amd64.tar.gz
    - sudo tar -C /usr/local -xzf go1.7.3.linux-amd64.tar.gz
    - go version
    - memcached
    - redis
    - rabbitmq-server

dependencies:
  override:

@@ -4,6 +4,8 @@ import (
	"flag"
	"fmt"
	"log"
	"net/http"
	_ "net/http/pprof" // Comment this line to disable pprof endpoint.
	"os"
	"os/signal"
	"runtime"

@@ -24,6 +26,8 @@ import (

var fDebug = flag.Bool("debug", false,
	"turn on debug logging")
var pprofAddr = flag.String("pprof-addr", "",
	"pprof address to listen on, not activate pprof if empty")
var fQuiet = flag.Bool("quiet", false,
	"run in quiet mode")
var fTest = flag.Bool("test", false, "gather metrics, print them out, and exit")

@@ -87,6 +91,7 @@ The commands & flags are:
  --output-filter     filter the output plugins to enable, separator is :
  --usage             print usage for a plugin, ie, 'telegraf --usage mysql'
  --debug             print metrics as they're generated to stdout
  --pprof-addr        pprof address to listen on, format: localhost:6060 or :6060
  --quiet             run in quiet mode

Examples:

@@ -105,98 +110,24 @@ Examples:

  # run telegraf, enabling the cpu & memory input, and influxdb output plugins
  telegraf --config telegraf.conf --input-filter cpu:mem --output-filter influxdb

  # run telegraf with pprof
  telegraf --config telegraf.conf --pprof-addr localhost:6060
`

var stop chan struct{}

var srvc service.Service

type program struct{}

func reloadLoop(stop chan struct{}, s service.Service) {
	defer func() {
		if service.Interactive() {
			os.Exit(0)
		}
		return
	}()
func reloadLoop(
	stop chan struct{},
	inputFilters []string,
	outputFilters []string,
	aggregatorFilters []string,
	processorFilters []string,
) {
	reload := make(chan bool, 1)
	reload <- true
	for <-reload {
		reload <- false
		flag.Parse()
		args := flag.Args()

		var inputFilters []string
		if *fInputFilters != "" {
			inputFilter := strings.TrimSpace(*fInputFilters)
			inputFilters = strings.Split(":"+inputFilter+":", ":")
		}
		var outputFilters []string
		if *fOutputFilters != "" {
			outputFilter := strings.TrimSpace(*fOutputFilters)
			outputFilters = strings.Split(":"+outputFilter+":", ":")
		}
		var aggregatorFilters []string
		if *fAggregatorFilters != "" {
			aggregatorFilter := strings.TrimSpace(*fAggregatorFilters)
			aggregatorFilters = strings.Split(":"+aggregatorFilter+":", ":")
		}
		var processorFilters []string
		if *fProcessorFilters != "" {
			processorFilter := strings.TrimSpace(*fProcessorFilters)
			processorFilters = strings.Split(":"+processorFilter+":", ":")
		}

		if len(args) > 0 {
			switch args[0] {
			case "version":
				fmt.Printf("Telegraf v%s (git: %s %s)\n", version, branch, commit)
				return
			case "config":
				config.PrintSampleConfig(
					inputFilters,
					outputFilters,
					aggregatorFilters,
					processorFilters,
				)
				return
			}
		}

		// switch for flags which just do something and exit immediately
		switch {
		case *fOutputList:
			fmt.Println("Available Output Plugins:")
			for k, _ := range outputs.Outputs {
				fmt.Printf("  %s\n", k)
			}
			return
		case *fInputList:
			fmt.Println("Available Input Plugins:")
			for k, _ := range inputs.Inputs {
				fmt.Printf("  %s\n", k)
			}
			return
		case *fVersion:
			fmt.Printf("Telegraf v%s (git: %s %s)\n", version, branch, commit)
			return
		case *fSampleConfig:
			config.PrintSampleConfig(
				inputFilters,
				outputFilters,
				aggregatorFilters,
				processorFilters,
			)
			return
		case *fUsage != "":
			if err := config.PrintInputConfig(*fUsage); err != nil {
				if err2 := config.PrintOutputConfig(*fUsage); err2 != nil {
					log.Fatalf("E! %s and %s", err, err2)
				}
			}
			return
		}

		// If no other options are specified, load the config file and run.
		c := config.NewConfig()

@@ -213,7 +144,7 @@ func reloadLoop(stop chan struct{}, s service.Service) {
			log.Fatal("E! " + err.Error())
		}
	}
	if len(c.Outputs) == 0 {
	if !*fTest && len(c.Outputs) == 0 {
		log.Fatalf("E! Error: no outputs found, did you provide a valid config file?")
	}
	if len(c.Inputs) == 0 {

@@ -237,7 +168,7 @@ func reloadLoop(stop chan struct{}, s service.Service) {
		if err != nil {
			log.Fatal("E! " + err.Error())
		}
		return
		os.Exit(0)
	}

	err = ag.Connect()

@@ -271,14 +202,21 @@ func reloadLoop(stop chan struct{}, s service.Service) {
	log.Printf("I! Tags enabled: %s", c.ListTags())

	if *fPidfile != "" {
		f, err := os.Create(*fPidfile)
		f, err := os.OpenFile(*fPidfile, os.O_CREATE|os.O_WRONLY, 0644)
		if err != nil {
			log.Fatalf("E! Unable to create pidfile: %s", err)
			log.Printf("E! Unable to create pidfile: %s", err)
		} else {
			fmt.Fprintf(f, "%d\n", os.Getpid())

			f.Close()

			defer func() {
				err := os.Remove(*fPidfile)
				if err != nil {
					log.Printf("E! Unable to remove pidfile: %s", err)
				}
			}()
		}

		fmt.Fprintf(f, "%d\n", os.Getpid())

		f.Close()
	}

	ag.Run(shutdown)

@@ -290,14 +228,26 @@ func usageExit(rc int) {
	os.Exit(rc)
}

type program struct {
	inputFilters      []string
	outputFilters     []string
	aggregatorFilters []string
	processorFilters  []string
}

func (p *program) Start(s service.Service) error {
	srvc = s
	go p.run()
	return nil
}
func (p *program) run() {
	stop = make(chan struct{})
	reloadLoop(stop, srvc)
	reloadLoop(
		stop,
		p.inputFilters,
		p.outputFilters,
		p.aggregatorFilters,
		p.processorFilters,
	)
}
func (p *program) Stop(s service.Service) error {
	close(stop)

@@ -307,6 +257,91 @@ func (p *program) Stop(s service.Service) error {
func main() {
	flag.Usage = func() { usageExit(0) }
	flag.Parse()
	args := flag.Args()

	inputFilters, outputFilters := []string{}, []string{}
	if *fInputFilters != "" {
		inputFilters = strings.Split(":"+strings.TrimSpace(*fInputFilters)+":", ":")
	}
	if *fOutputFilters != "" {
		outputFilters = strings.Split(":"+strings.TrimSpace(*fOutputFilters)+":", ":")
	}

	aggregatorFilters, processorFilters := []string{}, []string{}
	if *fAggregatorFilters != "" {
		aggregatorFilters = strings.Split(":"+strings.TrimSpace(*fAggregatorFilters)+":", ":")
	}
	if *fProcessorFilters != "" {
		processorFilters = strings.Split(":"+strings.TrimSpace(*fProcessorFilters)+":", ":")
	}

	if *pprofAddr != "" {
		go func() {
			pprofHostPort := *pprofAddr
			parts := strings.Split(pprofHostPort, ":")
			if len(parts) == 2 && parts[0] == "" {
				pprofHostPort = fmt.Sprintf("localhost:%s", parts[1])
			}
			pprofHostPort = "http://" + pprofHostPort + "/debug/pprof"

			log.Printf("I! Starting pprof HTTP server at: %s", pprofHostPort)

			if err := http.ListenAndServe(*pprofAddr, nil); err != nil {
				log.Fatal("E! " + err.Error())
			}
		}()
	}

	if len(args) > 0 {
		switch args[0] {
		case "version":
			fmt.Printf("Telegraf v%s (git: %s %s)\n", version, branch, commit)
			return
		case "config":
			config.PrintSampleConfig(
				inputFilters,
				outputFilters,
				aggregatorFilters,
				processorFilters,
			)
			return
		}
	}

	// switch for flags which just do something and exit immediately
	switch {
	case *fOutputList:
		fmt.Println("Available Output Plugins:")
		for k, _ := range outputs.Outputs {
			fmt.Printf("  %s\n", k)
		}
		return
	case *fInputList:
		fmt.Println("Available Input Plugins:")
		for k, _ := range inputs.Inputs {
			fmt.Printf("  %s\n", k)
		}
		return
	case *fVersion:
		fmt.Printf("Telegraf v%s (git: %s %s)\n", version, branch, commit)
		return
	case *fSampleConfig:
		config.PrintSampleConfig(
			inputFilters,
			outputFilters,
			aggregatorFilters,
			processorFilters,
		)
		return
	case *fUsage != "":
		err := config.PrintInputConfig(*fUsage)
		err2 := config.PrintOutputConfig(*fUsage)
		if err != nil && err2 != nil {
			log.Fatalf("E! %s and %s", err, err2)
		}
		return
	}

	if runtime.GOOS == "windows" {
		svcConfig := &service.Config{
			Name: "telegraf",

@@ -316,7 +351,12 @@ func main() {
			Arguments: []string{"-config", "C:\\Program Files\\Telegraf\\telegraf.conf"},
		}

		prg := &program{}
		prg := &program{
			inputFilters:      inputFilters,
			outputFilters:     outputFilters,
			aggregatorFilters: aggregatorFilters,
			processorFilters:  processorFilters,
		}
		s, err := service.New(prg, svcConfig)
		if err != nil {
			log.Fatal("E! " + err.Error())

@@ -327,10 +367,14 @@ func main() {
		if *fConfig != "" {
			(*svcConfig).Arguments = []string{"-config", *fConfig}
		}
		if *fConfigDirectory != "" {
			(*svcConfig).Arguments = append((*svcConfig).Arguments, "-config-directory", *fConfigDirectory)
		}
		err := service.Control(s, *fService)
		if err != nil {
			log.Fatal("E! " + err.Error())
		}
		os.Exit(0)
	} else {
		err = s.Run()
		if err != nil {

@@ -339,6 +383,12 @@ func main() {
		}
	} else {
		stop = make(chan struct{})
		reloadLoop(stop, nil)
		reloadLoop(
			stop,
			inputFilters,
			outputFilters,
			aggregatorFilters,
			processorFilters,
		)
	}
}

docs/AGGREGATORS_AND_PROCESSORS.md (new file, 59 lines)
@@ -0,0 +1,59 @@

# Telegraf Aggregator & Processor Plugins

As of release 1.1.0, Telegraf has the concept of Aggregator and Processor Plugins.

These plugins sit in-between Input & Output plugins, aggregating and processing
metrics as they pass through Telegraf:

```
┌───────────┐
│           │
│    CPU    │───┐
│           │   │
└───────────┘   │
                │
┌───────────┐   │                                             ┌───────────┐
│           │   │                                             │           │
│  Memory   │───┤                                         ┌──▶│ InfluxDB  │
│           │   │                                         │   │           │
└───────────┘   │   ┌─────────────┐     ┌─────────────┐   │   └───────────┘
                │   │             │     │Aggregate    │   │
┌───────────┐   │   │Process      │     │ - mean      │   │   ┌───────────┐
│           │   │   │ - transform │     │ - quantiles │   │   │           │
│   MySQL   │───┼──▶│ - decorate  │────▶│ - min/max   │───┼──▶│   File    │
│           │   │   │ - filter    │     │ - count     │   │   │           │
└───────────┘   │   │             │     │             │   │   └───────────┘
                │   └─────────────┘     └─────────────┘   │
┌───────────┐   │                                         │   ┌───────────┐
│           │   │                                         │   │           │
│   SNMP    │───┤                                         └──▶│   Kafka   │
│           │   │                                             │           │
└───────────┘   │                                             └───────────┘
                │
┌───────────┐   │
│           │   │
│  Docker   │───┘
│           │
└───────────┘
```

Both Aggregators and Processors analyze metrics as they pass through Telegraf.

**Processor** plugins process metrics as they pass through and immediately emit
results based on the values they process. For example, this could be printing
all metrics or adding a tag to all metrics that pass through.

**Aggregator** plugins, on the other hand, are a bit more complicated. Aggregators
are typically for emitting new _aggregate_ metrics, such as a running mean,
minimum, maximum, quantiles, or standard deviation. For this reason, all _aggregator_
plugins are configured with a `period`. The `period` is the size of the window
of metrics that each _aggregate_ represents. In other words, the emitted
_aggregate_ metric will be the aggregated value of the past `period` seconds.
Since many users will only care about their aggregates and not every single metric
gathered, there is also a `drop_original` argument, which tells Telegraf to only
emit the aggregates and not the original metrics.

**NOTE** Because aggregators only aggregate metrics within their period,
historical data is not supported. In other words, if your metric timestamp is more
than `now() - period` in the past, it will not be aggregated. If this is a feature
that you need, please comment on this [github issue](https://github.com/influxdata/telegraf/issues/1992).
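For example, a minimal sketch of an aggregator configuration might look like
this (the `period` and `drop_original` values are illustrative, not
recommendations):

```toml
[[aggregators.minmax]]
  ## Emit one aggregate (the min and max of each field) for every
  ## 30s window of metrics.
  period = "30s"
  ## Keep emitting the original metrics too; set to true to emit
  ## only the aggregates.
  drop_original = false
```
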
@@ -24,6 +24,16 @@ Environment variables can be used anywhere in the config file, simply prepend

them with $. For strings the variable must be within quotes (ie, "$STR_VAR");
for numbers and booleans they should be plain (ie, $INT_VAR, $BOOL_VAR).

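As a sketch (the variable names here are hypothetical), a configuration using
environment variables might look like:

```toml
[global_tags]
  # String value: the variable must be quoted.
  datacenter = "$DATACENTER"

[agent]
  # Boolean value: the variable stays unquoted.
  debug = $TELEGRAF_DEBUG
```
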
## Configuration file locations

The location of the configuration file can be set via the `--config` command
line flag. Telegraf will also pick up all files matching the pattern `*.conf` if
the `--config-directory` command line flag is used.

On most systems, the default locations are `/etc/telegraf/telegraf.conf` for
the main configuration file and `/etc/telegraf/telegraf.d` for the directory of
configuration files.

# Global Tags

Global tags can be specified in the `[global_tags]` section of the config file

@@ -60,7 +70,7 @@ ie, a jitter of 5s and flush_interval 10s means flushes will happen every 10-15s

as the collection interval, with the maximum being 1s. Precision will NOT
be used for service inputs, such as logparser and statsd. Valid values are
"ns", "us" (or "µs"), "ms", "s".
* **logfile**: Specify the log file name. The empty string means to log to stdout.
* **logfile**: Specify the log file name. The empty string means to log to stderr.
* **debug**: Run telegraf in debug mode.
* **quiet**: Run telegraf in quiet mode (error messages only).
* **hostname**: Override default hostname, if empty use os.Hostname().
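For instance, a minimal sketch of the flush jitter behaviour described in the
hunk above (values are illustrative):

```toml
[agent]
  ## Flush every 10s, plus a random jitter of up to 5s,
  ## so flushes happen every 10-15s.
  flush_interval = "10s"
  flush_jitter = "5s"
```
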
@@ -114,31 +124,40 @@ is not specified then processor execution order will be random.

Filters can be configured per input, output, processor, or aggregator,
see below for examples.

* **namepass**: An array of strings that is used to filter metrics generated by the
current input. Each string in the array is tested as a glob match against
measurement names and if it matches, the field is emitted.
* **namedrop**: The inverse of pass, if a measurement name matches, it is not emitted.
* **fieldpass**: An array of strings that is used to filter metrics generated by the
current input. Each string in the array is tested as a glob match against field names
and if it matches, the field is emitted. fieldpass is not available for outputs.
* **fielddrop**: The inverse of pass, if a field name matches, it is not emitted.
fielddrop is not available for outputs.
* **tagpass**: tag names and arrays of strings that are used to filter
measurements by the current input. Each string in the array is tested as a glob
match against the tag name, and if it matches the measurement is emitted.
* **tagdrop**: The inverse of tagpass. If a tag matches, the measurement is not
emitted. This is tested on measurements that have passed the tagpass test.
* **tagexclude**: tagexclude can be used to exclude a tag from measurement(s).
As opposed to tagdrop, which will drop an entire measurement based on it's
tags, tagexclude simply strips the given tag keys from the measurement. This
can be used on inputs & outputs, but it is _recommended_ to be used on inputs,
as it is more efficient to filter out tags at the ingestion point.
* **taginclude**: taginclude is the inverse of tagexclude. It will only include
the tag keys in the final measurement.
* **namepass**:
An array of glob pattern strings. Only points whose measurement name matches
a pattern in this list are emitted.
* **namedrop**:
The inverse of `namepass`. If a match is found the point is discarded. This
is tested on points after they have passed the `namepass` test.
* **fieldpass**:
An array of glob pattern strings. Only fields whose field key matches a
pattern in this list are emitted. Not available for outputs.
* **fielddrop**:
The inverse of `fieldpass`. Fields with a field key matching one of the
patterns will be discarded from the point. Not available for outputs.
* **tagpass**:
A table mapping tag keys to arrays of glob pattern strings. Only points
that contain a tag key in the table and a tag value matching one of its
patterns is emitted.
* **tagdrop**:
The inverse of `tagpass`. If a match is found the point is discarded. This
is tested on points after they have passed the `tagpass` test.
* **taginclude**:
An array of glob pattern strings. Only tags with a tag key matching one of
the patterns are emitted. In contrast to `tagpass`, which will pass an entire
point based on its tag, `taginclude` removes all non matching tags from the
point. This filter can be used on both inputs & outputs, but it is
_recommended_ to be used on inputs, as it is more efficient to filter out tags
at the ingestion point.
* **tagexclude**:
The inverse of `taginclude`. Tags with a tag key matching one of the patterns
will be discarded from the point.

**NOTE** `tagpass` and `tagdrop` parameters must be defined at the _end_ of
the plugin definition, otherwise subsequent plugin config options will be
interpreted as part of the tagpass/tagdrop map.
**NOTE** Due to the way TOML is parsed, `tagpass` and `tagdrop` parameters
must be defined at the _end_ of the plugin definition, otherwise subsequent
plugin config options will be interpreted as part of the tagpass/tagdrop
tables.

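As a sketch of the ordering constraint in the note above (the tag values are
illustrative):

```toml
[[inputs.cpu]]
  percpu = true
  ## Any plugin options must appear before the tagpass table; anything
  ## placed after it would be parsed as part of the tagpass map.
  [inputs.cpu.tagpass]
    cpu = ["cpu0", "cpu1"]
```
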
#### Input Configuration Examples

@@ -351,4 +370,4 @@ to the system load metrics due to the `namepass` parameter.

  [[outputs.file]]
    files = ["stdout"]
```
```

@@ -7,6 +7,7 @@ Telegraf is able to parse the following input data formats into metrics:

1. [Graphite](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#graphite)
1. [Value](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#value), ie: 45 or "booyah"
1. [Nagios](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#nagios) (exec input only)
1. [Collectd](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#collectd)

Telegraf metrics, like InfluxDB
[points](https://docs.influxdata.com/influxdb/v0.10/write_protocols/line/),
@@ -40,7 +41,7 @@ example, in the exec plugin:

  name_suffix = "_mycollector"

  ## Data format to consume.
  ## Each data format has it's own unique set of configuration options, read
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "json"

@@ -67,7 +68,7 @@ metrics are parsed directly into Telegraf metrics.

  name_suffix = "_mycollector"

  ## Data format to consume.
  ## Each data format has it's own unique set of configuration options, read
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "influx"

@@ -117,7 +118,7 @@ For example, if you had this configuration:

  name_suffix = "_mycollector"

  ## Data format to consume.
  ## Each data format has it's own unique set of configuration options, read
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "json"

@@ -147,6 +148,62 @@ Your Telegraf metrics would get tagged with "my_tag_1"

exec_mycollector,my_tag_1=foo a=5,b_c=6
```

If the JSON data is an array, then each element of the array is parsed with the configured settings.
Each resulting metric will be output with the same timestamp.

For example, given the following configuration:

```toml
[[inputs.exec]]
  ## Commands array
  commands = ["/usr/bin/mycollector --foo=bar"]

  ## measurement name suffix (for separating different commands)
  name_suffix = "_mycollector"

  ## Data format to consume.
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "json"

  ## List of tag names to extract from top-level of JSON server response
  tag_keys = [
    "my_tag_1",
    "my_tag_2"
  ]
```

with this JSON output from a command:

```json
[
  {
    "a": 5,
    "b": {
      "c": 6
    },
    "my_tag_1": "foo",
    "my_tag_2": "baz"
  },
  {
    "a": 7,
    "b": {
      "c": 8
    },
    "my_tag_1": "bar",
    "my_tag_2": "baz"
  }
]
```

Your Telegraf metrics would get tagged with "my_tag_1" and "my_tag_2"

```
exec_mycollector,my_tag_1=foo,my_tag_2=baz a=5,b_c=6
exec_mycollector,my_tag_1=bar,my_tag_2=baz a=7,b_c=8
```

# Value:

The "value" data format translates single values into Telegraf metrics. This

@@ -176,7 +233,7 @@ name of the plugin.

  name_override = "entropy_available"

  ## Data format to consume.
  ## Each data format has it's own unique set of configuration options, read
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "value"

@@ -334,7 +391,7 @@ There are many more options available,

  name_suffix = "_mycollector"

  ## Data format to consume.
  ## Each data format has it's own unique set of configuration options, read
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "graphite"

@@ -371,14 +428,54 @@ Note: Nagios Input Data Formats is only supported in `exec` input plugin.

```toml
[[inputs.exec]]
  ## Commands array
  commands = ["/usr/lib/nagios/plugins/check_load", "-w 5,6,7 -c 7,8,9"]
  commands = ["/usr/lib/nagios/plugins/check_load -w 5,6,7 -c 7,8,9"]

  ## measurement name suffix (for separating different commands)
  name_suffix = "_mycollector"

  ## Data format to consume.
  ## Each data format has it's own unique set of configuration options, read
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "nagios"
```

# Collectd:

The collectd format parses the collectd binary network protocol. Tags are
created for host, instance, type, and type instance. All collectd values are
added as float64 fields.

For more information about the binary network protocol see
[here](https://collectd.org/wiki/index.php/Binary_protocol).

You can control the cryptographic settings with parser options. Create an
authentication file and set `collectd_auth_file` to the path of the file, then
set the desired security level in `collectd_security_level`.

Additional information including client setup can be found
[here](https://collectd.org/wiki/index.php/Networking_introduction#Cryptographic_setup).

You can also change the path to the typesdb or add additional typesdb using
`collectd_typesdb`.

#### Collectd Configuration:

```toml
[[inputs.socket_listener]]
  service_address = "udp://127.0.0.1:25826"
  name_prefix = "collectd_"

  ## Data format to consume.
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "collectd"

  ## Authentication file for cryptographic security levels
  collectd_auth_file = "/etc/collectd/auth_file"
  ## One of none (default), sign, or encrypt
  collectd_security_level = "encrypt"
  ## Path to TypesDB specifications
  collectd_typesdb = ["/usr/share/collectd/types.db"]
```

@@ -36,7 +36,7 @@ config option, for example, in the `file` output plugin:

  files = ["stdout"]

  ## Data format to output.
  ## Each data format has it's own unique set of configuration options, read
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
  data_format = "influx"

@@ -60,7 +60,7 @@ metrics are serialized directly into InfluxDB line-protocol.

  files = ["stdout", "/tmp/metrics.out"]

  ## Data format to output.
  ## Each data format has it's own unique set of configuration options, read
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
  data_format = "influx"

@@ -104,7 +104,7 @@ tars.cpu-total.us-east-1.cpu.usage_idle 98.09 1455320690

  files = ["stdout", "/tmp/metrics.out"]

  ## Data format to output.
  ## Each data format has it's own unique set of configuration options, read
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
  data_format = "graphite"

@@ -143,8 +143,18 @@ The JSON data format serialized Telegraf metrics in json format. The format is:

  files = ["stdout", "/tmp/metrics.out"]

  ## Data format to output.
  ## Each data format has it's own unique set of configuration options, read
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
  data_format = "json"
  json_timestamp_units = "1ns"
```

By default, the timestamp output with JSON-serialized Telegraf
metrics is in seconds. The precision of this timestamp can be adjusted for any output
by adding the optional `json_timestamp_units` parameter to the configuration for
that output. This parameter can be used to set the timestamp units to nanoseconds (`ns`),
microseconds (`us` or `µs`), milliseconds (`ms`), or seconds (`s`). Note that this
parameter will be truncated to the nearest power of 10, so if the `json_timestamp_units`
are set to `15ms` the timestamps for the JSON format serialized Telegraf metrics will be
output in hundredths of a second (`10ms`).

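As a short sketch of the truncation rule (the metric time is illustrative):

```toml
[[outputs.file]]
  files = ["stdout"]
  data_format = "json"
  ## 15ms is truncated down to the nearest power of 10, i.e. 10ms, so a
  ## metric gathered at t = 1458229140.12345s would be stamped 145822914012
  ## (hundredths of a second).
  json_timestamp_units = "15ms"
```
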
@@ -1,4 +1,5 @@
# List
- collectd.org [MIT LICENSE](https://github.com/collectd/go-collectd/blob/master/LICENSE)
- github.com/Shopify/sarama [MIT LICENSE](https://github.com/Shopify/sarama/blob/master/MIT-LICENSE)
- github.com/Sirupsen/logrus [MIT LICENSE](https://github.com/Sirupsen/logrus/blob/master/LICENSE)
- github.com/armon/go-metrics [MIT LICENSE](https://github.com/armon/go-metrics/blob/master/LICENSE)

@@ -12,11 +13,11 @@
- github.com/gogo/protobuf [BSD LICENSE](https://github.com/gogo/protobuf/blob/master/LICENSE)
- github.com/golang/protobuf [BSD LICENSE](https://github.com/golang/protobuf/blob/master/LICENSE)
- github.com/golang/snappy [BSD LICENSE](https://github.com/golang/snappy/blob/master/LICENSE)
- github.com/gonuts/go-shellquote (No License, but the project it was forked from https://github.com/kballard/go-shellquote is [MIT](https://github.com/kballard/go-shellquote/blob/master/LICENSE)).
- github.com/hashicorp/go-msgpack [BSD LICENSE](https://github.com/hashicorp/go-msgpack/blob/master/LICENSE)
- github.com/hashicorp/raft [MPL LICENSE](https://github.com/hashicorp/raft/blob/master/LICENSE)
- github.com/hashicorp/raft-boltdb [MPL LICENSE](https://github.com/hashicorp/raft-boltdb/blob/master/LICENSE)
- github.com/kardianos/service [ZLIB LICENSE](https://github.com/kardianos/service/blob/master/LICENSE) (License not named but matches word for word with ZLib)
- github.com/kballard/go-shellquote [MIT LICENSE](https://github.com/kballard/go-shellquote/blob/master/LICENSE)
- github.com/lib/pq [MIT LICENSE](https://github.com/lib/pq/blob/master/LICENSE.md)
- github.com/matttproud/golang_protobuf_extensions [APACHE LICENSE](https://github.com/matttproud/golang_protobuf_extensions/blob/master/LICENSE)
- github.com/naoina/go-stringutil [MIT LICENSE](https://github.com/naoina/go-stringutil/blob/master/LICENSE)

@@ -30,4 +31,3 @@
- gopkg.in/dancannon/gorethink.v1 [APACHE LICENSE](https://github.com/dancannon/gorethink/blob/v1.1.2/LICENSE)
- gopkg.in/mgo.v2 [BSD LICENSE](https://github.com/go-mgo/mgo/blob/v2/LICENSE)
- golang.org/x/crypto/ [BSD LICENSE](https://github.com/golang/crypto/blob/master/LICENSE)

docs/PROFILING.md (new file, 24 lines)
@@ -0,0 +1,24 @@

# Telegraf profiling

Telegraf uses the standard package `net/http/pprof`. This package serves runtime profiling data via its HTTP server, in the format expected by the pprof visualization tool.

By default, profiling is turned off.

To enable profiling you need to pass an address to the `pprof-addr` parameter, for example:

```
telegraf --config telegraf.conf --pprof-addr localhost:6060
```

There are several paths to get different profiling information:

To look at the heap profile:

`go tool pprof http://localhost:6060/debug/pprof/heap`

or to look at a 30-second CPU profile:

`go tool pprof http://localhost:6060/debug/pprof/profile?seconds=30`

To view all available profiles, open `http://localhost:6060/debug/pprof/` in your browser.

(File diff suppressed because it is too large.)
@@ -105,10 +105,11 @@
      "% Privileged Time",
      "% User Time",
      "% Processor Time",
      "% DPC Time",
    ]
    Measurement = "win_cpu"
    # Set to true to include _Total instance when querying for all (*).
    #IncludeTotal=false
    IncludeTotal=true

  [[inputs.win_perf_counters.object]]
    # Disk times and queues

@@ -116,20 +117,54 @@
    Instances = ["*"]
    Counters = [
      "% Idle Time",
      "% Disk Time","% Disk Read Time",
      "% Disk Time",
      "% Disk Read Time",
      "% Disk Write Time",
      "% User Time",
      "Current Disk Queue Length",
      "% Free Space",
      "Free Megabytes",
    ]
    Measurement = "win_disk"
    # Set to true to include _Total instance when querying for all (*).
    #IncludeTotal=false

  [[inputs.win_perf_counters.object]]
    ObjectName = "PhysicalDisk"
    Instances = ["*"]
    Counters = [
      "Disk Read Bytes/sec",
      "Disk Write Bytes/sec",
      "Current Disk Queue Length",
      "Disk Reads/sec",
      "Disk Writes/sec",
      "% Disk Time",
      "% Disk Read Time",
      "% Disk Write Time",
    ]
    Measurement = "win_diskio"

  [[inputs.win_perf_counters.object]]
    ObjectName = "Network Interface"
    Instances = ["*"]
    Counters = [
      "Bytes Received/sec",
      "Bytes Sent/sec",
      "Packets Received/sec",
      "Packets Sent/sec",
      "Packets Received Discarded",
      "Packets Outbound Discarded",
      "Packets Received Errors",
      "Packets Outbound Errors",
    ]
    Measurement = "win_net"

  [[inputs.win_perf_counters.object]]
    ObjectName = "System"
    Counters = [
      "Context Switches/sec",
      "System Calls/sec",
      "Processor Queue Length",
      "System Up Time",
    ]
    Instances = ["------"]
    Measurement = "win_system"

@@ -149,6 +184,10 @@
      "Transition Faults/sec",
      "Pool Nonpaged Bytes",
      "Pool Paged Bytes",
      "Standby Cache Reserve Bytes",
      "Standby Cache Normal Priority Bytes",
      "Standby Cache Core Bytes",
    ]
    # Use 6 x - to remove the Instance bit from the query.
    Instances = ["------"]

@@ -156,6 +195,31 @@
    # Set to true to include _Total instance when querying for all (*).
    #IncludeTotal=false

  [[inputs.win_perf_counters.object]]
    # Example query where the Instance portion must be removed to get data back,
    # such as from the Paging File object.
    ObjectName = "Paging File"
    Counters = [
      "% Usage",
    ]
    Instances = ["_Total"]
    Measurement = "win_swap"

  [[inputs.win_perf_counters.object]]
    ObjectName = "Network Interface"
    Instances = ["*"]
    Counters = [
      "Bytes Sent/sec",
      "Bytes Received/sec",
      "Packets Sent/sec",
      "Packets Received/sec",
      "Packets Received Discarded",
      "Packets Received Errors",
      "Packets Outbound Discarded",
      "Packets Outbound Errors",
    ]

# Windows system plugins using WMI (disabled by default, using
# win_perf_counters over WMI is recommended)

@@ -4,15 +4,17 @@ import (
	"sync"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/selfstat"
)

var (
	MetricsWritten = selfstat.Register("agent", "metrics_written", map[string]string{})
	MetricsDropped = selfstat.Register("agent", "metrics_dropped", map[string]string{})
)

// Buffer is an object for storing metrics in a circular buffer.
type Buffer struct {
	buf chan telegraf.Metric
	// total dropped metrics
	drops int
	// total metrics added
	total int

	mu sync.Mutex
}
@@ -36,27 +38,18 @@ func (b *Buffer) Len() int {
|
||||
return len(b.buf)
|
||||
}
|
||||
|
||||
// Drops returns the total number of dropped metrics that have occured in this
|
||||
// buffer since instantiation.
|
||||
func (b *Buffer) Drops() int {
|
||||
return b.drops
|
||||
}
|
||||
|
||||
// Total returns the total number of metrics that have been added to this buffer.
|
||||
func (b *Buffer) Total() int {
|
||||
return b.total
|
||||
}
|
||||
|
||||
// Add adds metrics to the buffer.
|
||||
func (b *Buffer) Add(metrics ...telegraf.Metric) {
|
||||
for i, _ := range metrics {
|
||||
b.total++
|
||||
MetricsWritten.Incr(1)
|
||||
select {
|
||||
case b.buf <- metrics[i]:
|
||||
default:
|
||||
b.drops++
|
||||
b.mu.Lock()
|
||||
MetricsDropped.Incr(1)
|
||||
<-b.buf
|
||||
b.buf <- metrics[i]
|
||||
b.mu.Unlock()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
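The select/default above is what makes this buffer circular: when the channel is full, the oldest metric is evicted to admit the newest. A minimal, self-contained sketch of the same drop-oldest pattern (names and the string payload are illustrative, not Telegraf's API; the original also takes a mutex because concurrent Adds may race on the evict-then-insert step):

package main

import "fmt"

// ring keeps the most recent cap(buf) items, evicting the oldest on overflow.
type ring struct {
    buf   chan string
    drops int
}

func (r *ring) add(items ...string) {
    for _, it := range items {
        select {
        case r.buf <- it: // room available
        default: // full: evict the oldest, then insert the new item
            r.drops++
            <-r.buf
            r.buf <- it
        }
    }
}

func main() {
    r := &ring{buf: make(chan string, 3)}
    r.add("a", "b", "c", "d", "e")
    fmt.Println("drops:", r.drops) // drops: 2
    for len(r.buf) > 0 {
        fmt.Println(<-r.buf) // c, d, e
    }
}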
@@ -27,47 +27,53 @@ func BenchmarkAddMetrics(b *testing.B) {

func TestNewBufferBasicFuncs(t *testing.T) {
    b := NewBuffer(10)
    MetricsDropped.Set(0)
    MetricsWritten.Set(0)

    assert.True(t, b.IsEmpty())
    assert.Zero(t, b.Len())
    assert.Zero(t, b.Drops())
    assert.Zero(t, b.Total())
    assert.Zero(t, MetricsDropped.Get())
    assert.Zero(t, MetricsWritten.Get())

    m := testutil.TestMetric(1, "mymetric")
    b.Add(m)
    assert.False(t, b.IsEmpty())
    assert.Equal(t, b.Len(), 1)
    assert.Equal(t, b.Drops(), 0)
    assert.Equal(t, b.Total(), 1)
    assert.Equal(t, int64(0), MetricsDropped.Get())
    assert.Equal(t, int64(1), MetricsWritten.Get())

    b.Add(metricList...)
    assert.False(t, b.IsEmpty())
    assert.Equal(t, b.Len(), 6)
    assert.Equal(t, b.Drops(), 0)
    assert.Equal(t, b.Total(), 6)
    assert.Equal(t, int64(0), MetricsDropped.Get())
    assert.Equal(t, int64(6), MetricsWritten.Get())
}

func TestDroppingMetrics(t *testing.T) {
    b := NewBuffer(10)
    MetricsDropped.Set(0)
    MetricsWritten.Set(0)

    // Add up to the size of the buffer
    b.Add(metricList...)
    b.Add(metricList...)
    assert.False(t, b.IsEmpty())
    assert.Equal(t, b.Len(), 10)
    assert.Equal(t, b.Drops(), 0)
    assert.Equal(t, b.Total(), 10)
    assert.Equal(t, int64(0), MetricsDropped.Get())
    assert.Equal(t, int64(10), MetricsWritten.Get())

    // Add 5 more and verify they were dropped
    b.Add(metricList...)
    assert.False(t, b.IsEmpty())
    assert.Equal(t, b.Len(), 10)
    assert.Equal(t, b.Drops(), 5)
    assert.Equal(t, b.Total(), 15)
    assert.Equal(t, int64(5), MetricsDropped.Get())
    assert.Equal(t, int64(15), MetricsWritten.Get())
}

func TestGettingBatches(t *testing.T) {
    b := NewBuffer(20)
    MetricsDropped.Set(0)
    MetricsWritten.Set(0)

    // Verify that the buffer returned is smaller than requested when there are
    // not as many items as requested.
@@ -78,8 +84,8 @@ func TestGettingBatches(t *testing.T) {
    // Verify that the buffer is now empty
    assert.True(t, b.IsEmpty())
    assert.Zero(t, b.Len())
    assert.Zero(t, b.Drops())
    assert.Equal(t, b.Total(), 5)
    assert.Zero(t, MetricsDropped.Get())
    assert.Equal(t, int64(5), MetricsWritten.Get())

    // Verify that the buffer returned is not more than the size requested
    b.Add(metricList...)
@@ -89,6 +95,6 @@ func TestGettingBatches(t *testing.T) {
    // Verify that buffer is not empty
    assert.False(t, b.IsEmpty())
    assert.Equal(t, b.Len(), 2)
    assert.Equal(t, b.Drops(), 0)
    assert.Equal(t, b.Total(), 10)
    assert.Equal(t, int64(0), MetricsDropped.Get())
    assert.Equal(t, int64(10), MetricsWritten.Get())
}
@@ -6,6 +6,7 @@ import (
    "fmt"
    "io/ioutil"
    "log"
    "math"
    "os"
    "path/filepath"
    "regexp"
@@ -25,7 +26,6 @@ import (
    "github.com/influxdata/telegraf/plugins/processors"
    "github.com/influxdata/telegraf/plugins/serializers"

    "github.com/influxdata/config"
    "github.com/influxdata/toml"
    "github.com/influxdata/toml/ast"
)
@@ -85,8 +85,8 @@ type AgentConfig struct {
    // ie, if Interval=10s then always collect on :00, :10, :20, etc.
    RoundInterval bool

    // By default, precision will be set to the same timestamp order as the
    // collection interval, with the maximum being 1s.
    // By default or when set to "0s", precision will be set to the same
    // timestamp order as the collection interval, with the maximum being 1s.
    // ie, when interval = "10s", precision will be "1s"
    //     when interval = "250ms", precision will be "1ms"
    // Precision will NOT be used for service inputs. It is up to each individual
@@ -230,10 +230,13 @@ var header = `# Telegraf Configuration
  ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
  flush_jitter = "0s"

  ## By default, precision will be set to the same timestamp order as the
  ## collection interval, with the maximum being 1s.
  ## Precision will NOT be used for service inputs, such as logparser and statsd.
  ## Valid values are "ns", "us" (or "µs"), "ms", "s".
  ## By default or when set to "0s", precision will be set to the same
  ## timestamp order as the collection interval, with the maximum being 1s.
  ## ie, when interval = "10s", precision will be "1s"
  ##     when interval = "250ms", precision will be "1ms"
  ## Precision will NOT be used for service inputs. It is up to each individual
  ## service input to set the timestamp at the appropriate precision.
  ## Valid time units are "ns", "us" (or "µs"), "ms", "s".
  precision = ""

  ## Logging configuration:
@@ -506,6 +509,10 @@ func PrintOutputConfig(name string) error {

func (c *Config) LoadDirectory(path string) error {
    walkfn := func(thispath string, info os.FileInfo, _ error) error {
        if info == nil {
            log.Printf("W! Telegraf is not permitted to read %s", thispath)
            return nil
        }
        if info.IsDir() {
            return nil
        }
@@ -566,7 +573,7 @@ func (c *Config) LoadConfig(path string) error {
            if !ok {
                return fmt.Errorf("%s: invalid configuration", path)
            }
            if err = config.UnmarshalTable(subTable, c.Tags); err != nil {
            if err = toml.UnmarshalTable(subTable, c.Tags); err != nil {
                log.Printf("E! Could not parse [global_tags] config\n")
                return fmt.Errorf("Error parsing %s, %s", path, err)
            }
@@ -579,7 +586,7 @@ func (c *Config) LoadConfig(path string) error {
            if !ok {
                return fmt.Errorf("%s: invalid configuration", path)
            }
            if err = config.UnmarshalTable(subTable, c.Agent); err != nil {
            if err = toml.UnmarshalTable(subTable, c.Agent); err != nil {
                log.Printf("E! Could not parse [agent] config\n")
                return fmt.Errorf("Error parsing %s, %s", path, err)
            }
@@ -716,7 +723,7 @@ func (c *Config) addAggregator(name string, table *ast.Table) error {
        return err
    }

    if err := config.UnmarshalTable(table, aggregator); err != nil {
    if err := toml.UnmarshalTable(table, aggregator); err != nil {
        return err
    }

@@ -736,7 +743,7 @@ func (c *Config) addProcessor(name string, table *ast.Table) error {
        return err
    }

    if err := config.UnmarshalTable(table, processor); err != nil {
    if err := toml.UnmarshalTable(table, processor); err != nil {
        return err
    }

@@ -776,7 +783,7 @@ func (c *Config) addOutput(name string, table *ast.Table) error {
        return err
    }

    if err := config.UnmarshalTable(table, output); err != nil {
    if err := toml.UnmarshalTable(table, output); err != nil {
        return err
    }

@@ -817,14 +824,11 @@ func (c *Config) addInput(name string, table *ast.Table) error {
        return err
    }

    if err := config.UnmarshalTable(table, input); err != nil {
    if err := toml.UnmarshalTable(table, input); err != nil {
        return err
    }

    rp := &models.RunningInput{
        Input:  input,
        Config: pluginConfig,
    }
    rp := models.NewRunningInput(input, pluginConfig)
    c.Inputs = append(c.Inputs, rp)
    return nil
}
@@ -912,7 +916,7 @@ func buildAggregator(name string, tbl *ast.Table) (*models.AggregatorConfig, err
    conf.Tags = make(map[string]string)
    if node, ok := tbl.Fields["tags"]; ok {
        if subtbl, ok := node.(*ast.Table); ok {
            if err := config.UnmarshalTable(subtbl, conf.Tags); err != nil {
            if err := toml.UnmarshalTable(subtbl, conf.Tags); err != nil {
                log.Printf("Could not parse tags for input %s\n", name)
            }
        }
@@ -1149,7 +1153,7 @@ func buildInput(name string, tbl *ast.Table) (*models.InputConfig, error) {
    cp.Tags = make(map[string]string)
    if node, ok := tbl.Fields["tags"]; ok {
        if subtbl, ok := node.(*ast.Table); ok {
            if err := config.UnmarshalTable(subtbl, cp.Tags); err != nil {
            if err := toml.UnmarshalTable(subtbl, cp.Tags); err != nil {
                log.Printf("E! Could not parse tags for input %s\n", name)
            }
        }
@@ -1229,6 +1233,34 @@ func buildParser(name string, tbl *ast.Table) (parsers.Parser, error) {
        }
    }

    if node, ok := tbl.Fields["collectd_auth_file"]; ok {
        if kv, ok := node.(*ast.KeyValue); ok {
            if str, ok := kv.Value.(*ast.String); ok {
                c.CollectdAuthFile = str.Value
            }
        }
    }

    if node, ok := tbl.Fields["collectd_security_level"]; ok {
        if kv, ok := node.(*ast.KeyValue); ok {
            if str, ok := kv.Value.(*ast.String); ok {
                c.CollectdSecurityLevel = str.Value
            }
        }
    }

    if node, ok := tbl.Fields["collectd_typesdb"]; ok {
        if kv, ok := node.(*ast.KeyValue); ok {
            if ary, ok := kv.Value.(*ast.Array); ok {
                for _, elem := range ary.Value {
                    if str, ok := elem.(*ast.String); ok {
                        c.CollectdTypesDB = append(c.CollectdTypesDB, str.Value)
                    }
                }
            }
        }
    }

    c.MetricName = name

    delete(tbl.Fields, "data_format")
@@ -1236,6 +1268,9 @@ func buildParser(name string, tbl *ast.Table) (parsers.Parser, error) {
    delete(tbl.Fields, "templates")
    delete(tbl.Fields, "tag_keys")
    delete(tbl.Fields, "data_type")
    delete(tbl.Fields, "collectd_auth_file")
    delete(tbl.Fields, "collectd_security_level")
    delete(tbl.Fields, "collectd_typesdb")

    return parsers.NewParser(c)
}
@@ -1244,7 +1279,7 @@ func buildParser(name string, tbl *ast.Table) (parsers.Parser, error) {
// a serializers.Serializer object, and creates it, which can then be added onto
// an Output object.
func buildSerializer(name string, tbl *ast.Table) (serializers.Serializer, error) {
    c := &serializers.Config{}
    c := &serializers.Config{TimestampUnits: time.Duration(1 * time.Second)}

    if node, ok := tbl.Fields["data_format"]; ok {
        if kv, ok := node.(*ast.KeyValue); ok {
@@ -1274,9 +1309,26 @@ func buildSerializer(name string, tbl *ast.Table) (serializers.Serializer, error
        }
    }

    if node, ok := tbl.Fields["json_timestamp_units"]; ok {
        if kv, ok := node.(*ast.KeyValue); ok {
            if str, ok := kv.Value.(*ast.String); ok {
                timestampVal, err := time.ParseDuration(str.Value)
                if err != nil {
                    return nil, fmt.Errorf("Unable to parse json_timestamp_units as a duration, %s", err)
                }
                // now that we have a duration, truncate it to the nearest
                // power of ten (just in case)
                nearest_exponent := int64(math.Log10(float64(timestampVal.Nanoseconds())))
                new_nanoseconds := int64(math.Pow(10.0, float64(nearest_exponent)))
                c.TimestampUnits = time.Duration(new_nanoseconds)
            }
        }
    }

    delete(tbl.Fields, "data_format")
    delete(tbl.Fields, "prefix")
    delete(tbl.Fields, "template")
    delete(tbl.Fields, "json_timestamp_units")
    return serializers.NewSerializer(c)
}

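The Log10/Pow pair above snaps an arbitrary duration down to the nearest power of ten of nanoseconds. A self-contained sketch of the same arithmetic with worked values (the truncate helper is illustrative, not part of the config package):

package main

import (
    "fmt"
    "math"
    "time"
)

// truncate rounds a duration down to a power of ten of nanoseconds,
// mirroring the json_timestamp_units logic in buildSerializer.
func truncate(d time.Duration) time.Duration {
    exp := int64(math.Log10(float64(d.Nanoseconds())))
    return time.Duration(int64(math.Pow(10, float64(exp))))
}

func main() {
    for _, s := range []string{"1ns", "2ms", "15s"} {
        d, err := time.ParseDuration(s)
        if err != nil {
            panic(err)
        }
        // 1ns -> 1ns, 2ms -> 1ms (10^6 ns), 15s -> 10s (10^10 ns)
        fmt.Printf("%-4s -> %v\n", s, truncate(d))
    }
}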
2  internal/config/testdata/telegraf-agent.toml  (vendored)
@@ -60,7 +60,7 @@
  # Kafka topic for producer messages
  topic = "telegraf"
  # Telegraf tag to use as a routing key
  # ie, if this tag exists, it's value will be used as the routing key
  # ie, if this tag exists, its value will be used as the routing key
  routing_tag = "host"

@@ -1,37 +0,0 @@
package errchan

import (
    "fmt"
    "strings"
)

type ErrChan struct {
    C chan error
}

// New returns an error channel of max length 'n'
// errors can be sent to the ErrChan.C channel, and will be returned when
// ErrChan.Error() is called.
func New(n int) *ErrChan {
    return &ErrChan{
        C: make(chan error, n),
    }
}

// Error closes the ErrChan.C channel and returns an error if there are any
// non-nil errors, otherwise returns nil.
func (e *ErrChan) Error() error {
    close(e.C)

    var out string
    for err := range e.C {
        if err != nil {
            out += "[" + err.Error() + "], "
        }
    }

    if out != "" {
        return fmt.Errorf("Errors encountered: " + strings.TrimRight(out, ", "))
    }
    return nil
}
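The deleted package implemented a small fan-in: size the channel for the expected number of sends, let each goroutine send its (possibly nil) error, then close and aggregate. A compressed, runnable rendition of that pattern (an illustration of the removed API, not the file itself):

package main

import (
    "fmt"
    "strings"
    "sync"
)

// A trimmed copy of the deleted errchan type, for illustration only.
type ErrChan struct{ C chan error }

func New(n int) *ErrChan { return &ErrChan{C: make(chan error, n)} }

func (e *ErrChan) Error() error {
    close(e.C)
    var out string
    for err := range e.C {
        if err != nil {
            out += "[" + err.Error() + "], "
        }
    }
    if out != "" {
        return fmt.Errorf("Errors encountered: %s", strings.TrimRight(out, ", "))
    }
    return nil
}

func main() {
    ec := New(3) // capacity must cover every send, or a send will block
    var wg sync.WaitGroup
    for i := 0; i < 3; i++ {
        wg.Add(1)
        go func(i int) {
            defer wg.Done()
            if i == 1 {
                ec.C <- fmt.Errorf("worker %d failed", i)
                return
            }
            ec.C <- nil // successes send nil; Error() skips them
        }(i)
    }
    wg.Wait()
    fmt.Println(ec.Error()) // Errors encountered: [worker 1 failed]
}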
@@ -28,7 +28,7 @@ func TestCompileAndMatch(t *testing.T) {
    require.NoError(t, err)

    matches := g1.Match()
    assert.Len(t, matches, 3)
    assert.Len(t, matches, 6)
    matches = g2.Match()
    assert.Len(t, matches, 2)
    matches = g3.Match()
@@ -56,6 +56,16 @@ func TestFindRootDir(t *testing.T) {
    }
}

func TestFindNestedTextFile(t *testing.T) {
    dir := getTestdataDir()
    // test super asterisk
    g1, err := Compile(dir + "/**.txt")
    require.NoError(t, err)

    matches := g1.Match()
    assert.Len(t, matches, 1)
}

func getTestdataDir() string {
    _, filename, _, _ := runtime.Caller(1)
    return strings.Replace(filename, "globpath_test.go", "testdata", 1)
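The new test exercises the super-asterisk: "**" crosses directory boundaries, so a file two levels down still matches where "*.txt" would not. A usage sketch assuming only the Compile/Match API the test itself calls (the path is illustrative):

package main

import (
    "log"

    "github.com/influxdata/telegraf/internal/globpath"
)

func main() {
    // "**" descends into subdirectories, so nested1/nested2/nested.txt
    // under the root would be found by this pattern.
    g, err := globpath.Compile("/var/log/**.txt")
    if err != nil {
        log.Fatal(err)
    }
    matches := g.Match()
    log.Printf("found %d matches", len(matches))
}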
0  internal/globpath/testdata/nested1/nested2/nested.txt  (vendored, new file)
@@ -6,6 +6,7 @@ import (
    "time"

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/metric"
)

// makemetric is used by both RunningAggregator & RunningInput
@@ -31,7 +32,6 @@ func makemetric(
    daemonTags map[string]string,
    filter Filter,
    applyFilter bool,
    debug bool,
    mType telegraf.ValueType,
    t time.Time,
) telegraf.Metric {
@@ -122,11 +122,9 @@ func makemetric(
        case float64:
            // NaNs are invalid values in influxdb, skip measurement
            if math.IsNaN(val) || math.IsInf(val, 0) {
                if debug {
                    log.Printf("Measurement [%s] field [%s] has a NaN or Inf "+
                        "field, skipping",
                        measurement, k)
                }
                log.Printf("D! Measurement [%s] field [%s] has a NaN or Inf "+
                    "field, skipping",
                    measurement, k)
                delete(fields, k)
                continue
            }
@@ -135,16 +133,7 @@ func makemetric(
        }
    }

    var m telegraf.Metric
    var err error
    switch mType {
    case telegraf.Counter:
        m, err = telegraf.NewCounterMetric(measurement, tags, fields, t)
    case telegraf.Gauge:
        m, err = telegraf.NewGaugeMetric(measurement, tags, fields, t)
    default:
        m, err = telegraf.NewMetric(measurement, tags, fields, t)
    }
    m, err := metric.New(measurement, tags, fields, t, mType)
    if err != nil {
        log.Printf("Error adding point [%s]: %s\n", measurement, err.Error())
        return nil
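The rewritten hunk keeps the same guard: NaN and +/-Inf float64 fields are deleted before the metric is built, since InfluxDB line protocol cannot represent them. A self-contained sketch of that filter:

package main

import (
    "fmt"
    "math"
)

func main() {
    fields := map[string]interface{}{
        "ok":   1.5,
        "nan":  math.NaN(),
        "inf":  math.Inf(1),
        "ninf": math.Inf(-1),
    }
    // Same guard as makemetric: drop any float64 field that is NaN or +/-Inf.
    for k, v := range fields {
        if f, ok := v.(float64); ok && (math.IsNaN(f) || math.IsInf(f, 0)) {
            delete(fields, k)
        }
    }
    fmt.Println(fields) // map[ok:1.5]
}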
@@ -4,6 +4,7 @@ import (
    "time"

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/metric"
)

type RunningAggregator struct {
@@ -65,12 +66,13 @@ func (r *RunningAggregator) MakeMetric(
        nil,
        r.Config.Filter,
        false,
        false,
        mType,
        t,
    )

    m.SetAggregate(true)
    if m != nil {
        m.SetAggregate(true)
    }

    return m
}
@@ -90,7 +92,7 @@ func (r *RunningAggregator) Add(in telegraf.Metric) bool {
            return false
        }

        in, _ = telegraf.NewMetric(name, tags, fields, t)
        in, _ = metric.New(name, tags, fields, t)
    }

    r.metrics <- in
@@ -184,8 +184,8 @@ func TestMakeMetricA(t *testing.T) {
    )
    assert.Equal(
        t,
        fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
        m.String(),
        fmt.Sprintf("RITest value=101i %d", now.UnixNano()),
    )
    assert.Equal(
        t,
@@ -202,8 +202,8 @@ func TestMakeMetricA(t *testing.T) {
    )
    assert.Equal(
        t,
        fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
        m.String(),
        fmt.Sprintf("RITest value=101i %d", now.UnixNano()),
    )
    assert.Equal(
        t,
@@ -220,8 +220,8 @@ func TestMakeMetricA(t *testing.T) {
    )
    assert.Equal(
        t,
        fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
        m.String(),
        fmt.Sprintf("RITest value=101i %d", now.UnixNano()),
    )
    assert.Equal(
        t,

@@ -5,15 +5,34 @@ import (
    "time"

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/selfstat"
)

var GlobalMetricsGathered = selfstat.Register("agent", "metrics_gathered", map[string]string{})

type RunningInput struct {
    Input  telegraf.Input
    Config *InputConfig

    trace       bool
    debug       bool
    defaultTags map[string]string

    MetricsGathered selfstat.Stat
}

func NewRunningInput(
    input telegraf.Input,
    config *InputConfig,
) *RunningInput {
    return &RunningInput{
        Input:  input,
        Config: config,
        MetricsGathered: selfstat.Register(
            "gather",
            "metrics_gathered",
            map[string]string{"input": config.Name},
        ),
    }
}

// InputConfig containing a name, interval, and filter
@@ -51,26 +70,19 @@ func (r *RunningInput) MakeMetric(
        r.defaultTags,
        r.Config.Filter,
        true,
        r.debug,
        mType,
        t,
    )

    if r.trace && m != nil {
        fmt.Println("> " + m.String())
        fmt.Print("> " + m.String())
    }

    r.MetricsGathered.Incr(1)
    GlobalMetricsGathered.Incr(1)
    return m
}

func (r *RunningInput) Debug() bool {
    return r.debug
}

func (r *RunningInput) SetDebug(debug bool) {
    r.debug = debug
}

func (r *RunningInput) Trace() bool {
    return r.trace
}
@@ -13,11 +13,9 @@ import (

func TestMakeMetricNoFields(t *testing.T) {
    now := time.Now()
    ri := RunningInput{
        Config: &InputConfig{
            Name: "TestRunningInput",
        },
    }
    ri := NewRunningInput(&testInput{}, &InputConfig{
        Name: "TestRunningInput",
    })

    m := ri.MakeMetric(
        "RITest",
@@ -32,11 +30,9 @@ func TestMakeMetricNoFields(t *testing.T) {
// nil fields should get dropped
func TestMakeMetricNilFields(t *testing.T) {
    now := time.Now()
    ri := RunningInput{
        Config: &InputConfig{
            Name: "TestRunningInput",
        },
    }
    ri := NewRunningInput(&testInput{}, &InputConfig{
        Name: "TestRunningInput",
    })

    m := ri.MakeMetric(
        "RITest",
@@ -50,7 +46,7 @@ func TestMakeMetricNilFields(t *testing.T) {
    )
    assert.Equal(
        t,
        fmt.Sprintf("RITest value=101i %d", now.UnixNano()),
        fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
        m.String(),
    )
}
@@ -58,13 +54,10 @@ func TestMakeMetricNilFields(t *testing.T) {
// make an untyped, counter, & gauge metric
func TestMakeMetric(t *testing.T) {
    now := time.Now()
    ri := RunningInput{
        Config: &InputConfig{
            Name: "TestRunningInput",
        },
    }
    ri.SetDebug(true)
    assert.Equal(t, true, ri.Debug())
    ri := NewRunningInput(&testInput{}, &InputConfig{
        Name: "TestRunningInput",
    })

    ri.SetTrace(true)
    assert.Equal(t, true, ri.Trace())
    assert.Equal(t, "inputs.TestRunningInput", ri.Name())
@@ -78,8 +71,8 @@ func TestMakeMetric(t *testing.T) {
    )
    assert.Equal(
        t,
        fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
        m.String(),
        fmt.Sprintf("RITest value=101i %d", now.UnixNano()),
    )
    assert.Equal(
        t,
@@ -96,8 +89,8 @@ func TestMakeMetric(t *testing.T) {
    )
    assert.Equal(
        t,
        fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
        m.String(),
        fmt.Sprintf("RITest value=101i %d", now.UnixNano()),
    )
    assert.Equal(
        t,
@@ -114,8 +107,8 @@ func TestMakeMetric(t *testing.T) {
    )
    assert.Equal(
        t,
        fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
        m.String(),
        fmt.Sprintf("RITest value=101i %d", now.UnixNano()),
    )
    assert.Equal(
        t,
@@ -126,16 +119,13 @@ func TestMakeMetric(t *testing.T) {

func TestMakeMetricWithPluginTags(t *testing.T) {
    now := time.Now()
    ri := RunningInput{
        Config: &InputConfig{
            Name: "TestRunningInput",
            Tags: map[string]string{
                "foo": "bar",
            },
    ri := NewRunningInput(&testInput{}, &InputConfig{
        Name: "TestRunningInput",
        Tags: map[string]string{
            "foo": "bar",
        },
    }
    ri.SetDebug(true)
    assert.Equal(t, true, ri.Debug())
    })

    ri.SetTrace(true)
    assert.Equal(t, true, ri.Trace())

@@ -148,24 +138,21 @@ func TestMakeMetricWithPluginTags(t *testing.T) {
    )
    assert.Equal(
        t,
        fmt.Sprintf("RITest,foo=bar value=101i %d\n", now.UnixNano()),
        m.String(),
        fmt.Sprintf("RITest,foo=bar value=101i %d", now.UnixNano()),
    )
}

func TestMakeMetricFilteredOut(t *testing.T) {
    now := time.Now()
    ri := RunningInput{
        Config: &InputConfig{
            Name: "TestRunningInput",
            Tags: map[string]string{
                "foo": "bar",
            },
            Filter: Filter{NamePass: []string{"foobar"}},
    ri := NewRunningInput(&testInput{}, &InputConfig{
        Name: "TestRunningInput",
        Tags: map[string]string{
            "foo": "bar",
        },
    }
    ri.SetDebug(true)
    assert.Equal(t, true, ri.Debug())
        Filter: Filter{NamePass: []string{"foobar"}},
    })

    ri.SetTrace(true)
    assert.Equal(t, true, ri.Trace())
    assert.NoError(t, ri.Config.Filter.Compile())
@@ -182,16 +169,13 @@ func TestMakeMetricFilteredOut(t *testing.T) {

func TestMakeMetricWithDaemonTags(t *testing.T) {
    now := time.Now()
    ri := RunningInput{
        Config: &InputConfig{
            Name: "TestRunningInput",
        },
    }
    ri := NewRunningInput(&testInput{}, &InputConfig{
        Name: "TestRunningInput",
    })
    ri.SetDefaultTags(map[string]string{
        "foo": "bar",
    })
    ri.SetDebug(true)
    assert.Equal(t, true, ri.Debug())

    ri.SetTrace(true)
    assert.Equal(t, true, ri.Trace())

@@ -204,8 +188,8 @@ func TestMakeMetricWithDaemonTags(t *testing.T) {
    )
    assert.Equal(
        t,
        fmt.Sprintf("RITest,foo=bar value=101i %d\n", now.UnixNano()),
        m.String(),
        fmt.Sprintf("RITest,foo=bar value=101i %d", now.UnixNano()),
    )
}

@@ -214,13 +198,10 @@ func TestMakeMetricInfFields(t *testing.T) {
    inf := math.Inf(1)
    ninf := math.Inf(-1)
    now := time.Now()
    ri := RunningInput{
        Config: &InputConfig{
            Name: "TestRunningInput",
        },
    }
    ri.SetDebug(true)
    assert.Equal(t, true, ri.Debug())
    ri := NewRunningInput(&testInput{}, &InputConfig{
        Name: "TestRunningInput",
    })

    ri.SetTrace(true)
    assert.Equal(t, true, ri.Trace())

@@ -237,20 +218,17 @@ func TestMakeMetricInfFields(t *testing.T) {
    )
    assert.Equal(
        t,
        fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
        m.String(),
        fmt.Sprintf("RITest value=101i %d", now.UnixNano()),
    )
}

func TestMakeMetricAllFieldTypes(t *testing.T) {
    now := time.Now()
    ri := RunningInput{
        Config: &InputConfig{
            Name: "TestRunningInput",
        },
    }
    ri.SetDebug(true)
    assert.Equal(t, true, ri.Debug())
    ri := NewRunningInput(&testInput{}, &InputConfig{
        Name: "TestRunningInput",
    })

    ri.SetTrace(true)
    assert.Equal(t, true, ri.Trace())

@@ -275,21 +253,28 @@ func TestMakeMetricAllFieldTypes(t *testing.T) {
        telegraf.Untyped,
        now,
    )
    assert.Equal(
        t,
        fmt.Sprintf("RITest a=10i,b=10i,c=10i,d=10i,e=10i,f=10i,g=10i,h=10i,i=10i,j=10,k=9223372036854775807i,l=\"foobar\",m=true %d", now.UnixNano()),
        m.String(),
    )
    assert.Contains(t, m.String(), "a=10i")
    assert.Contains(t, m.String(), "b=10i")
    assert.Contains(t, m.String(), "c=10i")
    assert.Contains(t, m.String(), "d=10i")
    assert.Contains(t, m.String(), "e=10i")
    assert.Contains(t, m.String(), "f=10i")
    assert.Contains(t, m.String(), "g=10i")
    assert.Contains(t, m.String(), "h=10i")
    assert.Contains(t, m.String(), "i=10i")
    assert.Contains(t, m.String(), "j=10")
    assert.NotContains(t, m.String(), "j=10i")
    assert.Contains(t, m.String(), "k=9223372036854775807i")
    assert.Contains(t, m.String(), "l=\"foobar\"")
    assert.Contains(t, m.String(), "m=true")
}

func TestMakeMetricNameOverride(t *testing.T) {
    now := time.Now()
    ri := RunningInput{
        Config: &InputConfig{
            Name:         "TestRunningInput",
            NameOverride: "foobar",
        },
    }
    ri := NewRunningInput(&testInput{}, &InputConfig{
        Name:         "TestRunningInput",
        NameOverride: "foobar",
    })

    m := ri.MakeMetric(
        "RITest",
@@ -300,19 +285,17 @@ func TestMakeMetricNameOverride(t *testing.T) {
    )
    assert.Equal(
        t,
        fmt.Sprintf("foobar value=101i %d\n", now.UnixNano()),
        m.String(),
        fmt.Sprintf("foobar value=101i %d", now.UnixNano()),
    )
}

func TestMakeMetricNamePrefix(t *testing.T) {
    now := time.Now()
    ri := RunningInput{
        Config: &InputConfig{
            Name:              "TestRunningInput",
            MeasurementPrefix: "foobar_",
        },
    }
    ri := NewRunningInput(&testInput{}, &InputConfig{
        Name:              "TestRunningInput",
        MeasurementPrefix: "foobar_",
    })

    m := ri.MakeMetric(
        "RITest",
@@ -323,19 +306,17 @@ func TestMakeMetricNamePrefix(t *testing.T) {
    )
    assert.Equal(
        t,
        fmt.Sprintf("foobar_RITest value=101i %d\n", now.UnixNano()),
        m.String(),
        fmt.Sprintf("foobar_RITest value=101i %d", now.UnixNano()),
    )
}

func TestMakeMetricNameSuffix(t *testing.T) {
    now := time.Now()
    ri := RunningInput{
        Config: &InputConfig{
            Name:              "TestRunningInput",
            MeasurementSuffix: "_foobar",
        },
    }
    ri := NewRunningInput(&testInput{}, &InputConfig{
        Name:              "TestRunningInput",
        MeasurementSuffix: "_foobar",
    })

    m := ri.MakeMetric(
        "RITest",
@@ -346,7 +327,13 @@ func TestMakeMetricNameSuffix(t *testing.T) {
    )
    assert.Equal(
        t,
        fmt.Sprintf("RITest_foobar value=101i %d\n", now.UnixNano()),
        m.String(),
        fmt.Sprintf("RITest_foobar value=101i %d", now.UnixNano()),
    )
}

type testInput struct{}

func (t *testInput) Description() string                   { return "" }
func (t *testInput) SampleConfig() string                  { return "" }
func (t *testInput) Gather(acc telegraf.Accumulator) error { return nil }
@@ -6,6 +6,8 @@ import (

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/internal/buffer"
    "github.com/influxdata/telegraf/metric"
    "github.com/influxdata/telegraf/selfstat"
)

const (
@@ -21,10 +23,15 @@ type RunningOutput struct {
    Name              string
    Output            telegraf.Output
    Config            *OutputConfig
    Quiet             bool
    MetricBufferLimit int
    MetricBatchSize   int

    MetricsFiltered selfstat.Stat
    MetricsWritten  selfstat.Stat
    BufferSize      selfstat.Stat
    BufferLimit     selfstat.Stat
    WriteTime       selfstat.Stat

    metrics     *buffer.Buffer
    failMetrics *buffer.Buffer
}
@@ -50,29 +57,59 @@ func NewRunningOutput(
        Config:            conf,
        MetricBufferLimit: bufferLimit,
        MetricBatchSize:   batchSize,
        MetricsWritten: selfstat.Register(
            "write",
            "metrics_written",
            map[string]string{"output": name},
        ),
        MetricsFiltered: selfstat.Register(
            "write",
            "metrics_filtered",
            map[string]string{"output": name},
        ),
        BufferSize: selfstat.Register(
            "write",
            "buffer_size",
            map[string]string{"output": name},
        ),
        BufferLimit: selfstat.Register(
            "write",
            "buffer_limit",
            map[string]string{"output": name},
        ),
        WriteTime: selfstat.RegisterTiming(
            "write",
            "write_time_ns",
            map[string]string{"output": name},
        ),
    }
    ro.BufferLimit.Incr(int64(ro.MetricBufferLimit))
    return ro
}

// AddMetric adds a metric to the output. This function can also write cached
// points if FlushBufferWhenFull is true.
func (ro *RunningOutput) AddMetric(metric telegraf.Metric) {
func (ro *RunningOutput) AddMetric(m telegraf.Metric) {
    if m == nil {
        return
    }
    // Filter any tagexclude/taginclude parameters before adding metric
    if ro.Config.Filter.IsActive() {
        // In order to filter out tags, we need to create a new metric, since
        // metrics are immutable once created.
        name := metric.Name()
        tags := metric.Tags()
        fields := metric.Fields()
        t := metric.Time()
        name := m.Name()
        tags := m.Tags()
        fields := m.Fields()
        t := m.Time()
        if ok := ro.Config.Filter.Apply(name, fields, tags); !ok {
            ro.MetricsFiltered.Incr(1)
            return
        }
        // error is not possible if creating from another metric, so ignore.
        metric, _ = telegraf.NewMetric(name, tags, fields, t)
        m, _ = metric.New(name, tags, fields, t)
    }

    ro.metrics.Add(metric)
    ro.metrics.Add(m)
    if ro.metrics.Len() == ro.MetricBatchSize {
        batch := ro.metrics.Batch(ro.MetricBatchSize)
        err := ro.write(batch)
@@ -84,28 +121,21 @@ func (ro *RunningOutput) AddMetric(metric telegraf.Metric) {

// Write writes all cached points to this output.
func (ro *RunningOutput) Write() error {
    if !ro.Quiet {
        log.Printf("I! Output [%s] buffer fullness: %d / %d metrics. "+
            "Total gathered metrics: %d. Total dropped metrics: %d.",
            ro.Name,
            ro.failMetrics.Len()+ro.metrics.Len(),
            ro.MetricBufferLimit,
            ro.metrics.Total(),
            ro.metrics.Drops()+ro.failMetrics.Drops())
    }

    nFails, nMetrics := ro.failMetrics.Len(), ro.metrics.Len()
    ro.BufferSize.Set(int64(nFails + nMetrics))
    log.Printf("D! Output [%s] buffer fullness: %d / %d metrics. ",
        ro.Name, nFails+nMetrics, ro.MetricBufferLimit)
    var err error
    if !ro.failMetrics.IsEmpty() {
        bufLen := ro.failMetrics.Len()
        // how many batches of failed writes we need to write.
        nBatches := bufLen/ro.MetricBatchSize + 1
        nBatches := nFails/ro.MetricBatchSize + 1
        batchSize := ro.MetricBatchSize

        for i := 0; i < nBatches; i++ {
            // If it's the last batch, only grab the metrics that have not had
            // a write attempt already (this is primarily to preserve order).
            if i == nBatches-1 {
                batchSize = bufLen % ro.MetricBatchSize
                batchSize = nFails % ro.MetricBatchSize
            }
            batch := ro.failMetrics.Batch(batchSize)
            // If we've already failed previous writes, don't bother trying to
@@ -126,6 +156,7 @@ func (ro *RunningOutput) Write() error {
    if err == nil {
        err = ro.write(batch)
    }

    if err != nil {
        ro.failMetrics.Add(batch...)
        return err
@@ -134,17 +165,18 @@ func (ro *RunningOutput) Write() error {
}

func (ro *RunningOutput) write(metrics []telegraf.Metric) error {
    if metrics == nil || len(metrics) == 0 {
    nMetrics := len(metrics)
    if nMetrics == 0 {
        return nil
    }
    start := time.Now()
    err := ro.Output.Write(metrics)
    elapsed := time.Since(start)
    if err == nil {
        if !ro.Quiet {
            log.Printf("I! Output [%s] wrote batch of %d metrics in %s\n",
                ro.Name, len(metrics), elapsed)
        }
        log.Printf("D! Output [%s] wrote batch of %d metrics in %s\n",
            ro.Name, nMetrics, elapsed)
        ro.MetricsWritten.Incr(int64(nMetrics))
        ro.WriteTime.Incr(elapsed.Nanoseconds())
    }
    return err
}
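The nBatches/batchSize arithmetic in Write is easy to misread, so here it is traced on concrete numbers (self-contained; the values are illustrative):

package main

import "fmt"

func main() {
    const batchSize = 10
    nFails := 25 // metrics waiting in failMetrics

    // Same arithmetic as Write(): 25/10+1 = 3 batches. The final batch takes
    // only the remainder (25%10 = 5) so metrics that already had a write
    // attempt keep their original ordering.
    nBatches := nFails/batchSize + 1
    size := batchSize
    for i := 0; i < nBatches; i++ {
        if i == nBatches-1 {
            size = nFails % batchSize
        }
        fmt.Printf("batch %d: %d metrics\n", i, size) // 10, 10, 5
    }
    // Edge case worth noting: when nFails is an exact multiple of batchSize
    // (say 20), the loop requests a final batch of 20%10 = 0 metrics, which
    // presumably yields an empty batch that write() skips via its len check.
}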
@@ -36,10 +36,9 @@ func BenchmarkRunningOutputAddWrite(b *testing.B) {

    m := &perfOutput{}
    ro := NewRunningOutput("test", m, conf, 1000, 10000)
    ro.Quiet = true

    for n := 0; n < b.N; n++ {
        ro.AddMetric(first5[0])
        ro.AddMetric(testutil.TestMetric(101, "metric1"))
        ro.Write()
    }
}
@@ -52,10 +51,9 @@ func BenchmarkRunningOutputAddWriteEvery100(b *testing.B) {

    m := &perfOutput{}
    ro := NewRunningOutput("test", m, conf, 1000, 10000)
    ro.Quiet = true

    for n := 0; n < b.N; n++ {
        ro.AddMetric(first5[0])
        ro.AddMetric(testutil.TestMetric(101, "metric1"))
        if n%100 == 0 {
            ro.Write()
        }
@@ -71,13 +69,29 @@ func BenchmarkRunningOutputAddFailWrites(b *testing.B) {
    m := &perfOutput{}
    m.failWrite = true
    ro := NewRunningOutput("test", m, conf, 1000, 10000)
    ro.Quiet = true

    for n := 0; n < b.N; n++ {
        ro.AddMetric(first5[0])
        ro.AddMetric(testutil.TestMetric(101, "metric1"))
    }
}

func TestAddingNilMetric(t *testing.T) {
    conf := &OutputConfig{
        Filter: Filter{},
    }

    m := &mockOutput{}
    ro := NewRunningOutput("test", m, conf, 1000, 10000)

    ro.AddMetric(nil)
    ro.AddMetric(nil)
    ro.AddMetric(nil)

    err := ro.Write()
    assert.NoError(t, err)
    assert.Len(t, m.Metrics(), 0)
}

// Test that NameDrop filters get properly applied.
func TestRunningOutput_DropFilter(t *testing.T) {
    conf := &OutputConfig{
@@ -140,7 +154,7 @@ func TestRunningOutput_TagIncludeNoMatch(t *testing.T) {
    m := &mockOutput{}
    ro := NewRunningOutput("test", m, conf, 1000, 10000)

    ro.AddMetric(first5[0])
    ro.AddMetric(testutil.TestMetric(101, "metric1"))
    assert.Len(t, m.Metrics(), 0)

    err := ro.Write()
@@ -161,7 +175,7 @@ func TestRunningOutput_TagExcludeMatch(t *testing.T) {
    m := &mockOutput{}
    ro := NewRunningOutput("test", m, conf, 1000, 10000)

    ro.AddMetric(first5[0])
    ro.AddMetric(testutil.TestMetric(101, "metric1"))
    assert.Len(t, m.Metrics(), 0)

    err := ro.Write()
@@ -182,7 +196,7 @@ func TestRunningOutput_TagExcludeNoMatch(t *testing.T) {
    m := &mockOutput{}
    ro := NewRunningOutput("test", m, conf, 1000, 10000)

    ro.AddMetric(first5[0])
    ro.AddMetric(testutil.TestMetric(101, "metric1"))
    assert.Len(t, m.Metrics(), 0)

    err := ro.Write()
@@ -203,7 +217,7 @@ func TestRunningOutput_TagIncludeMatch(t *testing.T) {
    m := &mockOutput{}
    ro := NewRunningOutput("test", m, conf, 1000, 10000)

    ro.AddMetric(first5[0])
    ro.AddMetric(testutil.TestMetric(101, "metric1"))
    assert.Len(t, m.Metrics(), 0)

    err := ro.Write()
@@ -4,10 +4,14 @@ import (
    "io"
    "log"
    "os"
    "regexp"
    "time"

    "github.com/influxdata/wlog"
)

var prefixRegex = regexp.MustCompile("^[DIWE]!")

// newTelegrafWriter returns a logging-wrapped writer.
func newTelegrafWriter(w io.Writer) io.Writer {
    return &telegrafLog{
@@ -19,8 +23,14 @@ type telegrafLog struct {
    writer io.Writer
}

func (t *telegrafLog) Write(p []byte) (n int, err error) {
    return t.writer.Write(p)
func (t *telegrafLog) Write(b []byte) (n int, err error) {
    var line []byte
    if !prefixRegex.Match(b) {
        line = append([]byte(time.Now().UTC().Format(time.RFC3339)+" I! "), b...)
    } else {
        line = append([]byte(time.Now().UTC().Format(time.RFC3339)+" "), b...)
    }
    return t.writer.Write(line)
}

// SetupLogging configures the logging output.
@@ -30,6 +40,7 @@ func (t *telegrafLog) Write(p []byte) (n int, err error) {
// interpreted as stderr. If there is an error opening the file the
// logger will fallback to stderr.
func SetupLogging(debug, quiet bool, logfile string) {
    log.SetFlags(0)
    if debug {
        wlog.SetLevel(wlog.DEBUG)
    }
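Write now classifies each message by its one-letter prefix and defaults unprefixed messages to info. A self-contained sketch of the same classification (mirroring the code above, but writing straight to stdout instead of a wrapped writer):

package main

import (
    "os"
    "regexp"
    "time"
)

var prefixRegex = regexp.MustCompile("^[DIWE]!")

// writeLine prepends a UTC RFC3339 timestamp, and injects "I!" when the
// message carries no D!/I!/W!/E! level prefix.
func writeLine(b []byte) {
    ts := time.Now().UTC().Format(time.RFC3339)
    var line []byte
    if !prefixRegex.Match(b) {
        line = append([]byte(ts+" I! "), b...)
    } else {
        line = append([]byte(ts+" "), b...)
    }
    os.Stdout.Write(line)
}

func main() {
    writeLine([]byte("E! something failed\n")) // keeps its level
    writeLine([]byte("plain message\n"))       // gets "I!" added
}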
75  logger/logger_test.go  (new file)
@@ -0,0 +1,75 @@
package logger

import (
    "bytes"
    "io/ioutil"
    "log"
    "os"
    "testing"

    "github.com/stretchr/testify/assert"
)

func TestWriteLogToFile(t *testing.T) {
    tmpfile, err := ioutil.TempFile("", "")
    assert.NoError(t, err)
    defer func() { os.Remove(tmpfile.Name()) }()

    SetupLogging(false, false, tmpfile.Name())
    log.Printf("I! TEST")
    log.Printf("D! TEST") // <- should be ignored

    f, err := ioutil.ReadFile(tmpfile.Name())
    assert.NoError(t, err)
    assert.Equal(t, f[19:], []byte("Z I! TEST\n"))
}

func TestDebugWriteLogToFile(t *testing.T) {
    tmpfile, err := ioutil.TempFile("", "")
    assert.NoError(t, err)
    defer func() { os.Remove(tmpfile.Name()) }()

    SetupLogging(true, false, tmpfile.Name())
    log.Printf("D! TEST")

    f, err := ioutil.ReadFile(tmpfile.Name())
    assert.NoError(t, err)
    assert.Equal(t, f[19:], []byte("Z D! TEST\n"))
}

func TestErrorWriteLogToFile(t *testing.T) {
    tmpfile, err := ioutil.TempFile("", "")
    assert.NoError(t, err)
    defer func() { os.Remove(tmpfile.Name()) }()

    SetupLogging(false, true, tmpfile.Name())
    log.Printf("E! TEST")
    log.Printf("I! TEST") // <- should be ignored

    f, err := ioutil.ReadFile(tmpfile.Name())
    assert.NoError(t, err)
    assert.Equal(t, f[19:], []byte("Z E! TEST\n"))
}

func TestAddDefaultLogLevel(t *testing.T) {
    tmpfile, err := ioutil.TempFile("", "")
    assert.NoError(t, err)
    defer func() { os.Remove(tmpfile.Name()) }()

    SetupLogging(true, false, tmpfile.Name())
    log.Printf("TEST")

    f, err := ioutil.ReadFile(tmpfile.Name())
    assert.NoError(t, err)
    assert.Equal(t, f[19:], []byte("Z I! TEST\n"))
}

func BenchmarkTelegrafLogWrite(b *testing.B) {
    var msg = []byte("test")
    var buf bytes.Buffer
    w := newTelegrafWriter(&buf)
    for i := 0; i < b.N; i++ {
        buf.Reset()
        w.Write(msg)
    }
}
185  metric.go
@@ -2,9 +2,6 @@ package telegraf

import (
    "time"

    "github.com/influxdata/influxdb/client/v2"
    "github.com/influxdata/influxdb/models"
)

// ValueType is an enumeration of metric types that represent a simple value.
@@ -19,159 +16,47 @@ const (
)

type Metric interface {
    // Name returns the measurement name of the metric
    // Serialize serializes the metric into a line-protocol byte buffer,
    // including a newline at the end.
    Serialize() []byte
    // same as Serialize, but avoids an allocation.
    // returns number of bytes copied into dst.
    SerializeTo(dst []byte) int
    // String is the same as Serialize, but returns a string.
    String() string
    // Copy deep-copies the metric.
    Copy() Metric
    // Split will attempt to return multiple metrics with the same timestamp
    // whose string representations are no longer than maxSize.
    // Metrics with a single field may exceed the requested size.
    Split(maxSize int) []Metric

    // Tag functions
    HasTag(key string) bool
    AddTag(key, value string)
    RemoveTag(key string)

    // Field functions
    HasField(key string) bool
    AddField(key string, value interface{})
    RemoveField(key string) error

    // Name functions
    SetName(name string)
    SetPrefix(prefix string)
    SetSuffix(suffix string)

    // Getting data structure functions
    Name() string

    // Name returns the tags associated with the metric
    Tags() map[string]string

    // Time return the timestamp for the metric
    Fields() map[string]interface{}
    Time() time.Time

    // Type returns the metric type. Can be either telegraf.Gauge or telegraf.Counter
    Type() ValueType

    // UnixNano returns the unix nano time of the metric
    UnixNano() int64

    // HashID returns a non-cryptographic hash of the metric (name + tags)
    // NOTE: do not persist & depend on this value to disk.
    Type() ValueType
    Len() int // returns the length of the serialized metric, including newline
    HashID() uint64

    // Fields returns the fields for the metric
    Fields() map[string]interface{}

    // String returns a line-protocol string of the metric
    String() string

    // PrecisionString returns a line-protocol string of the metric, at precision
    PrecisionString(precison string) string

    // Point returns a influxdb client.Point object
    Point() *client.Point

    // SetAggregate sets the metric's aggregate status
    // This is so that aggregate metrics don't get re-sent to aggregator plugins
    // aggregator things:
    SetAggregate(bool)
    // IsAggregate returns true if the metric is an aggregate
    IsAggregate() bool
}

// metric is a wrapper of the influxdb client.Point struct
type metric struct {
    pt models.Point

    mType ValueType

    isaggregate bool
}

func NewMetricFromPoint(pt models.Point) Metric {
    return &metric{
        pt:    pt,
        mType: Untyped,
    }
}

// NewMetric returns an untyped metric.
func NewMetric(
    name string,
    tags map[string]string,
    fields map[string]interface{},
    t time.Time,
) (Metric, error) {
    pt, err := models.NewPoint(name, models.NewTags(tags), fields, t)
    if err != nil {
        return nil, err
    }
    return &metric{
        pt:    pt,
        mType: Untyped,
    }, nil
}

// NewGaugeMetric returns a gauge metric.
// Gauge metrics should be used when the metric can arbitrarily go up and
// down. ie, temperature, memory usage, cpu usage, etc.
func NewGaugeMetric(
    name string,
    tags map[string]string,
    fields map[string]interface{},
    t time.Time,
) (Metric, error) {
    pt, err := models.NewPoint(name, models.NewTags(tags), fields, t)
    if err != nil {
        return nil, err
    }
    return &metric{
        pt:    pt,
        mType: Gauge,
    }, nil
}

// NewCounterMetric returns a Counter metric.
// Counter metrics should be used when the metric being created is an
// always-increasing counter. ie, net bytes received, requests served, errors, etc.
func NewCounterMetric(
    name string,
    tags map[string]string,
    fields map[string]interface{},
    t time.Time,
) (Metric, error) {
    pt, err := models.NewPoint(name, models.NewTags(tags), fields, t)
    if err != nil {
        return nil, err
    }
    return &metric{
        pt:    pt,
        mType: Counter,
    }, nil
}

func (m *metric) Name() string {
    return m.pt.Name()
}

func (m *metric) Tags() map[string]string {
    return m.pt.Tags().Map()
}

func (m *metric) Time() time.Time {
    return m.pt.Time()
}

func (m *metric) Type() ValueType {
    return m.mType
}

func (m *metric) HashID() uint64 {
    return m.pt.HashID()
}

func (m *metric) UnixNano() int64 {
    return m.pt.UnixNano()
}

func (m *metric) Fields() map[string]interface{} {
    return m.pt.Fields()
}

func (m *metric) String() string {
    return m.pt.String()
}

func (m *metric) PrecisionString(precison string) string {
    return m.pt.PrecisionString(precison)
}

func (m *metric) Point() *client.Point {
    return client.NewPointFrom(m.pt)
}

func (m *metric) IsAggregate() bool {
    return m.isaggregate
}

func (m *metric) SetAggregate(b bool) {
    m.isaggregate = b
}

49  metric/escape.go  (new file)
@@ -0,0 +1,49 @@
package metric

import (
    "strings"
)

var (
    // escaper is for escaping:
    //   - tag keys
    //   - tag values
    //   - field keys
    // see https://docs.influxdata.com/influxdb/v1.0/write_protocols/line_protocol_tutorial/#special-characters-and-keywords
    escaper   = strings.NewReplacer(`,`, `\,`, `"`, `\"`, ` `, `\ `, `=`, `\=`)
    unEscaper = strings.NewReplacer(`\,`, `,`, `\"`, `"`, `\ `, ` `, `\=`, `=`)

    // nameEscaper is for escaping measurement names only.
    // see https://docs.influxdata.com/influxdb/v1.0/write_protocols/line_protocol_tutorial/#special-characters-and-keywords
    nameEscaper   = strings.NewReplacer(`,`, `\,`, ` `, `\ `)
    nameUnEscaper = strings.NewReplacer(`\,`, `,`, `\ `, ` `)

    // stringFieldEscaper is for escaping string field values only.
    // see https://docs.influxdata.com/influxdb/v1.0/write_protocols/line_protocol_tutorial/#special-characters-and-keywords
    stringFieldEscaper   = strings.NewReplacer(`"`, `\"`)
    stringFieldUnEscaper = strings.NewReplacer(`\"`, `"`)
)

func escape(s string, t string) string {
    switch t {
    case "fieldkey", "tagkey", "tagval":
        return escaper.Replace(s)
    case "name":
        return nameEscaper.Replace(s)
    case "fieldval":
        return stringFieldEscaper.Replace(s)
    }
    return s
}

func unescape(s string, t string) string {
    switch t {
    case "fieldkey", "tagkey", "tagval":
        return unEscaper.Replace(s)
    case "name":
        return nameUnEscaper.Replace(s)
    case "fieldval":
        return stringFieldUnEscaper.Replace(s)
    }
    return s
}
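A quick illustration of what the tag escaper above produces; without it, the space and comma would terminate the tag set early in line protocol (self-contained, using the same Replacer):

package main

import (
    "fmt"
    "strings"
)

// The same replacer the new metric package uses for tag keys and values.
var escaper = strings.NewReplacer(`,`, `\,`, `"`, `\"`, ` `, `\ `, `=`, `\=`)

func main() {
    // A tag value containing a comma and spaces must be escaped before it
    // is embedded in a line-protocol tag set.
    fmt.Println(escaper.Replace(`us-west, zone a`))
    // Output: us-west\,\ zone\ a
}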
38  metric/inline_strconv_parse.go  (new file)
@@ -0,0 +1,38 @@
package metric

import (
    "reflect"
    "strconv"
    "unsafe"
)

// parseIntBytes is a zero-alloc wrapper around strconv.ParseInt.
func parseIntBytes(b []byte, base int, bitSize int) (i int64, err error) {
    s := unsafeBytesToString(b)
    return strconv.ParseInt(s, base, bitSize)
}

// parseFloatBytes is a zero-alloc wrapper around strconv.ParseFloat.
func parseFloatBytes(b []byte, bitSize int) (float64, error) {
    s := unsafeBytesToString(b)
    return strconv.ParseFloat(s, bitSize)
}

// parseBoolBytes is a zero-alloc wrapper around strconv.ParseBool.
func parseBoolBytes(b []byte) (bool, error) {
    return strconv.ParseBool(unsafeBytesToString(b))
}

// unsafeBytesToString converts a []byte to a string without a heap allocation.
//
// It is unsafe, and is intended to prepare input to short-lived functions
// that require strings.
func unsafeBytesToString(in []byte) string {
    src := *(*reflect.SliceHeader)(unsafe.Pointer(&in))
    dst := reflect.StringHeader{
        Data: src.Data,
        Len:  src.Len,
    }
    s := *(*string)(unsafe.Pointer(&dst))
    return s
}
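The zero-allocation conversion works by aliasing the byte slice's backing array, which is exactly why the comment restricts it to short-lived use: the "immutable" string changes if the slice does. A self-contained demonstration of that hazard (reproducing the helper verbatim):

package main

import (
    "fmt"
    "reflect"
    "unsafe"
)

func unsafeBytesToString(in []byte) string {
    src := *(*reflect.SliceHeader)(unsafe.Pointer(&in))
    dst := reflect.StringHeader{Data: src.Data, Len: src.Len}
    return *(*string)(unsafe.Pointer(&dst))
}

func main() {
    b := []byte("1234")
    s := unsafeBytesToString(b)
    fmt.Println(s) // 1234
    b[0] = '9'     // the string aliases b's backing array...
    fmt.Println(s) // 9234 -- so only pass it to functions that finish with it
}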
103  metric/inline_strconv_parse_test.go  (new file)
@@ -0,0 +1,103 @@
package metric

import (
    "strconv"
    "testing"
    "testing/quick"
)

func TestParseIntBytesEquivalenceFuzz(t *testing.T) {
    f := func(b []byte, base int, bitSize int) bool {
        exp, expErr := strconv.ParseInt(string(b), base, bitSize)
        got, gotErr := parseIntBytes(b, base, bitSize)

        return exp == got && checkErrs(expErr, gotErr)
    }

    cfg := &quick.Config{
        MaxCount: 10000,
    }

    if err := quick.Check(f, cfg); err != nil {
        t.Fatal(err)
    }
}

func TestParseIntBytesValid64bitBase10EquivalenceFuzz(t *testing.T) {
    buf := []byte{}
    f := func(n int64) bool {
        buf = strconv.AppendInt(buf[:0], n, 10)

        exp, expErr := strconv.ParseInt(string(buf), 10, 64)
        got, gotErr := parseIntBytes(buf, 10, 64)

        return exp == got && checkErrs(expErr, gotErr)
    }

    cfg := &quick.Config{
        MaxCount: 10000,
    }

    if err := quick.Check(f, cfg); err != nil {
        t.Fatal(err)
    }
}

func TestParseFloatBytesEquivalenceFuzz(t *testing.T) {
    f := func(b []byte, bitSize int) bool {
        exp, expErr := strconv.ParseFloat(string(b), bitSize)
        got, gotErr := parseFloatBytes(b, bitSize)

        return exp == got && checkErrs(expErr, gotErr)
    }

    cfg := &quick.Config{
        MaxCount: 10000,
    }

    if err := quick.Check(f, cfg); err != nil {
        t.Fatal(err)
    }
}

func TestParseFloatBytesValid64bitEquivalenceFuzz(t *testing.T) {
    buf := []byte{}
    f := func(n float64) bool {
        buf = strconv.AppendFloat(buf[:0], n, 'f', -1, 64)

        exp, expErr := strconv.ParseFloat(string(buf), 64)
        got, gotErr := parseFloatBytes(buf, 64)

        return exp == got && checkErrs(expErr, gotErr)
    }

    cfg := &quick.Config{
        MaxCount: 10000,
    }

    if err := quick.Check(f, cfg); err != nil {
        t.Fatal(err)
    }
}

func TestParseBoolBytesEquivalence(t *testing.T) {
    var buf []byte
    for _, s := range []string{"1", "t", "T", "TRUE", "true", "True", "0", "f", "F", "FALSE", "false", "False", "fail", "TrUe", "FAlSE", "numbers", ""} {
        buf = append(buf[:0], s...)

        exp, expErr := strconv.ParseBool(s)
        got, gotErr := parseBoolBytes(buf)

        if got != exp || !checkErrs(expErr, gotErr) {
            t.Errorf("Failed to parse boolean value %q correctly: wanted (%t, %v), got (%t, %v)", s, exp, expErr, got, gotErr)
        }
    }
}

func checkErrs(a, b error) bool {
    if (a == nil) != (b == nil) {
        return false
    }

    return a == nil || a.Error() == b.Error()
}
585
metric/metric.go
Normal file
585
metric/metric.go
Normal file
@@ -0,0 +1,585 @@
|
||||
package metric
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"hash/fnv"
|
||||
"sort"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
)
|
||||
|
||||
const MaxInt = int(^uint(0) >> 1)
|
||||
|
||||
func New(
|
||||
name string,
|
||||
tags map[string]string,
|
||||
fields map[string]interface{},
|
||||
t time.Time,
|
||||
mType ...telegraf.ValueType,
|
||||
) (telegraf.Metric, error) {
|
||||
if len(fields) == 0 {
|
||||
return nil, fmt.Errorf("Metric cannot be made without any fields")
|
||||
}
|
||||
if len(name) == 0 {
|
||||
return nil, fmt.Errorf("Metric cannot be made with an empty name")
|
||||
}
|
||||
|
||||
var thisType telegraf.ValueType
|
||||
if len(mType) > 0 {
|
||||
thisType = mType[0]
|
||||
} else {
|
||||
thisType = telegraf.Untyped
|
||||
}
|
||||
|
||||
m := &metric{
|
||||
name: []byte(escape(name, "name")),
|
||||
t: []byte(fmt.Sprint(t.UnixNano())),
|
||||
nsec: t.UnixNano(),
|
||||
mType: thisType,
|
||||
}
|
||||
|
||||
// pre-allocate exact size of the tags slice
|
||||
taglen := 0
|
||||
for k, v := range tags {
|
||||
if len(k) == 0 || len(v) == 0 {
|
||||
continue
|
||||
}
|
||||
taglen += 2 + len(escape(k, "tagkey")) + len(escape(v, "tagval"))
|
||||
}
|
||||
m.tags = make([]byte, taglen)
|
||||
|
||||
i := 0
|
||||
for k, v := range tags {
|
||||
if len(k) == 0 || len(v) == 0 {
|
||||
continue
|
||||
}
|
||||
m.tags[i] = ','
|
||||
i++
|
||||
i += copy(m.tags[i:], escape(k, "tagkey"))
|
||||
m.tags[i] = '='
|
||||
i++
|
||||
i += copy(m.tags[i:], escape(v, "tagval"))
|
||||
}
|
||||
|
||||
// pre-allocate capacity of the fields slice
|
||||
fieldlen := 0
|
||||
for k, _ := range fields {
|
||||
// 10 bytes is completely arbitrary, but will at least prevent some
|
||||
// amount of allocations. There's a small possibility this will create
|
||||
// slightly more allocations for a metric that has many short fields.
|
||||
fieldlen += len(k) + 10
|
||||
}
|
||||
m.fields = make([]byte, 0, fieldlen)
|
||||
|
||||
i = 0
|
||||
for k, v := range fields {
|
||||
if i != 0 {
|
||||
m.fields = append(m.fields, ',')
|
||||
}
|
||||
m.fields = appendField(m.fields, k, v)
|
||||
i++
|
||||
}
|
||||
|
||||
return m, nil
|
||||
}

// indexUnescapedByte finds the index of the first byte equal to b in buf that
// is not escaped. Returns -1 if not found.
func indexUnescapedByte(buf []byte, b byte) int {
	var keyi int
	for {
		i := bytes.IndexByte(buf[keyi:], b)
		if i == -1 {
			return -1
		} else if i == 0 {
			break
		}
		keyi += i
		if countBackslashes(buf, keyi-1)%2 == 0 {
			break
		} else {
			keyi++
		}
	}
	return keyi
}

// countBackslashes counts the number of consecutive backslashes ending at the
// given index, scanning backwards.
func countBackslashes(buf []byte, index int) int {
	var count int
	for {
		if index < 0 {
			return count
		}
		if buf[index] == '\\' {
			count++
			index--
		} else {
			break
		}
	}
	return count
}
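
// For example (editorial): in `a\,b,c`, indexUnescapedByte(buf, ',') skips
// the escaped comma after "a" and returns the index of the comma before "c";
// an even count of preceding backslashes means the matched byte itself is
// not escaped.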

type metric struct {
	name   []byte
	tags   []byte
	fields []byte
	t      []byte

	mType     telegraf.ValueType
	aggregate bool

	// cached values for reuse in "get" functions
	hashID uint64
	nsec   int64
}

func (m *metric) String() string {
	return string(m.name) + string(m.tags) + " " + string(m.fields) + " " + string(m.t) + "\n"
}

func (m *metric) SetAggregate(b bool) {
	m.aggregate = b
}

func (m *metric) IsAggregate() bool {
	return m.aggregate
}

func (m *metric) Type() telegraf.ValueType {
	return m.mType
}

func (m *metric) Len() int {
	// 3 accounts for the 2 spaces surrounding the fields section plus the
	// newline at the end.
	return len(m.name) + len(m.tags) + len(m.fields) + len(m.t) + 3
}

func (m *metric) Serialize() []byte {
	tmp := make([]byte, m.Len())
	i := 0
	i += copy(tmp[i:], m.name)
	i += copy(tmp[i:], m.tags)
	tmp[i] = ' '
	i++
	i += copy(tmp[i:], m.fields)
	tmp[i] = ' '
	i++
	i += copy(tmp[i:], m.t)
	tmp[i] = '\n'
	return tmp
}

func (m *metric) SerializeTo(dst []byte) int {
	i := 0
	if i >= len(dst) {
		return i
	}

	i += copy(dst[i:], m.name)
	if i >= len(dst) {
		return i
	}

	i += copy(dst[i:], m.tags)
	if i >= len(dst) {
		return i
	}

	dst[i] = ' '
	i++
	if i >= len(dst) {
		return i
	}

	i += copy(dst[i:], m.fields)
	if i >= len(dst) {
		return i
	}

	dst[i] = ' '
	i++
	if i >= len(dst) {
		return i
	}

	i += copy(dst[i:], m.t)
	if i >= len(dst) {
		return i
	}
	dst[i] = '\n'

	return i + 1
}
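
// Note (editorial): SerializeTo never grows dst; if the buffer is too small
// it returns early with the number of bytes written so far, so the output
// may be a truncated, non-terminated line. Callers can size dst with Len():
//
//	buf := make([]byte, m.Len())
//	n := m.SerializeTo(buf) // n == m.Len() when buf is large enough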

func (m *metric) Split(maxSize int) []telegraf.Metric {
	if m.Len() < maxSize {
		return []telegraf.Metric{m}
	}
	var out []telegraf.Metric

	// constant number of bytes for each metric (in addition to field bytes)
	constant := len(m.name) + len(m.tags) + len(m.t) + 3
	// currently selected fields
	fields := make([]byte, 0, maxSize)

	i := 0
	for {
		if i >= len(m.fields) {
			// hit the end of the field byte slice
			if len(fields) > 0 {
				out = append(out, copyWith(m.name, m.tags, fields, m.t))
			}
			break
		}

		// find the end of the next field
		j := indexUnescapedByte(m.fields[i:], ',')
		if j == -1 {
			j = len(m.fields)
		} else {
			j += i
		}

		// if true, then we need to create a metric _not_ including the
		// currently selected field
		if len(m.fields[i:j])+len(fields)+constant > maxSize {
			// if false, then we'll create a metric including the currently
			// selected field anyway. This means that the given maxSize is too
			// small for even a single field to fit.
			if len(fields) > 0 {
				out = append(out, copyWith(m.name, m.tags, fields, m.t))
			}

			fields = make([]byte, 0, maxSize)
		}
		if len(fields) > 0 {
			fields = append(fields, ',')
		}
		fields = append(fields, m.fields[i:j]...)

		i = j + 1
	}
	return out
}
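
// Example (editorial): even with maxSize smaller than any single field, the
// loop above still emits one metric per field rather than dropping data; see
// TestSplitMetric_TooShort further down in this diff.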

func (m *metric) Fields() map[string]interface{} {
	fieldMap := map[string]interface{}{}
	i := 0
	for {
		if i >= len(m.fields) {
			break
		}
		// end index of the field key
		i1 := indexUnescapedByte(m.fields[i:], '=')
		if i1 == -1 {
			break
		}
		// start index of the field value
		i2 := i1 + 1

		// end index of the field value
		var i3 int
		if m.fields[i:][i2] == '"' {
			i3 = indexUnescapedByte(m.fields[i:][i2+1:], '"')
			if i3 == -1 {
				i3 = len(m.fields[i:])
			}
			i3 += i2 + 2 // increment index to the comma
		} else {
			i3 = indexUnescapedByte(m.fields[i:], ',')
			if i3 == -1 {
				i3 = len(m.fields[i:])
			}
		}

		switch m.fields[i:][i2] {
		case '"':
			// string field
			fieldMap[unescape(string(m.fields[i:][0:i1]), "fieldkey")] = unescape(string(m.fields[i:][i2+1:i3-1]), "fieldval")
		case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
			// number field
			switch m.fields[i:][i3-1] {
			case 'i':
				// integer field
				n, err := parseIntBytes(m.fields[i:][i2:i3-1], 10, 64)
				if err == nil {
					fieldMap[unescape(string(m.fields[i:][0:i1]), "fieldkey")] = n
				} else {
					// TODO handle the error, or just ignore the field silently?
				}
			default:
				// float field
				n, err := parseFloatBytes(m.fields[i:][i2:i3], 64)
				if err == nil {
					fieldMap[unescape(string(m.fields[i:][0:i1]), "fieldkey")] = n
				} else {
					// TODO handle the error, or just ignore the field silently?
				}
			}
		case 'T', 't':
			fieldMap[unescape(string(m.fields[i:][0:i1]), "fieldkey")] = true
		case 'F', 'f':
			fieldMap[unescape(string(m.fields[i:][0:i1]), "fieldkey")] = false
		default:
			// TODO handle unsupported field types
		}

		i += i3 + 1
	}

	return fieldMap
}
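
// Note (editorial): the switch above dispatches on the first byte of the
// value: '"' means string, a digit or '-' means integer (trailing 'i') or
// float, and 't'/'f' in either case mean boolean, mirroring the InfluxDB
// line protocol.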

func (m *metric) Tags() map[string]string {
	tagMap := map[string]string{}
	if len(m.tags) == 0 {
		return tagMap
	}

	i := 0
	for {
		// start index of the tag key
		i0 := indexUnescapedByte(m.tags[i:], ',') + 1
		if i0 == 0 {
			// didn't find a tag start
			break
		}
		// end index of the tag key
		i1 := indexUnescapedByte(m.tags[i:], '=')
		// start index of the tag value
		i2 := i1 + 1
		// end index of the tag value (starting from i2)
		i3 := indexUnescapedByte(m.tags[i+i2:], ',')
		if i3 == -1 {
			tagMap[unescape(string(m.tags[i:][i0:i1]), "tagkey")] = unescape(string(m.tags[i:][i2:]), "tagval")
			break
		}
		tagMap[unescape(string(m.tags[i:][i0:i1]), "tagkey")] = unescape(string(m.tags[i:][i2:i2+i3]), "tagval")
		// increment the start index for the next tag
		i += i2 + i3
	}

	return tagMap
}

func (m *metric) Name() string {
	return unescape(string(m.name), "name")
}

func (m *metric) Time() time.Time {
	// assume the metric has been verified already and ignore the error:
	if m.nsec == 0 {
		m.nsec, _ = parseIntBytes(m.t, 10, 64)
	}
	return time.Unix(0, m.nsec)
}

func (m *metric) UnixNano() int64 {
	// assume the metric has been verified already and ignore the error:
	if m.nsec == 0 {
		m.nsec, _ = parseIntBytes(m.t, 10, 64)
	}
	return m.nsec
}

func (m *metric) SetName(name string) {
	m.hashID = 0
	m.name = []byte(nameEscaper.Replace(name))
}

func (m *metric) SetPrefix(prefix string) {
	m.hashID = 0
	m.name = append([]byte(nameEscaper.Replace(prefix)), m.name...)
}

func (m *metric) SetSuffix(suffix string) {
	m.hashID = 0
	m.name = append(m.name, []byte(nameEscaper.Replace(suffix))...)
}

func (m *metric) AddTag(key, value string) {
	m.RemoveTag(key)
	m.tags = append(m.tags, []byte(","+escape(key, "tagkey")+"="+escape(value, "tagval"))...)
}

func (m *metric) HasTag(key string) bool {
	i := bytes.Index(m.tags, []byte(escape(key, "tagkey")+"="))
	if i == -1 {
		return false
	}
	return true
}

func (m *metric) RemoveTag(key string) {
	m.hashID = 0

	i := bytes.Index(m.tags, []byte(escape(key, "tagkey")+"="))
	if i == -1 {
		return
	}

	tmp := m.tags[0 : i-1]
	j := indexUnescapedByte(m.tags[i:], ',')
	if j != -1 {
		tmp = append(tmp, m.tags[i+j:]...)
	}
	m.tags = tmp
	return
}

func (m *metric) AddField(key string, value interface{}) {
	m.fields = append(m.fields, ',')
	m.fields = appendField(m.fields, key, value)
}

func (m *metric) HasField(key string) bool {
	i := bytes.Index(m.fields, []byte(escape(key, "tagkey")+"="))
	if i == -1 {
		return false
	}
	return true
}

func (m *metric) RemoveField(key string) error {
	i := bytes.Index(m.fields, []byte(escape(key, "tagkey")+"="))
	if i == -1 {
		return nil
	}

	var tmp []byte
	if i != 0 {
		tmp = m.fields[0 : i-1]
	}
	j := indexUnescapedByte(m.fields[i:], ',')
	if j != -1 {
		tmp = append(tmp, m.fields[i+j:]...)
	}

	if len(tmp) == 0 {
		return fmt.Errorf("Metric cannot remove final field: %s", m.fields)
	}

	m.fields = tmp
	return nil
}

func (m *metric) Copy() telegraf.Metric {
	return copyWith(m.name, m.tags, m.fields, m.t)
}

func copyWith(name, tags, fields, t []byte) telegraf.Metric {
	out := metric{
		name:   make([]byte, len(name)),
		tags:   make([]byte, len(tags)),
		fields: make([]byte, len(fields)),
		t:      make([]byte, len(t)),
	}
	copy(out.name, name)
	copy(out.tags, tags)
	copy(out.fields, fields)
	copy(out.t, t)
	return &out
}

func (m *metric) HashID() uint64 {
	if m.hashID == 0 {
		h := fnv.New64a()
		h.Write(m.name)

		tags := m.Tags()
		tmp := make([]string, len(tags))
		i := 0
		for k, v := range tags {
			tmp[i] = k + v
			i++
		}
		sort.Strings(tmp)

		for _, s := range tmp {
			h.Write([]byte(s))
		}

		m.hashID = h.Sum64()
	}
	return m.hashID
}
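
// Note (editorial): the tag key/value pairs are sorted before hashing, so
// HashID is independent of map iteration order; metrics with the same name
// and tag set always hash to the same series ID (fields are excluded).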

func appendField(b []byte, k string, v interface{}) []byte {
	if v == nil {
		return b
	}
	b = append(b, []byte(escape(k, "tagkey")+"=")...)

	// check popular types first
	switch v := v.(type) {
	case float64:
		b = strconv.AppendFloat(b, v, 'f', -1, 64)
	case int64:
		b = strconv.AppendInt(b, v, 10)
		b = append(b, 'i')
	case string:
		b = append(b, '"')
		b = append(b, []byte(escape(v, "fieldval"))...)
		b = append(b, '"')
	case bool:
		b = strconv.AppendBool(b, v)
	case int32:
		b = strconv.AppendInt(b, int64(v), 10)
		b = append(b, 'i')
	case int16:
		b = strconv.AppendInt(b, int64(v), 10)
		b = append(b, 'i')
	case int8:
		b = strconv.AppendInt(b, int64(v), 10)
		b = append(b, 'i')
	case int:
		b = strconv.AppendInt(b, int64(v), 10)
		b = append(b, 'i')
	case uint64:
		// Cap uints above the maximum int value
		var intv int64
		if v <= uint64(MaxInt) {
			intv = int64(v)
		} else {
			intv = int64(MaxInt)
		}
		b = strconv.AppendInt(b, intv, 10)
		b = append(b, 'i')
	case uint32:
		b = strconv.AppendInt(b, int64(v), 10)
		b = append(b, 'i')
	case uint16:
		b = strconv.AppendInt(b, int64(v), 10)
		b = append(b, 'i')
	case uint8:
		b = strconv.AppendInt(b, int64(v), 10)
		b = append(b, 'i')
	case uint:
		// Cap uints above the maximum int value
		var intv int64
		if v <= uint(MaxInt) {
			intv = int64(v)
		} else {
			intv = int64(MaxInt)
		}
		b = strconv.AppendInt(b, intv, 10)
		b = append(b, 'i')
	case float32:
		b = strconv.AppendFloat(b, float64(v), 'f', -1, 32)
	case []byte:
		b = append(b, v...)
	default:
		// Can't determine the type, so convert it to a string
		b = append(b, '"')
		b = append(b, []byte(escape(fmt.Sprintf("%v", v), "fieldval"))...)
		b = append(b, '"')
	}

	return b
}
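
// Example (editorial): on a 64-bit platform MaxInt is 9223372036854775807,
// so appendField(nil, "n", uint64(math.MaxUint64)) yields
// n=9223372036854775807i rather than overflowing; see TestNewMetric_AllTypes
// below for the corresponding assertions.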
148 metric/metric_benchmark_test.go Normal file
@@ -0,0 +1,148 @@
package metric

import (
	"fmt"
	"testing"
	"time"

	"github.com/influxdata/telegraf"
)

// vars for making sure that the compiler doesn't optimize out the benchmarks:
var (
	s      string
	I      interface{}
	tags   map[string]string
	fields map[string]interface{}
)

func BenchmarkNewMetric(b *testing.B) {
	var mt telegraf.Metric
	for n := 0; n < b.N; n++ {
		mt, _ = New("test_metric",
			map[string]string{
				"test_tag_1": "tag_value_1",
				"test_tag_2": "tag_value_2",
				"test_tag_3": "tag_value_3",
			},
			map[string]interface{}{
				"string_field": "string",
				"int_field":    int64(1000),
				"float_field":  float64(2.1),
			},
			time.Now(),
		)
	}
	s = mt.String()
}

func BenchmarkAddTag(b *testing.B) {
	var mt telegraf.Metric
	mt = &metric{
		name:   []byte("cpu"),
		tags:   []byte(",host=localhost"),
		fields: []byte("a=101"),
		t:      []byte("1480614053000000000"),
	}
	for n := 0; n < b.N; n++ {
		mt.AddTag("foo", "bar")
	}
	s = mt.String()
}

func BenchmarkSplit(b *testing.B) {
	var mt telegraf.Metric
	mt = &metric{
		name:   []byte("cpu"),
		tags:   []byte(",host=localhost"),
		fields: []byte("a=101,b=10i,c=10101,d=101010,e=42"),
		t:      []byte("1480614053000000000"),
	}
	var metrics []telegraf.Metric
	for n := 0; n < b.N; n++ {
		metrics = mt.Split(60)
	}
	s = metrics[0].String()
}

func BenchmarkTags(b *testing.B) {
	for n := 0; n < b.N; n++ {
		var mt, _ = New("test_metric",
			map[string]string{
				"test_tag_1": "tag_value_1",
				"test_tag_2": "tag_value_2",
				"test_tag_3": "tag_value_3",
			},
			map[string]interface{}{
				"string_field": "string",
				"int_field":    int64(1000),
				"float_field":  float64(2.1),
			},
			time.Now(),
		)
		tags = mt.Tags()
	}
	s = fmt.Sprint(tags)
}

func BenchmarkFields(b *testing.B) {
	for n := 0; n < b.N; n++ {
		var mt, _ = New("test_metric",
			map[string]string{
				"test_tag_1": "tag_value_1",
				"test_tag_2": "tag_value_2",
				"test_tag_3": "tag_value_3",
			},
			map[string]interface{}{
				"string_field": "string",
				"int_field":    int64(1000),
				"float_field":  float64(2.1),
			},
			time.Now(),
		)
		fields = mt.Fields()
	}
	s = fmt.Sprint(fields)
}

func BenchmarkString(b *testing.B) {
	mt, _ := New("test_metric",
		map[string]string{
			"test_tag_1": "tag_value_1",
			"test_tag_2": "tag_value_2",
			"test_tag_3": "tag_value_3",
		},
		map[string]interface{}{
			"string_field": "string",
			"int_field":    int64(1000),
			"float_field":  float64(2.1),
		},
		time.Now(),
	)
	var S string
	for n := 0; n < b.N; n++ {
		S = mt.String()
	}
	s = S
}

func BenchmarkSerialize(b *testing.B) {
	mt, _ := New("test_metric",
		map[string]string{
			"test_tag_1": "tag_value_1",
			"test_tag_2": "tag_value_2",
			"test_tag_3": "tag_value_3",
		},
		map[string]interface{}{
			"string_field": "string",
			"int_field":    int64(1000),
			"float_field":  float64(2.1),
		},
		time.Now(),
	)
	var B []byte
	for n := 0; n < b.N; n++ {
		B = mt.Serialize()
	}
	s = string(B)
}
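
// To run these benchmarks with allocation stats (standard Go tooling, from
// the repository root):
//
//	go test -bench=. -benchmem ./metric/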
650 metric/metric_test.go Normal file
@@ -0,0 +1,650 @@
package metric

import (
	"fmt"
	"math"
	"regexp"
	"testing"
	"time"

	"github.com/influxdata/telegraf"

	"github.com/stretchr/testify/assert"
)

func TestNewMetric(t *testing.T) {
	now := time.Now()

	tags := map[string]string{
		"host":       "localhost",
		"datacenter": "us-east-1",
	}
	fields := map[string]interface{}{
		"usage_idle": float64(99),
		"usage_busy": float64(1),
	}
	m, err := New("cpu", tags, fields, now)
	assert.NoError(t, err)

	assert.Equal(t, telegraf.Untyped, m.Type())
	assert.Equal(t, tags, m.Tags())
	assert.Equal(t, fields, m.Fields())
	assert.Equal(t, "cpu", m.Name())
	assert.Equal(t, now, m.Time())
	assert.Equal(t, now.UnixNano(), m.UnixNano())
}

func TestNewErrors(t *testing.T) {
	// creating a metric with an empty name produces an error:
	m, err := New(
		"",
		map[string]string{
			"datacenter": "us-east-1",
			"mytag":      "foo",
			"another":    "tag",
		},
		map[string]interface{}{
			"value": float64(1),
		},
		time.Now(),
	)
	assert.Error(t, err)
	assert.Nil(t, m)

	// creating a metric with empty fields produces an error:
	m, err = New(
		"foobar",
		map[string]string{
			"datacenter": "us-east-1",
			"mytag":      "foo",
			"another":    "tag",
		},
		map[string]interface{}{},
		time.Now(),
	)
	assert.Error(t, err)
	assert.Nil(t, m)
}

func TestNewMetric_Tags(t *testing.T) {
	now := time.Now()
	tags := map[string]string{
		"host":       "localhost",
		"datacenter": "us-east-1",
	}
	fields := map[string]interface{}{
		"value": float64(1),
	}
	m, err := New("cpu", tags, fields, now)
	assert.NoError(t, err)

	assert.True(t, m.HasTag("host"))
	assert.True(t, m.HasTag("datacenter"))

	m.AddTag("newtag", "foo")
	assert.True(t, m.HasTag("newtag"))

	m.RemoveTag("host")
	assert.False(t, m.HasTag("host"))
	assert.True(t, m.HasTag("newtag"))
	assert.True(t, m.HasTag("datacenter"))

	m.RemoveTag("datacenter")
	assert.False(t, m.HasTag("datacenter"))
	assert.True(t, m.HasTag("newtag"))
	assert.Equal(t, map[string]string{"newtag": "foo"}, m.Tags())

	m.RemoveTag("newtag")
	assert.False(t, m.HasTag("newtag"))
	assert.Equal(t, map[string]string{}, m.Tags())

	assert.Equal(t, "cpu value=1 "+fmt.Sprint(now.UnixNano())+"\n", m.String())
}

func TestSerialize(t *testing.T) {
	now := time.Now()
	tags := map[string]string{
		"datacenter": "us-east-1",
	}
	fields := map[string]interface{}{
		"value": float64(1),
	}
	m, err := New("cpu", tags, fields, now)
	assert.NoError(t, err)

	assert.Equal(t,
		[]byte("cpu,datacenter=us-east-1 value=1 "+fmt.Sprint(now.UnixNano())+"\n"),
		m.Serialize())

	m.RemoveTag("datacenter")
	assert.Equal(t,
		[]byte("cpu value=1 "+fmt.Sprint(now.UnixNano())+"\n"),
		m.Serialize())
}

func TestHashID(t *testing.T) {
	m, _ := New(
		"cpu",
		map[string]string{
			"datacenter": "us-east-1",
			"mytag":      "foo",
			"another":    "tag",
		},
		map[string]interface{}{
			"value": float64(1),
		},
		time.Now(),
	)
	hash := m.HashID()

	// adding a field doesn't change the hash:
	m.AddField("foo", int64(100))
	assert.Equal(t, hash, m.HashID())

	// removing a non-existent tag doesn't change the hash:
	m.RemoveTag("no-op")
	assert.Equal(t, hash, m.HashID())

	// adding a tag does change it:
	m.AddTag("foo", "bar")
	assert.NotEqual(t, hash, m.HashID())
	hash = m.HashID()

	// removing a tag also changes it:
	m.RemoveTag("mytag")
	assert.NotEqual(t, hash, m.HashID())
}

func TestHashID_Consistency(t *testing.T) {
	m, _ := New(
		"cpu",
		map[string]string{
			"datacenter": "us-east-1",
			"mytag":      "foo",
			"another":    "tag",
		},
		map[string]interface{}{
			"value": float64(1),
		},
		time.Now(),
	)
	hash := m.HashID()

	for i := 0; i < 1000; i++ {
		m2, _ := New(
			"cpu",
			map[string]string{
				"datacenter": "us-east-1",
				"mytag":      "foo",
				"another":    "tag",
			},
			map[string]interface{}{
				"value": float64(1),
			},
			time.Now(),
		)
		assert.Equal(t, hash, m2.HashID())
	}
}
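
// Note (editorial): the two tests above pin down that HashID covers the
// metric name and tag set but not the fields, and that it is stable across
// map iteration order, which is what makes it usable as a series key.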

func TestNewMetric_NameModifiers(t *testing.T) {
	now := time.Now()
	tags := map[string]string{}
	fields := map[string]interface{}{
		"value": float64(1),
	}
	m, err := New("cpu", tags, fields, now)
	assert.NoError(t, err)

	hash := m.HashID()
	suffix := fmt.Sprintf(" value=1 %d\n", now.UnixNano())
	assert.Equal(t, "cpu"+suffix, m.String())

	m.SetPrefix("pre_")
	assert.NotEqual(t, hash, m.HashID())
	hash = m.HashID()
	assert.Equal(t, "pre_cpu"+suffix, m.String())

	m.SetSuffix("_post")
	assert.NotEqual(t, hash, m.HashID())
	hash = m.HashID()
	assert.Equal(t, "pre_cpu_post"+suffix, m.String())

	m.SetName("mem")
	assert.NotEqual(t, hash, m.HashID())
	assert.Equal(t, "mem"+suffix, m.String())
}

func TestNewMetric_FieldModifiers(t *testing.T) {
	now := time.Now()
	tags := map[string]string{
		"host": "localhost",
	}
	fields := map[string]interface{}{
		"value": float64(1),
	}
	m, err := New("cpu", tags, fields, now)
	assert.NoError(t, err)

	assert.True(t, m.HasField("value"))
	assert.False(t, m.HasField("foo"))

	m.AddField("newfield", "foo")
	assert.True(t, m.HasField("newfield"))

	assert.NoError(t, m.RemoveField("newfield"))
	assert.False(t, m.HasField("newfield"))

	// don't allow the user to remove all fields:
	assert.Error(t, m.RemoveField("value"))

	m.AddField("value2", int64(101))
	assert.NoError(t, m.RemoveField("value"))
	assert.False(t, m.HasField("value"))
}

func TestNewMetric_Fields(t *testing.T) {
	now := time.Now()
	tags := map[string]string{
		"host": "localhost",
	}
	fields := map[string]interface{}{
		"float":  float64(1),
		"int":    int64(1),
		"bool":   true,
		"false":  false,
		"string": "test",
	}
	m, err := New("cpu", tags, fields, now)
	assert.NoError(t, err)

	assert.Equal(t, fields, m.Fields())
}

func TestNewMetric_Time(t *testing.T) {
	now := time.Now()
	tags := map[string]string{
		"host": "localhost",
	}
	fields := map[string]interface{}{
		"float":  float64(1),
		"int":    int64(1),
		"bool":   true,
		"false":  false,
		"string": "test",
	}
	m, err := New("cpu", tags, fields, now)
	assert.NoError(t, err)
	m = m.Copy()
	m2 := m.Copy()

	assert.Equal(t, now.UnixNano(), m.Time().UnixNano())
	assert.Equal(t, now.UnixNano(), m2.UnixNano())
}

func TestNewMetric_Copy(t *testing.T) {
	now := time.Now()
	tags := map[string]string{}
	fields := map[string]interface{}{
		"float": float64(1),
	}
	m, err := New("cpu", tags, fields, now)
	assert.NoError(t, err)
	m2 := m.Copy()

	assert.Equal(t,
		fmt.Sprintf("cpu float=1 %d\n", now.UnixNano()),
		m.String())
	m.AddTag("host", "localhost")
	assert.Equal(t,
		fmt.Sprintf("cpu,host=localhost float=1 %d\n", now.UnixNano()),
		m.String())

	assert.Equal(t,
		fmt.Sprintf("cpu float=1 %d\n", now.UnixNano()),
		m2.String())
}

func TestNewMetric_AllTypes(t *testing.T) {
	now := time.Now()
	tags := map[string]string{}
	fields := map[string]interface{}{
		"float64":     float64(1),
		"float32":     float32(1),
		"int64":       int64(1),
		"int32":       int32(1),
		"int16":       int16(1),
		"int8":        int8(1),
		"int":         int(1),
		"uint64":      uint64(1),
		"uint32":      uint32(1),
		"uint16":      uint16(1),
		"uint8":       uint8(1),
		"uint":        uint(1),
		"bytes":       []byte("foo"),
		"nil":         nil,
		"maxuint64":   uint64(MaxInt) + 10,
		"maxuint":     uint(MaxInt) + 10,
		"unsupported": []int{1, 2},
	}
	m, err := New("cpu", tags, fields, now)
	assert.NoError(t, err)

	assert.Contains(t, m.String(), "float64=1")
	assert.Contains(t, m.String(), "float32=1")
	assert.Contains(t, m.String(), "int64=1i")
	assert.Contains(t, m.String(), "int32=1i")
	assert.Contains(t, m.String(), "int16=1i")
	assert.Contains(t, m.String(), "int8=1i")
	assert.Contains(t, m.String(), "int=1i")
	assert.Contains(t, m.String(), "uint64=1i")
	assert.Contains(t, m.String(), "uint32=1i")
	assert.Contains(t, m.String(), "uint16=1i")
	assert.Contains(t, m.String(), "uint8=1i")
	assert.Contains(t, m.String(), "uint=1i")
	assert.NotContains(t, m.String(), "nil")
	assert.Contains(t, m.String(), fmt.Sprintf("maxuint64=%di", MaxInt))
	assert.Contains(t, m.String(), fmt.Sprintf("maxuint=%di", MaxInt))
}

func TestIndexUnescapedByte(t *testing.T) {
	tests := []struct {
		in       []byte
		b        byte
		expected int
	}{
		{
			in:       []byte(`foobar`),
			b:        'b',
			expected: 3,
		},
		{
			in:       []byte(`foo\bar`),
			b:        'b',
			expected: -1,
		},
		{
			in:       []byte(`foo\\bar`),
			b:        'b',
			expected: 5,
		},
		{
			in:       []byte(`foobar`),
			b:        'f',
			expected: 0,
		},
		{
			in:       []byte(`foobar`),
			b:        'r',
			expected: 5,
		},
		{
			in:       []byte(`\foobar`),
			b:        'f',
			expected: -1,
		},
	}

	for _, test := range tests {
		got := indexUnescapedByte(test.in, test.b)
		assert.Equal(t, test.expected, got)
	}
}

func TestNewGaugeMetric(t *testing.T) {
	now := time.Now()

	tags := map[string]string{
		"host":       "localhost",
		"datacenter": "us-east-1",
	}
	fields := map[string]interface{}{
		"usage_idle": float64(99),
		"usage_busy": float64(1),
	}
	m, err := New("cpu", tags, fields, now, telegraf.Gauge)
	assert.NoError(t, err)

	assert.Equal(t, telegraf.Gauge, m.Type())
	assert.Equal(t, tags, m.Tags())
	assert.Equal(t, fields, m.Fields())
	assert.Equal(t, "cpu", m.Name())
	assert.Equal(t, now, m.Time())
	assert.Equal(t, now.UnixNano(), m.UnixNano())
}

func TestNewCounterMetric(t *testing.T) {
	now := time.Now()

	tags := map[string]string{
		"host":       "localhost",
		"datacenter": "us-east-1",
	}
	fields := map[string]interface{}{
		"usage_idle": float64(99),
		"usage_busy": float64(1),
	}
	m, err := New("cpu", tags, fields, now, telegraf.Counter)
	assert.NoError(t, err)

	assert.Equal(t, telegraf.Counter, m.Type())
	assert.Equal(t, tags, m.Tags())
	assert.Equal(t, fields, m.Fields())
	assert.Equal(t, "cpu", m.Name())
	assert.Equal(t, now, m.Time())
	assert.Equal(t, now.UnixNano(), m.UnixNano())
}

// test splitting a metric into various max lengths
func TestSplitMetric(t *testing.T) {
	now := time.Unix(0, 1480940990034083306)
	tags := map[string]string{
		"host": "localhost",
	}
	fields := map[string]interface{}{
		"float":  float64(100001),
		"int":    int64(100001),
		"bool":   true,
		"false":  false,
		"string": "test",
	}
	m, err := New("cpu", tags, fields, now)
	assert.NoError(t, err)

	split80 := m.Split(80)
	assert.Len(t, split80, 2)

	split70 := m.Split(70)
	assert.Len(t, split70, 3)

	split60 := m.Split(60)
	assert.Len(t, split60, 4)
}

// test splitting a metric into various max lengths,
// using a simple regex check to verify that the split metrics are valid
func TestSplitMetric_RegexVerify(t *testing.T) {
	now := time.Unix(0, 1480940990034083306)
	tags := map[string]string{
		"host": "localhost",
	}
	fields := map[string]interface{}{
		"foo":     float64(98934259085),
		"bar":     float64(19385292),
		"number":  float64(19385292),
		"another": float64(19385292),
		"n":       float64(19385292),
	}
	m, err := New("cpu", tags, fields, now)
	assert.NoError(t, err)

	// verification regex
	re := regexp.MustCompile(`cpu,host=localhost \w+=\d+(,\w+=\d+)* 1480940990034083306`)

	split90 := m.Split(90)
	assert.Len(t, split90, 2)
	for _, splitM := range split90 {
		assert.True(t, re.Match(splitM.Serialize()), splitM.String())
	}

	split70 := m.Split(70)
	assert.Len(t, split70, 3)
	for _, splitM := range split70 {
		assert.True(t, re.Match(splitM.Serialize()), splitM.String())
	}

	split20 := m.Split(20)
	assert.Len(t, split20, 5)
	for _, splitM := range split20 {
		assert.True(t, re.Match(splitM.Serialize()), splitM.String())
	}
}

// test splitting a metric even when the given length is shorter than the
// shortest possible length.
// Split should split the metric as short as possible, ie, 1 field per metric.
func TestSplitMetric_TooShort(t *testing.T) {
	now := time.Unix(0, 1480940990034083306)
	tags := map[string]string{
		"host": "localhost",
	}
	fields := map[string]interface{}{
		"float":  float64(100001),
		"int":    int64(100001),
		"bool":   true,
		"false":  false,
		"string": "test",
	}
	m, err := New("cpu", tags, fields, now)
	assert.NoError(t, err)

	split := m.Split(10)
	assert.Len(t, split, 5)
	strings := make([]string, 5)
	for i, splitM := range split {
		strings[i] = splitM.String()
	}

	assert.Contains(t, strings, "cpu,host=localhost float=100001 1480940990034083306\n")
	assert.Contains(t, strings, "cpu,host=localhost int=100001i 1480940990034083306\n")
	assert.Contains(t, strings, "cpu,host=localhost bool=true 1480940990034083306\n")
	assert.Contains(t, strings, "cpu,host=localhost false=false 1480940990034083306\n")
	assert.Contains(t, strings, "cpu,host=localhost string=\"test\" 1480940990034083306\n")
}
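
// Note (editorial): a maxSize of 10 is below the size of any single line
// here, so Split falls back to one field per metric instead of truncating or
// dropping data.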

func TestSplitMetric_NoOp(t *testing.T) {
	now := time.Unix(0, 1480940990034083306)
	tags := map[string]string{
		"host": "localhost",
	}
	fields := map[string]interface{}{
		"float":  float64(100001),
		"int":    int64(100001),
		"bool":   true,
		"false":  false,
		"string": "test",
	}
	m, err := New("cpu", tags, fields, now)
	assert.NoError(t, err)

	split := m.Split(1000)
	assert.Len(t, split, 1)
	assert.Equal(t, m, split[0])
}

func TestSplitMetric_OneField(t *testing.T) {
	now := time.Unix(0, 1480940990034083306)
	tags := map[string]string{
		"host": "localhost",
	}
	fields := map[string]interface{}{
		"float": float64(100001),
	}
	m, err := New("cpu", tags, fields, now)
	assert.NoError(t, err)

	assert.Equal(t, "cpu,host=localhost float=100001 1480940990034083306\n", m.String())

	split := m.Split(1000)
	assert.Len(t, split, 1)
	assert.Equal(t, "cpu,host=localhost float=100001 1480940990034083306\n", split[0].String())

	split = m.Split(1)
	assert.Len(t, split, 1)
	assert.Equal(t, "cpu,host=localhost float=100001 1480940990034083306\n", split[0].String())

	split = m.Split(40)
	assert.Len(t, split, 1)
	assert.Equal(t, "cpu,host=localhost float=100001 1480940990034083306\n", split[0].String())
}

func TestNewMetricAggregate(t *testing.T) {
	now := time.Now()

	tags := map[string]string{
		"host": "localhost",
	}
	fields := map[string]interface{}{
		"usage_idle": float64(99),
	}
	m, err := New("cpu", tags, fields, now)
	assert.NoError(t, err)

	assert.False(t, m.IsAggregate())
	m.SetAggregate(true)
	assert.True(t, m.IsAggregate())
}

func TestNewMetricString(t *testing.T) {
	now := time.Now()

	tags := map[string]string{
		"host": "localhost",
	}
	fields := map[string]interface{}{
		"usage_idle": float64(99),
	}
	m, err := New("cpu", tags, fields, now)
	assert.NoError(t, err)

	lineProto := fmt.Sprintf("cpu,host=localhost usage_idle=99 %d\n",
		now.UnixNano())
	assert.Equal(t, lineProto, m.String())
}

func TestNewMetricFailNaN(t *testing.T) {
	now := time.Now()

	tags := map[string]string{
		"host": "localhost",
	}
	fields := map[string]interface{}{
		"usage_idle": math.NaN(),
	}

	_, err := New("cpu", tags, fields, now)
	assert.NoError(t, err)
}
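
// Note (editorial): despite its name, this test documents that New currently
// accepts a NaN field value without returning an error; any rejection of NaN
// happens elsewhere (for instance, the parser below treats NaN as an invalid
// number).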

func TestEmptyTagValueOrKey(t *testing.T) {
	now := time.Now()

	tags := map[string]string{
		"host":     "localhost",
		"emptytag": "",
		"":         "valuewithoutkey",
	}
	fields := map[string]interface{}{
		"usage_idle": float64(99),
	}
	m, err := New("cpu", tags, fields, now)

	assert.True(t, m.HasTag("host"))
	assert.False(t, m.HasTag("emptytag"))
	assert.Equal(t,
		fmt.Sprintf("cpu,host=localhost usage_idle=99 %d\n", now.UnixNano()),
		m.String())

	assert.NoError(t, err)
}
674 metric/parse.go Normal file
@@ -0,0 +1,674 @@
package metric

import (
	"bytes"
	"errors"
	"fmt"
	"strconv"
	"time"

	"github.com/influxdata/telegraf"
)

var (
	ErrInvalidNumber = errors.New("invalid number")
)

const (
	// the number of characters for the largest possible int64 (9223372036854775807)
	maxInt64Digits = 19

	// the number of characters for the smallest possible int64 (-9223372036854775808)
	minInt64Digits = 20

	// the number of characters required for the largest float64 before a range
	// check would occur during parsing
	maxFloat64Digits = 25

	// the number of characters required for the smallest float64 before a range
	// check would occur during parsing
	minFloat64Digits = 27

	MaxKeyLength = 65535
)

// The following constants allow us to specify which state to move to
// next, when scanning sections of a Point.
const (
	tagKeyState = iota
	tagValueState
	fieldsState
)

func Parse(buf []byte) ([]telegraf.Metric, error) {
	return ParseWithDefaultTimePrecision(buf, time.Now(), "")
}

func ParseWithDefaultTime(buf []byte, t time.Time) ([]telegraf.Metric, error) {
	return ParseWithDefaultTimePrecision(buf, t, "")
}

func ParseWithDefaultTimePrecision(
	buf []byte,
	t time.Time,
	precision string,
) ([]telegraf.Metric, error) {
	if len(buf) == 0 {
		return []telegraf.Metric{}, nil
	}
	if len(buf) <= 6 {
		return []telegraf.Metric{}, makeError("buffer too short", buf, 0)
	}
	metrics := make([]telegraf.Metric, 0, bytes.Count(buf, []byte("\n"))+1)
	var errStr string
	i := 0
	for {
		j := bytes.IndexByte(buf[i:], '\n')
		if j == -1 {
			break
		}
		if len(buf[i:i+j]) < 2 {
			i += j + 1 // increment i past the previous newline
			continue
		}

		m, err := parseMetric(buf[i:i+j], t, precision)
		if err != nil {
			i += j + 1 // increment i past the previous newline
			errStr += " " + err.Error()
			continue
		}
		i += j + 1 // increment i past the previous newline

		metrics = append(metrics, m)
	}

	if len(errStr) > 0 {
		return metrics, errors.New(errStr)
	}
	return metrics, nil
}
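
// Usage sketch (editorial; the input line is illustrative):
//
//	metrics, err := Parse([]byte("cpu,host=a usage=99 1480595849000000000\n"))
//
// Parsing is line-oriented: input without a trailing newline is ignored,
// malformed lines are skipped and reported together in the returned error,
// and the well-formed lines are still returned alongside that error.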

func parseMetric(buf []byte,
	defaultTime time.Time,
	precision string,
) (telegraf.Metric, error) {
	var dTime string
	// scan the first block which is measurement[,tag1=value1,tag2=value2...]
	pos, key, err := scanKey(buf, 0)
	if err != nil {
		return nil, err
	}

	// the measurement name is required
	if len(key) == 0 {
		return nil, fmt.Errorf("missing measurement")
	}

	if len(key) > MaxKeyLength {
		return nil, fmt.Errorf("max key length exceeded: %v > %v", len(key), MaxKeyLength)
	}

	// scan the second block which is field1=value1[,field2=value2,...]
	pos, fields, err := scanFields(buf, pos)
	if err != nil {
		return nil, err
	}

	// at least one field is required
	if len(fields) == 0 {
		return nil, fmt.Errorf("missing fields")
	}

	// scan the last block which is an optional integer timestamp
	pos, ts, err := scanTime(buf, pos)
	if err != nil {
		return nil, err
	}

	// apply the precision multiplier
	var nsec int64
	multiplier := getPrecisionMultiplier(precision)
	if multiplier > 1 {
		tsint, err := parseIntBytes(ts, 10, 64)
		if err != nil {
			return nil, err
		}

		// assign (not redeclare) so the outer nsec is set for the metric below
		nsec = multiplier * tsint
		ts = []byte(strconv.FormatInt(nsec, 10))
	}

	m := &metric{
		fields: fields,
		t:      ts,
		nsec:   nsec,
	}

	// parse out the measurement name
	// namei is the index at which the "name" ends
	namei := indexUnescapedByte(key, ',')
	if namei < 1 {
		// no tags
		m.name = key
	} else {
		m.name = key[0:namei]
		m.tags = key[namei:]
	}

	if len(m.t) == 0 {
		if len(dTime) == 0 {
			dTime = fmt.Sprint(defaultTime.UnixNano())
		}
		// use the default time
		m.t = []byte(dTime)
	}

	// here we copy on return because this allows us to later call
	// AddTag, AddField, RemoveTag, RemoveField, etc. without worrying about
	// modifying 'tag' bytes having an effect on 'field' bytes, for example.
	return m.Copy(), nil
}

// scanKey scans buf starting at i for the measurement and tag portion of the point.
// It returns the ending position and the byte slice of the key within buf. If there
// are tags, they will be sorted if they are not already.
func scanKey(buf []byte, i int) (int, []byte, error) {
	start := skipWhitespace(buf, i)
	i = start

	// First scan the Point's measurement.
	state, i, err := scanMeasurement(buf, i)
	if err != nil {
		return i, buf[start:i], err
	}

	// Optionally scan tags if needed.
	if state == tagKeyState {
		i, err = scanTags(buf, i)
		if err != nil {
			return i, buf[start:i], err
		}
	}

	return i, buf[start:i], nil
}

// scanMeasurement examines the measurement part of a Point, returning
// the next state to move to, and the current location in the buffer.
func scanMeasurement(buf []byte, i int) (int, int, error) {
	// Check the first byte of the measurement; anything except a comma is fine.
	// It can't be a space, since whitespace is stripped prior to this
	// function call.
	if i >= len(buf) || buf[i] == ',' {
		return -1, i, makeError("missing measurement", buf, i)
	}

	for {
		i++
		if i >= len(buf) {
			// cpu
			return -1, i, makeError("missing fields", buf, i)
		}

		if buf[i-1] == '\\' {
			// Skip the character (it's escaped).
			continue
		}

		// Unescaped comma; move onto scanning the tags.
		if buf[i] == ',' {
			return tagKeyState, i + 1, nil
		}

		// Unescaped space; move onto scanning the fields.
		if buf[i] == ' ' {
			// cpu value=1.0
			return fieldsState, i, nil
		}
	}
}

// scanTags examines all the tags in a Point, returning the location in buf
// where the Point's fields begin.
func scanTags(buf []byte, i int) (int, error) {
	var (
		err   error
		state = tagKeyState
	)

	for {
		switch state {
		case tagKeyState:
			i, err = scanTagsKey(buf, i)
			state = tagValueState // a tag value always follows a tag key
		case tagValueState:
			state, i, err = scanTagsValue(buf, i)
		case fieldsState:
			return i, nil
		}

		if err != nil {
			return i, err
		}
	}
}

// scanTagsKey scans each character in a tag key.
func scanTagsKey(buf []byte, i int) (int, error) {
	// First character of the key.
	if i >= len(buf) || buf[i] == ' ' || buf[i] == ',' || buf[i] == '=' {
		// cpu,{'', ' ', ',', '='}
		return i, makeError("missing tag key", buf, i)
	}

	// Examine each character in the tag key until we hit an unescaped
	// equals (the tag value), or we hit an error (i.e., an unescaped
	// space or comma).
	for {
		i++

		// Either we reached the end of the buffer or we hit an
		// unescaped comma or space.
		if i >= len(buf) ||
			((buf[i] == ' ' || buf[i] == ',') && buf[i-1] != '\\') {
			// cpu,tag{'', ' ', ','}
			return i, makeError("missing tag value", buf, i)
		}

		if buf[i] == '=' && buf[i-1] != '\\' {
			// cpu,tag=
			return i + 1, nil
		}
	}
}

// scanTagsValue scans each character in a tag value.
func scanTagsValue(buf []byte, i int) (int, int, error) {
	// A tag value cannot be empty.
	if i >= len(buf) || buf[i] == ',' || buf[i] == ' ' {
		// cpu,tag={',', ' '}
		return -1, i, makeError("missing tag value", buf, i)
	}

	// Examine each character in the tag value until we hit an unescaped
	// comma (move onto the next tag key), an unescaped space (move onto
	// the fields), or we error out.
	for {
		i++
		if i >= len(buf) {
			// cpu,tag=value
			return -1, i, makeError("missing fields", buf, i)
		}

		// An unescaped equals sign is an invalid tag value.
		if buf[i] == '=' && buf[i-1] != '\\' {
			// cpu,tag={'=', 'fo=o'}
			return -1, i, makeError("invalid tag format", buf, i)
		}

		if buf[i] == ',' && buf[i-1] != '\\' {
			// cpu,tag=foo,
			return tagKeyState, i + 1, nil
		}

		// cpu,tag=foo value=1.0
		// cpu,tag=foo\= value=1.0
		if buf[i] == ' ' && buf[i-1] != '\\' {
			return fieldsState, i, nil
		}
	}
}

// scanFields scans buf, starting at i, for the fields section of a point. It
// returns the ending position and the byte slice of the fields within buf.
func scanFields(buf []byte, i int) (int, []byte, error) {
	start := skipWhitespace(buf, i)
	i = start
	quoted := false

	// tracks how many '=' we've seen
	equals := 0

	// tracks how many commas we've seen
	commas := 0

	for {
		// reached the end of buf?
		if i >= len(buf) {
			break
		}

		// escaped characters?
		if buf[i] == '\\' && i+1 < len(buf) {
			i += 2
			continue
		}

		// If the value is quoted, scan until we get to the end quote.
		// Only quote values in the field value, since quotes are not
		// significant in the field key.
		if buf[i] == '"' && equals > commas {
			quoted = !quoted
			i++
			continue
		}

		// If we see an '=', ensure that there is at least one char before and after it
		if buf[i] == '=' && !quoted {
			equals++

			// check for "... =123" but allow "a\ =123"
			if buf[i-1] == ' ' && buf[i-2] != '\\' {
				return i, buf[start:i], makeError("missing field key", buf, i)
			}

			// check for "...a=123,=456" but allow "a=123,a\,=456"
			if buf[i-1] == ',' && buf[i-2] != '\\' {
				return i, buf[start:i], makeError("missing field key", buf, i)
			}

			// check for "... value="
			if i+1 >= len(buf) {
				return i, buf[start:i], makeError("missing field value", buf, i)
			}

			// check for "... value=,value2=..."
			if buf[i+1] == ',' || buf[i+1] == ' ' {
				return i, buf[start:i], makeError("missing field value", buf, i)
			}

			if isNumeric(buf[i+1]) || buf[i+1] == '-' || buf[i+1] == 'N' || buf[i+1] == 'n' {
				var err error
				i, err = scanNumber(buf, i+1)
				if err != nil {
					return i, buf[start:i], err
				}
				continue
			}
			// If the next byte is not a double-quote, the value must be a boolean
			if buf[i+1] != '"' {
				var err error
				i, _, err = scanBoolean(buf, i+1)
				if err != nil {
					return i, buf[start:i], err
				}
				continue
			}
		}

		if buf[i] == ',' && !quoted {
			commas++
		}

		// reached the end of the block?
		if buf[i] == ' ' && !quoted {
			break
		}
		i++
	}

	if quoted {
		return i, buf[start:i], makeError("unbalanced quotes", buf, i)
	}

	// check that all field sections had keys and values (e.g. prevent "a=1,b")
	if equals == 0 || commas != equals-1 {
		return i, buf[start:i], makeError("invalid field format", buf, i)
	}

	return i, buf[start:i], nil
}

// scanTime scans buf, starting at i, for the time section of a point. It
// returns the ending position and the byte slice of the timestamp within buf,
// and an error if the timestamp is not in the correct numeric format.
func scanTime(buf []byte, i int) (int, []byte, error) {
	start := skipWhitespace(buf, i)
	i = start

	for {
		// reached the end of buf?
		if i >= len(buf) {
			break
		}

		// Reached the end of the block or trailing whitespace?
		if buf[i] == '\n' || buf[i] == ' ' {
			break
		}

		// Handle negative timestamps
		if i == start && buf[i] == '-' {
			i++
			continue
		}

		// Timestamps should be integers; make sure they are, so we don't need
		// to actually parse the timestamp until needed.
		if buf[i] < '0' || buf[i] > '9' {
			return i, buf[start:i], makeError("invalid timestamp", buf, i)
		}
		i++
	}
	return i, buf[start:i], nil
}

func isNumeric(b byte) bool {
	return (b >= '0' && b <= '9') || b == '.'
}

// scanNumber returns the end position within buf, starting at i, after
// scanning over buf for an integer or float. It returns an
// error if an invalid number is scanned.
func scanNumber(buf []byte, i int) (int, error) {
	start := i
	var isInt bool

	// Is this a negative number?
	if i < len(buf) && buf[i] == '-' {
		i++
		// There must be more characters now, as just '-' is illegal.
		if i == len(buf) {
			return i, ErrInvalidNumber
		}
	}

	// tracks whether we've seen a decimal point
	decimal := false

	// indicates that the number is a float in scientific notation
	scientific := false

	for {
		if i >= len(buf) {
			break
		}

		if buf[i] == ',' || buf[i] == ' ' {
			break
		}

		if buf[i] == 'i' && i > start && !isInt {
			isInt = true
			i++
			continue
		}

		if buf[i] == '.' {
			// Can't have more than 1 decimal (e.g. 1.1.1 should fail)
			if decimal {
				return i, ErrInvalidNumber
			}
			decimal = true
		}

		// `e` is valid for floats but not as the first char
		if i > start && (buf[i] == 'e' || buf[i] == 'E') {
			scientific = true
			i++
			continue
		}

		// + and - are only valid at this point if they follow an e (scientific notation)
		if (buf[i] == '+' || buf[i] == '-') && (buf[i-1] == 'e' || buf[i-1] == 'E') {
			i++
			continue
		}

		// NaN is an unsupported value
		if i+2 < len(buf) && (buf[i] == 'N' || buf[i] == 'n') {
			return i, ErrInvalidNumber
		}

		if !isNumeric(buf[i]) {
			return i, ErrInvalidNumber
		}
		i++
	}

	if isInt && (decimal || scientific) {
		return i, ErrInvalidNumber
	}

	numericDigits := i - start
	if isInt {
		numericDigits--
	}
	if decimal {
		numericDigits--
	}
	if buf[start] == '-' {
		numericDigits--
	}

	if numericDigits == 0 {
		return i, ErrInvalidNumber
	}

	// It's more common that numbers will be within the min/max range for their
	// type, but we need to prevent out-of-range numbers from being parsed
	// successfully. This uses some simple heuristics to decide if we should
	// parse the number to the actual type. It does not do it all the time
	// because it incurs extra allocations, and we end up converting the type
	// again when writing points to disk.
	if isInt {
		// Make sure the last char is an 'i' for integers (e.g. 9i10 is not valid)
		if buf[i-1] != 'i' {
			return i, ErrInvalidNumber
		}
		// Parse the int to check bounds; the number of digits could be larger
		// than the max range. We subtract 1 from the index to remove the 'i'
		// from our tests.
		if len(buf[start:i-1]) >= maxInt64Digits || len(buf[start:i-1]) >= minInt64Digits {
			if _, err := parseIntBytes(buf[start:i-1], 10, 64); err != nil {
				return i, makeError(fmt.Sprintf("unable to parse integer %s: %s", buf[start:i-1], err), buf, i)
			}
		}
	} else {
		// Parse the float to check bounds if it's in scientific notation or the
		// number of digits could be larger than the max range
		if scientific || len(buf[start:i]) >= maxFloat64Digits || len(buf[start:i]) >= minFloat64Digits {
			if _, err := parseFloatBytes(buf[start:i], 64); err != nil {
				return i, makeError("invalid float", buf, i)
			}
		}
	}

	return i, nil
}
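
// Note (editorial): the bounds check above is a heuristic; numbers short
// enough to be unambiguously in range skip the parse entirely, so e.g.
// "123i" is accepted without calling parseIntBytes, while an integer with
// 19 or more digits is parsed once here just to validate its range.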

// scanBoolean returns the end position within buf, starting at i, after
// scanning over buf for a boolean. Valid values for a boolean are
// t, T, true, TRUE, f, F, false, FALSE. It returns an error if an invalid
// boolean is scanned.
func scanBoolean(buf []byte, i int) (int, []byte, error) {
	start := i

	if i < len(buf) && (buf[i] != 't' && buf[i] != 'f' && buf[i] != 'T' && buf[i] != 'F') {
		return i, buf[start:i], makeError("invalid value", buf, i)
	}

	i++
	for {
		if i >= len(buf) {
			break
		}

		if buf[i] == ',' || buf[i] == ' ' {
			break
		}
		i++
	}

	// A single-char bool (t, T, f, F) is ok
	if i-start == 1 {
		return i, buf[start:i], nil
	}

	// the length must be 4 for true or TRUE
	if (buf[start] == 't' || buf[start] == 'T') && i-start != 4 {
		return i, buf[start:i], makeError("invalid boolean", buf, i)
	}

	// the length must be 5 for false or FALSE
	if (buf[start] == 'f' || buf[start] == 'F') && i-start != 5 {
		return i, buf[start:i], makeError("invalid boolean", buf, i)
	}

	// Otherwise
	valid := false
	switch buf[start] {
	case 't':
		valid = bytes.Equal(buf[start:i], []byte("true"))
	case 'f':
		valid = bytes.Equal(buf[start:i], []byte("false"))
	case 'T':
		valid = bytes.Equal(buf[start:i], []byte("TRUE")) || bytes.Equal(buf[start:i], []byte("True"))
	case 'F':
		valid = bytes.Equal(buf[start:i], []byte("FALSE")) || bytes.Equal(buf[start:i], []byte("False"))
	}

	if !valid {
		return i, buf[start:i], makeError("invalid boolean", buf, i)
	}

	return i, buf[start:i], nil
}

// skipWhitespace returns the end position within buf, starting at i, after
// scanning over spaces in tags.
func skipWhitespace(buf []byte, i int) int {
	for i < len(buf) {
		if buf[i] != ' ' && buf[i] != '\t' && buf[i] != 0 {
			break
		}
		i++
	}
	return i
}

// makeError is a helper function for making a metric parsing error.
// reason is the reason that the error occurred.
// buf should be the current buffer we are parsing.
// i is the current index, to give some context on where in the buffer we are.
func makeError(reason string, buf []byte, i int) error {
	return fmt.Errorf("metric parsing error, reason: [%s], buffer: [%s], index: [%d]",
		reason, buf, i)
}

// getPrecisionMultiplier will return a multiplier for the precision specified.
func getPrecisionMultiplier(precision string) int64 {
	d := time.Nanosecond
	switch precision {
	case "u":
		d = time.Microsecond
	case "ms":
		d = time.Millisecond
	case "s":
		d = time.Second
	case "m":
		d = time.Minute
	case "h":
		d = time.Hour
	}
	return int64(d)
}
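
// Example (editorial): with precision "s", getPrecisionMultiplier returns
// 1e9 (time.Second in nanoseconds), so a line-protocol timestamp of
// 1480595849 is rescaled to 1480595849000000000 nanoseconds in parseMetric
// above.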
399 metric/parse_test.go Normal file
@@ -0,0 +1,399 @@
|
||||
package metric

import (
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

const trues = `booltest b=T
booltest b=t
booltest b=True
booltest b=TRUE
booltest b=true
`

const falses = `booltest b=F
booltest b=f
booltest b=False
booltest b=FALSE
booltest b=false
`

const withEscapes = `w\,\ eather,host=local temp=99 1465839830100400200
w\,eather,host=local temp=99 1465839830100400200
weather,location=us\,midwest temperature=82 1465839830100400200
weather,location=us-midwest temp\=rature=82 1465839830100400200
weather,location\ place=us-midwest temperature=82 1465839830100400200
weather,location=us-midwest temperature="too\"hot\"" 1465839830100400200
`

const withTimestamps = `cpu usage=99 1480595849000000000
cpu usage=99 1480595850000000000
cpu usage=99 1480595851700030000
cpu usage=99 1480595852000000300
`

const sevenMetrics = `cpu,host=foo,datacenter=us-east idle=99,busy=1i,b=true,s="string"
cpu,host=foo,datacenter=us-east idle=99,busy=1i,b=true,s="string"
cpu,host=foo,datacenter=us-east idle=99,busy=1i,b=true,s="string"
cpu,host=foo,datacenter=us-east idle=99,busy=1i,b=true,s="string"
cpu,host=foo,datacenter=us-east idle=99,busy=1i,b=true,s="string"
cpu,host=foo,datacenter=us-east idle=99,busy=1i,b=true,s="string"
cpu,host=foo,datacenter=us-east idle=99,busy=1i,b=true,s="string"
`

const negMetrics = `weather,host=local temp=-99i,temp_float=-99.4 1465839830100400200
`

// some metrics are invalid
const someInvalid = `cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu3, host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu4 , usage_idle=99,usage_busy=1
cpu 1480595852000000300
cpu usage=99 1480595852foobar300
cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
`

func TestParse(t *testing.T) {
	start := time.Now()
	metrics, err := Parse([]byte(sevenMetrics))
	assert.NoError(t, err)
	assert.Len(t, metrics, 7)

	// all metrics parsed together w/o a timestamp should have the same time.
	firstTime := metrics[0].Time()
	for _, m := range metrics {
		assert.Equal(t,
			map[string]interface{}{
				"idle": float64(99),
				"busy": int64(1),
				"b":    true,
				"s":    "string",
			},
			m.Fields(),
		)
		assert.Equal(t,
			map[string]string{
				"host":       "foo",
				"datacenter": "us-east",
			},
			m.Tags(),
		)
		assert.True(t, m.Time().After(start))
		assert.True(t, m.Time().Equal(firstTime))
	}
}

func TestParseNegNumbers(t *testing.T) {
	metrics, err := Parse([]byte(negMetrics))
	assert.NoError(t, err)
	assert.Len(t, metrics, 1)

	assert.Equal(t,
		map[string]interface{}{
			"temp":       int64(-99),
			"temp_float": float64(-99.4),
		},
		metrics[0].Fields(),
	)
	assert.Equal(t,
		map[string]string{
			"host": "local",
		},
		metrics[0].Tags(),
	)
}

func TestParseErrors(t *testing.T) {
	start := time.Now()
	metrics, err := Parse([]byte(someInvalid))
	assert.Error(t, err)
	assert.Len(t, metrics, 4)

	// all metrics parsed together w/o a timestamp should have the same time.
	firstTime := metrics[0].Time()
	for _, m := range metrics {
		assert.Equal(t,
			map[string]interface{}{
				"usage_idle": float64(99),
				"usage_busy": float64(1),
			},
			m.Fields(),
		)
		assert.Equal(t,
			map[string]string{
				"host":       "foo",
				"datacenter": "us-east",
			},
			m.Tags(),
		)
		assert.True(t, m.Time().After(start))
		assert.True(t, m.Time().Equal(firstTime))
	}
}

func TestParseWithTimestamps(t *testing.T) {
	metrics, err := Parse([]byte(withTimestamps))
	assert.NoError(t, err)
	assert.Len(t, metrics, 4)

	expectedTimestamps := []time.Time{
		time.Unix(0, 1480595849000000000),
		time.Unix(0, 1480595850000000000),
		time.Unix(0, 1480595851700030000),
		time.Unix(0, 1480595852000000300),
	}

	// each metric should get the timestamp from its own line.
	for i, m := range metrics {
		assert.Equal(t,
			map[string]interface{}{
				"usage": float64(99),
			},
			m.Fields(),
		)
		assert.True(t, m.Time().Equal(expectedTimestamps[i]))
	}
}

func TestParseEscapes(t *testing.T) {
	metrics, err := Parse([]byte(withEscapes))
	assert.NoError(t, err)
	assert.Len(t, metrics, 6)

	tests := []struct {
		name   string
		fields map[string]interface{}
		tags   map[string]string
	}{
		{
			name:   `w, eather`,
			fields: map[string]interface{}{"temp": float64(99)},
			tags:   map[string]string{"host": "local"},
		},
		{
			name:   `w,eather`,
			fields: map[string]interface{}{"temp": float64(99)},
			tags:   map[string]string{"host": "local"},
		},
		{
			name:   `weather`,
			fields: map[string]interface{}{"temperature": float64(82)},
			tags:   map[string]string{"location": `us,midwest`},
		},
		{
			name:   `weather`,
			fields: map[string]interface{}{`temp=rature`: float64(82)},
			tags:   map[string]string{"location": `us-midwest`},
		},
		{
			name:   `weather`,
			fields: map[string]interface{}{"temperature": float64(82)},
			tags:   map[string]string{`location place`: `us-midwest`},
		},
		{
			name:   `weather`,
			fields: map[string]interface{}{`temperature`: `too"hot"`},
			tags:   map[string]string{"location": `us-midwest`},
		},
	}

	for i, test := range tests {
		assert.Equal(t, test.name, metrics[i].Name())
		assert.Equal(t, test.fields, metrics[i].Fields())
		assert.Equal(t, test.tags, metrics[i].Tags())
	}
}

func TestParseTrueBooleans(t *testing.T) {
	metrics, err := Parse([]byte(trues))
	assert.NoError(t, err)
	assert.Len(t, metrics, 5)

	for _, metric := range metrics {
		assert.Equal(t, "booltest", metric.Name())
		assert.Equal(t, true, metric.Fields()["b"])
	}
}

func TestParseFalseBooleans(t *testing.T) {
	metrics, err := Parse([]byte(falses))
	assert.NoError(t, err)
	assert.Len(t, metrics, 5)

	for _, metric := range metrics {
		assert.Equal(t, "booltest", metric.Name())
		assert.Equal(t, false, metric.Fields()["b"])
	}
}

func TestParsePointBadNumber(t *testing.T) {
	for _, tt := range []string{
		"cpu v=- ",
		"cpu v=-i ",
		"cpu v=-. ",
		"cpu v=. ",
		"cpu v=1.0i ",
		"cpu v=1ii ",
		"cpu v=1a ",
		"cpu v=-e-e-e ",
		"cpu v=42+3 ",
		"cpu v= ",
	} {
		_, err := Parse([]byte(tt + "\n"))
		assert.Error(t, err, tt)
	}
}

func TestParseTagsMissingParts(t *testing.T) {
	for _, tt := range []string{
		`cpu,host`,
		`cpu,host,`,
		`cpu,host=`,
		`cpu,f=oo=bar value=1`,
		`cpu,host value=1i`,
		`cpu,host=serverA,region value=1i`,
		`cpu,host=serverA,region= value=1i`,
		`cpu,host=serverA,region=,zone=us-west value=1i`,
		`cpu, value=1`,
		`cpu, ,,`,
		`cpu,,,`,
		`cpu,host=serverA,=us-east value=1i`,
		`cpu,host=serverAa\,,=us-east value=1i`,
		`cpu,host=serverA\,,=us-east value=1i`,
		`cpu, =serverA value=1i`,
	} {
		_, err := Parse([]byte(tt + "\n"))
		assert.Error(t, err, tt)
	}
}

func TestParsePointWhitespace(t *testing.T) {
	for _, tt := range []string{
		`cpu    value=1.0 1257894000000000000`,
		`cpu value=1.0    1257894000000000000`,
		`cpu    value=1.0    1257894000000000000`,
		`cpu value=1.0 1257894000000000000 `,
	} {
		m, err := Parse([]byte(tt + "\n"))
		assert.NoError(t, err, tt)
		assert.Equal(t, "cpu", m[0].Name())
		assert.Equal(t, map[string]interface{}{"value": float64(1)}, m[0].Fields())
	}
}

func TestParsePointInvalidFields(t *testing.T) {
	for _, tt := range []string{
		"test,foo=bar a=101,=value",
		"test,foo=bar =value",
		"test,foo=bar a=101,key=",
		"test,foo=bar key=",
		`test,foo=bar a=101,b="foo`,
	} {
		_, err := Parse([]byte(tt + "\n"))
		assert.Error(t, err, tt)
	}
}

func TestParsePointNoFields(t *testing.T) {
	for _, tt := range []string{
		"cpu_load_short,host=server01,region=us-west",
		"very_long_measurement_name",
		"cpu,host==",
		"============",
		"cpu",
		"cpu\n\n\n\n\n\n\n",
		" ",
	} {
		_, err := Parse([]byte(tt + "\n"))
		assert.Error(t, err, tt)
	}
}

// a b=1 << this is the shortest possible metric
// any shorter is just ignored
func TestParseBufTooShort(t *testing.T) {
	for _, tt := range []string{
		"",
		"a",
		"a ",
		"a b=",
	} {
		_, err := Parse([]byte(tt + "\n"))
		assert.Error(t, err, tt)
	}
}

func TestParseInvalidBooleans(t *testing.T) {
	for _, tt := range []string{
		"test b=tru",
		"test b=fals",
		"test b=faLse",
		"test q=foo",
		"test b=lambchops",
	} {
		_, err := Parse([]byte(tt + "\n"))
		assert.Error(t, err, tt)
	}
}

func TestParseInvalidNumbers(t *testing.T) {
	for _, tt := range []string{
		"test b=-",
		"test b=1.1.1",
		"test b=nan",
		"test b=9i10",
		"test b=9999999999999999999i",
	} {
		_, err := Parse([]byte(tt + "\n"))
		assert.Error(t, err, tt)
	}
}

func TestParseNegativeTimestamps(t *testing.T) {
	for _, tt := range []string{
		"test foo=101 -1257894000000000000",
	} {
		metrics, err := Parse([]byte(tt + "\n"))
		assert.NoError(t, err, tt)
		assert.True(t, metrics[0].Time().Equal(time.Unix(0, -1257894000000000000)))
	}
}

func TestParsePrecision(t *testing.T) {
	for _, tt := range []struct {
		line      string
		precision string
		expected  int64
	}{
		{"test v=42 1491847420", "s", 1491847420000000000},
		{"test v=42 1491847420123", "ms", 1491847420123000000},
		{"test v=42 1491847420123456", "u", 1491847420123456000},
		{"test v=42 1491847420123456789", "ns", 1491847420123456789},

		{"test v=42 1491847420123456789", "1s", 1491847420123456789},
		{"test v=42 1491847420123456789", "asdf", 1491847420123456789},
	} {
		metrics, err := ParseWithDefaultTimePrecision(
			[]byte(tt.line+"\n"), time.Now(), tt.precision)
		assert.NoError(t, err, tt)
		assert.Equal(t, tt.expected, metrics[0].UnixNano())
	}
}

func TestParseMaxKeyLength(t *testing.T) {
	key := ""
	for {
		if len(key) > MaxKeyLength {
			break
		}
		key += "test"
	}

	_, err := Parse([]byte(key + " value=1\n"))
	assert.Error(t, err)
}
155 metric/reader.go Normal file
@@ -0,0 +1,155 @@
package metric

import (
	"io"

	"github.com/influxdata/telegraf"
)

type state int

const (
	_ state = iota
	// normal state copies whole metrics into the given buffer until we can't
	// fit the next metric.
	normal
	// split state means that we have a metric that we were able to split, so
	// that we can fit it into multiple metrics (and calls to Read)
	split
	// overflow state means that we have a metric that didn't fit into a single
	// buffer, and needs to be split across multiple calls to Read.
	overflow
	// splitOverflow state means that a split metric didn't fit into a single
	// buffer, and needs to be split across multiple calls to Read.
	splitOverflow
	// done means we're done reading metrics, and now always return (0, io.EOF)
	done
)

type reader struct {
	metrics      []telegraf.Metric
	splitMetrics []telegraf.Metric
	buf          []byte
	state        state

	// metric index
	iM int
	// split metric index
	iSM int
	// buffer index
	iB int
}

func NewReader(metrics []telegraf.Metric) io.Reader {
	return &reader{
		metrics: metrics,
		state:   normal,
	}
}

func (r *reader) Read(p []byte) (n int, err error) {
	var i int
	switch r.state {
	case done:
		return 0, io.EOF
	case normal:
		for {
			// this for-loop is the sunny-day scenario, where we are given a
			// buffer that is large enough to hold at least a single metric.
			// all of the cases below it are edge-cases.
			if r.metrics[r.iM].Len() < len(p[i:]) {
				i += r.metrics[r.iM].SerializeTo(p[i:])
			} else {
				break
			}
			r.iM++
			if r.iM == len(r.metrics) {
				r.state = done
				return i, io.EOF
			}
		}

		// if we haven't written any bytes, check if we can split the current
		// metric into multiple full metrics at a smaller size.
		if i == 0 {
			tmp := r.metrics[r.iM].Split(len(p))
			if len(tmp) > 1 {
				r.splitMetrics = tmp
				r.state = split
				if r.splitMetrics[0].Len() < len(p) {
					i += r.splitMetrics[0].SerializeTo(p)
					r.iSM = 1
				} else {
					// splitting didn't quite work, so we'll drop down and
					// overflow the metric.
					r.state = normal
					r.iSM = 0
				}
			}
		}

		// if we haven't written any bytes and we're not at the end of the metrics
		// slice, then it means we have a single metric that is larger than the
		// provided buffer.
		if i == 0 {
			r.buf = r.metrics[r.iM].Serialize()
			i += copy(p, r.buf[r.iB:])
			r.iB += i
			r.state = overflow
		}

	case split:
		if r.splitMetrics[r.iSM].Len() < len(p) {
			// write the current split metric
			i += r.splitMetrics[r.iSM].SerializeTo(p)
			r.iSM++
			if r.iSM >= len(r.splitMetrics) {
				// done writing the current split metrics
				r.iSM = 0
				r.iM++
				if r.iM == len(r.metrics) {
					r.state = done
					return i, io.EOF
				}
				r.state = normal
			}
		} else {
			// This would only happen if we split the metric, and then a
			// subsequent buffer was smaller than the initial one given,
			// so that our split metric no longer fits.
			r.buf = r.splitMetrics[r.iSM].Serialize()
			i += copy(p, r.buf[r.iB:])
			r.iB += i
			r.state = splitOverflow
		}

	case splitOverflow:
		i = copy(p, r.buf[r.iB:])
		r.iB += i
		if r.iB >= len(r.buf) {
			r.iB = 0
			r.iSM++
			if r.iSM == len(r.splitMetrics) {
				r.iM++
				r.state = normal
			} else {
				r.state = split
			}
		}

	case overflow:
		i = copy(p, r.buf[r.iB:])
		r.iB += i
		if r.iB >= len(r.buf) {
			r.iB = 0
			r.iM++
			if r.iM == len(r.metrics) {
				r.state = done
				return i, io.EOF
			}
			r.state = normal
		}
	}

	return i, nil
}
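
A minimal usage sketch for the reader above (assuming the `metric.New` and `metric.NewReader` signatures used elsewhere in this changeset): any consumer that copies through a bounded buffer exercises the normal/split/overflow states transparently.

```go
package main

import (
	"io"
	"os"
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/metric"
)

func main() {
	m, _ := metric.New("cpu",
		map[string]string{"host": "foo"},
		map[string]interface{}{"usage": 99.0},
		time.Unix(1481032190, 0))

	// NewReader hands back a plain io.Reader, so it plugs into the standard
	// copy machinery; a deliberately tiny buffer forces the split/overflow
	// states rather than the sunny-day path.
	r := metric.NewReader([]telegraf.Metric{m})
	io.CopyBuffer(os.Stdout, r, make([]byte, 16))
}
```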
487 metric/reader_test.go Normal file
@@ -0,0 +1,487 @@
package metric

import (
	"io"
	"io/ioutil"
	"regexp"
	"testing"
	"time"

	"github.com/influxdata/telegraf"

	"github.com/stretchr/testify/assert"
)

func BenchmarkMetricReader(b *testing.B) {
	metrics := make([]telegraf.Metric, 10)
	for i := 0; i < 10; i++ {
		metrics[i], _ = New("foo", map[string]string{},
			map[string]interface{}{"value": int64(1)}, time.Now())
	}
	for n := 0; n < b.N; n++ {
		r := NewReader(metrics)
		io.Copy(ioutil.Discard, r)
	}
}

func TestMetricReader(t *testing.T) {
	ts := time.Unix(1481032190, 0)
	metrics := make([]telegraf.Metric, 10)
	for i := 0; i < 10; i++ {
		metrics[i], _ = New("foo", map[string]string{},
			map[string]interface{}{"value": int64(1)}, ts)
	}

	r := NewReader(metrics)

	buf := make([]byte, 35)
	for i := 0; i < 10; i++ {
		n, err := r.Read(buf)
		if err != nil {
			assert.True(t, err == io.EOF, err.Error())
		}
		assert.Equal(t, 33, n)
		assert.Equal(t, "foo value=1i 1481032190000000000\n", string(buf[0:n]))
	}

	// reader should now be done, and always return 0, io.EOF
	for i := 0; i < 10; i++ {
		n, err := r.Read(buf)
		assert.True(t, err == io.EOF, err.Error())
		assert.Equal(t, 0, n)
	}
}

func TestMetricReader_OverflowMetric(t *testing.T) {
	ts := time.Unix(1481032190, 0)
	m, _ := New("foo", map[string]string{},
		map[string]interface{}{"value": int64(10)}, ts)
	metrics := []telegraf.Metric{m}

	r := NewReader(metrics)
	buf := make([]byte, 5)

	tests := []struct {
		exp string
		err error
		n   int
	}{
		{"foo v", nil, 5},
		{"alue=", nil, 5},
		{"10i 1", nil, 5},
		{"48103", nil, 5},
		{"21900", nil, 5},
		{"00000", nil, 5},
		{"000\n", io.EOF, 4},
		{"", io.EOF, 0},
	}

	for _, test := range tests {
		n, err := r.Read(buf)
		assert.Equal(t, test.n, n)
		assert.Equal(t, test.exp, string(buf[0:n]))
		assert.Equal(t, test.err, err)
	}
}

func TestMetricReader_OverflowMultipleMetrics(t *testing.T) {
	ts := time.Unix(1481032190, 0)
	m, _ := New("foo", map[string]string{},
		map[string]interface{}{"value": int64(10)}, ts)
	metrics := []telegraf.Metric{m, m.Copy()}

	r := NewReader(metrics)
	buf := make([]byte, 10)

	tests := []struct {
		exp string
		err error
		n   int
	}{
		{"foo value=", nil, 10},
		{"10i 148103", nil, 10},
		{"2190000000", nil, 10},
		{"000\n", nil, 4},
		{"foo value=", nil, 10},
		{"10i 148103", nil, 10},
		{"2190000000", nil, 10},
		{"000\n", io.EOF, 4},
		{"", io.EOF, 0},
	}

	for _, test := range tests {
		n, err := r.Read(buf)
		assert.Equal(t, test.n, n)
		assert.Equal(t, test.exp, string(buf[0:n]))
		assert.Equal(t, test.err, err)
	}
}

// test splitting a metric
func TestMetricReader_SplitMetric(t *testing.T) {
	ts := time.Unix(1481032190, 0)
	m1, _ := New("foo", map[string]string{},
		map[string]interface{}{
			"value1": int64(10),
			"value2": int64(10),
			"value3": int64(10),
			"value4": int64(10),
			"value5": int64(10),
			"value6": int64(10),
		},
		ts,
	)
	metrics := []telegraf.Metric{m1}

	r := NewReader(metrics)
	buf := make([]byte, 60)

	tests := []struct {
		expRegex string
		err      error
		n        int
	}{
		{`foo value\d=10i,value\d=10i,value\d=10i 1481032190000000000\n`, nil, 57},
		{`foo value\d=10i,value\d=10i,value\d=10i 1481032190000000000\n`, io.EOF, 57},
		{"", io.EOF, 0},
	}

	for _, test := range tests {
		n, err := r.Read(buf)
		assert.Equal(t, test.n, n)
		re := regexp.MustCompile(test.expRegex)
		assert.True(t, re.MatchString(string(buf[0:n])), string(buf[0:n]))
		assert.Equal(t, test.err, err)
	}
}

// test an array with one split metric and one unsplit
func TestMetricReader_SplitMetric2(t *testing.T) {
	ts := time.Unix(1481032190, 0)
	m1, _ := New("foo", map[string]string{},
		map[string]interface{}{
			"value1": int64(10),
			"value2": int64(10),
			"value3": int64(10),
			"value4": int64(10),
			"value5": int64(10),
			"value6": int64(10),
		},
		ts,
	)
	m2, _ := New("foo", map[string]string{},
		map[string]interface{}{
			"value1": int64(10),
		},
		ts,
	)
	metrics := []telegraf.Metric{m1, m2}

	r := NewReader(metrics)
	buf := make([]byte, 60)

	tests := []struct {
		expRegex string
		err      error
		n        int
	}{
		{`foo value\d=10i,value\d=10i,value\d=10i 1481032190000000000\n`, nil, 57},
		{`foo value\d=10i,value\d=10i,value\d=10i 1481032190000000000\n`, nil, 57},
		{`foo value1=10i 1481032190000000000\n`, io.EOF, 35},
		{"", io.EOF, 0},
	}

	for _, test := range tests {
		n, err := r.Read(buf)
		assert.Equal(t, test.n, n)
		re := regexp.MustCompile(test.expRegex)
		assert.True(t, re.MatchString(string(buf[0:n])), string(buf[0:n]))
		assert.Equal(t, test.err, err)
	}
}

// test split that results in metrics that are still too long, which results in
// the reader falling back to regular overflow.
func TestMetricReader_SplitMetricTooLong(t *testing.T) {
	ts := time.Unix(1481032190, 0)
	m1, _ := New("foo", map[string]string{},
		map[string]interface{}{
			"value1": int64(10),
			"value2": int64(10),
		},
		ts,
	)
	metrics := []telegraf.Metric{m1}

	r := NewReader(metrics)
	buf := make([]byte, 30)

	tests := []struct {
		expRegex string
		err      error
		n        int
	}{
		{`foo value\d=10i,value\d=10i 1481`, nil, 30},
		{`032190000000000\n`, io.EOF, 16},
		{"", io.EOF, 0},
	}

	for _, test := range tests {
		n, err := r.Read(buf)
		assert.Equal(t, test.n, n)
		re := regexp.MustCompile(test.expRegex)
		assert.True(t, re.MatchString(string(buf[0:n])), string(buf[0:n]))
		assert.Equal(t, test.err, err)
	}
}

// test split with a changing buffer size in the middle of subsequent calls
// to Read
func TestMetricReader_SplitMetricChangingBuffer(t *testing.T) {
	ts := time.Unix(1481032190, 0)
	m1, _ := New("foo", map[string]string{},
		map[string]interface{}{
			"value1": int64(10),
			"value2": int64(10),
			"value3": int64(10),
		},
		ts,
	)
	m2, _ := New("foo", map[string]string{},
		map[string]interface{}{
			"value1": int64(10),
		},
		ts,
	)
	metrics := []telegraf.Metric{m1, m2}

	r := NewReader(metrics)

	tests := []struct {
		expRegex string
		err      error
		n        int
		buf      []byte
	}{
		{`foo value\d=10i 1481032190000000000\n`, nil, 35, make([]byte, 36)},
		{`foo value\d=10i 148103219000000`, nil, 30, make([]byte, 30)},
		{`0000\n`, nil, 5, make([]byte, 30)},
		{`foo value\d=10i 1481032190000000000\n`, nil, 35, make([]byte, 36)},
		{`foo value1=10i 1481032190000000000\n`, io.EOF, 35, make([]byte, 36)},
		{"", io.EOF, 0, make([]byte, 36)},
	}

	for _, test := range tests {
		n, err := r.Read(test.buf)
		assert.Equal(t, test.n, n, test.expRegex)
		re := regexp.MustCompile(test.expRegex)
		assert.True(t, re.MatchString(string(test.buf[0:n])), string(test.buf[0:n]))
		assert.Equal(t, test.err, err, test.expRegex)
	}
}

// test split with a changing buffer size in the middle of subsequent calls
// to Read
func TestMetricReader_SplitMetricChangingBuffer2(t *testing.T) {
	ts := time.Unix(1481032190, 0)
	m1, _ := New("foo", map[string]string{},
		map[string]interface{}{
			"value1": int64(10),
			"value2": int64(10),
		},
		ts,
	)
	m2, _ := New("foo", map[string]string{},
		map[string]interface{}{
			"value1": int64(10),
		},
		ts,
	)
	metrics := []telegraf.Metric{m1, m2}

	r := NewReader(metrics)

	tests := []struct {
		expRegex string
		err      error
		n        int
		buf      []byte
	}{
		{`foo value\d=10i 1481032190000000000\n`, nil, 35, make([]byte, 36)},
		{`foo value\d=10i 148103219000000`, nil, 30, make([]byte, 30)},
		{`0000\n`, nil, 5, make([]byte, 30)},
		{`foo value1=10i 1481032190000000000\n`, io.EOF, 35, make([]byte, 36)},
		{"", io.EOF, 0, make([]byte, 36)},
	}

	for _, test := range tests {
		n, err := r.Read(test.buf)
		assert.Equal(t, test.n, n, test.expRegex)
		re := regexp.MustCompile(test.expRegex)
		assert.True(t, re.MatchString(string(test.buf[0:n])), string(test.buf[0:n]))
		assert.Equal(t, test.err, err, test.expRegex)
	}
}
111 metric_test.go
@@ -1,111 +0,0 @@
package telegraf

import (
	"fmt"
	"math"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

func TestNewMetric(t *testing.T) {
	now := time.Now()

	tags := map[string]string{
		"host":       "localhost",
		"datacenter": "us-east-1",
	}
	fields := map[string]interface{}{
		"usage_idle": float64(99),
		"usage_busy": float64(1),
	}
	m, err := NewMetric("cpu", tags, fields, now)
	assert.NoError(t, err)

	assert.Equal(t, Untyped, m.Type())
	assert.Equal(t, tags, m.Tags())
	assert.Equal(t, fields, m.Fields())
	assert.Equal(t, "cpu", m.Name())
	assert.Equal(t, now, m.Time())
	assert.Equal(t, now.UnixNano(), m.UnixNano())
}

func TestNewGaugeMetric(t *testing.T) {
	now := time.Now()

	tags := map[string]string{
		"host":       "localhost",
		"datacenter": "us-east-1",
	}
	fields := map[string]interface{}{
		"usage_idle": float64(99),
		"usage_busy": float64(1),
	}
	m, err := NewGaugeMetric("cpu", tags, fields, now)
	assert.NoError(t, err)

	assert.Equal(t, Gauge, m.Type())
	assert.Equal(t, tags, m.Tags())
	assert.Equal(t, fields, m.Fields())
	assert.Equal(t, "cpu", m.Name())
	assert.Equal(t, now, m.Time())
	assert.Equal(t, now.UnixNano(), m.UnixNano())
}

func TestNewCounterMetric(t *testing.T) {
	now := time.Now()

	tags := map[string]string{
		"host":       "localhost",
		"datacenter": "us-east-1",
	}
	fields := map[string]interface{}{
		"usage_idle": float64(99),
		"usage_busy": float64(1),
	}
	m, err := NewCounterMetric("cpu", tags, fields, now)
	assert.NoError(t, err)

	assert.Equal(t, Counter, m.Type())
	assert.Equal(t, tags, m.Tags())
	assert.Equal(t, fields, m.Fields())
	assert.Equal(t, "cpu", m.Name())
	assert.Equal(t, now, m.Time())
	assert.Equal(t, now.UnixNano(), m.UnixNano())
}

func TestNewMetricString(t *testing.T) {
	now := time.Now()

	tags := map[string]string{
		"host": "localhost",
	}
	fields := map[string]interface{}{
		"usage_idle": float64(99),
	}
	m, err := NewMetric("cpu", tags, fields, now)
	assert.NoError(t, err)

	lineProto := fmt.Sprintf("cpu,host=localhost usage_idle=99 %d",
		now.UnixNano())
	assert.Equal(t, lineProto, m.String())

	lineProtoPrecision := fmt.Sprintf("cpu,host=localhost usage_idle=99 %d",
		now.Unix())
	assert.Equal(t, lineProtoPrecision, m.PrecisionString("s"))
}

func TestNewMetricFailNaN(t *testing.T) {
	now := time.Now()

	tags := map[string]string{
		"host": "localhost",
	}
	fields := map[string]interface{}{
		"usage_idle": math.NaN(),
	}

	_, err := NewMetric("cpu", tags, fields, now)
	assert.Error(t, err)
}
42 plugins/aggregators/minmax/README.md Normal file
@@ -0,0 +1,42 @@
# MinMax Aggregator Plugin

The minmax aggregator plugin aggregates min & max values of each field it sees,
emitting the aggregate every `period` seconds.

### Configuration:

```toml
# Keep the aggregate min/max of each metric passing through.
[[aggregators.minmax]]
  ## General Aggregator Arguments:
  ## The period on which to flush & clear the aggregator.
  period = "30s"
  ## If true, the original metric will be dropped by the
  ## aggregator and will not get sent to the output plugins.
  drop_original = false
```

### Measurements & Fields:

- measurement1
    - field1_max
    - field1_min

### Tags:

No tags are applied by this aggregator.

### Example Output:

```
$ telegraf --config telegraf.conf --quiet
system,host=tars load1=1.72 1475583980000000000
system,host=tars load1=1.6 1475583990000000000
system,host=tars load1=1.66 1475584000000000000
system,host=tars load1=1.63 1475584010000000000
system,host=tars load1_max=1.72,load1_min=1.6 1475584010000000000
system,host=tars load1=1.46 1475584020000000000
system,host=tars load1=1.39 1475584030000000000
system,host=tars load1=1.41 1475584040000000000
system,host=tars load1_max=1.46,load1_min=1.39 1475584040000000000
```
@@ -4,11 +4,11 @@ import (
 	"testing"
 	"time"

-	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/metric"
 	"github.com/influxdata/telegraf/testutil"
 )

-var m1, _ = telegraf.NewMetric("m1",
+var m1, _ = metric.New("m1",
 	map[string]string{"foo": "bar"},
 	map[string]interface{}{
 		"a": int64(1),
@@ -24,7 +24,7 @@ var m1, _ = telegraf.NewMetric("m1",
 	},
 	time.Now(),
 )
-var m2, _ = telegraf.NewMetric("m1",
+var m2, _ = metric.New("m1",
 	map[string]string{"foo": "bar"},
 	map[string]interface{}{
 		"a": int64(1),
@@ -1,6 +1,8 @@
 # Example Input Plugin

-The example plugin gathers metrics about example things
+The example plugin gathers metrics about example things. This description
+explains at a high level what the plugin does and provides links to where
+additional information can be found.

 ### Configuration:

@@ -12,7 +14,8 @@ The example plugin gathers metrics about example things

 ### Measurements & Fields:

-<optional description>
+Here you should add an optional description and links to where the user can
+get more information about the measurements.

 - measurement1
     - field1 (type, unit)
@@ -27,11 +30,14 @@ The example plugin gathers metrics about example things
     - tag2
 - measurement2 has the following tags:
     - tag3


 ### Sample Queries:

-These are some useful queries (to generate dashboards or other) to run against data from this plugin:
+This section should contain some useful InfluxDB queries that can be used to
+get started with the plugin or to generate dashboards. For each query listed,
+describe at a high level what data is returned.

+Get the max, mean, and min for the measurement in the last hour:
 ```
 SELECT max(field1), mean(field1), min(field1) FROM measurement1 WHERE tag1=bar AND time > now() - 1h GROUP BY tag
 ```
@@ -39,7 +45,7 @@ SELECT max(field1), mean(field1), min(field1) FROM measurement1 WHERE tag1=bar A
 ### Example Output:

 ```
-$ ./telegraf -config telegraf.conf -input-filter example -test
+$ telegraf -input-filter example -test
 measurement1,tag1=foo,tag2=bar field1=1i,field2=2.1 1453831884664956455
 measurement2,tag1=foo,tag2=bar,tag3=baz field3=1i 1453831884664956455
 ```
@@ -10,7 +10,6 @@ import (
 	"time"

 	"github.com/influxdata/telegraf"
-	"github.com/influxdata/telegraf/internal/errchan"
 	"github.com/influxdata/telegraf/plugins/inputs"

 	as "github.com/aerospike/aerospike-client-go"
@@ -41,17 +40,16 @@ func (a *Aerospike) Gather(acc telegraf.Accumulator) error {
 	}

 	var wg sync.WaitGroup
-	errChan := errchan.New(len(a.Servers))
 	wg.Add(len(a.Servers))
 	for _, server := range a.Servers {
 		go func(serv string) {
 			defer wg.Done()
-			errChan.C <- a.gatherServer(serv, acc)
+			acc.AddError(a.gatherServer(serv, acc))
 		}(server)
 	}

 	wg.Wait()
-	return errChan.Error()
+	return nil
 }

 func (a *Aerospike) gatherServer(hostport string, acc telegraf.Accumulator) error {

@@ -19,7 +19,7 @@ func TestAerospikeStatistics(t *testing.T) {

 	var acc testutil.Accumulator

-	err := a.Gather(&acc)
+	err := acc.GatherError(a.Gather)
 	require.NoError(t, err)

 	assert.True(t, acc.HasMeasurement("aerospike_node"))
@@ -41,8 +41,7 @@ func TestAerospikeStatisticsPartialErr(t *testing.T) {

 	var acc testutil.Accumulator

-	err := a.Gather(&acc)
-	require.Error(t, err)
+	require.Error(t, acc.GatherError(a.Gather))

 	assert.True(t, acc.HasMeasurement("aerospike_node"))
 	assert.True(t, acc.HasMeasurement("aerospike_namespace"))
@@ -2,6 +2,7 @@ package all

 import (
 	_ "github.com/influxdata/telegraf/plugins/inputs/aerospike"
+	_ "github.com/influxdata/telegraf/plugins/inputs/amqp_consumer"
 	_ "github.com/influxdata/telegraf/plugins/inputs/apache"
 	_ "github.com/influxdata/telegraf/plugins/inputs/bcache"
 	_ "github.com/influxdata/telegraf/plugins/inputs/cassandra"
@@ -14,6 +15,7 @@ import (
 	_ "github.com/influxdata/telegraf/plugins/inputs/couchbase"
 	_ "github.com/influxdata/telegraf/plugins/inputs/couchdb"
 	_ "github.com/influxdata/telegraf/plugins/inputs/disque"
+	_ "github.com/influxdata/telegraf/plugins/inputs/dmcache"
 	_ "github.com/influxdata/telegraf/plugins/inputs/dns_query"
 	_ "github.com/influxdata/telegraf/plugins/inputs/docker"
 	_ "github.com/influxdata/telegraf/plugins/inputs/dovecot"
@@ -27,10 +29,13 @@ import (
 	_ "github.com/influxdata/telegraf/plugins/inputs/http_response"
 	_ "github.com/influxdata/telegraf/plugins/inputs/httpjson"
 	_ "github.com/influxdata/telegraf/plugins/inputs/influxdb"
+	_ "github.com/influxdata/telegraf/plugins/inputs/internal"
+	_ "github.com/influxdata/telegraf/plugins/inputs/interrupts"
 	_ "github.com/influxdata/telegraf/plugins/inputs/ipmi_sensor"
 	_ "github.com/influxdata/telegraf/plugins/inputs/iptables"
 	_ "github.com/influxdata/telegraf/plugins/inputs/jolokia"
 	_ "github.com/influxdata/telegraf/plugins/inputs/kafka_consumer"
 	_ "github.com/influxdata/telegraf/plugins/inputs/kapacitor"
+	_ "github.com/influxdata/telegraf/plugins/inputs/kubernetes"
 	_ "github.com/influxdata/telegraf/plugins/inputs/leofs"
 	_ "github.com/influxdata/telegraf/plugins/inputs/logparser"
@@ -65,6 +70,7 @@ import (
 	_ "github.com/influxdata/telegraf/plugins/inputs/sensors"
 	_ "github.com/influxdata/telegraf/plugins/inputs/snmp"
 	_ "github.com/influxdata/telegraf/plugins/inputs/snmp_legacy"
+	_ "github.com/influxdata/telegraf/plugins/inputs/socket_listener"
 	_ "github.com/influxdata/telegraf/plugins/inputs/sqlserver"
 	_ "github.com/influxdata/telegraf/plugins/inputs/statsd"
 	_ "github.com/influxdata/telegraf/plugins/inputs/sysstat"
47 plugins/inputs/amqp_consumer/README.md Normal file
@@ -0,0 +1,47 @@
# AMQP Consumer Input Plugin

This plugin provides a consumer for use with AMQP 0-9-1, a prominent implementation of this protocol being [RabbitMQ](https://www.rabbitmq.com/).

Metrics are read from a topic exchange using the configured queue and binding_key.

Message payload should be formatted in one of the [Telegraf Data Formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md).

For an introduction to AMQP see:
- https://www.rabbitmq.com/tutorials/amqp-concepts.html
- https://www.rabbitmq.com/getstarted.html

The following defaults are known to work with RabbitMQ:

```toml
# AMQP consumer plugin
[[inputs.amqp_consumer]]
  ## AMQP url
  url = "amqp://localhost:5672/influxdb"
  ## AMQP exchange
  exchange = "telegraf"
  ## AMQP queue name
  queue = "telegraf"
  ## Binding Key
  binding_key = "#"

  ## Controls how many messages the server will try to keep on the network
  ## for consumers before receiving delivery acks.
  #prefetch_count = 50

  ## Auth method. PLAIN and EXTERNAL are supported.
  ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as
  ## described here: https://www.rabbitmq.com/plugins.html
  # auth_method = "PLAIN"
  ## Optional SSL Config
  # ssl_ca = "/etc/telegraf/ca.pem"
  # ssl_cert = "/etc/telegraf/cert.pem"
  # ssl_key = "/etc/telegraf/key.pem"
  ## Use SSL but skip chain & host verification
  # insecure_skip_verify = false

  ## Data format to output.
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
  data_format = "influx"
```
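
As a hedged end-to-end sketch (not part of the plugin), a producer could publish a line-protocol metric to the exchange configured above using the same `streadway/amqp` library the consumer is built on; the routing key below is arbitrary, since `binding_key = "#"` matches everything:

```go
package main

import (
	"log"

	"github.com/streadway/amqp"
)

func main() {
	// URL and exchange mirror the sample configuration above.
	conn, err := amqp.Dial("amqp://localhost:5672/influxdb")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	ch, err := conn.Channel()
	if err != nil {
		log.Fatal(err)
	}
	defer ch.Close()

	// Publish one metric in influx line protocol; the consumer's parser
	// will turn it into a telegraf metric.
	err = ch.Publish(
		"telegraf",      // exchange
		"telegraf.test", // routing key (illustrative)
		false,           // mandatory
		false,           // immediate
		amqp.Publishing{
			ContentType: "text/plain",
			Body:        []byte("weather,location=us-midwest temperature=82\n"),
		},
	)
	if err != nil {
		log.Fatal(err)
	}
}
```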
280 plugins/inputs/amqp_consumer/amqp_consumer.go Normal file
@@ -0,0 +1,280 @@
package amqp_consumer

import (
	"fmt"
	"log"
	"strings"
	"sync"
	"time"

	"github.com/streadway/amqp"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/internal"
	"github.com/influxdata/telegraf/plugins/inputs"
	"github.com/influxdata/telegraf/plugins/parsers"
)

// AMQPConsumer is the top level struct for this plugin
type AMQPConsumer struct {
	URL string
	// AMQP exchange
	Exchange string
	// Queue Name
	Queue string
	// Binding Key
	BindingKey string `toml:"binding_key"`

	// Controls how many messages the server will try to keep on the network
	// for consumers before receiving delivery acks.
	PrefetchCount int

	// AMQP Auth method
	AuthMethod string
	// Path to CA file
	SSLCA string `toml:"ssl_ca"`
	// Path to host cert file
	SSLCert string `toml:"ssl_cert"`
	// Path to cert key file
	SSLKey string `toml:"ssl_key"`
	// Use SSL but skip chain & host verification
	InsecureSkipVerify bool

	parser parsers.Parser
	conn   *amqp.Connection
	wg     *sync.WaitGroup
}

type externalAuth struct{}

func (a *externalAuth) Mechanism() string {
	return "EXTERNAL"
}
func (a *externalAuth) Response() string {
	return fmt.Sprintf("\000")
}

const (
	DefaultAuthMethod    = "PLAIN"
	DefaultPrefetchCount = 50
)

func (a *AMQPConsumer) SampleConfig() string {
	return `
  ## AMQP url
  url = "amqp://localhost:5672/influxdb"
  ## AMQP exchange
  exchange = "telegraf"
  ## AMQP queue name
  queue = "telegraf"
  ## Binding Key
  binding_key = "#"

  ## Maximum number of messages server should give to the worker.
  prefetch_count = 50

  ## Auth method. PLAIN and EXTERNAL are supported
  ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as
  ## described here: https://www.rabbitmq.com/plugins.html
  # auth_method = "PLAIN"

  ## Optional SSL Config
  # ssl_ca = "/etc/telegraf/ca.pem"
  # ssl_cert = "/etc/telegraf/cert.pem"
  # ssl_key = "/etc/telegraf/key.pem"
  ## Use SSL but skip chain & host verification
  # insecure_skip_verify = false

  ## Data format to output.
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
  data_format = "influx"
`
}

func (a *AMQPConsumer) Description() string {
	return "AMQP consumer plugin"
}

func (a *AMQPConsumer) SetParser(parser parsers.Parser) {
	a.parser = parser
}

// All gathering is done in the Start function
func (a *AMQPConsumer) Gather(_ telegraf.Accumulator) error {
	return nil
}

func (a *AMQPConsumer) createConfig() (*amqp.Config, error) {
	// make new tls config
	tls, err := internal.GetTLSConfig(
		a.SSLCert, a.SSLKey, a.SSLCA, a.InsecureSkipVerify)
	if err != nil {
		return nil, err
	}

	// parse auth method
	var sasl []amqp.Authentication // nil by default

	if strings.ToUpper(a.AuthMethod) == "EXTERNAL" {
		sasl = []amqp.Authentication{&externalAuth{}}
	}

	config := amqp.Config{
		TLSClientConfig: tls,
		SASL:            sasl, // if nil, it will be PLAIN
	}
	return &config, nil
}

// Start satisfies the telegraf.ServiceInput interface
func (a *AMQPConsumer) Start(acc telegraf.Accumulator) error {
	amqpConf, err := a.createConfig()
	if err != nil {
		return err
	}

	msgs, err := a.connect(amqpConf)
	if err != nil {
		return err
	}

	a.wg = &sync.WaitGroup{}
	a.wg.Add(1)
	go a.process(msgs, acc)

	go func() {
		err := <-a.conn.NotifyClose(make(chan *amqp.Error))
		if err == nil {
			return
		}

		log.Printf("I! AMQP consumer connection closed: %s; trying to reconnect", err)
		for {
			msgs, err := a.connect(amqpConf)
			if err != nil {
				log.Printf("E! AMQP connection failed: %s", err)
				time.Sleep(10 * time.Second)
				continue
			}

			a.wg.Add(1)
			go a.process(msgs, acc)
			break
		}
	}()

	return nil
}

func (a *AMQPConsumer) connect(amqpConf *amqp.Config) (<-chan amqp.Delivery, error) {
	conn, err := amqp.DialConfig(a.URL, *amqpConf)
	if err != nil {
		return nil, err
	}
	a.conn = conn

	ch, err := conn.Channel()
	if err != nil {
		return nil, fmt.Errorf("Failed to open a channel: %s", err)
	}

	err = ch.ExchangeDeclare(
		a.Exchange, // name
		"topic",    // type
		true,       // durable
		false,      // auto-deleted
		false,      // internal
		false,      // no-wait
		nil,        // arguments
	)
	if err != nil {
		return nil, fmt.Errorf("Failed to declare an exchange: %s", err)
	}

	q, err := ch.QueueDeclare(
		a.Queue, // queue
		true,    // durable
		false,   // delete when unused
		false,   // exclusive
		false,   // no-wait
		nil,     // arguments
	)
	if err != nil {
		return nil, fmt.Errorf("Failed to declare a queue: %s", err)
	}

	err = ch.QueueBind(
		q.Name,       // queue
		a.BindingKey, // binding-key
		a.Exchange,   // exchange
		false,
		nil,
	)
	if err != nil {
		return nil, fmt.Errorf("Failed to bind a queue: %s", err)
	}

	err = ch.Qos(
		a.PrefetchCount,
		0,     // prefetch-size
		false, // global
	)
	if err != nil {
		return nil, fmt.Errorf("Failed to set QoS: %s", err)
	}

	msgs, err := ch.Consume(
		q.Name, // queue
		"",     // consumer
		false,  // auto-ack
		false,  // exclusive
		false,  // no-local
		false,  // no-wait
		nil,    // arguments
	)
	if err != nil {
		return nil, fmt.Errorf("Failed establishing connection to queue: %s", err)
	}

	log.Println("I! Started AMQP consumer")
	return msgs, err
}

// Read messages from queue and add them to the Accumulator
func (a *AMQPConsumer) process(msgs <-chan amqp.Delivery, acc telegraf.Accumulator) {
	defer a.wg.Done()
	for d := range msgs {
		metrics, err := a.parser.Parse(d.Body)
		if err != nil {
			log.Printf("E! %v: error parsing metric - %v", err, string(d.Body))
		} else {
			for _, m := range metrics {
				acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time())
			}
		}

		d.Ack(false)
	}
	log.Printf("I! AMQP consumer queue closed")
}

func (a *AMQPConsumer) Stop() {
	err := a.conn.Close()
	if err != nil && err != amqp.ErrClosed {
		log.Printf("E! Error closing AMQP connection: %s", err)
		return
	}
	a.wg.Wait()
	log.Println("I! Stopped AMQP service")
}

func init() {
	inputs.Add("amqp_consumer", func() telegraf.Input {
		return &AMQPConsumer{
			AuthMethod:    DefaultAuthMethod,
			PrefetchCount: DefaultPrefetchCount,
		}
	})
}
@@ -2,6 +2,16 @@

 #### Plugin arguments:
 - **urls** []string: List of apache-status URLs to collect from. Default is "http://localhost/server-status?auto".
+- **username** string: Username for HTTP basic authentication
+- **password** string: Password for HTTP basic authentication
+- **timeout** duration: time that the HTTP connection will remain waiting for response. Default 4 seconds ("4s")
+
+##### Optional SSL Config
+
+- **ssl_ca** string: the full path for the SSL CA certificate
+- **ssl_cert** string: the full path for the SSL certificate
+- **ssl_key** string: the full path for the key file
+- **insecure_skip_verify** bool: if true HTTP client will skip all SSL verifications related to peer and host. Default to false

 #### Description

@@ -8,20 +8,48 @@ import (
 	"net/url"
 	"strconv"
 	"strings"
+	"sync"
 	"time"

 	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/internal"
 	"github.com/influxdata/telegraf/plugins/inputs"
 )

 type Apache struct {
-	Urls []string
+	Urls            []string
+	Username        string
+	Password        string
+	ResponseTimeout internal.Duration
+	// Path to CA file
+	SSLCA string `toml:"ssl_ca"`
+	// Path to host cert file
+	SSLCert string `toml:"ssl_cert"`
+	// Path to cert key file
+	SSLKey string `toml:"ssl_key"`
+	// Use SSL but skip chain & host verification
+	InsecureSkipVerify bool
+
+	client *http.Client
 }

 var sampleConfig = `
   ## An array of Apache status URI to gather stats.
   ## Default is "http://localhost/server-status?auto".
   urls = ["http://localhost/server-status?auto"]
+  ## user credentials for basic HTTP authentication
+  username = "myuser"
+  password = "mypassword"
+
+  ## Timeout for the complete connection and response time in seconds
+  response_timeout = "25s" ## default to 5 seconds
+
+  ## Optional SSL Config
+  # ssl_ca = "/etc/telegraf/ca.pem"
+  # ssl_cert = "/etc/telegraf/cert.pem"
+  # ssl_key = "/etc/telegraf/key.pem"
+  ## Use SSL but skip chain & host verification
+  # insecure_skip_verify = false
 `

 func (n *Apache) SampleConfig() string {
@@ -36,46 +64,70 @@ func (n *Apache) Gather(acc telegraf.Accumulator) error {
 	if len(n.Urls) == 0 {
 		n.Urls = []string{"http://localhost/server-status?auto"}
 	}
+	if n.ResponseTimeout.Duration < time.Second {
+		n.ResponseTimeout.Duration = time.Second * 5
+	}

-	var outerr error
-	var errch = make(chan error)
+	if n.client == nil {
+		client, err := n.createHttpClient()
+		if err != nil {
+			return err
+		}
+		n.client = client
+	}

+	var wg sync.WaitGroup
+	wg.Add(len(n.Urls))
 	for _, u := range n.Urls {
 		addr, err := url.Parse(u)
 		if err != nil {
-			return fmt.Errorf("Unable to parse address '%s': %s", u, err)
+			acc.AddError(fmt.Errorf("Unable to parse address '%s': %s", u, err))
+			continue
 		}

 		go func(addr *url.URL) {
-			errch <- n.gatherUrl(addr, acc)
+			defer wg.Done()
+			acc.AddError(n.gatherUrl(addr, acc))
 		}(addr)
 	}

-	// Drain channel, waiting for all requests to finish and save last error.
-	for range n.Urls {
-		if err := <-errch; err != nil {
-			outerr = err
-		}
-	}
+	wg.Wait()
+	return nil
+}

+func (n *Apache) createHttpClient() (*http.Client, error) {
+	tlsCfg, err := internal.GetTLSConfig(
+		n.SSLCert, n.SSLKey, n.SSLCA, n.InsecureSkipVerify)
+	if err != nil {
+		return nil, err
+	}

-	return outerr
-}
+	client := &http.Client{
+		Transport: &http.Transport{
+			TLSClientConfig: tlsCfg,
+		},
+		Timeout: n.ResponseTimeout.Duration,
+	}

-var tr = &http.Transport{
-	ResponseHeaderTimeout: time.Duration(3 * time.Second),
-}
-
-var client = &http.Client{
-	Transport: tr,
-	Timeout:   time.Duration(4 * time.Second),
-}
+	return client, nil
+}

 func (n *Apache) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error {
-	resp, err := client.Get(addr.String())
+	req, err := http.NewRequest("GET", addr.String(), nil)
 	if err != nil {
-		return fmt.Errorf("error making HTTP request to %s: %s", addr.String(), err)
+		return fmt.Errorf("error on new request to %s : %s\n", addr.String(), err)
 	}

+	if len(n.Username) != 0 && len(n.Password) != 0 {
+		req.SetBasicAuth(n.Username, n.Password)
+	}
+
+	resp, err := n.client.Do(req)
+	if err != nil {
+		return fmt.Errorf("error on request to %s : %s\n", addr.String(), err)
+	}
 	defer resp.Body.Close()

 	if resp.StatusCode != http.StatusOK {
 		return fmt.Errorf("%s returned HTTP status %s", addr.String(), resp.Status)
 	}

@@ -41,7 +41,7 @@ func TestHTTPApache(t *testing.T) {
 	}

 	var acc testutil.Accumulator
-	err := a.Gather(&acc)
+	err := acc.GatherError(a.Gather)
 	require.NoError(t, err)

 	fields := map[string]interface{}{

@@ -7,7 +7,6 @@ import (
 	"github.com/influxdata/telegraf"
 	"github.com/influxdata/telegraf/plugins/inputs"
 	"io/ioutil"
-	"log"
 	"net/http"
 	"net/url"
 	"strings"
@@ -123,8 +122,8 @@ func (j javaMetric) addTagsFields(out map[string]interface{}) {
 		}
 		j.acc.AddFields(tokens["class"]+tokens["type"], fields, tags)
 	} else {
-		fmt.Printf("Missing key 'value' in '%s' output response\n%v\n",
-			j.metric, out)
+		j.acc.AddError(fmt.Errorf("Missing key 'value' in '%s' output response\n%v\n",
+			j.metric, out))
 	}
 }

@@ -155,8 +154,8 @@ func (c cassandraMetric) addTagsFields(out map[string]interface{}) {
 			addCassandraMetric(k, c, v.(map[string]interface{}))
 		}
 	} else {
-		fmt.Printf("Missing key 'value' in '%s' output response\n%v\n",
-			c.metric, out)
+		c.acc.AddError(fmt.Errorf("Missing key 'value' in '%s' output response\n%v\n",
+			c.metric, out))
 		return
 	}
 } else {
@@ -164,8 +163,8 @@ func (c cassandraMetric) addTagsFields(out map[string]interface{}) {
 		addCassandraMetric(r.(map[string]interface{})["mbean"].(string),
 			c, values.(map[string]interface{}))
 	} else {
-		fmt.Printf("Missing key 'value' in '%s' output response\n%v\n",
-			c.metric, out)
+		c.acc.AddError(fmt.Errorf("Missing key 'value' in '%s' output response\n%v\n",
+			c.metric, out))
 		return
 	}
 }
@@ -274,8 +273,8 @@ func (c *Cassandra) Gather(acc telegraf.Accumulator) error {
 		m = newCassandraMetric(serverTokens["host"], metric, acc)
 	} else {
 		// unsupported metric type
-		log.Printf("I! Unsupported Cassandra metric [%s], skipping",
-			metric)
+		acc.AddError(fmt.Errorf("E! Unsupported Cassandra metric [%s], skipping",
+			metric))
 		continue
 	}

@@ -283,17 +282,21 @@ func (c *Cassandra) Gather(acc telegraf.Accumulator) error {
 	requestUrl, err := url.Parse("http://" + serverTokens["host"] + ":" +
 		serverTokens["port"] + context + metric)
 	if err != nil {
-		return err
+		acc.AddError(err)
+		continue
 	}
 	if serverTokens["user"] != "" && serverTokens["passwd"] != "" {
 		requestUrl.User = url.UserPassword(serverTokens["user"],
 			serverTokens["passwd"])
 	}
-	fmt.Printf("host %s url %s\n", serverTokens["host"], requestUrl)

 	out, err := c.getAttr(requestUrl)
+	if err != nil {
+		acc.AddError(err)
+		continue
+	}
 	if out["status"] != 200.0 {
-		fmt.Printf("URL returned with status %v\n", out["status"])
+		acc.AddError(fmt.Errorf("URL returned with status %v\n", out["status"]))
+		continue
 	}
 	m.addTagsFields(out)

@@ -151,7 +151,7 @@ func TestHttpJsonJavaMultiValue(t *testing.T) {

 	var acc testutil.Accumulator
 	acc.SetDebug(true)
-	err := cassandra.Gather(&acc)
+	err := acc.GatherError(cassandra.Gather)

 	assert.Nil(t, err)
 	assert.Equal(t, 2, len(acc.Metrics))
@@ -180,7 +180,7 @@ func TestHttpJsonJavaMultiType(t *testing.T) {

 	var acc testutil.Accumulator
 	acc.SetDebug(true)
-	err := cassandra.Gather(&acc)
+	err := acc.GatherError(cassandra.Gather)

 	assert.Nil(t, err)
 	assert.Equal(t, 2, len(acc.Metrics))
@@ -197,16 +197,17 @@ func TestHttpJsonJavaMultiType(t *testing.T) {
 }

 // Test that the proper values are ignored or collected
-func TestHttpJsonOn404(t *testing.T) {
+func TestHttp404(t *testing.T) {

-	jolokia := genJolokiaClientStub(validJavaMultiValueJSON, 404, Servers,
+	jolokia := genJolokiaClientStub(invalidJSON, 404, Servers,
 		[]string{HeapMetric})

 	var acc testutil.Accumulator
-	err := jolokia.Gather(&acc)
+	err := acc.GatherError(jolokia.Gather)

-	assert.Nil(t, err)
+	assert.Error(t, err)
 	assert.Equal(t, 0, len(acc.Metrics))
+	assert.Contains(t, err.Error(), "has status code 404")
 }

 // Test that the proper values are ignored or collected for class=Cassandra
@@ -214,7 +215,7 @@ func TestHttpJsonCassandraMultiValue(t *testing.T) {
 	cassandra := genJolokiaClientStub(validCassandraMultiValueJSON, 200, Servers, []string{ReadLatencyMetric})

 	var acc testutil.Accumulator
-	err := cassandra.Gather(&acc)
+	err := acc.GatherError(cassandra.Gather)

 	assert.Nil(t, err)
 	assert.Equal(t, 1, len(acc.Metrics))
@@ -246,7 +247,7 @@ func TestHttpJsonCassandraNestedMultiValue(t *testing.T) {

 	var acc testutil.Accumulator
 	acc.SetDebug(true)
-	err := cassandra.Gather(&acc)
+	err := acc.GatherError(cassandra.Gather)

 	assert.Nil(t, err)
 	assert.Equal(t, 2, len(acc.Metrics))
@@ -82,7 +82,7 @@ the cluster. The currently supported commands are:

## Whether to gather statistics via ceph commands, requires ceph_user and ceph_config
## to be specified
gather_cluster_stats = true
gather_cluster_stats = false
```

### Measurements & Fields:

@@ -117,7 +117,7 @@ All fields are collected under the **ceph** measurement and stored as float64s.
* recovering\_objects\_per\_sec (float)

* ceph\_pgmap\_state
  * state name e.g. active+clean (float)
  * count (float)

* ceph\_usage
  * bytes\_used (float)

@@ -186,7 +186,7 @@ All measurements will have the following tags:

*Cluster Stats*

* ceph\_pg\_state has the following tags:
* ceph\_pgmap\_state has the following tags:
  * state (state for which the value applies e.g. active+clean, active+remapped+backfill)
* ceph\_pool\_usage has the following tags:
  * id

@@ -213,7 +213,8 @@ telegraf -test -config /etc/telegraf/telegraf.conf -config-directory /etc/telegr
<pre>
> ceph_osdmap,host=ceph-mon-0 epoch=170772,full=false,nearfull=false,num_in_osds=340,num_osds=340,num_remapped_pgs=0,num_up_osds=340 1468841037000000000
> ceph_pgmap,host=ceph-mon-0 bytes_avail=634895531270144,bytes_total=812117151809536,bytes_used=177221620539392,data_bytes=56979991615058,num_pgs=22952,op_per_sec=15869,read_bytes_sec=43956026,version=39387592,write_bytes_sec=165344818 1468841037000000000
> ceph_pgmap_state,host=ceph-mon-0 active+clean=22952 1468928660000000000
> ceph_pgmap_state,host=ceph-mon-0,state=active+clean count=22952 1468928660000000000
> ceph_pgmap_state,host=ceph-mon-0,state=active+degraded count=16 1468928660000000000
> ceph_usage,host=ceph-mon-0 total_avail_bytes=634895514791936,total_bytes=812117151809536,total_used_bytes=177221637017600 1468841037000000000
> ceph_pool_usage,host=ceph-mon-0,id=150,name=cinder.volumes bytes_used=12648553794802,kb_used=12352103316,max_avail=154342562489244,objects=3026295 1468841037000000000
> ceph_pool_usage,host=ceph-mon-0,id=182,name=cinder.volumes.flash bytes_used=8541308223964,kb_used=8341121313,max_avail=39388593563936,objects=2075066 1468841037000000000
@@ -4,13 +4,14 @@ import (
"bytes"
"encoding/json"
"fmt"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
"io/ioutil"
"log"
"os/exec"
"path/filepath"
"strings"

"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
)

const (

@@ -68,7 +69,7 @@ var sampleConfig = `
gather_admin_socket_stats = true

## Whether to gather statistics via ceph commands
gather_cluster_stats = true
gather_cluster_stats = false
`

func (c *Ceph) SampleConfig() string {

@@ -100,15 +101,15 @@ func (c *Ceph) gatherAdminSocketStats(acc telegraf.Accumulator) error {
for _, s := range sockets {
dump, err := perfDump(c.CephBinary, s)
if err != nil {
log.Printf("E! error reading from socket '%s': %v", s.socket, err)
acc.AddError(fmt.Errorf("E! error reading from socket '%s': %v", s.socket, err))
continue
}
data, err := parseDump(dump)
if err != nil {
log.Printf("E! error parsing dump from socket '%s': %v", s.socket, err)
acc.AddError(fmt.Errorf("E! error parsing dump from socket '%s': %v", s.socket, err))
continue
}
for tag, metrics := range *data {
for tag, metrics := range data {
acc.AddFields(measurement,
map[string]interface{}(metrics),
map[string]string{"type": s.sockType, "id": s.sockId, "collection": tag})

@@ -244,25 +245,19 @@ type taggedMetricMap map[string]metricMap

// Parses a raw JSON string into a taggedMetricMap
// Delegates the actual parsing to newTaggedMetricMap(..)
func parseDump(dump string) (*taggedMetricMap, error) {
func parseDump(dump string) (taggedMetricMap, error) {
data := make(map[string]interface{})
err := json.Unmarshal([]byte(dump), &data)
if err != nil {
return nil, fmt.Errorf("failed to parse json: '%s': %v", dump, err)
}

tmm := newTaggedMetricMap(data)

if err != nil {
return nil, fmt.Errorf("failed to tag dataset: '%v': %v", tmm, err)
}

return tmm, nil
return newTaggedMetricMap(data), nil
}

// Builds a TaggedMetricMap out of a generic string map.
// The top-level key is used as a tag and all sub-keys are flattened into metrics
func newTaggedMetricMap(data map[string]interface{}) *taggedMetricMap {
func newTaggedMetricMap(data map[string]interface{}) taggedMetricMap {
tmm := make(taggedMetricMap)
for tag, datapoints := range data {
mm := make(metricMap)

@@ -271,7 +266,7 @@ func newTaggedMetricMap(data map[string]interface{}) *taggedMetricMap {
}
tmm[tag] = mm
}
return &tmm
return tmm
}

// Recursively flattens any k-v hierarchy present in data.

@@ -376,36 +371,53 @@ func decodeStatusPgmap(acc telegraf.Accumulator, data map[string]interface{}) er
return nil
}

func decodeStatusPgmapState(acc telegraf.Accumulator, data map[string]interface{}) error {
func extractPgmapStates(data map[string]interface{}) ([]interface{}, error) {
const key = "pgs_by_state"

pgmap, ok := data["pgmap"].(map[string]interface{})
if !ok {
return fmt.Errorf("WARNING %s - unable to decode pgmap", measurement)
return nil, fmt.Errorf("WARNING %s - unable to decode pgmap", measurement)
}
fields := make(map[string]interface{})
for key, value := range pgmap {
switch value.(type) {
case []interface{}:
if key != "pgs_by_state" {
continue
}
for _, state := range value.([]interface{}) {
state_map, ok := state.(map[string]interface{})
if !ok {
return fmt.Errorf("WARNING %s - unable to decode pg state", measurement)
}
state_name, ok := state_map["state_name"].(string)
if !ok {
return fmt.Errorf("WARNING %s - unable to decode pg state name", measurement)
}
state_count, ok := state_map["count"].(float64)
if !ok {
return fmt.Errorf("WARNING %s - unable to decode pg state count", measurement)
}
fields[state_name] = state_count
}

s, ok := pgmap[key]
if !ok {
return nil, fmt.Errorf("WARNING %s - pgmap is missing the %s field", measurement, key)
}

states, ok := s.([]interface{})
if !ok {
return nil, fmt.Errorf("WARNING %s - pgmap[%s] is not a list", measurement, key)
}
return states, nil
}

func decodeStatusPgmapState(acc telegraf.Accumulator, data map[string]interface{}) error {
states, err := extractPgmapStates(data)
if err != nil {
return err
}
for _, state := range states {
stateMap, ok := state.(map[string]interface{})
if !ok {
return fmt.Errorf("WARNING %s - unable to decode pg state", measurement)
}
stateName, ok := stateMap["state_name"].(string)
if !ok {
return fmt.Errorf("WARNING %s - unable to decode pg state name", measurement)
}
stateCount, ok := stateMap["count"].(float64)
if !ok {
return fmt.Errorf("WARNING %s - unable to decode pg state count", measurement)
}

tags := map[string]string{
"state": stateName,
}
fields := map[string]interface{}{
"count": stateCount,
}
acc.AddFields("ceph_pgmap_state", fields, tags)
}
acc.AddFields("ceph_pgmap_state", fields, map[string]string{})
return nil
}
@@ -1,15 +1,17 @@
package ceph

import (
"encoding/json"
"fmt"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
"io/ioutil"
"os"
"path"
"strconv"
"strings"
"testing"

"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
)

const (

@@ -24,15 +26,38 @@ func TestParseSockId(t *testing.T) {
func TestParseMonDump(t *testing.T) {
dump, err := parseDump(monPerfDump)
assert.NoError(t, err)
assert.InEpsilon(t, 5678670180, (*dump)["cluster"]["osd_kb_used"], epsilon)
assert.InEpsilon(t, 6866.540527000, (*dump)["paxos"]["store_state_latency.sum"], epsilon)
assert.InEpsilon(t, 5678670180, dump["cluster"]["osd_kb_used"], epsilon)
assert.InEpsilon(t, 6866.540527000, dump["paxos"]["store_state_latency.sum"], epsilon)
}

func TestParseOsdDump(t *testing.T) {
dump, err := parseDump(osdPerfDump)
assert.NoError(t, err)
assert.InEpsilon(t, 552132.109360000, (*dump)["filestore"]["commitcycle_interval.sum"], epsilon)
assert.Equal(t, float64(0), (*dump)["mutex-FileJournal::finisher_lock"]["wait.avgcount"])
assert.InEpsilon(t, 552132.109360000, dump["filestore"]["commitcycle_interval.sum"], epsilon)
assert.Equal(t, float64(0), dump["mutex-FileJournal::finisher_lock"]["wait.avgcount"])
}

func TestDecodeStatusPgmapState(t *testing.T) {
data := make(map[string]interface{})
err := json.Unmarshal([]byte(clusterStatusDump), &data)
assert.NoError(t, err)

acc := &testutil.Accumulator{}
err = decodeStatusPgmapState(acc, data)
assert.NoError(t, err)

var results = []struct {
fields map[string]interface{}
tags map[string]string
}{
{map[string]interface{}{"count": float64(2560)}, map[string]string{"state": "active+clean"}},
{map[string]interface{}{"count": float64(10)}, map[string]string{"state": "active+scrubbing"}},
{map[string]interface{}{"count": float64(5)}, map[string]string{"state": "active+backfilling"}},
}

for _, r := range results {
acc.AssertContainsTaggedFields(t, "ceph_pgmap_state", r.fields, r.tags)
}
}

func TestGather(t *testing.T) {

@@ -685,3 +710,127 @@ var osdPerfDump = `
"wait": { "avgcount": 0,
"sum": 0.000000000}}}
`
var clusterStatusDump = `
{
  "health": {
    "health": {
      "health_services": [
        {
          "mons": [
            {
              "name": "a",
              "kb_total": 114289256,
              "kb_used": 26995516,
              "kb_avail": 81465132,
              "avail_percent": 71,
              "last_updated": "2017-01-03 17:20:57.595004",
              "store_stats": {
                "bytes_total": 942117141,
                "bytes_sst": 0,
                "bytes_log": 4345406,
                "bytes_misc": 937771735,
                "last_updated": "0.000000"
              },
              "health": "HEALTH_OK"
            },
            {
              "name": "b",
              "kb_total": 114289256,
              "kb_used": 27871624,
              "kb_avail": 80589024,
              "avail_percent": 70,
              "last_updated": "2017-01-03 17:20:47.784331",
              "store_stats": {
                "bytes_total": 454853104,
                "bytes_sst": 0,
                "bytes_log": 5788320,
                "bytes_misc": 449064784,
                "last_updated": "0.000000"
              },
              "health": "HEALTH_OK"
            },
            {
              "name": "c",
              "kb_total": 130258508,
              "kb_used": 38076996,
              "kb_avail": 85541692,
              "avail_percent": 65,
              "last_updated": "2017-01-03 17:21:03.311123",
              "store_stats": {
                "bytes_total": 455555199,
                "bytes_sst": 0,
                "bytes_log": 6950876,
                "bytes_misc": 448604323,
                "last_updated": "0.000000"
              },
              "health": "HEALTH_OK"
            }
          ]
        }
      ]
    },
    "timechecks": {
      "epoch": 504,
      "round": 34642,
      "round_status": "finished",
      "mons": [
        { "name": "a", "skew": 0, "latency": 0, "health": "HEALTH_OK" },
        { "name": "b", "skew": -0, "latency": 0.000951, "health": "HEALTH_OK" },
        { "name": "c", "skew": -0, "latency": 0.000946, "health": "HEALTH_OK" }
      ]
    },
    "summary": [],
    "overall_status": "HEALTH_OK",
    "detail": []
  },
  "fsid": "01234567-abcd-9876-0123-ffeeddccbbaa",
  "election_epoch": 504,
  "quorum": [ 0, 1, 2 ],
  "quorum_names": [ "a", "b", "c" ],
  "monmap": {
    "epoch": 17,
    "fsid": "01234567-abcd-9876-0123-ffeeddccbbaa",
    "modified": "2016-04-11 14:01:52.600198",
    "created": "0.000000",
    "mons": [
      { "rank": 0, "name": "a", "addr": "192.168.0.1:6789/0" },
      { "rank": 1, "name": "b", "addr": "192.168.0.2:6789/0" },
      { "rank": 2, "name": "c", "addr": "192.168.0.3:6789/0" }
    ]
  },
  "osdmap": {
    "osdmap": {
      "epoch": 21734,
      "num_osds": 24,
      "num_up_osds": 24,
      "num_in_osds": 24,
      "full": false,
      "nearfull": false,
      "num_remapped_pgs": 0
    }
  },
  "pgmap": {
    "pgs_by_state": [
      { "state_name": "active+clean", "count": 2560 },
      { "state_name": "active+scrubbing", "count": 10 },
      { "state_name": "active+backfilling", "count": 5 }
    ],
    "version": 52314277,
    "num_pgs": 2560,
    "data_bytes": 2700031960713,
    "bytes_used": 7478347665408,
    "bytes_avail": 9857462382592,
    "bytes_total": 17335810048000,
    "read_bytes_sec": 0,
    "write_bytes_sec": 367217,
    "op_per_sec": 98
  },
  "mdsmap": {
    "epoch": 1,
    "up": 0,
    "in": 0,
    "max": 0,
    "by_rank": []
  }
}
`
@@ -22,10 +22,11 @@ func (g *CGroup) Gather(acc telegraf.Accumulator) error {

for dir := range list {
if dir.err != nil {
return dir.err
acc.AddError(dir.err)
continue
}
if err := g.gatherDir(dir.path, acc); err != nil {
return err
acc.AddError(err)
}
}

@@ -24,7 +24,7 @@ var cg1 = &CGroup{
func TestCgroupStatistics_1(t *testing.T) {
var acc testutil.Accumulator

err := cg1.Gather(&acc)
err := acc.GatherError(cg1.Gather)
require.NoError(t, err)

tags := map[string]string{

@@ -56,7 +56,7 @@ var cg2 = &CGroup{
func TestCgroupStatistics_2(t *testing.T) {
var acc testutil.Accumulator

err := cg2.Gather(&acc)
err := acc.GatherError(cg2.Gather)
require.NoError(t, err)

tags := map[string]string{

@@ -81,7 +81,7 @@ var cg3 = &CGroup{
func TestCgroupStatistics_3(t *testing.T) {
var acc testutil.Accumulator

err := cg3.Gather(&acc)
err := acc.GatherError(cg3.Gather)
require.NoError(t, err)

tags := map[string]string{

@@ -108,7 +108,7 @@ var cg4 = &CGroup{
func TestCgroupStatistics_4(t *testing.T) {
var acc testutil.Accumulator

err := cg4.Gather(&acc)
err := acc.GatherError(cg4.Gather)
require.NoError(t, err)

tags := map[string]string{

@@ -140,7 +140,7 @@ var cg5 = &CGroup{
func TestCgroupStatistics_5(t *testing.T) {
var acc testutil.Accumulator

err := cg5.Gather(&acc)
err := acc.GatherError(cg5.Gather)
require.NoError(t, err)

tags := map[string]string{

@@ -167,7 +167,7 @@ var cg6 = &CGroup{
func TestCgroupStatistics_6(t *testing.T) {
var acc testutil.Accumulator

err := cg6.Gather(&acc)
err := acc.GatherError(cg6.Gather)
require.NoError(t, err)

tags := map[string]string{
@@ -42,9 +42,10 @@ API endpoint. In the following order the plugin will attempt to authenticate.
namespace = "AWS/ELB"

## Maximum requests per second. Note that the global default AWS rate limit is
## 10 reqs/sec, so if you define multiple namespaces, these should add up to a
## maximum of 10. Optional - default value is 10.
ratelimit = 10
## 400 reqs/sec, so if you define multiple namespaces, these should add up to a
## maximum of 400. Optional - default value is 200.
## See http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_limits.html
ratelimit = 200

## Metrics to Pull (optional)
## Defaults to all Metrics in Namespace if nothing is provided
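Since that 400 req/s service limit is shared across the account, the `ratelimit` values of multiple plugin instances should sum to at most 400. A minimal sketch of splitting the budget across two namespaces (the `AWS/EC2` instance here is a hypothetical addition for illustration, not part of this diff):

```toml
[[inputs.cloudwatch]]
  namespace = "AWS/ELB"
  ratelimit = 200

[[inputs.cloudwatch]]
  namespace = "AWS/EC2"
  ratelimit = 200
```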
@@ -13,7 +13,6 @@ import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
internalaws "github.com/influxdata/telegraf/internal/config/aws"
"github.com/influxdata/telegraf/internal/errchan"
"github.com/influxdata/telegraf/internal/limiter"
"github.com/influxdata/telegraf/plugins/inputs"
)

@@ -105,9 +104,10 @@ func (c *CloudWatch) SampleConfig() string {
namespace = "AWS/ELB"

## Maximum requests per second. Note that the global default AWS rate limit is
## 10 reqs/sec, so if you define multiple namespaces, these should add up to a
## maximum of 10. Optional - default value is 10.
ratelimit = 10
## 400 reqs/sec, so if you define multiple namespaces, these should add up to a
## maximum of 400. Optional - default value is 200.
## See http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_limits.html
ratelimit = 200

## Metrics to Pull (optional)
## Defaults to all Metrics in Namespace if nothing is provided

@@ -126,11 +126,7 @@ func (c *CloudWatch) Description() string {
return "Pull Metric Statistics from Amazon CloudWatch"
}

func (c *CloudWatch) Gather(acc telegraf.Accumulator) error {
if c.client == nil {
c.initializeCloudWatch()
}

func SelectMetrics(c *CloudWatch) ([]*cloudwatch.Metric, error) {
var metrics []*cloudwatch.Metric

// check for provided metric filter

@@ -155,11 +151,11 @@ func (c *CloudWatch) Gather(acc telegraf.Accumulator) error {
} else {
allMetrics, err := c.fetchNamespaceMetrics()
if err != nil {
return err
return nil, err
}
for _, name := range m.MetricNames {
for _, metric := range allMetrics {
if isSelected(metric, m.Dimensions) {
if isSelected(name, metric, m.Dimensions) {
metrics = append(metrics, &cloudwatch.Metric{
Namespace: aws.String(c.Namespace),
MetricName: aws.String(name),

@@ -169,18 +165,26 @@ func (c *CloudWatch) Gather(acc telegraf.Accumulator) error {
}
}
}

}
} else {
var err error
metrics, err = c.fetchNamespaceMetrics()
if err != nil {
return err
return nil, err
}
}
return metrics, nil
}

metricCount := len(metrics)
errChan := errchan.New(metricCount)
func (c *CloudWatch) Gather(acc telegraf.Accumulator) error {
if c.client == nil {
c.initializeCloudWatch()
}

metrics, err := SelectMetrics(c)
if err != nil {
return err
}

now := time.Now()

@@ -195,12 +199,12 @@ func (c *CloudWatch) Gather(acc telegraf.Accumulator) error {
<-lmtr.C
go func(inm *cloudwatch.Metric) {
defer wg.Done()
c.gatherMetric(acc, inm, now, errChan.C)
acc.AddError(c.gatherMetric(acc, inm, now))
}(m)
}
wg.Wait()

return errChan.Error()
return nil
}

func init() {

@@ -208,7 +212,7 @@ func init() {
ttl, _ := time.ParseDuration("1hr")
return &CloudWatch{
CacheTTL: internal.Duration{Duration: ttl},
RateLimit: 10,
RateLimit: 200,
}
})
}

@@ -278,13 +282,11 @@ func (c *CloudWatch) gatherMetric(
acc telegraf.Accumulator,
metric *cloudwatch.Metric,
now time.Time,
errChan chan error,
) {
) error {
params := c.getStatisticsInput(metric, now)
resp, err := c.client.GetMetricStatistics(params)
if err != nil {
errChan <- err
return
return err
}

for _, point := range resp.Datapoints {

@@ -319,7 +321,7 @@ func (c *CloudWatch) gatherMetric(
acc.AddFields(formatMeasurement(c.Namespace), fields, tags, *point.Timestamp)
}

errChan <- nil
return nil
}

/*

@@ -380,7 +382,10 @@ func hasWilcard(dimensions []*Dimension) bool {
return false
}

func isSelected(metric *cloudwatch.Metric, dimensions []*Dimension) bool {
func isSelected(name string, metric *cloudwatch.Metric, dimensions []*Dimension) bool {
if name != *metric.MetricName {
return false
}
if len(metric.Dimensions) != len(dimensions) {
return false
}

@@ -11,9 +11,9 @@ import (
"github.com/stretchr/testify/assert"
)

type mockCloudWatchClient struct{}
type mockGatherCloudWatchClient struct{}

func (m *mockCloudWatchClient) ListMetrics(params *cloudwatch.ListMetricsInput) (*cloudwatch.ListMetricsOutput, error) {
func (m *mockGatherCloudWatchClient) ListMetrics(params *cloudwatch.ListMetricsInput) (*cloudwatch.ListMetricsOutput, error) {
metric := &cloudwatch.Metric{
Namespace: params.Namespace,
MetricName: aws.String("Latency"),

@@ -31,7 +31,7 @@ func (m *mockCloudWatchClient) ListMetrics(params *cloudwatch.ListMetricsInput)
return result, nil
}

func (m *mockCloudWatchClient) GetMetricStatistics(params *cloudwatch.GetMetricStatisticsInput) (*cloudwatch.GetMetricStatisticsOutput, error) {
func (m *mockGatherCloudWatchClient) GetMetricStatistics(params *cloudwatch.GetMetricStatisticsInput) (*cloudwatch.GetMetricStatisticsOutput, error) {
dataPoint := &cloudwatch.Datapoint{
Timestamp: params.EndTime,
Minimum: aws.Float64(0.1),

@@ -58,13 +58,13 @@ func TestGather(t *testing.T) {
Namespace: "AWS/ELB",
Delay: internalDuration,
Period: internalDuration,
RateLimit: 10,
RateLimit: 200,
}

var acc testutil.Accumulator
c.client = &mockCloudWatchClient{}
c.client = &mockGatherCloudWatchClient{}

c.Gather(&acc)
acc.GatherError(c.Gather)

fields := map[string]interface{}{}
fields["latency_minimum"] = 0.1

@@ -83,6 +83,94 @@ func TestGather(t *testing.T) {

}

type mockSelectMetricsCloudWatchClient struct{}

func (m *mockSelectMetricsCloudWatchClient) ListMetrics(params *cloudwatch.ListMetricsInput) (*cloudwatch.ListMetricsOutput, error) {
metrics := []*cloudwatch.Metric{}
// 4 metrics are available
metricNames := []string{"Latency", "RequestCount", "HealthyHostCount", "UnHealthyHostCount"}
// for 3 ELBs
loadBalancers := []string{"lb-1", "lb-2", "lb-3"}
// in 2 AZs
availabilityZones := []string{"us-east-1a", "us-east-1b"}
for _, m := range metricNames {
for _, lb := range loadBalancers {
// For each metric/ELB pair, we get an aggregate value across all AZs.
metrics = append(metrics, &cloudwatch.Metric{
Namespace: aws.String("AWS/ELB"),
MetricName: aws.String(m),
Dimensions: []*cloudwatch.Dimension{
&cloudwatch.Dimension{
Name: aws.String("LoadBalancerName"),
Value: aws.String(lb),
},
},
})
for _, az := range availabilityZones {
// We get a metric for each metric/ELB/AZ triplet.
metrics = append(metrics, &cloudwatch.Metric{
Namespace: aws.String("AWS/ELB"),
MetricName: aws.String(m),
Dimensions: []*cloudwatch.Dimension{
&cloudwatch.Dimension{
Name: aws.String("LoadBalancerName"),
Value: aws.String(lb),
},
&cloudwatch.Dimension{
Name: aws.String("AvailabilityZone"),
Value: aws.String(az),
},
},
})
}
}
}

result := &cloudwatch.ListMetricsOutput{
Metrics: metrics,
}
return result, nil
}

func (m *mockSelectMetricsCloudWatchClient) GetMetricStatistics(params *cloudwatch.GetMetricStatisticsInput) (*cloudwatch.GetMetricStatisticsOutput, error) {
return nil, nil
}

func TestSelectMetrics(t *testing.T) {
duration, _ := time.ParseDuration("1m")
internalDuration := internal.Duration{
Duration: duration,
}
c := &CloudWatch{
Region: "us-east-1",
Namespace: "AWS/ELB",
Delay: internalDuration,
Period: internalDuration,
RateLimit: 200,
Metrics: []*Metric{
&Metric{
MetricNames: []string{"Latency", "RequestCount"},
Dimensions: []*Dimension{
&Dimension{
Name: "LoadBalancerName",
Value: "*",
},
&Dimension{
Name: "AvailabilityZone",
Value: "*",
},
},
},
},
}
c.client = &mockSelectMetricsCloudWatchClient{}
metrics, err := SelectMetrics(c)
// We've asked for 2 (out of 4) metrics, over all 3 load balancers in all 2
// AZs. We should get 12 metrics.
assert.Equal(t, 12, len(metrics))
assert.Nil(t, err)
}

func TestGenerateStatisticsInputParams(t *testing.T) {
d := &cloudwatch.Dimension{
Name: aws.String("LoadBalancerName"),

@@ -119,14 +207,13 @@ func TestGenerateStatisticsInputParams(t *testing.T) {
}

func TestMetricsCacheTimeout(t *testing.T) {
ttl, _ := time.ParseDuration("5ms")
cache := &MetricCache{
Metrics: []*cloudwatch.Metric{},
Fetched: time.Now(),
TTL: ttl,
TTL: time.Minute,
}

assert.True(t, cache.IsValid())
time.Sleep(ttl)
cache.Fetched = time.Now().Add(-time.Minute)
assert.False(t, cache.IsValid())
}
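The rewritten test no longer sleeps; it backdates `Fetched` instead, which works as long as `IsValid` compares the fetch time against the TTL. A sketch of what such a check presumably looks like (an assumption, not the actual plugin source):

```go
// IsValid reports whether the cached metric list is still fresh.
func (c *MetricCache) IsValid() bool {
	return c.Metrics != nil && time.Since(c.Fetched) < c.TTL
}
```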
@@ -11,7 +11,6 @@ import (

"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
"log"
"path/filepath"
)

@@ -93,15 +92,15 @@ func (c *Conntrack) Gather(acc telegraf.Accumulator) error {

contents, err := ioutil.ReadFile(fName)
if err != nil {
log.Printf("E! failed to read file '%s': %v", fName, err)
acc.AddError(fmt.Errorf("E! failed to read file '%s': %v", fName, err))
continue
}

v := strings.TrimSpace(string(contents))
fields[metricKey], err = strconv.ParseFloat(v, 64)
if err != nil {
log.Printf("E! failed to parse metric, expected number but "+
" found '%s': %v", v, err)
acc.AddError(fmt.Errorf("E! failed to parse metric, expected number but "+
" found '%s': %v", v, err))
}
}
}

@@ -29,18 +29,25 @@ to query the data. It will not report the [telemetry](https://www.consul.io/docs
Tags:
- node: the node the check/service is registered on
- service_name: name of the service (this is the service name, not the service ID)
- check_id

Fields:
- check_id
- check_name
- service_id
- status
- passing
- critical
- warning

`passing`, `critical`, and `warning` are integer representations of the health
check state. A value of `1` means the health check was in that state at the
time of this sample.

## Example output

```
$ telegraf --config ./telegraf.conf -input-filter consul -test
* Plugin: consul, Collection 1
> consul_health_checks,host=wolfpit,node=consul-server-node check_id="serfHealth",check_name="Serf Health Status",service_id="",status="passing" 1464698464486439902
> consul_health_checks,host=wolfpit,node=consul-server-node,service_name=www.example.com check_id="service:www-example-com.test01",check_name="Service 'www.example.com' check",service_id="www-example-com.test01",status="critical" 1464698464486519036
> consul_health_checks,host=wolfpit,node=consul-server-node,check_id="serfHealth" check_name="Serf Health Status",service_id="",status="passing",passing=1i,critical=0i,warning=0i 1464698464486439902
> consul_health_checks,host=wolfpit,node=consul-server-node,service_name=www.example.com,check_id="service:www-example-com.test01" check_name="Service 'www.example.com' check",service_id="www-example-com.test01",status="critical",passing=0i,critical=1i,warning=0i 1464698464486519036
```

@@ -95,13 +95,18 @@ func (c *Consul) GatherHealthCheck(acc telegraf.Accumulator, checks []*api.Healt
record := make(map[string]interface{})
tags := make(map[string]string)

record["check_id"] = check.CheckID
record["check_name"] = check.Name
record["service_id"] = check.ServiceID

record["status"] = check.Status
record["passing"] = 0
record["critical"] = 0
record["warning"] = 0
record[check.Status] = 1

tags["node"] = check.Node
tags["service_name"] = check.ServiceName
tags["check_id"] = check.CheckID

acc.AddFields("consul_health_checks", record, tags)
}

@@ -22,15 +22,18 @@ var sampleChecks = []*api.HealthCheck{

func TestGatherHealtCheck(t *testing.T) {
expectedFields := map[string]interface{}{
"check_id": "foo.health123",
"check_name": "foo.health",
"status": "passing",
"passing": 1,
"critical": 0,
"warning": 0,
"service_id": "foo.123",
}

expectedTags := map[string]string{
"node": "localhost",
"service_name": "foo",
"check_id": "foo.health123",
}

var acc testutil.Accumulator
@@ -42,19 +42,17 @@ func (r *Couchbase) Gather(acc telegraf.Accumulator) error {

var wg sync.WaitGroup

var outerr error

for _, serv := range r.Servers {
wg.Add(1)
go func(serv string) {
defer wg.Done()
outerr = r.gatherServer(serv, acc, nil)
acc.AddError(r.gatherServer(serv, acc, nil))
}(serv)
}

wg.Wait()

return outerr
return nil
}

func (r *Couchbase) gatherServer(addr string, acc telegraf.Accumulator, pool *couchbase.Pool) error {

@@ -2,13 +2,11 @@ package couchdb

import (
"encoding/json"
"errors"
"fmt"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
"net/http"
"reflect"
"strings"
"sync"
"time"
)

@@ -83,34 +81,20 @@ func (*CouchDB) SampleConfig() string {
}

func (c *CouchDB) Gather(accumulator telegraf.Accumulator) error {
errorChannel := make(chan error, len(c.HOSTs))
var wg sync.WaitGroup
for _, u := range c.HOSTs {
wg.Add(1)
go func(host string) {
defer wg.Done()
if err := c.fetchAndInsertData(accumulator, host); err != nil {
errorChannel <- fmt.Errorf("[host=%s]: %s", host, err)
accumulator.AddError(fmt.Errorf("[host=%s]: %s", host, err))
}
}(u)
}

wg.Wait()
close(errorChannel)

// If there weren't any errors, we can return nil now.
if len(errorChannel) == 0 {
return nil
}

// There were errors, so join them all together as one big error.
errorStrings := make([]string, 0, len(errorChannel))
for err := range errorChannel {
errorStrings = append(errorStrings, err.Error())
}

return errors.New(strings.Join(errorStrings, "\n"))

return nil
}

var tr = &http.Transport{

@@ -316,5 +316,5 @@ func TestBasic(t *testing.T) {
}

var acc testutil.Accumulator
require.NoError(t, plugin.Gather(&acc))
require.NoError(t, acc.GatherError(plugin.Gather))
}

@@ -75,12 +75,11 @@ func (g *Disque) Gather(acc telegraf.Accumulator) error {

var wg sync.WaitGroup

var outerr error

for _, serv := range g.Servers {
u, err := url.Parse(serv)
if err != nil {
return fmt.Errorf("Unable to parse to address '%s': %s", serv, err)
acc.AddError(fmt.Errorf("Unable to parse to address '%s': %s", serv, err))
continue
} else if u.Scheme == "" {
// fallback to simple string based address (i.e. "10.0.0.1:10000")
u.Scheme = "tcp"

@@ -90,13 +89,13 @@ func (g *Disque) Gather(acc telegraf.Accumulator) error {
wg.Add(1)
go func(serv string) {
defer wg.Done()
outerr = g.gatherServer(u, acc)
acc.AddError(g.gatherServer(u, acc))
}(serv)
}

wg.Wait()

return outerr
return nil
}

const defaultPort = "7711"

@@ -51,7 +51,7 @@ func TestDisqueGeneratesMetrics(t *testing.T) {

var acc testutil.Accumulator

err = r.Gather(&acc)
err = acc.GatherError(r.Gather)
require.NoError(t, err)

fields := map[string]interface{}{

@@ -117,7 +117,7 @@ func TestDisqueCanPullStatsFromMultipleServers(t *testing.T) {

var acc testutil.Accumulator

err = r.Gather(&acc)
err = acc.GatherError(r.Gather)
require.NoError(t, err)

fields := map[string]interface{}{
plugins/inputs/dmcache/README.md (new file, 47 lines)

# DMCache Input Plugin

This plugin provides native collection of dmsetup-based statistics for dm-cache.

This plugin requires sudo, so the system should be configured to let telegraf run sudo without a password.

`sudo /sbin/dmsetup status --target cache` is the full command that telegraf will run; it is listed here for debugging purposes.
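One way to grant that is a sudoers drop-in; a minimal sketch, assuming telegraf runs as the `telegraf` user (adjust the user and binary path for your system):

```
# /etc/sudoers.d/telegraf
telegraf ALL=(root) NOPASSWD: /sbin/dmsetup status --target cache
```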
### Configuration

```toml
[[inputs.dmcache]]
  ## Whether to report per-device stats or not
  per_device = true
```

### Measurements & Fields:

- dmcache
  - length
  - target
  - metadata_blocksize
  - metadata_used
  - metadata_total
  - cache_blocksize
  - cache_used
  - cache_total
  - read_hits
  - read_misses
  - write_hits
  - write_misses
  - demotions
  - promotions
  - dirty

### Tags:

- All measurements have the following tags:
  - device

### Example Output:

```
$ ./telegraf --test --config /etc/telegraf/telegraf.conf --input-filter dmcache
* Plugin: inputs.dmcache, Collection 1
> dmcache,device=example cache_blocksize=0i,read_hits=995134034411520i,read_misses=916807089127424i,write_hits=195107267543040i,metadata_used=12861440i,write_misses=563725346013184i,promotions=3265223720960i,dirty=0i,metadata_blocksize=0i,cache_used=1099511627776i,cache_total=0i,length=0i,metadata_total=1073741824i,demotions=3265223720960i 1491482035000000000
```
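The plugin also emits an aggregate series tagged `device=all`, summing the per-device counters (see `Gather` in `dmcache_linux.go` below). Using the two-device fixture from the plugin's tests, that aggregate line would look roughly like:

```
> dmcache,device=all length=9178759168i,metadata_blocksize=16i,metadata_used=73370i,metadata_total=2811842i,cache_blocksize=640i,cache_used=33i,cache_total=24792130i,read_hits=2548i,read_misses=352929i,write_hits=280i,write_misses=524728i,demotions=0i,promotions=7i,dirty=0i
```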
plugins/inputs/dmcache/dmcache.go (new file, 33 lines)

package dmcache

import (
    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/plugins/inputs"
)

type DMCache struct {
    PerDevice        bool `toml:"per_device"`
    getCurrentStatus func() ([]string, error)
}

var sampleConfig = `
  ## Whether to report per-device stats or not
  per_device = true
`

func (c *DMCache) SampleConfig() string {
    return sampleConfig
}

func (c *DMCache) Description() string {
    return "Provide a native collection for dmsetup based statistics for dm-cache"
}

func init() {
    inputs.Add("dmcache", func() telegraf.Input {
        return &DMCache{
            PerDevice:        true,
            getCurrentStatus: dmSetupStatus,
        }
    })
}
plugins/inputs/dmcache/dmcache_linux.go (new file, 190 lines)

// +build linux

package dmcache

import (
    "os/exec"
    "strconv"
    "strings"

    "errors"

    "github.com/influxdata/telegraf"
)

const metricName = "dmcache"

type cacheStatus struct {
    device            string
    length            int
    target            string
    metadataBlocksize int
    metadataUsed      int
    metadataTotal     int
    cacheBlocksize    int
    cacheUsed         int
    cacheTotal        int
    readHits          int
    readMisses        int
    writeHits         int
    writeMisses       int
    demotions         int
    promotions        int
    dirty             int
}

func (c *DMCache) Gather(acc telegraf.Accumulator) error {
    outputLines, err := c.getCurrentStatus()
    if err != nil {
        return err
    }

    totalStatus := cacheStatus{}

    for _, s := range outputLines {
        status, err := parseDMSetupStatus(s)
        if err != nil {
            return err
        }

        if c.PerDevice {
            tags := map[string]string{"device": status.device}
            acc.AddFields(metricName, toFields(status), tags)
        }
        aggregateStats(&totalStatus, status)
    }

    acc.AddFields(metricName, toFields(totalStatus), map[string]string{"device": "all"})

    return nil
}

func parseDMSetupStatus(line string) (cacheStatus, error) {
    var err error
    parseError := errors.New("Output from dmsetup could not be parsed")
    status := cacheStatus{}
    values := strings.Fields(line)
    if len(values) < 15 {
        return cacheStatus{}, parseError
    }

    status.device = strings.TrimRight(values[0], ":")
    status.length, err = strconv.Atoi(values[2])
    if err != nil {
        return cacheStatus{}, err
    }
    status.target = values[3]
    status.metadataBlocksize, err = strconv.Atoi(values[4])
    if err != nil {
        return cacheStatus{}, err
    }
    metadata := strings.Split(values[5], "/")
    if len(metadata) != 2 {
        return cacheStatus{}, parseError
    }
    status.metadataUsed, err = strconv.Atoi(metadata[0])
    if err != nil {
        return cacheStatus{}, err
    }
    status.metadataTotal, err = strconv.Atoi(metadata[1])
    if err != nil {
        return cacheStatus{}, err
    }
    status.cacheBlocksize, err = strconv.Atoi(values[6])
    if err != nil {
        return cacheStatus{}, err
    }
    cache := strings.Split(values[7], "/")
    if len(cache) != 2 {
        return cacheStatus{}, parseError
    }
    status.cacheUsed, err = strconv.Atoi(cache[0])
    if err != nil {
        return cacheStatus{}, err
    }
    status.cacheTotal, err = strconv.Atoi(cache[1])
    if err != nil {
        return cacheStatus{}, err
    }
    status.readHits, err = strconv.Atoi(values[8])
    if err != nil {
        return cacheStatus{}, err
    }
    status.readMisses, err = strconv.Atoi(values[9])
    if err != nil {
        return cacheStatus{}, err
    }
    status.writeHits, err = strconv.Atoi(values[10])
    if err != nil {
        return cacheStatus{}, err
    }
    status.writeMisses, err = strconv.Atoi(values[11])
    if err != nil {
        return cacheStatus{}, err
    }
    status.demotions, err = strconv.Atoi(values[12])
    if err != nil {
        return cacheStatus{}, err
    }
    status.promotions, err = strconv.Atoi(values[13])
    if err != nil {
        return cacheStatus{}, err
    }
    status.dirty, err = strconv.Atoi(values[14])
    if err != nil {
        return cacheStatus{}, err
    }

    return status, nil
}
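For reference, the field indexes the parser reads map onto a `dmsetup status --target cache` line like the one in the test fixture below; a sketch of the layout, derived directly from the parse code above:

```
# cs-1: 0 4883791872 cache 8 1018/1501122 512 7/464962 139 352643 15 46 0 7 0 ...
# [0]  device (trailing ":" trimmed)    [8]  read hits
# [2]  length                           [9]  read misses
# [3]  target                           [10] write hits
# [4]  metadata block size              [11] write misses
# [5]  metadata used/total              [12] demotions
# [6]  cache block size                 [13] promotions
# [7]  cache used/total                 [14] dirty
```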
func aggregateStats(totalStatus *cacheStatus, status cacheStatus) {
    totalStatus.length += status.length
    totalStatus.metadataBlocksize += status.metadataBlocksize
    totalStatus.metadataUsed += status.metadataUsed
    totalStatus.metadataTotal += status.metadataTotal
    totalStatus.cacheBlocksize += status.cacheBlocksize
    totalStatus.cacheUsed += status.cacheUsed
    totalStatus.cacheTotal += status.cacheTotal
    totalStatus.readHits += status.readHits
    totalStatus.readMisses += status.readMisses
    totalStatus.writeHits += status.writeHits
    totalStatus.writeMisses += status.writeMisses
    totalStatus.demotions += status.demotions
    totalStatus.promotions += status.promotions
    totalStatus.dirty += status.dirty
}

func toFields(status cacheStatus) map[string]interface{} {
    fields := make(map[string]interface{})
    fields["length"] = status.length
    fields["metadata_blocksize"] = status.metadataBlocksize
    fields["metadata_used"] = status.metadataUsed
    fields["metadata_total"] = status.metadataTotal
    fields["cache_blocksize"] = status.cacheBlocksize
    fields["cache_used"] = status.cacheUsed
    fields["cache_total"] = status.cacheTotal
    fields["read_hits"] = status.readHits
    fields["read_misses"] = status.readMisses
    fields["write_hits"] = status.writeHits
    fields["write_misses"] = status.writeMisses
    fields["demotions"] = status.demotions
    fields["promotions"] = status.promotions
    fields["dirty"] = status.dirty
    return fields
}

func dmSetupStatus() ([]string, error) {
    out, err := exec.Command("/bin/sh", "-c", "sudo /sbin/dmsetup status --target cache").Output()
    if err != nil {
        return nil, err
    }
    if string(out) == "No devices found\n" {
        return []string{}, nil
    }

    outString := strings.TrimRight(string(out), "\n")
    status := strings.Split(outString, "\n")

    return status, nil
}

plugins/inputs/dmcache/dmcache_notlinux.go (new file, 15 lines)

// +build !linux

package dmcache

import (
    "github.com/influxdata/telegraf"
)

func (c *DMCache) Gather(acc telegraf.Accumulator) error {
    return nil
}

func dmSetupStatus() ([]string, error) {
    return []string{}, nil
}

plugins/inputs/dmcache/dmcache_test.go (new file, 169 lines)

package dmcache

import (
    "errors"
    "testing"

    "github.com/influxdata/telegraf/testutil"
    "github.com/stretchr/testify/require"
)

var (
    measurement     = "dmcache"
    badFormatOutput = []string{"cs-1: 0 4883791872 cache 8 1018/1501122 512 7/464962 139 352643 "}
    good2DevicesFormatOutput = []string{
        "cs-1: 0 4883791872 cache 8 1018/1501122 512 7/464962 139 352643 15 46 0 7 0 1 writeback 2 migration_threshold 2048 mq 10 random_threshold 4 sequential_threshold 512 discard_promote_adjustment 1 read_promote_adjustment 4 write_promote_adjustment 8",
        "cs-2: 0 4294967296 cache 8 72352/1310720 128 26/24327168 2409 286 265 524682 0 0 0 1 writethrough 2 migration_threshold 2048 mq 10 random_threshold 4 sequential_threshold 512 discard_promote_adjustment 1 read_promote_adjustment 4 write_promote_adjustment 8",
    }
)

func TestPerDeviceGoodOutput(t *testing.T) {
    var acc testutil.Accumulator
    var plugin = &DMCache{
        PerDevice: true,
        getCurrentStatus: func() ([]string, error) {
            return good2DevicesFormatOutput, nil
        },
    }

    err := plugin.Gather(&acc)
    require.NoError(t, err)

    tags1 := map[string]string{
        "device": "cs-1",
    }
    fields1 := map[string]interface{}{
        "length": 4883791872,
        "metadata_blocksize": 8,
        "metadata_used": 1018,
        "metadata_total": 1501122,
        "cache_blocksize": 512,
        "cache_used": 7,
        "cache_total": 464962,
        "read_hits": 139,
        "read_misses": 352643,
        "write_hits": 15,
        "write_misses": 46,
        "demotions": 0,
        "promotions": 7,
        "dirty": 0,
    }
    acc.AssertContainsTaggedFields(t, measurement, fields1, tags1)

    tags2 := map[string]string{
        "device": "cs-2",
    }
    fields2 := map[string]interface{}{
        "length": 4294967296,
        "metadata_blocksize": 8,
        "metadata_used": 72352,
        "metadata_total": 1310720,
        "cache_blocksize": 128,
        "cache_used": 26,
        "cache_total": 24327168,
        "read_hits": 2409,
        "read_misses": 286,
        "write_hits": 265,
        "write_misses": 524682,
        "demotions": 0,
        "promotions": 0,
        "dirty": 0,
    }
    acc.AssertContainsTaggedFields(t, measurement, fields2, tags2)

    tags3 := map[string]string{
        "device": "all",
    }

    fields3 := map[string]interface{}{
        "length": 9178759168,
        "metadata_blocksize": 16,
        "metadata_used": 73370,
        "metadata_total": 2811842,
        "cache_blocksize": 640,
        "cache_used": 33,
        "cache_total": 24792130,
        "read_hits": 2548,
        "read_misses": 352929,
        "write_hits": 280,
        "write_misses": 524728,
        "demotions": 0,
        "promotions": 7,
        "dirty": 0,
    }
    acc.AssertContainsTaggedFields(t, measurement, fields3, tags3)
}

func TestNotPerDeviceGoodOutput(t *testing.T) {
    var acc testutil.Accumulator
    var plugin = &DMCache{
        PerDevice: false,
        getCurrentStatus: func() ([]string, error) {
            return good2DevicesFormatOutput, nil
        },
    }

    err := plugin.Gather(&acc)
    require.NoError(t, err)

    tags := map[string]string{
        "device": "all",
    }

    fields := map[string]interface{}{
        "length": 9178759168,
        "metadata_blocksize": 16,
        "metadata_used": 73370,
        "metadata_total": 2811842,
        "cache_blocksize": 640,
        "cache_used": 33,
        "cache_total": 24792130,
        "read_hits": 2548,
        "read_misses": 352929,
        "write_hits": 280,
        "write_misses": 524728,
        "demotions": 0,
        "promotions": 7,
        "dirty": 0,
    }
    acc.AssertContainsTaggedFields(t, measurement, fields, tags)
}

func TestNoDevicesOutput(t *testing.T) {
    var acc testutil.Accumulator
    var plugin = &DMCache{
        PerDevice: true,
        getCurrentStatus: func() ([]string, error) {
            return []string{}, nil
        },
    }

    err := plugin.Gather(&acc)
    require.NoError(t, err)
}

func TestErrorDuringGettingStatus(t *testing.T) {
    var acc testutil.Accumulator
    var plugin = &DMCache{
        PerDevice: true,
        getCurrentStatus: func() ([]string, error) {
            return nil, errors.New("dmsetup doesn't exist")
        },
    }

    err := plugin.Gather(&acc)
    require.Error(t, err)
}

func TestBadFormatOfStatus(t *testing.T) {
    var acc testutil.Accumulator
    var plugin = &DMCache{
        PerDevice: true,
        getCurrentStatus: func() ([]string, error) {
            return badFormatOutput, nil
        },
    }

    err := plugin.Gather(&acc)
    require.Error(t, err)
}
@@ -9,7 +9,6 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/internal/errchan"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
)
|
||||
|
||||
@@ -58,11 +57,10 @@ func (d *DnsQuery) Description() string {
|
||||
func (d *DnsQuery) Gather(acc telegraf.Accumulator) error {
|
||||
d.setDefaultValues()
|
||||
|
||||
errChan := errchan.New(len(d.Domains) * len(d.Servers))
|
||||
for _, domain := range d.Domains {
|
||||
for _, server := range d.Servers {
|
||||
dnsQueryTime, err := d.getDnsQueryTime(domain, server)
|
||||
errChan.C <- err
|
||||
acc.AddError(err)
|
||||
tags := map[string]string{
|
||||
"server": server,
|
||||
"domain": domain,
|
||||
@@ -74,7 +72,7 @@ func (d *DnsQuery) Gather(acc telegraf.Accumulator) error {
|
||||
}
|
||||
}
|
||||
|
||||
return errChan.Error()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *DnsQuery) setDefaultValues() {
|
||||
|
||||
@@ -24,7 +24,7 @@ func TestGathering(t *testing.T) {
|
||||
}
|
||||
var acc testutil.Accumulator
|
||||
|
||||
err := dnsConfig.Gather(&acc)
|
||||
err := acc.GatherError(dnsConfig.Gather)
|
||||
assert.NoError(t, err)
|
||||
metric, ok := acc.Get("dns_query")
|
||||
require.True(t, ok)
|
||||
@@ -44,7 +44,7 @@ func TestGatheringMxRecord(t *testing.T) {
|
||||
var acc testutil.Accumulator
|
||||
dnsConfig.RecordType = "MX"
|
||||
|
||||
err := dnsConfig.Gather(&acc)
|
||||
err := acc.GatherError(dnsConfig.Gather)
|
||||
assert.NoError(t, err)
|
||||
metric, ok := acc.Get("dns_query")
|
||||
require.True(t, ok)
|
||||
@@ -70,7 +70,7 @@ func TestGatheringRootDomain(t *testing.T) {
|
||||
}
|
||||
fields := map[string]interface{}{}
|
||||
|
||||
err := dnsConfig.Gather(&acc)
|
||||
err := acc.GatherError(dnsConfig.Gather)
|
||||
assert.NoError(t, err)
|
||||
metric, ok := acc.Get("dns_query")
|
||||
require.True(t, ok)
|
||||
@@ -96,7 +96,7 @@ func TestMetricContainsServerAndDomainAndRecordTypeTags(t *testing.T) {
|
||||
}
|
||||
fields := map[string]interface{}{}
|
||||
|
||||
err := dnsConfig.Gather(&acc)
|
||||
err := acc.GatherError(dnsConfig.Gather)
|
||||
assert.NoError(t, err)
|
||||
metric, ok := acc.Get("dns_query")
|
||||
require.True(t, ok)
|
||||
@@ -121,7 +121,7 @@ func TestGatheringTimeout(t *testing.T) {
|
||||
|
||||
channel := make(chan error, 1)
|
||||
go func() {
|
||||
channel <- dnsConfig.Gather(&acc)
|
||||
channel <- acc.GatherError(dnsConfig.Gather)
|
||||
}()
|
||||
select {
|
||||
case res := <-channel:
|
||||
|
||||
@@ -16,12 +16,26 @@ for the stat structure can be found
|
||||
```
|
||||
# Read metrics about docker containers
|
||||
[[inputs.docker]]
|
||||
# Docker Endpoint
|
||||
# To use TCP, set endpoint = "tcp://[ip]:[port]"
|
||||
# To use environment variables (ie, docker-machine), set endpoint = "ENV"
|
||||
## Docker Endpoint
|
||||
## To use TCP, set endpoint = "tcp://[ip]:[port]"
|
||||
## To use environment variables (ie, docker-machine), set endpoint = "ENV"
|
||||
endpoint = "unix:///var/run/docker.sock"
|
||||
# Only collect metrics for these containers, collect all if empty
|
||||
## Only collect metrics for these containers, collect all if empty
|
||||
container_names = []
|
||||
## Timeout for docker list, info, and stats commands
|
||||
timeout = "5s"
|
||||
|
||||
## Whether to report for each container per-device blkio (8:0, 8:1...) and
|
||||
## network (eth0, eth1, ...) stats or not
|
||||
perdevice = true
|
||||
## Whether to report for each container total blkio and network stats or not
|
||||
total = false
|
||||
|
||||
## docker labels to include and exclude as tags. Globs accepted.
|
||||
## Note that an empty array for both will include all labels as tags
|
||||
docker_label_include = []
|
||||
docker_label_exclude = []
|
||||
|
||||
```
|
||||
|
||||
### Measurements & Fields:
|
||||
@@ -122,30 +136,32 @@ based on the availability of per-cpu stats on your system.
|
||||
|
||||
|
||||
### Tags:
|
||||
|
||||
#### Docker Engine tags
|
||||
- docker (memory_total)
|
||||
- unit=bytes
|
||||
- engine_host
|
||||
- docker (pool_blocksize)
|
||||
- unit=bytes
|
||||
- engine_host
|
||||
- docker_data
|
||||
- unit=bytes
|
||||
- engine_host
|
||||
- docker_metadata
|
||||
- unit=bytes
|
||||
- engine_host
|
||||
|
||||
#### Docker Container tags
|
||||
- Tags on all containers:
|
||||
- engine_host
|
||||
- container_image
|
||||
- container_name
|
||||
- container_version
|
||||
- docker_container_mem specific:
|
||||
- container_image
|
||||
- container_name
|
||||
- docker_container_cpu specific:
|
||||
- container_image
|
||||
- container_name
|
||||
- cpu
|
||||
- docker_container_net specific:
|
||||
- container_image
|
||||
- container_name
|
||||
- network
|
||||
- docker_container_blkio specific:
|
||||
- container_image
|
||||
- container_name
|
||||
- device
|
||||
|
||||
### Example Output:
|
||||
|
||||
@@ -1,42 +1,82 @@
package system
package docker

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"log"
	"regexp"
	"strconv"
	"strings"
	"sync"
	"time"

	"golang.org/x/net/context"

	"github.com/docker/engine-api/client"
	"github.com/docker/engine-api/types"
	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/filter"
	"github.com/influxdata/telegraf/internal"
	"github.com/influxdata/telegraf/plugins/inputs"
)

type DockerLabelFilter struct {
	labelInclude filter.Filter
	labelExclude filter.Filter
}

// Docker object
type Docker struct {
	Endpoint       string
	ContainerNames []string
	Timeout        internal.Duration
	PerDevice bool `toml:"perdevice"`
	Total     bool `toml:"total"`
	PerDevice    bool     `toml:"perdevice"`
	Total        bool     `toml:"total"`
	LabelInclude []string `toml:"docker_label_include"`
	LabelExclude []string `toml:"docker_label_exclude"`

	client      DockerClient
	LabelFilter DockerLabelFilter

	client      *client.Client
	engine_host string

	testing             bool
	labelFiltersCreated bool
}

// DockerClient interface, useful for testing
type DockerClient interface {
	Info(ctx context.Context) (types.Info, error)
	ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error)
	ContainerStats(ctx context.Context, containerID string, stream bool) (io.ReadCloser, error)
// infoWrapper wraps client.Client.List for testing.
func infoWrapper(c *client.Client, ctx context.Context) (types.Info, error) {
	if c != nil {
		return c.Info(ctx)
	}
	fc := FakeDockerClient{}
	return fc.Info(ctx)
}

// listWrapper wraps client.Client.ContainerList for testing.
func listWrapper(
	c *client.Client,
	ctx context.Context,
	options types.ContainerListOptions,
) ([]types.Container, error) {
	if c != nil {
		return c.ContainerList(ctx, options)
	}
	fc := FakeDockerClient{}
	return fc.ContainerList(ctx, options)
}

// statsWrapper wraps client.Client.ContainerStats for testing.
func statsWrapper(
	c *client.Client,
	ctx context.Context,
	containerID string,
	stream bool,
) (types.ContainerStats, error) {
	if c != nil {
		return c.ContainerStats(ctx, containerID, stream)
	}
	fc := FakeDockerClient{}
	return fc.ContainerStats(ctx, containerID, stream)
}

// KB, MB, GB, TB, PB...human friendly
@@ -68,6 +108,10 @@ var sampleConfig = `
  ## Whether to report for each container total blkio and network stats or not
  total = false

  ## docker labels to include and exclude as tags. Globs accepted.
  ## Note that an empty array for both will include all labels as tags
  docker_label_include = []
  docker_label_exclude = []
`
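As a usage sketch of the new options (the label names below are invented for illustration), a configuration that keeps release labels as tags while dropping build metadata might look like:

    [[inputs.docker]]
      endpoint = "unix:///var/run/docker.sock"
      ## keep labels matching release*, drop any build_* labels
      docker_label_include = ["release*"]
      docker_label_exclude = ["build_*"]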
// Description returns input description
@@ -80,7 +124,7 @@ func (d *Docker) SampleConfig() string { return sampleConfig }

// Gather starts stats collection
func (d *Docker) Gather(acc telegraf.Accumulator) error {
	if d.client == nil {
	if d.client == nil && !d.testing {
		var c *client.Client
		var err error
		defaultHeaders := map[string]string{"User-Agent": "engine-api-cli-1.0"}
@@ -102,18 +146,26 @@ func (d *Docker) Gather(acc telegraf.Accumulator) error {
		}
		d.client = c
	}
	// Create label filters if not already created
	if !d.labelFiltersCreated {
		err := d.createLabelFilters()
		if err != nil {
			return err
		}
		d.labelFiltersCreated = true
	}

	// Get daemon info
	err := d.gatherInfo(acc)
	if err != nil {
		fmt.Println(err.Error())
		acc.AddError(err)
	}

	// List containers
	opts := types.ContainerListOptions{}
	ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration)
	defer cancel()
	containers, err := d.client.ContainerList(ctx, opts)
	containers, err := listWrapper(d.client, ctx, opts)
	if err != nil {
		return err
	}
@@ -126,8 +178,8 @@ func (d *Docker) Gather(acc telegraf.Accumulator) error {
			defer wg.Done()
			err := d.gatherContainer(c, acc)
			if err != nil {
				log.Printf("E! Error gathering container %s stats: %s\n",
					c.Names, err.Error())
				acc.AddError(fmt.Errorf("E! Error gathering container %s stats: %s\n",
					c.Names, err.Error()))
			}
		}(container)
	}
@@ -144,7 +196,7 @@ func (d *Docker) gatherInfo(acc telegraf.Accumulator) error {
	// Get info from docker daemon
	ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration)
	defer cancel()
	info, err := d.client.Info(ctx)
	info, err := infoWrapper(d.client, ctx)
	if err != nil {
		return err
	}
@@ -221,14 +273,18 @@ func (d *Docker) gatherContainer(
		cname = strings.TrimPrefix(container.Names[0], "/")
	}

	// the image name sometimes has a version part.
	// ie, rabbitmq:3-management
	imageParts := strings.Split(container.Image, ":")
	imageName := imageParts[0]
	// the image name sometimes has a version part, or a private repo
	// ie, rabbitmq:3-management or docker.someco.net:4443/rabbitmq:3-management
	imageName := ""
	imageVersion := "unknown"
	if len(imageParts) > 1 {
		imageVersion = imageParts[1]
	i := strings.LastIndex(container.Image, ":") // index of last ':' character
	if i > -1 {
		imageVersion = container.Image[i+1:]
		imageName = container.Image[:i]
	} else {
		imageName = container.Image
	}

	tags := map[string]string{
		"engine_host":    d.engine_host,
		"container_name": cname,
@@ -243,12 +299,12 @@ func (d *Docker) gatherContainer(

	ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration)
	defer cancel()
	r, err := d.client.ContainerStats(ctx, container.ID, false)
	r, err := statsWrapper(d.client, ctx, container.ID, false)
	if err != nil {
		return fmt.Errorf("Error getting docker stats: %s", err.Error())
	}
	defer r.Close()
	dec := json.NewDecoder(r)
	defer r.Body.Close()
	dec := json.NewDecoder(r.Body)
	if err = dec.Decode(&v); err != nil {
		if err == io.EOF {
			return nil
@@ -258,7 +314,11 @@ func (d *Docker) gatherContainer(

	// Add labels to tags
	for k, label := range container.Labels {
		tags[k] = label
		if len(d.LabelInclude) == 0 || d.LabelFilter.labelInclude.Match(k) {
			if len(d.LabelExclude) == 0 || !d.LabelFilter.labelExclude.Match(k) {
				tags[k] = label
			}
		}
	}

	gatherContainerStats(v, acc, tags, container.ID, d.PerDevice, d.Total)
@@ -364,11 +424,22 @@ func gatherContainerStats(
			if field == "container_id" {
				continue
			}

			var uintV uint64
			switch v := value.(type) {
			case uint64:
				uintV = v
			case int64:
				uintV = uint64(v)
			default:
				continue
			}

			_, ok := totalNetworkStatMap[field]
			if ok {
				totalNetworkStatMap[field] = totalNetworkStatMap[field].(uint64) + value.(uint64)
				totalNetworkStatMap[field] = totalNetworkStatMap[field].(uint64) + uintV
			} else {
				totalNetworkStatMap[field] = value
				totalNetworkStatMap[field] = uintV
			}
		}
	}
@@ -487,11 +558,22 @@ func gatherBlockIOMetrics(
			if field == "container_id" {
				continue
			}

			var uintV uint64
			switch v := value.(type) {
			case uint64:
				uintV = v
			case int64:
				uintV = uint64(v)
			default:
				continue
			}

			_, ok := totalStatMap[field]
			if ok {
				totalStatMap[field] = totalStatMap[field].(uint64) + value.(uint64)
				totalStatMap[field] = totalStatMap[field].(uint64) + uintV
			} else {
				totalStatMap[field] = value
				totalStatMap[field] = uintV
			}
		}
	}
@@ -542,11 +624,32 @@ func parseSize(sizeStr string) (int64, error) {
	return int64(size), nil
}

func (d *Docker) createLabelFilters() error {
	if len(d.LabelInclude) != 0 && d.LabelFilter.labelInclude == nil {
		var err error
		d.LabelFilter.labelInclude, err = filter.Compile(d.LabelInclude)
		if err != nil {
			return err
		}
	}

	if len(d.LabelExclude) != 0 && d.LabelFilter.labelExclude == nil {
		var err error
		d.LabelFilter.labelExclude, err = filter.Compile(d.LabelExclude)
		if err != nil {
			return err
		}
	}

	return nil
}

func init() {
	inputs.Add("docker", func() telegraf.Input {
		return &Docker{
			PerDevice: true,
			Timeout:   internal.Duration{Duration: time.Second * 5},
			PerDevice:           true,
			Timeout:             internal.Duration{Duration: time.Second * 5},
			labelFiltersCreated: false,
		}
	})
}
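The image-name change above hinges on splitting at the last ':' so that a registry port stays inside the name. A minimal standalone sketch of that rule, using the image strings from the comment in the diff:

    package main

    import (
        "fmt"
        "strings"
    )

    func splitImage(image string) (name, version string) {
        version = "unknown"
        // Split at the last ':' so "docker.someco.net:4443/rabbitmq:3-management"
        // keeps its registry port inside the name.
        if i := strings.LastIndex(image, ":"); i > -1 {
            return image[:i], image[i+1:]
        }
        return image, version
    }

    func main() {
        for _, img := range []string{"rabbitmq:3-management", "docker.someco.net:4443/rabbitmq:3-management", "busybox"} {
            name, version := splitImage(img)
            fmt.Printf("%s -> name=%q version=%q\n", img, name, version)
        }
    }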
@@ -1,18 +1,12 @@
package system
package docker

import (
	"io"
	"io/ioutil"
	"strings"
	"testing"
	"time"

	"golang.org/x/net/context"

	"github.com/docker/engine-api/types"
	"github.com/docker/engine-api/types/registry"
	"github.com/influxdata/telegraf/testutil"

	"github.com/docker/docker/api/types"
	"github.com/stretchr/testify/require"
)

@@ -250,147 +244,65 @@ func testStats() *types.StatsJSON {
	return stats
}

type FakeDockerClient struct {
var gatherLabelsTests = []struct {
	include     []string
	exclude     []string
	expected    []string
	notexpected []string
}{
	{[]string{}, []string{}, []string{"label1", "label2"}, []string{}},
	{[]string{"*"}, []string{}, []string{"label1", "label2"}, []string{}},
	{[]string{"lab*"}, []string{}, []string{"label1", "label2"}, []string{}},
	{[]string{"label1"}, []string{}, []string{"label1"}, []string{"label2"}},
	{[]string{"label1*"}, []string{}, []string{"label1"}, []string{"label2"}},
	{[]string{}, []string{"*"}, []string{}, []string{"label1", "label2"}},
	{[]string{}, []string{"lab*"}, []string{}, []string{"label1", "label2"}},
	{[]string{}, []string{"label1"}, []string{"label2"}, []string{"label1"}},
	{[]string{"*"}, []string{"*"}, []string{}, []string{"label1", "label2"}},
}

func (d FakeDockerClient) Info(ctx context.Context) (types.Info, error) {
	env := types.Info{
		Containers: 108,
		ContainersRunning: 98,
		ContainersStopped: 6,
		ContainersPaused: 3,
		OomKillDisable: false,
		SystemTime: "2016-02-24T00:55:09.15073105-05:00",
		NEventsListener: 0,
		ID: "5WQQ:TFWR:FDNG:OKQ3:37Y4:FJWG:QIKK:623T:R3ME:QTKB:A7F7:OLHD",
		Debug: false,
		LoggingDriver: "json-file",
		KernelVersion: "4.3.0-1-amd64",
		IndexServerAddress: "https://index.docker.io/v1/",
		MemTotal: 3840757760,
		Images: 199,
		CPUCfsQuota: true,
		Name: "absol",
		SwapLimit: false,
		IPv4Forwarding: true,
		ExecutionDriver: "native-0.2",
		ExperimentalBuild: false,
		CPUCfsPeriod: true,
		RegistryConfig: &registry.ServiceConfig{
			IndexConfigs: map[string]*registry.IndexInfo{
				"docker.io": {
					Name: "docker.io",
					Mirrors: []string{},
					Official: true,
					Secure: true,
				},
			}, InsecureRegistryCIDRs: []*registry.NetIPNet{{IP: []byte{127, 0, 0, 0}, Mask: []byte{255, 0, 0, 0}}}, Mirrors: []string{}},
		OperatingSystem: "Linux Mint LMDE (containerized)",
		BridgeNfIptables: true,
		HTTPSProxy: "",
		Labels: []string{},
		MemoryLimit: false,
		DriverStatus: [][2]string{{"Pool Name", "docker-8:1-1182287-pool"}, {"Pool Blocksize", "65.54 kB"}, {"Backing Filesystem", "extfs"}, {"Data file", "/dev/loop0"}, {"Metadata file", "/dev/loop1"}, {"Data Space Used", "17.3 GB"}, {"Data Space Total", "107.4 GB"}, {"Data Space Available", "36.53 GB"}, {"Metadata Space Used", "20.97 MB"}, {"Metadata Space Total", "2.147 GB"}, {"Metadata Space Available", "2.127 GB"}, {"Udev Sync Supported", "true"}, {"Deferred Removal Enabled", "false"}, {"Data loop file", "/var/lib/docker/devicemapper/devicemapper/data"}, {"Metadata loop file", "/var/lib/docker/devicemapper/devicemapper/metadata"}, {"Library Version", "1.02.115 (2016-01-25)"}},
		NFd: 19,
		HTTPProxy: "",
		Driver: "devicemapper",
		NGoroutines: 39,
		NCPU: 4,
		DockerRootDir: "/var/lib/docker",
		NoProxy: "",
		BridgeNfIP6tables: true,
func TestDockerGatherLabels(t *testing.T) {
	for _, tt := range gatherLabelsTests {
		var acc testutil.Accumulator
		d := Docker{
			client:  nil,
			testing: true,
		}

		for _, label := range tt.include {
			d.LabelInclude = append(d.LabelInclude, label)
		}
		for _, label := range tt.exclude {
			d.LabelExclude = append(d.LabelExclude, label)
		}

		err := d.Gather(&acc)
		require.NoError(t, err)

		for _, label := range tt.expected {
			if !acc.HasTag("docker_container_cpu", label) {
				t.Errorf("Didn't get expected label of %s. Test was: Include: %s Exclude %s",
					label, tt.include, tt.exclude)
			}
		}

		for _, label := range tt.notexpected {
			if acc.HasTag("docker_container_cpu", label) {
				t.Errorf("Got unexpected label of %s. Test was: Include: %s Exclude %s",
					label, tt.include, tt.exclude)
			}
		}
	}
	return env, nil
}

func (d FakeDockerClient) ContainerList(octx context.Context, options types.ContainerListOptions) ([]types.Container, error) {
	container1 := types.Container{
		ID: "e2173b9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296b7dfb",
		Names: []string{"/etcd"},
		Image: "quay.io/coreos/etcd:v2.2.2",
		Command: "/etcd -name etcd0 -advertise-client-urls http://localhost:2379 -listen-client-urls http://0.0.0.0:2379",
		Created: 1455941930,
		Status: "Up 4 hours",
		Ports: []types.Port{
			types.Port{
				PrivatePort: 7001,
				PublicPort: 0,
				Type: "tcp",
			},
			types.Port{
				PrivatePort: 4001,
				PublicPort: 0,
				Type: "tcp",
			},
			types.Port{
				PrivatePort: 2380,
				PublicPort: 0,
				Type: "tcp",
			},
			types.Port{
				PrivatePort: 2379,
				PublicPort: 2379,
				Type: "tcp",
				IP: "0.0.0.0",
			},
		},
		SizeRw: 0,
		SizeRootFs: 0,
	}
	container2 := types.Container{
		ID: "b7dfbb9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296e2173",
		Names: []string{"/etcd2"},
		Image: "quay.io/coreos/etcd:v2.2.2",
		Command: "/etcd -name etcd2 -advertise-client-urls http://localhost:2379 -listen-client-urls http://0.0.0.0:2379",
		Created: 1455941933,
		Status: "Up 4 hours",
		Ports: []types.Port{
			types.Port{
				PrivatePort: 7002,
				PublicPort: 0,
				Type: "tcp",
			},
			types.Port{
				PrivatePort: 4002,
				PublicPort: 0,
				Type: "tcp",
			},
			types.Port{
				PrivatePort: 2381,
				PublicPort: 0,
				Type: "tcp",
			},
			types.Port{
				PrivatePort: 2382,
				PublicPort: 2382,
				Type: "tcp",
				IP: "0.0.0.0",
			},
		},
		SizeRw: 0,
		SizeRootFs: 0,
	}

	containers := []types.Container{container1, container2}
	return containers, nil

	//#{e6a96c84ca91a5258b7cb752579fb68826b68b49ff957487695cd4d13c343b44 titilambert/snmpsim /bin/sh -c 'snmpsimd --agent-udpv4-endpoint=0.0.0.0:31161 --process-user=root --process-group=user' 1455724831 Up 4 hours [{31161 31161 udp 0.0.0.0}] 0 0 [/snmp] map[]}]2016/02/24 01:05:01 Gathered metrics, (3s interval), from 1 inputs in 1.233836656s
}

func (d FakeDockerClient) ContainerStats(ctx context.Context, containerID string, stream bool) (io.ReadCloser, error) {
	var stat io.ReadCloser
	jsonStat := `{"read":"2016-02-24T11:42:27.472459608-05:00","memory_stats":{"stats":{},"limit":18935443456},"blkio_stats":{"io_service_bytes_recursive":[{"major":252,"minor":1,"op":"Read","value":753664},{"major":252,"minor":1,"op":"Write"},{"major":252,"minor":1,"op":"Sync"},{"major":252,"minor":1,"op":"Async","value":753664},{"major":252,"minor":1,"op":"Total","value":753664}],"io_serviced_recursive":[{"major":252,"minor":1,"op":"Read","value":26},{"major":252,"minor":1,"op":"Write"},{"major":252,"minor":1,"op":"Sync"},{"major":252,"minor":1,"op":"Async","value":26},{"major":252,"minor":1,"op":"Total","value":26}]},"cpu_stats":{"cpu_usage":{"percpu_usage":[17871,4959158,1646137,1231652,11829401,244656,369972,0],"usage_in_usermode":10000000,"total_usage":20298847},"system_cpu_usage":24052607520000000,"throttling_data":{}},"precpu_stats":{"cpu_usage":{"percpu_usage":[17871,4959158,1646137,1231652,11829401,244656,369972,0],"usage_in_usermode":10000000,"total_usage":20298847},"system_cpu_usage":24052599550000000,"throttling_data":{}}}`
	stat = ioutil.NopCloser(strings.NewReader(jsonStat))
	return stat, nil
}

func TestDockerGatherInfo(t *testing.T) {
	var acc testutil.Accumulator
	client := FakeDockerClient{}
	d := Docker{client: client}

	err := d.Gather(&acc)
	d := Docker{
		client:  nil,
		testing: true,
	}

	err := acc.GatherError(d.Gather)
	require.NoError(t, err)

	acc.AssertContainsTaggedFields(t,
@@ -429,10 +341,12 @@ func TestDockerGatherInfo(t *testing.T) {
		},
		map[string]string{
			"container_name": "etcd2",
			"container_image": "quay.io/coreos/etcd",
			"container_image": "quay.io:4443/coreos/etcd",
			"cpu": "cpu3",
			"container_version": "v2.2.2",
			"engine_host": "absol",
			"label1": "test_value_1",
			"label2": "test_value_2",
		},
	)
	acc.AssertContainsTaggedFields(t,
@@ -477,8 +391,10 @@ func TestDockerGatherInfo(t *testing.T) {
		map[string]string{
			"engine_host": "absol",
			"container_name": "etcd2",
			"container_image": "quay.io/coreos/etcd",
			"container_image": "quay.io:4443/coreos/etcd",
			"container_version": "v2.2.2",
			"label1": "test_value_1",
			"label2": "test_value_2",
		},
	)

plugins/inputs/docker/fake_client.go (new file, 151 lines)
@@ -0,0 +1,151 @@
package docker

import (
	"context"
	"io/ioutil"
	"strings"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/registry"
)

type FakeDockerClient struct {
}

func (d FakeDockerClient) Info(ctx context.Context) (types.Info, error) {
	env := types.Info{
		Containers: 108,
		ContainersRunning: 98,
		ContainersStopped: 6,
		ContainersPaused: 3,
		OomKillDisable: false,
		SystemTime: "2016-02-24T00:55:09.15073105-05:00",
		NEventsListener: 0,
		ID: "5WQQ:TFWR:FDNG:OKQ3:37Y4:FJWG:QIKK:623T:R3ME:QTKB:A7F7:OLHD",
		Debug: false,
		LoggingDriver: "json-file",
		KernelVersion: "4.3.0-1-amd64",
		IndexServerAddress: "https://index.docker.io/v1/",
		MemTotal: 3840757760,
		Images: 199,
		CPUCfsQuota: true,
		Name: "absol",
		SwapLimit: false,
		IPv4Forwarding: true,
		ExperimentalBuild: false,
		CPUCfsPeriod: true,
		RegistryConfig: &registry.ServiceConfig{
			IndexConfigs: map[string]*registry.IndexInfo{
				"docker.io": {
					Name: "docker.io",
					Mirrors: []string{},
					Official: true,
					Secure: true,
				},
			}, InsecureRegistryCIDRs: []*registry.NetIPNet{{IP: []byte{127, 0, 0, 0}, Mask: []byte{255, 0, 0, 0}}}, Mirrors: []string{}},
		OperatingSystem: "Linux Mint LMDE (containerized)",
		BridgeNfIptables: true,
		HTTPSProxy: "",
		Labels: []string{},
		MemoryLimit: false,
		DriverStatus: [][2]string{{"Pool Name", "docker-8:1-1182287-pool"}, {"Pool Blocksize", "65.54 kB"}, {"Backing Filesystem", "extfs"}, {"Data file", "/dev/loop0"}, {"Metadata file", "/dev/loop1"}, {"Data Space Used", "17.3 GB"}, {"Data Space Total", "107.4 GB"}, {"Data Space Available", "36.53 GB"}, {"Metadata Space Used", "20.97 MB"}, {"Metadata Space Total", "2.147 GB"}, {"Metadata Space Available", "2.127 GB"}, {"Udev Sync Supported", "true"}, {"Deferred Removal Enabled", "false"}, {"Data loop file", "/var/lib/docker/devicemapper/devicemapper/data"}, {"Metadata loop file", "/var/lib/docker/devicemapper/devicemapper/metadata"}, {"Library Version", "1.02.115 (2016-01-25)"}},
		NFd: 19,
		HTTPProxy: "",
		Driver: "devicemapper",
		NGoroutines: 39,
		NCPU: 4,
		DockerRootDir: "/var/lib/docker",
		NoProxy: "",
		BridgeNfIP6tables: true,
	}
	return env, nil
}

func (d FakeDockerClient) ContainerList(octx context.Context, options types.ContainerListOptions) ([]types.Container, error) {
	container1 := types.Container{
		ID: "e2173b9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296b7dfb",
		Names: []string{"/etcd"},
		Image: "quay.io/coreos/etcd:v2.2.2",
		Command: "/etcd -name etcd0 -advertise-client-urls http://localhost:2379 -listen-client-urls http://0.0.0.0:2379",
		Created: 1455941930,
		Status: "Up 4 hours",
		Ports: []types.Port{
			types.Port{
				PrivatePort: 7001,
				PublicPort: 0,
				Type: "tcp",
			},
			types.Port{
				PrivatePort: 4001,
				PublicPort: 0,
				Type: "tcp",
			},
			types.Port{
				PrivatePort: 2380,
				PublicPort: 0,
				Type: "tcp",
			},
			types.Port{
				PrivatePort: 2379,
				PublicPort: 2379,
				Type: "tcp",
				IP: "0.0.0.0",
			},
		},
		Labels: map[string]string{
			"label1": "test_value_1",
			"label2": "test_value_2",
		},
		SizeRw: 0,
		SizeRootFs: 0,
	}
	container2 := types.Container{
		ID: "b7dfbb9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296e2173",
		Names: []string{"/etcd2"},
		Image: "quay.io:4443/coreos/etcd:v2.2.2",
		Command: "/etcd -name etcd2 -advertise-client-urls http://localhost:2379 -listen-client-urls http://0.0.0.0:2379",
		Created: 1455941933,
		Status: "Up 4 hours",
		Ports: []types.Port{
			types.Port{
				PrivatePort: 7002,
				PublicPort: 0,
				Type: "tcp",
			},
			types.Port{
				PrivatePort: 4002,
				PublicPort: 0,
				Type: "tcp",
			},
			types.Port{
				PrivatePort: 2381,
				PublicPort: 0,
				Type: "tcp",
			},
			types.Port{
				PrivatePort: 2382,
				PublicPort: 2382,
				Type: "tcp",
				IP: "0.0.0.0",
			},
		},
		Labels: map[string]string{
			"label1": "test_value_1",
			"label2": "test_value_2",
		},
		SizeRw: 0,
		SizeRootFs: 0,
	}

	containers := []types.Container{container1, container2}
	return containers, nil

	//#{e6a96c84ca91a5258b7cb752579fb68826b68b49ff957487695cd4d13c343b44 titilambert/snmpsim /bin/sh -c 'snmpsimd --agent-udpv4-endpoint=0.0.0.0:31161 --process-user=root --process-group=user' 1455724831 Up 4 hours [{31161 31161 udp 0.0.0.0}] 0 0 [/snmp] map[]}]2016/02/24 01:05:01 Gathered metrics, (3s interval), from 1 inputs in 1.233836656s
}

func (d FakeDockerClient) ContainerStats(ctx context.Context, containerID string, stream bool) (types.ContainerStats, error) {
	var stat types.ContainerStats
	jsonStat := `{"read":"2016-02-24T11:42:27.472459608-05:00","memory_stats":{"stats":{},"limit":18935443456},"blkio_stats":{"io_service_bytes_recursive":[{"major":252,"minor":1,"op":"Read","value":753664},{"major":252,"minor":1,"op":"Write"},{"major":252,"minor":1,"op":"Sync"},{"major":252,"minor":1,"op":"Async","value":753664},{"major":252,"minor":1,"op":"Total","value":753664}],"io_serviced_recursive":[{"major":252,"minor":1,"op":"Read","value":26},{"major":252,"minor":1,"op":"Write"},{"major":252,"minor":1,"op":"Sync"},{"major":252,"minor":1,"op":"Async","value":26},{"major":252,"minor":1,"op":"Total","value":26}]},"cpu_stats":{"cpu_usage":{"percpu_usage":[17871,4959158,1646137,1231652,11829401,244656,369972,0],"usage_in_usermode":10000000,"total_usage":20298847},"system_cpu_usage":24052607520000000,"throttling_data":{}},"precpu_stats":{"cpu_usage":{"percpu_usage":[17871,4959158,1646137,1231652,11829401,244656,369972,0],"usage_in_usermode":10000000,"total_usage":20298847},"system_cpu_usage":24052599550000000,"throttling_data":{}}}`
	stat.Body = ioutil.NopCloser(strings.NewReader(jsonStat))
	return stat, nil
}
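A short sketch of how this fake plugs in: leaving client nil with testing set makes the wrapper functions in docker.go fall back to FakeDockerClient, so a test can drive the real Gather path against the canned data above (this mirrors TestDockerGatherLabels in the test file):

    var acc testutil.Accumulator
    d := Docker{
        client:  nil, // wrappers fall back to FakeDockerClient
        testing: true,
    }
    err := acc.GatherError(d.Gather)
    require.NoError(t, err)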
@@ -12,7 +12,6 @@ import (
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/internal/errchan"
	"github.com/influxdata/telegraf/plugins/inputs"
)

@@ -66,19 +65,18 @@ func (d *Dovecot) Gather(acc telegraf.Accumulator) error {
	}

	var wg sync.WaitGroup
	errChan := errchan.New(len(d.Servers) * len(d.Filters))
	for _, server := range d.Servers {
		for _, filter := range d.Filters {
			wg.Add(1)
			go func(s string, f string) {
				defer wg.Done()
				errChan.C <- d.gatherServer(s, acc, d.Type, f)
				acc.AddError(d.gatherServer(s, acc, d.Type, f))
			}(server, filter)
		}
	}

	wg.Wait()
	return errChan.Error()
	return nil
}

func (d *Dovecot) gatherServer(addr string, acc telegraf.Accumulator, qtype string, filter string) error {

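The Dovecot change above is one instance of a pattern repeated throughout this release: worker goroutines report failures through Accumulator.AddError instead of a counted errchan, and Gather returns nil so one bad server no longer aborts the rest. A generic sketch of the pattern, with plugin-specific names elided:

    var wg sync.WaitGroup
    for _, server := range servers {
        wg.Add(1)
        go func(s string) {
            defer wg.Done()
            // Mirrors the change above, which passes the gather result
            // straight into the accumulator rather than an error channel.
            acc.AddError(gatherServer(s, acc))
        }(server)
    }
    wg.Wait()
    return nil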
@@ -2,7 +2,8 @@

The [elasticsearch](https://www.elastic.co/) plugin queries endpoints to obtain
[node](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html)
and optionally [cluster](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html) stats.
and optionally [cluster-health](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html)
or [cluster-stats](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-stats.html) metrics.

### Configuration:

@@ -14,13 +15,18 @@ and optionally [cluster](https://www.elastic.co/guide/en/elasticsearch/reference
  ## Timeout for HTTP requests to the elastic search server(s)
  http_timeout = "5s"

  ## set local to false when you want to read the indices stats from all nodes
  ## within the cluster
  ## When local is true (the default), the node will read only its own stats.
  ## Set local to false when you want to read the node stats from all nodes
  ## of the cluster.
  local = true

  ## set cluster_health to true when you want to also obtain cluster level stats
  ## Set cluster_health to true when you want to also obtain cluster health stats
  cluster_health = false

  ## Set cluster_stats to true when you want to obtain cluster stats from the
  ## Master node.
  cluster_stats = false

  ## Optional SSL Config
  # ssl_ca = "/etc/telegraf/ca.pem"
  # ssl_cert = "/etc/telegraf/cert.pem"

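A minimal configuration sketch enabling the new cluster_stats mode (the server URL is a placeholder):

    [[inputs.elasticsearch]]
      servers = ["http://localhost:9200"]
      local = true
      cluster_health = false
      ## only gathered when this node is the elected master
      cluster_stats = true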
@@ -4,21 +4,26 @@ import (
	"encoding/json"
	"fmt"
	"net/http"
	"regexp"
	"sync"
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/internal"
	"github.com/influxdata/telegraf/internal/errchan"
	"github.com/influxdata/telegraf/plugins/inputs"
	jsonparser "github.com/influxdata/telegraf/plugins/parsers/json"
	"io/ioutil"
	"strings"
)

// mask for masking username/password from error messages
var mask = regexp.MustCompile(`https?:\/\/\S+:\S+@`)

// Nodestats are always generated, so simply define a constant for these endpoints
const statsPath = "/_nodes/stats"
const statsPathLocal = "/_nodes/_local/stats"
const healthPath = "/_cluster/health"

type node struct {
type nodeStat struct {
	Host       string            `json:"host"`
	Name       string            `json:"name"`
	Attributes map[string]string `json:"attributes"`
@@ -58,20 +63,41 @@ type indexHealth struct {
	UnassignedShards int `json:"unassigned_shards"`
}

type clusterStats struct {
	NodeName    string      `json:"node_name"`
	ClusterName string      `json:"cluster_name"`
	Status      string      `json:"status"`
	Indices     interface{} `json:"indices"`
	Nodes       interface{} `json:"nodes"`
}

type catMaster struct {
	NodeID   string `json:"id"`
	NodeIP   string `json:"ip"`
	NodeName string `json:"node"`
}

const sampleConfig = `
  ## specify a list of one or more Elasticsearch servers
  # you can add username and password to your url to use basic authentication:
  # servers = ["http://user:pass@localhost:9200"]
  servers = ["http://localhost:9200"]

  ## Timeout for HTTP requests to the elastic search server(s)
  http_timeout = "5s"

  ## set local to false when you want to read the indices stats from all nodes
  ## within the cluster
  ## When local is true (the default), the node will read only its own stats.
  ## Set local to false when you want to read the node stats from all nodes
  ## of the cluster.
  local = true

  ## set cluster_health to true when you want to also obtain cluster level stats
  ## Set cluster_health to true when you want to also obtain cluster health stats
  cluster_health = false

  ## Set cluster_stats to true when you want to also obtain cluster stats from the
  ## Master node.
  cluster_stats = false

  ## Optional SSL Config
  # ssl_ca = "/etc/telegraf/ca.pem"
  # ssl_cert = "/etc/telegraf/cert.pem"
@@ -83,15 +109,18 @@ const sampleConfig = `
// Elasticsearch is a plugin to read stats from one or many Elasticsearch
// servers.
type Elasticsearch struct {
	Local              bool
	Servers            []string
	HttpTimeout        internal.Duration
	ClusterHealth      bool
	SSLCA              string `toml:"ssl_ca"`   // Path to CA file
	SSLCert            string `toml:"ssl_cert"` // Path to host cert file
	SSLKey             string `toml:"ssl_key"`  // Path to cert key file
	InsecureSkipVerify bool   // Use SSL but skip chain & host verification
	client             *http.Client
	Local                   bool
	Servers                 []string
	HttpTimeout             internal.Duration
	ClusterHealth           bool
	ClusterStats            bool
	SSLCA                   string `toml:"ssl_ca"`   // Path to CA file
	SSLCert                 string `toml:"ssl_cert"` // Path to host cert file
	SSLKey                  string `toml:"ssl_key"`  // Path to cert key file
	InsecureSkipVerify      bool   // Use SSL but skip chain & host verification
	client                  *http.Client
	catMasterResponseTokens []string
	isMaster                bool
}

// NewElasticsearch return a new instance of Elasticsearch
@@ -123,7 +152,6 @@ func (e *Elasticsearch) Gather(acc telegraf.Accumulator) error {
		e.client = client
	}

	errChan := errchan.New(len(e.Servers))
	var wg sync.WaitGroup
	wg.Add(len(e.Servers))

@@ -136,18 +164,39 @@ func (e *Elasticsearch) Gather(acc telegraf.Accumulator) error {
		} else {
			url = s + statsPath
		}
		e.isMaster = false

		if e.ClusterStats {
			// get cat/master information here so NodeStats can determine
			// whether this node is the Master
			e.setCatMaster(s + "/_cat/master")
		}

		// Always gather node states
		if err := e.gatherNodeStats(url, acc); err != nil {
			errChan.C <- err
			acc.AddError(fmt.Errorf(mask.ReplaceAllString(err.Error(), "http(s)://XXX:XXX@")))
			return
		}

		if e.ClusterHealth {
			e.gatherClusterStats(fmt.Sprintf("%s/_cluster/health?level=indices", s), acc)
			url = s + "/_cluster/health?level=indices"
			if err := e.gatherClusterHealth(url, acc); err != nil {
				acc.AddError(fmt.Errorf(mask.ReplaceAllString(err.Error(), "http(s)://XXX:XXX@")))
				return
			}
		}

		if e.ClusterStats && e.isMaster {
			if err := e.gatherClusterStats(s+"/_cluster/stats", acc); err != nil {
				acc.AddError(fmt.Errorf(mask.ReplaceAllString(err.Error(), "http(s)://XXX:XXX@")))
				return
			}
		}
	}(serv, acc)
	}

	wg.Wait()
	return errChan.Error()
	return nil
}

func (e *Elasticsearch) createHttpClient() (*http.Client, error) {
@@ -169,12 +218,13 @@ func (e *Elasticsearch) createHttpClient() (*http.Client, error) {

func (e *Elasticsearch) gatherNodeStats(url string, acc telegraf.Accumulator) error {
	nodeStats := &struct {
		ClusterName string           `json:"cluster_name"`
		Nodes       map[string]*node `json:"nodes"`
		ClusterName string               `json:"cluster_name"`
		Nodes       map[string]*nodeStat `json:"nodes"`
	}{}
	if err := e.gatherData(url, nodeStats); err != nil {
	if err := e.gatherJsonData(url, nodeStats); err != nil {
		return err
	}

	for id, n := range nodeStats.Nodes {
		tags := map[string]string{
			"node_id": id,
@@ -183,6 +233,11 @@ func (e *Elasticsearch) gatherNodeStats(url string, acc telegraf.Accumulator) er
			"cluster_name": nodeStats.ClusterName,
		}

		if e.ClusterStats {
			// check for master
			e.isMaster = (id == e.catMasterResponseTokens[0])
		}

		for k, v := range n.Attributes {
			tags["node_attribute_"+k] = v
		}
@@ -202,6 +257,7 @@ func (e *Elasticsearch) gatherNodeStats(url string, acc telegraf.Accumulator) er
		now := time.Now()
		for p, s := range stats {
			f := jsonparser.JSONFlattener{}
			// parse Json, ignoring strings and bools
			err := f.FlattenJSON("", s)
			if err != nil {
				return err
@@ -212,31 +268,31 @@ func (e *Elasticsearch) gatherNodeStats(url string, acc telegraf.Accumulator) er
	return nil
}

func (e *Elasticsearch) gatherClusterStats(url string, acc telegraf.Accumulator) error {
	clusterStats := &clusterHealth{}
	if err := e.gatherData(url, clusterStats); err != nil {
func (e *Elasticsearch) gatherClusterHealth(url string, acc telegraf.Accumulator) error {
	healthStats := &clusterHealth{}
	if err := e.gatherJsonData(url, healthStats); err != nil {
		return err
	}
	measurementTime := time.Now()
	clusterFields := map[string]interface{}{
		"status": clusterStats.Status,
		"timed_out": clusterStats.TimedOut,
		"number_of_nodes": clusterStats.NumberOfNodes,
		"number_of_data_nodes": clusterStats.NumberOfDataNodes,
		"active_primary_shards": clusterStats.ActivePrimaryShards,
		"active_shards": clusterStats.ActiveShards,
		"relocating_shards": clusterStats.RelocatingShards,
		"initializing_shards": clusterStats.InitializingShards,
		"unassigned_shards": clusterStats.UnassignedShards,
		"status": healthStats.Status,
		"timed_out": healthStats.TimedOut,
		"number_of_nodes": healthStats.NumberOfNodes,
		"number_of_data_nodes": healthStats.NumberOfDataNodes,
		"active_primary_shards": healthStats.ActivePrimaryShards,
		"active_shards": healthStats.ActiveShards,
		"relocating_shards": healthStats.RelocatingShards,
		"initializing_shards": healthStats.InitializingShards,
		"unassigned_shards": healthStats.UnassignedShards,
	}
	acc.AddFields(
		"elasticsearch_cluster_health",
		clusterFields,
		map[string]string{"name": clusterStats.ClusterName},
		map[string]string{"name": healthStats.ClusterName},
		measurementTime,
	)

	for name, health := range clusterStats.Indices {
	for name, health := range healthStats.Indices {
		indexFields := map[string]interface{}{
			"status": health.Status,
			"number_of_shards": health.NumberOfShards,
@@ -257,7 +313,60 @@ func (e *Elasticsearch) gatherClusterStats(url string, acc telegraf.Accumulator)
	return nil
}

func (e *Elasticsearch) gatherData(url string, v interface{}) error {
func (e *Elasticsearch) gatherClusterStats(url string, acc telegraf.Accumulator) error {
	clusterStats := &clusterStats{}
	if err := e.gatherJsonData(url, clusterStats); err != nil {
		return err
	}
	now := time.Now()
	tags := map[string]string{
		"node_name": clusterStats.NodeName,
		"cluster_name": clusterStats.ClusterName,
		"status": clusterStats.Status,
	}

	stats := map[string]interface{}{
		"nodes": clusterStats.Nodes,
		"indices": clusterStats.Indices,
	}

	for p, s := range stats {
		f := jsonparser.JSONFlattener{}
		// parse json, including bools and strings
		err := f.FullFlattenJSON("", s, true, true)
		if err != nil {
			return err
		}
		acc.AddFields("elasticsearch_clusterstats_"+p, f.Fields, tags, now)
	}

	return nil
}

func (e *Elasticsearch) setCatMaster(url string) error {
	r, err := e.client.Get(url)
	if err != nil {
		return err
	}
	defer r.Body.Close()
	if r.StatusCode != http.StatusOK {
		// NOTE: we are not going to read/discard r.Body under the assumption we'd prefer
		// to let the underlying transport close the connection and re-establish a new one for
		// future calls.
		return fmt.Errorf("status-code %d, expected %d", r.StatusCode, http.StatusOK)
	}
	response, err := ioutil.ReadAll(r.Body)

	if err != nil {
		return err
	}

	e.catMasterResponseTokens = strings.Split(string(response), " ")

	return nil
}

func (e *Elasticsearch) gatherJsonData(url string, v interface{}) error {
	r, err := e.client.Get(url)
	if err != nil {
		return err
@@ -270,9 +379,11 @@ func (e *Elasticsearch) gatherData(url string, v interface{}) error {
		return fmt.Errorf("elasticsearch: API responded with status-code %d, expected %d",
			r.StatusCode, http.StatusOK)
	}

	if err = json.NewDecoder(r.Body).Decode(v); err != nil {
		return err
	}

	return nil
}

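Condensing the control flow added above: per server, the plugin first snapshots `_cat/master`, then flags itself master while walking node stats, and only then pulls `_cluster/stats`. A sketch with error handling elided, assuming (as the code above does) that `_cat/master` returns a space-separated line beginning with the master's node id:

    e.isMaster = false
    if e.ClusterStats {
        e.setCatMaster(s + "/_cat/master") // catMasterResponseTokens[0] == master node id
    }
    e.gatherNodeStats(url, acc) // sets e.isMaster when the local node id matches
    if e.ClusterStats && e.isMaster {
        e.gatherClusterStats(s+"/_cluster/stats", acc)
    }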
@@ -8,6 +8,8 @@ import (

	"github.com/influxdata/telegraf/testutil"

	"fmt"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

@@ -37,16 +39,13 @@ func (t *transportMock) RoundTrip(r *http.Request) (*http.Response, error) {
func (t *transportMock) CancelRequest(_ *http.Request) {
}

func TestElasticsearch(t *testing.T) {
	es := newElasticsearchWithClient()
	es.Servers = []string{"http://example.com:9200"}
	es.client.Transport = newTransportMock(http.StatusOK, statsResponse)

	var acc testutil.Accumulator
	if err := es.Gather(&acc); err != nil {
		t.Fatal(err)
func checkIsMaster(es *Elasticsearch, expected bool, t *testing.T) {
	if es.isMaster != expected {
		msg := fmt.Sprintf("IsMaster set incorrectly")
		assert.Fail(t, msg)
	}

}
func checkNodeStatsResult(t *testing.T, acc *testutil.Accumulator) {
	tags := map[string]string{
		"cluster_name": "es-testcluster",
		"node_attribute_master": "true",
@@ -55,25 +54,55 @@ func TestElasticsearch(t *testing.T) {
		"node_host": "test",
	}

	acc.AssertContainsTaggedFields(t, "elasticsearch_indices", indicesExpected, tags)
	acc.AssertContainsTaggedFields(t, "elasticsearch_os", osExpected, tags)
	acc.AssertContainsTaggedFields(t, "elasticsearch_process", processExpected, tags)
	acc.AssertContainsTaggedFields(t, "elasticsearch_jvm", jvmExpected, tags)
	acc.AssertContainsTaggedFields(t, "elasticsearch_thread_pool", threadPoolExpected, tags)
	acc.AssertContainsTaggedFields(t, "elasticsearch_fs", fsExpected, tags)
	acc.AssertContainsTaggedFields(t, "elasticsearch_transport", transportExpected, tags)
	acc.AssertContainsTaggedFields(t, "elasticsearch_http", httpExpected, tags)
	acc.AssertContainsTaggedFields(t, "elasticsearch_breakers", breakersExpected, tags)
	acc.AssertContainsTaggedFields(t, "elasticsearch_indices", nodestatsIndicesExpected, tags)
	acc.AssertContainsTaggedFields(t, "elasticsearch_os", nodestatsOsExpected, tags)
	acc.AssertContainsTaggedFields(t, "elasticsearch_process", nodestatsProcessExpected, tags)
	acc.AssertContainsTaggedFields(t, "elasticsearch_jvm", nodestatsJvmExpected, tags)
	acc.AssertContainsTaggedFields(t, "elasticsearch_thread_pool", nodestatsThreadPoolExpected, tags)
	acc.AssertContainsTaggedFields(t, "elasticsearch_fs", nodestatsFsExpected, tags)
	acc.AssertContainsTaggedFields(t, "elasticsearch_transport", nodestatsTransportExpected, tags)
	acc.AssertContainsTaggedFields(t, "elasticsearch_http", nodestatsHttpExpected, tags)
	acc.AssertContainsTaggedFields(t, "elasticsearch_breakers", nodestatsBreakersExpected, tags)
}

func TestGatherClusterStats(t *testing.T) {
func TestGather(t *testing.T) {
	es := newElasticsearchWithClient()
	es.Servers = []string{"http://example.com:9200"}
	es.client.Transport = newTransportMock(http.StatusOK, nodeStatsResponse)

	var acc testutil.Accumulator
	if err := acc.GatherError(es.Gather); err != nil {
		t.Fatal(err)
	}

	checkIsMaster(es, false, t)
	checkNodeStatsResult(t, &acc)
}

func TestGatherNodeStats(t *testing.T) {
	es := newElasticsearchWithClient()
	es.Servers = []string{"http://example.com:9200"}
	es.client.Transport = newTransportMock(http.StatusOK, nodeStatsResponse)

	var acc testutil.Accumulator
	if err := es.gatherNodeStats("junk", &acc); err != nil {
		t.Fatal(err)
	}

	checkIsMaster(es, false, t)
	checkNodeStatsResult(t, &acc)
}

func TestGatherClusterHealth(t *testing.T) {
	es := newElasticsearchWithClient()
	es.Servers = []string{"http://example.com:9200"}
	es.ClusterHealth = true
	es.client.Transport = newTransportMock(http.StatusOK, clusterResponse)
	es.client.Transport = newTransportMock(http.StatusOK, clusterHealthResponse)

	var acc testutil.Accumulator
	require.NoError(t, es.Gather(&acc))
	require.NoError(t, es.gatherClusterHealth("junk", &acc))

	checkIsMaster(es, false, t)

	acc.AssertContainsTaggedFields(t, "elasticsearch_cluster_health",
		clusterHealthExpected,
@@ -88,6 +117,77 @@ func TestGatherClusterStats(t *testing.T) {
		map[string]string{"index": "v2"})
}

func TestGatherClusterStatsMaster(t *testing.T) {
	// This needs multiple steps to replicate the multiple calls internally.
	es := newElasticsearchWithClient()
	es.ClusterStats = true
	es.Servers = []string{"http://example.com:9200"}

	// first get catMaster
	es.client.Transport = newTransportMock(http.StatusOK, IsMasterResult)
	require.NoError(t, es.setCatMaster("junk"))

	IsMasterResultTokens := strings.Split(string(IsMasterResult), " ")
	if es.catMasterResponseTokens[0] != IsMasterResultTokens[0] {
		msg := fmt.Sprintf("catmaster is incorrect")
		assert.Fail(t, msg)
	}

	// now get node status, which determines whether we're master
	var acc testutil.Accumulator
	es.Local = true
	es.client.Transport = newTransportMock(http.StatusOK, nodeStatsResponse)
	if err := es.gatherNodeStats("junk", &acc); err != nil {
		t.Fatal(err)
	}

	checkIsMaster(es, true, t)
	checkNodeStatsResult(t, &acc)

	// now test the clusterstats method
	es.client.Transport = newTransportMock(http.StatusOK, clusterStatsResponse)
	require.NoError(t, es.gatherClusterStats("junk", &acc))

	tags := map[string]string{
		"cluster_name": "es-testcluster",
		"node_name": "test.host.com",
		"status": "red",
	}

	acc.AssertContainsTaggedFields(t, "elasticsearch_clusterstats_nodes", clusterstatsNodesExpected, tags)
	acc.AssertContainsTaggedFields(t, "elasticsearch_clusterstats_indices", clusterstatsIndicesExpected, tags)
}

func TestGatherClusterStatsNonMaster(t *testing.T) {
	// This needs multiple steps to replicate the multiple calls internally.
	es := newElasticsearchWithClient()
	es.ClusterStats = true
	es.Servers = []string{"http://example.com:9200"}

	// first get catMaster
	es.client.Transport = newTransportMock(http.StatusOK, IsNotMasterResult)
	require.NoError(t, es.setCatMaster("junk"))

	IsNotMasterResultTokens := strings.Split(string(IsNotMasterResult), " ")
	if es.catMasterResponseTokens[0] != IsNotMasterResultTokens[0] {
		msg := fmt.Sprintf("catmaster is incorrect")
		assert.Fail(t, msg)
	}

	// now get node status, which determines whether we're master
	var acc testutil.Accumulator
	es.Local = true
	es.client.Transport = newTransportMock(http.StatusOK, nodeStatsResponse)
	if err := es.gatherNodeStats("junk", &acc); err != nil {
		t.Fatal(err)
	}

	// ensure flag is clear so Cluster Stats would not be done
	checkIsMaster(es, false, t)
	checkNodeStatsResult(t, &acc)

}

func newElasticsearchWithClient() *Elasticsearch {
	es := NewElasticsearch()
	es.client = &http.Client{}

@@ -1,6 +1,6 @@
|
||||
package elasticsearch
|
||||
|
||||
const clusterResponse = `
|
||||
const clusterHealthResponse = `
|
||||
{
|
||||
"cluster_name": "elasticsearch_telegraf",
|
||||
"status": "green",
|
||||
@@ -71,7 +71,7 @@ var v2IndexExpected = map[string]interface{}{
|
||||
"unassigned_shards": 20,
|
||||
}
|
||||
|
||||
const statsResponse = `
|
||||
const nodeStatsResponse = `
|
||||
{
|
||||
"cluster_name": "es-testcluster",
|
||||
"nodes": {
|
||||
@@ -489,7 +489,7 @@ const statsResponse = `
|
||||
}
|
||||
`
|
||||
|
||||
var indicesExpected = map[string]interface{}{
|
||||
var nodestatsIndicesExpected = map[string]interface{}{
|
||||
"id_cache_memory_size_in_bytes": float64(0),
|
||||
"completion_size_in_bytes": float64(0),
|
||||
"suggest_total": float64(0),
|
||||
@@ -561,7 +561,7 @@ var indicesExpected = map[string]interface{}{
|
||||
"segments_fixed_bit_set_memory_in_bytes": float64(0),
|
||||
}
|
||||
|
||||
var osExpected = map[string]interface{}{
|
||||
var nodestatsOsExpected = map[string]interface{}{
|
||||
"load_average_0": float64(0.01),
|
||||
"load_average_1": float64(0.04),
|
||||
"load_average_2": float64(0.05),
|
||||
@@ -576,7 +576,7 @@ var osExpected = map[string]interface{}{
|
||||
"mem_used_in_bytes": float64(1621868544),
|
||||
}
|
||||
|
||||
var processExpected = map[string]interface{}{
|
||||
var nodestatsProcessExpected = map[string]interface{}{
|
||||
"mem_total_virtual_in_bytes": float64(4747890688),
|
||||
"timestamp": float64(1436460392945),
|
||||
"open_file_descriptors": float64(160),
|
||||
@@ -586,7 +586,7 @@ var processExpected = map[string]interface{}{
|
||||
"cpu_user_in_millis": float64(13610),
|
||||
}
|
||||
|
||||
var jvmExpected = map[string]interface{}{
|
||||
var nodestatsJvmExpected = map[string]interface{}{
|
||||
"timestamp": float64(1436460392945),
|
||||
"uptime_in_millis": float64(202245),
|
||||
"mem_non_heap_used_in_bytes": float64(39634576),
|
||||
@@ -621,7 +621,7 @@ var jvmExpected = map[string]interface{}{
|
||||
"buffer_pools_mapped_total_capacity_in_bytes": float64(0),
|
||||
}
|
||||
|
||||
var threadPoolExpected = map[string]interface{}{
|
||||
var nodestatsThreadPoolExpected = map[string]interface{}{
|
||||
"merge_threads": float64(6),
|
||||
"merge_queue": float64(4),
|
||||
"merge_active": float64(5),
|
||||
@@ -726,7 +726,7 @@ var threadPoolExpected = map[string]interface{}{
|
||||
"flush_completed": float64(3),
|
||||
}
|
||||
|
||||
var fsExpected = map[string]interface{}{
|
||||
var nodestatsFsExpected = map[string]interface{}{
|
||||
"data_0_total_in_bytes": float64(19507089408),
|
||||
"data_0_free_in_bytes": float64(16909316096),
|
||||
"data_0_available_in_bytes": float64(15894814720),
|
||||
@@ -736,7 +736,7 @@ var fsExpected = map[string]interface{}{
|
||||
"total_total_in_bytes": float64(19507089408),
|
||||
}
|
||||
|
||||
var transportExpected = map[string]interface{}{
|
||||
var nodestatsTransportExpected = map[string]interface{}{
|
||||
"server_open": float64(13),
|
||||
"rx_count": float64(6),
|
||||
"rx_size_in_bytes": float64(1380),
|
||||
@@ -744,12 +744,12 @@ var transportExpected = map[string]interface{}{
|
||||
"tx_size_in_bytes": float64(1380),
|
||||
}
|
||||
|
||||
var httpExpected = map[string]interface{}{
|
||||
var nodestatsHttpExpected = map[string]interface{}{
|
||||
"current_open": float64(3),
|
||||
"total_opened": float64(3),
|
||||
}
|
||||
|
||||
var breakersExpected = map[string]interface{}{
|
||||
var nodestatsBreakersExpected = map[string]interface{}{
|
||||
"fielddata_estimated_size_in_bytes": float64(0),
|
||||
"fielddata_overhead": float64(1.03),
|
||||
"fielddata_tripped": float64(0),
|
||||
@@ -763,3 +763,273 @@ var breakersExpected = map[string]interface{}{
|
||||
"parent_limit_size_in_bytes": float64(727213670),
|
||||
"parent_estimated_size_in_bytes": float64(0),
|
||||
}
|
||||
|
||||
const clusterStatsResponse = `
|
||||
{
|
||||
"host":"ip-10-0-1-214",
|
||||
"log_type":"metrics",
|
||||
"timestamp":1475767451229,
|
||||
"log_level":"INFO",
|
||||
"node_name":"test.host.com",
|
||||
"cluster_name":"es-testcluster",
|
||||
"status":"red",
|
||||
"indices":{
|
||||
"count":1,
|
||||
"shards":{
|
||||
"total":4,
|
||||
"primaries":4,
|
||||
"replication":0.0,
|
||||
"index":{
|
||||
"shards":{
|
||||
"min":4,
|
||||
"max":4,
|
||||
"avg":4.0
|
||||
},
|
||||
"primaries":{
|
||||
"min":4,
|
||||
"max":4,
|
||||
"avg":4.0
|
||||
},
|
||||
"replication":{
|
||||
"min":0.0,
|
||||
"max":0.0,
|
||||
"avg":0.0
|
||||
}
|
||||
}
|
||||
},
|
||||
"docs":{
|
||||
"count":4,
|
||||
"deleted":0
|
||||
},
|
||||
"store":{
|
||||
"size_in_bytes":17084,
|
||||
"throttle_time_in_millis":0
|
||||
},
|
||||
"fielddata":{
|
||||
"memory_size_in_bytes":0,
|
||||
"evictions":0
|
||||
},
|
||||
"query_cache":{
|
||||
"memory_size_in_bytes":0,
|
||||
"total_count":0,
|
||||
"hit_count":0,
|
||||
"miss_count":0,
|
||||
"cache_size":0,
|
||||
"cache_count":0,
|
||||
"evictions":0
|
||||
},
|
||||
"completion":{
|
||||
"size_in_bytes":0
|
||||
},
|
||||
"segments":{
|
||||
"count":4,
|
||||
"memory_in_bytes":11828,
|
||||
"terms_memory_in_bytes":8932,
|
||||
"stored_fields_memory_in_bytes":1248,
|
||||
"term_vectors_memory_in_bytes":0,
|
||||
"norms_memory_in_bytes":1280,
|
||||
"doc_values_memory_in_bytes":368,
|
||||
"index_writer_memory_in_bytes":0,
|
||||
"index_writer_max_memory_in_bytes":2048000,
|
||||
"version_map_memory_in_bytes":0,
|
||||
"fixed_bit_set_memory_in_bytes":0
|
||||
},
|
||||
"percolate":{
|
||||
"total":0,
|
||||
"time_in_millis":0,
|
||||
"current":0,
|
||||
"memory_size_in_bytes":-1,
|
||||
"memory_size":"-1b",
|
||||
"queries":0
|
||||
}
|
||||
},
|
||||
"nodes":{
|
||||
"count":{
|
||||
"total":1,
|
||||
"master_only":0,
|
||||
"data_only":0,
|
||||
"master_data":1,
|
||||
"client":0
|
||||
},
|
||||
"versions":[
|
||||
{
|
||||
"version": "2.3.3"
|
||||
}
|
||||
],
|
||||
"os":{
|
||||
"available_processors":1,
|
||||
"allocated_processors":1,
|
||||
"mem":{
|
||||
"total_in_bytes":593301504
|
||||
},
|
||||
"names":[
|
||||
{
|
||||
"name":"Linux",
|
||||
"count":1
|
||||
}
|
||||
]
|
||||
},
|
||||
"process":{
|
||||
"cpu":{
|
||||
"percent":0
|
||||
},
|
||||
"open_file_descriptors":{
|
||||
"min":145,
|
||||
"max":145,
|
||||
"avg":145
|
||||
}
|
||||
},
|
||||
"jvm":{
|
||||
"max_uptime_in_millis":11580527,
|
||||
"versions":[
|
||||
{
|
||||
"version":"1.8.0_101",
|
||||
"vm_name":"OpenJDK 64-Bit Server VM",
|
||||
"vm_version":"25.101-b13",
|
||||
"vm_vendor":"Oracle Corporation",
|
||||
"count":1
|
||||
}
|
||||
],
|
||||
"mem":{
|
||||
"heap_used_in_bytes":70550288,
|
||||
"heap_max_in_bytes":1065025536
|
||||
},
|
||||
"threads":30
|
||||
},
|
||||
"fs":{
|
||||
"total_in_bytes":8318783488,
|
||||
"free_in_bytes":6447439872,
|
||||
"available_in_bytes":6344785920
|
||||
},
|
||||
"plugins":[
|
||||
{
|
||||
"name":"cloud-aws",
|
||||
"version":"2.3.3",
|
||||
"description":"The Amazon Web Service (AWS) Cloud plugin allows to use AWS API for the unicast discovery mechanism and add S3 repositories.",
|
||||
"jvm":true,
|
||||
"classname":"org.elasticsearch.plugin.cloud.aws.CloudAwsPlugin",
|
||||
"isolated":true,
|
||||
"site":false
|
||||
},
|
||||
{
|
||||
"name":"kopf",
|
||||
"version":"2.0.1",
|
||||
"description":"kopf - simple web administration tool for Elasticsearch",
|
||||
"url":"/_plugin/kopf/",
|
||||
"jvm":false,
|
||||
"site":true
|
||||
},
|
||||
{
|
||||
"name":"tr-metrics",
|
||||
"version":"7bd5b4b",
|
||||
"description":"Logs cluster and node stats for performance monitoring.",
|
||||
"jvm":true,
|
||||
"classname":"com.trgr.elasticsearch.plugin.metrics.MetricsPlugin",
|
||||
"isolated":true,
|
||||
"site":false
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
`
|
||||
|
||||
var clusterstatsIndicesExpected = map[string]interface{}{
|
||||
"completion_size_in_bytes": float64(0),
|
||||
"count": float64(1),
|
||||
"docs_count": float64(4),
|
||||
"docs_deleted": float64(0),
|
||||
"fielddata_evictions": float64(0),
|
||||
"fielddata_memory_size_in_bytes": float64(0),
|
||||
"percolate_current": float64(0),
|
||||
"percolate_memory_size_in_bytes": float64(-1),
|
||||
"percolate_queries": float64(0),
|
||||
"percolate_time_in_millis": float64(0),
|
||||
"percolate_total": float64(0),
|
||||
"percolate_memory_size": "-1b",
|
||||
"query_cache_cache_count": float64(0),
|
||||
"query_cache_cache_size": float64(0),
|
||||
"query_cache_evictions": float64(0),
|
||||
"query_cache_hit_count": float64(0),
|
||||
"query_cache_memory_size_in_bytes": float64(0),
|
||||
"query_cache_miss_count": float64(0),
|
||||
"query_cache_total_count": float64(0),
|
||||
"segments_count": float64(4),
|
||||
"segments_doc_values_memory_in_bytes": float64(368),
|
||||
"segments_fixed_bit_set_memory_in_bytes": float64(0),
|
||||
"segments_index_writer_max_memory_in_bytes": float64(2.048e+06),
|
||||
"segments_index_writer_memory_in_bytes": float64(0),
|
||||
"segments_memory_in_bytes": float64(11828),
|
||||
"segments_norms_memory_in_bytes": float64(1280),
|
||||
"segments_stored_fields_memory_in_bytes": float64(1248),
|
||||
"segments_term_vectors_memory_in_bytes": float64(0),
|
||||
"segments_terms_memory_in_bytes": float64(8932),
|
||||
"segments_version_map_memory_in_bytes": float64(0),
|
||||
"shards_index_primaries_avg": float64(4),
|
||||
"shards_index_primaries_max": float64(4),
|
||||
"shards_index_primaries_min": float64(4),
|
||||
"shards_index_replication_avg": float64(0),
|
||||
"shards_index_replication_max": float64(0),
|
||||
"shards_index_replication_min": float64(0),
|
||||
"shards_index_shards_avg": float64(4),
|
||||
"shards_index_shards_max": float64(4),
|
||||
"shards_index_shards_min": float64(4),
|
||||
"shards_primaries": float64(4),
|
||||
"shards_replication": float64(0),
|
||||
"shards_total": float64(4),
|
||||
"store_size_in_bytes": float64(17084),
|
||||
"store_throttle_time_in_millis": float64(0),
|
||||
}
|
||||
|
||||
var clusterstatsNodesExpected = map[string]interface{}{
|
||||
"count_client": float64(0),
|
||||
"count_data_only": float64(0),
|
||||
"count_master_data": float64(1),
|
||||
"count_master_only": float64(0),
|
||||
"count_total": float64(1),
|
||||
"fs_available_in_bytes": float64(6.34478592e+09),
|
||||
"fs_free_in_bytes": float64(6.447439872e+09),
|
||||
"fs_total_in_bytes": float64(8.318783488e+09),
|
||||
"jvm_max_uptime_in_millis": float64(1.1580527e+07),
|
||||
"jvm_mem_heap_max_in_bytes": float64(1.065025536e+09),
|
||||
"jvm_mem_heap_used_in_bytes": float64(7.0550288e+07),
|
||||
"jvm_threads": float64(30),
|
||||
"jvm_versions_0_count": float64(1),
|
||||
"jvm_versions_0_version": "1.8.0_101",
|
||||
"jvm_versions_0_vm_name": "OpenJDK 64-Bit Server VM",
|
||||
"jvm_versions_0_vm_vendor": "Oracle Corporation",
|
||||
"jvm_versions_0_vm_version": "25.101-b13",
|
||||
"os_allocated_processors": float64(1),
|
||||
"os_available_processors": float64(1),
|
||||
"os_mem_total_in_bytes": float64(5.93301504e+08),
|
||||
"os_names_0_count": float64(1),
|
||||
"os_names_0_name": "Linux",
|
||||
"process_cpu_percent": float64(0),
|
||||
"process_open_file_descriptors_avg": float64(145),
|
||||
"process_open_file_descriptors_max": float64(145),
|
||||
"process_open_file_descriptors_min": float64(145),
|
||||
"versions_0_version": "2.3.3",
|
||||
"plugins_0_classname": "org.elasticsearch.plugin.cloud.aws.CloudAwsPlugin",
|
||||
"plugins_0_description": "The Amazon Web Service (AWS) Cloud plugin allows to use AWS API for the unicast discovery mechanism and add S3 repositories.",
|
||||
"plugins_0_isolated": true,
|
||||
"plugins_0_jvm": true,
|
||||
"plugins_0_name": "cloud-aws",
|
||||
"plugins_0_site": false,
|
||||
"plugins_0_version": "2.3.3",
|
||||
"plugins_1_description": "kopf - simple web administration tool for Elasticsearch",
|
||||
"plugins_1_jvm": false,
|
||||
"plugins_1_name": "kopf",
|
||||
"plugins_1_site": true,
|
||||
"plugins_1_url": "/_plugin/kopf/",
|
||||
"plugins_1_version": "2.0.1",
|
||||
"plugins_2_classname": "com.trgr.elasticsearch.plugin.metrics.MetricsPlugin",
|
||||
"plugins_2_description": "Logs cluster and node stats for performance monitoring.",
|
||||
"plugins_2_isolated": true,
|
||||
"plugins_2_jvm": true,
|
||||
"plugins_2_name": "tr-metrics",
|
||||
"plugins_2_site": false,
|
||||
"plugins_2_version": "7bd5b4b",
|
||||
}
|
||||
|
||||
const IsMasterResult = "SDFsfSDFsdfFSDSDfSFDSDF 10.206.124.66 10.206.124.66 test.host.com "
|
||||
|
||||
const IsNotMasterResult = "junk 10.206.124.66 10.206.124.66 test.junk.com "
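For orientation: these fixture maps and constants drive the elasticsearch input's unit tests. A canned API response is gathered into a testutil.Accumulator and its flattened fields are compared against the expected map, while IsMasterResult/IsNotMasterResult exercise master-node detection. A hedged sketch of how such a fixture is typically asserted; the plugin value `es`, the measurement name, and the tag set below are illustrative assumptions, not taken from this diff:

// Sketch only: assumes an elasticsearch plugin value `es` whose HTTP
// client is mocked to return the canned cluster-stats payload.
func TestClusterStatsFixture(t *testing.T) {
	var acc testutil.Accumulator
	require.NoError(t, acc.GatherError(es.Gather))

	// Hypothetical measurement and tag names, for illustration.
	acc.AssertContainsTaggedFields(t, "elasticsearch_clusterstats_nodes",
		clusterstatsNodesExpected,
		map[string]string{"cluster_name": "es-testcluster"})
}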
@@ -11,11 +11,10 @@ import (
 	"syscall"
 	"time"
 
-	"github.com/gonuts/go-shellquote"
+	"github.com/kballard/go-shellquote"
 
 	"github.com/influxdata/telegraf"
 	"github.com/influxdata/telegraf/internal"
-	"github.com/influxdata/telegraf/internal/errchan"
 	"github.com/influxdata/telegraf/plugins/inputs"
 	"github.com/influxdata/telegraf/plugins/parsers"
 	"github.com/influxdata/telegraf/plugins/parsers/nagios"
@@ -36,7 +35,7 @@ const sampleConfig = `
   name_suffix = "_mycollector"
 
   ## Data format to consume.
-  ## Each data format has it's own unique set of configuration options, read
+  ## Each data format has its own unique set of configuration options, read
   ## more about them here:
   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
   data_format = "influx"
@@ -49,8 +48,7 @@ type Exec struct {
 
 	parser parsers.Parser
 
-	runner  Runner
-	errChan chan error
+	runner Runner
 }
 
 func NewExec() *Exec {
@@ -150,13 +148,13 @@ func (e *Exec) ProcessCommand(command string, acc telegraf.Accumulator, wg *sync
 
 	out, err := e.runner.Run(e, command, acc)
 	if err != nil {
-		e.errChan <- err
+		acc.AddError(err)
 		return
 	}
 
 	metrics, err := e.parser.Parse(out)
 	if err != nil {
-		e.errChan <- err
+		acc.AddError(err)
 	} else {
 		for _, metric := range metrics {
 			acc.AddFields(metric.Name(), metric.Fields(), metric.Tags(), metric.Time())
@@ -193,7 +191,8 @@ func (e *Exec) Gather(acc telegraf.Accumulator) error {
 
 		matches, err := filepath.Glob(cmdAndArgs[0])
 		if err != nil {
-			return err
+			acc.AddError(err)
+			continue
 		}
 
 		if len(matches) == 0 {
@@ -214,15 +213,12 @@ func (e *Exec) Gather(acc telegraf.Accumulator) error {
 		}
 	}
 
-	errChan := errchan.New(len(commands))
-	e.errChan = errChan.C
-
 	wg.Add(len(commands))
 	for _, command := range commands {
 		go e.ProcessCommand(command, acc, &wg)
 	}
 	wg.Wait()
-	return errChan.Error()
+	return nil
 }
 
 func init() {
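Taken together, these exec hunks replace the plugin-owned error channel (an errchan.ErrChan sized to len(commands), which Gather had to drain) with accumulator-reported errors: each per-command goroutine files its failure via acc.AddError, and Gather just waits on the WaitGroup and returns nil. A self-contained sketch of that fan-out pattern; errAccumulator here stands in for the error side of telegraf.Accumulator and is not Telegraf's API:

package main

import (
	"fmt"
	"sync"
)

// errAccumulator models only the AddError side of an accumulator;
// the real interface is much richer.
type errAccumulator struct {
	mu   sync.Mutex
	errs []error
}

// AddError is safe for concurrent use and ignores nil, so goroutines
// can file a failure (or a nil success) without extra guards.
func (a *errAccumulator) AddError(err error) {
	if err == nil {
		return
	}
	a.mu.Lock()
	defer a.mu.Unlock()
	a.errs = append(a.errs, err)
}

func main() {
	acc := &errAccumulator{}
	commands := []string{"ok-1", "boom", "ok-2"}

	var wg sync.WaitGroup
	wg.Add(len(commands))
	for _, command := range commands {
		go func(command string) {
			defer wg.Done()
			if command == "boom" {
				acc.AddError(fmt.Errorf("command %q failed", command))
			}
		}(command)
	}
	wg.Wait()

	// Gather itself now returns nil; the agent reads errors
	// from the accumulator instead.
	fmt.Printf("%d error(s) accumulated\n", len(acc.errs))
}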
@@ -36,7 +36,9 @@ const malformedJson = `
     "status": "green",
 `
 
-const lineProtocol = "cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1"
+const lineProtocol = "cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1\n"
+const lineProtocolEmpty = ""
+const lineProtocolShort = "ab"
 
 const lineProtocolMulti = `
 cpu,cpu=cpu0,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
@@ -99,7 +101,7 @@ func TestExec(t *testing.T) {
 	}
 
 	var acc testutil.Accumulator
-	err := e.Gather(&acc)
+	err := acc.GatherError(e.Gather)
 	require.NoError(t, err)
 	assert.Equal(t, acc.NFields(), 8, "non-numeric measurements should be ignored")
 
@@ -125,8 +127,7 @@ func TestExecMalformed(t *testing.T) {
 	}
 
 	var acc testutil.Accumulator
-	err := e.Gather(&acc)
-	require.Error(t, err)
+	require.Error(t, acc.GatherError(e.Gather))
 	assert.Equal(t, acc.NFields(), 0, "No new points should have been added")
 }
 
@@ -139,8 +140,7 @@ func TestCommandError(t *testing.T) {
 	}
 
 	var acc testutil.Accumulator
-	err := e.Gather(&acc)
-	require.Error(t, err)
+	require.Error(t, acc.GatherError(e.Gather))
 	assert.Equal(t, acc.NFields(), 0, "No new points should have been added")
 }
 
@@ -153,8 +153,7 @@ func TestLineProtocolParse(t *testing.T) {
 	}
 
 	var acc testutil.Accumulator
-	err := e.Gather(&acc)
-	require.NoError(t, err)
+	require.NoError(t, acc.GatherError(e.Gather))
 
 	fields := map[string]interface{}{
 		"usage_idle": float64(99),
@@ -167,6 +166,33 @@ func TestLineProtocolParse(t *testing.T) {
 	acc.AssertContainsTaggedFields(t, "cpu", fields, tags)
 }
 
+func TestLineProtocolEmptyParse(t *testing.T) {
+	parser, _ := parsers.NewInfluxParser()
+	e := &Exec{
+		runner:   newRunnerMock([]byte(lineProtocolEmpty), nil),
+		Commands: []string{"line-protocol"},
+		parser:   parser,
+	}
+
+	var acc testutil.Accumulator
+	err := e.Gather(&acc)
+	require.NoError(t, err)
+}
+
+func TestLineProtocolShortParse(t *testing.T) {
+	parser, _ := parsers.NewInfluxParser()
+	e := &Exec{
+		runner:   newRunnerMock([]byte(lineProtocolShort), nil),
+		Commands: []string{"line-protocol"},
+		parser:   parser,
+	}
+
+	var acc testutil.Accumulator
+	err := acc.GatherError(e.Gather)
+	require.Error(t, err)
+	assert.Contains(t, err.Error(), "buffer too short", "A buffer too short error was expected")
+}
+
 func TestLineProtocolParseMultiple(t *testing.T) {
 	parser, _ := parsers.NewInfluxParser()
 	e := &Exec{
@@ -176,7 +202,7 @@ func TestLineProtocolParseMultiple(t *testing.T) {
 	}
 
 	var acc testutil.Accumulator
-	err := e.Gather(&acc)
+	err := acc.GatherError(e.Gather)
 	require.NoError(t, err)
 
 	fields := map[string]interface{}{
@@ -202,7 +228,7 @@ func TestExecCommandWithGlob(t *testing.T) {
 	e.SetParser(parser)
 
 	var acc testutil.Accumulator
-	err := e.Gather(&acc)
+	err := acc.GatherError(e.Gather)
 	require.NoError(t, err)
 
 	fields := map[string]interface{}{
@@ -218,7 +244,7 @@ func TestExecCommandWithoutGlob(t *testing.T) {
 	e.SetParser(parser)
 
 	var acc testutil.Accumulator
-	err := e.Gather(&acc)
+	err := acc.GatherError(e.Gather)
 	require.NoError(t, err)
 
 	fields := map[string]interface{}{
@@ -234,7 +260,7 @@ func TestExecCommandWithoutGlobAndPath(t *testing.T) {
 	e.SetParser(parser)
 
 	var acc testutil.Accumulator
-	err := e.Gather(&acc)
+	err := acc.GatherError(e.Gather)
 	require.NoError(t, err)
 
 	fields := map[string]interface{}{
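Every test here migrates from err := e.Gather(&acc) to acc.GatherError(e.Gather) for the same reason: once failures flow through acc.AddError, Gather's return value alone no longer proves success, so the helper has to run the gather and then surface whatever was accumulated. A self-contained approximation of that behavior (a sketch, not the testutil source):

package demo

import "fmt"

// toyAcc carries only what GatherError needs: the errors a plugin
// filed via AddError during one gather cycle.
type toyAcc struct{ Errors []error }

// GatherError approximates the testutil helper: run the gather
// function, then promote any accumulated errors into the return value.
func (a *toyAcc) GatherError(gather func(*toyAcc) error) error {
	if err := gather(a); err != nil {
		return err
	}
	if len(a.Errors) > 0 {
		return fmt.Errorf("accumulated %d error(s), first: %v", len(a.Errors), a.Errors[0])
	}
	return nil
}

With that shape, require.Error(t, acc.GatherError(e.Gather)) in TestExecMalformed above catches both direct and accumulated failures with a single assertion.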
@@ -4,6 +4,7 @@ import (
 	"crypto/md5"
 	"fmt"
 	"io"
+	"log"
 	"os"
 
 	"github.com/influxdata/telegraf"
@@ -47,7 +48,6 @@ func (_ *FileStat) Description() string {
 func (_ *FileStat) SampleConfig() string { return sampleConfig }
 
 func (f *FileStat) Gather(acc telegraf.Accumulator) error {
-	var errS string
 	var err error
 
 	for _, filepath := range f.Files {
@@ -55,7 +55,7 @@ func (f *FileStat) Gather(acc telegraf.Accumulator) error {
 		g, ok := f.globs[filepath]
 		if !ok {
 			if g, err = globpath.Compile(filepath); err != nil {
-				errS += err.Error() + " "
+				acc.AddError(err)
 				continue
 			}
 			f.globs[filepath] = g
@@ -78,14 +78,20 @@ func (f *FileStat) Gather(acc telegraf.Accumulator) error {
 			"file": fileName,
 		}
 		fields := map[string]interface{}{
-			"exists":     int64(1),
-			"size_bytes": fileInfo.Size(),
+			"exists": int64(1),
+		}
+
+		if fileInfo == nil {
+			log.Printf("E! Unable to get info for file [%s], possible permissions issue",
+				fileName)
+		} else {
+			fields["size_bytes"] = fileInfo.Size()
 		}
 
 		if f.Md5 {
 			md5, err := getMd5(fileName)
 			if err != nil {
-				errS += err.Error() + " "
+				acc.AddError(err)
 			} else {
 				fields["md5_sum"] = md5
 			}
@@ -95,9 +101,6 @@ func (f *FileStat) Gather(acc telegraf.Accumulator) error {
 		}
 	}
 
-	if errS != "" {
-		return fmt.Errorf(errS)
-	}
 	return nil
 }
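Beyond swapping the concatenated errS string for acc.AddError, the filestat change builds the fields map incrementally: exists is always reported, while size_bytes is added only when the glob match produced a usable os.FileInfo. Previously fileInfo.Size() ran unconditionally, so a nil FileInfo (a permissions problem, per the new log message) could panic the whole Gather. A self-contained sketch of the guarded pattern, using os.Stat in place of the plugin's glob machinery:

package main

import (
	"fmt"
	"log"
	"os"
)

// statFields mirrors the guarded pattern above: "exists" is always
// set, "size_bytes" only when the file could actually be stat'ed.
func statFields(fileName string) map[string]interface{} {
	fields := map[string]interface{}{"exists": int64(1)}
	info, err := os.Stat(fileName)
	if err != nil || info == nil {
		log.Printf("E! Unable to get info for file [%s]: %v", fileName, err)
		return fields
	}
	fields["size_bytes"] = info.Size()
	return fields
}

func main() {
	fmt.Println(statFields("/etc/hostname"))
}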
@@ -19,7 +19,7 @@ func TestGatherNoMd5(t *testing.T) {
 	}
 
 	acc := testutil.Accumulator{}
-	fs.Gather(&acc)
+	acc.GatherError(fs.Gather)
 
 	tags1 := map[string]string{
 		"file": dir + "log1.log",
@@ -59,7 +59,7 @@ func TestGatherExplicitFiles(t *testing.T) {
 	}
 
 	acc := testutil.Accumulator{}
-	fs.Gather(&acc)
+	acc.GatherError(fs.Gather)
 
 	tags1 := map[string]string{
 		"file": dir + "log1.log",
@@ -99,7 +99,7 @@ func TestGatherGlob(t *testing.T) {
 	}
 
 	acc := testutil.Accumulator{}
-	fs.Gather(&acc)
+	acc.GatherError(fs.Gather)
 
 	tags1 := map[string]string{
 		"file": dir + "log1.log",
@@ -131,7 +131,7 @@ func TestGatherSuperAsterisk(t *testing.T) {
 	}
 
 	acc := testutil.Accumulator{}
-	fs.Gather(&acc)
+	acc.GatherError(fs.Gather)
 
 	tags1 := map[string]string{
 		"file": dir + "log1.log",
@@ -4,7 +4,6 @@ import (
 	"bytes"
 	"encoding/base64"
 	"encoding/json"
-	"errors"
 	"fmt"
 	"io/ioutil"
 	"net"
@@ -149,31 +148,17 @@ func (h *GrayLog) Gather(acc telegraf.Accumulator) error {
 		h.client.SetHTTPClient(client)
 	}
 
-	errorChannel := make(chan error, len(h.Servers))
-
 	for _, server := range h.Servers {
 		wg.Add(1)
 		go func(server string) {
 			defer wg.Done()
-			if err := h.gatherServer(acc, server); err != nil {
-				errorChannel <- err
-			}
+			acc.AddError(h.gatherServer(acc, server))
 		}(server)
 	}
 
 	wg.Wait()
-	close(errorChannel)
-
-	// Get all errors and return them as one giant error
-	errorStrings := []string{}
-	for err := range errorChannel {
-		errorStrings = append(errorStrings, err.Error())
-	}
-
-	if len(errorStrings) == 0 {
-		return nil
-	}
-	return errors.New(strings.Join(errorStrings, "\n"))
+
+	return nil
 }
 
 // Gathers data from a particular server
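The graylog rewrite drops the whole errorChannel/strings.Join bookkeeping block in favor of one line per goroutine, acc.AddError(h.gatherServer(acc, server)), which is only correct because AddError treats a nil error as a no-op (successful gathers file nothing). The loop is also a good illustration of passing the loop variable into the goroutine: go func(server string) { ... }(server) hands each goroutine its own copy, which matters under pre-Go 1.22 range semantics where the loop variable is reused. A runnable sketch of that point:

package main

import (
	"fmt"
	"sync"
)

// Before Go 1.22, `for _, s := range servers` reuses one loop variable,
// so goroutines must receive it as a parameter (as the graylog code
// does) or they may all observe the final value.
func main() {
	servers := []string{"a", "b", "c"}
	var wg sync.WaitGroup
	for _, s := range servers {
		wg.Add(1)
		go func(server string) {
			defer wg.Done()
			fmt.Println("gathering from", server) // each goroutine has its own copy
		}(s)
	}
	wg.Wait()
}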
@@ -157,7 +157,7 @@ func TestNormalResponse(t *testing.T) {
 
 	for _, service := range graylog {
 		var acc testutil.Accumulator
-		err := service.Gather(&acc)
+		err := acc.GatherError(service.Gather)
 		require.NoError(t, err)
 		for k, v := range expectedFields {
 			acc.AssertContainsTaggedFields(t, k, v, validTags[k])
@@ -170,9 +170,9 @@ func TestHttpJson500(t *testing.T) {
 	graylog := genMockGrayLog(validJSON, 500)
 
 	var acc testutil.Accumulator
-	err := graylog[0].Gather(&acc)
+	err := acc.GatherError(graylog[0].Gather)
 
-	assert.NotNil(t, err)
+	assert.Error(t, err)
 	assert.Equal(t, 0, acc.NFields())
 }
 
@@ -181,9 +181,9 @@ func TestHttpJsonBadJson(t *testing.T) {
 	graylog := genMockGrayLog(invalidJSON, 200)
 
 	var acc testutil.Accumulator
-	err := graylog[0].Gather(&acc)
+	err := acc.GatherError(graylog[0].Gather)
 
-	assert.NotNil(t, err)
+	assert.Error(t, err)
 	assert.Equal(t, 0, acc.NFields())
 }
 
@@ -192,8 +192,8 @@ func TestHttpJsonEmptyResponse(t *testing.T) {
 	graylog := genMockGrayLog(empty, 200)
 
 	var acc testutil.Accumulator
-	err := graylog[0].Gather(&acc)
+	err := acc.GatherError(graylog[0].Gather)
 
-	assert.NotNil(t, err)
+	assert.Error(t, err)
 	assert.Equal(t, 0, acc.NFields())
 }
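A small cleanup rides along in these graylog tests: assert.NotNil(t, err) only checks for a non-nil value, whereas assert.Error(t, err) states the intent and produces a clearer failure message when no error arrives. Both pass for a genuine error, as this illustrative testify snippet shows:

package demo

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/assert"
)

// Illustrative only: both assertions pass for a non-nil error, but
// assert.Error documents intent and fails with "An error is expected"
// when the error is unexpectedly nil.
func TestErrorAssertionStyle(t *testing.T) {
	err := errors.New("boom")
	assert.NotNil(t, err) // passes, but reads like a nil-pointer check
	assert.Error(t, err)  // passes, and states that an error is expected
}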
Some files were not shown because too many files have changed in this diff.