Compare commits
518 Commits
.github/ISSUE_TEMPLATE.md (2 changes, vendored)

```diff
@@ -1,7 +1,7 @@
 ## Directions
 
 GitHub Issues are reserved for actionable bug reports and feature requests.
-General questions should be sent to the [InfluxDB mailing list](https://groups.google.com/forum/#!forum/influxdb).
+General questions should be asked at the [InfluxData Community](https://community.influxdata.com) site.
 
 Before opening an issue, search for similar bug reports or feature requests on GitHub Issues.
 If no similar issue can be found, fill out either the "Bug Report" or the "Feature Request" section below.
```
.github/PULL_REQUEST_TEMPLATE.md (6 changes, vendored)

```diff
@@ -1,5 +1,5 @@
 ### Required for all PRs:
 
-- [ ] CHANGELOG.md updated (we recommend not updating this until the PR has been approved by a maintainer)
-- [ ] Sign [CLA](https://influxdata.com/community/cla/) (if not already signed)
-- [ ] README.md updated (if adding a new plugin)
+- [ ] Signed [CLA](https://influxdata.com/community/cla/).
+- [ ] Associated README.md updated.
+- [ ] Has appropriate unit tests.
```
CHANGELOG.md (353 changes)
```diff
@@ -1,4 +1,345 @@
-## v1.2 [unreleased]
+## v1.4.4 [2017-11-08]
+
+- [#3401](https://github.com/influxdata/telegraf/pull/3401): Use schema specified in mqtt_consumer input.
+- [#3419](https://github.com/influxdata/telegraf/issues/3419): Redact datadog API key in log output.
+- [#3311](https://github.com/influxdata/telegraf/issues/3311): Fix error getting pids in netstat input.
+- [#3339](https://github.com/influxdata/telegraf/issues/3339): Support HOST_VAR envvar to locate /var in system input.
+- [#3383](https://github.com/influxdata/telegraf/issues/3383): Use current time if docker container read time is zero value.
+
+## v1.4.3 [2017-10-25]
+
+### Bugfixes
+
+- [#3327](https://github.com/influxdata/telegraf/issues/3327): Fix container name filters in docker input.
+- [#3321](https://github.com/influxdata/telegraf/issues/3321): Fix snmpwalk address format in leofs input.
+- [#3329](https://github.com/influxdata/telegraf/issues/3329): Fix case sensitivity issue in sqlserver query.
+- [#3342](https://github.com/influxdata/telegraf/pull/3342): Fix CPU input plugin stuck after suspend on Linux.
+- [#3013](https://github.com/influxdata/telegraf/issues/3013): Fix mongodb input panic when restarting mongodb.
+- [#3224](https://github.com/influxdata/telegraf/pull/3224): Preserve url path prefix in influx output.
+- [#3354](https://github.com/influxdata/telegraf/pull/3354): Fix TELEGRAF_OPTS expansion in systemd service unit.
+- [#3357](https://github.com/influxdata/telegraf/issues/3357): Remove warning when JSON contains null value.
+- [#3375](https://github.com/influxdata/telegraf/issues/3375): Fix ACL token usage in consul input plugin.
+- [#3369](https://github.com/influxdata/telegraf/issues/3369): Fix unquoting error with Tomcat 6.
+- [#3373](https://github.com/influxdata/telegraf/issues/3373): Fix syscall panic in diskio on some Linux systems.
+
+## v1.4.2 [2017-10-10]
+
+### Bugfixes
+
+- [#3259](https://github.com/influxdata/telegraf/issues/3259): Fix error if int larger than 32-bit in /proc/vmstat.
+- [#3265](https://github.com/influxdata/telegraf/issues/3265): Fix parsing of JSON with a UTF8 BOM in httpjson.
+- [#2887](https://github.com/influxdata/telegraf/issues/2887): Allow JSON data format to contain zero metrics.
+- [#3284](https://github.com/influxdata/telegraf/issues/3284): Fix format of connection_timeout in mqtt_consumer.
+- [#3081](https://github.com/influxdata/telegraf/issues/3081): Fix case sensitivity error in sqlserver input.
+- [#3297](https://github.com/influxdata/telegraf/issues/3297): Add support for proxy environment variables to http_response.
+- [#1588](https://github.com/influxdata/telegraf/issues/1588): Add support for standard proxy env vars in outputs.
+- [#3282](https://github.com/influxdata/telegraf/issues/3282): Fix panic in cpu input if number of cpus changes.
+- [#2854](https://github.com/influxdata/telegraf/issues/2854): Use chunked transfer encoding in InfluxDB output.
+
+## v1.4.1 [2017-09-26]
+
+### Bugfixes
+
+- [#3167](https://github.com/influxdata/telegraf/issues/3167): Fix MQTT input exits if Broker is not available on startup.
+- [#3217](https://github.com/influxdata/telegraf/issues/3217): Fix optional field value conversions in fluentd input.
+- [#3227](https://github.com/influxdata/telegraf/issues/3227): Whitelist allowed char classes for opentsdb output.
+- [#3232](https://github.com/influxdata/telegraf/issues/3232): Fix counter and gauge metric types.
+- [#3235](https://github.com/influxdata/telegraf/issues/3235): Fix skipped line with empty target in iptables.
+- [#3175](https://github.com/influxdata/telegraf/issues/3175): Fix duplicate keys in perf counters sqlserver query.
+- [#3230](https://github.com/influxdata/telegraf/issues/3230): Fix panic in statsd p100 calculation.
+- [#3242](https://github.com/influxdata/telegraf/issues/3242): Fix arm64 packages contain 32-bit executable.
+
+## v1.4 [2017-09-05]
+
+### Release Notes
+
+- The `kafka_consumer` input has been updated to support Kafka 0.9 and
+  above style consumer offset handling. The previous version of this plugin
+  supporting Kafka 0.8 and below is available as the `kafka_consumer_legacy`
+  plugin.
```
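For illustration, a minimal configuration sketch of the two consumers side by side. The plugin names come from the release note above; the option names (`brokers`, `zookeeper_peers`, `topics`, `consumer_group`, `data_format`) are assumptions based on typical Telegraf consumer configuration, not settings confirmed by this diff:

```toml
## Kafka 0.9+ consumer; offsets are tracked through Kafka consumer groups.
[[inputs.kafka_consumer]]
  brokers = ["localhost:9092"]
  topics = ["telegraf"]
  consumer_group = "telegraf_metrics_consumers"
  data_format = "influx"

## Kafka 0.8 and below; offsets are tracked in Zookeeper.
[[inputs.kafka_consumer_legacy]]
  zookeeper_peers = ["localhost:2181"]
  topics = ["telegraf"]
  consumer_group = "telegraf_metrics_consumers"
  data_format = "influx"
```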
```diff
+- In the `aerospike` input the `node_name` field has been changed to be a tag
+  for both the `aerospike_node` and `aerospike_namespace` measurements.
+
+- The default prometheus_client port has been changed to 9273.
```
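A sketch of pinning the listener explicitly, useful while Prometheus scrape configs are being moved off the old port; `listen` is assumed to be the relevant option name here:

```toml
[[outputs.prometheus_client]]
  ## 9273 is the new default as of 1.4; setting it explicitly keeps the
  ## scrape target obvious during the migration.
  listen = ":9273"
```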
````diff
+### New Plugins
+
+- [fail2ban](./plugins/inputs/fail2ban/README.md) - Thanks to @grugrut
+- [fluentd](./plugins/inputs/fluentd/README.md) - Thanks to @DanKans
+- [histogram](./plugins/aggregators/histogram/README.md) - Thanks to @vlamug
+- [minecraft](./plugins/inputs/minecraft/README.md) - Thanks to @adamperlin & @Ayrdrie
+- [openldap](./plugins/inputs/openldap/README.md) - Thanks to @cobaugh
+- [salesforce](./plugins/inputs/salesforce/README.md) - Thanks to @rody
+- [tomcat](./plugins/inputs/tomcat/README.md) - Thanks to @mlindes
+- [win_services](./plugins/inputs/win_services/README.md) - Thanks to @vlastahajek
+- [zipkin](./plugins/inputs/zipkin/README.md) - Thanks to @adamperlin & @Ayrdrie
+
+### Features
+
+- [#2487](https://github.com/influxdata/telegraf/pull/2487): Add Kafka 0.9+ consumer support
+- [#2773](https://github.com/influxdata/telegraf/pull/2773): Add support for self-signed certs to InfluxDB input plugin
+- [#2293](https://github.com/influxdata/telegraf/pull/2293): Add TCP listener for statsd input
+- [#2581](https://github.com/influxdata/telegraf/pull/2581): Add Docker container environment variables as tags. Only whitelisted
+- [#2817](https://github.com/influxdata/telegraf/pull/2817): Add timeout option to IPMI sensor plugin
+- [#2883](https://github.com/influxdata/telegraf/pull/2883): Add support for an optional SSL/TLS configuration to nginx input plugin
+- [#2882](https://github.com/influxdata/telegraf/pull/2882): Add timezone support for logparser timestamps.
+- [#2814](https://github.com/influxdata/telegraf/pull/2814): Add result_type field for http_response input.
+- [#2734](https://github.com/influxdata/telegraf/pull/2734): Add include/exclude filters for docker containers.
+- [#2602](https://github.com/influxdata/telegraf/pull/2602): Add secure connection support to graphite output.
+- [#2908](https://github.com/influxdata/telegraf/pull/2908): Add min/max response time on linux/darwin to ping.
+- [#2929](https://github.com/influxdata/telegraf/pull/2929): Add HTTP Proxy support to influxdb output.
+- [#2933](https://github.com/influxdata/telegraf/pull/2933): Add standard SSL options to mysql input.
+- [#2875](https://github.com/influxdata/telegraf/pull/2875): Add input plugin for fail2ban.
+- [#2924](https://github.com/influxdata/telegraf/pull/2924): Support HOST_PROC in processes and linux_sysctl_fs inputs.
+- [#2960](https://github.com/influxdata/telegraf/pull/2960): Add Minecraft input plugin.
+- [#2963](https://github.com/influxdata/telegraf/pull/2963): Add support for RethinkDB 1.0 handshake protocol.
+- [#2943](https://github.com/influxdata/telegraf/pull/2943): Add optional usage_active and time_active CPU metrics.
+- [#2973](https://github.com/influxdata/telegraf/pull/2973): Change default prometheus_client port.
+- [#2661](https://github.com/influxdata/telegraf/pull/2661): Add fluentd input plugin.
+- [#2990](https://github.com/influxdata/telegraf/pull/2990): Add result_type field to net_response input plugin.
+- [#2571](https://github.com/influxdata/telegraf/pull/2571): Add read timeout to socket_listener
+- [#2612](https://github.com/influxdata/telegraf/pull/2612): Add input plugin for OpenLDAP.
+- [#3042](https://github.com/influxdata/telegraf/pull/3042): Add network option to dns_query.
+- [#3054](https://github.com/influxdata/telegraf/pull/3054): Add redis_version field to redis input.
+- [#3063](https://github.com/influxdata/telegraf/pull/3063): Add tls options to docker input.
+- [#2387](https://github.com/influxdata/telegraf/pull/2387): Add histogram aggregator plugin.
+- [#3080](https://github.com/influxdata/telegraf/pull/3080): Add zipkin input plugin.
+- [#3023](https://github.com/influxdata/telegraf/pull/3023): Add Windows Services input plugin.
+- [#3098](https://github.com/influxdata/telegraf/pull/3098): Add path tag to logparser containing path of logfile.
+- [#3075](https://github.com/influxdata/telegraf/pull/3075): Add salesforce input plugin.
+- [#3097](https://github.com/influxdata/telegraf/pull/3097): Add option to run varnish under sudo.
+- [#3119](https://github.com/influxdata/telegraf/pull/3119): Add weighted_io_time to diskio input.
+- [#2978](https://github.com/influxdata/telegraf/pull/2978): Add gzip content-encoding support to influxdb output.
+- [#3127](https://github.com/influxdata/telegraf/pull/3127): Allow using system plugin in Windows.
+- [#3112](https://github.com/influxdata/telegraf/pull/3112): Add tomcat input plugin.
+- [#3182](https://github.com/influxdata/telegraf/pull/3182): HTTP headers can be added to InfluxDB output.
+
+### Bugfixes
+
+- [#2607](https://github.com/influxdata/telegraf/issues/2607): Improve logging of errors in Cassandra input.
+- [#2819](https://github.com/influxdata/telegraf/pull/2819): [enh] set db_version at 0 if query version fails
+- [#2749](https://github.com/influxdata/telegraf/pull/2749): Fixed sqlserver input to work with case sensitive server collation.
+- [#2716](https://github.com/influxdata/telegraf/pull/2716): Systemd does not see all shutdowns as failures
+- [#2782](https://github.com/influxdata/telegraf/pull/2782): Reuse transports in input plugins
+- [#2815](https://github.com/influxdata/telegraf/issues/2815): Inputs processes fails with "no such process".
+- [#1137](https://github.com/influxdata/telegraf/issues/1137): Fix multiple plugin loading in win_perf_counters.
+- [#2855](https://github.com/influxdata/telegraf/pull/2855): MySQL input: log and continue on field parse error.
+- [#2885](https://github.com/influxdata/telegraf/pull/2885): Fix timeout option in Windows ping input sample configuration.
+- [#2911](https://github.com/influxdata/telegraf/issues/2911): Fix Kinesis output plugin in govcloud.
+- [#2917](https://github.com/influxdata/telegraf/issues/2917): Fix Aerospike input adds all nodes to a single series.
+- [#2452](https://github.com/influxdata/telegraf/pull/2452): Improve Prometheus Client output documentation.
+- [#2984](https://github.com/influxdata/telegraf/pull/2984): Display error message if prometheus output fails to listen.
+- [#2997](https://github.com/influxdata/telegraf/issues/2997): Fix elasticsearch output content type detection warning.
+- [#2914](https://github.com/influxdata/telegraf/issues/2914): Prevent possible deadlock when using aggregators.
+- [#2860](https://github.com/influxdata/telegraf/issues/2860): Fix combined tagdrop/tagpass filtering.
+- [#3036](https://github.com/influxdata/telegraf/pull/3036): Fix filtering when both pass and drop match an item.
+- [#2964](https://github.com/influxdata/telegraf/issues/2964): Only report cpu usage for online cpus in docker input.
+- [#3050](https://github.com/influxdata/telegraf/pull/3050): Start first aggregator period at startup time.
+- [#2906](https://github.com/influxdata/telegraf/issues/2906): Fix panic in logparser if file cannot be opened.
+- [#2886](https://github.com/influxdata/telegraf/issues/2886): Default to localhost if zookeeper has no servers set.
+- [#2457](https://github.com/influxdata/telegraf/issues/2457): Fix docker memory and cpu reporting in Windows.
+- [#3058](https://github.com/influxdata/telegraf/issues/3058): Allow iptable entries with trailing text.
+- [#1680](https://github.com/influxdata/telegraf/issues/1680): Sanitize password from couchbase metric.
+- [#3104](https://github.com/influxdata/telegraf/issues/3104): Converge to typed value in prometheus output.
+- [#2899](https://github.com/influxdata/telegraf/issues/2899): Skip compilation of logparser and tail on solaris.
+- [#2951](https://github.com/influxdata/telegraf/issues/2951): Discard logging from tail library.
+- [#3126](https://github.com/influxdata/telegraf/pull/3126): Remove log message on ping timeout.
+- [#3144](https://github.com/influxdata/telegraf/issues/3144): Don't retry points beyond retention policy.
+- [#3015](https://github.com/influxdata/telegraf/issues/3015): Don't start Telegraf on install in Amazon Linux.
+- [#3153](https://github.com/influxdata/telegraf/issues/3053): Enable hddtemp input on all platforms.
+- [#3142](https://github.com/influxdata/telegraf/issues/3142): Escape backslash within string fields.
+- [#3162](https://github.com/influxdata/telegraf/issues/3162): Fix parsing of SHM remotes in ntpq input
+- [#3149](https://github.com/influxdata/telegraf/issues/3149): Don't fail parsing zpool stats if pool health is UNAVAIL on FreeBSD.
+- [#2672](https://github.com/influxdata/telegraf/issues/2672): Fix NSQ input plugin when used with version 1.0.0-compat.
+- [#2523](https://github.com/influxdata/telegraf/issues/2523): Added CloudWatch metric constraint validation.
+- [#3179](https://github.com/influxdata/telegraf/issues/3179): Skip non-numerical values in graphite format.
+- [#3187](https://github.com/influxdata/telegraf/issues/3187): Fix panic when handling string fields with escapes.
+
+## v1.3.5 [2017-07-26]
+
+### Bugfixes
+
+- [#3049](https://github.com/influxdata/telegraf/issues/3049): Fix prometheus output cannot be reloaded.
+- [#3037](https://github.com/influxdata/telegraf/issues/3037): Fix filestat reporting exists when cannot list directory.
+- [#2386](https://github.com/influxdata/telegraf/issues/2386): Fix ntpq parse issue when using dns_lookup.
+- [#2554](https://github.com/influxdata/telegraf/issues/2554): Fix panic when agent.interval = "0s".
+
+## v1.3.4 [2017-07-12]
+
+### Bugfixes
+
+- [#3001](https://github.com/influxdata/telegraf/issues/3001): Fix handling of escape characters within fields.
+- [#2988](https://github.com/influxdata/telegraf/issues/2988): Fix chrony plugin does not track system time offset.
+- [#3004](https://github.com/influxdata/telegraf/issues/3004): Do not allow metrics with trailing slashes.
+- [#3011](https://github.com/influxdata/telegraf/issues/3011): Prevent Write from being called concurrently.
+
+## v1.3.3 [2017-06-28]
+
+### Bugfixes
+
+- [#2915](https://github.com/influxdata/telegraf/issues/2915): Allow dos line endings in tail and logparser.
+- [#2937](https://github.com/influxdata/telegraf/issues/2937): Remove label value sanitization in prometheus output.
+- [#2948](https://github.com/influxdata/telegraf/issues/2948): Fix bug parsing default timestamps with modified precision.
+- [#2954](https://github.com/influxdata/telegraf/issues/2954): Fix panic in elasticsearch input if cannot determine master.
+
+## v1.3.2 [2017-06-14]
+
+### Bugfixes
+
+- [#2862](https://github.com/influxdata/telegraf/issues/2862): Fix InfluxDB UDP metric splitting.
+- [#2888](https://github.com/influxdata/telegraf/issues/2888): Fix mongodb/leofs urls without scheme.
+- [#2822](https://github.com/influxdata/telegraf/issues/2822): Fix inconsistent label dimensions in prometheus output.
+
+## v1.3.1 [2017-05-31]
+
+### Bugfixes
+
+- [#2749](https://github.com/influxdata/telegraf/pull/2749): Fixed sqlserver input to work with case sensitive server collation.
+- [#2782](https://github.com/influxdata/telegraf/pull/2782): Reuse transports in input plugins
+- [#2815](https://github.com/influxdata/telegraf/issues/2815): Inputs processes fails with "no such process".
+- [#2851](https://github.com/influxdata/telegraf/pull/2851): Fix InfluxDB output database quoting.
+- [#2856](https://github.com/influxdata/telegraf/issues/2856): Fix net input on older Linux kernels.
+- [#2848](https://github.com/influxdata/telegraf/pull/2848): Fix panic in mongo input.
+- [#2869](https://github.com/influxdata/telegraf/pull/2869): Fix length calculation of split metric buffer.
+
+## v1.3 [2017-05-15]
+
+### Release Notes
+
+- Users of the windows `ping` plugin will need to drop or migrate their
+  measurements in order to continue using the plugin. The reason for this is that
+  the windows plugin was outputting a different type than the linux plugin. This
+  made it impossible to use the `ping` plugin for both windows and linux
+  machines.
+
+- Ceph: the `ceph_pgmap_state` metric content has been modified to use a unique field `count`, with each state expressed as a `state` tag.
+
+  Telegraf < 1.3:
+
+  ```
+  # field_name             value
+  active+clean             123
+  active+clean+scrubbing   3
+  ```
+
+  Telegraf >= 1.3:
+
+  ```
+  # field_name  value  tag
+  count         123    state=active+clean
+  count         3      state=active+clean+scrubbing
+  ```
+
+- The [Riemann output plugin](./plugins/outputs/riemann) has been rewritten
+  and the previous riemann plugin is _incompatible_ with the new one. The reasons
+  for this are outlined in issue [#1878](https://github.com/influxdata/telegraf/issues/1878).
+  The previous riemann output will still be available using
+  `outputs.riemann_legacy` if needed, but that will eventually be deprecated.
+  It is highly recommended that all users migrate to the new riemann output plugin.
````
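A sketch of running both outputs side by side during the migration. Only the plugin names `outputs.riemann` and `outputs.riemann_legacy` come from the note above; the `url` and `transport` option names are assumptions, not settings confirmed by this diff:

```toml
## Rewritten riemann output (recommended).
[[outputs.riemann]]
  url = "tcp://localhost:5555"

## Previous implementation, kept under a legacy name until it is deprecated.
[[outputs.riemann_legacy]]
  url = "localhost:5555"
  transport = "tcp"
```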
```diff
+- Generic [socket_listener](./plugins/inputs/socket_listener) and
+  [socket_writer](./plugins/outputs/socket_writer) plugins have been implemented
+  for receiving and sending UDP, TCP, unix, & unix-datagram data. These plugins
+  will replace udp_listener and tcp_listener, which are still available but will
+  be deprecated eventually.
```
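As a sketch, replacing a udp_listener with the generic plugin might look like the following; the scheme-prefix convention is described in the note above, while the `service_address` and `address` option names are assumptions based on typical Telegraf socket plugin configuration:

```toml
## The scheme in service_address selects the transport
## (tcp, udp, unix, unixgram).
[[inputs.socket_listener]]
  service_address = "udp://:8094"
  data_format = "influx"

## Matching writer for sending metrics to a socket endpoint.
[[outputs.socket_writer]]
  address = "tcp://127.0.0.1:8094"
  data_format = "influx"
```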
```diff
+### Features
+
+- [#2721](https://github.com/influxdata/telegraf/pull/2721): Added SASL options for kafka output plugin.
+- [#2723](https://github.com/influxdata/telegraf/pull/2723): Added SSL configuration for input haproxy.
+- [#2494](https://github.com/influxdata/telegraf/pull/2494): Add interrupts input plugin.
+- [#2094](https://github.com/influxdata/telegraf/pull/2094): Add generic socket listener & writer.
+- [#2204](https://github.com/influxdata/telegraf/pull/2204): Extend http_response to support searching for a substring in response. Return 1 if found, else 0.
+- [#2137](https://github.com/influxdata/telegraf/pull/2137): Added userstats to mysql input plugin.
+- [#2179](https://github.com/influxdata/telegraf/pull/2179): Added more InnoDB metrics to the MySQL plugin.
+- [#2229](https://github.com/influxdata/telegraf/pull/2229): `ceph_pgmap_state` metric now uses a single field `count`, with PG state published as `state` tag.
+- [#2251](https://github.com/influxdata/telegraf/pull/2251): InfluxDB output: use own client for improved throughput and fewer allocations.
+- [#2330](https://github.com/influxdata/telegraf/pull/2330): Keep -config-directory when running as Windows service.
+- [#1900](https://github.com/influxdata/telegraf/pull/1900): Riemann plugin rewrite.
+- [#1453](https://github.com/influxdata/telegraf/pull/1453): diskio: add support for name templates and udev tags.
+- [#2277](https://github.com/influxdata/telegraf/pull/2277): add integer metrics for Consul check health state.
+- [#2201](https://github.com/influxdata/telegraf/pull/2201): Add lock option to the IPtables input plugin.
+- [#2244](https://github.com/influxdata/telegraf/pull/2244): Support ipmi_sensor plugin querying local ipmi sensors.
+- [#2339](https://github.com/influxdata/telegraf/pull/2339): Increment gather_errors for all errors emitted by inputs.
+- [#2071](https://github.com/influxdata/telegraf/issues/2071): Use official docker SDK.
+- [#1678](https://github.com/influxdata/telegraf/pull/1678): Add AMQP consumer input plugin
+- [#2512](https://github.com/influxdata/telegraf/pull/2512): Added pprof tool.
+- [#2501](https://github.com/influxdata/telegraf/pull/2501): Support DEAD(X) state in system input plugin.
+- [#2522](https://github.com/influxdata/telegraf/pull/2522): Add support for mongodb client certificates.
+- [#1948](https://github.com/influxdata/telegraf/pull/1948): Support adding SNMP table indexes as tags.
+- [#2332](https://github.com/influxdata/telegraf/pull/2332): Add Elasticsearch 5.x output
+- [#2587](https://github.com/influxdata/telegraf/pull/2587): Add json timestamp units configurability
+- [#2597](https://github.com/influxdata/telegraf/issues/2597): Add support for Linux sysctl-fs metrics.
+- [#2425](https://github.com/influxdata/telegraf/pull/2425): Support to include/exclude docker container labels as tags
+- [#1667](https://github.com/influxdata/telegraf/pull/1667): dmcache input plugin
+- [#2637](https://github.com/influxdata/telegraf/issues/2637): Add support for precision in http_listener
+- [#2636](https://github.com/influxdata/telegraf/pull/2636): Add `message_len_max` option to `kafka_consumer` input
+- [#1100](https://github.com/influxdata/telegraf/issues/1100): Add collectd parser
+- [#1820](https://github.com/influxdata/telegraf/issues/1820): easier plugin testing without outputs
+- [#2493](https://github.com/influxdata/telegraf/pull/2493): Check signature in the GitHub webhook plugin
+- [#2038](https://github.com/influxdata/telegraf/issues/2038): Add papertrail support to webhooks
+- [#2253](https://github.com/influxdata/telegraf/pull/2253): Change jolokia plugin to use bulk requests.
+- [#2575](https://github.com/influxdata/telegraf/issues/2575): Add diskio input for Darwin
+- [#2705](https://github.com/influxdata/telegraf/pull/2705): Kinesis output: add use_random_partitionkey option
+- [#2635](https://github.com/influxdata/telegraf/issues/2635): add tcp keep-alive to socket_listener & socket_writer
+- [#2031](https://github.com/influxdata/telegraf/pull/2031): Add Kapacitor input plugin
+- [#2732](https://github.com/influxdata/telegraf/pull/2732): Use go 1.8.1
+- [#2712](https://github.com/influxdata/telegraf/issues/2712): Documentation for rabbitmq input plugin
+- [#2141](https://github.com/influxdata/telegraf/pull/2141): Logparser handles newly-created files.
+
+### Bugfixes
+
+- [#2633](https://github.com/influxdata/telegraf/pull/2633): ipmi_sensor: allow @ symbol in password
+- [#2077](https://github.com/influxdata/telegraf/issues/2077): SQL Server Input - Arithmetic overflow error converting numeric to data type int.
+- [#2262](https://github.com/influxdata/telegraf/issues/2262): Flush jitter can inhibit metric collection.
+- [#2318](https://github.com/influxdata/telegraf/issues/2318): haproxy input - Add missing fields.
+- [#2287](https://github.com/influxdata/telegraf/issues/2287): Kubernetes input: Handle null startTime for stopped pods.
+- [#2356](https://github.com/influxdata/telegraf/issues/2356): cpu input panic when /proc/stat is empty.
+- [#2341](https://github.com/influxdata/telegraf/issues/2341): telegraf swallowing panics in --test mode.
+- [#2358](https://github.com/influxdata/telegraf/pull/2358): Create pidfile with 644 permissions & defer file deletion.
+- [#2360](https://github.com/influxdata/telegraf/pull/2360): Fixed install/remove of telegraf on non-systemd Debian/Ubuntu systems
+- [#2282](https://github.com/influxdata/telegraf/issues/2282): Reloading telegraf freezes prometheus output.
+- [#2390](https://github.com/influxdata/telegraf/issues/2390): Empty tag value causes error on InfluxDB output.
+- [#2380](https://github.com/influxdata/telegraf/issues/2380): buffer_size field value is negative number from "internal" plugin.
+- [#2414](https://github.com/influxdata/telegraf/issues/2414): Missing error handling in the MySQL plugin leads to segmentation violation.
+- [#2462](https://github.com/influxdata/telegraf/pull/2462): Fix type conflict in windows ping plugin.
+- [#2178](https://github.com/influxdata/telegraf/issues/2178): logparser: regexp with lookahead.
+- [#2466](https://github.com/influxdata/telegraf/issues/2466): Telegraf can crash in LoadDirectory on 0600 files.
+- [#2215](https://github.com/influxdata/telegraf/issues/2215): Iptables input: document better that rules without a comment are ignored.
+- [#2483](https://github.com/influxdata/telegraf/pull/2483): Fix win_perf_counters capping values at 100.
+- [#2498](https://github.com/influxdata/telegraf/pull/2498): Exporting Ipmi.Path to be set by config.
+- [#2500](https://github.com/influxdata/telegraf/pull/2500): Remove warning if parse empty content
+- [#2520](https://github.com/influxdata/telegraf/pull/2520): Update default value for Cloudwatch rate limit
+- [#2513](https://github.com/influxdata/telegraf/issues/2513): create /etc/telegraf/telegraf.d directory in tarball.
+- [#2541](https://github.com/influxdata/telegraf/issues/2541): Return error on unsupported serializer data format.
+- [#1827](https://github.com/influxdata/telegraf/issues/1827): Fix Windows Performance Counters multi instance identifier
+- [#2576](https://github.com/influxdata/telegraf/pull/2576): Add write timeout to Riemann output
+- [#2596](https://github.com/influxdata/telegraf/pull/2596): fix timestamp parsing on prometheus plugin
+- [#2610](https://github.com/influxdata/telegraf/pull/2610): Fix deadlock when output cannot write
+- [#2410](https://github.com/influxdata/telegraf/issues/2410): Fix connection leak in postgresql.
+- [#2628](https://github.com/influxdata/telegraf/issues/2628): Set default measurement name for snmp input.
+- [#2649](https://github.com/influxdata/telegraf/pull/2649): Improve performance of diskio with many disks
+- [#2671](https://github.com/influxdata/telegraf/issues/2671): The internal input plugin uses the wrong units for `heap_objects`
+- [#2684](https://github.com/influxdata/telegraf/pull/2684): Fix ipmi_sensor config is shared between all plugin instances
+- [#2450](https://github.com/influxdata/telegraf/issues/2450): Network statistics not collected when system has alias interfaces
+- [#1911](https://github.com/influxdata/telegraf/issues/1911): Sysstat plugin needs LANG=C or similar locale
+- [#2528](https://github.com/influxdata/telegraf/issues/2528): File output closes standard streams on reload.
+- [#2603](https://github.com/influxdata/telegraf/issues/2603): AMQP output disconnect blocks all outputs
+- [#2706](https://github.com/influxdata/telegraf/issues/2706): Improve documentation for redis input plugin
+
+## v1.2.1 [2017-02-01]
+
+### Bugfixes
+
+- [#2317](https://github.com/influxdata/telegraf/issues/2317): Fix segfault on nil metrics with influxdb output.
+- [#2324](https://github.com/influxdata/telegraf/issues/2324): Fix negative number handling.
+
+### Features
+
+- [#2348](https://github.com/influxdata/telegraf/pull/2348): Go version 1.7.4 -> 1.7.5
+
+## v1.2 [2017-01-00]
 
 ### Release Notes
 
```
```diff
@@ -54,7 +395,7 @@ plugins, not just statsd.
 - [#1775](https://github.com/influxdata/telegraf/issues/1775): Cache & expire metrics for delivery to prometheus.
 - [#2146](https://github.com/influxdata/telegraf/issues/2146): Fix potential panic in aggregator plugin metric maker.
 - [#1843](https://github.com/influxdata/telegraf/pull/1843) & [#1668](https://github.com/influxdata/telegraf/issues/1668): Add optional ability to define PID as a tag.
-- [#1730](https://github.com/influxdata/telegraf/issues/1730): Fix win_perf_counters not gathering non-English counters.
+- [#1730](https://github.com/influxdata/telegraf/issues/1730) & [#2261](https://github.com/influxdata/telegraf/pull/2261): Fix win_perf_counters not gathering non-English counters.
 - [#2061](https://github.com/influxdata/telegraf/issues/2061): Fix panic when file stat info cannot be collected due to permissions or other issue(s).
 - [#2045](https://github.com/influxdata/telegraf/issues/2045): Graylog output should set short_message field.
 - [#1904](https://github.com/influxdata/telegraf/issues/1904): Hddtemp always put the value in the field temperature.
```
```diff
@@ -67,7 +408,10 @@ plugins, not just statsd.
 - [#1973](https://github.com/influxdata/telegraf/issues/1973): Partial fix: logparser CLF pattern with IPv6 addresses.
 - [#1975](https://github.com/influxdata/telegraf/issues/1975) & [#2102](https://github.com/influxdata/telegraf/issues/2102): Fix thread-safety when using multiple instances of the statsd input plugin.
 - [#2027](https://github.com/influxdata/telegraf/issues/2027): docker input: interface conversion panic fix.
-- [#1814](https://github.com/influxdata/telegraf/issues/1814): snmp: ensure proper context is present on error messages
+- [#1814](https://github.com/influxdata/telegraf/issues/1814): snmp: ensure proper context is present on error messages.
+- [#2299](https://github.com/influxdata/telegraf/issues/2299): opentsdb: add tcp:// prefix if no scheme provided.
+- [#2297](https://github.com/influxdata/telegraf/issues/2297): influx parser: parse line-protocol without newlines.
+- [#2245](https://github.com/influxdata/telegraf/issues/2245): influxdb output: fix field type conflict blocking output buffer.
 
 ## v1.1.2 [2016-12-12]
 
```
```diff
@@ -218,8 +562,11 @@ which can be installed via
 evaluated at every flush interval, rather than once at startup. This makes it
 consistent with the behavior of `collection_jitter`.
 
+- postgresql plugins now handle oid and name typed columns seamlessly, previously they were ignored/skipped.
+
 ### Features
 
+- [#1617](https://github.com/influxdata/telegraf/pull/1617): postgresql_extensible now handles name and oid types correctly.
 - [#1413](https://github.com/influxdata/telegraf/issues/1413): Separate container_version from container_image tag.
 - [#1525](https://github.com/influxdata/telegraf/pull/1525): Support setting per-device and total metrics for Docker network and blockio.
 - [#1466](https://github.com/influxdata/telegraf/pull/1466): MongoDB input plugin: adding per DB stats from db.stats()
```
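The flush-jitter behavior described in the context lines of the hunk above is configured in the agent table of telegraf.conf. A minimal sketch, assuming the standard agent option names (`interval`, `collection_jitter`, `flush_interval`, `flush_jitter`); the values are arbitrary:

```toml
[agent]
  interval = "10s"
  ## Random per-collection delay, applied each interval.
  collection_jitter = "0s"
  flush_interval = "10s"
  ## Random per-flush delay; re-evaluated at every flush interval
  ## rather than chosen once at startup.
  flush_jitter = "5s"
```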
CONTRIBUTING.md

````diff
@@ -124,7 +124,7 @@ You should also add the following to your SampleConfig() return:
 
 ```toml
   ## Data format to consume.
-  ## Each data format has it's own unique set of configuration options, read
+  ## Each data format has its own unique set of configuration options, read
   ## more about them here:
   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
   data_format = "influx"
@@ -254,7 +254,7 @@ You should also add the following to your SampleConfig() return:
 
 ```toml
   ## Data format to output.
-  ## Each data format has it's own unique set of configuration options, read
+  ## Each data format has its own unique set of configuration options, read
   ## more about them here:
   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
   data_format = "influx"
````
Godeps (134 changes)

```
@@ -1,65 +1,89 @@
github.com/Shopify/sarama 8aadb476e66ca998f2f6bb3c993e9a2daa3666b9
github.com/Sirupsen/logrus 219c8cb75c258c552e999735be6df753ffc7afdc
github.com/aerospike/aerospike-client-go 7f3a312c3b2a60ac083ec6da296091c52c795c63
github.com/amir/raidman 53c1b967405155bfc8758557863bf2e14f814687
github.com/aws/aws-sdk-go 13a12060f716145019378a10e2806c174356b857
github.com/beorn7/perks 3ac7bf7a47d159a033b107610db8a1b6575507a4
github.com/cenkalti/backoff 4dc77674aceaabba2c7e3da25d4c823edfb73f99
github.com/couchbase/go-couchbase cb664315a324d87d19c879d9cc67fda6be8c2ac1
github.com/couchbase/gomemcached a5ea6356f648fec6ab89add00edd09151455b4b2
collectd.org 2ce144541b8903101fb8f1483cc0497a68798122
github.com/aerospike/aerospike-client-go 95e1ad7791bdbca44707fedbb29be42024900d9c
github.com/amir/raidman c74861fe6a7bb8ede0a010ce4485bdbb4fc4c985
github.com/apache/thrift 4aaa92ece8503a6da9bc6701604f69acf2b99d07
github.com/aws/aws-sdk-go c861d27d0304a79f727e9a8a4e2ac1e74602fdc0
github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
github.com/bsm/sarama-cluster ccdc0803695fbce22f1706d04ded46cd518fd832
github.com/cenkalti/backoff b02f2bbce11d7ea6b97f282ef1771b0fe2f65ef3
github.com/couchbase/go-couchbase bfe555a140d53dc1adf390f1a1d4b0fd4ceadb28
github.com/couchbase/gomemcached 4a25d2f4e1dea9ea7dd76dfd943407abf9b07d29
github.com/couchbase/goutils 5823a0cbaaa9008406021dc5daf80125ea30bba6
github.com/dancannon/gorethink e7cac92ea2bc52638791a021f212145acfedb1fc
github.com/davecgh/go-spew 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d
github.com/docker/engine-api 8924d6900370b4c7e7984be5adc61f50a80d7537
github.com/docker/go-connections f549a9393d05688dff0992ef3efd8bbe6c628aeb
github.com/docker/go-units 5d2041e26a699eaca682e2ea41c8f891e1060444
github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
github.com/docker/docker f5ec1e2936dcbe7b5001c2b817188b095c700c27
github.com/docker/go-connections 990a1a1a70b0da4c4cb70e117971a4f0babfbf1a
github.com/eapache/go-resiliency b86b1ec0dd4209a588dc1285cdd471e73525c0b3
github.com/eapache/queue ded5959c0d4e360646dc9e9908cff48666781367
github.com/eclipse/paho.mqtt.golang 0f7a459f04f13a41b7ed752d47944528d4bf9a86
github.com/go-sql-driver/mysql 1fca743146605a172a266e1654e01e5cd5669bee
github.com/gobwas/glob 49571a1557cd20e6a2410adc6421f85b66c730b5
github.com/golang/protobuf 552c7b9542c194800fd493123b3798ef0a832032
github.com/golang/snappy d9eb7a3d35ec988b8585d4a0068e462c27d28380
github.com/gorilla/context 1ea25387ff6f684839d82767c1733ff4d4d15d0a
github.com/gorilla/mux c9e326e2bdec29039a3761c07bece13133863e1e
github.com/eapache/go-xerial-snappy bb955e01b9346ac19dc29eb16586c90ded99a98c
github.com/eapache/queue 44cc805cf13205b55f69e14bcb69867d1ae92f98
github.com/eclipse/paho.mqtt.golang d4f545eb108a2d19f9b1a735689dbfb719bc21fb
github.com/go-logfmt/logfmt 390ab7935ee28ec6b286364bba9b4dd6410cb3d5
github.com/go-sql-driver/mysql 2e00b5cd70399450106cec6431c2e2ce3cae5034
github.com/gobwas/glob bea32b9cd2d6f55753d94a28e959b13f0244797a
github.com/go-ini/ini 9144852efba7c4daf409943ee90767da62d55438
github.com/gogo/protobuf 7b6c6391c4ff245962047fc1e2c6e08b1cdfa0e8
github.com/golang/protobuf 8ee79997227bf9b34611aee7946ae64735e6fd93
github.com/golang/snappy 7db9049039a047d955fe8c19b83c8ff5abd765c7
github.com/go-ole/go-ole be49f7c07711fcb603cff39e1de7c67926dc0ba7
github.com/google/go-cmp f94e52cad91c65a63acc1e75d4be223ea22e99bc
github.com/gorilla/mux 392c28fe23e1c45ddba891b0320b3b5df220beea
github.com/go-sql-driver/mysql 2e00b5cd70399450106cec6431c2e2ce3cae5034
github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478
github.com/hashicorp/consul 5aa90455ce78d4d41578bafc86305e6e6b28d7d2
github.com/hpcloud/tail b2940955ab8b26e19d43a43c4da0475dd81bdb56
github.com/influxdata/config b79f6829346b8d6e78ba73544b1e1038f1f1c9da
github.com/influxdata/influxdb fc57c0f7c635df3873f3d64f0ed2100ddc94d5ae
github.com/influxdata/toml af4df43894b16e3fd2b788d01bd27ad0776ef2d0
github.com/hashicorp/consul 63d2fc68239b996096a1c55a0d4b400ea4c2583f
github.com/influxdata/tail a395bf99fe07c233f41fba0735fa2b13b58588ea
github.com/influxdata/toml 5d1d907f22ead1cd47adde17ceec5bda9cacaf8f
github.com/influxdata/wlog 7c63b0a71ef8300adc255344d275e10e5c3a71ec
github.com/kardianos/osext 29ae4ffbc9a6fe9fb2bc5029050ce6996ea1d3bc
github.com/kardianos/service 5e335590050d6d00f3aa270217d288dda1c94d0a
github.com/jackc/pgx b84338d7d62598f75859b2b146d830b22f1b9ec8
github.com/jmespath/go-jmespath bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d
github.com/kardianos/osext c2c54e542fb797ad986b31721e1baedf214ca413
github.com/kardianos/service 6d3a0ee7d3425d9d835debc51a0ca1ffa28f4893
github.com/kballard/go-shellquote d8ec1a69a250a17bb0e419c386eac1f3711dc142
github.com/klauspost/crc32 19b0b332c9e4516a6370a0456e6182c3b5036720
github.com/lib/pq e182dc4027e2ded4b19396d638610f2653295f36
github.com/matttproud/golang_protobuf_extensions d0c3fe89de86839aecf2e0579c40ba3bb336a453
github.com/miekg/dns cce6c130cdb92c752850880fd285bea1d64439dd
github.com/mreiferson/go-snappystream 028eae7ab5c4c9e2d1cb4c4ca1e53259bbe7e504
github.com/matttproud/golang_protobuf_extensions c12348ce28de40eed0136aa2b644d0ee0650e56c
github.com/Microsoft/go-winio ce2922f643c8fd76b46cadc7f404a06282678b34
github.com/miekg/dns 99f84ae56e75126dd77e5de4fae2ea034a468ca1
github.com/naoina/go-stringutil 6b638e95a32d0c1131db0e7fe83775cbea4a0d0b
github.com/nats-io/nats ea8b4fd12ebb823073c0004b9f09ac8748f4f165
github.com/nats-io/nuid a5152d67cf63cbfb5d992a395458722a45194715
github.com/nsqio/go-nsq 0b80d6f05e15ca1930e0c5e1d540ed627e299980
github.com/nats-io/go-nats ea9585611a4ab58a205b9b125ebd74c389a6b898
github.com/nats-io/nats ea9585611a4ab58a205b9b125ebd74c389a6b898
github.com/nats-io/nuid 289cccf02c178dc782430d534e3c1f5b72af807f
github.com/nsqio/go-nsq a53d495e81424aaf7a7665a9d32a97715c40e953
github.com/opencontainers/runc 89ab7f2ccc1e45ddf6485eaa802c35dcf321dfc8
github.com/prometheus/client_golang 18acf9993a863f4c4b40612e19cdd243e7c86831
github.com/opentracing-contrib/go-observer a52f2342449246d5bcc273e65cbdcfa5f7d6c63c
github.com/opentracing/opentracing-go 06f47b42c792fef2796e9681353e1d908c417827
github.com/openzipkin/zipkin-go-opentracing 1cafbdfde94fbf2b373534764e0863aa3bd0bf7b
github.com/pierrec/lz4 5c9560bfa9ace2bf86080bf40d46b34ae44604df
github.com/pierrec/xxHash 5a004441f897722c627870a981d02b29924215fa
github.com/pkg/errors 645ef00459ed84a119197bfb8d8205042c6df63d
github.com/pmezard/go-difflib/difflib 792786c7400a136282c1664665ae0a8db921c6c2
github.com/prometheus/client_golang c317fb74746eac4fc65fe3909195f4cf67c5562a
github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
github.com/prometheus/common e8eabff8812b05acf522b45fdcd725a785188e37
github.com/prometheus/procfs 406e5b7bfd8201a36e2bb5f7bdae0b03380c2ce8
github.com/samuel/go-zookeeper 218e9c81c0dd8b3b18172b2bbfad92cc7d6db55f
github.com/shirou/gopsutil 1516eb9ddc5e61ba58874047a98f8b44b5e585e8
github.com/soniah/gosnmp 3fe3beb30fa9700988893c56a63b1df8e1b68c26
github.com/streadway/amqp b4f3ceab0337f013208d31348b578d83c0064744
github.com/stretchr/testify 1f4a1643a57e798696635ea4c126e9127adb7d3c
github.com/vjeantet/grok 83bfdfdfd1a8146795b28e547a8e3c8b28a466c2
github.com/prometheus/common dd2f054febf4a6c00f2343686efb775948a8bff4
github.com/prometheus/procfs 1878d9fbb537119d24b21ca07effd591627cd160
github.com/rcrowley/go-metrics 1f30fe9094a513ce4c700b9a54458bbb0c96996c
github.com/samuel/go-zookeeper 1d7be4effb13d2d908342d349d71a284a7542693
github.com/satori/go.uuid 5bf94b69c6b68ee1b541973bb8e1144db23a194b
github.com/shirou/gopsutil 48fc5612898a1213aa5d6a0fb2d4f7b968e898fb
github.com/shirou/w32 3c9377fc6748f222729a8270fe2775d149a249ad
github.com/Shopify/sarama c01858abb625b73a3af51d0798e4ad42c8147093
github.com/Sirupsen/logrus 61e43dc76f7ee59a82bdf3d71033dc12bea4c77d
github.com/soniah/gosnmp 5ad50dc75ab389f8a1c9f8a67d3a1cd85f67ed15
github.com/StackExchange/wmi f3e2bae1e0cb5aef83e319133eabfee30013a4a5
github.com/streadway/amqp 63795daa9a446c920826655f26ba31c81c860fd6
github.com/stretchr/objx 1a9d0bb9f541897e62256577b352fdbc1fb4fd94
github.com/stretchr/testify 4d4bfba8f1d1027c4fdbe371823030df51419987
github.com/vjeantet/grok d73e972b60935c7fec0b4ffbc904ed39ecaf7efe
github.com/wvanbergen/kafka bc265fedb9ff5b5c5d3c0fdcef4a819b3523d3ee
github.com/wvanbergen/kazoo-go 0f768712ae6f76454f987c3356177e138df258f8
github.com/yuin/gopher-lua bf3808abd44b1e55143a2d7f08571aaa80db1808
github.com/wvanbergen/kazoo-go 968957352185472eacb69215fa3dbfcfdbac1096
github.com/yuin/gopher-lua 66c871e454fcf10251c61bf8eff02d0978cae75a
github.com/zensqlmonitor/go-mssqldb ffe5510c6fa5e15e6d983210ab501c815b56b363
golang.org/x/crypto c197bcf24cde29d3f73c7b4ac6fd41f4384e8af6
golang.org/x/net 6acef71eb69611914f7a30939ea9f6e194c78172
golang.org/x/text a71fd10341b064c10f4a81ceac72bcf70f26ea34
gopkg.in/dancannon/gorethink.v1 7d1af5be49cb5ecc7b177bf387d232050299d6ef
gopkg.in/fatih/pool.v2 cba550ebf9bce999a02e963296d4bc7a486cb715
gopkg.in/mgo.v2 d90005c5262a3463800497ea5a89aed5fe22c886
gopkg.in/yaml.v2 a83829b6f1293c91addabc89d0571c246397bbf4
golang.org/x/crypto dc137beb6cce2043eb6b5f223ab8bf51c32459f4
golang.org/x/net f2499483f923065a842d38eb4c7f1927e6fc6e6d
golang.org/x/sys 739734461d1c916b6c72a63d7efda2b27edb369f
golang.org/x/text 506f9d5c962f284575e88337e7d9296d27e729d3
gopkg.in/asn1-ber.v1 4e86f4367175e39f69d9358a5f17b4dda270378d
gopkg.in/fatih/pool.v2 6e328e67893eb46323ad06f0e92cb9536babbabc
gopkg.in/fsnotify.v1 a8a77c9133d2d6fd8334f3260d06f60e8d80a5fb
gopkg.in/gorethink/gorethink.v3 7ab832f7b65573104a555d84a27992ae9ea1f659
gopkg.in/ldap.v2 8168ee085ee43257585e50c6441aadf54ecb2c9f
gopkg.in/mgo.v2 3f83fa5005286a7fe593b055f0d7771a7dce4655
gopkg.in/olivere/elastic.v5 3113f9b9ad37509fe5f8a0e5e91c96fdc4435e26
gopkg.in/tomb.v1 dd632973f1e7218eb1089048e0798ec9ae7dceb8
gopkg.in/yaml.v2 4c78c975fe7c825c6d1466c42be594d1d6f3aba6
```

Godeps_windows (file deleted)

```diff
@@ -1,11 +0,0 @@
-github.com/Microsoft/go-winio ce2922f643c8fd76b46cadc7f404a06282678b34
-github.com/StackExchange/wmi f3e2bae1e0cb5aef83e319133eabfee30013a4a5
-github.com/go-ole/go-ole be49f7c07711fcb603cff39e1de7c67926dc0ba7
-github.com/shirou/w32 3c9377fc6748f222729a8270fe2775d149a249ad
-golang.org/x/sys a646d33e2ee3172a661fc09bca23bb4889a41bc8
-github.com/go-ini/ini 9144852efba7c4daf409943ee90767da62d55438
-github.com/jmespath/go-jmespath bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d
-github.com/pmezard/go-difflib/difflib 792786c7400a136282c1664665ae0a8db921c6c2
-github.com/stretchr/objx 1a9d0bb9f541897e62256577b352fdbc1fb4fd94
-gopkg.in/fsnotify.v1 a8a77c9133d2d6fd8334f3260d06f60e8d80a5fb
-gopkg.in/tomb.v1 dd632973f1e7218eb1089048e0798ec9ae7dceb8
```
141
Makefile
141
Makefile
@@ -1,56 +1,73 @@
VERSION := $(shell sh -c 'git describe --always --tags')
BRANCH := $(shell sh -c 'git rev-parse --abbrev-ref HEAD')
COMMIT := $(shell sh -c 'git rev-parse --short HEAD')
PREFIX := /usr/local
VERSION := $(shell git describe --exact-match --tags 2>/dev/null)
BRANCH := $(shell git rev-parse --abbrev-ref HEAD)
COMMIT := $(shell git rev-parse --short HEAD)
ifdef GOBIN
    PATH := $(GOBIN):$(PATH)
else
    PATH := $(subst :,/bin:,$(GOPATH))/bin:$(PATH)
endif

# Standard Telegraf build
default: prepare build
TELEGRAF := telegraf$(shell go tool dist env | grep -q 'GOOS=.windows.' && echo .exe)

# Windows build
windows: prepare-windows build-windows
LDFLAGS := $(LDFLAGS) -X main.commit=$(COMMIT) -X main.branch=$(BRANCH)
ifdef VERSION
    LDFLAGS += -X main.version=$(VERSION)
endif

# Only run the build (no dependency grabbing)
build:
    go install -ldflags \
        "-X main.version=$(VERSION) -X main.commit=$(COMMIT) -X main.branch=$(BRANCH)" ./...

build-windows:
    GOOS=windows GOARCH=amd64 go build -o telegraf.exe -ldflags \
        "-X main.version=$(VERSION) -X main.commit=$(COMMIT) -X main.branch=$(BRANCH)" \
        ./cmd/telegraf/telegraf.go
all:
    $(MAKE) deps
    $(MAKE) telegraf

build-for-docker:
    CGO_ENABLED=0 GOOS=linux go build -installsuffix cgo -o telegraf -ldflags \
        "-s -X main.version=$(VERSION) -X main.commit=$(COMMIT) -X main.branch=$(BRANCH)" \
        ./cmd/telegraf/telegraf.go
deps:
    go get github.com/sparrc/gdm
    gdm restore

telegraf:
    go build -i -o $(TELEGRAF) -ldflags "$(LDFLAGS)" ./cmd/telegraf/telegraf.go

go-install:
    go install -ldflags "-w -s $(LDFLAGS)" ./cmd/telegraf

install: telegraf
    mkdir -p $(DESTDIR)$(PREFIX)/bin/
    cp $(TELEGRAF) $(DESTDIR)$(PREFIX)/bin/

test:
    go test -short ./...

test-windows:
    go test ./plugins/inputs/ping/...
    go test ./plugins/inputs/win_perf_counters/...
    go test ./plugins/inputs/win_services/...

lint:
    go vet ./...

test-all: lint
    go test ./...

# run package script
package:
    ./scripts/build.py --package --version="$(VERSION)" --platform=linux --arch=all --upload

# Get dependencies and use gdm to checkout changesets
prepare:
    go get github.com/sparrc/gdm
    gdm restore
clean:
    -rm -f telegraf
    -rm -f telegraf.exe

# Use the windows godeps file to prepare dependencies
prepare-windows:
    go get github.com/sparrc/gdm
    gdm restore
    gdm restore -f Godeps_windows

# Run all docker containers necessary for unit tests
# Run all docker containers necessary for integration tests
docker-run:
    docker run --name aerospike -p "3000:3000" -d aerospike/aerospike-server:3.9.0
    docker run --name zookeeper -p "2181:2181" -d wurstmeister/zookeeper
    docker run --name kafka \
        -e ADVERTISED_HOST=localhost \
        -e ADVERTISED_PORT=9092 \
        -p "2181:2181" -p "9092:9092" \
        -d spotify/kafka
        --link zookeeper:zookeeper \
        -e KAFKA_ADVERTISED_HOST_NAME=localhost \
        -e KAFKA_ADVERTISED_PORT=9092 \
        -e KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181 \
        -e KAFKA_CREATE_TOPICS="test:1:1" \
        -p "9092:9092" \
        -d wurstmeister/kafka
    docker run --name elasticsearch -p "9200:9200" -p "9300:9300" -d elasticsearch:5
    docker run --name mysql -p "3306:3306" -e MYSQL_ALLOW_EMPTY_PASSWORD=yes -d mysql
    docker run --name memcached -p "11211:11211" -d memcached
    docker run --name postgres -p "5432:5432" -d postgres
@@ -58,39 +75,43 @@ docker-run:
    docker run --name redis -p "6379:6379" -d redis
    docker run --name nsq -p "4150:4150" -d nsqio/nsq /nsqd
    docker run --name mqtt -p "1883:1883" -d ncarlier/mqtt
    docker run --name riemann -p "5555:5555" -d blalor/riemann
    docker run --name riemann -p "5555:5555" -d stealthly/docker-riemann
    docker run --name nats -p "4222:4222" -d nats
    docker run --name openldap \
        -e SLAPD_CONFIG_ROOTDN="cn=manager,cn=config" \
        -e SLAPD_CONFIG_ROOTPW="secret" \
        -p "389:389" -p "636:636" \
        -d cobaugh/openldap-alpine

# Run docker containers necessary for CircleCI unit tests
# Run docker containers necessary for integration tests; skipping services provided
# by CircleCI
docker-run-circle:
    docker run --name aerospike -p "3000:3000" -d aerospike/aerospike-server:3.9.0
    docker run --name zookeeper -p "2181:2181" -d wurstmeister/zookeeper
    docker run --name kafka \
        -e ADVERTISED_HOST=localhost \
        -e ADVERTISED_PORT=9092 \
        -p "2181:2181" -p "9092:9092" \
        -d spotify/kafka
        --link zookeeper:zookeeper \
        -e KAFKA_ADVERTISED_HOST_NAME=localhost \
        -e KAFKA_ADVERTISED_PORT=9092 \
        -e KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181 \
        -e KAFKA_CREATE_TOPICS="test:1:1" \
        -p "9092:9092" \
        -d wurstmeister/kafka
    docker run --name elasticsearch -p "9200:9200" -p "9300:9300" -d elasticsearch:5
    docker run --name nsq -p "4150:4150" -d nsqio/nsq /nsqd
    docker run --name mqtt -p "1883:1883" -d ncarlier/mqtt
    docker run --name riemann -p "5555:5555" -d blalor/riemann
    docker run --name riemann -p "5555:5555" -d stealthly/docker-riemann
    docker run --name nats -p "4222:4222" -d nats
    docker run --name openldap \
        -e SLAPD_CONFIG_ROOTDN="cn=manager,cn=config" \
        -e SLAPD_CONFIG_ROOTPW="secret" \
        -p "389:389" -p "636:636" \
        -d cobaugh/openldap-alpine

# Kill all docker containers, ignore errors
docker-kill:
    -docker kill nsq aerospike redis rabbitmq postgres memcached mysql kafka mqtt riemann nats
    -docker rm nsq aerospike redis rabbitmq postgres memcached mysql kafka mqtt riemann nats
    -docker kill aerospike elasticsearch kafka memcached mqtt mysql nats nsq \
        openldap postgres rabbitmq redis riemann zookeeper
    -docker rm aerospike elasticsearch kafka memcached mqtt mysql nats nsq \
        openldap postgres rabbitmq redis riemann zookeeper

# Run full unit tests using docker containers (includes setup and teardown)
test: vet docker-kill docker-run
    # Sleeping for kafka leadership election, TSDB setup, etc.
    sleep 60
    # SUCCESS, running tests
    go test -race ./...

# Run "short" unit tests
test-short: vet
    go test -short ./...

vet:
    go vet ./...

.PHONY: test test-short vet build default
.PHONY: deps telegraf telegraf.exe install test test-windows lint test-all \
    package clean docker-run docker-run-circle docker-kill

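Editorial note on the `-X main.version=$(VERSION)` flags used throughout the Makefile above: `-ldflags -X` overwrites package-level string variables at link time, which is how the binary learns its version, branch, and commit without any generated source file. A minimal standalone sketch of the mechanism (variable names mirror the Makefile's `-X` targets; the `"dev"` fallback is illustrative, not the project's actual behavior):

```go
package main

import "fmt"

// These are overwritten at link time, e.g.:
//   go build -ldflags "-X main.version=1.4.0 -X main.commit=deadbeef -X main.branch=master"
var (
	version string
	commit  string
	branch  string
)

func main() {
	if version == "" {
		// No exact tag matched (git describe --exact-match failed in the
		// Makefile), so report an untagged development build instead.
		version = "dev"
	}
	fmt.Printf("telegraf %s (git: %s %s)\n", version, branch, commit)
}
```

Note that `-X` only works on uninitialized (or string-literal-initialized) package-level `string` variables, which is why the Makefile guards `LDFLAGS += -X main.version=$(VERSION)` behind `ifdef VERSION`.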
145 README.md
@@ -20,74 +20,35 @@ For more information on Processor and Aggregator plugins please [read this](./do
New plugins are designed to be easy to contribute, we'll eagerly accept pull
requests and will manage the set of plugins that Telegraf supports.
See the [contributing guide](CONTRIBUTING.md) for instructions on writing
new plugins.

## Contributing

There are many ways to contribute:
- Fix and [report bugs](https://github.com/influxdata/telegraf/issues/new)
- [Improve documentation](https://github.com/influxdata/telegraf/issues?q=is%3Aopen+label%3Adocumentation)
- [Review code and feature proposals](https://github.com/influxdata/telegraf/pulls)
- Answer questions on github and on the [Community Site](https://community.influxdata.com/)
- [Contribute plugins](CONTRIBUTING.md)

## Installation:

### Linux deb and rpm Packages:

Latest:
* https://dl.influxdata.com/telegraf/releases/telegraf_1.1.1_amd64.deb
* https://dl.influxdata.com/telegraf/releases/telegraf-1.1.1.x86_64.rpm

Latest (arm):
* https://dl.influxdata.com/telegraf/releases/telegraf_1.1.1_armhf.deb
* https://dl.influxdata.com/telegraf/releases/telegraf-1.1.1.armhf.rpm

##### Package Instructions:

* Telegraf binary is installed in `/usr/bin/telegraf`
* Telegraf daemon configuration file is in `/etc/telegraf/telegraf.conf`
* On sysv systems, the telegraf daemon can be controlled via
`service telegraf [action]`
* On systemd systems (such as Ubuntu 15+), the telegraf daemon can be
controlled via `systemctl [action] telegraf`

### yum/apt Repositories:

There is a yum/apt repo available for the whole InfluxData stack, see
[here](https://docs.influxdata.com/influxdb/latest/introduction/installation/#installation)
for instructions on setting up the repo. Once it is configured, you will be able
to use this repo to install & update telegraf.

### Linux tarballs:

Latest:
* https://dl.influxdata.com/telegraf/releases/telegraf-1.1.1_linux_amd64.tar.gz
* https://dl.influxdata.com/telegraf/releases/telegraf-1.1.1_linux_i386.tar.gz
* https://dl.influxdata.com/telegraf/releases/telegraf-1.1.1_linux_armhf.tar.gz

### FreeBSD tarball:

Latest:
* https://dl.influxdata.com/telegraf/releases/telegraf-1.1.1_freebsd_amd64.tar.gz
You can download the binaries directly from the [downloads](https://www.influxdata.com/downloads) page
or from the [releases](https://github.com/influxdata/telegraf/releases) section.

### Ansible Role:

Ansible role: https://github.com/rossmcdonald/telegraf

### OSX via Homebrew:

```
brew update
brew install telegraf
```

### Windows Binaries (EXPERIMENTAL)

Latest:
* https://dl.influxdata.com/telegraf/releases/telegraf-1.1.1_windows_amd64.zip

### From Source:

Telegraf manages dependencies via [gdm](https://github.com/sparrc/gdm),
which gets installed via the Makefile
if you don't have it already. You also must build with golang version 1.5+.
Telegraf requires golang version 1.8+, the Makefile requires GNU make.

Dependencies are managed with [gdm](https://github.com/sparrc/gdm),
which is installed by the Makefile if you don't have it already.

1. [Install Go](https://golang.org/doc/install)
2. [Setup your GOPATH](https://golang.org/doc/code.html#GOPATH)
3. Run `go get github.com/influxdata/telegraf`
3. Run `go get -d github.com/influxdata/telegraf`
4. Run `cd $GOPATH/src/github.com/influxdata/telegraf`
5. Run `make`

@@ -96,37 +57,37 @@ if you don't have it already. You also must build with golang version 1.5+.
See usage with:

```
telegraf --help
./telegraf --help
```

### Generate a telegraf config file:
#### Generate a telegraf config file:

```
telegraf config > telegraf.conf
./telegraf config > telegraf.conf
```

### Generate config with only cpu input & influxdb output plugins defined
#### Generate config with only cpu input & influxdb output plugins defined:

```
telegraf --input-filter cpu --output-filter influxdb config
./telegraf --input-filter cpu --output-filter influxdb config
```

### Run a single telegraf collection, outputting metrics to stdout
#### Run a single telegraf collection, outputting metrics to stdout:

```
telegraf --config telegraf.conf -test
./telegraf --config telegraf.conf --test
```

### Run telegraf with all plugins defined in config file
#### Run telegraf with all plugins defined in config file:

```
telegraf --config telegraf.conf
./telegraf --config telegraf.conf
```

### Run telegraf, enabling the cpu & memory input, and influxdb output plugins
#### Run telegraf, enabling the cpu & memory input, and influxdb output plugins:

```
telegraf --config telegraf.conf -input-filter cpu:mem -output-filter influxdb
./telegraf --config telegraf.conf --input-filter cpu:mem --output-filter influxdb
```

@@ -137,38 +98,48 @@ configuration options.

## Input Plugins

* [aws cloudwatch](./plugins/inputs/cloudwatch)
* [aerospike](./plugins/inputs/aerospike)
* [amqp_consumer](./plugins/inputs/amqp_consumer) (rabbitmq)
* [apache](./plugins/inputs/apache)
* [aws cloudwatch](./plugins/inputs/cloudwatch)
* [bcache](./plugins/inputs/bcache)
* [cassandra](./plugins/inputs/cassandra)
* [ceph](./plugins/inputs/ceph)
* [cgroup](./plugins/inputs/cgroup)
* [chrony](./plugins/inputs/chrony)
* [consul](./plugins/inputs/consul)
* [conntrack](./plugins/inputs/conntrack)
* [couchbase](./plugins/inputs/couchbase)
* [couchdb](./plugins/inputs/couchdb)
* [disque](./plugins/inputs/disque)
* [dmcache](./plugins/inputs/dmcache)
* [dns query time](./plugins/inputs/dns_query)
* [docker](./plugins/inputs/docker)
* [dovecot](./plugins/inputs/dovecot)
* [elasticsearch](./plugins/inputs/elasticsearch)
* [exec](./plugins/inputs/exec) (generic executable plugin, support JSON, influx, graphite and nagios)
* [fail2ban](./plugins/inputs/fail2ban)
* [filestat](./plugins/inputs/filestat)
* [fluentd](./plugins/inputs/fluentd)
* [graylog](./plugins/inputs/graylog)
* [haproxy](./plugins/inputs/haproxy)
* [hddtemp](./plugins/inputs/hddtemp)
* [http_response](./plugins/inputs/http_response)
* [httpjson](./plugins/inputs/httpjson) (generic JSON-emitting http service plugin)
* [internal](./plugins/inputs/internal)
* [influxdb](./plugins/inputs/influxdb)
* [interrupts](./plugins/inputs/interrupts)
* [ipmi_sensor](./plugins/inputs/ipmi_sensor)
* [iptables](./plugins/inputs/iptables)
* [jolokia](./plugins/inputs/jolokia)
* [kapacitor](./plugins/inputs/kapacitor)
* [kubernetes](./plugins/inputs/kubernetes)
* [leofs](./plugins/inputs/leofs)
* [lustre2](./plugins/inputs/lustre2)
* [mailchimp](./plugins/inputs/mailchimp)
* [memcached](./plugins/inputs/memcached)
* [mesos](./plugins/inputs/mesos)
* [minecraft](./plugins/inputs/minecraft)
* [mongodb](./plugins/inputs/mongodb)
* [mysql](./plugins/inputs/mysql)
* [net_response](./plugins/inputs/net_response)
@@ -176,6 +147,7 @@ configuration options.
* [nsq](./plugins/inputs/nsq)
* [nstat](./plugins/inputs/nstat)
* [ntpq](./plugins/inputs/ntpq)
* [openldap](./plugins/inputs/openldap)
* [phpfpm](./plugins/inputs/phpfpm)
* [phusion passenger](./plugins/inputs/passenger)
* [ping](./plugins/inputs/ping)
@@ -183,22 +155,25 @@ configuration options.
* [postgresql_extensible](./plugins/inputs/postgresql_extensible)
* [powerdns](./plugins/inputs/powerdns)
* [procstat](./plugins/inputs/procstat)
* [prometheus](./plugins/inputs/prometheus)
* [prometheus](./plugins/inputs/prometheus) (can be used for [Caddy server](./plugins/inputs/prometheus/README.md#usage-for-caddy-http-server))
* [puppetagent](./plugins/inputs/puppetagent)
* [rabbitmq](./plugins/inputs/rabbitmq)
* [raindrops](./plugins/inputs/raindrops)
* [redis](./plugins/inputs/redis)
* [rethinkdb](./plugins/inputs/rethinkdb)
* [riak](./plugins/inputs/riak)
* [salesforce](./plugins/inputs/salesforce)
* [sensors](./plugins/inputs/sensors)
* [snmp](./plugins/inputs/snmp)
* [snmp_legacy](./plugins/inputs/snmp_legacy)
* [sql server](./plugins/inputs/sqlserver) (microsoft)
* [tomcat](./plugins/inputs/tomcat)
* [twemproxy](./plugins/inputs/twemproxy)
* [varnish](./plugins/inputs/varnish)
* [zfs](./plugins/inputs/zfs)
* [zookeeper](./plugins/inputs/zookeeper)
* [win_perf_counters](./plugins/inputs/win_perf_counters) (windows performance counters)
* [win_perf_counters](./plugins/inputs/win_perf_counters) (windows performance counters)
* [win_services](./plugins/inputs/win_services)
* [sysstat](./plugins/inputs/sysstat)
* [system](./plugins/inputs/system)
    * cpu
@@ -211,6 +186,7 @@ configuration options.
    * processes
    * kernel (/proc/stat)
    * kernel (/proc/vmstat)
    * linux_sysctl_fs (/proc/sys/fs)

Telegraf can also collect metrics via the following service plugins:

@@ -221,14 +197,27 @@ Telegraf can also collect metrics via the following service plugins:
* [nsq_consumer](./plugins/inputs/nsq_consumer)
* [logparser](./plugins/inputs/logparser)
* [statsd](./plugins/inputs/statsd)
* [socket_listener](./plugins/inputs/socket_listener)
* [tail](./plugins/inputs/tail)
* [tcp_listener](./plugins/inputs/tcp_listener)
* [udp_listener](./plugins/inputs/udp_listener)
* [tcp_listener](./plugins/inputs/socket_listener)
* [udp_listener](./plugins/inputs/socket_listener)
* [webhooks](./plugins/inputs/webhooks)
  * [filestack](./plugins/inputs/webhooks/filestack)
  * [github](./plugins/inputs/webhooks/github)
  * [mandrill](./plugins/inputs/webhooks/mandrill)
  * [rollbar](./plugins/inputs/webhooks/rollbar)
  * [papertrail](./plugins/inputs/webhooks/papertrail)
* [zipkin](./plugins/inputs/zipkin)

Telegraf is able to parse the following input data formats into metrics; these
formats may be used with input plugins supporting the `data_format` option
(a minimal parsing sketch follows this list):

* [InfluxDB Line Protocol](./docs/DATA_FORMATS_INPUT.md#influx)
* [JSON](./docs/DATA_FORMATS_INPUT.md#json)
* [Graphite](./docs/DATA_FORMATS_INPUT.md#graphite)
* [Value](./docs/DATA_FORMATS_INPUT.md#value)
* [Nagios](./docs/DATA_FORMATS_INPUT.md#nagios)
* [Collectd](./docs/DATA_FORMATS_INPUT.md#collectd)
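For orientation, here is a rough, simplified sketch of what parsing the first of those formats (InfluxDB line protocol) involves. It handles only the unescaped `measurement,tag=v field=value timestamp` shape and is not Telegraf's actual parser:

```go
package main

import (
	"fmt"
	"strings"
)

// parseLine splits a simplified line-protocol entry into its three sections.
// Real line protocol supports escaping, quoting, and type suffixes; this
// sketch deliberately ignores all of that.
func parseLine(line string) (name string, tags, fields map[string]string, ts string) {
	sections := strings.Fields(line) // [measurement+tags, fields, timestamp]
	head := strings.Split(sections[0], ",")
	name = head[0]
	tags = map[string]string{}
	for _, t := range head[1:] {
		kv := strings.SplitN(t, "=", 2)
		tags[kv[0]] = kv[1]
	}
	fields = map[string]string{}
	for _, f := range strings.Split(sections[1], ",") {
		kv := strings.SplitN(f, "=", 2)
		fields[kv[0]] = kv[1]
	}
	if len(sections) == 3 {
		ts = sections[2]
	}
	return name, tags, fields, ts
}

func main() {
	n, tags, fields, ts := parseLine("cpu,host=a usage_idle=98.09 1455320690")
	fmt.Println(n, tags, fields, ts)
}
```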
## Processor Plugins

@@ -237,16 +226,18 @@ Telegraf can also collect metrics via the following service plugins:
## Aggregator Plugins

* [minmax](./plugins/aggregators/minmax)
* [histogram](./plugins/aggregators/histogram)

## Output Plugins

* [influxdb](./plugins/outputs/influxdb)
* [amon](./plugins/outputs/amon)
* [amqp](./plugins/outputs/amqp)
* [amqp](./plugins/outputs/amqp) (rabbitmq)
* [aws kinesis](./plugins/outputs/kinesis)
* [aws cloudwatch](./plugins/outputs/cloudwatch)
* [datadog](./plugins/outputs/datadog)
* [discard](./plugins/outputs/discard)
* [elasticsearch](./plugins/outputs/elasticsearch)
* [file](./plugins/outputs/file)
* [graphite](./plugins/outputs/graphite)
* [graylog](./plugins/outputs/graylog)
@@ -259,9 +250,7 @@ Telegraf can also collect metrics via the following service plugins:
* [opentsdb](./plugins/outputs/opentsdb)
* [prometheus](./plugins/outputs/prometheus_client)
* [riemann](./plugins/outputs/riemann)

## Contributing

Please see the
[contributing guide](CONTRIBUTING.md)
for details on contributing a plugin to Telegraf.
* [riemann_legacy](./plugins/outputs/riemann_legacy)
* [socket_writer](./plugins/outputs/socket_writer)
* [tcp](./plugins/outputs/socket_writer)
* [udp](./plugins/outputs/socket_writer)

@@ -157,13 +157,13 @@ func gatherWithTimeout(
	select {
	case err := <-done:
		if err != nil {
			log.Printf("E! ERROR in input [%s]: %s", input.Name(), err)
			acc.AddError(err)
		}
		return
	case <-ticker.C:
		log.Printf("E! ERROR: input [%s] took longer to collect than "+
			"collection interval (%s)",
			input.Name(), timeout)
		err := fmt.Errorf("took longer to collect than collection interval (%s)",
			timeout)
		acc.AddError(err)
		continue
	case <-shutdown:
		return
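The hunk above changes the over-interval case from logging to `acc.AddError(...)` plus `continue`: a slow input keeps being awaited, and the overrun is recorded once per elapsed interval rather than being silently abandoned. The pattern, reduced to a runnable standalone sketch (the `gather` function and interval here are placeholders, not Telegraf's types):

```go
package main

import (
	"fmt"
	"time"
)

// gatherWithTimeout waits for gather() to finish, but records an error each
// time a full interval elapses without completion, instead of giving up.
func gatherWithTimeout(gather func() error, timeout time.Duration, shutdown chan struct{}) {
	done := make(chan error)
	go func() { done <- gather() }()

	ticker := time.NewTicker(timeout)
	defer ticker.Stop()

	for {
		select {
		case err := <-done:
			if err != nil {
				fmt.Println("E! input error:", err)
			}
			return
		case <-ticker.C:
			// Keep waiting, but note the input overran its interval.
			fmt.Printf("E! took longer to collect than collection interval (%s)\n", timeout)
			continue
		case <-shutdown:
			return
		}
	}
}

func main() {
	slow := func() error { time.Sleep(250 * time.Millisecond); return nil }
	gatherWithTimeout(slow, 100*time.Millisecond, make(chan struct{}))
}
```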
@@ -191,6 +191,12 @@ func (a *Agent) Test() error {
	}()

	for _, input := range a.Config.Inputs {
		if _, ok := input.Input.(telegraf.ServiceInput); ok {
			fmt.Printf("\nWARNING: skipping plugin [[%s]]: service inputs not supported in --test mode\n",
				input.Name())
			continue
		}

		acc := NewAccumulator(input, metricC)
		acc.SetPrecision(a.Config.Agent.Precision.Duration,
			a.Config.Agent.Interval.Duration)
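The added guard in `Test()` uses a Go interface type assertion to detect service inputs and skip them. A minimal sketch of that idiom with stand-in interfaces (these are not Telegraf's real definitions):

```go
package main

import "fmt"

type Input interface{ Gather() error }

// ServiceInput is the marker interface the assertion checks for: anything
// that also needs to be started and left running.
type ServiceInput interface {
	Input
	Start() error
}

type plainInput struct{}

func (plainInput) Gather() error { return nil }

type listenerInput struct{ plainInput }

func (listenerInput) Start() error { return nil }

func main() {
	inputs := []Input{plainInput{}, listenerInput{}}
	for _, in := range inputs {
		if _, ok := in.(ServiceInput); ok {
			fmt.Println("skipping service input in --test mode")
			continue
		}
		_ = in.Gather()
	}
}
```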
@@ -209,7 +215,7 @@ func (a *Agent) Test() error {
	// Special instructions for some inputs. cpu, for example, needs to be
	// run twice in order to return cpu usage percentages.
	switch input.Name() {
	case "cpu", "mongodb", "procstat":
	case "inputs.cpu", "inputs.mongodb", "inputs.procstat":
		time.Sleep(500 * time.Millisecond)
		fmt.Printf("* Plugin: %s, Collection 2\n", input.Name())
		if err := input.Input.Gather(acc); err != nil {
@@ -241,7 +247,7 @@ func (a *Agent) flush() {
	}

// flusher monitors the metrics input channel and flushes on the minimum interval
func (a *Agent) flusher(shutdown chan struct{}, metricC chan telegraf.Metric) error {
func (a *Agent) flusher(shutdown chan struct{}, metricC chan telegraf.Metric, aggC chan telegraf.Metric) error {
	// Inelegant, but this sleep is to allow the Gather threads to run, so that
	// the flusher will flush after metrics are collected.
	time.Sleep(time.Millisecond * 300)
@@ -285,7 +291,31 @@ func (a *Agent) flusher(shutdown chan struct{}, metricC chan telegraf.Metric) er
		}
	}()

	wg.Add(1)
	go func() {
		defer wg.Done()
		for {
			select {
			case <-shutdown:
				if len(aggC) > 0 {
					// keep going until aggC is flushed
					continue
				}
				return
			case metric := <-aggC:
				metrics := []telegraf.Metric{metric}
				for _, processor := range a.Config.Processors {
					metrics = processor.Apply(metrics...)
				}
				for _, m := range metrics {
					outMetricC <- m
				}
			}
		}
	}()

	ticker := time.NewTicker(a.Config.Agent.FlushInterval.Duration)
	semaphore := make(chan struct{}, 1)
	for {
		select {
		case <-shutdown:
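The new goroutine must not drop aggregator metrics still buffered in `aggC` when shutdown is signalled, which is why its `<-shutdown` case loops with `continue` until the channel reports empty. The drain idiom in isolation (channel and workload here are stand-ins):

```go
package main

import "fmt"

func main() {
	work := make(chan int, 100)
	shutdown := make(chan struct{})

	for i := 0; i < 5; i++ {
		work <- i
	}
	close(shutdown) // shutdown is now always ready in the select below

	for {
		select {
		case <-shutdown:
			if len(work) > 0 {
				// keep going until the buffered channel is flushed
				continue
			}
			fmt.Println("drained, exiting")
			return
		case v := <-work:
			fmt.Println("processed", v)
		}
	}
}
```

When both cases are ready, `select` picks one at random, so buffered items are still consumed and the loop exits only once `len(work)` reaches zero, mirroring the `len(aggC) > 0` check above.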
@@ -295,8 +325,18 @@ func (a *Agent) flusher(shutdown chan struct{}, metricC chan telegraf.Metric) er
			a.flush()
			return nil
		case <-ticker.C:
			internal.RandomSleep(a.Config.Agent.FlushJitter.Duration, shutdown)
			a.flush()
			go func() {
				select {
				case semaphore <- struct{}{}:
					internal.RandomSleep(a.Config.Agent.FlushJitter.Duration, shutdown)
					a.flush()
					<-semaphore
				default:
					// skipping this flush because one is already happening
					log.Println("W! Skipping a scheduled flush because there is" +
						" already a flush ongoing.")
				}
			}()
		case metric := <-metricC:
			// NOTE potential bottleneck here as we put each metric through the
			// processors serially.
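The `semaphore` in this hunk is a buffered channel of capacity one: a flush goroutine proceeds only if it can place a token, and a tick that fires while another flush still holds the token falls through to `default` and is skipped instead of piling up. The idiom as a standalone sketch:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	semaphore := make(chan struct{}, 1) // capacity 1 = at most one flush at a time
	var wg sync.WaitGroup

	flush := func(id int) {
		defer wg.Done()
		select {
		case semaphore <- struct{}{}:
			fmt.Println("flush", id, "running")
			time.Sleep(50 * time.Millisecond) // simulated flush work
			<-semaphore                       // release the token
		default:
			// Another flush holds the token; skip this one entirely.
			fmt.Println("flush", id, "skipped: one already ongoing")
		}
	}

	for i := 0; i < 3; i++ {
		wg.Add(1)
		go flush(i)
	}
	wg.Wait()
}
```

Skipping rather than queueing is the point of the design: if a flush is slower than the flush interval, queued flushes would only fall further behind.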
@@ -322,6 +362,9 @@ func (a *Agent) Run(shutdown chan struct{}) error {

	// channel shared between all input threads for accumulating metrics
	metricC := make(chan telegraf.Metric, 100)
	aggC := make(chan telegraf.Metric, 100)

	now := time.Now()

	// Start all ServicePlugins
	for _, input := range a.Config.Inputs {
@@ -350,7 +393,7 @@ func (a *Agent) Run(shutdown chan struct{}) error {
	wg.Add(1)
	go func() {
		defer wg.Done()
		if err := a.flusher(shutdown, metricC); err != nil {
		if err := a.flusher(shutdown, metricC, aggC); err != nil {
			log.Printf("E! Flusher routine failed, exiting: %s\n", err.Error())
			close(shutdown)
		}
@@ -360,10 +403,10 @@ func (a *Agent) Run(shutdown chan struct{}) error {
	for _, aggregator := range a.Config.Aggregators {
		go func(agg *models.RunningAggregator) {
			defer wg.Done()
			acc := NewAccumulator(agg, metricC)
			acc := NewAccumulator(agg, aggC)
			acc.SetPrecision(a.Config.Agent.Precision.Duration,
				a.Config.Agent.Interval.Duration)
			agg.Run(acc, shutdown)
			agg.Run(acc, now, shutdown)
		}(aggregator)
	}

@@ -381,5 +424,6 @@ func (a *Agent) Run(shutdown chan struct{}) error {
	}

	wg.Wait()
	a.Close()
	return nil
}

32 appveyor.yml (new file)
@@ -0,0 +1,32 @@
version: "{build}"

cache:
  - C:\Cache

clone_folder: C:\gopath\src\github.com\influxdata\telegraf

environment:
  GOPATH: C:\gopath

platform: x64

install:
  - IF NOT EXIST "C:\Cache" mkdir C:\Cache
  - IF NOT EXIST "C:\Cache\go1.8.1.msi" curl -o "C:\Cache\go1.8.1.msi" https://storage.googleapis.com/golang/go1.8.1.windows-amd64.msi
  - IF NOT EXIST "C:\Cache\gnuwin32-bin.zip" curl -o "C:\Cache\gnuwin32-bin.zip" https://dl.influxdata.com/telegraf/ci/make-3.81-bin.zip
  - IF NOT EXIST "C:\Cache\gnuwin32-dep.zip" curl -o "C:\Cache\gnuwin32-dep.zip" https://dl.influxdata.com/telegraf/ci/make-3.81-dep.zip
  - IF EXIST "C:\Go" rmdir /S /Q C:\Go
  - msiexec.exe /i "C:\Cache\go1.8.1.msi" /quiet
  - 7z x "C:\Cache\gnuwin32-bin.zip" -oC:\GnuWin32 -y
  - 7z x "C:\Cache\gnuwin32-dep.zip" -oC:\GnuWin32 -y
  - go version
  - go env

build_script:
  - cmd: C:\GnuWin32\bin\make

test_script:
  - cmd: C:\GnuWin32\bin\make test-windows

artifacts:
  - path: telegraf.exe
11 circle.yml
@@ -1,12 +1,13 @@
machine:
  services:
    - docker
    - memcached
    - redis
    - rabbitmq-server
  post:
    - sudo service zookeeper stop
    - go version
    - go version | grep 1.7.4 || sudo rm -rf /usr/local/go
    - wget https://storage.googleapis.com/golang/go1.7.4.linux-amd64.tar.gz
    - sudo tar -C /usr/local -xzf go1.7.4.linux-amd64.tar.gz
    - sudo rm -rf /usr/local/go
    - wget https://storage.googleapis.com/golang/go1.8.4.linux-amd64.tar.gz
    - sudo tar -C /usr/local -xzf go1.8.4.linux-amd64.tar.gz
    - go version

dependencies:

@@ -4,6 +4,8 @@ import (
	"flag"
	"fmt"
	"log"
	"net/http"
	_ "net/http/pprof" // Comment this line to disable pprof endpoint.
	"os"
	"os/signal"
	"runtime"
@@ -24,6 +26,8 @@ import (

var fDebug = flag.Bool("debug", false,
	"turn on debug logging")
var pprofAddr = flag.String("pprof-addr", "",
	"pprof address to listen on, not activate pprof if empty")
var fQuiet = flag.Bool("quiet", false,
	"run in quiet mode")
var fTest = flag.Bool("test", false, "gather metrics, print them out, and exit")
@@ -47,16 +51,18 @@ var fAggregatorFilters = flag.String("aggregator-filter", "",
var fProcessorFilters = flag.String("processor-filter", "",
	"filter the processors to enable, separator is :")
var fUsage = flag.String("usage", "",
	"print usage for a plugin, ie, 'telegraf -usage mysql'")
	"print usage for a plugin, ie, 'telegraf --usage mysql'")
var fService = flag.String("service", "",
	"operate on the service")

// Telegraf version, populated linker.
// ie, -ldflags "-X main.version=`git describe --always --tags`"

var (
	version string
	commit  string
	branch  string
	nextVersion = "1.4.0"
	version     string
	commit      string
	branch      string
)

func init() {
@@ -77,8 +83,8 @@ Usage:

The commands & flags are:

  config     print out full sample configuration to stdout
  version    print the version to stdout
  config              print out full sample configuration to stdout
  version             print the version to stdout

  --config <file>     configuration file to load
  --test              gather metrics once, print them to stdout, and exit
@@ -87,6 +93,7 @@ The commands & flags are:
  --output-filter     filter the output plugins to enable, separator is :
  --usage             print usage for a plugin, ie, 'telegraf --usage mysql'
  --debug             print metrics as they're generated to stdout
  --pprof-addr        pprof address to listen on, format: localhost:6060 or :6060
  --quiet             run in quiet mode

Examples:
@@ -98,105 +105,31 @@ Examples:
  telegraf --input-filter cpu --output-filter influxdb config

  # run a single telegraf collection, outputting metrics to stdout
  telegraf --config telegraf.conf -test
  telegraf --config telegraf.conf --test

  # run telegraf with all plugins defined in config file
  telegraf --config telegraf.conf

  # run telegraf, enabling the cpu & memory input, and influxdb output plugins
  telegraf --config telegraf.conf --input-filter cpu:mem --output-filter influxdb

  # run telegraf with pprof
  telegraf --config telegraf.conf --pprof-addr localhost:6060
`

var stop chan struct{}

var srvc service.Service

type program struct{}

func reloadLoop(stop chan struct{}, s service.Service) {
	defer func() {
		if service.Interactive() {
			os.Exit(0)
		}
		return
	}()
func reloadLoop(
	stop chan struct{},
	inputFilters []string,
	outputFilters []string,
	aggregatorFilters []string,
	processorFilters []string,
) {
	reload := make(chan bool, 1)
	reload <- true
	for <-reload {
		reload <- false
		flag.Parse()
		args := flag.Args()

		var inputFilters []string
		if *fInputFilters != "" {
			inputFilter := strings.TrimSpace(*fInputFilters)
			inputFilters = strings.Split(":"+inputFilter+":", ":")
		}
		var outputFilters []string
		if *fOutputFilters != "" {
			outputFilter := strings.TrimSpace(*fOutputFilters)
			outputFilters = strings.Split(":"+outputFilter+":", ":")
		}
		var aggregatorFilters []string
		if *fAggregatorFilters != "" {
			aggregatorFilter := strings.TrimSpace(*fAggregatorFilters)
			aggregatorFilters = strings.Split(":"+aggregatorFilter+":", ":")
		}
		var processorFilters []string
		if *fProcessorFilters != "" {
			processorFilter := strings.TrimSpace(*fProcessorFilters)
			processorFilters = strings.Split(":"+processorFilter+":", ":")
		}

		if len(args) > 0 {
			switch args[0] {
			case "version":
				fmt.Printf("Telegraf v%s (git: %s %s)\n", version, branch, commit)
				return
			case "config":
				config.PrintSampleConfig(
					inputFilters,
					outputFilters,
					aggregatorFilters,
					processorFilters,
				)
				return
			}
		}

		// switch for flags which just do something and exit immediately
		switch {
		case *fOutputList:
			fmt.Println("Available Output Plugins:")
			for k, _ := range outputs.Outputs {
				fmt.Printf("  %s\n", k)
			}
			return
		case *fInputList:
			fmt.Println("Available Input Plugins:")
			for k, _ := range inputs.Inputs {
				fmt.Printf("  %s\n", k)
			}
			return
		case *fVersion:
			fmt.Printf("Telegraf v%s (git: %s %s)\n", version, branch, commit)
			return
		case *fSampleConfig:
			config.PrintSampleConfig(
				inputFilters,
				outputFilters,
				aggregatorFilters,
				processorFilters,
			)
			return
		case *fUsage != "":
			if err := config.PrintInputConfig(*fUsage); err != nil {
				if err2 := config.PrintOutputConfig(*fUsage); err2 != nil {
					log.Fatalf("E! %s and %s", err, err2)
				}
			}
			return
		}

		// If no other options are specified, load the config file and run.
		c := config.NewConfig()
@@ -213,13 +146,23 @@ func reloadLoop(stop chan struct{}, s service.Service) {
			log.Fatal("E! " + err.Error())
		}
	}
	if len(c.Outputs) == 0 {
	if !*fTest && len(c.Outputs) == 0 {
		log.Fatalf("E! Error: no outputs found, did you provide a valid config file?")
	}
	if len(c.Inputs) == 0 {
		log.Fatalf("E! Error: no inputs found, did you provide a valid config file?")
	}

	if int64(c.Agent.Interval.Duration) <= 0 {
		log.Fatalf("E! Agent interval must be positive, found %s",
			c.Agent.Interval.Duration)
	}

	if int64(c.Agent.FlushInterval.Duration) <= 0 {
		log.Fatalf("E! Agent flush_interval must be positive; found %s",
			c.Agent.Interval.Duration)
	}

	ag, err := agent.NewAgent(c)
	if err != nil {
		log.Fatal("E! " + err.Error())
@@ -237,7 +180,7 @@ func reloadLoop(stop chan struct{}, s service.Service) {
	if err != nil {
		log.Fatal("E! " + err.Error())
	}
	return
	os.Exit(0)
}

	err = ag.Connect()
@@ -265,20 +208,27 @@ func reloadLoop(stop chan struct{}, s service.Service) {
		}
	}()

	log.Printf("I! Starting Telegraf (version %s)\n", version)
	log.Printf("I! Starting Telegraf %s\n", displayVersion())
	log.Printf("I! Loaded outputs: %s", strings.Join(c.OutputNames(), " "))
	log.Printf("I! Loaded inputs: %s", strings.Join(c.InputNames(), " "))
	log.Printf("I! Tags enabled: %s", c.ListTags())

	if *fPidfile != "" {
		f, err := os.Create(*fPidfile)
		f, err := os.OpenFile(*fPidfile, os.O_CREATE|os.O_WRONLY, 0644)
		if err != nil {
			log.Fatalf("E! Unable to create pidfile: %s", err)
			log.Printf("E! Unable to create pidfile: %s", err)
		} else {
			fmt.Fprintf(f, "%d\n", os.Getpid())

			f.Close()

			defer func() {
				err := os.Remove(*fPidfile)
				if err != nil {
					log.Printf("E! Unable to remove pidfile: %s", err)
				}
			}()
		}

		fmt.Fprintf(f, "%d\n", os.Getpid())

		f.Close()
	}

	ag.Run(shutdown)
@@ -290,23 +240,127 @@ func usageExit(rc int) {
	os.Exit(rc)
}

type program struct {
	inputFilters      []string
	outputFilters     []string
	aggregatorFilters []string
	processorFilters  []string
}

func (p *program) Start(s service.Service) error {
	srvc = s
	go p.run()
	return nil
}
func (p *program) run() {
	stop = make(chan struct{})
	reloadLoop(stop, srvc)
	reloadLoop(
		stop,
		p.inputFilters,
		p.outputFilters,
		p.aggregatorFilters,
		p.processorFilters,
	)
}
func (p *program) Stop(s service.Service) error {
	close(stop)
	return nil
}

func displayVersion() string {
	if version == "" {
		return fmt.Sprintf("v%s~pre%s", nextVersion, commit)
	}
	return "v" + version
}

func main() {
	flag.Usage = func() { usageExit(0) }
	flag.Parse()
	args := flag.Args()

	inputFilters, outputFilters := []string{}, []string{}
	if *fInputFilters != "" {
		inputFilters = strings.Split(":"+strings.TrimSpace(*fInputFilters)+":", ":")
	}
	if *fOutputFilters != "" {
		outputFilters = strings.Split(":"+strings.TrimSpace(*fOutputFilters)+":", ":")
	}

	aggregatorFilters, processorFilters := []string{}, []string{}
	if *fAggregatorFilters != "" {
		aggregatorFilters = strings.Split(":"+strings.TrimSpace(*fAggregatorFilters)+":", ":")
	}
	if *fProcessorFilters != "" {
		processorFilters = strings.Split(":"+strings.TrimSpace(*fProcessorFilters)+":", ":")
	}

	if *pprofAddr != "" {
		go func() {
			pprofHostPort := *pprofAddr
			parts := strings.Split(pprofHostPort, ":")
			if len(parts) == 2 && parts[0] == "" {
				pprofHostPort = fmt.Sprintf("localhost:%s", parts[1])
			}
			pprofHostPort = "http://" + pprofHostPort + "/debug/pprof"

			log.Printf("I! Starting pprof HTTP server at: %s", pprofHostPort)

			if err := http.ListenAndServe(*pprofAddr, nil); err != nil {
				log.Fatal("E! " + err.Error())
			}
		}()
	}

	if len(args) > 0 {
		switch args[0] {
		case "version":
			fmt.Printf("Telegraf %s (git: %s %s)\n", displayVersion(), branch, commit)
			return
		case "config":
			config.PrintSampleConfig(
				inputFilters,
				outputFilters,
				aggregatorFilters,
				processorFilters,
			)
			return
		}
	}

	// switch for flags which just do something and exit immediately
	switch {
	case *fOutputList:
		fmt.Println("Available Output Plugins:")
		for k, _ := range outputs.Outputs {
			fmt.Printf("  %s\n", k)
		}
		return
	case *fInputList:
		fmt.Println("Available Input Plugins:")
		for k, _ := range inputs.Inputs {
			fmt.Printf("  %s\n", k)
		}
		return
	case *fVersion:
		fmt.Printf("Telegraf %s (git: %s %s)\n", displayVersion(), branch, commit)
		return
	case *fSampleConfig:
		config.PrintSampleConfig(
			inputFilters,
			outputFilters,
			aggregatorFilters,
			processorFilters,
		)
		return
	case *fUsage != "":
		err := config.PrintInputConfig(*fUsage)
		err2 := config.PrintOutputConfig(*fUsage)
		if err != nil && err2 != nil {
			log.Fatalf("E! %s and %s", err, err2)
		}
		return
	}

	if runtime.GOOS == "windows" {
		svcConfig := &service.Config{
			Name: "telegraf",
@@ -316,7 +370,12 @@ func main() {
			Arguments: []string{"-config", "C:\\Program Files\\Telegraf\\telegraf.conf"},
		}

		prg := &program{}
		prg := &program{
			inputFilters:      inputFilters,
			outputFilters:     outputFilters,
			aggregatorFilters: aggregatorFilters,
			processorFilters:  processorFilters,
		}
		s, err := service.New(prg, svcConfig)
		if err != nil {
			log.Fatal("E! " + err.Error())
@@ -327,10 +386,14 @@ func main() {
		if *fConfig != "" {
			(*svcConfig).Arguments = []string{"-config", *fConfig}
		}
		if *fConfigDirectory != "" {
			(*svcConfig).Arguments = append((*svcConfig).Arguments, "-config-directory", *fConfigDirectory)
		}
		err := service.Control(s, *fService)
		if err != nil {
			log.Fatal("E! " + err.Error())
		}
		os.Exit(0)
	} else {
		err = s.Run()
		if err != nil {
@@ -339,6 +402,12 @@ func main() {
		}
	}
	} else {
		stop = make(chan struct{})
		reloadLoop(stop, nil)
		reloadLoop(
			stop,
			inputFilters,
			outputFilters,
			aggregatorFilters,
			processorFilters,
		)
	}

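One detail of the filter handling in the new `main()` that is easy to miss: the split pattern `strings.Split(":"+s+":", ":")` produces leading and trailing empty strings in the resulting slice. A quick demonstration of what the consumer of these filter lists actually receives:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	s := strings.TrimSpace("cpu:mem")
	filters := strings.Split(":"+s+":", ":")
	// Prints: []string{"", "cpu", "mem", ""} -- the empty boundary entries
	// are carried along and must be tolerated by whatever matches against
	// the filter list.
	fmt.Printf("%#v\n", filters)
}
```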
@@ -24,6 +24,16 @@ Environment variables can be used anywhere in the config file, simply prepend
them with $. For strings the variable must be within quotes (ie, "$STR_VAR"),
for numbers and booleans they should be plain (ie, $INT_VAR, $BOOL_VAR)

## Configuration file locations

The location of the configuration file can be set via the `--config` command
line flag. Telegraf will also pick up all files matching the pattern `*.conf` if
the `-config-directory` command line flag is used.

On most systems, the default locations are `/etc/telegraf/telegraf.conf` for
the main configuration file and `/etc/telegraf/telegraf.d` for the directory of
configuration files.

# Global Tags

Global tags can be specified in the `[global_tags]` section of the config file
@@ -56,11 +66,14 @@ interval. Maximum flush_interval will be flush_interval + flush_jitter
This is primarily to avoid
large write spikes for users running a large number of telegraf instances.
ie, a jitter of 5s and flush_interval 10s means flushes will happen every 10-15s.
* **precision**: By default, precision will be set to the same timestamp order
as the collection interval, with the maximum being 1s. Precision will NOT
be used for service inputs, such as logparser and statsd. Valid values are
"ns", "us" (or "µs"), "ms", "s".
* **logfile**: Specify the log file name. The empty string means to log to stdout.
* **precision**:
By default or when set to "0s", precision will be set to the same
timestamp order as the collection interval, with the maximum being 1s.
Precision will NOT be used for service inputs. It is up to each individual
service input to set the timestamp at the appropriate precision.
Valid time units are "ns", "us" (or "µs"), "ms", "s".

* **logfile**: Specify the log file name. The empty string means to log to stderr.
* **debug**: Run telegraf in debug mode.
* **quiet**: Run telegraf in quiet mode (error messages only).
* **hostname**: Override default hostname, if empty use os.Hostname().
@@ -114,31 +127,41 @@ is not specified then processor execution order will be random.
Filters can be configured per input, output, processor, or aggregator;
see below for examples, and see the standalone matching sketch after this list.

* **namepass**: An array of strings that is used to filter metrics generated by the
current input. Each string in the array is tested as a glob match against
measurement names and if it matches, the field is emitted.
* **namedrop**: The inverse of pass, if a measurement name matches, it is not emitted.
* **fieldpass**: An array of strings that is used to filter metrics generated by the
current input. Each string in the array is tested as a glob match against field names
and if it matches, the field is emitted. fieldpass is not available for outputs.
* **fielddrop**: The inverse of pass, if a field name matches, it is not emitted.
fielddrop is not available for outputs.
* **tagpass**: tag names and arrays of strings that are used to filter
measurements by the current input. Each string in the array is tested as a glob
match against the tag name, and if it matches the measurement is emitted.
* **tagdrop**: The inverse of tagpass. If a tag matches, the measurement is not
emitted. This is tested on measurements that have passed the tagpass test.
* **tagexclude**: tagexclude can be used to exclude a tag from measurement(s).
As opposed to tagdrop, which will drop an entire measurement based on its
tags, tagexclude simply strips the given tag keys from the measurement. This
can be used on inputs & outputs, but it is _recommended_ to be used on inputs,
as it is more efficient to filter out tags at the ingestion point.
* **taginclude**: taginclude is the inverse of tagexclude. It will only include
the tag keys in the final measurement.
* **namepass**:
An array of glob pattern strings. Only points whose measurement name matches
a pattern in this list are emitted.
* **namedrop**:
The inverse of `namepass`. If a match is found the point is discarded. This
is tested on points after they have passed the `namepass` test.
* **fieldpass**:
An array of glob pattern strings. Only fields whose field key matches a
pattern in this list are emitted. Not available for outputs.
* **fielddrop**:
The inverse of `fieldpass`. Fields with a field key matching one of the
patterns will be discarded from the point. This is tested on points after
they have passed the `fieldpass` test. Not available for outputs.
* **tagpass**:
A table mapping tag keys to arrays of glob pattern strings. Only points
that contain a tag key in the table and a tag value matching one of its
patterns is emitted.
* **tagdrop**:
The inverse of `tagpass`. If a match is found the point is discarded. This
is tested on points after they have passed the `tagpass` test.
* **taginclude**:
An array of glob pattern strings. Only tags with a tag key matching one of
the patterns are emitted. In contrast to `tagpass`, which will pass an entire
point based on its tag, `taginclude` removes all non matching tags from the
point. This filter can be used on both inputs & outputs, but it is
_recommended_ to be used on inputs, as it is more efficient to filter out tags
at the ingestion point.
* **tagexclude**:
The inverse of `taginclude`. Tags with a tag key matching one of the patterns
will be discarded from the point.

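All of the parameters above are glob matches against a measurement name, field key, or tag. A minimal sketch of namepass/namedrop-style filtering, using the standard library's `path.Match` as a stand-in glob engine (Telegraf's real filter package is more featureful and this is not its code):

```go
package main

import (
	"fmt"
	"path"
)

// passes reports whether name survives a namepass/namedrop pair: it must
// match some namepass pattern (when any are configured) and no namedrop
// pattern.
func passes(name string, namepass, namedrop []string) bool {
	if len(namepass) > 0 {
		ok := false
		for _, p := range namepass {
			if m, _ := path.Match(p, name); m {
				ok = true
				break
			}
		}
		if !ok {
			return false
		}
	}
	for _, p := range namedrop {
		if m, _ := path.Match(p, name); m {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(passes("aerospike_node", []string{"aerospike*"}, nil)) // true
	fmt.Println(passes("cpu", []string{"aerospike*"}, nil))            // false
	fmt.Println(passes("cpu", nil, []string{"cpu"}))                   // false
}
```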
**NOTE** `tagpass` and `tagdrop` parameters must be defined at the _end_ of
the plugin definition, otherwise subsequent plugin config options will be
interpreted as part of the tagpass/tagdrop map.
**NOTE** Due to the way TOML is parsed, `tagpass` and `tagdrop` parameters
must be defined at the _end_ of the plugin definition, otherwise subsequent
plugin config options will be interpreted as part of the tagpass/tagdrop
tables.

#### Input Configuration Examples

@@ -158,7 +181,6 @@ fields which begin with `time_`.
[[outputs.influxdb]]
  url = "http://192.168.59.103:8086" # required.
  database = "telegraf" # required.
  precision = "s"

# INPUTS
[[inputs.cpu]]
@@ -297,21 +319,18 @@ to avoid measurement collisions:
[[outputs.influxdb]]
  urls = [ "http://localhost:8086" ]
  database = "telegraf"
  precision = "s"
  # Drop all measurements that start with "aerospike"
  namedrop = ["aerospike*"]

[[outputs.influxdb]]
  urls = [ "http://localhost:8086" ]
  database = "telegraf-aerospike-data"
  precision = "s"
  # Only accept aerospike data:
  namepass = ["aerospike*"]

[[outputs.influxdb]]
  urls = [ "http://localhost:8086" ]
  database = "telegraf-cpu0-data"
  precision = "s"
  # Only store measurements where the tag "cpu" matches the value "cpu0"
  [outputs.influxdb.tagpass]
    cpu = ["cpu0"]
@@ -351,4 +370,4 @@ to the system load metrics due to the `namepass` parameter.

[[outputs.file]]
  files = ["stdout"]
```
```

@@ -7,6 +7,7 @@ Telegraf is able to parse the following input data formats into metrics:
1. [Graphite](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#graphite)
1. [Value](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#value), ie: 45 or "booyah"
1. [Nagios](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#nagios) (exec input only)
1. [Collectd](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#collectd)

Telegraf metrics, like InfluxDB
[points](https://docs.influxdata.com/influxdb/v0.10/write_protocols/line/),
@@ -40,7 +41,7 @@ example, in the exec plugin:
  name_suffix = "_mycollector"

  ## Data format to consume.
  ## Each data format has it's own unique set of configuration options, read
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "json"
@@ -67,7 +68,7 @@ metrics are parsed directly into Telegraf metrics.
  name_suffix = "_mycollector"

  ## Data format to consume.
  ## Each data format has it's own unique set of configuration options, read
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "influx"
@@ -117,7 +118,7 @@ For example, if you had this configuration:
  name_suffix = "_mycollector"

  ## Data format to consume.
  ## Each data format has it's own unique set of configuration options, read
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "json"
@@ -161,7 +162,7 @@ For example, if the following configuration:
  name_suffix = "_mycollector"

  ## Data format to consume.
  ## Each data format has it's own unique set of configuration options, read
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "json"
@@ -232,7 +233,7 @@ name of the plugin.
  name_override = "entropy_available"

  ## Data format to consume.
  ## Each data format has it's own unique set of configuration options, read
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "value"
@@ -390,7 +391,7 @@ There are many more options available,
  name_suffix = "_mycollector"

  ## Data format to consume.
  ## Each data format has it's own unique set of configuration options, read
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "graphite"
@@ -427,14 +428,54 @@ Note: Nagios Input Data Formats is only supported in `exec` input plugin.
```toml
[[inputs.exec]]
  ## Commands array
  commands = ["/usr/lib/nagios/plugins/check_load", "-w 5,6,7 -c 7,8,9"]
  commands = ["/usr/lib/nagios/plugins/check_load -w 5,6,7 -c 7,8,9"]

  ## measurement name suffix (for separating different commands)
  name_suffix = "_mycollector"

  ## Data format to consume.
  ## Each data format has it's own unique set of configuration options, read
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "nagios"
```

# Collectd:

The collectd format parses the collectd binary network protocol. Tags are
created for host, instance, type, and type instance. All collectd values are
added as float64 fields.

For more information about the binary network protocol see
[here](https://collectd.org/wiki/index.php/Binary_protocol).

You can control the cryptographic settings with parser options. Create an
authentication file and set `collectd_auth_file` to the path of the file, then
set the desired security level in `collectd_security_level`.

Additional information including client setup can be found
[here](https://collectd.org/wiki/index.php/Networking_introduction#Cryptographic_setup).

You can also change the path to the typesdb or add additional typesdb using
`collectd_typesdb`.

#### Collectd Configuration:

```toml
[[inputs.socket_listener]]
  service_address = "udp://127.0.0.1:25826"
  name_prefix = "collectd_"

  ## Data format to consume.
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "collectd"

  ## Authentication file for cryptographic security levels
  collectd_auth_file = "/etc/collectd/auth_file"
  ## One of none (default), sign, or encrypt
  collectd_security_level = "encrypt"
  ## Path to TypesDB specifications
  collectd_typesdb = ["/usr/share/collectd/types.db"]
```

@@ -36,7 +36,7 @@ config option, for example, in the `file` output plugin:
  files = ["stdout"]

  ## Data format to output.
  ## Each data format has it's own unique set of configuration options, read
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
  data_format = "influx"
@@ -60,7 +60,7 @@ metrics are serialized directly into InfluxDB line-protocol.
  files = ["stdout", "/tmp/metrics.out"]

  ## Data format to output.
  ## Each data format has it's own unique set of configuration options, read
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
  data_format = "influx"
@@ -96,6 +96,9 @@ tars.cpu-total.us-east-1.cpu.usage_user 0.89 1455320690
tars.cpu-total.us-east-1.cpu.usage_idle 98.09 1455320690
```

Fields with string values will be skipped. Boolean fields will be converted
to 1 (true) or 0 (false).

### Graphite Configuration:

```toml
@@ -104,7 +107,7 @@ tars.cpu-total.us-east-1.cpu.usage_idle 98.09 1455320690
  files = ["stdout", "/tmp/metrics.out"]

  ## Data format to output.
  ## Each data format has it's own unique set of configuration options, read
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
  data_format = "graphite"
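Judging from the sample output above (`tars.cpu-total.us-east-1.cpu.usage_user 0.89 1455320690`), the graphite rendering amounts to joining a prefix, the tag values, the measurement, and the field key with dots, followed by the value and timestamp. A rough sketch under that template assumption (this is not the serializer's actual code, and the real template is configurable):

```go
package main

import (
	"fmt"
	"strings"
)

// graphiteLine renders one field as "prefix.tags.measurement.field value ts".
func graphiteLine(prefix, measurement string, tagValues []string, field string, value float64, ts int64) string {
	parts := append([]string{prefix}, tagValues...)
	parts = append(parts, measurement, field)
	return fmt.Sprintf("%s %v %d", strings.Join(parts, "."), value, ts)
}

func main() {
	fmt.Println(graphiteLine("tars", "cpu", []string{"cpu-total", "us-east-1"}, "usage_user", 0.89, 1455320690))
	// tars.cpu-total.us-east-1.cpu.usage_user 0.89 1455320690
}
```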
@@ -143,8 +146,18 @@ The JSON data format serialized Telegraf metrics in json format. The format is:
  files = ["stdout", "/tmp/metrics.out"]

  ## Data format to output.
  ## Each data format has it's own unique set of configuration options, read
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
  data_format = "json"
  json_timestamp_units = "1ns"
```

By default, the timestamp that is output in JSON data format serialized Telegraf
metrics is in seconds. The precision of this timestamp can be adjusted for any output
by adding the optional `json_timestamp_units` parameter to the configuration for
that output. This parameter can be used to set the timestamp units to nanoseconds (`ns`),
microseconds (`us` or `µs`), milliseconds (`ms`), or seconds (`s`). Note that this
parameter will be truncated to the nearest power of 10 below the specified value, so if
`json_timestamp_units` is set to `15ms` the timestamps for the JSON format serialized
Telegraf metrics will be output in hundredths of a second (`10ms`).

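The truncation just described (`15ms` becoming `10ms`) can be expressed as rounding the duration down to the largest power of ten of nanoseconds that does not exceed it. A sketch of that arithmetic, not the library's code:

```go
package main

import (
	"fmt"
	"time"
)

// truncateToPowerOf10 rounds d down to the largest power of ten
// (in nanoseconds) not exceeding it: 15ms -> 10ms, 900us -> 100us.
func truncateToPowerOf10(d time.Duration) time.Duration {
	p := time.Duration(1)
	for p*10 <= d {
		p *= 10
	}
	return p
}

func main() {
	fmt.Println(truncateToPowerOf10(15 * time.Millisecond)) // 10ms
	fmt.Println(truncateToPowerOf10(time.Second))           // 1s
}
```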
@@ -1,33 +1,102 @@
# List
- github.com/Shopify/sarama [MIT LICENSE](https://github.com/Shopify/sarama/blob/master/MIT-LICENSE)
- github.com/Sirupsen/logrus [MIT LICENSE](https://github.com/Sirupsen/logrus/blob/master/LICENSE)
- github.com/armon/go-metrics [MIT LICENSE](https://github.com/armon/go-metrics/blob/master/LICENSE)
- github.com/boltdb/bolt [MIT LICENSE](https://github.com/boltdb/bolt/blob/master/LICENSE)
- github.com/cenkalti/backoff [MIT LICENSE](https://github.com/cenkalti/backoff/blob/master/LICENSE)
- github.com/dancannon/gorethink [APACHE LICENSE](https://github.com/dancannon/gorethink/blob/master/LICENSE)
- github.com/eapache/go-resiliency [MIT LICENSE](https://github.com/eapache/go-resiliency/blob/master/LICENSE)
- github.com/eapache/queue [MIT LICENSE](https://github.com/eapache/queue/blob/master/LICENSE)
- github.com/fsouza/go-dockerclient [BSD LICENSE](https://github.com/fsouza/go-dockerclient/blob/master/LICENSE)
- github.com/go-sql-driver/mysql [MPL LICENSE](https://github.com/go-sql-driver/mysql/blob/master/LICENSE)
- github.com/gogo/protobuf [BSD LICENSE](https://github.com/gogo/protobuf/blob/master/LICENSE)
- github.com/golang/protobuf [BSD LICENSE](https://github.com/golang/protobuf/blob/master/LICENSE)
- github.com/golang/snappy [BSD LICENSE](https://github.com/golang/snappy/blob/master/LICENSE)
- github.com/hashicorp/go-msgpack [BSD LICENSE](https://github.com/hashicorp/go-msgpack/blob/master/LICENSE)
- github.com/hashicorp/raft [MPL LICENSE](https://github.com/hashicorp/raft/blob/master/LICENSE)
- github.com/hashicorp/raft-boltdb [MPL LICENSE](https://github.com/hashicorp/raft-boltdb/blob/master/LICENSE)
- github.com/kardianos/service [ZLIB LICENSE](https://github.com/kardianos/service/blob/master/LICENSE) (License not named but matches word for word with ZLib)
- github.com/kballard/go-shellquote [MIT LICENSE](https://github.com/kballard/go-shellquote/blob/master/LICENSE)
- github.com/lib/pq [MIT LICENSE](https://github.com/lib/pq/blob/master/LICENSE.md)
- github.com/matttproud/golang_protobuf_extensions [APACHE LICENSE](https://github.com/matttproud/golang_protobuf_extensions/blob/master/LICENSE)
- github.com/naoina/go-stringutil [MIT LICENSE](https://github.com/naoina/go-stringutil/blob/master/LICENSE)
- github.com/naoina/toml [MIT LICENSE](https://github.com/naoina/toml/blob/master/LICENSE)
- github.com/prometheus/client_golang [APACHE LICENSE](https://github.com/prometheus/client_golang/blob/master/LICENSE)
- github.com/samuel/go-zookeeper [BSD LICENSE](https://github.com/samuel/go-zookeeper/blob/master/LICENSE)
- github.com/stretchr/objx [MIT LICENSE](github.com/stretchr/objx)
- github.com/stretchr/testify [MIT LICENSE](https://github.com/stretchr/testify/blob/master/LICENCE.txt)
- github.com/wvanbergen/kafka [MIT LICENSE](https://github.com/wvanbergen/kafka/blob/master/LICENSE)
- github.com/wvanbergen/kazoo-go [MIT LICENSE](https://github.com/wvanbergen/kazoo-go/blob/master/MIT-LICENSE)
- gopkg.in/dancannon/gorethink.v1 [APACHE LICENSE](https://github.com/dancannon/gorethink/blob/v1.1.2/LICENSE)
- gopkg.in/mgo.v2 [BSD LICENSE](https://github.com/go-mgo/mgo/blob/v2/LICENSE)
- golang.org/x/crypto/ [BSD LICENSE](https://github.com/golang/crypto/blob/master/LICENSE)
# Licenses of dependencies

When distributed in a binary form, Telegraf may contain portions of the
following works:

- collectd.org [MIT](https://github.com/collectd/go-collectd/blob/master/LICENSE)
- github.com/aerospike/aerospike-client-go [APACHE](https://github.com/aerospike/aerospike-client-go/blob/master/LICENSE)
- github.com/amir/raidman [PUBLIC DOMAIN](https://github.com/amir/raidman/blob/master/UNLICENSE)
- github.com/armon/go-metrics [MIT](https://github.com/armon/go-metrics/blob/master/LICENSE)
- github.com/aws/aws-sdk-go [APACHE](https://github.com/aws/aws-sdk-go/blob/master/LICENSE.txt)
- github.com/beorn7/perks [MIT](https://github.com/beorn7/perks/blob/master/LICENSE)
- github.com/boltdb/bolt [MIT](https://github.com/boltdb/bolt/blob/master/LICENSE)
- github.com/bsm/sarama-cluster [MIT](https://github.com/bsm/sarama-cluster/blob/master/LICENSE)
- github.com/cenkalti/backoff [MIT](https://github.com/cenkalti/backoff/blob/master/LICENSE)
- github.com/chuckpreslar/rcon [MIT](https://github.com/chuckpreslar/rcon#license)
- github.com/couchbase/go-couchbase [MIT](https://github.com/couchbase/go-couchbase/blob/master/LICENSE)
- github.com/couchbase/gomemcached [MIT](https://github.com/couchbase/gomemcached/blob/master/LICENSE)
- github.com/couchbase/goutils [MIT](https://github.com/couchbase/go-couchbase/blob/master/LICENSE)
- github.com/dancannon/gorethink [APACHE](https://github.com/dancannon/gorethink/blob/master/LICENSE)
- github.com/davecgh/go-spew [ISC](https://github.com/davecgh/go-spew/blob/master/LICENSE)
- github.com/docker/docker [APACHE](https://github.com/docker/docker/blob/master/LICENSE)
- github.com/docker/cli [APACHE](https://github.com/docker/cli/blob/master/LICENSE)
- github.com/eapache/go-resiliency [MIT](https://github.com/eapache/go-resiliency/blob/master/LICENSE)
- github.com/eapache/go-xerial-snappy [MIT](https://github.com/eapache/go-xerial-snappy/blob/master/LICENSE)
- github.com/eapache/queue [MIT](https://github.com/eapache/queue/blob/master/LICENSE)
- github.com/eclipse/paho.mqtt.golang [ECLIPSE](https://github.com/eclipse/paho.mqtt.golang/blob/master/LICENSE)
- github.com/fsouza/go-dockerclient [BSD](https://github.com/fsouza/go-dockerclient/blob/master/LICENSE)
- github.com/gobwas/glob [MIT](https://github.com/gobwas/glob/blob/master/LICENSE)
- github.com/google/go-cmp [BSD](https://github.com/google/go-cmp/blob/master/LICENSE)
- github.com/gogo/protobuf [BSD](https://github.com/gogo/protobuf/blob/master/LICENSE)
- github.com/golang/protobuf [BSD](https://github.com/golang/protobuf/blob/master/LICENSE)
- github.com/golang/snappy [BSD](https://github.com/golang/snappy/blob/master/LICENSE)
- github.com/go-logfmt/logfmt [MIT](https://github.com/go-logfmt/logfmt/blob/master/LICENSE)
- github.com/gorilla/mux [BSD](https://github.com/gorilla/mux/blob/master/LICENSE)
- github.com/go-ini/ini [APACHE](https://github.com/go-ini/ini/blob/master/LICENSE)
- github.com/go-ole/go-ole [MPL](http://mattn.mit-license.org/2013)
- github.com/go-sql-driver/mysql [MPL](https://github.com/go-sql-driver/mysql/blob/master/LICENSE)
- github.com/hailocab/go-hostpool [MIT](https://github.com/hailocab/go-hostpool/blob/master/LICENSE)
- github.com/hashicorp/consul [MPL](https://github.com/hashicorp/consul/blob/master/LICENSE)
- github.com/hashicorp/go-msgpack [BSD](https://github.com/hashicorp/go-msgpack/blob/master/LICENSE)
- github.com/hashicorp/raft-boltdb [MPL](https://github.com/hashicorp/raft-boltdb/blob/master/LICENSE)
- github.com/hashicorp/raft [MPL](https://github.com/hashicorp/raft/blob/master/LICENSE)
- github.com/influxdata/tail [MIT](https://github.com/influxdata/tail/blob/master/LICENSE.txt)
- github.com/influxdata/toml [MIT](https://github.com/influxdata/toml/blob/master/LICENSE)
- github.com/influxdata/wlog [MIT](https://github.com/influxdata/wlog/blob/master/LICENSE)
- github.com/jackc/pgx [MIT](https://github.com/jackc/pgx/blob/master/LICENSE)
- github.com/jmespath/go-jmespath [APACHE](https://github.com/jmespath/go-jmespath/blob/master/LICENSE)
- github.com/kardianos/osext [BSD](https://github.com/kardianos/osext/blob/master/LICENSE)
- github.com/kardianos/service [ZLIB](https://github.com/kardianos/service/blob/master/LICENSE) (License not named but matches word for word with ZLib)
- github.com/kballard/go-shellquote [MIT](https://github.com/kballard/go-shellquote/blob/master/LICENSE)
- github.com/lib/pq [MIT](https://github.com/lib/pq/blob/master/LICENSE.md)
- github.com/matttproud/golang_protobuf_extensions [APACHE](https://github.com/matttproud/golang_protobuf_extensions/blob/master/LICENSE)
- github.com/Microsoft/go-winio [MIT](https://github.com/Microsoft/go-winio/blob/master/LICENSE)
- github.com/miekg/dns [BSD](https://github.com/miekg/dns/blob/master/LICENSE)
- github.com/naoina/go-stringutil [MIT](https://github.com/naoina/go-stringutil/blob/master/LICENSE)
- github.com/naoina/toml [MIT](https://github.com/naoina/toml/blob/master/LICENSE)
- github.com/nats-io/go-nats [MIT](https://github.com/nats-io/go-nats/blob/master/LICENSE)
- github.com/nats-io/nats [MIT](https://github.com/nats-io/nats/blob/master/LICENSE)
- github.com/nats-io/nuid [MIT](https://github.com/nats-io/nuid/blob/master/LICENSE)
- github.com/nsqio/go-nsq [MIT](https://github.com/nsqio/go-nsq/blob/master/LICENSE)
- github.com/opentracing-contrib/go-observer [APACHE](https://github.com/opentracing-contrib/go-observer/blob/master/LICENSE)
- github.com/opentracing/opentracing-go [MIT](https://github.com/opentracing/opentracing-go/blob/master/LICENSE)
- github.com/openzipkin/zipkin-go-opentracing [MIT](https://github.com/openzipkin/zipkin-go-opentracing/blob/master/LICENSE)
- github.com/pierrec/lz4 [BSD](https://github.com/pierrec/lz4/blob/master/LICENSE)
- github.com/pierrec/xxHash [BSD](https://github.com/pierrec/xxHash/blob/master/LICENSE)
- github.com/pkg/errors [BSD](https://github.com/pkg/errors/blob/master/LICENSE)
- github.com/pmezard/go-difflib [BSD](https://github.com/pmezard/go-difflib/blob/master/LICENSE)
- github.com/prometheus/client_golang [APACHE](https://github.com/prometheus/client_golang/blob/master/LICENSE)
- github.com/prometheus/client_model [APACHE](https://github.com/prometheus/client_model/blob/master/LICENSE)
- github.com/prometheus/common [APACHE](https://github.com/prometheus/common/blob/master/LICENSE)
- github.com/prometheus/procfs [APACHE](https://github.com/prometheus/procfs/blob/master/LICENSE)
- github.com/rcrowley/go-metrics [BSD](https://github.com/rcrowley/go-metrics/blob/master/LICENSE)
- github.com/samuel/go-zookeeper [BSD](https://github.com/samuel/go-zookeeper/blob/master/LICENSE)
- github.com/satori/go.uuid [MIT](https://github.com/satori/go.uuid/blob/master/LICENSE)
- github.com/shirou/gopsutil [BSD](https://github.com/shirou/gopsutil/blob/master/LICENSE)
- github.com/shirou/w32 [BSD](https://github.com/shirou/w32/blob/master/LICENSE)
- github.com/Shopify/sarama [MIT](https://github.com/Shopify/sarama/blob/master/MIT-LICENSE)
- github.com/Sirupsen/logrus [MIT](https://github.com/Sirupsen/logrus/blob/master/LICENSE)
- github.com/StackExchange/wmi [MIT](https://github.com/StackExchange/wmi/blob/master/LICENSE)
- github.com/stretchr/objx [MIT](https://github.com/stretchr/objx/blob/master/LICENSE.md)
- github.com/soniah/gosnmp [BSD](https://github.com/soniah/gosnmp/blob/master/LICENSE)
- github.com/streadway/amqp [BSD](https://github.com/streadway/amqp/blob/master/LICENSE)
- github.com/stretchr/objx [MIT](https://github.com/stretchr/objx/blob/master/LICENSE.md)
- github.com/stretchr/testify [MIT](https://github.com/stretchr/testify/blob/master/LICENCE.txt)
- github.com/vjeantet/grok [APACHE](https://github.com/vjeantet/grok/blob/master/LICENSE)
- github.com/wvanbergen/kafka [MIT](https://github.com/wvanbergen/kafka/blob/master/LICENSE)
- github.com/wvanbergen/kazoo-go [MIT](https://github.com/wvanbergen/kazoo-go/blob/master/MIT-LICENSE)
- github.com/yuin/gopher-lua [MIT](https://github.com/yuin/gopher-lua/blob/master/LICENSE)
- github.com/zensqlmonitor/go-mssqldb [BSD](https://github.com/zensqlmonitor/go-mssqldb/blob/master/LICENSE.txt)
- golang.org/x/crypto [BSD](https://github.com/golang/crypto/blob/master/LICENSE)
- golang.org/x/net [BSD](https://go.googlesource.com/net/+/master/LICENSE)
- golang.org/x/text [BSD](https://go.googlesource.com/text/+/master/LICENSE)
- golang.org/x/sys [BSD](https://go.googlesource.com/sys/+/master/LICENSE)
- gopkg.in/asn1-ber.v1 [MIT](https://github.com/go-asn1-ber/asn1-ber/blob/v1.2/LICENSE)
- gopkg.in/dancannon/gorethink.v1 [APACHE](https://github.com/dancannon/gorethink/blob/v1.1.2/LICENSE)
- gopkg.in/fatih/pool.v2 [MIT](https://github.com/fatih/pool/blob/v2.0.0/LICENSE)
- gopkg.in/fsnotify.v1 [BSD](https://github.com/fsnotify/fsnotify/blob/v1.4.2/LICENSE)
- gopkg.in/ldap.v2 [MIT](https://github.com/go-ldap/ldap/blob/v2.5.0/LICENSE)
- gopkg.in/mgo.v2 [BSD](https://github.com/go-mgo/mgo/blob/v2/LICENSE)
- gopkg.in/olivere/elastic.v5 [MIT](https://github.com/olivere/elastic/blob/v5.0.38/LICENSE)
- gopkg.in/tomb.v1 [BSD](https://github.com/go-tomb/tomb/blob/v1/LICENSE)
- gopkg.in/yaml.v2 [APACHE](https://github.com/go-yaml/yaml/blob/v2/LICENSE)

docs/PROFILING.md (new file, 24 lines)
@@ -0,0 +1,24 @@
# Telegraf profiling

Telegraf uses the standard package `net/http/pprof`. Over its HTTP server, this package serves runtime profiling data in the format expected by the pprof visualization tool.

By default, profiling is turned off.

To enable profiling, pass an address to the `pprof-addr` config parameter, for example:

```
telegraf --config telegraf.conf --pprof-addr localhost:6060
```

There are several paths to get different profiling information:

To look at the heap profile:

`go tool pprof http://localhost:6060/debug/pprof/heap`

or to look at a 30-second CPU profile:

`go tool pprof http://localhost:6060/debug/pprof/profile?seconds=30`

To view all available profiles, open `http://localhost:6060/debug/pprof/` in your browser.

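For reference, here is a minimal sketch of how `net/http/pprof` exposes these endpoints in an ordinary Go program (illustrative only; Telegraf wires its listener up from the `pprof-addr` flag rather than like this):

```go
package main

import (
	"log"
	"net/http"
	_ "net/http/pprof" // registers the /debug/pprof/* handlers on http.DefaultServeMux
)

func main() {
	// Serving the default mux exposes the profiles,
	// e.g. http://localhost:6060/debug/pprof/heap
	log.Fatal(http.ListenAndServe("localhost:6060", nil))
}
```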
@@ -37,3 +37,9 @@ Telegraf can manage its own service through the --service flag:
| `telegraf.exe --service start` | Start the telegraf service |
| `telegraf.exe --service stop` | Stop the telegraf service |


Troubleshooting common error #1067

When installing as a service in Windows, always double-check that you specify the full path of the config file; otherwise the Windows service will fail to start:

--config C:\"Program Files"\Telegraf\telegraf.conf

File diff suppressed because it is too large
@@ -105,10 +105,11 @@
      "% Privileged Time",
      "% User Time",
      "% Processor Time",
      "% DPC Time",
    ]
    Measurement = "win_cpu"
    # Set to true to include _Total instance when querying for all (*).
    #IncludeTotal=false
    IncludeTotal=true

  [[inputs.win_perf_counters.object]]
    # Disk times and queues
@@ -116,21 +117,54 @@
    Instances = ["*"]
    Counters = [
      "% Idle Time",
      "% Disk Time","% Disk Read Time",
      "% Disk Time",
      "% Disk Read Time",
      "% Disk Write Time",
      "% User Time",
      "Current Disk Queue Length",
      "% Free Space",
      "Free Megabytes",
    ]
    Measurement = "win_disk"
    # Set to true to include _Total instance when querying for all (*).
    #IncludeTotal=false

  [[inputs.win_perf_counters.object]]
    ObjectName = "PhysicalDisk"
    Instances = ["*"]
    Counters = [
      "Disk Read Bytes/sec",
      "Disk Write Bytes/sec",
      "Current Disk Queue Length",
      "Disk Reads/sec",
      "Disk Writes/sec",
      "% Disk Time",
      "% Disk Read Time",
      "% Disk Write Time",
    ]
    Measurement = "win_diskio"

  [[inputs.win_perf_counters.object]]
    ObjectName = "Network Interface"
    Instances = ["*"]
    Counters = [
      "Bytes Received/sec",
      "Bytes Sent/sec",
      "Packets Received/sec",
      "Packets Sent/sec",
      "Packets Received Discarded",
      "Packets Outbound Discarded",
      "Packets Received Errors",
      "Packets Outbound Errors",
    ]
    Measurement = "win_net"

  [[inputs.win_perf_counters.object]]
    ObjectName = "System"
    Counters = [
      "Context Switches/sec",
      "System Calls/sec",
      "Processor Queue Length",
      "System Up Time",
    ]
    Instances = ["------"]
    Measurement = "win_system"
@@ -150,6 +184,10 @@
      "Transition Faults/sec",
      "Pool Nonpaged Bytes",
      "Pool Paged Bytes",
      "Standby Cache Reserve Bytes",
      "Standby Cache Normal Priority Bytes",
      "Standby Cache Core Bytes",

    ]
    # Use 6 x - to remove the Instance bit from the query.
    Instances = ["------"]
@@ -157,6 +195,31 @@
    # Set to true to include _Total instance when querying for all (*).
    #IncludeTotal=false

  [[inputs.win_perf_counters.object]]
    # Example query where the Instance portion must be removed to get data back,
    # such as from the Paging File object.
    ObjectName = "Paging File"
    Counters = [
      "% Usage",
    ]
    Instances = ["_Total"]
    Measurement = "win_swap"

  [[inputs.win_perf_counters.object]]
    ObjectName = "Network Interface"
    Instances = ["*"]
    Counters = [
      "Bytes Sent/sec",
      "Bytes Received/sec",
      "Packets Sent/sec",
      "Packets Received/sec",
      "Packets Received Discarded",
      "Packets Received Errors",
      "Packets Outbound Discarded",
      "Packets Outbound Errors",
    ]


# Windows system plugins using WMI (disabled by default, using
# win_perf_counters over WMI is recommended)

@@ -77,3 +77,40 @@ func compileFilterNoGlob(filters []string) Filter {
	}
	return &out
}

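// IncludeExcludeFilter wraps an optional include filter and an optional
// exclude filter: a string matches only if it passes the include set (when
// present) and is not caught by the exclude set (when present).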
type IncludeExcludeFilter struct {
	include Filter
	exclude Filter
}

func NewIncludeExcludeFilter(
	include []string,
	exclude []string,
) (Filter, error) {
	in, err := Compile(include)
	if err != nil {
		return nil, err
	}

	ex, err := Compile(exclude)
	if err != nil {
		return nil, err
	}

	return &IncludeExcludeFilter{in, ex}, nil
}

func (f *IncludeExcludeFilter) Match(s string) bool {
	if f.include != nil {
		if !f.include.Match(s) {
			return false
		}
	}

	if f.exclude != nil {
		if f.exclude.Match(s) {
			return false
		}
	}
	return true
}

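A quick usage sketch (the glob values here are hypothetical; `Compile` returns a nil filter for an empty list, which is why `Match` checks for nil):

```go
f, err := NewIncludeExcludeFilter([]string{"cpu*"}, []string{"cpu_guest"})
if err != nil {
	// handle the error
}
f.Match("cpu_user")  // true: passes include, not excluded
f.Match("cpu_guest") // false: explicitly excluded
f.Match("mem_free")  // false: does not pass include
```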
@@ -45,9 +45,11 @@ func (b *Buffer) Add(metrics ...telegraf.Metric) {
		select {
		case b.buf <- metrics[i]:
		default:
			b.mu.Lock()
			MetricsDropped.Incr(1)
			<-b.buf
			b.buf <- metrics[i]
			b.mu.Unlock()
		}
	}
}

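In other words, when the buffered channel is full, `Add` pops the oldest metric (`<-b.buf`), counts it in `MetricsDropped`, and pushes the new one; taking `b.mu` keeps that pop-push pair atomic when several goroutines hit the full-buffer path at once.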
@@ -6,6 +6,7 @@ import (
	"fmt"
	"io/ioutil"
	"log"
	"math"
	"os"
	"path/filepath"
	"regexp"
@@ -25,7 +26,6 @@ import (
	"github.com/influxdata/telegraf/plugins/processors"
	"github.com/influxdata/telegraf/plugins/serializers"

	"github.com/influxdata/config"
	"github.com/influxdata/toml"
	"github.com/influxdata/toml/ast"
)
@@ -85,8 +85,8 @@ type AgentConfig struct {
	// ie, if Interval=10s then always collect on :00, :10, :20, etc.
	RoundInterval bool

	// By default, precision will be set to the same timestamp order as the
	// collection interval, with the maximum being 1s.
	// By default or when set to "0s", precision will be set to the same
	// timestamp order as the collection interval, with the maximum being 1s.
	// ie, when interval = "10s", precision will be "1s"
	//     when interval = "250ms", precision will be "1ms"
	// Precision will NOT be used for service inputs. It is up to each individual
@@ -230,10 +230,13 @@ var header = `# Telegraf Configuration
  ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
  flush_jitter = "0s"

  ## By default, precision will be set to the same timestamp order as the
  ## collection interval, with the maximum being 1s.
  ## Precision will NOT be used for service inputs, such as logparser and statsd.
  ## Valid values are "ns", "us" (or "µs"), "ms", "s".
  ## By default or when set to "0s", precision will be set to the same
  ## timestamp order as the collection interval, with the maximum being 1s.
  ## ie, when interval = "10s", precision will be "1s"
  ##     when interval = "250ms", precision will be "1ms"
  ## Precision will NOT be used for service inputs. It is up to each individual
  ## service input to set the timestamp at the appropriate precision.
  ## Valid time units are "ns", "us" (or "µs"), "ms", "s".
  precision = ""

  ## Logging configuration:
@@ -506,6 +509,10 @@ func PrintOutputConfig(name string) error {

func (c *Config) LoadDirectory(path string) error {
	walkfn := func(thispath string, info os.FileInfo, _ error) error {
		if info == nil {
			log.Printf("W! Telegraf is not permitted to read %s", thispath)
			return nil
		}
		if info.IsDir() {
			return nil
		}
@@ -566,7 +573,7 @@ func (c *Config) LoadConfig(path string) error {
	if !ok {
		return fmt.Errorf("%s: invalid configuration", path)
	}
	if err = config.UnmarshalTable(subTable, c.Tags); err != nil {
	if err = toml.UnmarshalTable(subTable, c.Tags); err != nil {
		log.Printf("E! Could not parse [global_tags] config\n")
		return fmt.Errorf("Error parsing %s, %s", path, err)
	}
@@ -579,7 +586,7 @@ func (c *Config) LoadConfig(path string) error {
	if !ok {
		return fmt.Errorf("%s: invalid configuration", path)
	}
	if err = config.UnmarshalTable(subTable, c.Agent); err != nil {
	if err = toml.UnmarshalTable(subTable, c.Agent); err != nil {
		log.Printf("E! Could not parse [agent] config\n")
		return fmt.Errorf("Error parsing %s, %s", path, err)
	}
@@ -716,7 +723,7 @@ func (c *Config) addAggregator(name string, table *ast.Table) error {
		return err
	}

	if err := config.UnmarshalTable(table, aggregator); err != nil {
	if err := toml.UnmarshalTable(table, aggregator); err != nil {
		return err
	}

@@ -736,7 +743,7 @@ func (c *Config) addProcessor(name string, table *ast.Table) error {
		return err
	}

	if err := config.UnmarshalTable(table, processor); err != nil {
	if err := toml.UnmarshalTable(table, processor); err != nil {
		return err
	}

@@ -776,7 +783,7 @@ func (c *Config) addOutput(name string, table *ast.Table) error {
		return err
	}

	if err := config.UnmarshalTable(table, output); err != nil {
	if err := toml.UnmarshalTable(table, output); err != nil {
		return err
	}

@@ -817,7 +824,7 @@ func (c *Config) addInput(name string, table *ast.Table) error {
		return err
	}

	if err := config.UnmarshalTable(table, input); err != nil {
	if err := toml.UnmarshalTable(table, input); err != nil {
		return err
	}

@@ -909,7 +916,7 @@ func buildAggregator(name string, tbl *ast.Table) (*models.AggregatorConfig, err
	conf.Tags = make(map[string]string)
	if node, ok := tbl.Fields["tags"]; ok {
		if subtbl, ok := node.(*ast.Table); ok {
			if err := config.UnmarshalTable(subtbl, conf.Tags); err != nil {
			if err := toml.UnmarshalTable(subtbl, conf.Tags); err != nil {
				log.Printf("Could not parse tags for input %s\n", name)
			}
		}
@@ -1146,7 +1153,7 @@ func buildInput(name string, tbl *ast.Table) (*models.InputConfig, error) {
	cp.Tags = make(map[string]string)
	if node, ok := tbl.Fields["tags"]; ok {
		if subtbl, ok := node.(*ast.Table); ok {
			if err := config.UnmarshalTable(subtbl, cp.Tags); err != nil {
			if err := toml.UnmarshalTable(subtbl, cp.Tags); err != nil {
				log.Printf("E! Could not parse tags for input %s\n", name)
			}
		}
@@ -1226,6 +1233,34 @@ func buildParser(name string, tbl *ast.Table) (parsers.Parser, error) {
		}
	}

	if node, ok := tbl.Fields["collectd_auth_file"]; ok {
		if kv, ok := node.(*ast.KeyValue); ok {
			if str, ok := kv.Value.(*ast.String); ok {
				c.CollectdAuthFile = str.Value
			}
		}
	}

	if node, ok := tbl.Fields["collectd_security_level"]; ok {
		if kv, ok := node.(*ast.KeyValue); ok {
			if str, ok := kv.Value.(*ast.String); ok {
				c.CollectdSecurityLevel = str.Value
			}
		}
	}

	if node, ok := tbl.Fields["collectd_typesdb"]; ok {
		if kv, ok := node.(*ast.KeyValue); ok {
			if ary, ok := kv.Value.(*ast.Array); ok {
				for _, elem := range ary.Value {
					if str, ok := elem.(*ast.String); ok {
						c.CollectdTypesDB = append(c.CollectdTypesDB, str.Value)
					}
				}
			}
		}
	}

	c.MetricName = name

	delete(tbl.Fields, "data_format")
@@ -1233,6 +1268,9 @@ func buildParser(name string, tbl *ast.Table) (parsers.Parser, error) {
	delete(tbl.Fields, "templates")
	delete(tbl.Fields, "tag_keys")
	delete(tbl.Fields, "data_type")
	delete(tbl.Fields, "collectd_auth_file")
	delete(tbl.Fields, "collectd_security_level")
	delete(tbl.Fields, "collectd_typesdb")

	return parsers.NewParser(c)
}
@@ -1241,7 +1279,7 @@ func buildParser(name string, tbl *ast.Table) (parsers.Parser, error) {
// a serializers.Serializer object, and creates it, which can then be added onto
// an Output object.
func buildSerializer(name string, tbl *ast.Table) (serializers.Serializer, error) {
	c := &serializers.Config{}
	c := &serializers.Config{TimestampUnits: time.Duration(1 * time.Second)}

	if node, ok := tbl.Fields["data_format"]; ok {
		if kv, ok := node.(*ast.KeyValue); ok {
@@ -1271,9 +1309,26 @@ func buildSerializer(name string, tbl *ast.Table) (serializers.Serializer, error
		}
	}

	if node, ok := tbl.Fields["json_timestamp_units"]; ok {
		if kv, ok := node.(*ast.KeyValue); ok {
			if str, ok := kv.Value.(*ast.String); ok {
				timestampVal, err := time.ParseDuration(str.Value)
				if err != nil {
					return nil, fmt.Errorf("Unable to parse json_timestamp_units as a duration, %s", err)
				}
				// now that we have a duration, truncate it to the nearest
				// power of ten (just in case)
				nearest_exponent := int64(math.Log10(float64(timestampVal.Nanoseconds())))
				new_nanoseconds := int64(math.Pow(10.0, float64(nearest_exponent)))
				c.TimestampUnits = time.Duration(new_nanoseconds)
			}
		}
	}

	delete(tbl.Fields, "data_format")
	delete(tbl.Fields, "prefix")
	delete(tbl.Fields, "template")
	delete(tbl.Fields, "json_timestamp_units")
	return serializers.NewSerializer(c)
}

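Note the new default here: with `json_timestamp_units` unset, `TimestampUnits` starts at one second, which matches the documented default for JSON output above; an explicit value is parsed as a duration and then truncated down to a power of ten.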
internal/config/testdata/telegraf-agent.toml (vendored, 22 lines changed)
@@ -60,7 +60,7 @@
  # Kafka topic for producer messages
  topic = "telegraf"
  # Telegraf tag to use as a routing key
  # ie, if this tag exists, it's value will be used as the routing key
  # ie, if this tag exists, its value will be used as the routing key
  routing_tag = "host"


@@ -143,19 +143,31 @@
[[inputs.diskio]]
  # no configuration

# read metrics from a Kafka topic
# read metrics from a Kafka 0.9+ topic
[[inputs.kafka_consumer]]
  # topic(s) to consume
  ## kafka brokers
  brokers = ["localhost:9092"]
  ## topic(s) to consume
  topics = ["telegraf"]
  ## the name of the consumer group
  consumer_group = "telegraf_metrics_consumers"
  ## Offset (must be either "oldest" or "newest")
  offset = "oldest"

# read metrics from a Kafka legacy topic
[[inputs.kafka_consumer_legacy]]
  ## topic(s) to consume
  topics = ["telegraf"]
  # an array of Zookeeper connection strings
  zookeeper_peers = ["localhost:2181"]
  # the name of the consumer group
  ## the name of the consumer group
  consumer_group = "telegraf_metrics_consumers"
  # Maximum number of points to buffer between collection intervals
  point_buffer = 100000
  # Offset (must be either "oldest" or "newest")
  ## Offset (must be either "oldest" or "newest")
  offset = "oldest"


# Read metrics from a LeoFS Server via SNMP
[[inputs.leofs]]
  # An array of URI to gather stats about LeoFS.

@@ -1,37 +0,0 @@
package errchan

import (
	"fmt"
	"strings"
)

type ErrChan struct {
	C chan error
}

// New returns an error channel of max length 'n'
// errors can be sent to the ErrChan.C channel, and will be returned when
// ErrChan.Error() is called.
func New(n int) *ErrChan {
	return &ErrChan{
		C: make(chan error, n),
	}
}

// Error closes the ErrChan.C channel and returns an error if there are any
// non-nil errors, otherwise returns nil.
func (e *ErrChan) Error() error {
	close(e.C)

	var out string
	for err := range e.C {
		if err != nil {
			out += "[" + err.Error() + "], "
		}
	}

	if out != "" {
		return fmt.Errorf("Errors encountered: " + strings.TrimRight(out, ", "))
	}
	return nil
}
@@ -45,7 +45,7 @@ func (g *GlobPath) Match() map[string]os.FileInfo {
	if !g.hasMeta {
		out := make(map[string]os.FileInfo)
		info, err := os.Stat(g.path)
		if !os.IsNotExist(err) {
		if err == nil {
			out[g.path] = info
		}
		return out
@@ -55,7 +55,7 @@ func (g *GlobPath) Match() map[string]os.FileInfo {
	files, _ := filepath.Glob(g.path)
	for _, file := range files {
		info, err := os.Stat(file)
		if !os.IsNotExist(err) {
		if err == nil {
			out[file] = info
		}
	}

@@ -1,6 +1,7 @@
package globpath

import (
	"os"
	"runtime"
	"strings"
	"testing"
@@ -28,7 +29,7 @@ func TestCompileAndMatch(t *testing.T) {
	require.NoError(t, err)

	matches := g1.Match()
	assert.Len(t, matches, 3)
	assert.Len(t, matches, 6)
	matches = g2.Match()
	assert.Len(t, matches, 2)
	matches = g3.Match()
@@ -56,7 +57,34 @@ func TestFindRootDir(t *testing.T) {
	}
}

func TestFindNestedTextFile(t *testing.T) {
	dir := getTestdataDir()
	// test super asterisk
	g1, err := Compile(dir + "/**.txt")
	require.NoError(t, err)

	matches := g1.Match()
	assert.Len(t, matches, 1)
}

func getTestdataDir() string {
	_, filename, _, _ := runtime.Caller(1)
	return strings.Replace(filename, "globpath_test.go", "testdata", 1)
}

func TestMatch_ErrPermission(t *testing.T) {
	tests := []struct {
		input    string
		expected map[string]os.FileInfo
	}{
		{"/root/foo", map[string]os.FileInfo{}},
		{"/root/f*", map[string]os.FileInfo{}},
	}

	for _, test := range tests {
		glob, err := Compile(test.input)
		require.NoError(t, err)
		actual := glob.Match()
		require.Equal(t, test.expected, actual)
	}
}

internal/globpath/testdata/nested1/nested2/nested.txt (vendored, new empty file)
@@ -132,6 +132,7 @@ func (f *Filter) Apply(
	return true
}

// IsActive checks whether the filter is active
func (f *Filter) IsActive() bool {
	return f.isActive
}
@@ -139,43 +140,66 @@ func (f *Filter) IsActive() bool {
// shouldNamePass returns true if the metric should pass, false if should drop
// based on the drop/pass filter parameters
func (f *Filter) shouldNamePass(key string) bool {
	if f.namePass != nil {

	pass := func(f *Filter) bool {
		if f.namePass.Match(key) {
			return true
		}
		return false
	}

	if f.nameDrop != nil {
	drop := func(f *Filter) bool {
		if f.nameDrop.Match(key) {
			return false
		}
		return true
	}

	if f.namePass != nil && f.nameDrop != nil {
		return pass(f) && drop(f)
	} else if f.namePass != nil {
		return pass(f)
	} else if f.nameDrop != nil {
		return drop(f)
	}

	return true
}

// shouldFieldPass returns true if the metric should pass, false if should drop
// based on the drop/pass filter parameters
func (f *Filter) shouldFieldPass(key string) bool {
	if f.fieldPass != nil {

	pass := func(f *Filter) bool {
		if f.fieldPass.Match(key) {
			return true
		}
		return false
	}

	if f.fieldDrop != nil {
	drop := func(f *Filter) bool {
		if f.fieldDrop.Match(key) {
			return false
		}
		return true
	}

	if f.fieldPass != nil && f.fieldDrop != nil {
		return pass(f) && drop(f)
	} else if f.fieldPass != nil {
		return pass(f)
	} else if f.fieldDrop != nil {
		return drop(f)
	}

	return true
}

// shouldTagsPass returns true if the metric should pass, false if should drop
// based on the tagdrop/tagpass filter parameters
func (f *Filter) shouldTagsPass(tags map[string]string) bool {
	if f.TagPass != nil {

	pass := func(f *Filter) bool {
		for _, pat := range f.TagPass {
			if pat.filter == nil {
				continue
@@ -189,7 +213,7 @@ func (f *Filter) shouldTagsPass(tags map[string]string) bool {
		return false
	}

	if f.TagDrop != nil {
	drop := func(f *Filter) bool {
		for _, pat := range f.TagDrop {
			if pat.filter == nil {
				continue
@@ -203,6 +227,18 @@ func (f *Filter) shouldTagsPass(tags map[string]string) bool {
		return true
	}

	// Add additional logic for the case where both parameters are set.
	// see: https://github.com/influxdata/telegraf/issues/2860
	if f.TagPass != nil && f.TagDrop != nil {
		// return true only when the tag passes and will not be dropped (true, true).
		// when the same tag would both pass and drop, it is dropped (true, false).
		return pass(f) && drop(f)
	} else if f.TagPass != nil {
		return pass(f)
	} else if f.TagDrop != nil {
		return drop(f)
	}

	return true
}

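In short: with both a pass list and a drop list configured, an item must match the pass list and must not match the drop list to survive, so an entry that appears in both (like `name1` in the tests below) is dropped.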
@@ -357,3 +357,88 @@ func TestFilter_FilterTagsMatches(t *testing.T) {
		"mytag": "foobar",
	}, pretags)
}

// TestFilter_FilterNamePassAndDrop checks the case when
// both parameters are defined
// see: https://github.com/influxdata/telegraf/issues/2860
func TestFilter_FilterNamePassAndDrop(t *testing.T) {

	inputData := []string{"name1", "name2", "name3", "name4"}
	expectedResult := []bool{false, true, false, false}

	f := Filter{
		NamePass: []string{"name1", "name2"},
		NameDrop: []string{"name1", "name3"},
	}

	require.NoError(t, f.Compile())

	for i, name := range inputData {
		assert.Equal(t, f.shouldNamePass(name), expectedResult[i])
	}
}

// TestFilter_FilterFieldPassAndDrop checks the case when
// both parameters are defined
// see: https://github.com/influxdata/telegraf/issues/2860
func TestFilter_FilterFieldPassAndDrop(t *testing.T) {

	inputData := []string{"field1", "field2", "field3", "field4"}
	expectedResult := []bool{false, true, false, false}

	f := Filter{
		FieldPass: []string{"field1", "field2"},
		FieldDrop: []string{"field1", "field3"},
	}

	require.NoError(t, f.Compile())

	for i, field := range inputData {
		assert.Equal(t, f.shouldFieldPass(field), expectedResult[i])
	}
}

// TestFilter_FilterTagsPassAndDrop checks the case when
// both parameters are defined
// see: https://github.com/influxdata/telegraf/issues/2860
func TestFilter_FilterTagsPassAndDrop(t *testing.T) {

	inputData := []map[string]string{
		{"tag1": "1", "tag2": "3"},
		{"tag1": "1", "tag2": "2"},
		{"tag1": "2", "tag2": "1"},
		{"tag1": "4", "tag2": "1"},
	}

	expectedResult := []bool{false, true, false, false}

	filterPass := []TagFilter{
		TagFilter{
			Name:   "tag1",
			Filter: []string{"1", "4"},
		},
	}

	filterDrop := []TagFilter{
		TagFilter{
			Name:   "tag1",
			Filter: []string{"4"},
		},
		TagFilter{
			Name:   "tag2",
			Filter: []string{"3"},
		},
	}

	f := Filter{
		TagDrop: filterDrop,
		TagPass: filterPass,
	}

	require.NoError(t, f.Compile())

	for i, tag := range inputData {
		assert.Equal(t, f.shouldTagsPass(tag), expectedResult[i])
	}

}

@@ -3,6 +3,7 @@ package models
import (
	"log"
	"math"
	"strings"
	"time"

	"github.com/influxdata/telegraf"
@@ -77,7 +78,27 @@ func makemetric(
		}
	}

	for k, v := range tags {
		if strings.HasSuffix(k, `\`) {
			log.Printf("D! Measurement [%s] tag [%s] "+
				"ends with a backslash, skipping", measurement, k)
			delete(tags, k)
			continue
		} else if strings.HasSuffix(v, `\`) {
			log.Printf("D! Measurement [%s] tag [%s] has a value "+
				"ending with a backslash, skipping", measurement, k)
			delete(tags, k)
			continue
		}
	}

	for k, v := range fields {
		if strings.HasSuffix(k, `\`) {
			log.Printf("D! Measurement [%s] field [%s] "+
				"ends with a backslash, skipping", measurement, k)
			delete(fields, k)
			continue
		}
		// Validate uint64 and float64 fields
		// convert all int & uint types to int64
		switch val := v.(type) {
@@ -128,6 +149,8 @@ func makemetric(
			delete(fields, k)
			continue
		}
		case string:
			fields[k] = v
		default:
			fields[k] = v
		}

@@ -114,6 +114,7 @@ func (r *RunningAggregator) reset() {
// for period ticks to tell it when to push and reset the aggregator.
func (r *RunningAggregator) Run(
	acc telegraf.Accumulator,
	now time.Time,
	shutdown chan struct{},
) {
	// The start of the period is truncated to the nearest second.
@@ -132,7 +133,6 @@ func (r *RunningAggregator) Run(
	// 2nd interval: 00:10 - 00:20.5
	// etc.
	//
	now := time.Now()
	r.periodStart = now.Truncate(time.Second)
	truncation := now.Sub(r.periodStart)
	r.periodEnd = r.periodStart.Add(r.Config.Period)

@@ -24,7 +24,7 @@ func TestAdd(t *testing.T) {
	})
	assert.NoError(t, ra.Config.Filter.Compile())
	acc := testutil.Accumulator{}
	go ra.Run(&acc, make(chan struct{}))
	go ra.Run(&acc, time.Now(), make(chan struct{}))

	m := ra.MakeMetric(
		"RITest",
@@ -55,7 +55,7 @@ func TestAddMetricsOutsideCurrentPeriod(t *testing.T) {
	})
	assert.NoError(t, ra.Config.Filter.Compile())
	acc := testutil.Accumulator{}
	go ra.Run(&acc, make(chan struct{}))
	go ra.Run(&acc, time.Now(), make(chan struct{}))

	// metric before current period
	m := ra.MakeMetric(
@@ -113,7 +113,7 @@ func TestAddAndPushOnePeriod(t *testing.T) {
	wg.Add(1)
	go func() {
		defer wg.Done()
		ra.Run(&acc, shutdown)
		ra.Run(&acc, time.Now(), shutdown)
	}()

	m := ra.MakeMetric(

@@ -9,6 +9,7 @@ import (
	"github.com/influxdata/telegraf"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestMakeMetricNoFields(t *testing.T) {
@@ -332,6 +333,129 @@ func TestMakeMetricNameSuffix(t *testing.T) {
	)
}

func TestMakeMetric_TrailingSlash(t *testing.T) {
	now := time.Now()

	tests := []struct {
		name                string
		measurement         string
		fields              map[string]interface{}
		tags                map[string]string
		expectedNil         bool
		expectedMeasurement string
		expectedFields      map[string]interface{}
		expectedTags        map[string]string
	}{
		{
			name:        "Measurement cannot have trailing slash",
			measurement: `cpu\`,
			fields: map[string]interface{}{
				"value": int64(42),
			},
			tags:        map[string]string{},
			expectedNil: true,
		},
		{
			name:        "Field key with trailing slash dropped",
			measurement: `cpu`,
			fields: map[string]interface{}{
				"value": int64(42),
				`bad\`:  `xyzzy`,
			},
			tags:                map[string]string{},
			expectedMeasurement: `cpu`,
			expectedFields: map[string]interface{}{
				"value": int64(42),
			},
			expectedTags: map[string]string{},
		},
		{
			name:        "Field value with trailing slash okay",
			measurement: `cpu`,
			fields: map[string]interface{}{
				"value": int64(42),
				"ok":    `xyzzy\`,
			},
			tags:                map[string]string{},
			expectedMeasurement: `cpu`,
			expectedFields: map[string]interface{}{
				"value": int64(42),
				"ok":    `xyzzy\`,
			},
			expectedTags: map[string]string{},
		},
		{
			name:        "Must have one field after dropped",
			measurement: `cpu`,
			fields: map[string]interface{}{
				"bad": math.NaN(),
			},
			tags:        map[string]string{},
			expectedNil: true,
		},
		{
			name:        "Tag key with trailing slash dropped",
			measurement: `cpu`,
			fields: map[string]interface{}{
				"value": int64(42),
			},
			tags: map[string]string{
				`host\`: "localhost",
				"a":     "x",
			},
			expectedMeasurement: `cpu`,
			expectedFields: map[string]interface{}{
				"value": int64(42),
			},
			expectedTags: map[string]string{
				"a": "x",
			},
		},
		{
			name:        "Tag value with trailing slash dropped",
			measurement: `cpu`,
			fields: map[string]interface{}{
				"value": int64(42),
			},
			tags: map[string]string{
				`host`: `localhost\`,
				"a":    "x",
			},
			expectedMeasurement: `cpu`,
			expectedFields: map[string]interface{}{
				"value": int64(42),
			},
			expectedTags: map[string]string{
				"a": "x",
			},
		},
	}

	ri := NewRunningInput(&testInput{}, &InputConfig{
		Name: "TestRunningInput",
	})

	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			m := ri.MakeMetric(
				tc.measurement,
				tc.fields,
				tc.tags,
				telegraf.Untyped,
				now)

			if tc.expectedNil {
				require.Nil(t, m)
			} else {
				require.NotNil(t, m)
				require.Equal(t, tc.expectedMeasurement, m.Name())
				require.Equal(t, tc.expectedFields, m.Fields())
				require.Equal(t, tc.expectedTags, m.Tags())
			}
		})
	}
}

type testInput struct{}

func (t *testInput) Description() string { return "" }

@@ -2,6 +2,7 @@ package models

import (
	"log"
	"sync"
	"time"

	"github.com/influxdata/telegraf"
@@ -34,6 +35,9 @@ type RunningOutput struct {

	metrics     *buffer.Buffer
	failMetrics *buffer.Buffer

	// Guards against concurrent calls to the Output as described in #3009
	sync.Mutex
}

func NewRunningOutput(
@@ -90,6 +94,9 @@ func NewRunningOutput(
// AddMetric adds a metric to the output. This function can also write cached
// points if FlushBufferWhenFull is true.
func (ro *RunningOutput) AddMetric(m telegraf.Metric) {
	if m == nil {
		return
	}
	// Filter any tagexclude/taginclude parameters before adding metric
	if ro.Config.Filter.IsActive() {
		// In order to filter out tags, we need to create a new metric, since
@@ -119,9 +126,9 @@ func (ro *RunningOutput) AddMetric(m telegraf.Metric) {
// Write writes all cached points to this output.
func (ro *RunningOutput) Write() error {
	nFails, nMetrics := ro.failMetrics.Len(), ro.metrics.Len()
	ro.BufferSize.Set(int64(nFails + nMetrics))
	log.Printf("D! Output [%s] buffer fullness: %d / %d metrics. ",
		ro.Name, nFails+nMetrics, ro.MetricBufferLimit)
	ro.BufferSize.Incr(int64(nFails + nMetrics))
	var err error
	if !ro.failMetrics.IsEmpty() {
		// how many batches of failed writes we need to write.
@@ -166,6 +173,8 @@ func (ro *RunningOutput) write(metrics []telegraf.Metric) error {
	if nMetrics == 0 {
		return nil
	}
	ro.Lock()
	defer ro.Unlock()
	start := time.Now()
	err := ro.Output.Write(metrics)
	elapsed := time.Since(start)
@@ -173,7 +182,6 @@ func (ro *RunningOutput) write(metrics []telegraf.Metric) error {
		log.Printf("D! Output [%s] wrote batch of %d metrics in %s\n",
			ro.Name, nMetrics, elapsed)
		ro.MetricsWritten.Incr(int64(nMetrics))
		ro.BufferSize.Incr(-int64(nMetrics))
		ro.WriteTime.Incr(elapsed.Nanoseconds())
	}
	return err

@@ -75,6 +75,23 @@ func BenchmarkRunningOutputAddFailWrites(b *testing.B) {
	}
}

func TestAddingNilMetric(t *testing.T) {
	conf := &OutputConfig{
		Filter: Filter{},
	}

	m := &mockOutput{}
	ro := NewRunningOutput("test", m, conf, 1000, 10000)

	ro.AddMetric(nil)
	ro.AddMetric(nil)
	ro.AddMetric(nil)

	err := ro.Write()
	assert.NoError(t, err)
	assert.Len(t, m.Metrics(), 0)
}

// Test that NameDrop filters get properly applied.
func TestRunningOutput_DropFilter(t *testing.T) {
	conf := &OutputConfig{

@@ -1,11 +1,15 @@
package models

import (
	"sync"

	"github.com/influxdata/telegraf"
)

type RunningProcessor struct {
	Name      string
	Name string

	sync.Mutex
	Processor telegraf.Processor
	Config    *ProcessorConfig
}
@@ -24,6 +28,9 @@ type ProcessorConfig struct {
}

func (rp *RunningProcessor) Apply(in ...telegraf.Metric) []telegraf.Metric {
	rp.Lock()
	defer rp.Unlock()

	ret := []telegraf.Metric{}

	for _, metric := range in {

@@ -4,11 +4,14 @@ import (
	"io"
	"log"
	"os"
	"regexp"
	"time"

	"github.com/influxdata/wlog"
)

var prefixRegex = regexp.MustCompile("^[DIWE]!")

// newTelegrafWriter returns a logging-wrapped writer.
func newTelegrafWriter(w io.Writer) io.Writer {
	return &telegrafLog{
@@ -21,7 +24,13 @@ type telegrafLog struct {
}

func (t *telegrafLog) Write(b []byte) (n int, err error) {
	return t.writer.Write(append([]byte(time.Now().UTC().Format(time.RFC3339)+" "), b...))
	var line []byte
	if !prefixRegex.Match(b) {
		line = append([]byte(time.Now().UTC().Format(time.RFC3339)+" I! "), b...)
	} else {
		line = append([]byte(time.Now().UTC().Format(time.RFC3339)+" "), b...)
	}
	return t.writer.Write(line)
}

// SetupLogging configures the logging output.

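The effect: a log line that does not already carry one of the `D!`, `I!`, `W!`, `E!` level prefixes is now stamped with `I!` by default, so plain `log.Printf` calls still produce a leveled entry (the `TestAddDefaultLogLevel` test below exercises exactly this).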
@@ -51,6 +51,19 @@ func TestErrorWriteLogToFile(t *testing.T) {
	assert.Equal(t, f[19:], []byte("Z E! TEST\n"))
}

func TestAddDefaultLogLevel(t *testing.T) {
	tmpfile, err := ioutil.TempFile("", "")
	assert.NoError(t, err)
	defer func() { os.Remove(tmpfile.Name()) }()

	SetupLogging(true, false, tmpfile.Name())
	log.Printf("TEST")

	f, err := ioutil.ReadFile(tmpfile.Name())
	assert.NoError(t, err)
	assert.Equal(t, f[19:], []byte("Z I! TEST\n"))
}

func BenchmarkTelegrafLogWrite(b *testing.B) {
	var msg = []byte("test")
	var buf bytes.Buffer

metric.go (16 lines changed)
@@ -2,9 +2,6 @@ package telegraf

import (
	"time"

	// TODO remove
	"github.com/influxdata/influxdb/client/v2"
)

// ValueType is an enumeration of metric types that represent a simple value.
@@ -19,8 +16,15 @@ const (
)

type Metric interface {
	// Serialize serializes the metric into a line-protocol byte buffer,
	// including a newline at the end.
	Serialize() []byte
	String() string // convenience function for string(Serialize())
	// same as Serialize, but avoids an allocation.
	// returns number of bytes copied into dst.
	SerializeTo(dst []byte) int
	// String is the same as Serialize, but returns a string.
	String() string
	// Copy deep-copies the metric.
	Copy() Metric
	// Split will attempt to return multiple metrics with the same timestamp
	// whose string representations are no longer than maxSize.
@@ -55,8 +59,4 @@ type Metric interface {
	// aggregator things:
	SetAggregate(bool)
	IsAggregate() bool

	// Point returns a influxdb client.Point object
	// TODO remove this function
	Point() *client.Point
}

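A usage sketch for the new method (illustrative, with a made-up fallback policy; `appendSerialized` is not part of the change): `SerializeTo` copies into a caller-owned buffer and returns the number of bytes written, letting a hot serialization path reuse one scratch buffer instead of allocating per metric:

```go
// appendSerialized appends m's line-protocol form to out, reusing buf.
func appendSerialized(out, buf []byte, m telegraf.Metric) []byte {
	n := m.SerializeTo(buf)
	if n == len(buf) {
		// buf may have been too small; fall back to the allocating path
		return append(out, m.Serialize()...)
	}
	return append(out, buf[:n]...)
}
```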
@@ -20,8 +20,14 @@ var (

	// stringFieldEscaper is for escaping string field values only.
	// see https://docs.influxdata.com/influxdb/v1.0/write_protocols/line_protocol_tutorial/#special-characters-and-keywords
	stringFieldEscaper = strings.NewReplacer(`"`, `\"`)
	stringFieldUnEscaper = strings.NewReplacer(`\"`, `"`)
	stringFieldEscaper = strings.NewReplacer(
		`"`, `\"`,
		`\`, `\\`,
	)
	stringFieldUnEscaper = strings.NewReplacer(
		`\"`, `"`,
		`\\`, `\`,
	)
)

func escape(s string, t string) string {

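A quick illustration of what the widened replacer does (a standalone snippet, not part of the change):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	esc := strings.NewReplacer(`"`, `\"`, `\`, `\\`)
	// Both the backslash and the quote are now escaped on write:
	fmt.Println(esc.Replace(`x\"y`)) // prints x\\\"y
	fmt.Println(esc.Replace(`x\`))   // prints x\\
}
```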
metric/metric.go (113 lines changed)
@@ -6,12 +6,10 @@ import (
	"hash/fnv"
	"sort"
	"strconv"
	"strings"
	"time"

	"github.com/influxdata/telegraf"

	// TODO remove
	"github.com/influxdata/influxdb/client/v2"
)

const MaxInt = int(^uint(0) >> 1)
@@ -23,11 +21,14 @@ func New(
	t time.Time,
	mType ...telegraf.ValueType,
) (telegraf.Metric, error) {
	if len(fields) == 0 {
		return nil, fmt.Errorf("Metric cannot be made without any fields")
	}
	if len(name) == 0 {
		return nil, fmt.Errorf("Metric cannot be made with an empty name")
		return nil, fmt.Errorf("missing measurement name")
	}
	if len(fields) == 0 {
		return nil, fmt.Errorf("%s: must have one or more fields", name)
	}
	if strings.HasSuffix(name, `\`) {
		return nil, fmt.Errorf("%s: measurement name cannot end with a backslash", name)
	}

	var thisType telegraf.ValueType
@@ -47,13 +48,25 @@ func New(
	// pre-allocate exact size of the tags slice
	taglen := 0
	for k, v := range tags {
		// TODO check that length of tag key & value are > 0
		if strings.HasSuffix(k, `\`) {
			return nil, fmt.Errorf("%s: tag key cannot end with a backslash: %s", name, k)
		}
		if strings.HasSuffix(v, `\`) {
			return nil, fmt.Errorf("%s: tag value cannot end with a backslash: %s", name, v)
		}

		if len(k) == 0 || len(v) == 0 {
			continue
		}
		taglen += 2 + len(escape(k, "tagkey")) + len(escape(v, "tagval"))
	}
	m.tags = make([]byte, taglen)

	i := 0
	for k, v := range tags {
		if len(k) == 0 || len(v) == 0 {
			continue
		}
		m.tags[i] = ','
		i++
		i += copy(m.tags[i:], escape(k, "tagkey"))
@@ -65,6 +78,10 @@ func New(
	// pre-allocate capacity of the fields slice
	fieldlen := 0
	for k, _ := range fields {
		if strings.HasSuffix(k, `\`) {
			return nil, fmt.Errorf("%s: field key cannot end with a backslash: %s", name, k)
		}

		// 10 bytes is completely arbitrary, but will at least prevent some
		// amount of allocations. There's a small possibility this will create
		// slightly more allocations for a metric that has many short fields.
@@ -85,8 +102,31 @@ func New(
}

// indexUnescapedByte finds the index of the first byte equal to b in buf that
// is not escaped. Returns -1 if not found.
// is not escaped. Does not allow the escape char to be escaped. Returns -1 if
// not found.
func indexUnescapedByte(buf []byte, b byte) int {
	var keyi int
	for {
		i := bytes.IndexByte(buf[keyi:], b)
		if i == -1 {
			return -1
		} else if i == 0 {
			break
		}
		keyi += i
		if buf[keyi-1] != '\\' {
			break
		} else {
			keyi++
		}
	}
	return keyi
}

// indexUnescapedByteBackslashEscaping finds the index of the first byte equal
// to b in buf that is not escaped. Allows for the escape char `\` to be
// escaped. Returns -1 if not found.
func indexUnescapedByteBackslashEscaping(buf []byte, b byte) int {
	var keyi int
	for {
		i := bytes.IndexByte(buf[keyi:], b)
@@ -137,11 +177,6 @@ type metric struct {
	nsec int64
}

func (m *metric) Point() *client.Point {
	c, _ := client.NewPoint(m.Name(), m.Tags(), m.Fields(), m.Time())
	return c
}

func (m *metric) String() string {
	return string(m.name) + string(m.tags) + " " + string(m.fields) + " " + string(m.t) + "\n"
}
@@ -178,8 +213,50 @@ func (m *metric) Serialize() []byte {
	return tmp
}

func (m *metric) SerializeTo(dst []byte) int {
	i := 0
	if i >= len(dst) {
		return i
	}

	i += copy(dst[i:], m.name)
	if i >= len(dst) {
		return i
	}

	i += copy(dst[i:], m.tags)
	if i >= len(dst) {
		return i
	}

	dst[i] = ' '
	i++
	if i >= len(dst) {
		return i
	}

	i += copy(dst[i:], m.fields)
	if i >= len(dst) {
		return i
	}

	dst[i] = ' '
	i++
	if i >= len(dst) {
		return i
	}

	i += copy(dst[i:], m.t)
	if i >= len(dst) {
		return i
	}
	dst[i] = '\n'

	return i + 1
}

func (m *metric) Split(maxSize int) []telegraf.Metric {
	if m.Len() < maxSize {
	if m.Len() <= maxSize {
		return []telegraf.Metric{m}
	}
	var out []telegraf.Metric
@@ -209,7 +286,7 @@ func (m *metric) Split(maxSize int) []telegraf.Metric {

		// if true, then we need to create a metric _not_ including the currently
		// selected field
		if len(m.fields[i:j])+len(fields)+constant > maxSize {
		if len(m.fields[i:j])+len(fields)+constant >= maxSize {
			// if false, then we'll create a metric including the currently
			// selected field anyways. This means that the given maxSize is too
			// small for a single field to fit.
@@ -247,7 +324,7 @@ func (m *metric) Fields() map[string]interface{} {
			// end index of field value
			var i3 int
			if m.fields[i:][i2] == '"' {
				i3 = indexUnescapedByte(m.fields[i:][i2+1:], '"')
				i3 = indexUnescapedByteBackslashEscaping(m.fields[i:][i2+1:], '"')
				if i3 == -1 {
					i3 = len(m.fields[i:])
				}
@@ -263,7 +340,7 @@ func (m *metric) Fields() map[string]interface{} {
			case '"':
				// string field
				fieldMap[unescape(string(m.fields[i:][0:i1]), "fieldkey")] = unescape(string(m.fields[i:][i2+1:i3-1]), "fieldval")
			case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
			case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
				// number field
				switch m.fields[i:][i3-1] {
				case 'i':

@@ -10,6 +10,7 @@ import (
|
||||
"github.com/influxdata/telegraf"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestNewMetric(t *testing.T) {
|
||||
@@ -30,7 +31,7 @@ func TestNewMetric(t *testing.T) {
|
||||
assert.Equal(t, tags, m.Tags())
|
||||
assert.Equal(t, fields, m.Fields())
|
||||
assert.Equal(t, "cpu", m.Name())
|
||||
assert.Equal(t, now, m.Time())
|
||||
assert.Equal(t, now.UnixNano(), m.Time().UnixNano())
|
||||
assert.Equal(t, now.UnixNano(), m.UnixNano())
|
||||
}
|
||||
|
||||
@@ -249,11 +250,15 @@ func TestNewMetric_Fields(t *testing.T) {
|
||||
"host": "localhost",
|
||||
}
|
||||
fields := map[string]interface{}{
|
||||
"float": float64(1),
|
||||
"int": int64(1),
|
||||
"bool": true,
|
||||
"false": false,
|
||||
"string": "test",
|
||||
"float": float64(1),
|
||||
"int": int64(1),
|
||||
"bool": true,
|
||||
"false": false,
|
||||
"string": "test",
|
||||
"quote_string": `x"y`,
|
||||
"backslash_quote_string": `x\"y`,
|
||||
"backslash": `x\y`,
|
||||
"ends_with_backslash": `x\`,
|
||||
}
|
||||
m, err := New("cpu", tags, fields, now)
|
||||
assert.NoError(t, err)
|
||||
@@ -366,7 +371,7 @@ func TestIndexUnescapedByte(t *testing.T) {
|
||||
{
|
||||
in: []byte(`foo\\bar`),
|
||||
b: 'b',
|
||||
expected: 5,
|
||||
expected: -1,
|
||||
},
|
||||
{
|
||||
in: []byte(`foobar`),
|
||||
@@ -409,7 +414,7 @@ func TestNewGaugeMetric(t *testing.T) {
|
||||
assert.Equal(t, tags, m.Tags())
|
||||
assert.Equal(t, fields, m.Fields())
|
||||
assert.Equal(t, "cpu", m.Name())
|
||||
assert.Equal(t, now, m.Time())
|
||||
assert.Equal(t, now.UnixNano(), m.Time().UnixNano())
|
||||
assert.Equal(t, now.UnixNano(), m.UnixNano())
|
||||
}
|
||||
|
||||
@@ -431,7 +436,7 @@ func TestNewCounterMetric(t *testing.T) {
|
||||
assert.Equal(t, tags, m.Tags())
|
||||
assert.Equal(t, fields, m.Fields())
|
||||
assert.Equal(t, "cpu", m.Name())
|
||||
assert.Equal(t, now, m.Time())
|
||||
assert.Equal(t, now.UnixNano(), m.Time().UnixNano())
|
||||
assert.Equal(t, now.UnixNano(), m.UnixNano())
|
||||
}
|
||||
|
||||
@@ -458,7 +463,7 @@ func TestSplitMetric(t *testing.T) {
|
||||
assert.Len(t, split70, 3)
|
||||
|
||||
split60 := m.Split(60)
|
||||
assert.Len(t, split60, 4)
|
||||
assert.Len(t, split60, 5)
|
||||
}
|
||||
|
||||
// test splitting metric into various max lengths
|
||||
@@ -578,6 +583,42 @@ func TestSplitMetric_OneField(t *testing.T) {
|
||||
assert.Equal(t, "cpu,host=localhost float=100001 1480940990034083306\n", split[0].String())
|
||||
}
|
||||
|
||||
func TestSplitMetric_ExactSize(t *testing.T) {
|
||||
now := time.Unix(0, 1480940990034083306)
|
||||
tags := map[string]string{
|
||||
"host": "localhost",
|
||||
}
|
||||
fields := map[string]interface{}{
|
||||
"float": float64(100001),
|
||||
"int": int64(100001),
|
||||
"bool": true,
|
||||
"false": false,
|
||||
"string": "test",
|
||||
}
|
||||
m, err := New("cpu", tags, fields, now)
|
||||
assert.NoError(t, err)
|
||||
actual := m.Split(m.Len())
|
||||
// check that no copy was made
|
||||
require.Equal(t, &m, &actual[0])
|
||||
}
|
||||
|
||||
func TestSplitMetric_NoRoomForNewline(t *testing.T) {
|
||||
now := time.Unix(0, 1480940990034083306)
|
||||
tags := map[string]string{
|
||||
"host": "localhost",
|
||||
}
|
||||
fields := map[string]interface{}{
|
||||
"float": float64(100001),
|
||||
"int": int64(100001),
|
||||
"bool": true,
|
||||
"false": false,
|
||||
}
|
||||
m, err := New("cpu", tags, fields, now)
|
||||
assert.NoError(t, err)
|
||||
actual := m.Split(m.Len() - 1)
|
||||
require.Equal(t, 2, len(actual))
|
||||
}
|
||||
|
||||
func TestNewMetricAggregate(t *testing.T) {
|
||||
now := time.Now()
|
||||
|
||||
@@ -595,25 +636,6 @@ func TestNewMetricAggregate(t *testing.T) {
|
||||
assert.True(t, m.IsAggregate())
|
||||
}
|
||||
|
||||
func TestNewMetricPoint(t *testing.T) {
|
||||
now := time.Now()
|
||||
|
||||
tags := map[string]string{
|
||||
"host": "localhost",
|
||||
}
|
||||
fields := map[string]interface{}{
|
||||
"usage_idle": float64(99),
|
||||
}
|
||||
m, err := New("cpu", tags, fields, now)
|
||||
assert.NoError(t, err)
|
||||
|
||||
p := m.Point()
|
||||
|
||||
assert.Equal(t, fields, m.Fields())
|
||||
assert.Equal(t, fields, p.Fields())
|
||||
assert.Equal(t, "cpu", p.Name())
|
||||
}
|
||||
|
||||
func TestNewMetricString(t *testing.T) {
|
||||
now := time.Now()
|
||||
|
||||
@@ -644,3 +666,72 @@ func TestNewMetricFailNaN(t *testing.T) {
|
||||
_, err := New("cpu", tags, fields, now)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestEmptyTagValueOrKey(t *testing.T) {
|
||||
now := time.Now()
|
||||
|
||||
tags := map[string]string{
|
||||
"host": "localhost",
|
||||
"emptytag": "",
|
||||
"": "valuewithoutkey",
|
||||
}
|
||||
fields := map[string]interface{}{
|
||||
"usage_idle": float64(99),
|
||||
}
|
||||
m, err := New("cpu", tags, fields, now)
|
||||
|
||||
assert.True(t, m.HasTag("host"))
|
||||
assert.False(t, m.HasTag("emptytag"))
|
||||
assert.Equal(t,
|
||||
fmt.Sprintf("cpu,host=localhost usage_idle=99 %d\n", now.UnixNano()),
|
||||
m.String())
|
||||
|
||||
assert.NoError(t, err)
|
||||
|
||||
}
|
||||
|
||||
func TestNewMetric_TrailingSlash(t *testing.T) {
|
||||
now := time.Now()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
tags map[string]string
|
||||
fields map[string]interface{}
|
||||
}{
|
||||
{
|
||||
name: `cpu\`,
|
||||
fields: map[string]interface{}{
|
||||
"value": int64(42),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "cpu",
|
||||
fields: map[string]interface{}{
|
||||
`value\`: "x",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "cpu",
|
||||
tags: map[string]string{
|
||||
`host\`: "localhost",
|
||||
},
|
||||
fields: map[string]interface{}{
|
||||
"value": int64(42),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "cpu",
|
||||
tags: map[string]string{
|
||||
"host": `localhost\`,
|
||||
},
|
||||
fields: map[string]interface{}{
|
||||
"value": int64(42),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
_, err := New(tc.name, tc.tags, tc.fields, now)
|
||||
assert.Error(t, err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
@@ -40,10 +41,21 @@ const (
|
||||
)
|
||||
|
||||
func Parse(buf []byte) ([]telegraf.Metric, error) {
|
||||
return ParseWithDefaultTime(buf, time.Now())
|
||||
return ParseWithDefaultTimePrecision(buf, time.Now(), "")
|
||||
}
|
||||
|
||||
func ParseWithDefaultTime(buf []byte, t time.Time) ([]telegraf.Metric, error) {
|
||||
return ParseWithDefaultTimePrecision(buf, t, "")
|
||||
}
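
The new precision-aware entry point leaves nanosecond input untouched and rescales coarser timestamps. A minimal usage sketch with a millisecond-precision line (values taken from the TestParsePrecision table below):

```go
// "ms" tells the parser the trailing timestamp is in milliseconds; the
// resulting metric reports UnixNano() == 1491847420123000000.
metrics, err := ParseWithDefaultTimePrecision(
    []byte("test v=42 1491847420123\n"), time.Now(), "ms")
```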

func ParseWithDefaultTimePrecision(
    buf []byte,
    t time.Time,
    precision string,
) ([]telegraf.Metric, error) {
    if len(buf) == 0 {
        return []telegraf.Metric{}, nil
    }
    if len(buf) <= 6 {
        return []telegraf.Metric{}, makeError("buffer too short", buf, 0)
    }
@@ -60,7 +72,7 @@ func ParseWithDefaultTime(buf []byte, t time.Time) ([]telegraf.Metric, error) {
            continue
        }

        m, err := parseMetric(buf[i:i+j], t)
        m, err := parseMetric(buf[i:i+j], t, precision)
        if err != nil {
            i += j + 1 // increment i past the previous newline
            errStr += " " + err.Error()
@@ -77,7 +89,10 @@ func ParseWithDefaultTime(buf []byte, t time.Time) ([]telegraf.Metric, error) {
    return metrics, nil
}

func parseMetric(buf []byte, defaultTime time.Time) (telegraf.Metric, error) {
func parseMetric(buf []byte,
    defaultTime time.Time,
    precision string,
) (telegraf.Metric, error) {
    var dTime string
    // scan the first block which is measurement[,tag1=value1,tag2=value=2...]
    pos, key, err := scanKey(buf, 0)
@@ -111,9 +126,23 @@ func parseMetric(buf []byte, defaultTime time.Time) (telegraf.Metric, error) {
        return nil, err
    }

    // apply precision multiplier
    var nsec int64
    multiplier := getPrecisionMultiplier(precision)
    if len(ts) > 0 && multiplier > 1 {
        tsint, err := parseIntBytes(ts, 10, 64)
        if err != nil {
            return nil, err
        }

        nsec = multiplier * tsint
        ts = []byte(strconv.FormatInt(nsec, 10))
    }

    m := &metric{
        fields: fields,
        t:      ts,
        nsec:   nsec,
    }

    // parse out the measurement name
@@ -625,3 +654,21 @@ func makeError(reason string, buf []byte, i int) error {
    return fmt.Errorf("metric parsing error, reason: [%s], buffer: [%s], index: [%d]",
        reason, buf, i)
}

// getPrecisionMultiplier will return a multiplier for the precision specified.
func getPrecisionMultiplier(precision string) int64 {
    d := time.Nanosecond
    switch precision {
    case "u":
        d = time.Microsecond
    case "ms":
        d = time.Millisecond
    case "s":
        d = time.Second
    case "m":
        d = time.Minute
    case "h":
        d = time.Hour
    }
    return int64(d)
}
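
The multiplier is just the requested precision's duration expressed in nanoseconds, so rescaling a timestamp is a single multiplication. A small worked example:

```go
multiplier := getPrecisionMultiplier("ms") // int64(time.Millisecond) == 1e6
tsint := int64(1491847420123)              // millisecond-precision timestamp
nsec := multiplier * tsint                 // 1491847420123000000 nanoseconds
```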

@@ -44,6 +44,9 @@ cpu,host=foo,datacenter=us-east idle=99,busy=1i,b=true,s="string"
cpu,host=foo,datacenter=us-east idle=99,busy=1i,b=true,s="string"
`

const negMetrics = `weather,host=local temp=-99i,temp_float=-99.4 1465839830100400200
`

// some metrics are invalid
const someInvalid = `cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
@@ -85,6 +88,26 @@ func TestParse(t *testing.T) {
    }
}

func TestParseNegNumbers(t *testing.T) {
    metrics, err := Parse([]byte(negMetrics))
    assert.NoError(t, err)
    assert.Len(t, metrics, 1)

    assert.Equal(t,
        map[string]interface{}{
            "temp":       int64(-99),
            "temp_float": float64(-99.4),
        },
        metrics[0].Fields(),
    )
    assert.Equal(t,
        map[string]string{
            "host": "local",
        },
        metrics[0].Tags(),
    )
}

func TestParseErrors(t *testing.T) {
    start := time.Now()
    metrics, err := Parse([]byte(someInvalid))
@@ -341,6 +364,41 @@ func TestParseNegativeTimestamps(t *testing.T) {
    }
}

func TestParsePrecision(t *testing.T) {
    for _, tt := range []struct {
        line      string
        precision string
        expected  int64
    }{
        {"test v=42 1491847420", "s", 1491847420000000000},
        {"test v=42 1491847420123", "ms", 1491847420123000000},
        {"test v=42 1491847420123456", "u", 1491847420123456000},
        {"test v=42 1491847420123456789", "ns", 1491847420123456789},

        {"test v=42 1491847420123456789", "1s", 1491847420123456789},
        {"test v=42 1491847420123456789", "asdf", 1491847420123456789},
    } {
        metrics, err := ParseWithDefaultTimePrecision(
            []byte(tt.line+"\n"), time.Now(), tt.precision)
        assert.NoError(t, err)
        assert.Equal(t, tt.expected, metrics[0].UnixNano())
    }
}

func TestParsePrecisionUnsetTime(t *testing.T) {
    for _, tt := range []struct {
        line      string
        precision string
    }{
        {"test v=42", "s"},
        {"test v=42", "ns"},
    } {
        _, err := ParseWithDefaultTimePrecision(
            []byte(tt.line+"\n"), time.Now(), tt.precision)
        assert.NoError(t, err)
    }
}

func TestParseMaxKeyLength(t *testing.T) {
    key := ""
    for {

159 metric/reader.go Normal file
@@ -0,0 +1,159 @@
package metric

import (
    "io"

    "github.com/influxdata/telegraf"
)

type state int

const (
    _ state = iota
    // normal state copies whole metrics into the given buffer until we can't
    // fit the next metric.
    normal
    // split state means that we have a metric that we were able to split, so
    // that we can fit it into multiple metrics (and calls to Read)
    split
    // overflow state means that we have a metric that didn't fit into a single
    // buffer, and needs to be split across multiple calls to Read.
    overflow
    // splitOverflow state means that a split metric didn't fit into a single
    // buffer, and needs to be split across multiple calls to Read.
    splitOverflow
    // done means we're done reading metrics, and now always return (0, io.EOF)
    done
)

type reader struct {
    metrics      []telegraf.Metric
    splitMetrics []telegraf.Metric
    buf          []byte
    state        state

    // metric index
    iM int
    // split metric index
    iSM int
    // buffer index
    iB int
}

func NewReader(metrics []telegraf.Metric) io.Reader {
    return &reader{
        metrics: metrics,
        state:   normal,
    }
}

func (r *reader) Read(p []byte) (n int, err error) {
    var i int
    switch r.state {
    case done:
        return 0, io.EOF
    case normal:
        for {
            // this for-loop is the sunny-day scenario, where we are given a
            // buffer that is large enough to hold at least a single metric.
            // all of the cases below it are edge-cases.
            if r.metrics[r.iM].Len() <= len(p[i:]) {
                i += r.metrics[r.iM].SerializeTo(p[i:])
            } else {
                break
            }
            r.iM++
            if r.iM == len(r.metrics) {
                r.state = done
                return i, io.EOF
            }
        }

        // if we haven't written any bytes, check if we can split the current
        // metric into multiple full metrics at a smaller size.
        if i == 0 {
            tmp := r.metrics[r.iM].Split(len(p))
            if len(tmp) > 1 {
                r.splitMetrics = tmp
                r.state = split
                if r.splitMetrics[0].Len() <= len(p) {
                    i += r.splitMetrics[0].SerializeTo(p)
                    r.iSM = 1
                } else {
                    // splitting didn't quite work, so we'll drop down and
                    // overflow the metric.
                    r.state = normal
                    r.iSM = 0
                }
            }
        }

        // if we haven't written any bytes and we're not at the end of the metrics
        // slice, then it means we have a single metric that is larger than the
        // provided buffer.
        if i == 0 {
            r.buf = r.metrics[r.iM].Serialize()
            i += copy(p, r.buf[r.iB:])
            r.iB += i
            r.state = overflow
        }

    case split:
        if r.splitMetrics[r.iSM].Len() <= len(p) {
            // write the current split metric
            i += r.splitMetrics[r.iSM].SerializeTo(p)
            r.iSM++
            if r.iSM >= len(r.splitMetrics) {
                // done writing the current split metrics
                r.iSM = 0
                r.iM++
                if r.iM == len(r.metrics) {
                    r.state = done
                    return i, io.EOF
                }
                r.state = normal
            }
        } else {
            // This would only happen if we split the metric, and then a
            // subsequent buffer was smaller than the initial one given,
            // so that our split metric no longer fits.
            r.buf = r.splitMetrics[r.iSM].Serialize()
            i += copy(p, r.buf[r.iB:])
            r.iB += i
            r.state = splitOverflow
        }

    case splitOverflow:
        i = copy(p, r.buf[r.iB:])
        r.iB += i
        if r.iB >= len(r.buf) {
            r.iB = 0
            r.iSM++
            if r.iSM == len(r.splitMetrics) {
                r.iM++
                if r.iM == len(r.metrics) {
                    r.state = done
                    return i, io.EOF
                }
                r.state = normal
            } else {
                r.state = split
            }
        }

    case overflow:
        i = copy(p, r.buf[r.iB:])
        r.iB += i
        if r.iB >= len(r.buf) {
            r.iB = 0
            r.iM++
            if r.iM == len(r.metrics) {
                r.state = done
                return i, io.EOF
            }
            r.state = normal
        }
    }

    return i, nil
}
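
Taken together, the state machine lets any number of metrics stream through a fixed-size buffer, splitting or overflowing lines that do not fit. A minimal consumption sketch (mirroring the benchmark in the test file below; assumes `io` and `os` are imported and `metrics` is a `[]telegraf.Metric` built elsewhere):

```go
r := NewReader(metrics)
buf := make([]byte, 512)
for {
    n, err := r.Read(buf)
    os.Stdout.Write(buf[:n]) // stand-in for "send the bytes somewhere"
    // The reader only ever returns nil or io.EOF; n can be smaller than a
    // full metric in the overflow states, so treat the stream as raw bytes.
    if err == io.EOF {
        break
    }
}
```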
713 metric/reader_test.go Normal file
@@ -0,0 +1,713 @@
package metric

import (
    "io"
    "io/ioutil"
    "regexp"
    "strings"
    "testing"
    "time"

    "github.com/influxdata/telegraf"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

func BenchmarkMetricReader(b *testing.B) {
    metrics := make([]telegraf.Metric, 10)
    for i := 0; i < 10; i++ {
        metrics[i], _ = New("foo", map[string]string{},
            map[string]interface{}{"value": int64(1)}, time.Now())
    }
    for n := 0; n < b.N; n++ {
        r := NewReader(metrics)
        io.Copy(ioutil.Discard, r)
    }
}

func TestMetricReader(t *testing.T) {
    ts := time.Unix(1481032190, 0)
    metrics := make([]telegraf.Metric, 10)
    for i := 0; i < 10; i++ {
        metrics[i], _ = New("foo", map[string]string{},
            map[string]interface{}{"value": int64(1)}, ts)
    }

    r := NewReader(metrics)

    buf := make([]byte, 35)
    for i := 0; i < 10; i++ {
        n, err := r.Read(buf)
        if err != nil {
            assert.True(t, err == io.EOF, err.Error())
        }
        assert.Equal(t, 33, n)
        assert.Equal(t, "foo value=1i 1481032190000000000\n", string(buf[0:n]))
    }

    // reader should now be done, and always return 0, io.EOF
    for i := 0; i < 10; i++ {
        n, err := r.Read(buf)
        assert.True(t, err == io.EOF, err.Error())
        assert.Equal(t, 0, n)
    }
}

func TestMetricReader_OverflowMetric(t *testing.T) {
    ts := time.Unix(1481032190, 0)
    m, _ := New("foo", map[string]string{},
        map[string]interface{}{"value": int64(10)}, ts)
    metrics := []telegraf.Metric{m}

    r := NewReader(metrics)
    buf := make([]byte, 5)

    tests := []struct {
        exp string
        err error
        n   int
    }{
        {
            "foo v",
            nil,
            5,
        },
        {
            "alue=",
            nil,
            5,
        },
        {
            "10i 1",
            nil,
            5,
        },
        {
            "48103",
            nil,
            5,
        },
        {
            "21900",
            nil,
            5,
        },
        {
            "00000",
            nil,
            5,
        },
        {
            "000\n",
            io.EOF,
            4,
        },
        {
            "",
            io.EOF,
            0,
        },
    }

    for _, test := range tests {
        n, err := r.Read(buf)
        assert.Equal(t, test.n, n)
        assert.Equal(t, test.exp, string(buf[0:n]))
        assert.Equal(t, test.err, err)
    }
}

// Regression test for when a metric is the same size as the buffer.
//
// Previously EOF would not be set until the next call to Read.
func TestMetricReader_MetricSizeEqualsBufferSize(t *testing.T) {
    ts := time.Unix(1481032190, 0)
    m1, _ := New("foo", map[string]string{},
        map[string]interface{}{"a": int64(1)}, ts)
    metrics := []telegraf.Metric{m1}

    r := NewReader(metrics)
    buf := make([]byte, m1.Len())

    for {
        n, err := r.Read(buf)
        // Should never read 0 bytes unless at EOF or the input buffer is 0 length
        if n == 0 {
            require.Equal(t, io.EOF, err)
            break
        }
        // Lines should be terminated with a LF
        if err == io.EOF {
            require.Equal(t, uint8('\n'), buf[n-1])
            break
        }
        require.NoError(t, err)
    }
}

// Regression test for when a metric needs to be split and one of the
// split metrics is exactly the size of the buffer.
//
// Previously an empty string would be returned on the next Read without error,
// and then the next Read call would panic.
func TestMetricReader_SplitWithExactLengthSplit(t *testing.T) {
    ts := time.Unix(1481032190, 0)
    m1, _ := New("foo", map[string]string{},
        map[string]interface{}{"a": int64(1), "bb": int64(2)}, ts)
    metrics := []telegraf.Metric{m1}

    r := NewReader(metrics)
    buf := make([]byte, 30)

    // foo a=1i,bb=2i 1481032190000000000\n // len 35
    //
    // Requires this specific split order:
    // foo a=1i 1481032190000000000\n  // len 29
    // foo bb=2i 1481032190000000000\n // len 30

    for {
        n, err := r.Read(buf)
        // Should never read 0 bytes unless at EOF or the input buffer is 0 length
        if n == 0 {
            require.Equal(t, io.EOF, err)
            break
        }
        // Lines should be terminated with a LF
        if err == io.EOF {
            require.Equal(t, uint8('\n'), buf[n-1])
            break
        }
        require.NoError(t, err)
    }
}

// Regression test for when a metric needs to be split and one of the
// split metrics is larger than the buffer.
//
// Previously the metric index would be set incorrectly, causing a panic.
func TestMetricReader_SplitOverflowOversized(t *testing.T) {
    ts := time.Unix(1481032190, 0)
    m1, _ := New("foo", map[string]string{},
        map[string]interface{}{
            "a":   int64(1),
            "bbb": int64(2),
        }, ts)
    metrics := []telegraf.Metric{m1}

    r := NewReader(metrics)
    buf := make([]byte, 30)

    // foo a=1i,bbb=2i 1481032190000000000\n // len 36
    //
    // foo a=1i 1481032190000000000\n   // len 29
    // foo bbb=2i 1481032190000000000\n // len 31

    for {
        n, err := r.Read(buf)
        // Should never read 0 bytes unless at EOF or the input buffer is 0 length
        if n == 0 {
            require.Equal(t, io.EOF, err)
            break
        }
        // Lines should be terminated with a LF
        if err == io.EOF {
            require.Equal(t, uint8('\n'), buf[n-1])
            break
        }
        require.NoError(t, err)
    }
}

// Regression test for when a split metric exactly fits in the buffer.
//
// Previously the metric would be overflow split when not required.
func TestMetricReader_SplitOverflowUneeded(t *testing.T) {
    ts := time.Unix(1481032190, 0)
    m1, _ := New("foo", map[string]string{},
        map[string]interface{}{"a": int64(1), "b": int64(2)}, ts)
    metrics := []telegraf.Metric{m1}

    r := NewReader(metrics)
    buf := make([]byte, 29)

    // foo a=1i,b=2i 1481032190000000000\n // len 34
    //
    // foo a=1i 1481032190000000000\n // len 29
    // foo b=2i 1481032190000000000\n // len 29

    for {
        n, err := r.Read(buf)
        // Should never read 0 bytes unless at EOF or the input buffer is 0 length
        if n == 0 {
            require.Equal(t, io.EOF, err)
            break
        }
        // Lines should be terminated with a LF
        if err == io.EOF {
            require.Equal(t, uint8('\n'), buf[n-1])
            break
        }
        require.NoError(t, err)
    }
}

func TestMetricReader_OverflowMultipleMetrics(t *testing.T) {
    ts := time.Unix(1481032190, 0)
    m, _ := New("foo", map[string]string{},
        map[string]interface{}{"value": int64(10)}, ts)
    metrics := []telegraf.Metric{m, m.Copy()}

    r := NewReader(metrics)
    buf := make([]byte, 10)

    tests := []struct {
        exp string
        err error
        n   int
    }{
        {
            "foo value=",
            nil,
            10,
        },
        {
            "10i 148103",
            nil,
            10,
        },
        {
            "2190000000",
            nil,
            10,
        },
        {
            "000\n",
            nil,
            4,
        },
        {
            "foo value=",
            nil,
            10,
        },
        {
            "10i 148103",
            nil,
            10,
        },
        {
            "2190000000",
            nil,
            10,
        },
        {
            "000\n",
            io.EOF,
            4,
        },
        {
            "",
            io.EOF,
            0,
        },
    }

    for _, test := range tests {
        n, err := r.Read(buf)
        assert.Equal(t, test.n, n)
        assert.Equal(t, test.exp, string(buf[0:n]))
        assert.Equal(t, test.err, err)
    }
}

// test splitting a metric
func TestMetricReader_SplitMetric(t *testing.T) {
    ts := time.Unix(1481032190, 0)
    m1, _ := New("foo", map[string]string{},
        map[string]interface{}{
            "value1": int64(10),
            "value2": int64(10),
            "value3": int64(10),
            "value4": int64(10),
            "value5": int64(10),
            "value6": int64(10),
        },
        ts,
    )
    metrics := []telegraf.Metric{m1}

    r := NewReader(metrics)
    buf := make([]byte, 60)

    tests := []struct {
        expRegex string
        err      error
        n        int
    }{
        {
            `foo value\d=10i,value\d=10i,value\d=10i 1481032190000000000\n`,
            nil,
            57,
        },
        {
            `foo value\d=10i,value\d=10i,value\d=10i 1481032190000000000\n`,
            io.EOF,
            57,
        },
        {
            "",
            io.EOF,
            0,
        },
    }

    for _, test := range tests {
        n, err := r.Read(buf)
        assert.Equal(t, test.n, n)
        re := regexp.MustCompile(test.expRegex)
        assert.True(t, re.MatchString(string(buf[0:n])), string(buf[0:n]))
        assert.Equal(t, test.err, err)
    }
}

// test an array with one split metric and one unsplit
func TestMetricReader_SplitMetric2(t *testing.T) {
    ts := time.Unix(1481032190, 0)
    m1, _ := New("foo", map[string]string{},
        map[string]interface{}{
            "value1": int64(10),
            "value2": int64(10),
            "value3": int64(10),
            "value4": int64(10),
            "value5": int64(10),
            "value6": int64(10),
        },
        ts,
    )
    m2, _ := New("foo", map[string]string{},
        map[string]interface{}{
            "value1": int64(10),
        },
        ts,
    )
    metrics := []telegraf.Metric{m1, m2}

    r := NewReader(metrics)
    buf := make([]byte, 60)

    tests := []struct {
        expRegex string
        err      error
        n        int
    }{
        {
            `foo value\d=10i,value\d=10i,value\d=10i 1481032190000000000\n`,
            nil,
            57,
        },
        {
            `foo value\d=10i,value\d=10i,value\d=10i 1481032190000000000\n`,
            nil,
            57,
        },
        {
            `foo value1=10i 1481032190000000000\n`,
            io.EOF,
            35,
        },
        {
            "",
            io.EOF,
            0,
        },
    }

    for _, test := range tests {
        n, err := r.Read(buf)
        assert.Equal(t, test.n, n)
        re := regexp.MustCompile(test.expRegex)
        assert.True(t, re.MatchString(string(buf[0:n])), string(buf[0:n]))
        assert.Equal(t, test.err, err)
    }
}

// test split that results in metrics that are still too long, which results in
// the reader falling back to regular overflow.
func TestMetricReader_SplitMetricTooLong(t *testing.T) {
    ts := time.Unix(1481032190, 0)
    m1, _ := New("foo", map[string]string{},
        map[string]interface{}{
            "value1": int64(10),
            "value2": int64(10),
        },
        ts,
    )
    metrics := []telegraf.Metric{m1}

    r := NewReader(metrics)
    buf := make([]byte, 30)

    tests := []struct {
        expRegex string
        err      error
        n        int
    }{
        {
            `foo value\d=10i,value\d=10i 1481`,
            nil,
            30,
        },
        {
            `032190000000000\n`,
            io.EOF,
            16,
        },
        {
            "",
            io.EOF,
            0,
        },
    }

    for _, test := range tests {
        n, err := r.Read(buf)
        assert.Equal(t, test.n, n)
        re := regexp.MustCompile(test.expRegex)
        assert.True(t, re.MatchString(string(buf[0:n])), string(buf[0:n]))
        assert.Equal(t, test.err, err)
    }
}

// test split with a changing buffer size in the middle of subsequent calls
// to Read
func TestMetricReader_SplitMetricChangingBuffer(t *testing.T) {
    ts := time.Unix(1481032190, 0)
    m1, _ := New("foo", map[string]string{},
        map[string]interface{}{
            "value1": int64(10),
            "value2": int64(10),
            "value3": int64(10),
        },
        ts,
    )
    m2, _ := New("foo", map[string]string{},
        map[string]interface{}{
            "value1": int64(10),
        },
        ts,
    )
    metrics := []telegraf.Metric{m1, m2}

    r := NewReader(metrics)

    tests := []struct {
        expRegex string
        err      error
        n        int
        buf      []byte
    }{
        {
            `foo value\d=10i 1481032190000000000\n`,
            nil,
            35,
            make([]byte, 36),
        },
        {
            `foo value\d=10i 148103219000000`,
            nil,
            30,
            make([]byte, 30),
        },
        {
            `0000\n`,
            nil,
            5,
            make([]byte, 30),
        },
        {
            `foo value\d=10i 1481032190000000000\n`,
            nil,
            35,
            make([]byte, 36),
        },
        {
            `foo value1=10i 1481032190000000000\n`,
            io.EOF,
            35,
            make([]byte, 36),
        },
        {
            "",
            io.EOF,
            0,
            make([]byte, 36),
        },
    }

    for _, test := range tests {
        n, err := r.Read(test.buf)
        assert.Equal(t, test.n, n, test.expRegex)
        re := regexp.MustCompile(test.expRegex)
        assert.True(t, re.MatchString(string(test.buf[0:n])), string(test.buf[0:n]))
        assert.Equal(t, test.err, err, test.expRegex)
    }
}

// test split with a changing buffer size in the middle of subsequent calls
// to Read
func TestMetricReader_SplitMetricChangingBuffer2(t *testing.T) {
    ts := time.Unix(1481032190, 0)
    m1, _ := New("foo", map[string]string{},
        map[string]interface{}{
            "value1": int64(10),
            "value2": int64(10),
        },
        ts,
    )
    m2, _ := New("foo", map[string]string{},
        map[string]interface{}{
            "value1": int64(10),
        },
        ts,
    )
    metrics := []telegraf.Metric{m1, m2}

    r := NewReader(metrics)

    tests := []struct {
        expRegex string
        err      error
        n        int
        buf      []byte
    }{
        {
            `foo value\d=10i 1481032190000000000\n`,
            nil,
            35,
            make([]byte, 36),
        },
        {
            `foo value\d=10i 148103219000000`,
            nil,
            30,
            make([]byte, 30),
        },
        {
            `0000\n`,
            nil,
            5,
            make([]byte, 30),
        },
        {
            `foo value1=10i 1481032190000000000\n`,
            io.EOF,
            35,
            make([]byte, 36),
        },
        {
            "",
            io.EOF,
            0,
            make([]byte, 36),
        },
    }

    for _, test := range tests {
        n, err := r.Read(test.buf)
        assert.Equal(t, test.n, n, test.expRegex)
        re := regexp.MustCompile(test.expRegex)
        assert.True(t, re.MatchString(string(test.buf[0:n])), string(test.buf[0:n]))
        assert.Equal(t, test.err, err, test.expRegex)
    }
}

func TestReader_Read(t *testing.T) {
    epoch := time.Unix(0, 0)

    type args struct {
        name   string
        tags   map[string]string
        fields map[string]interface{}
        t      time.Time
        mType  []telegraf.ValueType
    }
    tests := []struct {
        name     string
        args     args
        expected []byte
    }{
        {
            name: "escape backslashes in string field",
            args: args{
                name:   "cpu",
                tags:   map[string]string{},
                fields: map[string]interface{}{"value": `test\`},
                t:      epoch,
            },
            expected: []byte(`cpu value="test\\" 0`),
        },
        {
            name: "escape quote in string field",
            args: args{
                name:   "cpu",
                tags:   map[string]string{},
                fields: map[string]interface{}{"value": `test"`},
                t:      epoch,
            },
            expected: []byte(`cpu value="test\"" 0`),
        },
        {
            name: "escape quote and backslash in string field",
            args: args{
                name:   "cpu",
                tags:   map[string]string{},
                fields: map[string]interface{}{"value": `test\"`},
                t:      epoch,
            },
            expected: []byte(`cpu value="test\\\"" 0`),
        },
        {
            name: "escape multiple backslash in string field",
            args: args{
                name:   "cpu",
                tags:   map[string]string{},
                fields: map[string]interface{}{"value": `test\\`},
                t:      epoch,
            },
            expected: []byte(`cpu value="test\\\\" 0`),
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            buf := make([]byte, 512)
            m, err := New(tt.args.name, tt.args.tags, tt.args.fields, tt.args.t, tt.args.mType...)
            require.NoError(t, err)

            r := NewReader([]telegraf.Metric{m})
            num, err := r.Read(buf)
            if err != io.EOF {
                require.NoError(t, err)
            }
            line := string(buf[:num])
            // This is done so that we can use raw strings in the test spec
            noeol := strings.TrimRight(line, "\n")
            require.Equal(t, string(tt.expected), noeol)
            require.Equal(t, len(tt.expected)+1, num)
        })
    }
}

func TestMetricRoundtrip(t *testing.T) {
    const lp = `nstat,bu=linux,cls=server,dc=cer,env=production,host=hostname,name=netstat,sr=database IpExtInBcastOctets=12570626154i,IpExtInBcastPkts=95541226i,IpExtInCEPkts=0i,IpExtInCsumErrors=0i,IpExtInECT0Pkts=55674i,IpExtInECT1Pkts=0i,IpExtInMcastOctets=5928296i,IpExtInMcastPkts=174365i,IpExtInNoECTPkts=17965863529i,IpExtInNoRoutes=20i,IpExtInOctets=3334866321815i,IpExtInTruncatedPkts=0i,IpExtOutBcastOctets=0i,IpExtOutBcastPkts=0i,IpExtOutMcastOctets=0i,IpExtOutMcastPkts=0i,IpExtOutOctets=31397892391399i,TcpExtArpFilter=0i,TcpExtBusyPollRxPackets=0i,TcpExtDelayedACKLocked=14094i,TcpExtDelayedACKLost=302083i,TcpExtDelayedACKs=55486507i,TcpExtEmbryonicRsts=11879i,TcpExtIPReversePathFilter=0i,TcpExtListenDrops=1736i,TcpExtListenOverflows=0i,TcpExtLockDroppedIcmps=0i,TcpExtOfoPruned=0i,TcpExtOutOfWindowIcmps=8i,TcpExtPAWSActive=0i,TcpExtPAWSEstab=974i,TcpExtPAWSPassive=0i,TcpExtPruneCalled=0i,TcpExtRcvPruned=0i,TcpExtSyncookiesFailed=12593i,TcpExtSyncookiesRecv=0i,TcpExtSyncookiesSent=0i,TcpExtTCPACKSkippedChallenge=0i,TcpExtTCPACKSkippedFinWait2=0i,TcpExtTCPACKSkippedPAWS=806i,TcpExtTCPACKSkippedSeq=519i,TcpExtTCPACKSkippedSynRecv=0i,TcpExtTCPACKSkippedTimeWait=0i,TcpExtTCPAbortFailed=0i,TcpExtTCPAbortOnClose=22i,TcpExtTCPAbortOnData=36593i,TcpExtTCPAbortOnLinger=0i,TcpExtTCPAbortOnMemory=0i,TcpExtTCPAbortOnTimeout=674i,TcpExtTCPAutoCorking=494253233i,TcpExtTCPBacklogDrop=0i,TcpExtTCPChallengeACK=281i,TcpExtTCPDSACKIgnoredNoUndo=93354i,TcpExtTCPDSACKIgnoredOld=336i,TcpExtTCPDSACKOfoRecv=0i,TcpExtTCPDSACKOfoSent=7i,TcpExtTCPDSACKOldSent=302073i,TcpExtTCPDSACKRecv=215884i,TcpExtTCPDSACKUndo=7633i,TcpExtTCPDeferAcceptDrop=0i,TcpExtTCPDirectCopyFromBacklog=0i,TcpExtTCPDirectCopyFromPrequeue=0i,TcpExtTCPFACKReorder=1320i,TcpExtTCPFastOpenActive=0i,TcpExtTCPFastOpenActiveFail=0i,TcpExtTCPFastOpenCookieReqd=0i,TcpExtTCPFastOpenListenOverflow=0i,TcpExtTCPFastOpenPassive=0i,TcpExtTCPFastOpenPassiveFail=0i,TcpExtTCPFastRetrans=350681i,TcpExtTCPForwardRetrans=142168i,TcpExtTCPFromZeroWindowAdv=4317i,TcpExtTCPFullUndo=29502i,TcpExtTCPHPAcks=10267073000i,TcpExtTCPHPHits=5629837098i,TcpExtTCPHPHitsToUser=0i,TcpExtTCPHystartDelayCwnd=285127i,TcpExtTCPHystartDelayDetect=12318i,TcpExtTCPHystartTrainCwnd=69160570i,TcpExtTCPHystartTrainDetect=3315799i,TcpExtTCPLossFailures=109i,TcpExtTCPLossProbeRecovery=110819i,TcpExtTCPLossProbes=233995i,TcpExtTCPLossUndo=5276i,TcpExtTCPLostRetransmit=397i,TcpExtTCPMD5NotFound=0i,TcpExtTCPMD5Unexpected=0i,TcpExtTCPMemoryPressures=0i,TcpExtTCPMinTTLDrop=0i,TcpExtTCPOFODrop=0i,TcpExtTCPOFOMerge=7i,TcpExtTCPOFOQueue=15196i,TcpExtTCPOrigDataSent=29055119435i,TcpExtTCPPartialUndo=21320i,TcpExtTCPPrequeueDropped=0i,TcpExtTCPPrequeued=0i,TcpExtTCPPureAcks=1236441827i,TcpExtTCPRcvCoalesce=225590473i,TcpExtTCPRcvCollapsed=0i,TcpExtTCPRenoFailures=0i,TcpExtTCPRenoRecovery=0i,TcpExtTCPRenoRecoveryFail=0i,TcpExtTCPRenoReorder=0i,TcpExtTCPReqQFullDoCookies=0i,TcpExtTCPReqQFullDrop=0i,TcpExtTCPRetransFail=41i,TcpExtTCPSACKDiscard=0i,TcpExtTCPSACKReneging=0i,TcpExtTCPSACKReorder=4307i,TcpExtTCPSYNChallenge=244i,TcpExtTCPSackFailures=1698i,TcpExtTCPSackMerged=184668i,TcpExtTCPSackRecovery=97369i,TcpExtTCPSackRecoveryFail=381i,TcpExtTCPSackShiftFallback=2697079i,TcpExtTCPSackShifted=760299i,TcpExtTCPSchedulerFailed=0i,TcpExtTCPSlowStartRetrans=9276i,TcpExtTCPSpuriousRTOs=959i,TcpExtTCPSpuriousRtxHostQueues=2973i,TcpExtTCPSynRetrans=200970i,TcpExtTCPTSReorder=15221i,TcpExtTCPTimeWaitOverflow=0i,TcpExtTCPTimeouts=70127i,TcpExtTCPToZeroWindowAdv=4317i,TcpExtTCPWantZeroWindowAdv=2133i,TcpExtTW=24809813i,TcpExtTWKilled=0i,TcpExtTWRecycled=0i 1496460785000000000
nstat,bu=linux,cls=server,dc=cer,env=production,host=hostname,name=snmp,sr=database IcmpInAddrMaskReps=0i,IcmpInAddrMasks=90i,IcmpInCsumErrors=0i,IcmpInDestUnreachs=284401i,IcmpInEchoReps=9i,IcmpInEchos=1761912i,IcmpInErrors=407i,IcmpInMsgs=2047767i,IcmpInParmProbs=0i,IcmpInRedirects=0i,IcmpInSrcQuenchs=0i,IcmpInTimeExcds=46i,IcmpInTimestampReps=0i,IcmpInTimestamps=1309i,IcmpMsgInType0=9i,IcmpMsgInType11=46i,IcmpMsgInType13=1309i,IcmpMsgInType17=90i,IcmpMsgInType3=284401i,IcmpMsgInType8=1761912i,IcmpMsgOutType0=1761912i,IcmpMsgOutType14=1248i,IcmpMsgOutType3=108709i,IcmpMsgOutType8=9i,IcmpOutAddrMaskReps=0i,IcmpOutAddrMasks=0i,IcmpOutDestUnreachs=108709i,IcmpOutEchoReps=1761912i,IcmpOutEchos=9i,IcmpOutErrors=0i,IcmpOutMsgs=1871878i,IcmpOutParmProbs=0i,IcmpOutRedirects=0i,IcmpOutSrcQuenchs=0i,IcmpOutTimeExcds=0i,IcmpOutTimestampReps=1248i,IcmpOutTimestamps=0i,IpDefaultTTL=64i,IpForwDatagrams=0i,IpForwarding=2i,IpFragCreates=0i,IpFragFails=0i,IpFragOKs=0i,IpInAddrErrors=0i,IpInDelivers=17658795773i,IpInDiscards=0i,IpInHdrErrors=0i,IpInReceives=17659269339i,IpInUnknownProtos=0i,IpOutDiscards=236976i,IpOutNoRoutes=1009i,IpOutRequests=23466783734i,IpReasmFails=0i,IpReasmOKs=0i,IpReasmReqds=0i,IpReasmTimeout=0i,TcpActiveOpens=23308977i,TcpAttemptFails=3757543i,TcpCurrEstab=280i,TcpEstabResets=184792i,TcpInCsumErrors=0i,TcpInErrs=232i,TcpInSegs=17536573089i,TcpMaxConn=-1i,TcpOutRsts=4051451i,TcpOutSegs=29836254873i,TcpPassiveOpens=176546974i,TcpRetransSegs=878085i,TcpRtoAlgorithm=1i,TcpRtoMax=120000i,TcpRtoMin=200i,UdpInCsumErrors=0i,UdpInDatagrams=24441661i,UdpInErrors=0i,UdpLiteInCsumErrors=0i,UdpLiteInDatagrams=0i,UdpLiteInErrors=0i,UdpLiteNoPorts=0i,UdpLiteOutDatagrams=0i,UdpLiteRcvbufErrors=0i,UdpLiteSndbufErrors=0i,UdpNoPorts=17660i,UdpOutDatagrams=51807896i,UdpRcvbufErrors=0i,UdpSndbufErrors=236922i 1496460785000000000
`
    metrics, err := Parse([]byte(lp))
    require.NoError(t, err)
    r := NewReader(metrics)
    buf := make([]byte, 128)
    _, err = r.Read(buf)
    require.NoError(t, err)
    metrics, err = Parse(buf)
    require.NoError(t, err)
}
@@ -1,5 +1,6 @@
package all

import (
    _ "github.com/influxdata/telegraf/plugins/aggregators/histogram"
    _ "github.com/influxdata/telegraf/plugins/aggregators/minmax"
)

97 plugins/aggregators/histogram/README.md Normal file
@@ -0,0 +1,97 @@
# Histogram Aggregator Plugin

The histogram aggregator plugin creates histograms containing the counts of
field values within a range.

Values added to a bucket are also added to the larger buckets in the
distribution. This creates a [cumulative histogram](https://en.wikipedia.org/wiki/Histogram#/media/File:Cumulative_vs_normal_histogram.svg).

Like other Telegraf aggregators, the metric is emitted every `period` seconds.
Bucket counts, however, are not reset between periods and will be non-strictly
increasing while Telegraf is running.

#### Design

Each metric is passed to the aggregator, which searches its configured
histogram buckets for the metric's fields. If buckets are found, the count
for the appropriate bucket is incremented by one; otherwise the value is
counted in the `+Inf` bucket. Every `period` seconds this data is forwarded
to the outputs.

The bucket-counting algorithm is based on the one implemented in the
Prometheus [client](https://github.com/prometheus/client_golang/blob/master/prometheus/histogram.go).
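
As a rough sketch of that counting step (not the plugin's exact code), the
bucket lookup is a binary search over the sorted right borders, with one
extra slot at the end for `+Inf`:

```go
buckets := []float64{0.0, 10.0, 20.0, 30.0, 40.0}
counts := make([]int64, len(buckets)+1) // last slot is the +Inf bucket

value := 15.3
// sort.SearchFloat64s returns the first index whose border is >= value,
// or len(buckets) when the value exceeds every border (the +Inf slot).
counts[sort.SearchFloat64s(buckets, value)]++ // increments the le=20 slot
```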

### Configuration

```toml
# Configuration for aggregate histogram metrics
[[aggregators.histogram]]
  ## The period in which to flush the aggregator.
  period = "30s"

  ## If true, the original metric will be dropped by the
  ## aggregator and will not get sent to the output plugins.
  drop_original = false

  ## Example config that aggregates all fields of the metric.
  # [[aggregators.histogram.config]]
  #   ## The set of buckets.
  #   buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0]
  #   ## The name of metric.
  #   measurement_name = "cpu"

  ## Example config that aggregates only specific fields of the metric.
  # [[aggregators.histogram.config]]
  #   ## The set of buckets.
  #   buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
  #   ## The name of metric.
  #   measurement_name = "diskio"
  #   ## The concrete fields of metric
  #   fields = ["io_time", "read_time", "write_time"]
```

The user is responsible for defining the bounds of the histogram buckets as
well as the measurement name and fields to aggregate.

Each histogram config section must contain `buckets` and `measurement_name`
options. Optionally, if `fields` is set, only the fields listed will be
aggregated. If `fields` is not set, all fields are aggregated.

The `buckets` option contains a list of floats that specify the bucket
boundaries. Each float value defines the inclusive upper bound of its bucket.
The `+Inf` bucket is added automatically and does not need to be defined.

### Measurements & Fields:

The postfix `bucket` will be added to each field key.

- measurement1
    - field1_bucket
    - field2_bucket

### Tags:

All measurements are given the tag `le`. This tag holds the right border
value of the bucket: the metric value is less than or equal to the value of
this tag. For example, assume a metric value of 10 and the buckets
[5, 10, 30, 70, 100]. The tag `le` will then have the value 10, because the
metric value falls into the bucket whose right border is `10`.

### Example Output:

```
cpu,cpu=cpu1,host=localhost,le=0.0 usage_idle_bucket=0i 1486998330000000000
cpu,cpu=cpu1,host=localhost,le=10.0 usage_idle_bucket=0i 1486998330000000000
cpu,cpu=cpu1,host=localhost,le=20.0 usage_idle_bucket=1i 1486998330000000000
cpu,cpu=cpu1,host=localhost,le=30.0 usage_idle_bucket=2i 1486998330000000000
cpu,cpu=cpu1,host=localhost,le=40.0 usage_idle_bucket=2i 1486998330000000000
cpu,cpu=cpu1,host=localhost,le=50.0 usage_idle_bucket=2i 1486998330000000000
cpu,cpu=cpu1,host=localhost,le=60.0 usage_idle_bucket=2i 1486998330000000000
cpu,cpu=cpu1,host=localhost,le=70.0 usage_idle_bucket=2i 1486998330000000000
cpu,cpu=cpu1,host=localhost,le=80.0 usage_idle_bucket=2i 1486998330000000000
cpu,cpu=cpu1,host=localhost,le=90.0 usage_idle_bucket=2i 1486998330000000000
cpu,cpu=cpu1,host=localhost,le=100.0 usage_idle_bucket=2i 1486998330000000000
cpu,cpu=cpu1,host=localhost,le=+Inf usage_idle_bucket=2i 1486998330000000000
```
315 plugins/aggregators/histogram/histogram.go Normal file
@@ -0,0 +1,315 @@
package histogram

import (
    "sort"
    "strconv"

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/plugins/aggregators"
)

// bucketTag is the tag that contains the right bucket border
const bucketTag = "le"

// bucketInf is the right bucket border for infinite values
const bucketInf = "+Inf"

// HistogramAggregator is an aggregator with histogram configs and particular histograms for defined metrics
type HistogramAggregator struct {
    Configs []config `toml:"config"`

    buckets bucketsByMetrics
    cache   map[uint64]metricHistogramCollection
}

// config contains the name and fields of a metric along with its histogram buckets.
type config struct {
    Metric  string   `toml:"measurement_name"`
    Fields  []string `toml:"fields"`
    Buckets buckets  `toml:"buckets"`
}

// bucketsByMetrics contains the buckets grouped by metric and field name
type bucketsByMetrics map[string]bucketsByFields

// bucketsByFields contains the buckets grouped by field name
type bucketsByFields map[string]buckets

// buckets contains the right borders of the buckets
type buckets []float64

// metricHistogramCollection aggregates the histogram data
type metricHistogramCollection struct {
    histogramCollection map[string]counts
    name                string
    tags                map[string]string
}

// counts holds the number of hits in each bucket
type counts []int64

// groupedByCountFields contains fields grouped by their count values
type groupedByCountFields struct {
    name            string
    tags            map[string]string
    fieldsWithCount map[string]int64
}

// NewHistogramAggregator creates a new histogram aggregator
func NewHistogramAggregator() telegraf.Aggregator {
    h := &HistogramAggregator{}
    h.buckets = make(bucketsByMetrics)
    h.resetCache()

    return h
}

var sampleConfig = `
  ## The period in which to flush the aggregator.
  period = "30s"

  ## If true, the original metric will be dropped by the
  ## aggregator and will not get sent to the output plugins.
  drop_original = false

  ## Example config that aggregates all fields of the metric.
  # [[aggregators.histogram.config]]
  #   ## The set of buckets.
  #   buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0]
  #   ## The name of metric.
  #   measurement_name = "cpu"

  ## Example config that aggregates only specific fields of the metric.
  # [[aggregators.histogram.config]]
  #   ## The set of buckets.
  #   buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
  #   ## The name of metric.
  #   measurement_name = "diskio"
  #   ## The concrete fields of metric
  #   fields = ["io_time", "read_time", "write_time"]
`

// SampleConfig returns a sample of the config
func (h *HistogramAggregator) SampleConfig() string {
    return sampleConfig
}

// Description returns the description of the aggregator plugin
func (h *HistogramAggregator) Description() string {
    return "Create aggregate histograms."
}

// Add adds a new hit to the buckets
func (h *HistogramAggregator) Add(in telegraf.Metric) {
    bucketsByField := make(map[string][]float64)
    for field := range in.Fields() {
        buckets := h.getBuckets(in.Name(), field)
        if buckets != nil {
            bucketsByField[field] = buckets
        }
    }

    if len(bucketsByField) == 0 {
        return
    }

    id := in.HashID()
    agr, ok := h.cache[id]
    if !ok {
        agr = metricHistogramCollection{
            name:                in.Name(),
            tags:                in.Tags(),
            histogramCollection: make(map[string]counts),
        }
    }

    for field, value := range in.Fields() {
        if buckets, ok := bucketsByField[field]; ok {
            if agr.histogramCollection[field] == nil {
                agr.histogramCollection[field] = make(counts, len(buckets)+1)
            }

            if value, ok := convert(value); ok {
                index := sort.SearchFloat64s(buckets, value)
                agr.histogramCollection[field][index]++
            }
        }
    }

    h.cache[id] = agr
}

// Push returns histogram values for metrics
func (h *HistogramAggregator) Push(acc telegraf.Accumulator) {
    metricsWithGroupedFields := []groupedByCountFields{}

    for _, aggregate := range h.cache {
        for field, counts := range aggregate.histogramCollection {
            h.groupFieldsByBuckets(&metricsWithGroupedFields, aggregate.name, field, copyTags(aggregate.tags), counts)
        }
    }

    for _, metric := range metricsWithGroupedFields {
        acc.AddFields(metric.name, makeFieldsWithCount(metric.fieldsWithCount), metric.tags)
    }
}

// groupFieldsByBuckets groups fields by metric buckets which are represented as tags
func (h *HistogramAggregator) groupFieldsByBuckets(
    metricsWithGroupedFields *[]groupedByCountFields,
    name string,
    field string,
    tags map[string]string,
    counts []int64,
) {
    count := int64(0)
    for index, bucket := range h.getBuckets(name, field) {
        count += counts[index]

        tags[bucketTag] = strconv.FormatFloat(bucket, 'f', -1, 64)
        h.groupField(metricsWithGroupedFields, name, field, count, copyTags(tags))
    }

    count += counts[len(counts)-1]
    tags[bucketTag] = bucketInf

    h.groupField(metricsWithGroupedFields, name, field, count, tags)
}

// groupField groups field by count value
func (h *HistogramAggregator) groupField(
    metricsWithGroupedFields *[]groupedByCountFields,
    name string,
    field string,
    count int64,
    tags map[string]string,
) {
    for key, metric := range *metricsWithGroupedFields {
        if name == metric.name && isTagsIdentical(tags, metric.tags) {
            (*metricsWithGroupedFields)[key].fieldsWithCount[field] = count
            return
        }
    }

    fieldsWithCount := map[string]int64{
        field: count,
    }

    *metricsWithGroupedFields = append(
        *metricsWithGroupedFields,
        groupedByCountFields{name: name, tags: tags, fieldsWithCount: fieldsWithCount},
    )
}

// Reset does nothing, because we need to collect counts for a long time; otherwise, if the config
// parameter has a small value, we would get a histogram covering only a small part of the distribution.
func (h *HistogramAggregator) Reset() {}

// resetCache resets the cached counts (hits) in the buckets
func (h *HistogramAggregator) resetCache() {
    h.cache = make(map[uint64]metricHistogramCollection)
}

// getBuckets finds buckets and returns them
func (h *HistogramAggregator) getBuckets(metric string, field string) []float64 {
    if buckets, ok := h.buckets[metric][field]; ok {
        return buckets
    }

    for _, config := range h.Configs {
        if config.Metric == metric {
            if !isBucketExists(field, config) {
                continue
            }

            if _, ok := h.buckets[metric]; !ok {
                h.buckets[metric] = make(bucketsByFields)
            }

            h.buckets[metric][field] = sortBuckets(config.Buckets)
        }
    }

    return h.buckets[metric][field]
}

// isBucketExists checks whether buckets exist for the passed field
func isBucketExists(field string, cfg config) bool {
    if len(cfg.Fields) == 0 {
        return true
    }

    for _, fl := range cfg.Fields {
        if fl == field {
            return true
        }
    }

    return false
}

// sortBuckets sorts the buckets if needed
func sortBuckets(buckets []float64) []float64 {
    for i, bucket := range buckets {
        if i < len(buckets)-1 && bucket >= buckets[i+1] {
            sort.Float64s(buckets)
            break
        }
    }

    return buckets
}

// convert converts an interface value to a concrete float64
func convert(in interface{}) (float64, bool) {
    switch v := in.(type) {
    case float64:
        return v, true
    case int64:
        return float64(v), true
    default:
        return 0, false
    }
}

// copyTags copies tags
func copyTags(tags map[string]string) map[string]string {
    copiedTags := map[string]string{}
    for key, val := range tags {
        copiedTags[key] = val
    }

    return copiedTags
}

// isTagsIdentical checks the identity of two lists of tags
func isTagsIdentical(originalTags, checkedTags map[string]string) bool {
    if len(originalTags) != len(checkedTags) {
        return false
    }

    for tagName, tagValue := range originalTags {
        if tagValue != checkedTags[tagName] {
            return false
        }
    }

    return true
}

// makeFieldsWithCount assigns the count value to all metric fields
func makeFieldsWithCount(fieldsWithCountIn map[string]int64) map[string]interface{} {
    fieldsWithCountOut := map[string]interface{}{}
    for field, count := range fieldsWithCountIn {
        fieldsWithCountOut[field+"_bucket"] = count
    }

    return fieldsWithCountOut
}

// init initializes the histogram aggregator plugin
func init() {
    aggregators.Add("histogram", func() telegraf.Aggregator {
        return NewHistogramAggregator()
    })
}
210 plugins/aggregators/histogram/histogram_test.go Normal file
@@ -0,0 +1,210 @@
|
||||
package histogram
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/metric"
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// NewTestHistogram creates new test histogram aggregation with specified config
|
||||
func NewTestHistogram(cfg []config) telegraf.Aggregator {
|
||||
htm := &HistogramAggregator{Configs: cfg}
|
||||
htm.buckets = make(bucketsByMetrics)
|
||||
htm.resetCache()
|
||||
|
||||
return htm
|
||||
}
|
||||
|
||||
// firstMetric1 is the first test metric
|
||||
var firstMetric1, _ = metric.New(
|
||||
"first_metric_name",
|
||||
map[string]string{"tag_name": "tag_value"},
|
||||
map[string]interface{}{
|
||||
"a": float64(15.3),
|
||||
"b": float64(40),
|
||||
},
|
||||
time.Now(),
|
||||
)
|
||||
|
||||
// firstMetric1 is the first test metric with other value
|
||||
var firstMetric2, _ = metric.New(
    "first_metric_name",
    map[string]string{"tag_name": "tag_value"},
    map[string]interface{}{
        "a": float64(15.9),
        "c": float64(40),
    },
    time.Now(),
)

// secondMetric is the second metric
var secondMetric, _ = metric.New(
    "second_metric_name",
    map[string]string{"tag_name": "tag_value"},
    map[string]interface{}{
        "a":        float64(105),
        "ignoreme": "string",
        "andme":    true,
    },
    time.Now(),
)

// BenchmarkApply runs benchmarks
func BenchmarkApply(b *testing.B) {
    histogram := NewHistogramAggregator()

    for n := 0; n < b.N; n++ {
        histogram.Add(firstMetric1)
        histogram.Add(firstMetric2)
        histogram.Add(secondMetric)
    }
}

// TestHistogramWithPeriodAndOneField tests metrics for one period and for one field
func TestHistogramWithPeriodAndOneField(t *testing.T) {
    var cfg []config
    cfg = append(cfg, config{Metric: "first_metric_name", Fields: []string{"a"}, Buckets: []float64{0.0, 10.0, 20.0, 30.0, 40.0}})
    histogram := NewTestHistogram(cfg)

    acc := &testutil.Accumulator{}

    histogram.Add(firstMetric1)
    histogram.Add(firstMetric2)
    histogram.Push(acc)

    if len(acc.Metrics) != 6 {
        assert.Fail(t, "Incorrect number of metrics")
    }
    assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(0)}, "0")
    assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(0)}, "10")
    assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2)}, "20")
    assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2)}, "30")
    assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2)}, "40")
    assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2)}, bucketInf)
}

// TestHistogramWithPeriodAndAllFields tests two metrics for one period and for all fields
func TestHistogramWithPeriodAndAllFields(t *testing.T) {
    var cfg []config
    cfg = append(cfg, config{Metric: "first_metric_name", Buckets: []float64{0.0, 15.5, 20.0, 30.0, 40.0}})
    cfg = append(cfg, config{Metric: "second_metric_name", Buckets: []float64{0.0, 4.0, 10.0, 23.0, 30.0}})
    histogram := NewTestHistogram(cfg)

    acc := &testutil.Accumulator{}

    histogram.Add(firstMetric1)
    histogram.Add(firstMetric2)
    histogram.Add(secondMetric)
    histogram.Push(acc)

    if len(acc.Metrics) != 12 {
        assert.Fail(t, "Incorrect number of metrics")
    }

    assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(0), "b_bucket": int64(0), "c_bucket": int64(0)}, "0")
    assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(1), "b_bucket": int64(0), "c_bucket": int64(0)}, "15.5")
    assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2), "b_bucket": int64(0), "c_bucket": int64(0)}, "20")
    assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2), "b_bucket": int64(0), "c_bucket": int64(0)}, "30")
    assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2), "b_bucket": int64(1), "c_bucket": int64(1)}, "40")
    assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2), "b_bucket": int64(1), "c_bucket": int64(1)}, bucketInf)

    assertContainsTaggedField(t, acc, "second_metric_name", map[string]interface{}{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, "0")
    assertContainsTaggedField(t, acc, "second_metric_name", map[string]interface{}{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, "4")
    assertContainsTaggedField(t, acc, "second_metric_name", map[string]interface{}{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, "10")
    assertContainsTaggedField(t, acc, "second_metric_name", map[string]interface{}{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, "23")
    assertContainsTaggedField(t, acc, "second_metric_name", map[string]interface{}{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, "30")
    assertContainsTaggedField(t, acc, "second_metric_name", map[string]interface{}{"a_bucket": int64(1), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, bucketInf)
}

// TestHistogramDifferentPeriodsAndAllFields tests two metrics getting added with a push/reset in between (simulates
// getting added in different periods) for all fields
func TestHistogramDifferentPeriodsAndAllFields(t *testing.T) {

    var cfg []config
    cfg = append(cfg, config{Metric: "first_metric_name", Buckets: []float64{0.0, 10.0, 20.0, 30.0, 40.0}})
    histogram := NewTestHistogram(cfg)

    acc := &testutil.Accumulator{}
    histogram.Add(firstMetric1)
    histogram.Push(acc)

    assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(0), "b_bucket": int64(0)}, "0")
    assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(0), "b_bucket": int64(0)}, "10")
    assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(1), "b_bucket": int64(0)}, "20")
    assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(1), "b_bucket": int64(0)}, "30")
    assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(1), "b_bucket": int64(1)}, "40")
    assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(1), "b_bucket": int64(1)}, bucketInf)

    acc.ClearMetrics()
    histogram.Add(firstMetric2)
    histogram.Push(acc)

    assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(0), "b_bucket": int64(0), "c_bucket": int64(0)}, "0")
    assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(0), "b_bucket": int64(0), "c_bucket": int64(0)}, "10")
    assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2), "b_bucket": int64(0), "c_bucket": int64(0)}, "20")
    assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2), "b_bucket": int64(0), "c_bucket": int64(0)}, "30")
    assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2), "b_bucket": int64(1), "c_bucket": int64(1)}, "40")
    assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2), "b_bucket": int64(1), "c_bucket": int64(1)}, bucketInf)
}

// TestWrongBucketsOrder tests that adding a metric panics when buckets are not in increasing order
func TestWrongBucketsOrder(t *testing.T) {
    defer func() {
        if r := recover(); r != nil {
            assert.Equal(
                t,
                "histogram buckets must be in increasing order: 90.00 >= 20.00, metrics: first_metric_name, field: a",
                fmt.Sprint(r),
            )
        }
    }()

    var cfg []config
    cfg = append(cfg, config{Metric: "first_metric_name", Buckets: []float64{0.0, 90.0, 20.0, 30.0, 40.0}})
    histogram := NewTestHistogram(cfg)
    histogram.Add(firstMetric2)
}

// assertContainsTaggedField is a helper function for testing histogram data
func assertContainsTaggedField(t *testing.T, acc *testutil.Accumulator, metricName string, fields map[string]interface{}, le string) {
    acc.Lock()
    defer acc.Unlock()

    for _, checkedMetric := range acc.Metrics {
        // check metric name
        if checkedMetric.Measurement != metricName {
            continue
        }

        // check "le" tag
        if checkedMetric.Tags[bucketTag] != le {
            continue
        }

        // check fields
        isFieldsIdentical := true
        for field := range fields {
            if _, ok := checkedMetric.Fields[field]; !ok {
                isFieldsIdentical = false
                break
            }
        }
        if !isFieldsIdentical {
            continue
        }

        // check fields with their counts
        if assert.Equal(t, fields, checkedMetric.Fields) {
            return
        }

        assert.Fail(t, fmt.Sprintf("incorrect fields %v of metric %s", fields, metricName))
    }

    assert.Fail(t, fmt.Sprintf("unknown measurement '%s' with tags: %v, fields: %v", metricName, map[string]string{"le": le}, fields))
}
@@ -1,6 +1,8 @@
# Example Input Plugin

The example plugin gathers metrics about example things
The example plugin gathers metrics about example things. This description
explains at a high level what the plugin does and provides links to where
additional information can be found.

### Configuration:

@@ -12,7 +14,8 @@ The example plugin gathers metrics about example things

### Measurements & Fields:

<optional description>
Here you should add an optional description and links to where the user can
get more information about the measurements.

- measurement1
  - field1 (type, unit)
@@ -27,11 +30,14 @@ The example plugin gathers metrics about example things
  - tag2
- measurement2 has the following tags:
  - tag3


### Sample Queries:

These are some useful queries (to generate dashboards or other) to run against data from this plugin:
This section should contain some useful InfluxDB queries that can be used to
get started with the plugin or to generate dashboards. For each query listed,
describe at a high level what data is returned.

Get the max, mean, and min for the measurement in the last hour:
```
SELECT max(field1), mean(field1), min(field1) FROM measurement1 WHERE tag1=bar AND time > now() - 1h GROUP BY tag
```
@@ -39,7 +45,6 @@ SELECT max(field1), mean(field1), min(field1) FROM measurement1 WHERE tag1=bar A
### Example Output:

```
$ ./telegraf -config telegraf.conf -input-filter example -test
measurement1,tag1=foo,tag2=bar field1=1i,field2=2.1 1453831884664956455
measurement2,tag1=foo,tag2=bar,tag3=baz field3=1i 1453831884664956455
```

File diff suppressed because one or more lines are too long
@@ -10,7 +10,6 @@ import (
    "time"

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/internal/errchan"
    "github.com/influxdata/telegraf/plugins/inputs"

    as "github.com/aerospike/aerospike-client-go"
@@ -41,17 +40,16 @@ func (a *Aerospike) Gather(acc telegraf.Accumulator) error {
    }

    var wg sync.WaitGroup
    errChan := errchan.New(len(a.Servers))
    wg.Add(len(a.Servers))
    for _, server := range a.Servers {
        go func(serv string) {
            defer wg.Done()
            errChan.C <- a.gatherServer(serv, acc)
            acc.AddError(a.gatherServer(serv, acc))
        }(server)
    }

    wg.Wait()
    return errChan.Error()
    return nil
}

func (a *Aerospike) gatherServer(hostport string, acc telegraf.Accumulator) error {
@@ -75,10 +73,9 @@ func (a *Aerospike) gatherServer(hostport string, acc telegraf.Accumulator) erro
    for _, n := range nodes {
        tags := map[string]string{
            "aerospike_host": hostport,
            "node_name":      n.GetName(),
        }
        fields := map[string]interface{}{
            "node_name": n.GetName(),
        }
        fields := make(map[string]interface{})
        stats, err := as.RequestNodeStats(n)
        if err != nil {
            return err
@@ -88,7 +85,7 @@ func (a *Aerospike) gatherServer(hostport string, acc telegraf.Accumulator) erro
            if err == nil {
                fields[strings.Replace(k, "-", "_", -1)] = val
            } else {
                log.Printf("I! skipping aerospike field %v with int64 overflow", k)
                log.Printf("I! skipping aerospike field %v with int64 overflow: %q", k, v)
            }
        }
        acc.AddFields("aerospike_node", fields, tags, time.Now())
@@ -102,11 +99,10 @@ func (a *Aerospike) gatherServer(hostport string, acc telegraf.Accumulator) erro
        for _, namespace := range namespaces {
            nTags := map[string]string{
                "aerospike_host": hostport,
                "node_name":      n.GetName(),
            }
            nTags["namespace"] = namespace
            nFields := map[string]interface{}{
                "node_name": n.GetName(),
            }
            nFields := make(map[string]interface{})
            info, err := as.RequestNodeInfo(n, "namespace/"+namespace)
            if err != nil {
                continue
@@ -121,7 +117,7 @@ func (a *Aerospike) gatherServer(hostport string, acc telegraf.Accumulator) erro
                if err == nil {
                    nFields[strings.Replace(parts[0], "-", "_", -1)] = val
                } else {
                    log.Printf("I! skipping aerospike field %v with int64 overflow", parts[0])
                    log.Printf("I! skipping aerospike field %v with int64 overflow: %q", parts[0], parts[1])
                }
            }
            acc.AddFields("aerospike_namespace", nFields, nTags, time.Now())

@@ -19,12 +19,14 @@ func TestAerospikeStatistics(t *testing.T) {

    var acc testutil.Accumulator

    err := a.Gather(&acc)
    err := acc.GatherError(a.Gather)
    require.NoError(t, err)

    assert.True(t, acc.HasMeasurement("aerospike_node"))
    assert.True(t, acc.HasTag("aerospike_node", "node_name"))
    assert.True(t, acc.HasMeasurement("aerospike_namespace"))
    assert.True(t, acc.HasIntField("aerospike_node", "batch_error"))
    assert.True(t, acc.HasTag("aerospike_namespace", "node_name"))
    assert.True(t, acc.HasInt64Field("aerospike_node", "batch_error"))
}

func TestAerospikeStatisticsPartialErr(t *testing.T) {
@@ -41,12 +43,11 @@ func TestAerospikeStatisticsPartialErr(t *testing.T) {

    var acc testutil.Accumulator

    err := a.Gather(&acc)
    require.Error(t, err)
    require.Error(t, acc.GatherError(a.Gather))

    assert.True(t, acc.HasMeasurement("aerospike_node"))
    assert.True(t, acc.HasMeasurement("aerospike_namespace"))
    assert.True(t, acc.HasIntField("aerospike_node", "batch_error"))
    assert.True(t, acc.HasInt64Field("aerospike_node", "batch_error"))
}

func TestAerospikeParseValue(t *testing.T) {

@@ -2,6 +2,7 @@ package all

import (
    _ "github.com/influxdata/telegraf/plugins/inputs/aerospike"
    _ "github.com/influxdata/telegraf/plugins/inputs/amqp_consumer"
    _ "github.com/influxdata/telegraf/plugins/inputs/apache"
    _ "github.com/influxdata/telegraf/plugins/inputs/bcache"
    _ "github.com/influxdata/telegraf/plugins/inputs/cassandra"
@@ -14,12 +15,15 @@ import (
    _ "github.com/influxdata/telegraf/plugins/inputs/couchbase"
    _ "github.com/influxdata/telegraf/plugins/inputs/couchdb"
    _ "github.com/influxdata/telegraf/plugins/inputs/disque"
    _ "github.com/influxdata/telegraf/plugins/inputs/dmcache"
    _ "github.com/influxdata/telegraf/plugins/inputs/dns_query"
    _ "github.com/influxdata/telegraf/plugins/inputs/docker"
    _ "github.com/influxdata/telegraf/plugins/inputs/dovecot"
    _ "github.com/influxdata/telegraf/plugins/inputs/elasticsearch"
    _ "github.com/influxdata/telegraf/plugins/inputs/exec"
    _ "github.com/influxdata/telegraf/plugins/inputs/fail2ban"
    _ "github.com/influxdata/telegraf/plugins/inputs/filestat"
    _ "github.com/influxdata/telegraf/plugins/inputs/fluentd"
    _ "github.com/influxdata/telegraf/plugins/inputs/graylog"
    _ "github.com/influxdata/telegraf/plugins/inputs/haproxy"
    _ "github.com/influxdata/telegraf/plugins/inputs/hddtemp"
@@ -28,10 +32,13 @@ import (
    _ "github.com/influxdata/telegraf/plugins/inputs/httpjson"
    _ "github.com/influxdata/telegraf/plugins/inputs/influxdb"
    _ "github.com/influxdata/telegraf/plugins/inputs/internal"
    _ "github.com/influxdata/telegraf/plugins/inputs/interrupts"
    _ "github.com/influxdata/telegraf/plugins/inputs/ipmi_sensor"
    _ "github.com/influxdata/telegraf/plugins/inputs/iptables"
    _ "github.com/influxdata/telegraf/plugins/inputs/jolokia"
    _ "github.com/influxdata/telegraf/plugins/inputs/kafka_consumer"
    _ "github.com/influxdata/telegraf/plugins/inputs/kafka_consumer_legacy"
    _ "github.com/influxdata/telegraf/plugins/inputs/kapacitor"
    _ "github.com/influxdata/telegraf/plugins/inputs/kubernetes"
    _ "github.com/influxdata/telegraf/plugins/inputs/leofs"
    _ "github.com/influxdata/telegraf/plugins/inputs/logparser"
@@ -39,6 +46,7 @@ import (
    _ "github.com/influxdata/telegraf/plugins/inputs/mailchimp"
    _ "github.com/influxdata/telegraf/plugins/inputs/memcached"
    _ "github.com/influxdata/telegraf/plugins/inputs/mesos"
    _ "github.com/influxdata/telegraf/plugins/inputs/minecraft"
    _ "github.com/influxdata/telegraf/plugins/inputs/mongodb"
    _ "github.com/influxdata/telegraf/plugins/inputs/mqtt_consumer"
    _ "github.com/influxdata/telegraf/plugins/inputs/mysql"
@@ -49,6 +57,7 @@ import (
    _ "github.com/influxdata/telegraf/plugins/inputs/nsq_consumer"
    _ "github.com/influxdata/telegraf/plugins/inputs/nstat"
    _ "github.com/influxdata/telegraf/plugins/inputs/ntpq"
    _ "github.com/influxdata/telegraf/plugins/inputs/openldap"
    _ "github.com/influxdata/telegraf/plugins/inputs/passenger"
    _ "github.com/influxdata/telegraf/plugins/inputs/phpfpm"
    _ "github.com/influxdata/telegraf/plugins/inputs/ping"
@@ -63,21 +72,26 @@ import (
    _ "github.com/influxdata/telegraf/plugins/inputs/redis"
    _ "github.com/influxdata/telegraf/plugins/inputs/rethinkdb"
    _ "github.com/influxdata/telegraf/plugins/inputs/riak"
    _ "github.com/influxdata/telegraf/plugins/inputs/salesforce"
    _ "github.com/influxdata/telegraf/plugins/inputs/sensors"
    _ "github.com/influxdata/telegraf/plugins/inputs/snmp"
    _ "github.com/influxdata/telegraf/plugins/inputs/snmp_legacy"
    _ "github.com/influxdata/telegraf/plugins/inputs/socket_listener"
    _ "github.com/influxdata/telegraf/plugins/inputs/sqlserver"
    _ "github.com/influxdata/telegraf/plugins/inputs/statsd"
    _ "github.com/influxdata/telegraf/plugins/inputs/sysstat"
    _ "github.com/influxdata/telegraf/plugins/inputs/system"
    _ "github.com/influxdata/telegraf/plugins/inputs/tail"
    _ "github.com/influxdata/telegraf/plugins/inputs/tcp_listener"
    _ "github.com/influxdata/telegraf/plugins/inputs/tomcat"
    _ "github.com/influxdata/telegraf/plugins/inputs/trig"
    _ "github.com/influxdata/telegraf/plugins/inputs/twemproxy"
    _ "github.com/influxdata/telegraf/plugins/inputs/udp_listener"
    _ "github.com/influxdata/telegraf/plugins/inputs/varnish"
    _ "github.com/influxdata/telegraf/plugins/inputs/webhooks"
    _ "github.com/influxdata/telegraf/plugins/inputs/win_perf_counters"
    _ "github.com/influxdata/telegraf/plugins/inputs/win_services"
    _ "github.com/influxdata/telegraf/plugins/inputs/zfs"
    _ "github.com/influxdata/telegraf/plugins/inputs/zipkin"
    _ "github.com/influxdata/telegraf/plugins/inputs/zookeeper"
)

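These blank-identifier imports exist purely for their side effects: each plugin package registers a constructor with the inputs registry from its own `init`, so enabling a plugin in the build is a single import line. A minimal sketch of that registration pattern (names here are illustrative, not the actual registry code):

```go
package main

import "fmt"

// registry maps plugin names to constructors, mirroring the role of inputs.Add.
var registry = map[string]func() string{}

// Add registers a constructor under a name.
func Add(name string, creator func() string) { registry[name] = creator }

// In a real plugin package this init runs when the package is imported with `_`.
func init() { Add("example", func() string { return "example input" }) }

func main() {
    fmt.Println(registry["example"]())
}
```
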
47 plugins/inputs/amqp_consumer/README.md Normal file
@@ -0,0 +1,47 @@
# AMQP Consumer Input Plugin

This plugin provides a consumer for use with AMQP 0-9-1, a prominent implementation of this protocol being [RabbitMQ](https://www.rabbitmq.com/).

Metrics are read from a topic exchange using the configured queue and binding_key.

Message payload should be formatted in one of the [Telegraf Data Formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md).

For an introduction to AMQP see:
- https://www.rabbitmq.com/tutorials/amqp-concepts.html
- https://www.rabbitmq.com/getstarted.html

The following defaults are known to work with RabbitMQ:

```toml
# AMQP consumer plugin
[[inputs.amqp_consumer]]
  ## AMQP url
  url = "amqp://localhost:5672/influxdb"
  ## AMQP exchange
  exchange = "telegraf"
  ## AMQP queue name
  queue = "telegraf"
  ## Binding Key
  binding_key = "#"

  ## Controls how many messages the server will try to keep on the network
  ## for consumers before receiving delivery acks.
  #prefetch_count = 50

  ## Auth method. PLAIN and EXTERNAL are supported.
  ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as
  ## described here: https://www.rabbitmq.com/plugins.html
  # auth_method = "PLAIN"
  ## Optional SSL Config
  # ssl_ca = "/etc/telegraf/ca.pem"
  # ssl_cert = "/etc/telegraf/cert.pem"
  # ssl_key = "/etc/telegraf/key.pem"
  ## Use SSL but skip chain & host verification
  # insecure_skip_verify = false

  ## Data format to consume.
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "influx"
```
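
To smoke-test the consumer, one can publish a metric in InfluxDB line protocol to the configured exchange. A rough sketch using the same streadway/amqp client the plugin depends on (the URL and exchange mirror the sample config above; the routing key is an assumption that must match the `binding_key` pattern):

```go
package main

import (
    "log"

    "github.com/streadway/amqp"
)

func main() {
    conn, err := amqp.Dial("amqp://localhost:5672/influxdb")
    if err != nil {
        log.Fatal(err)
    }
    defer conn.Close()

    ch, err := conn.Channel()
    if err != nil {
        log.Fatal(err)
    }

    // One metric in InfluxDB line protocol, matching data_format = "influx".
    body := "weather,location=us-midwest temperature=82 1465839830100400200"
    err = ch.Publish(
        "telegraf",        // exchange
        "telegraf.metric", // routing key; must match the queue's binding_key
        false, false,
        amqp.Publishing{ContentType: "text/plain", Body: []byte(body)},
    )
    if err != nil {
        log.Fatal(err)
    }
}
```
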
280 plugins/inputs/amqp_consumer/amqp_consumer.go Normal file
@@ -0,0 +1,280 @@
package amqp_consumer

import (
    "fmt"
    "log"
    "strings"
    "sync"
    "time"

    "github.com/streadway/amqp"

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/internal"
    "github.com/influxdata/telegraf/plugins/inputs"
    "github.com/influxdata/telegraf/plugins/parsers"
)

// AMQPConsumer is the top level struct for this plugin
type AMQPConsumer struct {
    URL string
    // AMQP exchange
    Exchange string
    // Queue Name
    Queue string
    // Binding Key
    BindingKey string `toml:"binding_key"`

    // Controls how many messages the server will try to keep on the network
    // for consumers before receiving delivery acks.
    PrefetchCount int

    // AMQP Auth method
    AuthMethod string
    // Path to CA file
    SSLCA string `toml:"ssl_ca"`
    // Path to host cert file
    SSLCert string `toml:"ssl_cert"`
    // Path to cert key file
    SSLKey string `toml:"ssl_key"`
    // Use SSL but skip chain & host verification
    InsecureSkipVerify bool

    parser parsers.Parser
    conn   *amqp.Connection
    wg     *sync.WaitGroup
}

type externalAuth struct{}

func (a *externalAuth) Mechanism() string {
    return "EXTERNAL"
}
func (a *externalAuth) Response() string {
    return fmt.Sprintf("\000")
}

const (
    DefaultAuthMethod    = "PLAIN"
    DefaultPrefetchCount = 50
)

func (a *AMQPConsumer) SampleConfig() string {
    return `
  ## AMQP url
  url = "amqp://localhost:5672/influxdb"
  ## AMQP exchange
  exchange = "telegraf"
  ## AMQP queue name
  queue = "telegraf"
  ## Binding Key
  binding_key = "#"

  ## Maximum number of messages server should give to the worker.
  prefetch_count = 50

  ## Auth method. PLAIN and EXTERNAL are supported
  ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as
  ## described here: https://www.rabbitmq.com/plugins.html
  # auth_method = "PLAIN"

  ## Optional SSL Config
  # ssl_ca = "/etc/telegraf/ca.pem"
  # ssl_cert = "/etc/telegraf/cert.pem"
  # ssl_key = "/etc/telegraf/key.pem"
  ## Use SSL but skip chain & host verification
  # insecure_skip_verify = false

  ## Data format to consume.
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "influx"
`
}

func (a *AMQPConsumer) Description() string {
    return "AMQP consumer plugin"
}

func (a *AMQPConsumer) SetParser(parser parsers.Parser) {
    a.parser = parser
}

// All gathering is done in the Start function
func (a *AMQPConsumer) Gather(_ telegraf.Accumulator) error {
    return nil
}

func (a *AMQPConsumer) createConfig() (*amqp.Config, error) {
    // make new tls config
    tls, err := internal.GetTLSConfig(
        a.SSLCert, a.SSLKey, a.SSLCA, a.InsecureSkipVerify)
    if err != nil {
        return nil, err
    }

    // parse auth method
    var sasl []amqp.Authentication // nil by default

    if strings.ToUpper(a.AuthMethod) == "EXTERNAL" {
        sasl = []amqp.Authentication{&externalAuth{}}
    }

    config := amqp.Config{
        TLSClientConfig: tls,
        SASL:            sasl, // if nil, it will be PLAIN
    }
    return &config, nil
}

// Start satisfies the telegraf.ServiceInput interface
func (a *AMQPConsumer) Start(acc telegraf.Accumulator) error {
    amqpConf, err := a.createConfig()
    if err != nil {
        return err
    }

    msgs, err := a.connect(amqpConf)
    if err != nil {
        return err
    }

    a.wg = &sync.WaitGroup{}
    a.wg.Add(1)
    go a.process(msgs, acc)

    go func() {
        err := <-a.conn.NotifyClose(make(chan *amqp.Error))
        if err == nil {
            return
        }

        log.Printf("I! AMQP consumer connection closed: %s; trying to reconnect", err)
        for {
            msgs, err := a.connect(amqpConf)
            if err != nil {
                log.Printf("E! AMQP connection failed: %s", err)
                time.Sleep(10 * time.Second)
                continue
            }

            a.wg.Add(1)
            go a.process(msgs, acc)
            break
        }
    }()

    return nil
}

func (a *AMQPConsumer) connect(amqpConf *amqp.Config) (<-chan amqp.Delivery, error) {
    conn, err := amqp.DialConfig(a.URL, *amqpConf)
    if err != nil {
        return nil, err
    }
    a.conn = conn

    ch, err := conn.Channel()
    if err != nil {
        return nil, fmt.Errorf("Failed to open a channel: %s", err)
    }

    err = ch.ExchangeDeclare(
        a.Exchange, // name
        "topic",    // type
        true,       // durable
        false,      // auto-deleted
        false,      // internal
        false,      // no-wait
        nil,        // arguments
    )
    if err != nil {
        return nil, fmt.Errorf("Failed to declare an exchange: %s", err)
    }

    q, err := ch.QueueDeclare(
        a.Queue, // queue
        true,    // durable
        false,   // delete when unused
        false,   // exclusive
        false,   // no-wait
        nil,     // arguments
    )
    if err != nil {
        return nil, fmt.Errorf("Failed to declare a queue: %s", err)
    }

    err = ch.QueueBind(
        q.Name,       // queue
        a.BindingKey, // binding-key
        a.Exchange,   // exchange
        false,
        nil,
    )
    if err != nil {
        return nil, fmt.Errorf("Failed to bind a queue: %s", err)
    }

    err = ch.Qos(
        a.PrefetchCount,
        0,     // prefetch-size
        false, // global
    )
    if err != nil {
        return nil, fmt.Errorf("Failed to set QoS: %s", err)
    }

    msgs, err := ch.Consume(
        q.Name, // queue
        "",     // consumer
        false,  // auto-ack
        false,  // exclusive
        false,  // no-local
        false,  // no-wait
        nil,    // arguments
    )
    if err != nil {
        return nil, fmt.Errorf("Failed establishing connection to queue: %s", err)
    }

    log.Println("I! Started AMQP consumer")
    return msgs, err
}

// Read messages from queue and add them to the Accumulator
func (a *AMQPConsumer) process(msgs <-chan amqp.Delivery, acc telegraf.Accumulator) {
    defer a.wg.Done()
    for d := range msgs {
        metrics, err := a.parser.Parse(d.Body)
        if err != nil {
            log.Printf("E! %v: error parsing metric - %v", err, string(d.Body))
        } else {
            for _, m := range metrics {
                acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time())
            }
        }

        d.Ack(false)
    }
    log.Printf("I! AMQP consumer queue closed")
}

func (a *AMQPConsumer) Stop() {
    err := a.conn.Close()
    if err != nil && err != amqp.ErrClosed {
        log.Printf("E! Error closing AMQP connection: %s", err)
        return
    }
    a.wg.Wait()
    log.Println("I! Stopped AMQP service")
}

func init() {
    inputs.Add("amqp_consumer", func() telegraf.Input {
        return &AMQPConsumer{
            AuthMethod:    DefaultAuthMethod,
            PrefetchCount: DefaultPrefetchCount,
        }
    })
}
@@ -1,55 +1,84 @@
# Telegraf plugin: Apache
# Apache Input Plugin

#### Plugin arguments:
- **urls** []string: List of apache-status URLs to collect from. Default is "http://localhost/server-status?auto".
- **username** string: Username for HTTP basic authentication
- **password** string: Password for HTTP basic authentication
- **timeout** duration: time that the HTTP connection will remain waiting for response. Defalt 4 seconds ("4s")
The Apache plugin collects server performance information using the [`mod_status`](https://httpd.apache.org/docs/2.4/mod/mod_status.html) module of the [Apache HTTP Server](https://httpd.apache.org/).

##### Optional SSL Config
Typically, the `mod_status` module is configured to expose a page at the `/server-status?auto` location of the Apache server. The [ExtendedStatus](https://httpd.apache.org/docs/2.4/mod/core.html#extendedstatus) option must be enabled in order to collect all available fields. For information about how to configure your server, refer to the [module documentation](https://httpd.apache.org/docs/2.4/mod/mod_status.html#enable).

- **ssl_ca** string: the full path for the SSL CA certicate
- **ssl_cert** string: the full path for the SSL certificate
- **ssl_key** string: the full path for the key file
- **insecure_skip_verify** bool: if true HTTP client will skip all SSL verifications related to peer and host. Default to false
### Configuration:

#### Description
```toml
# Read Apache status information (mod_status)
[[inputs.apache]]
  ## An array of URLs to gather from, must be directed at the machine
  ## readable version of the mod_status page including the auto query string.
  ## Default is "http://localhost/server-status?auto".
  urls = ["http://localhost/server-status?auto"]

The Apache plugin collects from the /server-status?auto URL. See
[apache.org/server-status?auto](http://www.apache.org/server-status?auto) for an
example. And
[here](http://httpd.apache.org/docs/2.2/mod/mod_status.html) for the apache
mod_status documentation.
  ## Credentials for basic HTTP authentication.
  # username = "myuser"
  # password = "mypassword"

# Measurements:
  ## Maximum time to receive response.
  # response_timeout = "5s"

Meta:
- tags: `port=<port>`, `server=url`
  ## Optional SSL Config
  # ssl_ca = "/etc/telegraf/ca.pem"
  # ssl_cert = "/etc/telegraf/cert.pem"
  # ssl_key = "/etc/telegraf/key.pem"
  ## Use SSL but skip chain & host verification
  # insecure_skip_verify = false
```

- apache_TotalAccesses
- apache_TotalkBytes
- apache_CPULoad
- apache_Uptime
- apache_ReqPerSec
- apache_BytesPerSec
- apache_BytesPerReq
- apache_BusyWorkers
- apache_IdleWorkers
- apache_ConnsTotal
- apache_ConnsAsyncWriting
- apache_ConnsAsyncKeepAlive
- apache_ConnsAsyncClosing
### Measurements & Fields:

### Scoreboard measurements
- apache
  - BusyWorkers (float)
  - BytesPerReq (float)
  - BytesPerSec (float)
  - ConnsAsyncClosing (float)
  - ConnsAsyncKeepAlive (float)
  - ConnsAsyncWriting (float)
  - ConnsTotal (float)
  - CPUChildrenSystem (float)
  - CPUChildrenUser (float)
  - CPULoad (float)
  - CPUSystem (float)
  - CPUUser (float)
  - IdleWorkers (float)
  - Load1 (float)
  - Load5 (float)
  - Load15 (float)
  - ParentServerConfigGeneration (float)
  - ParentServerMPMGeneration (float)
  - ReqPerSec (float)
  - ServerUptimeSeconds (float)
  - TotalAccesses (float)
  - TotalkBytes (float)
  - Uptime (float)

- apache_scboard_waiting
- apache_scboard_starting
- apache_scboard_reading
- apache_scboard_sending
- apache_scboard_keepalive
- apache_scboard_dnslookup
- apache_scboard_closing
- apache_scboard_logging
- apache_scboard_finishing
- apache_scboard_idle_cleanup
- apache_scboard_open
The following fields are collected from the `Scoreboard`, and represent the number of requests in the given state:

- apache
  - scboard_closing (float)
  - scboard_dnslookup (float)
  - scboard_finishing (float)
  - scboard_idle_cleanup (float)
  - scboard_keepalive (float)
  - scboard_logging (float)
  - scboard_open (float)
  - scboard_reading (float)
  - scboard_sending (float)
  - scboard_starting (float)
  - scboard_waiting (float)

### Tags:

- All measurements have the following tags:
  - port
  - server

### Example Output:

```
apache,port=80,server=debian-stretch-apache BusyWorkers=1,BytesPerReq=0,BytesPerSec=0,CPUChildrenSystem=0,CPUChildrenUser=0,CPULoad=0.00995025,CPUSystem=0.01,CPUUser=0.01,ConnsAsyncClosing=0,ConnsAsyncKeepAlive=0,ConnsAsyncWriting=0,ConnsTotal=0,IdleWorkers=49,Load1=0.01,Load15=0,Load5=0,ParentServerConfigGeneration=3,ParentServerMPMGeneration=2,ReqPerSec=0.00497512,ServerUptimeSeconds=201,TotalAccesses=1,TotalkBytes=0,Uptime=201,scboard_closing=0,scboard_dnslookup=0,scboard_finishing=0,scboard_idle_cleanup=0,scboard_keepalive=0,scboard_logging=0,scboard_open=100,scboard_reading=0,scboard_sending=1,scboard_starting=0,scboard_waiting=49 1502489900000000000
```

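The `?auto` page the plugin scrapes is a plain-text list of `Key: Value` lines, which is why the fields above map one-to-one onto status keys. A standalone sketch of that parsing step, independent of the plugin's own implementation (URL and field handling simplified):

```go
package main

import (
    "bufio"
    "fmt"
    "net/http"
    "strconv"
    "strings"
)

func main() {
    resp, err := http.Get("http://localhost/server-status?auto")
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()

    fields := map[string]float64{}
    sc := bufio.NewScanner(resp.Body)
    for sc.Scan() {
        // Lines look like "BusyWorkers: 1"; the Scoreboard line holds
        // per-slot state characters and is skipped by the numeric parse.
        parts := strings.SplitN(sc.Text(), ":", 2)
        if len(parts) != 2 {
            continue
        }
        if v, err := strconv.ParseFloat(strings.TrimSpace(parts[1]), 64); err == nil {
            fields[strings.TrimSpace(parts[0])] = v
        }
    }
    fmt.Println(fields)
}
```
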
@@ -8,6 +8,7 @@ import (
    "net/url"
    "strconv"
    "strings"
    "sync"
    "time"

    "github.com/influxdata/telegraf"
@@ -28,18 +29,22 @@ type Apache struct {
    SSLKey string `toml:"ssl_key"`
    // Use SSL but skip chain & host verification
    InsecureSkipVerify bool

    client *http.Client
}

var sampleConfig = `
  ## An array of Apache status URI to gather stats.
  ## An array of URLs to gather from, must be directed at the machine
  ## readable version of the mod_status page including the auto query string.
  ## Default is "http://localhost/server-status?auto".
  urls = ["http://localhost/server-status?auto"]
  ## user credentials for basic HTTP authentication
  username = "myuser"
  password = "mypassword"

  ## Timeout to the complete conection and reponse time in seconds
  response_timeout = "25s" ## default to 5 seconds
  ## Credentials for basic HTTP authentication.
  # username = "myuser"
  # password = "mypassword"

  ## Maximum time to receive response.
  # response_timeout = "5s"

  ## Optional SSL Config
  # ssl_ca = "/etc/telegraf/ca.pem"
@@ -65,55 +70,51 @@ func (n *Apache) Gather(acc telegraf.Accumulator) error {
        n.ResponseTimeout.Duration = time.Second * 5
    }

    var outerr error
    var errch = make(chan error)

    for _, u := range n.Urls {
        addr, err := url.Parse(u)
        if err != nil {
            return fmt.Errorf("Unable to parse address '%s': %s", u, err)
        }

        go func(addr *url.URL) {
            errch <- n.gatherUrl(addr, acc)
        }(addr)
    }

    // Drain channel, waiting for all requests to finish and save last error.
    for range n.Urls {
        if err := <-errch; err != nil {
            outerr = err
        }
    }

    return outerr
}

func (n *Apache) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error {

    var tr *http.Transport

    if addr.Scheme == "https" {
        tlsCfg, err := internal.GetTLSConfig(
            n.SSLCert, n.SSLKey, n.SSLCA, n.InsecureSkipVerify)
    if n.client == nil {
        client, err := n.createHttpClient()
        if err != nil {
            return err
        }
        tr = &http.Transport{
            ResponseHeaderTimeout: time.Duration(3 * time.Second),
            TLSClientConfig:       tlsCfg,
        }
    } else {
        tr = &http.Transport{
            ResponseHeaderTimeout: time.Duration(3 * time.Second),
        n.client = client
    }

    var wg sync.WaitGroup
    wg.Add(len(n.Urls))
    for _, u := range n.Urls {
        addr, err := url.Parse(u)
        if err != nil {
            acc.AddError(fmt.Errorf("Unable to parse address '%s': %s", u, err))
            continue
        }

        go func(addr *url.URL) {
            defer wg.Done()
            acc.AddError(n.gatherUrl(addr, acc))
        }(addr)
    }

    wg.Wait()
    return nil
}

func (n *Apache) createHttpClient() (*http.Client, error) {
    tlsCfg, err := internal.GetTLSConfig(
        n.SSLCert, n.SSLKey, n.SSLCA, n.InsecureSkipVerify)
    if err != nil {
        return nil, err
    }

    client := &http.Client{
        Transport: tr,
        Timeout:   n.ResponseTimeout.Duration,
        Transport: &http.Transport{
            TLSClientConfig: tlsCfg,
        },
        Timeout: n.ResponseTimeout.Duration,
    }

    return client, nil
}

func (n *Apache) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error {
    req, err := http.NewRequest("GET", addr.String(), nil)
    if err != nil {
        return fmt.Errorf("error on new request to %s : %s\n", addr.String(), err)
@@ -123,7 +124,7 @@ func (n *Apache) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error {
        req.SetBasicAuth(n.Username, n.Password)
    }

    resp, err := client.Do(req)
    resp, err := n.client.Do(req)
    if err != nil {
        return fmt.Errorf("error on request to %s : %s\n", addr.String(), err)
    }

@@ -41,7 +41,7 @@ func TestHTTPApache(t *testing.T) {
    }

    var acc testutil.Accumulator
    err := a.Gather(&acc)
    err := acc.GatherError(a.Gather)
    require.NoError(t, err)

    fields := map[string]interface{}{

@@ -70,7 +70,7 @@ Using this configuration:
When run with:

```
./telegraf -config telegraf.conf -input-filter bcache -test
./telegraf --config telegraf.conf --input-filter bcache --test
```

It produces:

@@ -7,7 +7,6 @@ import (
    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/plugins/inputs"
    "io/ioutil"
    "log"
    "net/http"
    "net/url"
    "strings"
@@ -123,8 +122,8 @@ func (j javaMetric) addTagsFields(out map[string]interface{}) {
        }
        j.acc.AddFields(tokens["class"]+tokens["type"], fields, tags)
    } else {
        fmt.Printf("Missing key 'value' in '%s' output response\n%v\n",
            j.metric, out)
        j.acc.AddError(fmt.Errorf("Missing key 'value' in '%s' output response\n%v\n",
            j.metric, out))
    }
}

@@ -155,8 +154,8 @@ func (c cassandraMetric) addTagsFields(out map[string]interface{}) {
            addCassandraMetric(k, c, v.(map[string]interface{}))
        }
    } else {
        fmt.Printf("Missing key 'value' in '%s' output response\n%v\n",
            c.metric, out)
        c.acc.AddError(fmt.Errorf("Missing key 'value' in '%s' output response\n%v\n",
            c.metric, out))
        return
    }
} else {
@@ -164,8 +163,8 @@ func (c cassandraMetric) addTagsFields(out map[string]interface{}) {
        addCassandraMetric(r.(map[string]interface{})["mbean"].(string),
            c, values.(map[string]interface{}))
    } else {
        fmt.Printf("Missing key 'value' in '%s' output response\n%v\n",
            c.metric, out)
        c.acc.AddError(fmt.Errorf("Missing key 'value' in '%s' output response\n%v\n",
            c.metric, out))
        return
    }
}
@@ -274,8 +273,8 @@ func (c *Cassandra) Gather(acc telegraf.Accumulator) error {
            m = newCassandraMetric(serverTokens["host"], metric, acc)
        } else {
            // unsupported metric type
            log.Printf("I! Unsupported Cassandra metric [%s], skipping",
                metric)
            acc.AddError(fmt.Errorf("E! Unsupported Cassandra metric [%s], skipping",
                metric))
            continue
        }

@@ -283,7 +282,8 @@ func (c *Cassandra) Gather(acc telegraf.Accumulator) error {
        requestUrl, err := url.Parse("http://" + serverTokens["host"] + ":" +
            serverTokens["port"] + context + metric)
        if err != nil {
            return err
            acc.AddError(err)
            continue
        }
        if serverTokens["user"] != "" && serverTokens["passwd"] != "" {
            requestUrl.User = url.UserPassword(serverTokens["user"],
@@ -291,8 +291,12 @@ func (c *Cassandra) Gather(acc telegraf.Accumulator) error {
        }

        out, err := c.getAttr(requestUrl)
        if err != nil {
            acc.AddError(err)
            continue
        }
        if out["status"] != 200.0 {
            fmt.Printf("URL returned with status %v\n", out["status"])
            acc.AddError(fmt.Errorf("URL returned with status %v - %s\n", out["status"], requestUrl))
            continue
        }
        m.addTagsFields(out)

@@ -151,7 +151,7 @@ func TestHttpJsonJavaMultiValue(t *testing.T) {

    var acc testutil.Accumulator
    acc.SetDebug(true)
    err := cassandra.Gather(&acc)
    err := acc.GatherError(cassandra.Gather)

    assert.Nil(t, err)
    assert.Equal(t, 2, len(acc.Metrics))
@@ -180,7 +180,7 @@ func TestHttpJsonJavaMultiType(t *testing.T) {

    var acc testutil.Accumulator
    acc.SetDebug(true)
    err := cassandra.Gather(&acc)
    err := acc.GatherError(cassandra.Gather)

    assert.Nil(t, err)
    assert.Equal(t, 2, len(acc.Metrics))
@@ -197,16 +197,17 @@ func TestHttpJsonJavaMultiType(t *testing.T) {
}

// Test that the proper values are ignored or collected
func TestHttpJsonOn404(t *testing.T) {
func TestHttp404(t *testing.T) {

    jolokia := genJolokiaClientStub(validJavaMultiValueJSON, 404, Servers,
    jolokia := genJolokiaClientStub(invalidJSON, 404, Servers,
        []string{HeapMetric})

    var acc testutil.Accumulator
    err := jolokia.Gather(&acc)
    err := acc.GatherError(jolokia.Gather)

    assert.Nil(t, err)
    assert.Error(t, err)
    assert.Equal(t, 0, len(acc.Metrics))
    assert.Contains(t, err.Error(), "has status code 404")
}

// Test that the proper values are ignored or collected for class=Cassandra
@@ -214,7 +215,7 @@ func TestHttpJsonCassandraMultiValue(t *testing.T) {
    cassandra := genJolokiaClientStub(validCassandraMultiValueJSON, 200, Servers, []string{ReadLatencyMetric})

    var acc testutil.Accumulator
    err := cassandra.Gather(&acc)
    err := acc.GatherError(cassandra.Gather)

    assert.Nil(t, err)
    assert.Equal(t, 1, len(acc.Metrics))
@@ -246,7 +247,7 @@ func TestHttpJsonCassandraNestedMultiValue(t *testing.T) {

    var acc testutil.Accumulator
    acc.SetDebug(true)
    err := cassandra.Gather(&acc)
    err := acc.GatherError(cassandra.Gather)

    assert.Nil(t, err)
    assert.Equal(t, 2, len(acc.Metrics))

@@ -117,7 +117,7 @@ All fields are collected under the **ceph** measurement and stored as float64s.
* recovering\_objects\_per\_sec (float)

* ceph\_pgmap\_state
  * state name e.g. active+clean (float)
  * count (float)

* ceph\_usage
  * bytes\_used (float)
@@ -186,7 +186,7 @@ All measurements will have the following tags:

*Cluster Stats*

* ceph\_pg\_state has the following tags:
* ceph\_pgmap\_state has the following tags:
  * state (state for which the value applies e.g. active+clean, active+remapped+backfill)
* ceph\_pool\_usage has the following tags:
  * id
@@ -200,7 +200,7 @@ All measurements will have the following tags:
*Admin Socket Stats*

<pre>
telegraf -test -config /etc/telegraf/telegraf.conf -config-directory /etc/telegraf/telegraf.d -input-filter ceph
telegraf --config /etc/telegraf/telegraf.conf --config-directory /etc/telegraf/telegraf.d --input-filter ceph --test
* Plugin: ceph, Collection 1
> ceph,collection=paxos, id=node-2,role=openstack,type=mon accept_timeout=0,begin=14931264,begin_bytes.avgcount=14931264,begin_bytes.sum=180309683362,begin_keys.avgcount=0,begin_keys.sum=0,begin_latency.avgcount=14931264,begin_latency.sum=9293.29589,collect=1,collect_bytes.avgcount=1,collect_bytes.sum=24,collect_keys.avgcount=1,collect_keys.sum=1,collect_latency.avgcount=1,collect_latency.sum=0.00028,collect_timeout=0,collect_uncommitted=0,commit=14931264,commit_bytes.avgcount=0,commit_bytes.sum=0,commit_keys.avgcount=0,commit_keys.sum=0,commit_latency.avgcount=0,commit_latency.sum=0,lease_ack_timeout=0,lease_timeout=0,new_pn=0,new_pn_latency.avgcount=0,new_pn_latency.sum=0,refresh=14931264,refresh_latency.avgcount=14931264,refresh_latency.sum=8706.98498,restart=4,share_state=0,share_state_bytes.avgcount=0,share_state_bytes.sum=0,share_state_keys.avgcount=0,share_state_keys.sum=0,start_leader=0,start_peon=1,store_state=14931264,store_state_bytes.avgcount=14931264,store_state_bytes.sum=353119959211,store_state_keys.avgcount=14931264,store_state_keys.sum=289807523,store_state_latency.avgcount=14931264,store_state_latency.sum=10952.835724 1462821234814535148
> ceph,collection=throttle-mon_client_bytes,id=node-2,type=mon get=1413017,get_or_fail_fail=0,get_or_fail_success=0,get_sum=71211705,max=104857600,put=1413013,put_sum=71211459,take=0,take_sum=0,val=246,wait.avgcount=0,wait.sum=0 1462821234814737219
@@ -213,7 +213,8 @@ telegraf -test -config /etc/telegraf/telegraf.conf -config-directory /etc/telegr
<pre>
> ceph_osdmap,host=ceph-mon-0 epoch=170772,full=false,nearfull=false,num_in_osds=340,num_osds=340,num_remapped_pgs=0,num_up_osds=340 1468841037000000000
> ceph_pgmap,host=ceph-mon-0 bytes_avail=634895531270144,bytes_total=812117151809536,bytes_used=177221620539392,data_bytes=56979991615058,num_pgs=22952,op_per_sec=15869,read_bytes_sec=43956026,version=39387592,write_bytes_sec=165344818 1468841037000000000
> ceph_pgmap_state,host=ceph-mon-0 active+clean=22952 1468928660000000000
> ceph_pgmap_state,host=ceph-mon-0,state=active+clean count=22952 1468928660000000000
> ceph_pgmap_state,host=ceph-mon-0,state=active+degraded count=16 1468928660000000000
> ceph_usage,host=ceph-mon-0 total_avail_bytes=634895514791936,total_bytes=812117151809536,total_used_bytes=177221637017600 1468841037000000000
> ceph_pool_usage,host=ceph-mon-0,id=150,name=cinder.volumes bytes_used=12648553794802,kb_used=12352103316,max_avail=154342562489244,objects=3026295 1468841037000000000
> ceph_pool_usage,host=ceph-mon-0,id=182,name=cinder.volumes.flash bytes_used=8541308223964,kb_used=8341121313,max_avail=39388593563936,objects=2075066 1468841037000000000

@@ -4,13 +4,14 @@ import (
    "bytes"
    "encoding/json"
    "fmt"
    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/plugins/inputs"
    "io/ioutil"
    "log"
    "os/exec"
    "path/filepath"
    "strings"

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/plugins/inputs"
)

const (
@@ -100,15 +101,15 @@ func (c *Ceph) gatherAdminSocketStats(acc telegraf.Accumulator) error {
    for _, s := range sockets {
        dump, err := perfDump(c.CephBinary, s)
        if err != nil {
            log.Printf("E! error reading from socket '%s': %v", s.socket, err)
            acc.AddError(fmt.Errorf("E! error reading from socket '%s': %v", s.socket, err))
            continue
        }
        data, err := parseDump(dump)
        if err != nil {
            log.Printf("E! error parsing dump from socket '%s': %v", s.socket, err)
            acc.AddError(fmt.Errorf("E! error parsing dump from socket '%s': %v", s.socket, err))
            continue
        }
        for tag, metrics := range *data {
        for tag, metrics := range data {
            acc.AddFields(measurement,
                map[string]interface{}(metrics),
                map[string]string{"type": s.sockType, "id": s.sockId, "collection": tag})
@@ -244,25 +245,19 @@ type taggedMetricMap map[string]metricMap

// Parses a raw JSON string into a taggedMetricMap
// Delegates the actual parsing to newTaggedMetricMap(..)
func parseDump(dump string) (*taggedMetricMap, error) {
func parseDump(dump string) (taggedMetricMap, error) {
    data := make(map[string]interface{})
    err := json.Unmarshal([]byte(dump), &data)
    if err != nil {
        return nil, fmt.Errorf("failed to parse json: '%s': %v", dump, err)
    }

    tmm := newTaggedMetricMap(data)

    if err != nil {
        return nil, fmt.Errorf("failed to tag dataset: '%v': %v", tmm, err)
    }

    return tmm, nil
    return newTaggedMetricMap(data), nil
}

// Builds a TaggedMetricMap out of a generic string map.
// The top-level key is used as a tag and all sub-keys are flattened into metrics
func newTaggedMetricMap(data map[string]interface{}) *taggedMetricMap {
func newTaggedMetricMap(data map[string]interface{}) taggedMetricMap {
    tmm := make(taggedMetricMap)
    for tag, datapoints := range data {
        mm := make(metricMap)
@@ -271,7 +266,7 @@ func newTaggedMetricMap(data map[string]interface{}) *taggedMetricMap {
        }
        tmm[tag] = mm
    }
    return &tmm
    return tmm
}

// Recursively flattens any k-v hierarchy present in data.
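
The flattening referenced here is what produces dotted field names such as `store_state_latency.sum` in the tests below. A small, self-contained sketch of the idea (`flatten` is a hypothetical stand-in, not the plugin's actual function):

```go
package main

import "fmt"

// flatten collapses nested JSON objects into dot-separated keys, e.g.
// {"store_state_latency": {"sum": 9293.3}} -> {"store_state_latency.sum": 9293.3}.
func flatten(prefix string, in, out map[string]interface{}) {
    for k, v := range in {
        key := k
        if prefix != "" {
            key = prefix + "." + k
        }
        if nested, ok := v.(map[string]interface{}); ok {
            flatten(key, nested, out)
        } else {
            out[key] = v
        }
    }
}

func main() {
    out := map[string]interface{}{}
    flatten("", map[string]interface{}{
        "store_state_latency": map[string]interface{}{"avgcount": 14931264, "sum": 9293.29589},
    }, out)
    fmt.Println(out)
}
```
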
@@ -376,36 +371,53 @@ func decodeStatusPgmap(acc telegraf.Accumulator, data map[string]interface{}) er
    return nil
}

func decodeStatusPgmapState(acc telegraf.Accumulator, data map[string]interface{}) error {
func extractPgmapStates(data map[string]interface{}) ([]interface{}, error) {
    const key = "pgs_by_state"

    pgmap, ok := data["pgmap"].(map[string]interface{})
    if !ok {
        return fmt.Errorf("WARNING %s - unable to decode pgmap", measurement)
        return nil, fmt.Errorf("WARNING %s - unable to decode pgmap", measurement)
    }
    fields := make(map[string]interface{})
    for key, value := range pgmap {
        switch value.(type) {
        case []interface{}:
            if key != "pgs_by_state" {
                continue
            }
            for _, state := range value.([]interface{}) {
                state_map, ok := state.(map[string]interface{})
                if !ok {
                    return fmt.Errorf("WARNING %s - unable to decode pg state", measurement)
                }
                state_name, ok := state_map["state_name"].(string)
                if !ok {
                    return fmt.Errorf("WARNING %s - unable to decode pg state name", measurement)
                }
                state_count, ok := state_map["count"].(float64)
                if !ok {
                    return fmt.Errorf("WARNING %s - unable to decode pg state count", measurement)
                }
                fields[state_name] = state_count
            }

    s, ok := pgmap[key]
    if !ok {
        return nil, fmt.Errorf("WARNING %s - pgmap is missing the %s field", measurement, key)
    }

    states, ok := s.([]interface{})
    if !ok {
        return nil, fmt.Errorf("WARNING %s - pgmap[%s] is not a list", measurement, key)
    }
    return states, nil
}

func decodeStatusPgmapState(acc telegraf.Accumulator, data map[string]interface{}) error {
    states, err := extractPgmapStates(data)
    if err != nil {
        return err
    }
    for _, state := range states {
        stateMap, ok := state.(map[string]interface{})
        if !ok {
            return fmt.Errorf("WARNING %s - unable to decode pg state", measurement)
        }
        stateName, ok := stateMap["state_name"].(string)
        if !ok {
            return fmt.Errorf("WARNING %s - unable to decode pg state name", measurement)
        }
        stateCount, ok := stateMap["count"].(float64)
        if !ok {
            return fmt.Errorf("WARNING %s - unable to decode pg state count", measurement)
        }

        tags := map[string]string{
            "state": stateName,
        }
        fields := map[string]interface{}{
            "count": stateCount,
        }
        acc.AddFields("ceph_pgmap_state", fields, tags)
    }
    acc.AddFields("ceph_pgmap_state", fields, map[string]string{})
    return nil
}

@@ -1,15 +1,17 @@
package ceph

import (
    "encoding/json"
    "fmt"
    "github.com/influxdata/telegraf/testutil"
    "github.com/stretchr/testify/assert"
    "io/ioutil"
    "os"
    "path"
    "strconv"
    "strings"
    "testing"

    "github.com/influxdata/telegraf/testutil"
    "github.com/stretchr/testify/assert"
)

const (
@@ -24,15 +26,38 @@ func TestParseSockId(t *testing.T) {
func TestParseMonDump(t *testing.T) {
    dump, err := parseDump(monPerfDump)
    assert.NoError(t, err)
    assert.InEpsilon(t, 5678670180, (*dump)["cluster"]["osd_kb_used"], epsilon)
    assert.InEpsilon(t, 6866.540527000, (*dump)["paxos"]["store_state_latency.sum"], epsilon)
    assert.InEpsilon(t, int64(5678670180), dump["cluster"]["osd_kb_used"], epsilon)
    assert.InEpsilon(t, 6866.540527000, dump["paxos"]["store_state_latency.sum"], epsilon)
}

func TestParseOsdDump(t *testing.T) {
    dump, err := parseDump(osdPerfDump)
    assert.NoError(t, err)
    assert.InEpsilon(t, 552132.109360000, (*dump)["filestore"]["commitcycle_interval.sum"], epsilon)
    assert.Equal(t, float64(0), (*dump)["mutex-FileJournal::finisher_lock"]["wait.avgcount"])
    assert.InEpsilon(t, 552132.109360000, dump["filestore"]["commitcycle_interval.sum"], epsilon)
    assert.Equal(t, float64(0), dump["mutex-FileJournal::finisher_lock"]["wait.avgcount"])
}

func TestDecodeStatusPgmapState(t *testing.T) {
    data := make(map[string]interface{})
    err := json.Unmarshal([]byte(clusterStatusDump), &data)
    assert.NoError(t, err)

    acc := &testutil.Accumulator{}
    err = decodeStatusPgmapState(acc, data)
    assert.NoError(t, err)

    var results = []struct {
        fields map[string]interface{}
        tags   map[string]string
    }{
        {map[string]interface{}{"count": float64(2560)}, map[string]string{"state": "active+clean"}},
        {map[string]interface{}{"count": float64(10)}, map[string]string{"state": "active+scrubbing"}},
        {map[string]interface{}{"count": float64(5)}, map[string]string{"state": "active+backfilling"}},
    }

    for _, r := range results {
        acc.AssertContainsTaggedFields(t, "ceph_pgmap_state", r.fields, r.tags)
    }
}

func TestGather(t *testing.T) {
@@ -685,3 +710,127 @@ var osdPerfDump = `
    "wait": { "avgcount": 0,
        "sum": 0.000000000}}}
`
var clusterStatusDump = `
{
  "health": {
    "health": {
      "health_services": [
        {
          "mons": [
            {
              "name": "a",
              "kb_total": 114289256,
              "kb_used": 26995516,
              "kb_avail": 81465132,
              "avail_percent": 71,
              "last_updated": "2017-01-03 17:20:57.595004",
              "store_stats": {
                "bytes_total": 942117141,
                "bytes_sst": 0,
                "bytes_log": 4345406,
                "bytes_misc": 937771735,
                "last_updated": "0.000000"
              },
              "health": "HEALTH_OK"
            },
            {
              "name": "b",
              "kb_total": 114289256,
              "kb_used": 27871624,
              "kb_avail": 80589024,
              "avail_percent": 70,
              "last_updated": "2017-01-03 17:20:47.784331",
              "store_stats": {
                "bytes_total": 454853104,
                "bytes_sst": 0,
                "bytes_log": 5788320,
                "bytes_misc": 449064784,
                "last_updated": "0.000000"
              },
              "health": "HEALTH_OK"
            },
            {
              "name": "c",
              "kb_total": 130258508,
              "kb_used": 38076996,
              "kb_avail": 85541692,
              "avail_percent": 65,
              "last_updated": "2017-01-03 17:21:03.311123",
              "store_stats": {
                "bytes_total": 455555199,
                "bytes_sst": 0,
                "bytes_log": 6950876,
                "bytes_misc": 448604323,
                "last_updated": "0.000000"
              },
              "health": "HEALTH_OK"
            }
          ]
        }
      ]
    },
    "timechecks": {
      "epoch": 504,
      "round": 34642,
      "round_status": "finished",
      "mons": [
        { "name": "a", "skew": 0, "latency": 0, "health": "HEALTH_OK" },
        { "name": "b", "skew": -0, "latency": 0.000951, "health": "HEALTH_OK" },
        { "name": "c", "skew": -0, "latency": 0.000946, "health": "HEALTH_OK" }
      ]
    },
    "summary": [],
    "overall_status": "HEALTH_OK",
    "detail": []
  },
  "fsid": "01234567-abcd-9876-0123-ffeeddccbbaa",
  "election_epoch": 504,
  "quorum": [ 0, 1, 2 ],
  "quorum_names": [ "a", "b", "c" ],
  "monmap": {
    "epoch": 17,
    "fsid": "01234567-abcd-9876-0123-ffeeddccbbaa",
    "modified": "2016-04-11 14:01:52.600198",
    "created": "0.000000",
    "mons": [
      { "rank": 0, "name": "a", "addr": "192.168.0.1:6789/0" },
      { "rank": 1, "name": "b", "addr": "192.168.0.2:6789/0" },
      { "rank": 2, "name": "c", "addr": "192.168.0.3:6789/0" }
    ]
  },
  "osdmap": {
    "osdmap": {
      "epoch": 21734,
      "num_osds": 24,
      "num_up_osds": 24,
      "num_in_osds": 24,
      "full": false,
      "nearfull": false,
      "num_remapped_pgs": 0
    }
  },
  "pgmap": {
    "pgs_by_state": [
      { "state_name": "active+clean", "count": 2560 },
      { "state_name": "active+scrubbing", "count": 10 },
      { "state_name": "active+backfilling", "count": 5 }
    ],
    "version": 52314277,
    "num_pgs": 2560,
    "data_bytes": 2700031960713,
    "bytes_used": 7478347665408,
    "bytes_avail": 9857462382592,
    "bytes_total": 17335810048000,
    "read_bytes_sec": 0,
    "write_bytes_sec": 367217,
    "op_per_sec": 98
  },
  "mdsmap": {
    "epoch": 1,
    "up": 0,
    "in": 0,
    "max": 0,
    "by_rank": []
  }
}
`

@@ -22,10 +22,11 @@ func (g *CGroup) Gather(acc telegraf.Accumulator) error {

    for dir := range list {
        if dir.err != nil {
            return dir.err
            acc.AddError(dir.err)
            continue
        }
        if err := g.gatherDir(dir.path, acc); err != nil {
            return err
            acc.AddError(err)
        }
    }
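This hunk is the first of many in this changeset that swap an early `return err` for `acc.AddError(err)`, so one bad cgroup directory (or server, file, or host in the later plugins) no longer aborts the whole collection. The general shape of the pattern, sketched with hypothetical `Plugin`, `Servers`, and `gatherServer` names:

```go
// Sketch of the error-accumulation pattern adopted across these inputs:
// per-item failures are recorded on the accumulator and collection
// continues; Gather itself only returns fatal setup errors.
func (p *Plugin) Gather(acc telegraf.Accumulator) error {
    for _, server := range p.Servers {
        if err := p.gatherServer(server, acc); err != nil {
            acc.AddError(err) // logged by the agent; other servers still run
        }
    }
    return nil
}
```

Tests then call `testutil.Accumulator.GatherError`, which runs the gather function and surfaces accumulated errors as a single return value; that is why so many test hunks below replace `plugin.Gather(&acc)` with `acc.GatherError(plugin.Gather)`.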

@@ -224,7 +225,7 @@ var fileFormats = [...]fileFormat{
}

func numberOrString(s string) interface{} {
    i, err := strconv.Atoi(s)
    i, err := strconv.ParseInt(s, 10, 64)
    if err == nil {
        return i
    }
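The hunk ends before the function does. For context, the complete function as implied here (the string fallback is an assumption, inferred from the `"memory.use_hierarchy": "12-781"` expectations in the tests below):

```go
// numberOrString parses s as a 64-bit integer when possible and falls
// back to the raw string otherwise. Replacing strconv.Atoi (which
// returns a platform-sized int) with strconv.ParseInt(..., 64) is what
// makes every numeric cgroup field an int64; hence the int64(...)
// conversions throughout the tests that follow.
func numberOrString(s string) interface{} {
    i, err := strconv.ParseInt(s, 10, 64)
    if err == nil {
        return i
    }
    return s
}
```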

@@ -24,24 +24,24 @@ var cg1 = &CGroup{
func TestCgroupStatistics_1(t *testing.T) {
    var acc testutil.Accumulator

    err := cg1.Gather(&acc)
    err := acc.GatherError(cg1.Gather)
    require.NoError(t, err)

    tags := map[string]string{
        "path": "testdata/memory",
    }
    fields := map[string]interface{}{
        "memory.stat.cache": 1739362304123123123,
        "memory.stat.rss": 1775325184,
        "memory.stat.rss_huge": 778043392,
        "memory.stat.mapped_file": 421036032,
        "memory.stat.dirty": -307200,
        "memory.max_usage_in_bytes.0": 0,
        "memory.max_usage_in_bytes.1": -1,
        "memory.max_usage_in_bytes.2": 2,
        "memory.limit_in_bytes": 223372036854771712,
        "memory.stat.cache": int64(1739362304123123123),
        "memory.stat.rss": int64(1775325184),
        "memory.stat.rss_huge": int64(778043392),
        "memory.stat.mapped_file": int64(421036032),
        "memory.stat.dirty": int64(-307200),
        "memory.max_usage_in_bytes.0": int64(0),
        "memory.max_usage_in_bytes.1": int64(-1),
        "memory.max_usage_in_bytes.2": int64(2),
        "memory.limit_in_bytes": int64(223372036854771712),
        "memory.use_hierarchy": "12-781",
        "notify_on_release": 0,
        "notify_on_release": int64(0),
    }
    acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
}
@@ -56,17 +56,17 @@ var cg2 = &CGroup{
func TestCgroupStatistics_2(t *testing.T) {
    var acc testutil.Accumulator

    err := cg2.Gather(&acc)
    err := acc.GatherError(cg2.Gather)
    require.NoError(t, err)

    tags := map[string]string{
        "path": "testdata/cpu",
    }
    fields := map[string]interface{}{
        "cpuacct.usage_percpu.0": -1452543795404,
        "cpuacct.usage_percpu.1": 1376681271659,
        "cpuacct.usage_percpu.2": 1450950799997,
        "cpuacct.usage_percpu.3": -1473113374257,
        "cpuacct.usage_percpu.0": int64(-1452543795404),
        "cpuacct.usage_percpu.1": int64(1376681271659),
        "cpuacct.usage_percpu.2": int64(1450950799997),
        "cpuacct.usage_percpu.3": int64(-1473113374257),
    }
    acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
}
@@ -81,14 +81,14 @@ var cg3 = &CGroup{
func TestCgroupStatistics_3(t *testing.T) {
    var acc testutil.Accumulator

    err := cg3.Gather(&acc)
    err := acc.GatherError(cg3.Gather)
    require.NoError(t, err)

    tags := map[string]string{
        "path": "testdata/memory/group_1",
    }
    fields := map[string]interface{}{
        "memory.limit_in_bytes": 223372036854771712,
        "memory.limit_in_bytes": int64(223372036854771712),
    }
    acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)

@@ -108,14 +108,14 @@ var cg4 = &CGroup{
func TestCgroupStatistics_4(t *testing.T) {
    var acc testutil.Accumulator

    err := cg4.Gather(&acc)
    err := acc.GatherError(cg4.Gather)
    require.NoError(t, err)

    tags := map[string]string{
        "path": "testdata/memory/group_1/group_1_1",
    }
    fields := map[string]interface{}{
        "memory.limit_in_bytes": 223372036854771712,
        "memory.limit_in_bytes": int64(223372036854771712),
    }
    acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)

@@ -140,14 +140,14 @@ var cg5 = &CGroup{
func TestCgroupStatistics_5(t *testing.T) {
    var acc testutil.Accumulator

    err := cg5.Gather(&acc)
    err := acc.GatherError(cg5.Gather)
    require.NoError(t, err)

    tags := map[string]string{
        "path": "testdata/memory/group_1/group_1_1",
    }
    fields := map[string]interface{}{
        "memory.limit_in_bytes": 223372036854771712,
        "memory.limit_in_bytes": int64(223372036854771712),
    }
    acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)

@@ -167,16 +167,16 @@ var cg6 = &CGroup{
func TestCgroupStatistics_6(t *testing.T) {
    var acc testutil.Accumulator

    err := cg6.Gather(&acc)
    err := acc.GatherError(cg6.Gather)
    require.NoError(t, err)

    tags := map[string]string{
        "path": "testdata/memory",
    }
    fields := map[string]interface{}{
        "memory.usage_in_bytes": 3513667584,
        "memory.usage_in_bytes": int64(3513667584),
        "memory.use_hierarchy": "12-781",
        "memory.kmem.limit_in_bytes": 9223372036854771712,
        "memory.kmem.limit_in_bytes": int64(9223372036854771712),
    }
    acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
}

@@ -63,6 +63,7 @@ Delete second or Not synchronised.
### Measurements & Fields:

- chrony
    - system_time (float, seconds)
    - last_offset (float, seconds)
    - rms_offset (float, seconds)
    - frequency (float, ppm)
@@ -82,9 +83,9 @@ Delete second or Not synchronised.
### Example Output:

```
$ telegraf -config telegraf.conf -input-filter chrony -test
$ telegraf --config telegraf.conf --input-filter chrony --test
* Plugin: chrony, Collection 1
> chrony,leap_status=normal,reference_id=192.168.1.1,stratum=3 frequency=-35.657,last_offset=-0.000013616,residual_freq=-0,rms_offset=0.000027073,root_delay=0.000644,root_dispersion=0.003444,skew=0.001,update_interval=1031.2 1463750789687639161
> chrony,leap_status=normal,reference_id=192.168.1.1,stratum=3 frequency=-35.657,system_time=0.000027073,last_offset=-0.000013616,residual_freq=-0,rms_offset=0.000027073,root_delay=0.000644,root_dispersion=0.003444,skew=0.001,update_interval=1031.2 1463750789687639161
```

@@ -1,5 +1,3 @@
// +build linux

package chrony

import (
@@ -92,7 +90,7 @@ func processChronycOutput(out string) (map[string]interface{}, map[string]string
    }
    name := strings.ToLower(strings.Replace(strings.TrimSpace(stats[0]), " ", "_", -1))
    // ignore reference time
    if strings.Contains(name, "time") {
    if strings.Contains(name, "ref_time") {
        continue
    }
    valueFields := strings.Fields(stats[1])
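A note on the narrowed condition, since it explains why the README example above gains a `system_time` field: matching any stat name containing "time" also skipped `system_time`, while matching "ref_time" skips only the reference-time line. A tiny check of that claim (the stat names here follow the lowercase/underscore transformation applied above and are assumed chronyc labels):

```go
// Only the reference-time stat name matches the narrower condition.
for _, name := range []string{"ref_time_(utc)", "system_time", "last_offset"} {
    fmt.Println(name, "skipped:", strings.Contains(name, "ref_time"))
}
```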

@@ -1,3 +0,0 @@
// +build !linux

package chrony
@@ -1,5 +1,3 @@
// +build linux

package chrony

import (
@@ -31,6 +29,7 @@ func TestGather(t *testing.T) {
        "stratum": "3",
    }
    fields := map[string]interface{}{
        "system_time": 0.000020390,
        "last_offset": 0.000012651,
        "rms_offset": 0.000025577,
        "frequency": -16.001,

@@ -9,8 +9,8 @@ API endpoint. In the following order the plugin will attempt to authenticate.
1. Assumed credentials via STS if `role_arn` attribute is specified (source credentials are evaluated from subsequent rules)
2. Explicit credentials from `access_key`, `secret_key`, and `token` attributes
3. Shared profile from `profile` attribute
4. [Environment Variables](https://github.com/aws/aws-sdk-go/wiki/configuring-sdk#environment-variables)
5. [Shared Credentials](https://github.com/aws/aws-sdk-go/wiki/configuring-sdk#shared-credentials-file)
4. [Environment Variables](https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#environment-variables)
5. [Shared Credentials](https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#shared-credentials-file)
6. [EC2 Instance Profile](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html)

### Configuration:
@@ -20,9 +20,24 @@ API endpoint. In the following order the plugin will attempt to authenticate.
  ## Amazon Region (required)
  region = "us-east-1"

  ## Amazon Credentials
  ## Credentials are loaded in the following order
  ## 1) Assumed credentials via STS if role_arn is specified
  ## 2) explicit credentials from 'access_key' and 'secret_key'
  ## 3) shared profile from 'profile'
  ## 4) environment variables
  ## 5) shared credentials file
  ## 6) EC2 Instance Profile
  #access_key = ""
  #secret_key = ""
  #token = ""
  #role_arn = ""
  #profile = ""
  #shared_credential_file = ""

  # The minimum period for Cloudwatch metrics is 1 minute (60s). However not all
  # metrics are made available to the 1 minute period. Some are collected at
  # 3 minute and 5 minutes intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring.
  # 3 minute, 5 minute, or larger intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring.
  # Note that if a period is configured that is smaller than the minimum for a
  # particular metric, that metric will not be returned by the Cloudwatch API
  # and will not be collected by Telegraf.
@@ -42,9 +57,10 @@ API endpoint. In the following order the plugin will attempt to authenticate.
  namespace = "AWS/ELB"

  ## Maximum requests per second. Note that the global default AWS rate limit is
  ## 10 reqs/sec, so if you define multiple namespaces, these should add up to a
  ## maximum of 10. Optional - default value is 10.
  ratelimit = 10
  ## 400 reqs/sec, so if you define multiple namespaces, these should add up to a
  ## maximum of 400. Optional - default value is 200.
  ## See http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_limits.html
  ratelimit = 200

  ## Metrics to Pull (optional)
  ## Defaults to all Metrics in Namespace if nothing is provided
@@ -56,10 +72,6 @@ API endpoint. In the following order the plugin will attempt to authenticate.
    [[inputs.cloudwatch.metrics.dimensions]]
      name = "LoadBalancerName"
      value = "p-example"

    [[inputs.cloudwatch.metrics.dimensions]]
      name = "AvailabilityZone"
      value = "*"
```
#### Requirements and Terminology

@@ -133,6 +145,6 @@ Tag Dimension names are represented in [snake case](https://en.wikipedia.org/wik
### Example Output:

```
$ ./telegraf -config telegraf.conf -input-filter cloudwatch -test
$ ./telegraf --config telegraf.conf --input-filter cloudwatch --test
> cloudwatch_aws_elb,load_balancer_name=p-example,region=us-east-1,unit=seconds latency_average=0.004810798017284538,latency_maximum=0.1100282669067383,latency_minimum=0.0006084442138671875,latency_sample_count=4029,latency_sum=19.382705211639404 1459542420000000000
```

@@ -13,7 +13,6 @@ import (
    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/internal"
    internalaws "github.com/influxdata/telegraf/internal/config/aws"
    "github.com/influxdata/telegraf/internal/errchan"
    "github.com/influxdata/telegraf/internal/limiter"
    "github.com/influxdata/telegraf/plugins/inputs"
)
@@ -82,7 +81,7 @@ func (c *CloudWatch) SampleConfig() string {

  # The minimum period for Cloudwatch metrics is 1 minute (60s). However not all
  # metrics are made available to the 1 minute period. Some are collected at
  # 3 minute and 5 minutes intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring.
  # 3 minute, 5 minute, or larger intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring.
  # Note that if a period is configured that is smaller than the minimum for a
  # particular metric, that metric will not be returned by the Cloudwatch API
  # and will not be collected by Telegraf.
@@ -105,9 +104,10 @@ func (c *CloudWatch) SampleConfig() string {
  namespace = "AWS/ELB"

  ## Maximum requests per second. Note that the global default AWS rate limit is
  ## 10 reqs/sec, so if you define multiple namespaces, these should add up to a
  ## maximum of 10. Optional - default value is 10.
  ratelimit = 10
  ## 400 reqs/sec, so if you define multiple namespaces, these should add up to a
  ## maximum of 400. Optional - default value is 200.
  ## See http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_limits.html
  ratelimit = 200

  ## Metrics to Pull (optional)
  ## Defaults to all Metrics in Namespace if nothing is provided
@@ -185,8 +185,6 @@ func (c *CloudWatch) Gather(acc telegraf.Accumulator) error {
    if err != nil {
        return err
    }
    metricCount := len(metrics)
    errChan := errchan.New(metricCount)

    now := time.Now()

@@ -201,12 +199,12 @@ func (c *CloudWatch) Gather(acc telegraf.Accumulator) error {
        <-lmtr.C
        go func(inm *cloudwatch.Metric) {
            defer wg.Done()
            c.gatherMetric(acc, inm, now, errChan.C)
            acc.AddError(c.gatherMetric(acc, inm, now))
        }(m)
    }
    wg.Wait()

    return errChan.Error()
    return nil
}

func init() {
@@ -214,7 +212,7 @@ func init() {
        ttl, _ := time.ParseDuration("1hr")
        return &CloudWatch{
            CacheTTL: internal.Duration{Duration: ttl},
            RateLimit: 10,
            RateLimit: 200,
        }
    })
}
@@ -284,13 +282,11 @@ func (c *CloudWatch) gatherMetric(
    acc telegraf.Accumulator,
    metric *cloudwatch.Metric,
    now time.Time,
    errChan chan error,
) {
) error {
    params := c.getStatisticsInput(metric, now)
    resp, err := c.client.GetMetricStatistics(params)
    if err != nil {
        errChan <- err
        return
        return err
    }

    for _, point := range resp.Datapoints {
@@ -325,7 +321,7 @@ func (c *CloudWatch) gatherMetric(
        acc.AddFields(formatMeasurement(c.Namespace), fields, tags, *point.Timestamp)
    }

    errChan <- nil
    return nil
}

/*

@@ -58,13 +58,13 @@ func TestGather(t *testing.T) {
        Namespace: "AWS/ELB",
        Delay: internalDuration,
        Period: internalDuration,
        RateLimit: 10,
        RateLimit: 200,
    }

    var acc testutil.Accumulator
    c.client = &mockGatherCloudWatchClient{}

    c.Gather(&acc)
    acc.GatherError(c.Gather)

    fields := map[string]interface{}{}
    fields["latency_minimum"] = 0.1
@@ -146,7 +146,7 @@ func TestSelectMetrics(t *testing.T) {
        Namespace: "AWS/ELB",
        Delay: internalDuration,
        Period: internalDuration,
        RateLimit: 10,
        RateLimit: 200,
        Metrics: []*Metric{
            &Metric{
                MetricNames: []string{"Latency", "RequestCount"},
@@ -207,14 +207,13 @@ func TestGenerateStatisticsInputParams(t *testing.T) {
}

func TestMetricsCacheTimeout(t *testing.T) {
    ttl, _ := time.ParseDuration("5ms")
    cache := &MetricCache{
        Metrics: []*cloudwatch.Metric{},
        Fetched: time.Now(),
        TTL: ttl,
        TTL: time.Minute,
    }

    assert.True(t, cache.IsValid())
    time.Sleep(ttl)
    cache.Fetched = time.Now().Add(-time.Minute)
    assert.False(t, cache.IsValid())
}
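`IsValid` itself is not shown in this diff. A sketch of what the updated test exercises, assuming validity simply means the cache was fetched within the last TTL:

```go
// Sketch: the cache is valid while its age is under TTL. Backdating
// Fetched by a minute (as the updated test does) is deterministic,
// unlike the old 5ms TTL plus time.Sleep, which could flake on a
// loaded CI machine.
func (c *MetricCache) IsValid() bool {
    return c.Metrics != nil && time.Since(c.Fetched) < c.TTL
}
```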

@@ -51,6 +51,6 @@ This input does not use tags.
### Example Output:

```
$ ./telegraf -config telegraf.conf -input-filter conntrack -test
$ ./telegraf --config telegraf.conf --input-filter conntrack --test
conntrack,host=myhost ip_conntrack_count=2,ip_conntrack_max=262144 1461620427667995735
```

@@ -11,7 +11,6 @@ import (

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/plugins/inputs"
    "log"
    "path/filepath"
)

@@ -93,15 +92,15 @@ func (c *Conntrack) Gather(acc telegraf.Accumulator) error {

        contents, err := ioutil.ReadFile(fName)
        if err != nil {
            log.Printf("E! failed to read file '%s': %v", fName, err)
            acc.AddError(fmt.Errorf("E! failed to read file '%s': %v", fName, err))
            continue
        }

        v := strings.TrimSpace(string(contents))
        fields[metricKey], err = strconv.ParseFloat(v, 64)
        if err != nil {
            log.Printf("E! failed to parse metric, expected number but "+
                " found '%s': %v", v, err)
            acc.AddError(fmt.Errorf("E! failed to parse metric, expected number but "+
                " found '%s': %v", v, err))
        }
    }
}

@@ -1,6 +1,6 @@
# Telegraf Input Plugin: Consul

This plugin will collect statistics about all helath checks registered in the Consul. It uses [Consul API](https://www.consul.io/docs/agent/http/health.html#health_state)
This plugin will collect statistics about all health checks registered in Consul. It uses the [Consul API](https://www.consul.io/docs/agent/http/health.html#health_state)
to query the data. It will not report the [telemetry](https://www.consul.io/docs/agent/telemetry.html) but Consul can report those stats already using StatsD protocol if needed.

## Configuration:
@@ -35,12 +35,19 @@ Fields:
- check_name
- service_id
- status
- passing
- critical
- warning

`passing`, `critical`, and `warning` are integer representations of the health
check state. A value of `1` represents that the status was the state of the
health check at this sample.

## Example output

```
$ telegraf --config ./telegraf.conf -input-filter consul -test
$ telegraf --config ./telegraf.conf --input-filter consul --test
* Plugin: consul, Collection 1
> consul_health_checks,host=wolfpit,node=consul-server-node,check_id="serfHealth" check_name="Serf Health Status",service_id="",status="passing" 1464698464486439902
> consul_health_checks,host=wolfpit,node=consul-server-node,service_name=www.example.com,check_id="service:www-example-com.test01" check_name="Service 'www.example.com' check",service_id="www-example-com.test01",status="critical" 1464698464486519036
> consul_health_checks,host=wolfpit,node=consul-server-node,check_id="serfHealth" check_name="Serf Health Status",service_id="",status="passing",passing=1i,critical=0i,warning=0i 1464698464486439902
> consul_health_checks,host=wolfpit,node=consul-server-node,service_name=www.example.com,check_id="service:www-example-com.test01" check_name="Service 'www.example.com' check",service_id="www-example-com.test01",status="critical",passing=0i,critical=1i,warning=0i 1464698464486519036
```

@@ -69,6 +69,10 @@ func (c *Consul) createAPIClient() (*api.Client, error) {
        config.Datacenter = c.Datacentre
    }

    if c.Token != "" {
        config.Token = c.Token
    }

    if c.Username != "" {
        config.HttpAuth = &api.HttpBasicAuth{
            Username: c.Username,
@@ -97,7 +101,12 @@ func (c *Consul) GatherHealthCheck(acc telegraf.Accumulator, checks []*api.Healt

        record["check_name"] = check.Name
        record["service_id"] = check.ServiceID

        record["status"] = check.Status
        record["passing"] = 0
        record["critical"] = 0
        record["warning"] = 0
        record[check.Status] = 1

        tags["node"] = check.Node
        tags["service_name"] = check.ServiceName

@@ -20,10 +20,13 @@ var sampleChecks = []*api.HealthCheck{
    },
}

func TestGatherHealtCheck(t *testing.T) {
func TestGatherHealthCheck(t *testing.T) {
    expectedFields := map[string]interface{}{
        "check_name": "foo.health",
        "status": "passing",
        "passing": 1,
        "critical": 0,
        "warning": 0,
        "service_id": "foo.123",
    }

@@ -22,7 +22,7 @@
### couchbase_node

Tags:
- cluster: whatever you called it in `servers` in the configuration, e.g.: `http://couchbase-0.example.com/`
- cluster: sanitized string from `servers` configuration field e.g.: `http://user:password@couchbase-0.example.com:8091/endpoint` -> `http://couchbase-0.example.com:8091/endpoint`
- hostname: Couchbase's name for the node and port, e.g., `172.16.10.187:8091`

Fields:
@@ -48,7 +48,7 @@ Fields:
## Example output

```
$ telegraf -config telegraf.conf -input-filter couchbase -test
$ telegraf --config telegraf.conf --input-filter couchbase --test
* Plugin: couchbase, Collection 1
> couchbase_node,cluster=https://couchbase-0.example.com/,hostname=172.16.10.187:8091 memory_free=22927384576,memory_total=64424656896 1458381183695864929
> couchbase_node,cluster=https://couchbase-0.example.com/,hostname=172.16.10.65:8091 memory_free=23520161792,memory_total=64424656896 1458381183695972112

@@ -1,10 +1,12 @@
package couchbase

import (
    "regexp"
    "sync"

    couchbase "github.com/couchbase/go-couchbase"
    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/plugins/inputs"
    "sync"
)

type Couchbase struct {
@@ -24,6 +26,8 @@ var sampleConfig = `
  servers = ["http://localhost:8091"]
`

var regexpURI = regexp.MustCompile(`(\S+://)?(\S+\:\S+@)`)

func (r *Couchbase) SampleConfig() string {
    return sampleConfig
}
@@ -42,19 +46,17 @@ func (r *Couchbase) Gather(acc telegraf.Accumulator) error {

    var wg sync.WaitGroup

    var outerr error

    for _, serv := range r.Servers {
        wg.Add(1)
        go func(serv string) {
            defer wg.Done()
            outerr = r.gatherServer(serv, acc, nil)
            acc.AddError(r.gatherServer(serv, acc, nil))
        }(serv)
    }

    wg.Wait()

    return outerr
    return nil
}

func (r *Couchbase) gatherServer(addr string, acc telegraf.Accumulator, pool *couchbase.Pool) error {
@@ -73,15 +75,17 @@ func (r *Couchbase) gatherServer(addr string, acc telegraf.Accumulator, pool *co
        }
        pool = &p
    }

    for i := 0; i < len(pool.Nodes); i++ {
        node := pool.Nodes[i]
        tags := map[string]string{"cluster": addr, "hostname": node.Hostname}
        tags := map[string]string{"cluster": regexpURI.ReplaceAllString(addr, "${1}"), "hostname": node.Hostname}
        fields := make(map[string]interface{})
        fields["memory_free"] = node.MemoryFree
        fields["memory_total"] = node.MemoryTotal
        acc.AddFields("couchbase_node", fields, tags)
    }
    for bucketName, _ := range pool.BucketMap {

    for bucketName := range pool.BucketMap {
        tags := map[string]string{"cluster": addr, "bucket": bucketName}
        bs := pool.BucketMap[bucketName].BasicStats
        fields := make(map[string]interface{})
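The `regexpURI` substitution above is what produces the sanitized `cluster` tag promised in the README hunk earlier: it strips a `user:password@` userinfo block while `"${1}"` keeps the scheme. A runnable sketch of the behavior:

```go
package main

import (
    "fmt"
    "regexp"
)

func main() {
    // Same pattern as regexpURI above; "${1}" keeps the scheme and
    // drops the credentials from the cluster tag.
    re := regexp.MustCompile(`(\S+://)?(\S+\:\S+@)`)
    addr := "http://user:password@couchbase-0.example.com:8091/endpoint"
    fmt.Println(re.ReplaceAllString(addr, "${1}"))
    // Output: http://couchbase-0.example.com:8091/endpoint
}
```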

File diff suppressed because one or more lines are too long

@@ -63,7 +63,7 @@ httpd statistics:
### Example output:

```
➜ telegraf git:(master) ✗ ./telegraf -config ./config.conf -input-filter couchdb -test
➜ telegraf git:(master) ✗ ./telegraf --config ./config.conf --input-filter couchdb --test
* Plugin: couchdb,
Collection 1
> couchdb,server=http://localhost:5984/_stats couchdb_auth_cache_hits_current=0,

@@ -2,13 +2,11 @@ package couchdb

import (
    "encoding/json"
    "errors"
    "fmt"
    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/plugins/inputs"
    "net/http"
    "reflect"
    "strings"
    "sync"
    "time"
)
@@ -83,34 +81,20 @@ func (*CouchDB) SampleConfig() string {
}

func (c *CouchDB) Gather(accumulator telegraf.Accumulator) error {
    errorChannel := make(chan error, len(c.HOSTs))
    var wg sync.WaitGroup
    for _, u := range c.HOSTs {
        wg.Add(1)
        go func(host string) {
            defer wg.Done()
            if err := c.fetchAndInsertData(accumulator, host); err != nil {
                errorChannel <- fmt.Errorf("[host=%s]: %s", host, err)
                accumulator.AddError(fmt.Errorf("[host=%s]: %s", host, err))
            }
        }(u)
    }

    wg.Wait()
    close(errorChannel)

    // If there weren't any errors, we can return nil now.
    if len(errorChannel) == 0 {
        return nil
    }

    // There were errors, so join them all together as one big error.
    errorStrings := make([]string, 0, len(errorChannel))
    for err := range errorChannel {
        errorStrings = append(errorStrings, err.Error())
    }

    return errors.New(strings.Join(errorStrings, "\n"))

    return nil
}

var tr = &http.Transport{

@@ -316,5 +316,5 @@ func TestBasic(t *testing.T) {
    }

    var acc testutil.Accumulator
    require.NoError(t, plugin.Gather(&acc))
    require.NoError(t, acc.GatherError(plugin.Gather))
}

@@ -75,12 +75,11 @@ func (g *Disque) Gather(acc telegraf.Accumulator) error {

    var wg sync.WaitGroup

    var outerr error

    for _, serv := range g.Servers {
        u, err := url.Parse(serv)
        if err != nil {
            return fmt.Errorf("Unable to parse to address '%s': %s", serv, err)
            acc.AddError(fmt.Errorf("Unable to parse to address '%s': %s", serv, err))
            continue
        } else if u.Scheme == "" {
            // fallback to simple string based address (i.e. "10.0.0.1:10000")
            u.Scheme = "tcp"
@@ -90,13 +89,13 @@ func (g *Disque) Gather(acc telegraf.Accumulator) error {
        wg.Add(1)
        go func(serv string) {
            defer wg.Done()
            outerr = g.gatherServer(u, acc)
            acc.AddError(g.gatherServer(u, acc))
        }(serv)
    }

    wg.Wait()

    return outerr
    return nil
}

const defaultPort = "7711"

@@ -51,7 +51,7 @@ func TestDisqueGeneratesMetrics(t *testing.T) {

    var acc testutil.Accumulator

    err = r.Gather(&acc)
    err = acc.GatherError(r.Gather)
    require.NoError(t, err)

    fields := map[string]interface{}{
@@ -117,7 +117,7 @@ func TestDisqueCanPullStatsFromMultipleServers(t *testing.T) {

    var acc testutil.Accumulator

    err = r.Gather(&acc)
    err = acc.GatherError(r.Gather)
    require.NoError(t, err)

    fields := map[string]interface{}{

plugins/inputs/dmcache/README.md (new file, 47 lines)
@@ -0,0 +1,47 @@
# DMCache Input Plugin

This plugin provides a native collection of dmsetup-based statistics for dm-cache.

This plugin requires sudo, so make sure that telegraf is set up to be able to execute sudo without a password.

`sudo /sbin/dmsetup status --target cache` is the full command that telegraf will run, which is useful for debugging purposes.

### Configuration

```toml
[[inputs.dmcache]]
  ## Whether to report per-device stats or not
  per_device = true
```

### Measurements & Fields:

- dmcache
    - length
    - target
    - metadata_blocksize
    - metadata_used
    - metadata_total
    - cache_blocksize
    - cache_used
    - cache_total
    - read_hits
    - read_misses
    - write_hits
    - write_misses
    - demotions
    - promotions
    - dirty

### Tags:

- All measurements have the following tags:
    - device

### Example Output:

```
$ ./telegraf --test --config /etc/telegraf/telegraf.conf --input-filter dmcache
* Plugin: inputs.dmcache, Collection 1
> dmcache,device=example cache_blocksize=0i,read_hits=995134034411520i,read_misses=916807089127424i,write_hits=195107267543040i,metadata_used=12861440i,write_misses=563725346013184i,promotions=3265223720960i,dirty=0i,metadata_blocksize=0i,cache_used=1099511627776i,cache_total=0i,length=0i,metadata_total=1073741824i,demotions=3265223720960i 1491482035000000000
```
plugins/inputs/dmcache/dmcache.go (new file, 33 lines)
@@ -0,0 +1,33 @@
package dmcache

import (
    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/plugins/inputs"
)

type DMCache struct {
    PerDevice        bool `toml:"per_device"`
    getCurrentStatus func() ([]string, error)
}

var sampleConfig = `
  ## Whether to report per-device stats or not
  per_device = true
`

func (c *DMCache) SampleConfig() string {
    return sampleConfig
}

func (c *DMCache) Description() string {
    return "Provide a native collection for dmsetup based statistics for dm-cache"
}

func init() {
    inputs.Add("dmcache", func() telegraf.Input {
        return &DMCache{
            PerDevice:        true,
            getCurrentStatus: dmSetupStatus,
        }
    })
}

plugins/inputs/dmcache/dmcache_linux.go (new file, 190 lines)
@@ -0,0 +1,190 @@
// +build linux

package dmcache

import (
    "os/exec"
    "strconv"
    "strings"

    "errors"

    "github.com/influxdata/telegraf"
)

const metricName = "dmcache"

type cacheStatus struct {
    device            string
    length            int64
    target            string
    metadataBlocksize int64
    metadataUsed      int64
    metadataTotal     int64
    cacheBlocksize    int64
    cacheUsed         int64
    cacheTotal        int64
    readHits          int64
    readMisses        int64
    writeHits         int64
    writeMisses       int64
    demotions         int64
    promotions        int64
    dirty             int64
}

func (c *DMCache) Gather(acc telegraf.Accumulator) error {
    outputLines, err := c.getCurrentStatus()
    if err != nil {
        return err
    }

    totalStatus := cacheStatus{}

    for _, s := range outputLines {
        status, err := parseDMSetupStatus(s)
        if err != nil {
            return err
        }

        if c.PerDevice {
            tags := map[string]string{"device": status.device}
            acc.AddFields(metricName, toFields(status), tags)
        }
        aggregateStats(&totalStatus, status)
    }

    acc.AddFields(metricName, toFields(totalStatus), map[string]string{"device": "all"})

    return nil
}

func parseDMSetupStatus(line string) (cacheStatus, error) {
    var err error
    parseError := errors.New("Output from dmsetup could not be parsed")
    status := cacheStatus{}
    values := strings.Fields(line)
    if len(values) < 15 {
        return cacheStatus{}, parseError
    }

    status.device = strings.TrimRight(values[0], ":")
    status.length, err = strconv.ParseInt(values[2], 10, 64)
    if err != nil {
        return cacheStatus{}, err
    }
    status.target = values[3]
    status.metadataBlocksize, err = strconv.ParseInt(values[4], 10, 64)
    if err != nil {
        return cacheStatus{}, err
    }
    metadata := strings.Split(values[5], "/")
    if len(metadata) != 2 {
        return cacheStatus{}, parseError
    }
    status.metadataUsed, err = strconv.ParseInt(metadata[0], 10, 64)
    if err != nil {
        return cacheStatus{}, err
    }
    status.metadataTotal, err = strconv.ParseInt(metadata[1], 10, 64)
    if err != nil {
        return cacheStatus{}, err
    }
    status.cacheBlocksize, err = strconv.ParseInt(values[6], 10, 64)
    if err != nil {
        return cacheStatus{}, err
    }
    cache := strings.Split(values[7], "/")
    if len(cache) != 2 {
        return cacheStatus{}, parseError
    }
    status.cacheUsed, err = strconv.ParseInt(cache[0], 10, 64)
    if err != nil {
        return cacheStatus{}, err
    }
    status.cacheTotal, err = strconv.ParseInt(cache[1], 10, 64)
    if err != nil {
        return cacheStatus{}, err
    }
    status.readHits, err = strconv.ParseInt(values[8], 10, 64)
    if err != nil {
        return cacheStatus{}, err
    }
    status.readMisses, err = strconv.ParseInt(values[9], 10, 64)
    if err != nil {
        return cacheStatus{}, err
    }
    status.writeHits, err = strconv.ParseInt(values[10], 10, 64)
    if err != nil {
        return cacheStatus{}, err
    }
    status.writeMisses, err = strconv.ParseInt(values[11], 10, 64)
    if err != nil {
        return cacheStatus{}, err
    }
    status.demotions, err = strconv.ParseInt(values[12], 10, 64)
    if err != nil {
        return cacheStatus{}, err
    }
    status.promotions, err = strconv.ParseInt(values[13], 10, 64)
    if err != nil {
        return cacheStatus{}, err
    }
    status.dirty, err = strconv.ParseInt(values[14], 10, 64)
    if err != nil {
        return cacheStatus{}, err
    }

    return status, nil
}
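For orientation, feeding `parseDMSetupStatus` the first fixture line from the tests below (truncated to the 15 whitespace-separated fields the parser actually reads) yields the per-device struct; the positions follow the `dmsetup status --target cache` output layout:

```go
// Usage sketch within package dmcache; assumes "fmt" is imported.
line := "cs-1: 0 4883791872 cache 8 1018/1501122 512 7/464962 139 352643 15 46 0 7 0"
status, err := parseDMSetupStatus(line)
if err == nil {
    fmt.Println(status.device, status.readHits, status.dirty) // cs-1 139 0
}
```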

func aggregateStats(totalStatus *cacheStatus, status cacheStatus) {
    totalStatus.length += status.length
    totalStatus.metadataBlocksize += status.metadataBlocksize
    totalStatus.metadataUsed += status.metadataUsed
    totalStatus.metadataTotal += status.metadataTotal
    totalStatus.cacheBlocksize += status.cacheBlocksize
    totalStatus.cacheUsed += status.cacheUsed
    totalStatus.cacheTotal += status.cacheTotal
    totalStatus.readHits += status.readHits
    totalStatus.readMisses += status.readMisses
    totalStatus.writeHits += status.writeHits
    totalStatus.writeMisses += status.writeMisses
    totalStatus.demotions += status.demotions
    totalStatus.promotions += status.promotions
    totalStatus.dirty += status.dirty
}

func toFields(status cacheStatus) map[string]interface{} {
    fields := make(map[string]interface{})
    fields["length"] = status.length
    fields["metadata_blocksize"] = status.metadataBlocksize
    fields["metadata_used"] = status.metadataUsed
    fields["metadata_total"] = status.metadataTotal
    fields["cache_blocksize"] = status.cacheBlocksize
    fields["cache_used"] = status.cacheUsed
    fields["cache_total"] = status.cacheTotal
    fields["read_hits"] = status.readHits
    fields["read_misses"] = status.readMisses
    fields["write_hits"] = status.writeHits
    fields["write_misses"] = status.writeMisses
    fields["demotions"] = status.demotions
    fields["promotions"] = status.promotions
    fields["dirty"] = status.dirty
    return fields
}

func dmSetupStatus() ([]string, error) {
    out, err := exec.Command("/bin/sh", "-c", "sudo /sbin/dmsetup status --target cache").Output()
    if err != nil {
        return nil, err
    }
    if string(out) == "No devices found\n" {
        return []string{}, nil
    }

    outString := strings.TrimRight(string(out), "\n")
    status := strings.Split(outString, "\n")

    return status, nil
}

plugins/inputs/dmcache/dmcache_linux_test.go (new file, 171 lines)
@@ -0,0 +1,171 @@
// +build linux

package dmcache

import (
    "errors"
    "testing"

    "github.com/influxdata/telegraf/testutil"
    "github.com/stretchr/testify/require"
)

var (
    measurement     = "dmcache"
    badFormatOutput = []string{"cs-1: 0 4883791872 cache 8 1018/1501122 512 7/464962 139 352643 "}
    good2DevicesFormatOutput = []string{
        "cs-1: 0 4883791872 cache 8 1018/1501122 512 7/464962 139 352643 15 46 0 7 0 1 writeback 2 migration_threshold 2048 mq 10 random_threshold 4 sequential_threshold 512 discard_promote_adjustment 1 read_promote_adjustment 4 write_promote_adjustment 8",
        "cs-2: 0 4294967296 cache 8 72352/1310720 128 26/24327168 2409 286 265 524682 0 0 0 1 writethrough 2 migration_threshold 2048 mq 10 random_threshold 4 sequential_threshold 512 discard_promote_adjustment 1 read_promote_adjustment 4 write_promote_adjustment 8",
    }
)

func TestPerDeviceGoodOutput(t *testing.T) {
    var acc testutil.Accumulator
    var plugin = &DMCache{
        PerDevice: true,
        getCurrentStatus: func() ([]string, error) {
            return good2DevicesFormatOutput, nil
        },
    }

    err := plugin.Gather(&acc)
    require.NoError(t, err)

    tags1 := map[string]string{
        "device": "cs-1",
    }
    fields1 := map[string]interface{}{
        "length":             int64(4883791872),
        "metadata_blocksize": int64(8),
        "metadata_used":      int64(1018),
        "metadata_total":     int64(1501122),
        "cache_blocksize":    int64(512),
        "cache_used":         int64(7),
        "cache_total":        int64(464962),
        "read_hits":          int64(139),
        "read_misses":        int64(352643),
        "write_hits":         int64(15),
        "write_misses":       int64(46),
        "demotions":          int64(0),
        "promotions":         int64(7),
        "dirty":              int64(0),
    }
    acc.AssertContainsTaggedFields(t, measurement, fields1, tags1)

    tags2 := map[string]string{
        "device": "cs-2",
    }
    fields2 := map[string]interface{}{
        "length":             int64(4294967296),
        "metadata_blocksize": int64(8),
        "metadata_used":      int64(72352),
        "metadata_total":     int64(1310720),
        "cache_blocksize":    int64(128),
        "cache_used":         int64(26),
        "cache_total":        int64(24327168),
        "read_hits":          int64(2409),
        "read_misses":        int64(286),
        "write_hits":         int64(265),
        "write_misses":       int64(524682),
        "demotions":          int64(0),
        "promotions":         int64(0),
        "dirty":              int64(0),
    }
    acc.AssertContainsTaggedFields(t, measurement, fields2, tags2)

    tags3 := map[string]string{
        "device": "all",
    }

    fields3 := map[string]interface{}{
        "length":             int64(9178759168),
        "metadata_blocksize": int64(16),
        "metadata_used":      int64(73370),
        "metadata_total":     int64(2811842),
        "cache_blocksize":    int64(640),
        "cache_used":         int64(33),
        "cache_total":        int64(24792130),
        "read_hits":          int64(2548),
        "read_misses":        int64(352929),
        "write_hits":         int64(280),
        "write_misses":       int64(524728),
        "demotions":          int64(0),
        "promotions":         int64(7),
        "dirty":              int64(0),
    }
    acc.AssertContainsTaggedFields(t, measurement, fields3, tags3)
}

func TestNotPerDeviceGoodOutput(t *testing.T) {
    var acc testutil.Accumulator
    var plugin = &DMCache{
        PerDevice: false,
        getCurrentStatus: func() ([]string, error) {
            return good2DevicesFormatOutput, nil
        },
    }

    err := plugin.Gather(&acc)
    require.NoError(t, err)

    tags := map[string]string{
        "device": "all",
    }

    fields := map[string]interface{}{
        "length":             int64(9178759168),
        "metadata_blocksize": int64(16),
        "metadata_used":      int64(73370),
        "metadata_total":     int64(2811842),
        "cache_blocksize":    int64(640),
        "cache_used":         int64(33),
        "cache_total":        int64(24792130),
        "read_hits":          int64(2548),
        "read_misses":        int64(352929),
        "write_hits":         int64(280),
        "write_misses":       int64(524728),
        "demotions":          int64(0),
        "promotions":         int64(7),
        "dirty":              int64(0),
    }
    acc.AssertContainsTaggedFields(t, measurement, fields, tags)
}

func TestNoDevicesOutput(t *testing.T) {
    var acc testutil.Accumulator
    var plugin = &DMCache{
        PerDevice: true,
        getCurrentStatus: func() ([]string, error) {
            return []string{}, nil
        },
    }

    err := plugin.Gather(&acc)
    require.NoError(t, err)
}

func TestErrorDuringGettingStatus(t *testing.T) {
    var acc testutil.Accumulator
    var plugin = &DMCache{
        PerDevice: true,
        getCurrentStatus: func() ([]string, error) {
            return nil, errors.New("dmsetup doesn't exist")
        },
    }

    err := plugin.Gather(&acc)
    require.Error(t, err)
}

func TestBadFormatOfStatus(t *testing.T) {
    var acc testutil.Accumulator
    var plugin = &DMCache{
        PerDevice: true,
        getCurrentStatus: func() ([]string, error) {
            return badFormatOutput, nil
        },
    }

    err := plugin.Gather(&acc)
    require.Error(t, err)
}

plugins/inputs/dmcache/dmcache_notlinux.go (new file, 15 lines)
@@ -0,0 +1,15 @@
// +build !linux

package dmcache

import (
    "github.com/influxdata/telegraf"
)

func (c *DMCache) Gather(acc telegraf.Accumulator) error {
    return nil
}

func dmSetupStatus() ([]string, error) {
    return []string{}, nil
}
@@ -8,19 +8,23 @@ The DNS plugin gathers dns query times in miliseconds - like [Dig](https://en.wi
# Sample Config:
[[inputs.dns_query]]
  ## servers to query
  servers = ["8.8.8.8"] # required
  servers = ["8.8.8.8"]

  ## Domains or subdomains to query. "." (root) is default
  domains = ["."] # optional
  ## Network is the network protocol name.
  # network = "udp"

  ## Query record type. Posible values: A, AAAA, ANY, CNAME, MX, NS, PTR, SOA, SPF, SRV, TXT. Default is "NS"
  record_type = "A" # optional
  ## Domains or subdomains to query.
  # domains = ["."]

  ## Dns server port. 53 is default
  port = 53 # optional
  ## Query record type.
  ## Possible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV.
  # record_type = "A"

  ## Query timeout in seconds. Default is 2 seconds
  timeout = 2 # optional
  ## Dns server port.
  # port = 53

  ## Query timeout in seconds.
  # timeout = 2
```

For querying more than one record type make:
@@ -46,6 +50,6 @@ For querying more than one record type make:
### Example output:

```
./telegraf -config telegraf.conf -test -input-filter dns_query -test
telegraf --input-filter dns_query --test
> dns_query,domain=mjasion.pl,record_type=A,server=8.8.8.8 query_time_ms=67.189842 1456082743585760680
```

@@ -3,13 +3,13 @@ package dns_query
import (
    "errors"
    "fmt"
    "github.com/miekg/dns"
    "net"
    "strconv"
    "time"

    "github.com/miekg/dns"

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/internal/errchan"
    "github.com/influxdata/telegraf/plugins/inputs"
)

@@ -17,6 +17,9 @@ type DnsQuery struct {
    // Domains or subdomains to query
    Domains []string

    // Network protocol name
    Network string

    // Server to query
    Servers []string

@@ -32,20 +35,23 @@ type DnsQuery struct {

var sampleConfig = `
  ## servers to query
  servers = ["8.8.8.8"] # required
  servers = ["8.8.8.8"]

  ## Domains or subdomains to query. "."(root) is default
  domains = ["."] # optional
  ## Network is the network protocol name.
  # network = "udp"

  ## Query record type. Default is "A"
  ## Domains or subdomains to query.
  # domains = ["."]

  ## Query record type.
  ## Possible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV.
  record_type = "A" # optional
  # record_type = "A"

  ## Dns server port. 53 is default
  port = 53 # optional
  ## Dns server port.
  # port = 53

  ## Query timeout in seconds. Default is 2 seconds
  timeout = 2 # optional
  ## Query timeout in seconds.
  # timeout = 2
`

func (d *DnsQuery) SampleConfig() string {
@@ -58,11 +64,10 @@ func (d *DnsQuery) Description() string {
func (d *DnsQuery) Gather(acc telegraf.Accumulator) error {
    d.setDefaultValues()

    errChan := errchan.New(len(d.Domains) * len(d.Servers))
    for _, domain := range d.Domains {
        for _, server := range d.Servers {
            dnsQueryTime, err := d.getDnsQueryTime(domain, server)
            errChan.C <- err
            acc.AddError(err)
            tags := map[string]string{
                "server": server,
                "domain": domain,
@@ -74,10 +79,14 @@ func (d *DnsQuery) Gather(acc telegraf.Accumulator) error {
        }
    }

    return errChan.Error()
    return nil
}

func (d *DnsQuery) setDefaultValues() {
    if d.Network == "" {
        d.Network = "udp"
    }

    if len(d.RecordType) == 0 {
        d.RecordType = "NS"
    }
@@ -101,6 +110,7 @@ func (d *DnsQuery) getDnsQueryTime(domain string, server string) (float64, error

    c := new(dns.Client)
    c.ReadTimeout = time.Duration(d.Timeout) * time.Second
    c.Net = d.Network

    m := new(dns.Msg)
    recordType, err := d.parseRecordType()

@@ -24,7 +24,7 @@ func TestGathering(t *testing.T) {
    }
    var acc testutil.Accumulator

    err := dnsConfig.Gather(&acc)
    err := acc.GatherError(dnsConfig.Gather)
    assert.NoError(t, err)
    metric, ok := acc.Get("dns_query")
    require.True(t, ok)
@@ -44,7 +44,7 @@ func TestGatheringMxRecord(t *testing.T) {
    var acc testutil.Accumulator
    dnsConfig.RecordType = "MX"

    err := dnsConfig.Gather(&acc)
    err := acc.GatherError(dnsConfig.Gather)
    assert.NoError(t, err)
    metric, ok := acc.Get("dns_query")
    require.True(t, ok)
@@ -70,7 +70,7 @@ func TestGatheringRootDomain(t *testing.T) {
    }
    fields := map[string]interface{}{}

    err := dnsConfig.Gather(&acc)
    err := acc.GatherError(dnsConfig.Gather)
    assert.NoError(t, err)
    metric, ok := acc.Get("dns_query")
    require.True(t, ok)
@@ -96,7 +96,7 @@ func TestMetricContainsServerAndDomainAndRecordTypeTags(t *testing.T) {
    }
    fields := map[string]interface{}{}

    err := dnsConfig.Gather(&acc)
    err := acc.GatherError(dnsConfig.Gather)
    assert.NoError(t, err)
    metric, ok := acc.Get("dns_query")
    require.True(t, ok)
@@ -121,7 +121,7 @@ func TestGatheringTimeout(t *testing.T) {

    channel := make(chan error, 1)
    go func() {
        channel <- dnsConfig.Gather(&acc)
        channel <- acc.GatherError(dnsConfig.Gather)
    }()
    select {
    case res := <-channel:

@@ -1,29 +1,62 @@
# Docker Input Plugin

The docker plugin uses the docker remote API to gather metrics on running
docker containers. You can read Docker's documentation for their remote API
[here](https://docs.docker.com/engine/reference/api/docker_remote_api_v1.20/#get-container-stats-based-on-resource-usage)
The docker plugin uses the Docker Engine API to gather metrics on running
docker containers.

The docker plugin uses the excellent
[docker engine-api](https://github.com/docker/engine-api) library to
gather stats. Documentation for the library can be found
[here](https://godoc.org/github.com/docker/engine-api) and documentation
for the stat structure can be found
[here](https://godoc.org/github.com/docker/engine-api/types#Stats)
The docker plugin uses the [Official Docker Client](https://github.com/moby/moby/tree/master/client)
to gather stats from the [Engine API](https://docs.docker.com/engine/api/v1.20/).
[Library Documentation](https://godoc.org/github.com/moby/moby/client)

### Configuration:

```
# Read metrics about docker containers
[[inputs.docker]]
  # Docker Endpoint
  # To use TCP, set endpoint = "tcp://[ip]:[port]"
  # To use environment variables (ie, docker-machine), set endpoint = "ENV"
  ## Docker Endpoint
  ## To use TCP, set endpoint = "tcp://[ip]:[port]"
  ## To use environment variables (ie, docker-machine), set endpoint = "ENV"
  endpoint = "unix:///var/run/docker.sock"
  # Only collect metrics for these containers, collect all if empty

  ## Only collect metrics for these containers. Values will be appended to
  ## container_name_include.
  ## Deprecated (1.4.0), use container_name_include
  container_names = []

  ## Containers to include and exclude. Collect all if empty. Globs accepted.
  container_name_include = []
  container_name_exclude = []

  ## Timeout for docker list, info, and stats commands
  timeout = "5s"

  ## Whether to report for each container per-device blkio (8:0, 8:1...) and
  ## network (eth0, eth1, ...) stats or not
  perdevice = true

  ## Whether to report for each container total blkio and network stats or not
  total = false

  ## docker labels to include and exclude as tags. Globs accepted.
  ## Note that an empty array for both will include all labels as tags
  docker_label_include = []
  docker_label_exclude = []

  ## Which environment variables should we use as a tag
  tag_env = ["JAVA_HOME", "HEAP_SIZE"]

  ## Optional SSL Config
  # ssl_ca = "/etc/telegraf/ca.pem"
  # ssl_cert = "/etc/telegraf/cert.pem"
  # ssl_key = "/etc/telegraf/key.pem"
  ## Use SSL but skip chain & host verification
  # insecure_skip_verify = false
```

#### Environment Configuration

When using the `"ENV"` endpoint, the connection is configured using the
[cli Docker environment variables](https://godoc.org/github.com/moby/moby/client#NewEnvClient).

### Measurements & Fields:

Every effort was made to preserve the names based on the JSON response from the
@@ -122,36 +155,38 @@ based on the availability of per-cpu stats on your system.

### Tags:

#### Docker Engine tags
- docker (memory_total)
    - unit=bytes
    - engine_host
- docker (pool_blocksize)
    - unit=bytes
    - engine_host
- docker_data
    - unit=bytes
    - engine_host
- docker_metadata
    - unit=bytes
    - engine_host

#### Docker Container tags
- Tags on all containers:
    - engine_host
    - container_image
    - container_name
    - container_version
- docker_container_mem specific:
    - container_image
    - container_name
- docker_container_cpu specific:
    - container_image
    - container_name
    - cpu
- docker_container_net specific:
    - container_image
    - container_name
    - network
- docker_container_blkio specific:
    - container_image
    - container_name
    - device

### Example Output:

```
% ./telegraf -config ~/ws/telegraf.conf -input-filter docker -test
% ./telegraf --config ~/ws/telegraf.conf --input-filter docker --test
* Plugin: docker, Collection 1
> docker n_cpus=8i 1456926671065383978
> docker n_used_file_descriptors=15i 1456926671065383978

plugins/inputs/docker/client.go (new file, 67 lines)
@@ -0,0 +1,67 @@
package docker

import (
    "context"
    "crypto/tls"
    "net/http"

    "github.com/docker/docker/api/types"
    docker "github.com/docker/docker/client"
    "github.com/docker/go-connections/sockets"
)

var (
    version        string
    defaultHeaders = map[string]string{"User-Agent": "engine-api-cli-1.0"}
)

type Client interface {
    Info(ctx context.Context) (types.Info, error)
    ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error)
    ContainerStats(ctx context.Context, containerID string, stream bool) (types.ContainerStats, error)
    ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error)
}

func NewEnvClient() (Client, error) {
    client, err := docker.NewEnvClient()
    if err != nil {
        return nil, err
    }
    return &SocketClient{client}, nil
}

func NewClient(host string, tlsConfig *tls.Config) (Client, error) {
    proto, addr, _, err := docker.ParseHost(host)
    if err != nil {
        return nil, err
    }

    transport := &http.Transport{
        TLSClientConfig: tlsConfig,
    }
    sockets.ConfigureTransport(transport, proto, addr)
    httpClient := &http.Client{Transport: transport}

    client, err := docker.NewClient(host, version, httpClient, defaultHeaders)
    if err != nil {
        return nil, err
    }
    return &SocketClient{client}, nil
}

type SocketClient struct {
    client *docker.Client
}

func (c *SocketClient) Info(ctx context.Context) (types.Info, error) {
    return c.client.Info(ctx)
}
func (c *SocketClient) ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) {
    return c.client.ContainerList(ctx, options)
}
func (c *SocketClient) ContainerStats(ctx context.Context, containerID string, stream bool) (types.ContainerStats, error) {
    return c.client.ContainerStats(ctx, containerID, stream)
}
func (c *SocketClient) ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error) {
    return c.client.ContainerInspect(ctx, containerID)
}
|
||||
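
Because the plugin depends only on this narrow interface, tests can swap in a stub without a running daemon. A minimal sketch; the `MockClient` name and canned values are illustrative, not part of this change:

```
package docker

import (
	"context"
	"io/ioutil"
	"strings"

	"github.com/docker/docker/api/types"
)

// MockClient serves canned responses instead of talking to a daemon.
type MockClient struct {
	InfoResponse types.Info
	StatsBody    string
}

// Compile-time check that MockClient satisfies Client.
var _ Client = (*MockClient)(nil)

func (m *MockClient) Info(ctx context.Context) (types.Info, error) {
	return m.InfoResponse, nil
}

func (m *MockClient) ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) {
	return []types.Container{}, nil
}

func (m *MockClient) ContainerStats(ctx context.Context, containerID string, stream bool) (types.ContainerStats, error) {
	// Serve a canned stats JSON document, as the daemon would.
	return types.ContainerStats{
		Body:   ioutil.NopCloser(strings.NewReader(m.StatsBody)),
		OSType: "linux",
	}, nil
}

func (m *MockClient) ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error) {
	return types.ContainerJSON{}, nil
}
```
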
@@ -1,21 +1,21 @@
package system
package docker

import (
	"context"
	"crypto/tls"
	"encoding/json"
	"fmt"
	"io"
	"log"
	"net/http"
	"regexp"
	"strconv"
	"strings"
	"sync"
	"time"

	"golang.org/x/net/context"

	"github.com/docker/engine-api/client"
	"github.com/docker/engine-api/types"
	"github.com/docker/docker/api/types"
	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/filter"
	"github.com/influxdata/telegraf/internal"
	"github.com/influxdata/telegraf/plugins/inputs"
)

@@ -24,19 +24,31 @@ import (
type Docker struct {
	Endpoint       string
	ContainerNames []string

	Timeout        internal.Duration
	PerDevice      bool     `toml:"perdevice"`
	Total          bool     `toml:"total"`
	TagEnvironment []string `toml:"tag_env"`
	LabelInclude   []string `toml:"docker_label_include"`
	LabelExclude   []string `toml:"docker_label_exclude"`

	client DockerClient
	engine_host string
}
	ContainerInclude []string `toml:"container_name_include"`
	ContainerExclude []string `toml:"container_name_exclude"`

// DockerClient interface, useful for testing
type DockerClient interface {
	Info(ctx context.Context) (types.Info, error)
	ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error)
	ContainerStats(ctx context.Context, containerID string, stream bool) (io.ReadCloser, error)
	SSLCA              string `toml:"ssl_ca"`
	SSLCert            string `toml:"ssl_cert"`
	SSLKey             string `toml:"ssl_key"`
	InsecureSkipVerify bool

	newEnvClient func() (Client, error)
	newClient    func(string, *tls.Config) (Client, error)

	client          Client
	httpClient      *http.Client
	engine_host     string
	filtersCreated  bool
	labelFilter     filter.Filter
	containerFilter filter.Filter
}

// KB, MB, GB, TB, PB...human friendly
@@ -46,6 +58,8 @@ const (
	GB = 1000 * MB
	TB = 1000 * GB
	PB = 1000 * TB

	defaultEndpoint = "unix:///var/run/docker.sock"
)

var (
@@ -57,8 +71,15 @@ var sampleConfig = `
  ## To use TCP, set endpoint = "tcp://[ip]:[port]"
  ## To use environment variables (ie, docker-machine), set endpoint = "ENV"
  endpoint = "unix:///var/run/docker.sock"

  ## Only collect metrics for these containers, collect all if empty
  container_names = []

  ## Containers to include and exclude. Globs accepted.
  ## Note that an empty array for both will include all containers
  container_name_include = []
  container_name_exclude = []

  ## Timeout for docker list, info, and stats commands
  timeout = "5s"

@@ -67,46 +88,66 @@ var sampleConfig = `
  perdevice = true
  ## Whether to report for each container total blkio and network stats or not
  total = false
  ## Which environment variables should we use as a tag
  ##tag_env = ["JAVA_HOME", "HEAP_SIZE"]

  ## docker labels to include and exclude as tags. Globs accepted.
  ## Note that an empty array for both will include all labels as tags
  docker_label_include = []
  docker_label_exclude = []

  ## Optional SSL Config
  # ssl_ca = "/etc/telegraf/ca.pem"
  # ssl_cert = "/etc/telegraf/cert.pem"
  # ssl_key = "/etc/telegraf/key.pem"
  ## Use SSL but skip chain & host verification
  # insecure_skip_verify = false
`

// Description returns input description
func (d *Docker) Description() string {
	return "Read metrics about docker containers"
}

// SampleConfig prints sampleConfig
func (d *Docker) SampleConfig() string { return sampleConfig }

// Gather starts stats collection
func (d *Docker) Gather(acc telegraf.Accumulator) error {
	if d.client == nil {
		var c *client.Client
		var c Client
		var err error
		defaultHeaders := map[string]string{"User-Agent": "engine-api-cli-1.0"}
		if d.Endpoint == "ENV" {
			c, err = client.NewEnvClient()
			if err != nil {
				return err
			}
		} else if d.Endpoint == "" {
			c, err = client.NewClient("unix:///var/run/docker.sock", "", nil, defaultHeaders)
			if err != nil {
				return err
			}
			c, err = d.newEnvClient()
		} else {
			c, err = client.NewClient(d.Endpoint, "", nil, defaultHeaders)
			tlsConfig, err := internal.GetTLSConfig(
				d.SSLCert, d.SSLKey, d.SSLCA, d.InsecureSkipVerify)
			if err != nil {
				return err
			}

			c, err = d.newClient(d.Endpoint, tlsConfig)
		}
		if err != nil {
			return err
		}
		d.client = c
	}

	// Create label filters if not already created
	if !d.filtersCreated {
		err := d.createLabelFilters()
		if err != nil {
			return err
		}
		err = d.createContainerFilters()
		if err != nil {
			return err
		}
		d.filtersCreated = true
	}

	// Get daemon info
	err := d.gatherInfo(acc)
	if err != nil {
		fmt.Println(err.Error())
		acc.AddError(err)
	}

	// List containers
@@ -126,8 +167,8 @@ func (d *Docker) Gather(acc telegraf.Accumulator) error {
			defer wg.Done()
			err := d.gatherContainer(c, acc)
			if err != nil {
				log.Printf("E! Error gathering container %s stats: %s\n",
					c.Names, err.Error())
				acc.AddError(fmt.Errorf("E! Error gathering container %s stats: %s\n",
					c.Names, err.Error()))
			}
		}(container)
	}
@@ -239,10 +280,9 @@ func (d *Docker) gatherContainer(
		"container_image":   imageName,
		"container_version": imageVersion,
	}
	if len(d.ContainerNames) > 0 {
		if !sliceContains(cname, d.ContainerNames) {
			return nil
		}

	if !d.containerFilter.Match(cname) {
		return nil
	}

	ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration)
@@ -251,21 +291,41 @@ func (d *Docker) gatherContainer(
	if err != nil {
		return fmt.Errorf("Error getting docker stats: %s", err.Error())
	}
	defer r.Close()
	dec := json.NewDecoder(r)
	defer r.Body.Close()
	dec := json.NewDecoder(r.Body)
	if err = dec.Decode(&v); err != nil {
		if err == io.EOF {
			return nil
		}
		return fmt.Errorf("Error decoding: %s", err.Error())
	}
	daemonOSType := r.OSType

	// Add labels to tags
	for k, label := range container.Labels {
		tags[k] = label
		if d.labelFilter.Match(k) {
			tags[k] = label
		}
	}

	gatherContainerStats(v, acc, tags, container.ID, d.PerDevice, d.Total)
	// Add whitelisted environment variables to tags
	if len(d.TagEnvironment) > 0 {
		info, err := d.client.ContainerInspect(ctx, container.ID)
		if err != nil {
			return fmt.Errorf("Error inspecting docker container: %s", err.Error())
		}
		for _, envvar := range info.Config.Env {
			for _, configvar := range d.TagEnvironment {
				dock_env := strings.SplitN(envvar, "=", 2)
				// check for presence of tag in whitelist
				if len(dock_env) == 2 && len(strings.TrimSpace(dock_env[1])) != 0 && configvar == dock_env[0] {
					tags[dock_env[0]] = dock_env[1]
				}
			}
		}
	}

	gatherContainerStats(v, acc, tags, container.ID, d.PerDevice, d.Total, daemonOSType)

	return nil
}
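
To make the whitelist semantics above concrete, here is a small standalone sketch (the variable names and values are made up for illustration): only variables named in `tag_env`, and carrying a non-empty value, become tags.

```
package main

import (
	"fmt"
	"strings"
)

func main() {
	tagEnvironment := []string{"JAVA_HOME", "HEAP_SIZE"} // from tag_env
	containerEnv := []string{"JAVA_HOME=/usr/lib/jvm", "PATH=/usr/bin", "HEAP_SIZE="}

	tags := map[string]string{}
	for _, envvar := range containerEnv {
		for _, configvar := range tagEnvironment {
			kv := strings.SplitN(envvar, "=", 2)
			// Same check as the plugin: whitelisted name, non-empty value.
			if len(kv) == 2 && len(strings.TrimSpace(kv[1])) != 0 && configvar == kv[0] {
				tags[kv[0]] = kv[1]
			}
		}
	}
	fmt.Println(tags) // map[JAVA_HOME:/usr/lib/jvm]
}
```
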
@@ -277,47 +337,73 @@ func gatherContainerStats(
	id string,
	perDevice bool,
	total bool,
	daemonOSType string,
) {
	now := stat.Read
	tm := stat.Read

	if tm.Before(time.Unix(0, 0)) {
		tm = time.Now()
	}

	memfields := map[string]interface{}{
		"max_usage": stat.MemoryStats.MaxUsage,
		"usage": stat.MemoryStats.Usage,
		"fail_count": stat.MemoryStats.Failcnt,
		"limit": stat.MemoryStats.Limit,
		"total_pgmafault": stat.MemoryStats.Stats["total_pgmajfault"],
		"cache": stat.MemoryStats.Stats["cache"],
		"mapped_file": stat.MemoryStats.Stats["mapped_file"],
		"total_inactive_file": stat.MemoryStats.Stats["total_inactive_file"],
		"pgpgout": stat.MemoryStats.Stats["pagpgout"],
		"rss": stat.MemoryStats.Stats["rss"],
		"total_mapped_file": stat.MemoryStats.Stats["total_mapped_file"],
		"writeback": stat.MemoryStats.Stats["writeback"],
		"unevictable": stat.MemoryStats.Stats["unevictable"],
		"pgpgin": stat.MemoryStats.Stats["pgpgin"],
		"total_unevictable": stat.MemoryStats.Stats["total_unevictable"],
		"pgmajfault": stat.MemoryStats.Stats["pgmajfault"],
		"total_rss": stat.MemoryStats.Stats["total_rss"],
		"total_rss_huge": stat.MemoryStats.Stats["total_rss_huge"],
		"total_writeback": stat.MemoryStats.Stats["total_write_back"],
		"total_inactive_anon": stat.MemoryStats.Stats["total_inactive_anon"],
		"rss_huge": stat.MemoryStats.Stats["rss_huge"],
		"hierarchical_memory_limit": stat.MemoryStats.Stats["hierarchical_memory_limit"],
		"total_pgfault": stat.MemoryStats.Stats["total_pgfault"],
		"total_active_file": stat.MemoryStats.Stats["total_active_file"],
		"active_anon": stat.MemoryStats.Stats["active_anon"],
		"total_active_anon": stat.MemoryStats.Stats["total_active_anon"],
		"total_pgpgout": stat.MemoryStats.Stats["total_pgpgout"],
		"total_cache": stat.MemoryStats.Stats["total_cache"],
		"inactive_anon": stat.MemoryStats.Stats["inactive_anon"],
		"active_file": stat.MemoryStats.Stats["active_file"],
		"pgfault": stat.MemoryStats.Stats["pgfault"],
		"inactive_file": stat.MemoryStats.Stats["inactive_file"],
		"total_pgpgin": stat.MemoryStats.Stats["total_pgpgin"],
		"usage_percent": calculateMemPercent(stat),
		"container_id": id,
	}
	acc.AddFields("docker_container_mem", memfields, tags, now)

	memstats := []string{
		"active_anon",
		"active_file",
		"cache",
		"hierarchical_memory_limit",
		"inactive_anon",
		"inactive_file",
		"mapped_file",
		"pgfault",
		"pgmajfault",
		"pgpgin",
		"pgpgout",
		"rss",
		"rss_huge",
		"total_active_anon",
		"total_active_file",
		"total_cache",
		"total_inactive_anon",
		"total_inactive_file",
		"total_mapped_file",
		"total_pgfault",
		"total_pgmajfault",
		"total_pgpgin",
		"total_pgpgout",
		"total_rss",
		"total_rss_huge",
		"total_unevictable",
		"total_writeback",
		"unevictable",
		"writeback",
	}
	for _, field := range memstats {
		if value, ok := stat.MemoryStats.Stats[field]; ok {
			memfields[field] = value
		}
	}
	if stat.MemoryStats.Failcnt != 0 {
		memfields["fail_count"] = stat.MemoryStats.Failcnt
	}

	if daemonOSType != "windows" {
		memfields["limit"] = stat.MemoryStats.Limit
		memfields["usage"] = stat.MemoryStats.Usage
		memfields["max_usage"] = stat.MemoryStats.MaxUsage

		mem := calculateMemUsageUnixNoCache(stat.MemoryStats)
		memLimit := float64(stat.MemoryStats.Limit)
		memfields["usage_percent"] = calculateMemPercentUnixNoCache(memLimit, mem)
	} else {
		memfields["commit_bytes"] = stat.MemoryStats.Commit
		memfields["commit_peak_bytes"] = stat.MemoryStats.CommitPeak
		memfields["private_working_set"] = stat.MemoryStats.PrivateWorkingSet
	}

	acc.AddFields("docker_container_mem", memfields, tags, tm)
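
The two Unix memory helpers referenced above fall outside this hunk. As a sketch of what they compute, assuming they follow the Docker CLI convention of excluding page cache before taking the percentage (an assumption, not shown in this diff):

```
package docker

import "github.com/docker/docker/api/types"

func calculateMemUsageUnixNoCache(mem types.MemoryStats) float64 {
	// Usage minus page cache, as `docker stats` reports it.
	return float64(mem.Usage - mem.Stats["cache"])
}

func calculateMemPercentUnixNoCache(memLimit float64, usedNoCache float64) float64 {
	// MemoryStats.Limit is zero when no cgroup limit is set; avoid dividing by it.
	if memLimit != 0 {
		return usedNoCache / memLimit * 100.0
	}
	return 0.0
}
```
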

	cpufields := map[string]interface{}{
		"usage_total": stat.CPUStats.CPUUsage.TotalUsage,
@@ -327,21 +413,40 @@ func gatherContainerStats(
		"throttling_periods": stat.CPUStats.ThrottlingData.Periods,
		"throttling_throttled_periods": stat.CPUStats.ThrottlingData.ThrottledPeriods,
		"throttling_throttled_time": stat.CPUStats.ThrottlingData.ThrottledTime,
		"usage_percent": calculateCPUPercent(stat),
		"container_id": id,
	}

	if daemonOSType != "windows" {
		previousCPU := stat.PreCPUStats.CPUUsage.TotalUsage
		previousSystem := stat.PreCPUStats.SystemUsage
		cpuPercent := calculateCPUPercentUnix(previousCPU, previousSystem, stat)
		cpufields["usage_percent"] = cpuPercent
	} else {
		cpuPercent := calculateCPUPercentWindows(stat)
		cpufields["usage_percent"] = cpuPercent
	}

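Neither CPU helper appears in these hunks. The sketches below follow the formulas the Docker CLI used at the time and are assumptions about the omitted code, not part of this diff:

```
package docker

import "github.com/docker/docker/api/types"

func calculateCPUPercentUnix(previousCPU, previousSystem uint64, v *types.StatsJSON) float64 {
	// Change in container CPU time versus change in whole-system CPU time
	// between the two readings, scaled by the number of CPUs.
	cpuDelta := float64(v.CPUStats.CPUUsage.TotalUsage) - float64(previousCPU)
	systemDelta := float64(v.CPUStats.SystemUsage) - float64(previousSystem)
	if systemDelta > 0.0 && cpuDelta > 0.0 {
		return (cpuDelta / systemDelta) * float64(len(v.CPUStats.CPUUsage.PercpuUsage)) * 100.0
	}
	return 0.0
}

func calculateCPUPercentWindows(v *types.StatsJSON) float64 {
	// Maximum number of 100ns intervals between the two reads,
	// multiplied by the number of processors.
	possIntervals := uint64(v.Read.Sub(v.PreRead).Nanoseconds()) / 100
	possIntervals *= uint64(v.NumProcs)

	// Intervals actually used by the container.
	intervalsUsed := v.CPUStats.CPUUsage.TotalUsage - v.PreCPUStats.CPUUsage.TotalUsage
	if possIntervals > 0 {
		return float64(intervalsUsed) / float64(possIntervals) * 100.0
	}
	return 0.0
}
```
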
	cputags := copyTags(tags)
	cputags["cpu"] = "cpu-total"
	acc.AddFields("docker_container_cpu", cpufields, cputags, now)
	acc.AddFields("docker_container_cpu", cpufields, cputags, tm)

	for i, percpu := range stat.CPUStats.CPUUsage.PercpuUsage {
	// If the OnlineCPUs field is set, use it to restrict stats gathering to only the online CPUs
	// (https://github.com/moby/moby/commit/115f91d7575d6de6c7781a96a082f144fd17e400)
	var percpuusage []uint64
	if stat.CPUStats.OnlineCPUs > 0 {
		percpuusage = stat.CPUStats.CPUUsage.PercpuUsage[:stat.CPUStats.OnlineCPUs]
	} else {
		percpuusage = stat.CPUStats.CPUUsage.PercpuUsage
	}

	for i, percpu := range percpuusage {
		percputags := copyTags(tags)
		percputags["cpu"] = fmt.Sprintf("cpu%d", i)
		fields := map[string]interface{}{
			"usage_total": percpu,
			"container_id": id,
		}
		acc.AddFields("docker_container_cpu", fields, percputags, now)
		acc.AddFields("docker_container_cpu", fields, percputags, tm)
	}

	totalNetworkStatMap := make(map[string]interface{})
@@ -361,7 +466,7 @@ func gatherContainerStats(
		if perDevice {
			nettags := copyTags(tags)
			nettags["network"] = network
			acc.AddFields("docker_container_net", netfields, nettags, now)
			acc.AddFields("docker_container_net", netfields, nettags, tm)
		}
		if total {
			for field, value := range netfields {
@@ -394,37 +499,17 @@ func gatherContainerStats(
		nettags := copyTags(tags)
		nettags["network"] = "total"
		totalNetworkStatMap["container_id"] = id
		acc.AddFields("docker_container_net", totalNetworkStatMap, nettags, now)
		acc.AddFields("docker_container_net", totalNetworkStatMap, nettags, tm)
	}

	gatherBlockIOMetrics(stat, acc, tags, now, id, perDevice, total)
}

func calculateMemPercent(stat *types.StatsJSON) float64 {
	var memPercent = 0.0
	if stat.MemoryStats.Limit > 0 {
		memPercent = float64(stat.MemoryStats.Usage) / float64(stat.MemoryStats.Limit) * 100.0
	}
	return memPercent
}

func calculateCPUPercent(stat *types.StatsJSON) float64 {
	var cpuPercent = 0.0
	// calculate the change for the cpu and system usage of the container in between readings
	cpuDelta := float64(stat.CPUStats.CPUUsage.TotalUsage) - float64(stat.PreCPUStats.CPUUsage.TotalUsage)
	systemDelta := float64(stat.CPUStats.SystemUsage) - float64(stat.PreCPUStats.SystemUsage)

	if systemDelta > 0.0 && cpuDelta > 0.0 {
		cpuPercent = (cpuDelta / systemDelta) * float64(len(stat.CPUStats.CPUUsage.PercpuUsage)) * 100.0
	}
	return cpuPercent
	gatherBlockIOMetrics(stat, acc, tags, tm, id, perDevice, total)
}

func gatherBlockIOMetrics(
	stat *types.StatsJSON,
	acc telegraf.Accumulator,
	tags map[string]string,
	now time.Time,
	tm time.Time,
	id string,
	perDevice bool,
	total bool,
@@ -495,7 +580,7 @@ func gatherBlockIOMetrics(
		if perDevice {
			iotags := copyTags(tags)
			iotags["device"] = device
			acc.AddFields("docker_container_blkio", fields, iotags, now)
			acc.AddFields("docker_container_blkio", fields, iotags, tm)
		}
		if total {
			for field, value := range fields {
@@ -526,7 +611,7 @@ func gatherBlockIOMetrics(
		totalStatMap["container_id"] = id
		iotags := copyTags(tags)
		iotags["device"] = "total"
		acc.AddFields("docker_container_blkio", totalStatMap, iotags, now)
		acc.AddFields("docker_container_blkio", totalStatMap, iotags, tm)
	}
}
|
||||
@@ -568,11 +653,38 @@ func parseSize(sizeStr string) (int64, error) {
|
||||
return int64(size), nil
|
||||
}
|
||||
|
||||
func (d *Docker) createContainerFilters() error {
|
||||
// Backwards compatibility for deprecated `container_names` parameter.
|
||||
if len(d.ContainerNames) > 0 {
|
||||
d.ContainerInclude = append(d.ContainerInclude, d.ContainerNames...)
|
||||
}
|
||||
|
||||
filter, err := filter.NewIncludeExcludeFilter(d.ContainerInclude, d.ContainerExclude)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
d.containerFilter = filter
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Docker) createLabelFilters() error {
|
||||
filter, err := filter.NewIncludeExcludeFilter(d.LabelInclude, d.LabelExclude)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
d.labelFilter = filter
|
||||
return nil
|
||||
}
|
||||
|
||||
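For reference, a small standalone sketch of how these include/exclude filters behave, assuming the semantics of telegraf's filter package (an empty include list matches everything, and excludes take precedence); the container names below are made up:

```
package main

import (
	"fmt"

	"github.com/influxdata/telegraf/filter"
)

func main() {
	// Equivalent to:
	//   container_name_include = ["web*"]
	//   container_name_exclude = ["web-test"]
	f, err := filter.NewIncludeExcludeFilter([]string{"web*"}, []string{"web-test"})
	if err != nil {
		panic(err)
	}
	fmt.Println(f.Match("web-frontend")) // true: matches the include glob
	fmt.Println(f.Match("web-test"))     // false: explicitly excluded
	fmt.Println(f.Match("db"))           // false: not in the include list
}
```
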
func init() {
	inputs.Add("docker", func() telegraf.Input {
		return &Docker{
			PerDevice:      true,
			Timeout:        internal.Duration{Duration: time.Second * 5},
			Endpoint:       defaultEndpoint,
			newEnvClient:   NewEnvClient,
			newClient:      NewClient,
			filtersCreated: false,
		}
	})
}
Some files were not shown because too many files have changed in this diff.