Compare commits
694 commits
v0.3.0-beta...1.0.0-beta
[Commit table: 694 commits, ad88a9421a through ec6eae9537. Only abbreviated SHA1 values survived extraction; the author, date, and message columns were empty.]

.gitattributes (vendored, new file, +4)
@@ -0,0 +1,4 @@
CHANGELOG.md merge=union
README.md merge=union
plugins/inputs/all/all.go merge=union
plugins/outputs/all/all.go merge=union

.github/ISSUE_TEMPLATE.md (vendored, new file, +42)
@@ -0,0 +1,42 @@
## Directions

GitHub Issues are reserved for actionable bug reports and feature requests.
General questions should be sent to the [InfluxDB mailing list](https://groups.google.com/forum/#!forum/influxdb).

Before opening an issue, search for similar bug reports or feature requests on GitHub Issues.
If no similar issue can be found, fill out either the "Bug Report" or the "Feature Request" section below.
Erase the other section and everything on and above this line.

*Please note, the quickest way to fix a bug is to open a Pull Request.*

## Bug report

### System info:

[Include Telegraf version, operating system name, and other relevant details]

### Steps to reproduce:

1. ...
2. ...

### Expected behavior:

### Actual behavior:

### Additional info:

[Include gist of relevant config, logs, etc.]

## Feature Request

Opening a feature request kicks off a discussion.

### Proposal:

### Current behavior:

### Desired behavior:

### Use case: [Why is this important (helps with prioritizing requests)]

.github/PULL_REQUEST_TEMPLATE.md (vendored, new file, +5)
@@ -0,0 +1,5 @@
### Required for all PRs:

- [ ] CHANGELOG.md updated
- [ ] Sign [CLA](https://influxdata.com/community/cla/) (if not already signed)
- [ ] README.md updated (if adding a new plugin)

.gitignore (vendored, 4 changes)
@@ -1,4 +1,6 @@
 tivan
 .vagrant
-telegraf
+/telegraf
 .idea
+*~
+*#

CHANGELOG.md (775 changes)
@@ -1,29 +1,512 @@
## v1.0 beta 1 [2016-06-07]

### Release Notes

- `flush_jitter` behavior has been changed. The random jitter will now be
evaluated at every flush interval, rather than once at startup. This makes it
consistent with the behavior of `collection_jitter`. (See the sketch at the
end of these notes.)

- All AWS plugins now utilize a standard mechanism for evaluating credentials.
This allows all AWS plugins to support environment variables, shared credential
files & profiles, and role assumptions. See the specific plugin README for
details.

- The AWS CloudWatch input plugin can now declare a wildcard value for a metric
dimension. This causes the plugin to read all metrics that contain the specified
dimension key regardless of value. This is used to export collections of metrics
without having to know the dimension values ahead of time.

- The AWS CloudWatch input plugin can now be configured with the `cache_ttl`
attribute. This configures the TTL of the internal metric cache. This is useful
in conjunction with wildcard dimension values as it will control the amount of
time before a new metric is included by the plugin.
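
To make the notes above concrete, here is a minimal sketch, assuming the option names documented in the respective READMEs (`cache_ttl`, a `dimensions` entry whose `value` is `"*"`), together with an `[agent]` section exercising the new `flush_jitter` behavior; all values are illustrative:

```toml
[agent]
  interval = "10s"
  collection_jitter = "2s"   # random 0-2s sleep before each collection
  flush_interval = "10s"
  flush_jitter = "5s"        # random 0-5s sleep, now re-evaluated at every flush

[[inputs.cloudwatch]]
  region = "us-east-1"
  namespace = "AWS/ELB"
  period = "5m"
  cache_ttl = "1h"           # TTL of the internal metric cache

  [[inputs.cloudwatch.metrics]]
    names = ["Latency"]
    # wildcard dimension value: read every metric carrying this dimension key
    [[inputs.cloudwatch.metrics.dimensions]]
      name = "LoadBalancerName"
      value = "*"
```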

### Features

- [#1262](https://github.com/influxdata/telegraf/pull/1261): Add graylog input plugin.
- [#1294](https://github.com/influxdata/telegraf/pull/1294): consul input plugin. Thanks @harnash
- [#1164](https://github.com/influxdata/telegraf/pull/1164): conntrack input plugin. Thanks @robinpercy!
- [#1165](https://github.com/influxdata/telegraf/pull/1165): vmstat input plugin. Thanks @jshim-xm!
- [#1247](https://github.com/influxdata/telegraf/pull/1247): rollbar input plugin. Thanks @francois2metz and @cduez!
- [#1208](https://github.com/influxdata/telegraf/pull/1208): Standardized AWS credentials evaluation & wildcard CloudWatch dimensions. Thanks @johnrengelman!
- [#1264](https://github.com/influxdata/telegraf/pull/1264): Add SSL config options to http_response plugin.
- [#1272](https://github.com/influxdata/telegraf/pull/1272): graphite parser: add ability to specify multiple tag keys, for consistency with influxdb parser.
- [#1265](https://github.com/influxdata/telegraf/pull/1265): Make DNS lookups for chrony configurable. Thanks @zbindenren!
- [#1275](https://github.com/influxdata/telegraf/pull/1275): Allow wildcard filtering of varnish stats.
- [#1142](https://github.com/influxdata/telegraf/pull/1142): Support for glob patterns in exec plugin commands configuration.
- [#1278](https://github.com/influxdata/telegraf/pull/1278): RabbitMQ input: made url parameter optional by using DefaultURL (http://localhost:15672) if not specified.
- [#1197](https://github.com/influxdata/telegraf/pull/1197): Limit AWS GetMetricStatistics requests to 10 per second.
- [#1278](https://github.com/influxdata/telegraf/pull/1278) & [#1288](https://github.com/influxdata/telegraf/pull/1288) & [#1295](https://github.com/influxdata/telegraf/pull/1295): RabbitMQ/Apache/InfluxDB inputs: made url(s) parameter optional by using reasonable input defaults if not specified.
- [#1296](https://github.com/influxdata/telegraf/issues/1296): Refactor of flush_jitter argument.
- [#1213](https://github.com/influxdata/telegraf/issues/1213): Add inactive & active memory to mem plugin.

### Bugfixes

- [#1252](https://github.com/influxdata/telegraf/pull/1252) & [#1279](https://github.com/influxdata/telegraf/pull/1279): Fix systemd service. Thanks @zbindenren & @PierreF!
- [#1221](https://github.com/influxdata/telegraf/pull/1221): Fix influxdb n_shards counter.
- [#1258](https://github.com/influxdata/telegraf/pull/1258): Fix potential kernel plugin integer parse error.
- [#1268](https://github.com/influxdata/telegraf/pull/1268): Fix potential influxdb input type assertion panic.
- [#1283](https://github.com/influxdata/telegraf/pull/1283): Still send processes metrics if a process exited during metric collection.
- [#1297](https://github.com/influxdata/telegraf/issues/1297): disk plugin panic when usage grab fails.
- [#1316](https://github.com/influxdata/telegraf/pull/1316): Removed leaked "database" tag on redis metrics. Thanks @PierreF!
- [#1323](https://github.com/influxdata/telegraf/issues/1323): Processes plugin: fix potential error with /proc/net/stat directory.
- [#1322](https://github.com/influxdata/telegraf/issues/1322): Fix rare RHEL 5.2 panic in gopsutil diskio gathering function.

## v0.13.1 [2016-05-24]

### Release Notes

- net_response and http_response plugins timeouts will now accept duration
strings, i.e. "2s" or "500ms".
- Input plugin Gathers will no longer be logged by default, but a Gather for
_each_ plugin will be logged in Debug mode.
- Debug mode will no longer print every point added to the accumulator. This
functionality can be duplicated using the `file` output plugin and printing
to "stdout".

### Features

- [#1173](https://github.com/influxdata/telegraf/pull/1173): varnish input plugin. Thanks @sfox-xmatters!
- [#1138](https://github.com/influxdata/telegraf/pull/1138): nstat input plugin. Thanks @Maksadbek!
- [#1139](https://github.com/influxdata/telegraf/pull/1139): instrumental output plugin. Thanks @jasonroelofs!
- [#1172](https://github.com/influxdata/telegraf/pull/1172): Ceph storage stats. Thanks @robinpercy!
- [#1233](https://github.com/influxdata/telegraf/pull/1233): Updated golint gopsutil dependency.
- [#1238](https://github.com/influxdata/telegraf/pull/1238): chrony input plugin. Thanks @zbindenren!
- [#479](https://github.com/influxdata/telegraf/issues/479): per-plugin execution time added to debug output.
- [#1249](https://github.com/influxdata/telegraf/issues/1249): influxdb output: added write_consistency argument.

### Bugfixes

- [#1195](https://github.com/influxdata/telegraf/pull/1195): Docker panic on timeout. Thanks @zstyblik!
- [#1211](https://github.com/influxdata/telegraf/pull/1211): mongodb input. Fix possible panic. Thanks @kols!
- [#1215](https://github.com/influxdata/telegraf/pull/1215): Fix for possible gopsutil-dependent plugin hangs.
- [#1228](https://github.com/influxdata/telegraf/pull/1228): Fix service plugin host tag overwrite.
- [#1198](https://github.com/influxdata/telegraf/pull/1198): http_response: override request Host header properly.
- [#1230](https://github.com/influxdata/telegraf/issues/1230): Fix Telegraf process hangup due to a single plugin hanging.
- [#1214](https://github.com/influxdata/telegraf/issues/1214): Use TCP timeout argument in net_response plugin.
- [#1243](https://github.com/influxdata/telegraf/pull/1243): Logfile not created on systemd.

## v0.13 [2016-05-11]

### Release Notes

- **Breaking change** in jolokia plugin. See
https://github.com/influxdata/telegraf/blob/master/plugins/inputs/jolokia/README.md
for updated configuration. The plugin will now support proxy mode and will make
POST requests.

- New [agent] configuration option: `metric_batch_size`. This option tells
telegraf the maximum batch size to allow to accumulate before sending a flush
to the configured outputs. `metric_buffer_limit` now refers to the absolute
maximum number of metrics that will accumulate before metrics are dropped.
(See the sketch at the end of these notes.)

- There is no longer an option to
`flush_buffer_when_full`; this is now the default and only behavior of telegraf.

- **Breaking Change**: docker plugin tags. The cont_id tag no longer exists, it
will now be a field, and be called container_id. Additionally, cont_image and
cont_name are being renamed to container_image and container_name.

- **Breaking Change**: docker plugin measurements. The `docker_cpu`, `docker_mem`,
`docker_blkio` and `docker_net` measurements are being renamed to
`docker_container_cpu`, `docker_container_mem`, `docker_container_blkio` and
`docker_container_net`. Why? Because these metrics are
specifically tracking per-container stats. The problem with per-container stats,
in some use-cases, is that if containers are short-lived AND names are not
kept consistent, then the series cardinality will balloon very quickly.
So adding "container" to each metric will:
(1) make it more clear that these metrics are per-container, and
(2) allow users to easily drop per-container metrics if cardinality is an
issue (`namedrop = ["docker_container_*"]`)

- `tagexclude` and `taginclude` are now available, which can be used to remove
tags from measurements on inputs and outputs. See
[the configuration doc](https://github.com/influxdata/telegraf/blob/master/docs/CONFIGURATION.md)
for more details.

- **Measurement filtering:** All measurement filters now match based on glob
only. Previously there was an undocumented behavior where filters would match
based on _prefix_ in addition to globs. This means that a filter like
`fielddrop = ["time_"]` will need to be changed to `fielddrop = ["time_*"]`

- **datadog**: measurement and field names will no longer have `_` replaced by `.`

- The following plugins have changed their tags to _not_ overwrite the host tag:
  - cassandra: `host -> cassandra_host`
  - disque: `host -> disque_host`
  - rethinkdb: `host -> rethinkdb_host`

- **Breaking Change**: The `win_perf_counters` input has been changed to
sanitize field names, replacing `/Sec` and `/sec` with `_persec`, as well as
spaces with underscores. This is needed because Graphite doesn't like slashes
and spaces, and was failing to accept metrics that had them.
The `/[sS]ec` -> `_persec` is just to make things clearer and uniform.

- **Breaking Change**: snmp plugin. The `host` tag of the snmp plugin has been
changed to the `snmp_host` tag.

- The `disk` input plugin can now be configured with the `HOST_MOUNT_PREFIX` environment variable.
This value is prepended to any mountpaths discovered before retrieving stats.
It is not included on the report path. This is necessary for reporting host disk stats when running from within a container.
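
A sketch tying several of the notes above together: `metric_batch_size` alongside `metric_buffer_limit`, a `namedrop` glob for the renamed docker measurements, `tagexclude` on an input, and the glob-only `fielddrop`; names and values are illustrative:

```toml
[agent]
  metric_batch_size = 1000     # max metrics accumulated before a flush is sent
  metric_buffer_limit = 10000  # absolute cap before metrics are dropped

[[inputs.docker]]
  # drop per-container measurements if series cardinality becomes an issue
  namedrop = ["docker_container_*"]
  # strip an unwanted tag from this input's measurements (illustrative tag name)
  tagexclude = ["container_version"]

[[inputs.cpu]]
  # filters now match on glob only; the old prefix filter "time_" becomes:
  fielddrop = ["time_*"]
```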

### Features

- [#1031](https://github.com/influxdata/telegraf/pull/1031): Jolokia plugin proxy mode. Thanks @saiello!
- [#1017](https://github.com/influxdata/telegraf/pull/1017): taginclude and tagexclude arguments.
- [#1015](https://github.com/influxdata/telegraf/pull/1015): Docker plugin schema refactor.
- [#889](https://github.com/influxdata/telegraf/pull/889): Improved MySQL plugin. Thanks @maksadbek!
- [#1060](https://github.com/influxdata/telegraf/pull/1060): TTL metrics added to MongoDB input plugin.
- [#1056](https://github.com/influxdata/telegraf/pull/1056): Don't allow inputs to overwrite host tags.
- [#1035](https://github.com/influxdata/telegraf/issues/1035): Add `user`, `exe`, `pidfile` tags to procstat plugin.
- [#1041](https://github.com/influxdata/telegraf/issues/1041): Add `n_cpus` field to the system plugin.
- [#1072](https://github.com/influxdata/telegraf/pull/1072): New Input Plugin: filestat.
- [#1066](https://github.com/influxdata/telegraf/pull/1066): Replication lag metrics for MongoDB input plugin.
- [#1086](https://github.com/influxdata/telegraf/pull/1086): Ability to specify AWS keys in config file. Thanks @johnrengleman!
- [#1096](https://github.com/influxdata/telegraf/pull/1096): Performance refactor of running output buffers.
- [#967](https://github.com/influxdata/telegraf/issues/967): Buffer logging improvements.
- [#1107](https://github.com/influxdata/telegraf/issues/1107): Support lustre2 job stats. Thanks @hanleyja!
- [#1122](https://github.com/influxdata/telegraf/pull/1122): Support setting config path through env variable and default paths.
- [#1128](https://github.com/influxdata/telegraf/pull/1128): MongoDB jumbo chunks metric for MongoDB input plugin.
- [#1146](https://github.com/influxdata/telegraf/pull/1146): HAProxy socket support. Thanks @weshmashian!

### Bugfixes

- [#1050](https://github.com/influxdata/telegraf/issues/1050): jolokia plugin - do not overwrite host tag. Thanks @saiello!
- [#921](https://github.com/influxdata/telegraf/pull/921): mqtt_consumer stops gathering metrics. Thanks @chaton78!
- [#1013](https://github.com/influxdata/telegraf/pull/1013): Close dead riemann output connections. Thanks @echupriyanov!
- [#1012](https://github.com/influxdata/telegraf/pull/1012): Set default tags in test accumulator.
- [#1024](https://github.com/influxdata/telegraf/issues/1024): Don't replace `.` with `_` in datadog output.
- [#1058](https://github.com/influxdata/telegraf/issues/1058): Fix possible leaky TCP connections in influxdb output.
- [#1044](https://github.com/influxdata/telegraf/pull/1044): Fix SNMP OID possible collisions. Thanks @relip!
- [#1022](https://github.com/influxdata/telegraf/issues/1022): Don't error deb/rpm install on systemd errors.
- [#1078](https://github.com/influxdata/telegraf/issues/1078): Use default AWS credential chain.
- [#1070](https://github.com/influxdata/telegraf/issues/1070): SQL Server input. Fix datatype conversion.
- [#1089](https://github.com/influxdata/telegraf/issues/1089): Fix leaky TCP connections in phpfpm plugin.
- [#914](https://github.com/influxdata/telegraf/issues/914): Telegraf can drop metrics on full buffers.
- [#1098](https://github.com/influxdata/telegraf/issues/1098): Sanitize invalid OpenTSDB characters.
- [#1110](https://github.com/influxdata/telegraf/pull/1110): Sanitize * to - in graphite serializer. Thanks @goodeggs!
- [#1118](https://github.com/influxdata/telegraf/pull/1118): Sanitize Counter names for `win_perf_counters` input.
- [#1125](https://github.com/influxdata/telegraf/pull/1125): Wrap all exec command runners with a timeout, so hung os processes don't halt Telegraf.
- [#1113](https://github.com/influxdata/telegraf/pull/1113): Set MaxRetry and RequiredAcks defaults in Kafka output.
- [#1090](https://github.com/influxdata/telegraf/issues/1090): [agent] and [global_tags] config sometimes not getting applied.
- [#1133](https://github.com/influxdata/telegraf/issues/1133): Use a timeout for docker list & stat cmds.
- [#1052](https://github.com/influxdata/telegraf/issues/1052): Docker panic fix when decode fails.
- [#1136](https://github.com/influxdata/telegraf/pull/1136): "DELAYED" inserts were deprecated in MySQL 5.6.6. Thanks @PierreF!

## v0.12.1 [2016-04-14]

### Release Notes

- Breaking change in the dovecot input plugin. See Features section below.
- Graphite output templates are now supported. See
https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite
- Possible breaking change for the librato and graphite outputs. Telegraf will
no longer insert field names when the field is simply named `value`. This is
because the `value` field is redundant in the graphite/librato context.
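
As a sketch of the new serializer support, a `file` output emitting graphite-formatted lines might look like the block below; the `template` key is a hypothetical placeholder, and the linked DATA_FORMATS_OUTPUT doc has the authoritative option names:

```toml
[[outputs.file]]
  files = ["stdout"]
  data_format = "graphite"
  # hypothetical template ordering host, tags, measurement, and field
  template = "host.tags.measurement.field"
```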

### Features

- [#1009](https://github.com/influxdata/telegraf/pull/1009): Cassandra input plugin. Thanks @subhachandrachandra!
- [#976](https://github.com/influxdata/telegraf/pull/976): Reduce allocations in the UDP and statsd inputs.
- [#979](https://github.com/influxdata/telegraf/pull/979): Reduce allocations in the TCP listener.
- [#992](https://github.com/influxdata/telegraf/pull/992): Refactor allocations in TCP/UDP listeners.
- [#935](https://github.com/influxdata/telegraf/pull/935): AWS Cloudwatch input plugin. Thanks @joshhardy & @ljosa!
- [#943](https://github.com/influxdata/telegraf/pull/943): http_response input plugin. Thanks @Lswith!
- [#939](https://github.com/influxdata/telegraf/pull/939): sysstat input plugin. Thanks @zbindenren!
- [#998](https://github.com/influxdata/telegraf/pull/998): **breaking change** enabled global, user and ip queries in dovecot plugin. Thanks @mikif70!
- [#1001](https://github.com/influxdata/telegraf/pull/1001): Graphite serializer templates.
- [#1008](https://github.com/influxdata/telegraf/pull/1008): Adding memstats metrics to the influxdb plugin.

### Bugfixes

- [#968](https://github.com/influxdata/telegraf/issues/968): Processes plugin gets unknown state when spaces are in (command name).
- [#969](https://github.com/influxdata/telegraf/pull/969): ipmi_sensors: allow : in password. Thanks @awaw!
- [#972](https://github.com/influxdata/telegraf/pull/972): dovecot: remove extra newline in dovecot command. Thanks @mrannanj!
- [#645](https://github.com/influxdata/telegraf/issues/645): docker plugin i/o error on closed pipe. Thanks @tripledes!

## v0.12.0 [2016-04-05]

### Features

- [#951](https://github.com/influxdata/telegraf/pull/951): Parse environment variables in the config file (see the sketch after this list).
- [#948](https://github.com/influxdata/telegraf/pull/948): Cleanup config file and make default package version include all plugins (but commented).
- [#927](https://github.com/influxdata/telegraf/pull/927): Adds parsing of tags to the statsd input when using DataDog's dogstatsd extension.
- [#863](https://github.com/influxdata/telegraf/pull/863): AMQP output: allow external auth. Thanks @ekini!
- [#707](https://github.com/influxdata/telegraf/pull/707): Improved prometheus plugin. Thanks @titilambert!
- [#878](https://github.com/influxdata/telegraf/pull/878): Added json serializer. Thanks @ch3lo!
- [#880](https://github.com/influxdata/telegraf/pull/880): Add the ability to specify the bearer token to the prometheus plugin. Thanks @jchauncey!
- [#882](https://github.com/influxdata/telegraf/pull/882): Fixed SQL Server Plugin issues.
- [#849](https://github.com/influxdata/telegraf/issues/849): Adding ability to parse single values as an input data type.
- [#844](https://github.com/influxdata/telegraf/pull/844): postgres_extensible plugin added. Thanks @menardorama!
- [#866](https://github.com/influxdata/telegraf/pull/866): couchbase input plugin. Thanks @ljosa!
- [#789](https://github.com/influxdata/telegraf/pull/789): Support multiple field specification and `field*` in graphite templates. Thanks @chrusty!
- [#762](https://github.com/influxdata/telegraf/pull/762): Nagios parser for the exec plugin. Thanks @titilambert!
- [#848](https://github.com/influxdata/telegraf/issues/848): Provide option to omit host tag from telegraf agent.
- [#928](https://github.com/influxdata/telegraf/pull/928): Deprecating the statsd "convert_names" option, expose separator config.
- [#919](https://github.com/influxdata/telegraf/pull/919): ipmi_sensor input plugin. Thanks @ebookbug!
- [#945](https://github.com/influxdata/telegraf/pull/945): KAFKA output: codec, acks, and retry configuration. Thanks @framiere!
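
For instance (a sketch; `$INFLUX_URL` is a hypothetical variable name), an environment variable can now stand in for a config value:

```toml
[[outputs.influxdb]]
  # $INFLUX_URL is expanded from the environment when the config is loaded
  urls = ["$INFLUX_URL"]
  database = "telegraf"
```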

### Bugfixes

- [#890](https://github.com/influxdata/telegraf/issues/890): Create TLS config even if only ssl_ca is provided.
- [#884](https://github.com/influxdata/telegraf/issues/884): Do not call write method if there are 0 metrics to write.
- [#898](https://github.com/influxdata/telegraf/issues/898): Put database name in quotes, fixes special characters in the database name.
- [#656](https://github.com/influxdata/telegraf/issues/656): No longer run `lsof` on linux to get netstat data, fixes permissions issue.
- [#907](https://github.com/influxdata/telegraf/issues/907): Fix prometheus invalid label/measurement name key.
- [#841](https://github.com/influxdata/telegraf/issues/841): Fix memcached unix socket panic.
- [#873](https://github.com/influxdata/telegraf/issues/873): Fix SNMP plugin sometimes not returning metrics. Thanks @titiliambert!
- [#934](https://github.com/influxdata/telegraf/pull/934): phpfpm: Fix fcgi uri path. Thanks @rudenkovk!
- [#805](https://github.com/influxdata/telegraf/issues/805): Kafka consumer stops gathering after i/o timeout.
- [#959](https://github.com/influxdata/telegraf/pull/959): reduce mongodb & prometheus collection timeouts. Thanks @PierreF!

## v0.11.1 [2016-03-17]

### Release Notes

- Primarily this release was cut to fix [#859](https://github.com/influxdata/telegraf/issues/859).

### Features

- [#747](https://github.com/influxdata/telegraf/pull/747): Start telegraf on install & remove on uninstall. Thanks @pierref!
- [#794](https://github.com/influxdata/telegraf/pull/794): Add service reload ability. Thanks @entertainyou!

### Bugfixes

- [#852](https://github.com/influxdata/telegraf/issues/852): Windows zip package fix.
- [#859](https://github.com/influxdata/telegraf/issues/859): httpjson plugin panic.

## v0.11.0 [2016-03-15]

### Release Notes

### Features

- [#692](https://github.com/influxdata/telegraf/pull/770): Support InfluxDB retention policies.
- [#771](https://github.com/influxdata/telegraf/pull/771): Default timeouts for input plugins. Thanks @PierreF!
- [#758](https://github.com/influxdata/telegraf/pull/758): UDP Listener input plugin, thanks @whatyouhide!
- [#769](https://github.com/influxdata/telegraf/issues/769): httpjson plugin: allow specifying SSL configuration.
- [#735](https://github.com/influxdata/telegraf/pull/735): SNMP Table feature. Thanks @titilambert!
- [#754](https://github.com/influxdata/telegraf/pull/754): docker plugin: adding `docker info` metrics to output. Thanks @titilambert!
- [#788](https://github.com/influxdata/telegraf/pull/788): -input-list and -output-list command-line options. Thanks @ebookbug!
- [#778](https://github.com/influxdata/telegraf/pull/778): Adding a TCP input listener.
- [#797](https://github.com/influxdata/telegraf/issues/797): Provide option for persistent MQTT consumer client sessions.
- [#799](https://github.com/influxdata/telegraf/pull/799): Add number of threads for procstat input plugin. Thanks @titilambert!
- [#776](https://github.com/influxdata/telegraf/pull/776): Add Zookeeper chroot option to kafka_consumer. Thanks @prune998!
- [#811](https://github.com/influxdata/telegraf/pull/811): Add processes plugin for classifying total procs on system. Thanks @titilambert!
- [#235](https://github.com/influxdata/telegraf/issues/235): Add number of users to the `system` input plugin.
- [#826](https://github.com/influxdata/telegraf/pull/826): "kernel" linux plugin for /proc/stat metrics (context switches, interrupts, etc.)
- [#847](https://github.com/influxdata/telegraf/pull/847): `ntpq`: Input plugin for running ntp query executable and gathering metrics.

### Bugfixes

- [#748](https://github.com/influxdata/telegraf/issues/748): Fix sensor plugin split on ":".
- [#722](https://github.com/influxdata/telegraf/pull/722): Librato output plugin fixes. Thanks @chrusty!
- [#745](https://github.com/influxdata/telegraf/issues/745): Fix Telegraf toml parse panic on large config files. Thanks @titilambert!
- [#781](https://github.com/influxdata/telegraf/pull/781): Fix mqtt_consumer username not being set. Thanks @chaton78!
- [#786](https://github.com/influxdata/telegraf/pull/786): Fix mqtt output username not being set. Thanks @msangoi!
- [#773](https://github.com/influxdata/telegraf/issues/773): Fix duplicate measurements in snmp plugin. Thanks @titilambert!
- [#708](https://github.com/influxdata/telegraf/issues/708): packaging: build ARM package.
- [#713](https://github.com/influxdata/telegraf/issues/713): packaging: insecure permissions error on log directory.
- [#816](https://github.com/influxdata/telegraf/issues/816): Fix phpfpm panic if fcgi endpoint unreachable.
- [#828](https://github.com/influxdata/telegraf/issues/828): fix net_response plugin overwriting host tag.
- [#821](https://github.com/influxdata/telegraf/issues/821): Remove postgres password from server tag. Thanks @menardorama!

## v0.10.4.1

### Release Notes

- Bug in the build script broke deb and rpm packages.

### Bugfixes

- [#750](https://github.com/influxdata/telegraf/issues/750): deb package broken.
- [#752](https://github.com/influxdata/telegraf/issues/752): rpm package broken.

## v0.10.4 [2016-02-24]

### Release Notes

- The pass/drop parameters have been renamed to fielddrop/fieldpass parameters,
to more accurately indicate their purpose (see the sketch below).
- There are also now namedrop/namepass parameters for passing/dropping based
on the metric _name_.
- Experimental windows builds now available.
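
A minimal sketch of the renamed and new filter parameters (patterns are arbitrary examples):

```toml
[[inputs.cpu]]
  namepass = ["cpu*"]       # keep only measurements whose name matches
  fieldpass = ["usage_*"]   # formerly "pass"
```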

### Features

- [#727](https://github.com/influxdata/telegraf/pull/727): riak input, thanks @jcoene!
- [#694](https://github.com/influxdata/telegraf/pull/694): DNS Query input, thanks @mjasion!
- [#724](https://github.com/influxdata/telegraf/pull/724): username matching for procstat input, thanks @zorel!
- [#736](https://github.com/influxdata/telegraf/pull/736): Ignore dummy filesystems from disk plugin. Thanks @PierreF!
- [#737](https://github.com/influxdata/telegraf/pull/737): Support multiple fields for statsd input. Thanks @mattheath!

### Bugfixes

- [#701](https://github.com/influxdata/telegraf/pull/701): output write count shouldn't print in quiet mode.
- [#746](https://github.com/influxdata/telegraf/pull/746): httpjson plugin: Fix HTTP GET parameters.

## v0.10.3 [2016-02-18]

### Release Notes

- Users of the `exec` and `kafka_consumer` (and the new `nats_consumer`
and `mqtt_consumer` plugins) can now specify the incoming data
format that they would like to parse. Currently supports: "json", "influx", and
"graphite".
- Users of message broker and file output plugins can now choose what data format
they would like to output. Currently supports: "influx" and "graphite".
- More info on parsing _incoming_ data formats can be found
[here](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md)
- More info on serializing _outgoing_ data formats can be found
[here](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md)
- Telegraf now has an option `flush_buffer_when_full` that will flush the
metric buffer whenever it fills up for each output, rather than dropping
points and only flushing on a set time interval. This will default to `true`
and is in the `[agent]` config section.
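
A sketch combining these notes: an `exec` input parsing one of the supported formats, a `file` output serializing another, and the new agent flag. The script path is a placeholder, and the exec key name (`command` vs. `commands`) varies by version, so check the exec README and the data-format docs linked above:

```toml
[agent]
  flush_buffer_when_full = true   # defaults to true

[[inputs.exec]]
  command = "/usr/local/bin/my_metrics.sh"  # placeholder collector script
  data_format = "influx"                    # or "json", "graphite"

[[outputs.file]]
  files = ["stdout"]
  data_format = "graphite"                  # or "influx"
```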

### Features

- [#652](https://github.com/influxdata/telegraf/pull/652): CouchDB Input Plugin. Thanks @codehate!
- [#655](https://github.com/influxdata/telegraf/pull/655): Support parsing arbitrary data formats. Currently limited to kafka_consumer and exec inputs.
- [#671](https://github.com/influxdata/telegraf/pull/671): Dovecot input plugin. Thanks @mikif70!
- [#680](https://github.com/influxdata/telegraf/pull/680): NATS consumer input plugin. Thanks @netixen!
- [#676](https://github.com/influxdata/telegraf/pull/676): MQTT consumer input plugin.
- [#683](https://github.com/influxdata/telegraf/pull/683): PostGRES input plugin: add pg_stat_bgwriter. Thanks @menardorama!
- [#679](https://github.com/influxdata/telegraf/pull/679): File/stdout output plugin.
- [#679](https://github.com/influxdata/telegraf/pull/679): Support for arbitrary output data formats.
- [#695](https://github.com/influxdata/telegraf/pull/695): raindrops input plugin. Thanks @burdandrei!
- [#650](https://github.com/influxdata/telegraf/pull/650): net_response input plugin. Thanks @titilambert!
- [#699](https://github.com/influxdata/telegraf/pull/699): Flush based on buffer size rather than time.
- [#682](https://github.com/influxdata/telegraf/pull/682): Mesos input plugin. Thanks @tripledes!

### Bugfixes

- [#443](https://github.com/influxdata/telegraf/issues/443): Fix Ping command timeout parameter on Linux.
- [#662](https://github.com/influxdata/telegraf/pull/667): Change `[tags]` to `[global_tags]` to fix multiple-plugin tags bug.
- [#642](https://github.com/influxdata/telegraf/issues/642): Riemann output plugin issues.
- [#394](https://github.com/influxdata/telegraf/issues/394): Support HTTP POST. Thanks @gabelev!
- [#715](https://github.com/influxdata/telegraf/pull/715): Fix influxdb precision config panic. Thanks @netixen!

## v0.10.2 [2016-02-04]

### Release Notes

- Statsd timing measurements are now aggregated into a single measurement with
fields.
- Graphite output now inserts tags into the bucket in alphabetical order.
- Normalized TLS/SSL support for output plugins: MQTT, AMQP, Kafka.
- `verify_ssl` config option was removed from Kafka because it was actually
doing the opposite of what it claimed to do (yikes). It's been replaced by
`insecure_skip_verify`.
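
A sketch of the normalized TLS options on the Kafka output using the renamed flag; certificate paths are placeholders and the exact key names are per the output's README:

```toml
[[outputs.kafka]]
  brokers = ["localhost:9092"]
  topic = "telegraf"
  ssl_ca = "/etc/telegraf/ca.pem"
  ssl_cert = "/etc/telegraf/cert.pem"
  ssl_key = "/etc/telegraf/key.pem"
  insecure_skip_verify = false   # replaces the inverted verify_ssl option
```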

### Features

- [#575](https://github.com/influxdata/telegraf/pull/575): Support for collecting Windows Performance Counters. Thanks @TheFlyingCorpse!
- [#564](https://github.com/influxdata/telegraf/issues/564): features for plugin writing simplification. Internal metric data type.
- [#603](https://github.com/influxdata/telegraf/pull/603): Aggregate statsd timing measurements into fields. Thanks @marcinbunsch!
- [#601](https://github.com/influxdata/telegraf/issues/601): Warn when overwriting cached metrics.
- [#614](https://github.com/influxdata/telegraf/pull/614): PowerDNS input plugin. Thanks @Kasen!
- [#617](https://github.com/influxdata/telegraf/pull/617): exec plugin: parse influx line protocol in addition to JSON.
- [#628](https://github.com/influxdata/telegraf/pull/628): Windows perf counters: pre-vista support.

### Bugfixes

- [#595](https://github.com/influxdata/telegraf/issues/595): graphite output should include tags to separate duplicate measurements.
- [#599](https://github.com/influxdata/telegraf/issues/599): datadog plugin tags not working.
- [#600](https://github.com/influxdata/telegraf/issues/600): datadog measurement/field name parsing is wrong.
- [#602](https://github.com/influxdata/telegraf/issues/602): Fix statsd field name templating.
- [#612](https://github.com/influxdata/telegraf/pull/612): Docker input panic fix if stats received are nil.
- [#634](https://github.com/influxdata/telegraf/pull/634): Properly set host headers in httpjson. Thanks @reginaldosousa!

## v0.10.1 [2016-01-27]

### Release Notes

- Telegraf now keeps a fixed-length buffer of metrics per-output. This buffer
defaults to 10,000 metrics, and is adjustable. The buffer is cleared when a
successful write to that output occurs.
- The docker plugin has been significantly overhauled to add more metrics
and allow for docker-machine (incl OSX) support.
[See the readme](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/docker/README.md)
for the latest measurements, fields, and tags. There is also now support for
specifying a docker endpoint to get metrics from.
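
For example, pointing the overhauled docker plugin at an explicit endpoint (the unix socket is the common default; a docker-machine TCP address works the same way):

```toml
[[inputs.docker]]
  endpoint = "unix:///var/run/docker.sock"
  # or, for docker-machine: endpoint = "tcp://192.168.99.100:2376"
```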

### Features

- [#509](https://github.com/influxdata/telegraf/pull/509): Flatten JSON arrays with indices. Thanks @psilva261!
- [#512](https://github.com/influxdata/telegraf/pull/512): Python 3 build script, add lsof dep to package. Thanks @Ormod!
- [#475](https://github.com/influxdata/telegraf/pull/475): Add response time to httpjson plugin. Thanks @titilambert!
- [#519](https://github.com/influxdata/telegraf/pull/519): Added a sensors input based on lm-sensors. Thanks @md14454!
- [#467](https://github.com/influxdata/telegraf/issues/467): Add option to disable statsd measurement name conversion.
- [#534](https://github.com/influxdata/telegraf/pull/534): NSQ input plugin. Thanks @allingeek!
- [#494](https://github.com/influxdata/telegraf/pull/494): Graphite output plugin. Thanks @titilambert!
- AMQP SSL support. Thanks @ekini!
- [#539](https://github.com/influxdata/telegraf/pull/539): Reload config on SIGHUP. Thanks @titilambert!
- [#522](https://github.com/influxdata/telegraf/pull/522): Phusion passenger input plugin. Thanks @kureikain!
- [#541](https://github.com/influxdata/telegraf/pull/541): Kafka output TLS cert support. Thanks @Ormod!
- [#551](https://github.com/influxdata/telegraf/pull/551): Statsd UDP read packet size now defaults to 1500 bytes, and is configurable.
- [#552](https://github.com/influxdata/telegraf/pull/552): Support for collection interval jittering.
- [#484](https://github.com/influxdata/telegraf/issues/484): Include usage percent with procstat metrics.
- [#553](https://github.com/influxdata/telegraf/pull/553): Amazon CloudWatch output. Thanks @skwong2!
- [#503](https://github.com/influxdata/telegraf/pull/503): Support docker endpoint configuration.
- [#563](https://github.com/influxdata/telegraf/pull/563): Docker plugin overhaul.
- [#285](https://github.com/influxdata/telegraf/issues/285): Fixed-size buffer of points.
- [#546](https://github.com/influxdata/telegraf/pull/546): SNMP Input plugin. Thanks @titilambert!
- [#589](https://github.com/influxdata/telegraf/pull/589): Microsoft SQL Server input plugin. Thanks @zensqlmonitor!
- [#573](https://github.com/influxdata/telegraf/pull/573): Github webhooks consumer input. Thanks @jackzampolin!
- [#471](https://github.com/influxdata/telegraf/pull/471): httpjson request headers. Thanks @asosso!

### Bugfixes

- [#506](https://github.com/influxdata/telegraf/pull/506): Ping input doesn't return response time metric when timeout. Thanks @titilambert!
- [#508](https://github.com/influxdata/telegraf/pull/508): Fix prometheus cardinality issue with the `net` plugin.
- [#499](https://github.com/influxdata/telegraf/issues/499) & [#502](https://github.com/influxdata/telegraf/issues/502): php fpm unix socket and other fixes, thanks @kureikain!
- [#543](https://github.com/influxdata/telegraf/issues/543): Statsd Packet size sometimes truncated.
- [#440](https://github.com/influxdata/telegraf/issues/440): Don't query filtered devices for disk stats.
- [#463](https://github.com/influxdata/telegraf/issues/463): Docker plugin not working on AWS Linux.
- [#568](https://github.com/influxdata/telegraf/issues/568): Multiple output race condition.
- [#585](https://github.com/influxdata/telegraf/pull/585): Log stack trace and continue on Telegraf panic. Thanks @wutaizeng!

## v0.10.0 [2016-01-12]

### Release Notes

- Linux packages have been taken out of `opt`, the binary is now in `/usr/bin`
and configuration files are in `/etc/telegraf`
- **breaking change** `plugins` have been renamed to `inputs`. This was done because
`plugins` is too generic, as there are now also "output plugins", and will likely
be "aggregator plugins" and "filter plugins" in the future. Additionally,
`inputs/` and `outputs/` directories have been placed in the root-level `plugins/`
directory.
- **breaking change** the `io` plugin has been renamed `diskio`
- **breaking change** plugin measurements aggregated into a single measurement.
- **breaking change** `jolokia` plugin: must use global tag/drop/pass parameters
for configuration.
- **breaking change** `twemproxy` plugin: `prefix` option removed.
- **breaking change** `procstat` cpu measurements are now prepended with `cpu_time_`
instead of only `cpu_`
- **breaking change** some command-line flags have been renamed to separate words.
`-configdirectory` -> `-config-directory`, `-filter` -> `-input-filter`,
`-outputfilter` -> `-output-filter`
- The prometheus plugin schema has not been changed (measurements have not been
aggregated).

### Packaging change note:

RHEL/CentOS users upgrading from 0.2.x to 0.10.0 will probably have their
configurations overwritten by the upgrade. There is a backup stored at
/etc/telegraf/telegraf.conf.$(date +%s).backup.

### Features

- Plugin measurements aggregated into a single measurement.
- Added ability to specify per-plugin tags.
- Added ability to specify per-plugin measurement suffix and prefix.
(`name_prefix` and `name_suffix`)
- Added ability to override base plugin measurement name. (`name_override`)
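
A sketch of the per-plugin naming and tag options introduced here (plugin choice and values are illustrative):

```toml
[[inputs.mem]]
  name_override = "memory"   # replace the base measurement name outright
  name_suffix = "_stats"     # or append/prepend instead
  [inputs.mem.tags]
    datacenter = "us-east-1" # per-plugin tag
```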

### Bugfixes

## v0.2.5 [unreleased]
|
## v0.2.5 [unreleased]
|
||||||
|
|
||||||
### Features
|
### Features
|
||||||
- [#427](https://github.com/influxdata/telegraf/pull/427): zfs plugin: pool stats added. Thanks @allenpetersen!
- [#428](https://github.com/influxdata/telegraf/pull/428): Amazon Kinesis output. Thanks @jimmystewpot!
- [#449](https://github.com/influxdata/telegraf/pull/449): influxdb plugin, thanks @mark-rushakoff

### Bugfixes

- [#430](https://github.com/influxdata/telegraf/issues/430): Network statistics removed in elasticsearch 2.1. Thanks @jipperinbham!
- [#452](https://github.com/influxdata/telegraf/issues/452): Elasticsearch open file handles error. Thanks @jipperinbham!

## v0.2.4 [2015-12-08]

### Features

- [#412](https://github.com/influxdata/telegraf/pull/412): Additional memcached stats. Thanks @mgresser!
- [#410](https://github.com/influxdata/telegraf/pull/410): Additional redis metrics. Thanks @vlaadbrain!
- [#414](https://github.com/influxdata/telegraf/issues/414): Jolokia plugin auth parameters
- [#415](https://github.com/influxdata/telegraf/issues/415): memcached plugin: support unix sockets
- [#418](https://github.com/influxdata/telegraf/pull/418): memcached plugin additional unit tests.
- [#408](https://github.com/influxdata/telegraf/pull/408): MailChimp plugin.
- [#382](https://github.com/influxdata/telegraf/pull/382): Add system wide network protocol stats to `net` plugin.
- [#401](https://github.com/influxdata/telegraf/pull/401): Support pass/drop/tagpass/tagdrop for outputs. Thanks @oldmantaiter!

### Bugfixes

- [#405](https://github.com/influxdata/telegraf/issues/405): Prometheus output cardinality issue
- [#388](https://github.com/influxdata/telegraf/issues/388): Fix collection hangup when cpu times decrement.

## v0.2.3 [2015-11-30]

@@ -38,11 +521,11 @@ functional.

same type can be specified, like this:

```
[[inputs.cpu]]
  percpu = false
  totalcpu = true

[[inputs.cpu]]
  percpu = true
  totalcpu = false
  drop = ["cpu_time"]
```

@@ -52,15 +535,15 @@ same type can be specified, like this:

- Aerospike plugin: tag changed from `host` -> `aerospike_host`

### Features

- [#379](https://github.com/influxdata/telegraf/pull/379): Riemann output, thanks @allenj!
- [#375](https://github.com/influxdata/telegraf/pull/375): kafka_consumer service plugin.
- [#392](https://github.com/influxdata/telegraf/pull/392): Procstat plugin can now accept pgrep -f pattern, thanks @ecarreras!
- [#383](https://github.com/influxdata/telegraf/pull/383): Specify plugins as a list.
- [#354](https://github.com/influxdata/telegraf/pull/354): Add ability to specify multiple metrics in one statsd line. Thanks @MerlinDMC!

### Bugfixes

- [#371](https://github.com/influxdata/telegraf/issues/371): Kafka consumer plugin not functioning.
- [#389](https://github.com/influxdata/telegraf/issues/389): NaN value panic

## v0.2.2 [2015-11-18]
@@ -69,7 +552,7 @@ same type can be specified, like this:

lists of servers/URLs. 0.2.2 is being released solely to fix that bug

### Bugfixes

- [#377](https://github.com/influxdata/telegraf/pull/377): Fix for duplicate slices in inputs.

## v0.2.1 [2015-11-16]
@@ -86,22 +569,22 @@ changed to just run docker commands in the Makefile. See `make docker-run` and

same type.

### Features

- [#325](https://github.com/influxdata/telegraf/pull/325): NSQ output. Thanks @jrxFive!
- [#318](https://github.com/influxdata/telegraf/pull/318): Prometheus output. Thanks @oldmantaiter!
- [#338](https://github.com/influxdata/telegraf/pull/338): Restart Telegraf on package upgrade. Thanks @linsomniac!
- [#337](https://github.com/influxdata/telegraf/pull/337): Jolokia plugin, thanks @saiello!
- [#350](https://github.com/influxdata/telegraf/pull/350): Amon output.
- [#365](https://github.com/influxdata/telegraf/pull/365): Twemproxy plugin by @codeb2cc
- [#317](https://github.com/influxdata/telegraf/issues/317): ZFS plugin, thanks @cornerot!
- [#364](https://github.com/influxdata/telegraf/pull/364): Support InfluxDB UDP output.
- [#370](https://github.com/influxdata/telegraf/pull/370): Support specifying multiple outputs, as lists.
- [#372](https://github.com/influxdata/telegraf/pull/372): Remove gosigar and update go-dockerclient for FreeBSD support. Thanks @MerlinDMC!

### Bugfixes

- [#331](https://github.com/influxdata/telegraf/pull/331): Don't overwrite host tag in redis plugin.
- [#336](https://github.com/influxdata/telegraf/pull/336): Mongodb plugin should take 2 measurements.
- [#351](https://github.com/influxdata/telegraf/issues/317): Fix continual "CREATE DATABASE" in writes
- [#360](https://github.com/influxdata/telegraf/pull/360): Apply prefix before ShouldPass check. Thanks @sotfo!

## v0.2.0 [2015-10-27]
@@ -122,38 +605,38 @@ be controlled via the `round_interval` and `flush_jitter` config options.

- Telegraf will now retry metric flushes twice

### Features

- [#205](https://github.com/influxdata/telegraf/issues/205): Include per-db redis keyspace info
- [#226](https://github.com/influxdata/telegraf/pull/226): Add timestamps to points in Kafka/AMQP outputs. Thanks @ekini
- [#90](https://github.com/influxdata/telegraf/issues/90): Add Docker labels to tags in docker plugin
- [#223](https://github.com/influxdata/telegraf/pull/223): Add port tag to nginx plugin. Thanks @neezgee!
- [#227](https://github.com/influxdata/telegraf/pull/227): Add command intervals to exec plugin. Thanks @jpalay!
- [#241](https://github.com/influxdata/telegraf/pull/241): MQTT Output. Thanks @shirou!
- Memory plugin: cached and buffered measurements re-added
- Logging: additional logging for each collection interval, track the number
of metrics collected and from how many inputs.
- [#240](https://github.com/influxdata/telegraf/pull/240): procstat plugin, thanks @ranjib!
- [#244](https://github.com/influxdata/telegraf/pull/244): netstat plugin, thanks @shirou!
- [#262](https://github.com/influxdata/telegraf/pull/262): zookeeper plugin, thanks @jrxFive!
- [#237](https://github.com/influxdata/telegraf/pull/237): statsd service plugin, thanks @sparrc
- [#273](https://github.com/influxdata/telegraf/pull/273): puppet agent plugin, thanks @jrxFive!
- [#280](https://github.com/influxdata/telegraf/issues/280): Use InfluxDB client v2.
- [#281](https://github.com/influxdata/telegraf/issues/281): Eliminate need to deep copy Batch Points.
- [#286](https://github.com/influxdata/telegraf/issues/286): bcache plugin, thanks @cornerot!
- [#287](https://github.com/influxdata/telegraf/issues/287): Batch AMQP output, thanks @ekini!
- [#301](https://github.com/influxdata/telegraf/issues/301): Collect on even intervals
- [#298](https://github.com/influxdata/telegraf/pull/298): Support retrying output writes
- [#300](https://github.com/influxdata/telegraf/issues/300): aerospike plugin. Thanks @oldmantaiter!
- [#322](https://github.com/influxdata/telegraf/issues/322): Librato output. Thanks @jipperinbham!

### Bugfixes

- [#228](https://github.com/influxdata/telegraf/pull/228): New version of package will replace old one. Thanks @ekini!
- [#232](https://github.com/influxdata/telegraf/pull/232): Fix bashism run during deb package installation. Thanks @yankcrime!
- [#261](https://github.com/influxdata/telegraf/issues/260): RabbitMQ panics if wrong credentials given. Thanks @ekini!
- [#245](https://github.com/influxdata/telegraf/issues/245): Document Exec plugin example. Thanks @ekini!
- [#264](https://github.com/influxdata/telegraf/issues/264): logrotate config file fixes. Thanks @linsomniac!
- [#290](https://github.com/influxdata/telegraf/issues/290): Fix some plugins sending their values as strings.
- [#289](https://github.com/influxdata/telegraf/issues/289): Fix accumulator panic on nil tags.
- [#302](https://github.com/influxdata/telegraf/issues/302): Fix `[tags]` getting applied, thanks @gotyaoi!

## v0.1.9 [2015-09-22]
@@ -163,7 +646,7 @@ will still be backwards compatible if only `url` is specified.

- The -test flag will now output two metric collections
- Support for filtering telegraf outputs on the CLI -- Telegraf will now
allow filtering of output sinks on the command-line using the `-outputfilter`
flag, much like how the `-filter` flag works for inputs.
- Support for filtering on config-file creation -- Telegraf now supports
filtering in the -sample-config command. You can now run
`telegraf -sample-config -filter cpu -outputfilter influxdb` to get a config
@@ -179,27 +662,27 @@ have been renamed for consistency. Some measurements have also been removed from

re-added in a "verbose" mode if there is demand for it.

### Features

- [#143](https://github.com/influxdata/telegraf/issues/143): InfluxDB clustering support
- [#181](https://github.com/influxdata/telegraf/issues/181): Makefile GOBIN support. Thanks @Vye!
- [#203](https://github.com/influxdata/telegraf/pull/200): AMQP output. Thanks @ekini!
- [#182](https://github.com/influxdata/telegraf/pull/182): OpenTSDB output. Thanks @rplessl!
- [#187](https://github.com/influxdata/telegraf/pull/187): Retry output sink connections on startup.
- [#220](https://github.com/influxdata/telegraf/pull/220): Add port tag to apache plugin. Thanks @neezgee!
- [#217](https://github.com/influxdata/telegraf/pull/217): Add filtering for output sinks
and filtering when specifying a config file.

### Bugfixes

- [#170](https://github.com/influxdata/telegraf/issues/170): Systemd support
- [#175](https://github.com/influxdata/telegraf/issues/175): Set write precision before gathering metrics
- [#178](https://github.com/influxdata/telegraf/issues/178): redis plugin, multiple server thread hang bug
- Fix net plugin on darwin
- [#84](https://github.com/influxdata/telegraf/issues/84): Fix docker plugin on CentOS. Thanks @neezgee!
- [#189](https://github.com/influxdata/telegraf/pull/189): Fix mem_used_perc. Thanks @mced!
- [#192](https://github.com/influxdata/telegraf/issues/192): Increase compatibility of postgresql plugin. Now supports versions 8.1+
- [#203](https://github.com/influxdata/telegraf/issues/203): EL5 rpm support. Thanks @ekini!
- [#206](https://github.com/influxdata/telegraf/issues/206): CPU steal/guest values wrong on linux.
- [#212](https://github.com/influxdata/telegraf/issues/212): Add hashbang to postinstall script. Thanks @ekini!
- [#212](https://github.com/influxdata/telegraf/issues/212): Fix makefile warning. Thanks @ekini!

## v0.1.8 [2015-09-04]
@@ -208,106 +691,106 @@ and filtering when specifying a config file.

- Now using Go 1.5 to build telegraf

### Features

- [#150](https://github.com/influxdata/telegraf/pull/150): Add Host Uptime metric to system plugin
- [#158](https://github.com/influxdata/telegraf/pull/158): Apache Plugin. Thanks @KPACHbIuLLIAnO4
- [#159](https://github.com/influxdata/telegraf/pull/159): Use second precision for InfluxDB writes
- [#165](https://github.com/influxdata/telegraf/pull/165): Add additional metrics to mysql plugin. Thanks @nickscript0
- [#162](https://github.com/influxdata/telegraf/pull/162): Write UTC by default, provide option
- [#166](https://github.com/influxdata/telegraf/pull/166): Upload binaries to S3
- [#169](https://github.com/influxdata/telegraf/pull/169): Ping plugin

### Bugfixes

## v0.1.7 [2015-08-28]

### Features

- [#38](https://github.com/influxdata/telegraf/pull/38): Kafka output producer.
- [#133](https://github.com/influxdata/telegraf/pull/133): Add plugin.Gather error logging. Thanks @nickscript0!
- [#136](https://github.com/influxdata/telegraf/issues/136): Add a -usage flag for printing usage of a single plugin.
- [#137](https://github.com/influxdata/telegraf/issues/137): Memcached: fix when a value contains a space
- [#138](https://github.com/influxdata/telegraf/issues/138): MySQL server address tag.
- [#142](https://github.com/influxdata/telegraf/pull/142): Add Description and SampleConfig funcs to output interface
- Indent the toml config file for readability

### Bugfixes

- [#128](https://github.com/influxdata/telegraf/issues/128): system_load measurement missing.
- [#129](https://github.com/influxdata/telegraf/issues/129): Latest pkg url fix.
- [#131](https://github.com/influxdata/telegraf/issues/131): Fix memory reporting on linux & darwin. Thanks @subhachandrachandra!
- [#140](https://github.com/influxdata/telegraf/issues/140): Memory plugin prec->perc typo fix. Thanks @brunoqc!

## v0.1.6 [2015-08-20]

### Features

- [#112](https://github.com/influxdata/telegraf/pull/112): Datadog output. Thanks @jipperinbham!
- [#116](https://github.com/influxdata/telegraf/pull/116): Use godep to vendor all dependencies
- [#120](https://github.com/influxdata/telegraf/pull/120): Httpjson plugin. Thanks @jpalay & @alvaromorales!

### Bugfixes

- [#113](https://github.com/influxdata/telegraf/issues/113): Update README with Telegraf/InfluxDB compatibility
- [#118](https://github.com/influxdata/telegraf/pull/118): Fix for disk usage stats in Windows. Thanks @srfraser!
- [#122](https://github.com/influxdata/telegraf/issues/122): Fix for DiskUsage segv fault. Thanks @srfraser!
- [#126](https://github.com/influxdata/telegraf/issues/126): Nginx plugin not catching net.SplitHostPort error

## v0.1.5 [2015-08-13]

### Features

- [#54](https://github.com/influxdata/telegraf/pull/54): MongoDB plugin. Thanks @jipperinbham!
- [#55](https://github.com/influxdata/telegraf/pull/55): Elasticsearch plugin. Thanks @brocaar!
- [#71](https://github.com/influxdata/telegraf/pull/71): HAProxy plugin. Thanks @kureikain!
- [#72](https://github.com/influxdata/telegraf/pull/72): Adding TokuDB metrics to MySQL. Thanks vadimtk!
- [#73](https://github.com/influxdata/telegraf/pull/73): RabbitMQ plugin. Thanks @ianunruh!
- [#77](https://github.com/influxdata/telegraf/issues/77): Automatically create database.
- [#79](https://github.com/influxdata/telegraf/pull/56): Nginx plugin. Thanks @codeb2cc!
- [#86](https://github.com/influxdata/telegraf/pull/86): Lustre2 plugin. Thanks srfraser!
- [#91](https://github.com/influxdata/telegraf/pull/91): Unit testing
- [#92](https://github.com/influxdata/telegraf/pull/92): Exec plugin. Thanks @alvaromorales!
- [#98](https://github.com/influxdata/telegraf/pull/98): LeoFS plugin. Thanks @mocchira!
- [#103](https://github.com/influxdata/telegraf/pull/103): Filter by metric tags. Thanks @srfraser!
- [#106](https://github.com/influxdata/telegraf/pull/106): Options to filter plugins on startup. Thanks @zepouet!
- [#107](https://github.com/influxdata/telegraf/pull/107): Multiple outputs beyond influxdb. Thanks @jipperinbham!
- [#108](https://github.com/influxdata/telegraf/issues/108): Support setting per-CPU and total-CPU gathering.
- [#111](https://github.com/influxdata/telegraf/pull/111): Report CPU Usage in cpu plugin. Thanks @jpalay!

### Bugfixes

- [#85](https://github.com/influxdata/telegraf/pull/85): Fix GetLocalHost testutil function for mac users
- [#89](https://github.com/influxdata/telegraf/pull/89): go fmt fixes
- [#94](https://github.com/influxdata/telegraf/pull/94): Fix for issue #93, explicitly call sarama.v1 -> sarama
- [#101](https://github.com/influxdata/telegraf/issues/101): switch back from master branch if building locally
- [#99](https://github.com/influxdata/telegraf/issues/99): update integer output to new InfluxDB line protocol format

## v0.1.4 [2015-07-09]

### Features

- [#56](https://github.com/influxdata/telegraf/pull/56): Update README for Kafka plugin. Thanks @EmilS!

### Bugfixes

- [#50](https://github.com/influxdata/telegraf/pull/50): Fix init.sh script to use telegraf directory. Thanks @jseriff!
- [#52](https://github.com/influxdata/telegraf/pull/52): Update CHANGELOG to reference updated directory. Thanks @benfb!

## v0.1.3 [2015-07-05]

### Features

- [#35](https://github.com/influxdata/telegraf/pull/35): Add Kafka plugin. Thanks @EmilS!
- [#47](https://github.com/influxdata/telegraf/pull/47): Add RethinkDB plugin. Thanks @jipperinbham!

### Bugfixes

- [#45](https://github.com/influxdata/telegraf/pull/45): Skip disk tags that don't have a value. Thanks @jhofeditz!
- [#43](https://github.com/influxdata/telegraf/pull/43): Fix bug in MySQL plugin. Thanks @marcosnils!

## v0.1.2 [2015-07-01]

### Features

- [#12](https://github.com/influxdata/telegraf/pull/12): Add Linux/ARM to the list of built binaries. Thanks @voxxit!
- [#14](https://github.com/influxdata/telegraf/pull/14): Clarify the S3 buckets that Telegraf is pushed to.
- [#16](https://github.com/influxdata/telegraf/pull/16): Convert Redis to use URI, support Redis AUTH. Thanks @jipperinbham!
- [#21](https://github.com/influxdata/telegraf/pull/21): Add memcached plugin. Thanks @Yukki!

### Bugfixes

- [#13](https://github.com/influxdata/telegraf/pull/13): Fix the packaging script.
- [#19](https://github.com/influxdata/telegraf/pull/19): Add host name to metric tags. Thanks @sherifzain!
- [#20](https://github.com/influxdata/telegraf/pull/20): Fix race condition with accumulator mutex. Thanks @nkatsaros!
- [#23](https://github.com/influxdata/telegraf/pull/23): Change name of folder for packages. Thanks @colinrymer!
- [#32](https://github.com/influxdata/telegraf/pull/32): Fix spelling of memoory -> memory. Thanks @tylernisonoff!

## v0.1.1 [2015-06-19]

CONTRIBUTING.md
@@ -1,103 +1,72 @@

## Steps for Contributing

1. [Sign the CLA](http://influxdb.com/community/cla.html)
1. Make changes or write plugin (see below for details)
1. Add your plugin to `plugins/inputs/all/all.go` or `plugins/outputs/all/all.go`
1. If your plugin requires a new Go package,
[add it](https://github.com/influxdata/telegraf/blob/master/CONTRIBUTING.md#adding-a-dependency)
1. Write a README for your plugin; if it's an input plugin, it should be structured
like the [input example here](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/EXAMPLE_README.md).
Output plugins' READMEs are less structured,
but any information you can provide on how the data will look is appreciated.
See the [OpenTSDB output](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/opentsdb)
for a good example.

## GoDoc

Public interfaces for inputs, outputs, metrics, and the accumulator can be found
on the GoDoc:
[godoc.org/github.com/influxdata/telegraf](https://godoc.org/github.com/influxdata/telegraf)

## Sign the CLA

Before we can merge a pull request, you will need to sign the CLA,
which can be found [on our website](http://influxdb.com/community/cla.html)

## Adding a dependency

Assuming you can already build the project, run these in the telegraf directory:

1. `go get github.com/sparrc/gdm`
1. `gdm restore`
1. `gdm save`

## Input Plugins

This section is for developers who want to create new collection inputs.
Telegraf is entirely plugin driven. This interface allows for operators to
pick and choose what is gathered and makes it easy for developers
to create new ways of generating metrics.

Plugin authorship is kept as simple as possible to promote people to develop
and submit new inputs.

### Input Plugin Guidelines

* A plugin must conform to the `telegraf.Input` interface.
* Input Plugins should call `inputs.Add` in their `init` function to register themselves.
See below for a quick example.
* Input Plugins must be added to the
`github.com/influxdata/telegraf/plugins/inputs/all/all.go` file.
* The `SampleConfig` function should return valid toml that describes how the
plugin can be configured. This is included in `telegraf -sample-config`.
* The `Description` function should say in one line what this plugin does.

Let's say you've written a plugin that emits metrics about processes on the
current host.

### Input Plugin Example

```go
package simple

// simple.go

import (
	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/inputs"
)

type Simple struct {
	Ok bool
@@ -111,7 +80,7 @@ func (s *Simple) SampleConfig() string {
	return "ok = true # indicate if everything is fine"
}

func (s *Simple) Gather(acc telegraf.Accumulator) error {
	if s.Ok {
		acc.Add("state", "pretty good", nil)
	} else {
@@ -122,19 +91,65 @@ func (s *Simple) Gather(acc plugins.Accumulator) error {
}

func init() {
	inputs.Add("simple", func() telegraf.Input { return &Simple{} })
}
```

## Input Plugins Accepting Arbitrary Data Formats

Some input plugins (such as
[exec](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/exec))
accept arbitrary input data formats. An overview of these data formats can
be found
[here](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md).

In order to enable this, you must specify a `SetParser(parser parsers.Parser)`
function on the plugin object (see the exec plugin for an example), as well as
defining `parser` as a field of the object.

You can then utilize the parser internally in your plugin, parsing data as you
see fit. Telegraf's configuration layer will take care of instantiating and
creating the `Parser` object.

You should also add the following to your SampleConfig() return:

```toml
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "influx"
```

Below is the `Parser` interface.

```go
// Parser is an interface defining functions that a parser plugin must satisfy.
type Parser interface {
	// Parse takes a byte buffer separated by newlines
	// ie, `cpu.usage.idle 90\ncpu.usage.busy 10`
	// and parses it into telegraf metrics
	Parse(buf []byte) ([]telegraf.Metric, error)

	// ParseLine takes a single string metric
	// ie, "cpu.usage.idle 90"
	// and parses it into a telegraf metric.
	ParseLine(line string) (telegraf.Metric, error)
}
```

And you can view the code
[here.](https://github.com/influxdata/telegraf/blob/henrypfhu-master/plugins/parsers/registry.go)
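
For orientation, here is a minimal sketch of an input that wires a parser in
as described above (a sketch only: the `Example` type and its fake raw data
are illustrative, and the `Name`/`Fields`/`Tags`/`Time` accessors on
`telegraf.Metric` are assumptions, not shown in this document):

```go
package example

import (
	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/inputs"
	"github.com/influxdata/telegraf/plugins/parsers"
)

// Example is a hypothetical input that collects raw bytes from somewhere
// and defers their interpretation to the configured parser.
type Example struct {
	parser parsers.Parser // filled in by Telegraf's configuration layer
}

// SetParser follows the convention described above.
func (e *Example) SetParser(parser parsers.Parser) { e.parser = parser }

func (e *Example) Description() string  { return "a sketch of a parser-aware input" }
func (e *Example) SampleConfig() string { return `  data_format = "influx"` }

func (e *Example) Gather(acc telegraf.Accumulator) error {
	raw := []byte("cpu.usage.idle 90\ncpu.usage.busy 10") // stand-in for real data
	metrics, err := e.parser.Parse(raw)
	if err != nil {
		return err
	}
	for _, m := range metrics {
		// Name/Fields/Tags/Time accessors on telegraf.Metric are assumed here.
		acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time())
	}
	return nil
}

func init() {
	inputs.Add("example", func() telegraf.Input { return &Example{} })
}
```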
## Service Input Plugins

This section is for developers who want to create new "service" collection
inputs. A service plugin differs from a regular plugin in that it operates
a background service while Telegraf is running. One example would be the `statsd`
plugin, which operates a statsd server.

Service Input Plugins are substantially more complicated than a regular plugin, as they
will require threads and locks to verify data integrity. Service Input Plugins should
be avoided unless there is no way to create their behavior with a regular plugin.

Their interface is quite similar to a regular plugin, with the addition of `Start()`
@@ -143,49 +158,25 @@ and `Stop()` methods.

### Service Plugin Guidelines

* Same as the `Plugin` guidelines, except that they must conform to the
`inputs.ServiceInput` interface; a sketch of that interface follows below.
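
The interface itself is not reproduced in this document; judging from the
description above (a regular input plus `Start()` and `Stop()`), it presumably
looks something like this sketch:

```go
// A sketch of the inputs.ServiceInput interface, inferred from the
// description above; the authoritative definition lives in the
// telegraf source tree.
type ServiceInput interface {
	telegraf.Input

	// Start begins the background service (e.g. a statsd listener).
	Start() error

	// Stop cleanly shuts the service down.
	Stop()
}
```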

## Output Plugins

This section is for developers who want to create a new output sink. Outputs
are created in a similar manner as collection plugins, and their interface has
similar constructs.

### Output Plugin Guidelines

* An output must conform to the `outputs.Output` interface.
* Outputs should call `outputs.Add` in their `init` function to register themselves.
See below for a quick example.
* To be available within Telegraf itself, plugins must add themselves to the
`github.com/influxdata/telegraf/plugins/outputs/all/all.go` file.
* The `SampleConfig` function should return valid toml that describes how the
output can be configured. This is included in `telegraf -sample-config`.
* The `Description` function should say in one line what this output does.

### Output Example

```go
@@ -193,7 +184,10 @@ package simpleoutput
// simpleoutput.go

import (
	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/outputs"
)

type Simple struct {
	Ok bool
@@ -217,20 +211,47 @@ func (s *Simple) Close() error {
	return nil
}

func (s *Simple) Write(metrics []telegraf.Metric) error {
	for _, metric := range metrics {
		// write `metric` to the output sink here
	}
	return nil
}

func init() {
	outputs.Add("simpleoutput", func() telegraf.Output { return &Simple{} })
}
```

## Output Plugins Writing Arbitrary Data Formats

Some output plugins (such as
[file](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/file))
can write arbitrary output data formats. An overview of these data formats can
be found
[here](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md).

In order to enable this, you must specify a
`SetSerializer(serializer serializers.Serializer)`
function on the plugin object (see the file plugin for an example), as well as
defining `serializer` as a field of the object.

You can then utilize the serializer internally in your plugin, serializing data
before it's written. Telegraf's configuration layer will take care of
instantiating and creating the `Serializer` object.

You should also add the following to your SampleConfig() return:

```toml
## Data format to output.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "influx"
```
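
For orientation, a sketch of how the `simpleoutput` plugin shown earlier might
wire a serializer in (a sketch only: the `Serialize` method returning a slice
of lines is an assumption, since the `Serializer` interface is not reproduced
in this document):

```go
package simpleoutput

import (
	"fmt"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/serializers"
)

// Simple is the output from the example above, extended with a serializer
// field that Telegraf's configuration layer fills in via SetSerializer.
type Simple struct {
	Ok         bool
	serializer serializers.Serializer
}

func (s *Simple) SetSerializer(serializer serializers.Serializer) {
	s.serializer = serializer
}

func (s *Simple) Write(metrics []telegraf.Metric) error {
	for _, metric := range metrics {
		// Serialize is assumed here to render one metric as a slice of lines.
		lines, err := s.serializer.Serialize(metric)
		if err != nil {
			return err
		}
		for _, line := range lines {
			fmt.Println(line) // stand-in for the real output sink
		}
	}
	return nil
}
```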
## Service Output Plugins

This section is for developers who want to create a new "service" output. A
service output differs from a regular output in that it operates a background service
@@ -243,21 +264,7 @@ and `Stop()` methods.

### Service Output Guidelines

* Same as the `Output` guidelines, except that they must conform to the
`output.ServiceOutput` interface; a sketch of that interface follows below.
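
As with service inputs, the interface is not shown here; inferring from the
`Output` guidelines plus the added `Start()` and `Stop()` methods, it
presumably looks something like:

```go
// A sketch of the ServiceOutput interface, inferred from the Output
// guidelines above plus the added Start/Stop methods; the authoritative
// definition lives in the telegraf source tree.
type ServiceOutput interface {
	telegraf.Output

	// Start begins the background service.
	Start() error

	// Stop cleanly shuts the service down.
	Stop()
}
```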
## Unit Tests
@@ -274,7 +281,7 @@ which would take some time to replicate.

To overcome this situation we've decided to use docker containers to provide a
fast and reproducible environment to test those services which require it.
For other situations
(i.e: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/redis/redis_test.go)
a simple mock will suffice.

To execute Telegraf tests follow these simple steps:

Godeps
@@ -1,52 +1,59 @@
|
|||||||
-git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git dbd8d5c40a582eb9adacde36b47932b3a3ad0034
+github.com/Shopify/sarama 8aadb476e66ca998f2f6bb3c993e9a2daa3666b9
-github.com/Shopify/sarama 159e9990b0796511607dd0d7aaa3eb37d1829d16
+github.com/Sirupsen/logrus 219c8cb75c258c552e999735be6df753ffc7afdc
-github.com/Sirupsen/logrus 446d1c146faa8ed3f4218f056fcd165f6bcfda81
+github.com/amir/raidman 53c1b967405155bfc8758557863bf2e14f814687
-github.com/amir/raidman 6a8e089bbe32e6b907feae5ba688841974b3c339
+github.com/aws/aws-sdk-go 13a12060f716145019378a10e2806c174356b857
-github.com/armon/go-metrics 06b60999766278efd6d2b5d8418a58c3d5b99e87
+github.com/beorn7/perks 3ac7bf7a47d159a033b107610db8a1b6575507a4
-github.com/aws/aws-sdk-go 999b1591218c36d5050d1ba7266eba956e65965f
-github.com/beorn7/perks b965b613227fddccbfffe13eae360ed3fa822f8d
-github.com/boltdb/bolt b34b35ea8d06bb9ae69d9a349119252e4c1d8ee0
 github.com/cenkalti/backoff 4dc77674aceaabba2c7e3da25d4c823edfb73f99
-github.com/dancannon/gorethink a124c9663325ed9f7fb669d17c69961b59151e6e
+github.com/couchbase/go-couchbase cb664315a324d87d19c879d9cc67fda6be8c2ac1
+github.com/couchbase/gomemcached a5ea6356f648fec6ab89add00edd09151455b4b2
+github.com/couchbase/goutils 5823a0cbaaa9008406021dc5daf80125ea30bba6
+github.com/dancannon/gorethink e7cac92ea2bc52638791a021f212145acfedb1fc
 github.com/davecgh/go-spew 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d
-github.com/eapache/go-resiliency f341fb4dca45128e4aa86389fa6a675d55fe25e1
+github.com/docker/engine-api 8924d6900370b4c7e7984be5adc61f50a80d7537
+github.com/docker/go-connections f549a9393d05688dff0992ef3efd8bbe6c628aeb
+github.com/docker/go-units 5d2041e26a699eaca682e2ea41c8f891e1060444
+github.com/eapache/go-resiliency b86b1ec0dd4209a588dc1285cdd471e73525c0b3
 github.com/eapache/queue ded5959c0d4e360646dc9e9908cff48666781367
-github.com/fsouza/go-dockerclient 7177a9e3543b0891a5d91dbf7051e0f71455c8ef
+github.com/eclipse/paho.mqtt.golang 0f7a459f04f13a41b7ed752d47944528d4bf9a86
-github.com/go-ini/ini 9314fb0ef64171d6a3d0a4fa570dfa33441cba05
+github.com/go-sql-driver/mysql 1fca743146605a172a266e1654e01e5cd5669bee
-github.com/go-sql-driver/mysql d512f204a577a4ab037a1816604c48c9c13210be
+github.com/gobwas/glob 49571a1557cd20e6a2410adc6421f85b66c730b5
-github.com/gogo/protobuf e492fd34b12d0230755c45aa5fb1e1eea6a84aa9
+github.com/golang/protobuf 552c7b9542c194800fd493123b3798ef0a832032
-github.com/golang/protobuf 68415e7123da32b07eab49c96d2c4d6158360e9b
+github.com/golang/snappy 427fb6fc07997f43afa32f35e850833760e489a7
-github.com/golang/snappy 723cc1e459b8eea2dea4583200fd60757d40097a
 github.com/gonuts/go-shellquote e842a11b24c6abfb3dd27af69a17f482e4b483c2
-github.com/hailocab/go-hostpool 0637eae892be221164aff5fcbccc57171aea6406
+github.com/gorilla/context 1ea25387ff6f684839d82767c1733ff4d4d15d0a
-github.com/hashicorp/go-msgpack fa3f63826f7c23912c15263591e65d54d080b458
+github.com/gorilla/mux c9e326e2bdec29039a3761c07bece13133863e1e
-github.com/hashicorp/raft d136cd15dfb7876fd7c89cad1995bc4f19ceb294
+github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478
-github.com/hashicorp/raft-boltdb d1e82c1ec3f15ee991f7cc7ffd5b67ff6f5bbaee
+github.com/hashicorp/consul 5aa90455ce78d4d41578bafc86305e6e6b28d7d2
-github.com/influxdb/influxdb 69a7664f2d4b75aec300b7cbfc7e57c971721f04
+github.com/hpcloud/tail b2940955ab8b26e19d43a43c4da0475dd81bdb56
-github.com/jmespath/go-jmespath c01cf91b011868172fdcd9f41838e80c9d716264
+github.com/influxdata/config b79f6829346b8d6e78ba73544b1e1038f1f1c9da
-github.com/klauspost/crc32 0aff1ea9c20474c3901672b5b6ead0ac611156de
+github.com/influxdata/influxdb e094138084855d444195b252314dfee9eae34cab
-github.com/lib/pq 11fc39a580a008f1f39bb3d11d984fb34ed778d9
+github.com/influxdata/toml af4df43894b16e3fd2b788d01bd27ad0776ef2d0
+github.com/klauspost/crc32 19b0b332c9e4516a6370a0456e6182c3b5036720
+github.com/lib/pq e182dc4027e2ded4b19396d638610f2653295f36
 github.com/matttproud/golang_protobuf_extensions d0c3fe89de86839aecf2e0579c40ba3bb336a453
+github.com/miekg/dns cce6c130cdb92c752850880fd285bea1d64439dd
 github.com/mreiferson/go-snappystream 028eae7ab5c4c9e2d1cb4c4ca1e53259bbe7e504
 github.com/naoina/go-stringutil 6b638e95a32d0c1131db0e7fe83775cbea4a0d0b
-github.com/naoina/toml 751171607256bb66e64c9f0220c00662420c38e9
+github.com/nats-io/nats b13fc9d12b0b123ebc374e6b808c6228ae4234a3
-github.com/nsqio/go-nsq 2118015c120962edc5d03325c680daf3163a8b5f
+github.com/nats-io/nuid 4f84f5f3b2786224e336af2e13dba0a0a80b76fa
-github.com/pborman/uuid cccd189d45f7ac3368a0d127efb7f4d08ae0b655
+github.com/nsqio/go-nsq 0b80d6f05e15ca1930e0c5e1d540ed627e299980
-github.com/pmezard/go-difflib e8554b8641db39598be7f6342874b958f12ae1d4
+github.com/opencontainers/runc 89ab7f2ccc1e45ddf6485eaa802c35dcf321dfc8
-github.com/prometheus/client_golang 67994f177195311c3ea3d4407ed0175e34a4256f
+github.com/prometheus/client_golang 18acf9993a863f4c4b40612e19cdd243e7c86831
 github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
-github.com/prometheus/common 56b90312e937d43b930f06a59bf0d6a4ae1944bc
+github.com/prometheus/common e8eabff8812b05acf522b45fdcd725a785188e37
 github.com/prometheus/procfs 406e5b7bfd8201a36e2bb5f7bdae0b03380c2ce8
 github.com/samuel/go-zookeeper 218e9c81c0dd8b3b18172b2bbfad92cc7d6db55f
-github.com/shirou/gopsutil fc932d9090f13a84fb4b3cb8baa124610cab184c
+github.com/shirou/gopsutil 586bb697f3ec9f8ec08ffefe18f521a64534037c
+github.com/soniah/gosnmp b1b4f885b12c5dcbd021c5cee1c904110de6db7d
 github.com/streadway/amqp b4f3ceab0337f013208d31348b578d83c0064744
-github.com/stretchr/objx 1a9d0bb9f541897e62256577b352fdbc1fb4fd94
+github.com/stretchr/testify 1f4a1643a57e798696635ea4c126e9127adb7d3c
-github.com/stretchr/testify e3a8ff8ce36581f87a15341206f205b1da467059
+github.com/wvanbergen/kafka 46f9a1cf3f670edec492029fadded9c2d9e18866
-github.com/wvanbergen/kafka 1a8639a45164fcc245d5c7b4bd3ccfbd1a0ffbf3
 github.com/wvanbergen/kazoo-go 0f768712ae6f76454f987c3356177e138df258f8
-golang.org/x/crypto 7b85b097bf7527677d54d3220065e966a0e3b613
+github.com/zensqlmonitor/go-mssqldb ffe5510c6fa5e15e6d983210ab501c815b56b363
-golang.org/x/net 1796f9b8b7178e3c7587dff118d3bb9d37f9b0b3
+golang.org/x/crypto 5dc8cb4b8a8eb076cbb5a06bc3b8682c15bdbbd3
-gopkg.in/dancannon/gorethink.v1 a124c9663325ed9f7fb669d17c69961b59151e6e
+golang.org/x/net 6acef71eb69611914f7a30939ea9f6e194c78172
+golang.org/x/text a71fd10341b064c10f4a81ceac72bcf70f26ea34
+gopkg.in/dancannon/gorethink.v1 7d1af5be49cb5ecc7b177bf387d232050299d6ef
 gopkg.in/fatih/pool.v2 cba550ebf9bce999a02e963296d4bc7a486cb715
-gopkg.in/mgo.v2 e30de8ac9ae3b30df7065f766c71f88bba7d4e49
+gopkg.in/mgo.v2 d90005c5262a3463800497ea5a89aed5fe22c886
-gopkg.in/yaml.v2 f7716cbe52baa25d2e9b0d0da546fcf909fc16b4
+gopkg.in/yaml.v2 a83829b6f1293c91addabc89d0571c246397bbf4

Godeps_windows (59 lines, new file)
@@ -0,0 +1,59 @@
+github.com/Microsoft/go-winio 9f57cbbcbcb41dea496528872a4f0e37a4f7ae98
+github.com/Shopify/sarama 8aadb476e66ca998f2f6bb3c993e9a2daa3666b9
+github.com/Sirupsen/logrus 219c8cb75c258c552e999735be6df753ffc7afdc
+github.com/StackExchange/wmi f3e2bae1e0cb5aef83e319133eabfee30013a4a5
+github.com/amir/raidman 53c1b967405155bfc8758557863bf2e14f814687
+github.com/aws/aws-sdk-go 13a12060f716145019378a10e2806c174356b857
+github.com/beorn7/perks 3ac7bf7a47d159a033b107610db8a1b6575507a4
+github.com/cenkalti/backoff 4dc77674aceaabba2c7e3da25d4c823edfb73f99
+github.com/couchbase/go-couchbase cb664315a324d87d19c879d9cc67fda6be8c2ac1
+github.com/couchbase/gomemcached a5ea6356f648fec6ab89add00edd09151455b4b2
+github.com/couchbase/goutils 5823a0cbaaa9008406021dc5daf80125ea30bba6
+github.com/dancannon/gorethink e7cac92ea2bc52638791a021f212145acfedb1fc
+github.com/davecgh/go-spew 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d
+github.com/docker/engine-api 8924d6900370b4c7e7984be5adc61f50a80d7537
+github.com/docker/go-connections f549a9393d05688dff0992ef3efd8bbe6c628aeb
+github.com/docker/go-units 5d2041e26a699eaca682e2ea41c8f891e1060444
+github.com/eapache/go-resiliency b86b1ec0dd4209a588dc1285cdd471e73525c0b3
+github.com/eapache/queue ded5959c0d4e360646dc9e9908cff48666781367
+github.com/eclipse/paho.mqtt.golang 0f7a459f04f13a41b7ed752d47944528d4bf9a86
+github.com/go-ole/go-ole 50055884d646dd9434f16bbb5c9801749b9bafe4
+github.com/go-sql-driver/mysql 1fca743146605a172a266e1654e01e5cd5669bee
+github.com/golang/protobuf 552c7b9542c194800fd493123b3798ef0a832032
+github.com/golang/snappy 427fb6fc07997f43afa32f35e850833760e489a7
+github.com/gonuts/go-shellquote e842a11b24c6abfb3dd27af69a17f482e4b483c2
+github.com/gorilla/context 1ea25387ff6f684839d82767c1733ff4d4d15d0a
+github.com/gorilla/mux c9e326e2bdec29039a3761c07bece13133863e1e
+github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478
+github.com/influxdata/config b79f6829346b8d6e78ba73544b1e1038f1f1c9da
+github.com/influxdata/influxdb e3fef5593c21644f2b43af55d6e17e70910b0e48
+github.com/influxdata/toml af4df43894b16e3fd2b788d01bd27ad0776ef2d0
+github.com/klauspost/crc32 19b0b332c9e4516a6370a0456e6182c3b5036720
+github.com/lib/pq e182dc4027e2ded4b19396d638610f2653295f36
+github.com/lxn/win 9a7734ea4db26bc593d52f6a8a957afdad39c5c1
+github.com/matttproud/golang_protobuf_extensions d0c3fe89de86839aecf2e0579c40ba3bb336a453
+github.com/miekg/dns cce6c130cdb92c752850880fd285bea1d64439dd
+github.com/mreiferson/go-snappystream 028eae7ab5c4c9e2d1cb4c4ca1e53259bbe7e504
+github.com/naoina/go-stringutil 6b638e95a32d0c1131db0e7fe83775cbea4a0d0b
+github.com/nats-io/nats b13fc9d12b0b123ebc374e6b808c6228ae4234a3
+github.com/nats-io/nuid 4f84f5f3b2786224e336af2e13dba0a0a80b76fa
+github.com/nsqio/go-nsq 0b80d6f05e15ca1930e0c5e1d540ed627e299980
+github.com/prometheus/client_golang 18acf9993a863f4c4b40612e19cdd243e7c86831
+github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
+github.com/prometheus/common e8eabff8812b05acf522b45fdcd725a785188e37
+github.com/prometheus/procfs 406e5b7bfd8201a36e2bb5f7bdae0b03380c2ce8
+github.com/samuel/go-zookeeper 218e9c81c0dd8b3b18172b2bbfad92cc7d6db55f
+github.com/shirou/gopsutil 1f32ce1bb380845be7f5d174ac641a2c592c0c42
+github.com/shirou/w32 ada3ba68f000aa1b58580e45c9d308fe0b7fc5c5
+github.com/soniah/gosnmp b1b4f885b12c5dcbd021c5cee1c904110de6db7d
+github.com/streadway/amqp b4f3ceab0337f013208d31348b578d83c0064744
+github.com/stretchr/testify 1f4a1643a57e798696635ea4c126e9127adb7d3c
+github.com/wvanbergen/kafka 46f9a1cf3f670edec492029fadded9c2d9e18866
+github.com/wvanbergen/kazoo-go 0f768712ae6f76454f987c3356177e138df258f8
+github.com/zensqlmonitor/go-mssqldb ffe5510c6fa5e15e6d983210ab501c815b56b363
+golang.org/x/net 6acef71eb69611914f7a30939ea9f6e194c78172
+golang.org/x/text a71fd10341b064c10f4a81ceac72bcf70f26ea34
+gopkg.in/dancannon/gorethink.v1 7d1af5be49cb5ecc7b177bf387d232050299d6ef
+gopkg.in/fatih/pool.v2 cba550ebf9bce999a02e963296d4bc7a486cb715
+gopkg.in/mgo.v2 d90005c5262a3463800497ea5a89aed5fe22c886
+gopkg.in/yaml.v2 a83829b6f1293c91addabc89d0571c246397bbf4

Makefile (56 changes)
@@ -9,36 +9,41 @@ endif
# Standard Telegraf build
default: prepare build

+# Windows build
+windows: prepare-windows build-windows
+
# Only run the build (no dependency grabbing)
build:
-    go build -o telegraf -ldflags \
-        "-X main.Version=$(VERSION)" \
+    go install -ldflags "-X main.version=$(VERSION)" ./...
+
+build-windows:
+    go build -o telegraf.exe -ldflags \
+        "-X main.version=$(VERSION)" \
    ./cmd/telegraf/telegraf.go

+build-for-docker:
+    CGO_ENABLED=0 GOOS=linux go build -installsuffix cgo -o telegraf -ldflags \
+        "-s -X main.version=$(VERSION)" \
+        ./cmd/telegraf/telegraf.go
+
# Build with race detector
dev: prepare
-    go build -race -o telegraf -ldflags \
-        "-X main.Version=$(VERSION)" \
-        ./cmd/telegraf/telegraf.go
+    go build -race -ldflags "-X main.version=$(VERSION)" ./...

-# Build linux 64-bit, 32-bit and arm architectures
-build-linux-bins: prepare
-    GOARCH=amd64 GOOS=linux go build -o telegraf_linux_amd64 \
-        -ldflags "-X main.Version=$(VERSION)" \
-        ./cmd/telegraf/telegraf.go
-    GOARCH=386 GOOS=linux go build -o telegraf_linux_386 \
-        -ldflags "-X main.Version=$(VERSION)" \
-        ./cmd/telegraf/telegraf.go
-    GOARCH=arm GOOS=linux go build -o telegraf_linux_arm \
-        -ldflags "-X main.Version=$(VERSION)" \
-        ./cmd/telegraf/telegraf.go
+# run package script
+package:
+    ./scripts/build.py --package --version="$(VERSION)" --platform=linux --arch=all --upload

# Get dependencies and use gdm to checkout changesets
prepare:
-    go get ./...
    go get github.com/sparrc/gdm
    gdm restore

+# Use the windows godeps file to prepare dependencies
+prepare-windows:
+    go get github.com/sparrc/gdm
+    gdm restore -f Godeps_windows
+
# Run all docker containers necessary for unit tests
docker-run:
ifeq ($(UNAME), Darwin)
@@ -59,12 +64,12 @@ endif
    docker run --name memcached -p "11211:11211" -d memcached
    docker run --name postgres -p "5432:5432" -d postgres
    docker run --name rabbitmq -p "15672:15672" -p "5672:5672" -d rabbitmq:3-management
-    docker run --name opentsdb -p "4242:4242" -d petergrace/opentsdb-docker
    docker run --name redis -p "6379:6379" -d redis
    docker run --name aerospike -p "3000:3000" -d aerospike
    docker run --name nsq -p "4150:4150" -d nsqio/nsq /nsqd
    docker run --name mqtt -p "1883:1883" -d ncarlier/mqtt
    docker run --name riemann -p "5555:5555" -d blalor/riemann
+    docker run --name snmp -p "31161:31161/udp" -d titilambert/snmpsim

# Run docker containers necessary for CircleCI unit tests
docker-run-circle:
@@ -73,26 +78,29 @@ docker-run-circle:
    -e ADVERTISED_PORT=9092 \
    -p "2181:2181" -p "9092:9092" \
    -d spotify/kafka
-    docker run --name opentsdb -p "4242:4242" -d petergrace/opentsdb-docker
    docker run --name aerospike -p "3000:3000" -d aerospike
    docker run --name nsq -p "4150:4150" -d nsqio/nsq /nsqd
    docker run --name mqtt -p "1883:1883" -d ncarlier/mqtt
    docker run --name riemann -p "5555:5555" -d blalor/riemann
+    docker run --name snmp -p "31161:31161/udp" -d titilambert/snmpsim

# Kill all docker containers, ignore errors
docker-kill:
-    -docker kill nsq aerospike redis opentsdb rabbitmq postgres memcached mysql kafka mqtt riemann
+    -docker kill nsq aerospike redis rabbitmq postgres memcached mysql kafka mqtt riemann snmp
-    -docker rm nsq aerospike redis opentsdb rabbitmq postgres memcached mysql kafka mqtt riemann
+    -docker rm nsq aerospike redis rabbitmq postgres memcached mysql kafka mqtt riemann snmp

# Run full unit tests using docker containers (includes setup and teardown)
-test: docker-kill docker-run
+test: vet docker-kill docker-run
    # Sleeping for kafka leadership election, TSDB setup, etc.
    sleep 60
    # SUCCESS, running tests
    go test -race ./...

# Run "short" unit tests
-test-short:
+test-short: vet
    go test -short ./...

-.PHONY: test
+vet:
+    go vet ./...
+
+.PHONY: test test-short vet build default
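
One detail worth calling out: the flag changed from `-X main.Version` to `-X main.version`, so the package-level variable receiving the build-time value changed case as well. As a hedged sketch of the mechanism (the actual code in `cmd/telegraf` is not shown in this compare), the linker flag simply overwrites a string variable at link time:

```go
package main

import "fmt"

// version is overwritten at link time by the Makefile, e.g.:
//   go build -ldflags "-X main.version=1.0.0-beta1" ./cmd/telegraf/telegraf.go
// If built without the flag it stays empty.
var version string

func main() {
    fmt.Println("Telegraf version:", version)
}
```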

README.md (390 changes)
@@ -1,48 +1,63 @@
-# Telegraf - A native agent for InfluxDB [](https://circleci.com/gh/influxdb/telegraf)
+# Telegraf [](https://circleci.com/gh/influxdata/telegraf) [](https://hub.docker.com/_/telegraf/)

Telegraf is an agent written in Go for collecting metrics from the system it's
-running on, or from other services, and writing them into InfluxDB.
+running on, or from other services, and writing them into InfluxDB or other
+[outputs](https://github.com/influxdata/telegraf#supported-output-plugins).

Design goals are to have a minimal memory footprint with a plugin system so
that developers in the community can easily add support for collecting metrics
from well known services (like Hadoop, Postgres, or Redis) and third party
APIs (like Mailchimp, AWS CloudWatch, or Google Analytics).

-We'll eagerly accept pull requests for new plugins and will manage the set of
-plugins that Telegraf supports. See the
-[contributing guide](CONTRIBUTING.md) for instructions on
-writing new plugins.
+New input and output plugins are designed to be easy to contribute,
+we'll eagerly accept pull
+requests and will manage the set of plugins that Telegraf supports.
+See the [contributing guide](CONTRIBUTING.md) for instructions on writing
+new plugins.

## Installation:

-### Linux deb and rpm packages:
+### Linux deb and rpm Packages:

Latest:
-* http://get.influxdb.org/telegraf/telegraf_0.2.4_amd64.deb
-* http://get.influxdb.org/telegraf/telegraf-0.2.4-1.x86_64.rpm
+* https://dl.influxdata.com/telegraf/releases/telegraf_1.0.0-beta1_amd64.deb
+* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0_beta1.x86_64.rpm

-##### Package instructions:
-
-* Telegraf binary is installed in `/opt/telegraf/telegraf`
-* Telegraf daemon configuration file is in `/etc/opt/telegraf/telegraf.conf`
+Latest (arm):
+* https://dl.influxdata.com/telegraf/releases/telegraf_1.0.0-beta1_armhf.deb
+* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0_beta1.armhf.rpm
+
+##### Package Instructions:
+
+* Telegraf binary is installed in `/usr/bin/telegraf`
+* Telegraf daemon configuration file is in `/etc/telegraf/telegraf.conf`
* On sysv systems, the telegraf daemon can be controlled via
`service telegraf [action]`
* On systemd systems (such as Ubuntu 15+), the telegraf daemon can be
controlled via `systemctl [action] telegraf`

-### Linux binaries:
+### yum/apt Repositories:
+
+There is a yum/apt repo available for the whole InfluxData stack, see
+[here](https://docs.influxdata.com/influxdb/v0.10/introduction/installation/#installation)
+for instructions on setting up the repo. Once it is configured, you will be able
+to use this repo to install & update telegraf.
+
+### Linux tarballs:

Latest:
-* http://get.influxdb.org/telegraf/telegraf_linux_amd64_0.2.4.tar.gz
-* http://get.influxdb.org/telegraf/telegraf_linux_386_0.2.4.tar.gz
-* http://get.influxdb.org/telegraf/telegraf_linux_arm_0.2.4.tar.gz
+* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0-beta1_linux_amd64.tar.gz
+* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0-beta1_linux_i386.tar.gz
+* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0-beta1_linux_armhf.tar.gz

-##### Binary instructions:
-
-These are standalone binaries that can be unpacked and executed on any linux
-system. They can be unpacked and renamed in a location such as
-`/usr/local/bin` for convenience. A config file will need to be generated,
-see "How to use it" below.
+### FreeBSD tarball:
+
+Latest:
+* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0-beta1_freebsd_amd64.tar.gz
+
+### Ansible Role:
+
+Ansible role: https://github.com/rossmcdonald/telegraf

### OSX via Homebrew:

@@ -51,244 +66,185 @@ brew update
brew install telegraf
```

+### Windows Binaries (EXPERIMENTAL)
+
+Latest:
+* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0-beta1_windows_amd64.zip
+
### From Source:

Telegraf manages dependencies via [gdm](https://github.com/sparrc/gdm),
which gets installed via the Makefile
-if you don't have it already. You also must build with golang version 1.4+.
+if you don't have it already. You also must build with golang version 1.5+.

1. [Install Go](https://golang.org/doc/install)
2. [Setup your GOPATH](https://golang.org/doc/code.html#GOPATH)
-3. Run `go get github.com/influxdb/telegraf`
+3. Run `go get github.com/influxdata/telegraf`
-4. Run `cd $GOPATH/src/github.com/influxdb/telegraf`
+4. Run `cd $GOPATH/src/github.com/influxdata/telegraf`
5. Run `make`

-### How to use it:
+## How to use it:

-* Run `telegraf -sample-config > telegraf.conf` to create an initial configuration.
-* Or run `telegraf -sample-config -filter cpu:mem -outputfilter influxdb > telegraf.conf`.
-to create a config file with only CPU and memory plugins defined, and InfluxDB
-output defined.
-* Edit the configuration to match your needs.
-* Run `telegraf -config telegraf.conf -test` to output one full measurement
-sample to STDOUT. NOTE: you may want to run as the telegraf user if you are using
-the linux packages `sudo -u telegraf telegraf -config telegraf.conf -test`
-* Run `telegraf -config telegraf.conf` to gather and send metrics to configured outputs.
-* Run `telegraf -config telegraf.conf -filter system:swap`.
-to run telegraf with only the system & swap plugins defined in the config.
-
-## Telegraf Options
-
-Telegraf has a few options you can configure under the `agent` section of the
-config.
-
-* **hostname**: The hostname is passed as a tag. By default this will be
-the value returned by `hostname` on the machine running Telegraf.
-You can override that value here.
-* **interval**: How often to gather metrics. Uses a simple number +
-unit parser, e.g. "10s" for 10 seconds or "5m" for 5 minutes.
-* **debug**: Set to true to gather and send metrics to STDOUT as well as
-InfluxDB.
-
-## Plugin Options
-
-There are 5 configuration options that are configurable per plugin:
-
-* **pass**: An array of strings that is used to filter metrics generated by the
-current plugin. Each string in the array is tested as a glob match against metric names
-and if it matches, the metric is emitted.
-* **drop**: The inverse of pass, if a metric name matches, it is not emitted.
-* **tagpass**: tag names and arrays of strings that are used to filter metrics by the current plugin. Each string in the array is tested as a glob match against
-the tag name, and if it matches the metric is emitted.
-* **tagdrop**: The inverse of tagpass. If a tag matches, the metric is not emitted.
-This is tested on metrics that have passed the tagpass test.
-* **interval**: How often to gather this metric. Normal plugins use a single
-global interval, but if one particular plugin should be run less or more often,
-you can configure that here.
-
-### Plugin Configuration Examples
-
-This is a full working config that will output CPU data to an InfluxDB instance
-at 192.168.59.103:8086, tagging measurements with dc="denver-1". It will output
-measurements at a 10s interval and will collect per-cpu data, dropping any
-measurements which begin with `cpu_time`.
-
-```toml
-[tags]
-dc = "denver-1"
-
-[agent]
-interval = "10s"
-
-# OUTPUTS
-[outputs]
-[[outputs.influxdb]]
-url = "http://192.168.59.103:8086" # required.
-database = "telegraf" # required.
-precision = "s"
-
-# PLUGINS
-[plugins]
-[[plugins.cpu]]
-percpu = true
-totalcpu = false
-drop = ["cpu_time*"]
+```console
+$ telegraf -help
+Telegraf, The plugin-driven server agent for collecting and reporting metrics.
+
+Usage:
+
+  telegraf <flags>
+
+The flags are:
+
+  -config <file>     configuration file to load
+  -test              gather metrics once, print them to stdout, and exit
+  -sample-config     print out full sample configuration to stdout
+  -config-directory  directory containing additional *.conf files
+  -input-filter      filter the input plugins to enable, separator is :
+  -output-filter     filter the output plugins to enable, separator is :
+  -usage             print usage for a plugin, ie, 'telegraf -usage mysql'
+  -debug             print metrics as they're generated to stdout
+  -quiet             run in quiet mode
+  -version           print the version to stdout
+
+Examples:
+
+  # generate a telegraf config file:
+  telegraf -sample-config > telegraf.conf
+
+  # generate config with only cpu input & influxdb output plugins defined
+  telegraf -sample-config -input-filter cpu -output-filter influxdb
+
+  # run a single telegraf collection, outputing metrics to stdout
+  telegraf -config telegraf.conf -test
+
+  # run telegraf with all plugins defined in config file
+  telegraf -config telegraf.conf
+
+  # run telegraf, enabling the cpu & memory input, and influxdb output plugins
+  telegraf -config telegraf.conf -input-filter cpu:mem -output-filter influxdb
```

-Below is how to configure `tagpass` and `tagdrop` parameters
-
-```toml
-[plugins]
-[[plugins.cpu]]
-percpu = true
-totalcpu = false
-drop = ["cpu_time"]
-# Don't collect CPU data for cpu6 & cpu7
-[plugins.cpu.tagdrop]
-cpu = [ "cpu6", "cpu7" ]
-
-[[plugins.disk]]
-[plugins.disk.tagpass]
-# tagpass conditions are OR, not AND.
-# If the (filesystem is ext4 or xfs) OR (the path is /opt or /home)
-# then the metric passes
-fstype = [ "ext4", "xfs" ]
-# Globs can also be used on the tag values
-path = [ "/opt", "/home*" ]
-```
-
-Below is how to configure `pass` and `drop` parameters
-
-```toml
-# Drop all metrics for guest CPU usage
-[[plugins.cpu]]
-drop = [ "cpu_usage_guest" ]
-
-# Only store inode related metrics for disks
-[[plugins.disk]]
-pass = [ "disk_inodes*" ]
-```
-
-Additional plugins (or outputs) of the same type can be specified,
-just define more instances in the config file:
-
-```toml
-[[plugins.cpu]]
-percpu = false
-totalcpu = true
-
-[[plugins.cpu]]
-percpu = true
-totalcpu = false
-drop = ["cpu_time*"]
-```
-
-## Supported Plugins
-
-**You can view usage instructions for each plugin by running**
-`telegraf -usage <pluginname>`.
-
-Telegraf currently has support for collecting metrics from:
-
-* aerospike
-* apache
-* bcache
-* disque
-* elasticsearch
-* exec (generic JSON-emitting executable plugin)
-* haproxy
-* httpjson (generic JSON-emitting http service plugin)
-* influxdb
-* jolokia (remote JMX with JSON over HTTP)
-* leofs
-* lustre2
-* mailchimp
-* memcached
-* mongodb
-* mysql
-* nginx
-* phpfpm
-* ping
-* postgresql
-* procstat
-* prometheus
-* puppetagent
-* rabbitmq
-* redis
-* rethinkdb
-* twemproxy
-* zfs
-* zookeeper
-* system
+## Configuration
+
+See the [configuration guide](docs/CONFIGURATION.md) for a rundown of the more advanced
+configuration options.
+
+## Supported Input Plugins
+
+Telegraf currently has support for collecting metrics from many sources. For
+more information on each, please look at the directory of the same name in
+`plugins/inputs`.
+
+Currently implemented sources:
+
+* [aws cloudwatch](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/cloudwatch)
+* [aerospike](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/aerospike)
+* [apache](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/apache)
+* [bcache](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/bcache)
+* [cassandra](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/cassandra)
+* [ceph](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/ceph)
+* [chrony](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/chrony)
+* [consul](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/consul)
+* [conntrack](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/conntrack)
+* [couchbase](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/couchbase)
+* [couchdb](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/couchdb)
+* [disque](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/disque)
+* [dns query time](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/dns_query)
+* [docker](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/docker)
+* [dovecot](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/dovecot)
+* [elasticsearch](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/elasticsearch)
+* [exec](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/exec) (generic executable plugin, support JSON, influx, graphite and nagios)
+* [filestat](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/filestat)
+* [haproxy](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/haproxy)
+* [http_response](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/http_response)
+* [httpjson](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/httpjson) (generic JSON-emitting http service plugin)
+* [influxdb](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/influxdb)
+* [ipmi_sensor](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/ipmi_sensor)
+* [jolokia](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia)
+* [leofs](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/leofs)
+* [lustre2](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/lustre2)
+* [mailchimp](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/mailchimp)
+* [memcached](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/memcached)
+* [mesos](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/mesos)
+* [mongodb](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/mongodb)
+* [mysql](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/mysql)
+* [net_response](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/net_response)
+* [nginx](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nginx)
+* [nsq](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nsq)
+* [nstat](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nstat)
+* [ntpq](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/ntpq)
+* [phpfpm](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/phpfpm)
+* [phusion passenger](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/passenger)
+* [ping](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/ping)
+* [postgresql](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/postgresql)
+* [postgresql_extensible](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/postgresql_extensible)
+* [powerdns](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/powerdns)
+* [procstat](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/procstat)
+* [prometheus](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/prometheus)
+* [puppetagent](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/puppetagent)
+* [rabbitmq](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/rabbitmq)
+* [raindrops](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/raindrops)
+* [redis](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/redis)
+* [rethinkdb](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/rethinkdb)
+* [riak](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/riak)
+* [sensors](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/sensors) (only available if built from source)
+* [snmp](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/snmp)
+* [sql server](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/sqlserver) (microsoft)
+* [twemproxy](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/twemproxy)
+* [varnish](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/varnish)
+* [zfs](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/zfs)
+* [zookeeper](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/zookeeper)
+* [win_perf_counters](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/win_perf_counters) (windows performance counters)
+* [sysstat](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/sysstat)
+* [system](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/system)
    * cpu
    * mem
-    * io
    * net
    * netstat
    * disk
+    * diskio
    * swap
+    * processes
+    * kernel (/proc/stat)
+    * kernel (/proc/vmstat)

-## Supported Service Plugins
-
-Telegraf can collect metrics via the following services:
-
-* statsd
-* kafka_consumer
+Telegraf can also collect metrics via the following service plugins:
+
+* [statsd](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/statsd)
+* [tail](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/tail)
+* [udp_listener](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/udp_listener)
+* [tcp_listener](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/tcp_listener)
+* [mqtt_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/mqtt_consumer)
+* [kafka_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/kafka_consumer)
+* [nats_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nats_consumer)
+* [github_webhooks](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/github_webhooks)
+* [rollbar_webhooks](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/rollbar_webhooks)

We'll be adding support for many more over the coming months. Read on if you
want to add support for another service or third-party API.

-## Output options
-
-Telegraf also supports specifying multiple output sinks to send data to,
-configuring each output sink is different, but examples can be
-found by running `telegraf -sample-config`.
-
-Outputs also support the same configurable options as plugins
-(pass, drop, tagpass, tagdrop), added in 0.2.4
-
-```toml
-[[outputs.influxdb]]
-urls = [ "http://localhost:8086" ]
-database = "telegraf"
-precision = "s"
-# Drop all measurements that start with "aerospike"
-drop = ["aerospike*"]
-
-[[outputs.influxdb]]
-urls = [ "http://localhost:8086" ]
-database = "telegraf-aerospike-data"
-precision = "s"
-# Only accept aerospike data:
-pass = ["aerospike*"]
-
-[[outputs.influxdb]]
-urls = [ "http://localhost:8086" ]
-database = "telegraf-cpu0-data"
-precision = "s"
-# Only store measurements where the tag "cpu" matches the value "cpu0"
-[outputs.influxdb.tagpass]
-cpu = ["cpu0"]
-```
-
-## Supported Outputs
-
-* influxdb
-* nsq
-* kafka
-* datadog
-* opentsdb
-* amqp (rabbitmq)
-* mqtt
-* librato
-* prometheus
-* amon
-* riemann
+## Supported Output Plugins
+
+* [influxdb](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/influxdb)
+* [amon](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/amon)
+* [amqp](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/amqp)
+* [aws kinesis](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/kinesis)
+* [aws cloudwatch](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/cloudwatch)
+* [datadog](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/datadog)
+* [file](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/file)
+* [graphite](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/graphite)
+* [graylog](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/graylog)
+* [instrumental](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/instrumental)
+* [kafka](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/kafka)
+* [librato](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/librato)
+* [mqtt](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/mqtt)
+* [nsq](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/nsq)
+* [opentsdb](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/opentsdb)
+* [prometheus](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/prometheus_client)
+* [riemann](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/riemann)

## Contributing

Please see the
[contributing guide](CONTRIBUTING.md)
-for details on contributing a plugin or output to Telegraf.
+for details on contributing a plugin to Telegraf.

accumulator.go (157 changes)
@@ -1,154 +1,21 @@
package telegraf

-import (
-    "fmt"
-    "log"
-    "math"
-    "sync"
-    "time"
-
-    "github.com/influxdb/telegraf/internal/config"
-
-    "github.com/influxdb/influxdb/client/v2"
-)
+import "time"

type Accumulator interface {
-    Add(measurement string, value interface{},
-        tags map[string]string, t ...time.Time)
-    AddFields(measurement string, fields map[string]interface{},
-        tags map[string]string, t ...time.Time)
-
-    SetDefaultTags(tags map[string]string)
-    AddDefaultTag(key, value string)
-
-    Prefix() string
-    SetPrefix(prefix string)
+    // Create a point with a value, decorating it with tags
+    // NOTE: tags is expected to be owned by the caller, don't mutate
+    // it after passing to Add.
+    Add(measurement string,
+        value interface{},
+        tags map[string]string,
+        t ...time.Time)
+
+    AddFields(measurement string,
+        fields map[string]interface{},
+        tags map[string]string,
+        t ...time.Time)

    Debug() bool
    SetDebug(enabled bool)
}
-
-func NewAccumulator(
-    pluginConfig *config.PluginConfig,
-    points chan *client.Point,
-) Accumulator {
-    acc := accumulator{}
-    acc.points = points
-    acc.pluginConfig = pluginConfig
-    return &acc
-}
-
-type accumulator struct {
-    sync.Mutex
-
-    points chan *client.Point
-
-    defaultTags map[string]string
-
-    debug bool
-
-    pluginConfig *config.PluginConfig
-
-    prefix string
-}
-
-func (ac *accumulator) Add(
-    measurement string,
-    value interface{},
-    tags map[string]string,
-    t ...time.Time,
-) {
-    fields := make(map[string]interface{})
-    fields["value"] = value
-    ac.AddFields(measurement, fields, tags, t...)
-}
-
-func (ac *accumulator) AddFields(
-    measurement string,
-    fields map[string]interface{},
-    tags map[string]string,
-    t ...time.Time,
-) {
-    // Validate uint64 and float64 fields
-    for k, v := range fields {
-        switch val := v.(type) {
-        case uint64:
-            // InfluxDB does not support writing uint64
-            if val < uint64(9223372036854775808) {
-                fields[k] = int64(val)
-            } else {
-                fields[k] = int64(9223372036854775807)
-            }
-        case float64:
-            // NaNs are invalid values in influxdb, skip measurement
-            if math.IsNaN(val) || math.IsInf(val, 0) {
-                if ac.debug {
-                    log.Printf("Measurement [%s] has a NaN or Inf field, skipping",
-                        measurement)
-                }
-                return
-            }
-        }
-    }
-
-    if tags == nil {
-        tags = make(map[string]string)
-    }
-
-    var timestamp time.Time
-    if len(t) > 0 {
-        timestamp = t[0]
-    } else {
-        timestamp = time.Now()
-    }
-
-    if ac.prefix != "" {
-        measurement = ac.prefix + measurement
-    }
-
-    if ac.pluginConfig != nil {
-        if !ac.pluginConfig.Filter.ShouldPass(measurement) || !ac.pluginConfig.Filter.ShouldTagsPass(tags) {
-            return
-        }
-    }
-
-    for k, v := range ac.defaultTags {
-        if _, ok := tags[k]; !ok {
-            tags[k] = v
-        }
-    }
-
-    pt, err := client.NewPoint(measurement, tags, fields, timestamp)
-    if err != nil {
-        log.Printf("Error adding point [%s]: %s\n", measurement, err.Error())
-        return
-    }
-    if ac.debug {
-        fmt.Println("> " + pt.String())
-    }
-    ac.points <- pt
-}
-
-func (ac *accumulator) SetDefaultTags(tags map[string]string) {
-    ac.defaultTags = tags
-}
-
-func (ac *accumulator) AddDefaultTag(key, value string) {
-    ac.defaultTags[key] = value
-}
-
-func (ac *accumulator) Prefix() string {
-    return ac.prefix
-}
-
-func (ac *accumulator) SetPrefix(prefix string) {
-    ac.prefix = prefix
-}
-
-func (ac *accumulator) Debug() bool {
-    return ac.debug
-}
-
-func (ac *accumulator) SetDebug(debug bool) {
-    ac.debug = debug
-}
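
The slimmed-down interface above is what input plugins code against; the implementation and its point-filtering logic move out of this file. As an illustrative sketch (the `mock` plugin below is hypothetical, not part of this changeset), a plugin's Gather hands values to the accumulator and lets the agent own filtering, batching, and flushing:

```go
package mock

import "github.com/influxdb/telegraf"

// Mock is a hypothetical input plugin demonstrating Accumulator usage.
type Mock struct{}

func (m *Mock) Description() string  { return "Emit fixed values (illustrative only)" }
func (m *Mock) SampleConfig() string { return "" }

// Gather reports one single-field point and one multi-field point.
// Per the interface docs, the tags map stays owned by the caller.
func (m *Mock) Gather(acc telegraf.Accumulator) error {
    tags := map[string]string{"host": "example"}

    // Single value: the accumulator wraps it in a "value" field.
    acc.Add("mock_value", 42, tags)

    // Multiple fields on one measurement.
    acc.AddFields("mock_stats", map[string]interface{}{
        "reads":  10,
        "writes": 3,
    }, tags)
    return nil
}
```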

agent.go (397 lines deleted)
@@ -1,397 +0,0 @@
package telegraf
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto/rand"
|
|
||||||
"fmt"
|
|
||||||
"log"
|
|
||||||
"math/big"
|
|
||||||
"os"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/influxdb/telegraf/internal/config"
|
|
||||||
"github.com/influxdb/telegraf/outputs"
|
|
||||||
"github.com/influxdb/telegraf/plugins"
|
|
||||||
|
|
||||||
"github.com/influxdb/influxdb/client/v2"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Agent runs telegraf and collects data based on the given config
|
|
||||||
type Agent struct {
|
|
||||||
Config *config.Config
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewAgent returns an Agent struct based off the given Config
|
|
||||||
func NewAgent(config *config.Config) (*Agent, error) {
|
|
||||||
a := &Agent{
|
|
||||||
Config: config,
|
|
||||||
}
|
|
||||||
|
|
||||||
if a.Config.Agent.Hostname == "" {
|
|
||||||
hostname, err := os.Hostname()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
a.Config.Agent.Hostname = hostname
|
|
||||||
}
|
|
||||||
|
|
||||||
config.Tags["host"] = a.Config.Agent.Hostname
|
|
||||||
|
|
||||||
return a, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Connect connects to all configured outputs
|
|
||||||
func (a *Agent) Connect() error {
|
|
||||||
for _, o := range a.Config.Outputs {
|
|
||||||
switch ot := o.Output.(type) {
|
|
||||||
case outputs.ServiceOutput:
|
|
||||||
if err := ot.Start(); err != nil {
|
|
||||||
log.Printf("Service for output %s failed to start, exiting\n%s\n",
|
|
||||||
o.Name, err.Error())
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if a.Config.Agent.Debug {
|
|
||||||
log.Printf("Attempting connection to output: %s\n", o.Name)
|
|
||||||
}
|
|
||||||
err := o.Output.Connect()
|
|
||||||
if err != nil {
|
|
||||||
log.Printf("Failed to connect to output %s, retrying in 15s\n", o.Name)
|
|
||||||
time.Sleep(15 * time.Second)
|
|
||||||
err = o.Output.Connect()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if a.Config.Agent.Debug {
|
|
||||||
log.Printf("Successfully connected to output: %s\n", o.Name)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close closes the connection to all configured outputs
|
|
||||||
func (a *Agent) Close() error {
|
|
||||||
var err error
|
|
||||||
for _, o := range a.Config.Outputs {
|
|
||||||
err = o.Output.Close()
|
|
||||||
switch ot := o.Output.(type) {
|
|
||||||
case outputs.ServiceOutput:
|
|
||||||
ot.Stop()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// gatherParallel runs the plugins that are using the same reporting interval
|
|
||||||
// as the telegraf agent.
|
|
||||||
func (a *Agent) gatherParallel(pointChan chan *client.Point) error {
|
|
||||||
var wg sync.WaitGroup
|
|
||||||
|
|
||||||
start := time.Now()
|
|
||||||
counter := 0
|
|
||||||
for _, plugin := range a.Config.Plugins {
|
|
||||||
if plugin.Config.Interval != 0 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
wg.Add(1)
|
|
||||||
counter++
|
|
||||||
go func(plugin *config.RunningPlugin) {
|
|
||||||
defer wg.Done()
|
|
||||||
|
|
||||||
acc := NewAccumulator(plugin.Config, pointChan)
|
|
||||||
acc.SetDebug(a.Config.Agent.Debug)
|
|
||||||
acc.SetPrefix(plugin.Name + "_")
|
|
||||||
acc.SetDefaultTags(a.Config.Tags)
|
|
||||||
|
|
||||||
if err := plugin.Plugin.Gather(acc); err != nil {
|
|
||||||
log.Printf("Error in plugin [%s]: %s", plugin.Name, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
}(plugin)
|
|
||||||
}
|
|
||||||
|
|
||||||
if counter == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
wg.Wait()
|
|
||||||
|
|
||||||
elapsed := time.Since(start)
|
|
||||||
log.Printf("Gathered metrics, (%s interval), from %d plugins in %s\n",
|
|
||||||
a.Config.Agent.Interval, counter, elapsed)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// gatherSeparate runs the plugins that have been configured with their own
|
|
||||||
// reporting interval.
|
|
||||||
func (a *Agent) gatherSeparate(
|
|
||||||
shutdown chan struct{},
|
|
||||||
plugin *config.RunningPlugin,
|
|
||||||
pointChan chan *client.Point,
|
|
||||||
) error {
|
|
||||||
ticker := time.NewTicker(plugin.Config.Interval)
|
|
||||||
|
|
||||||
for {
|
|
||||||
var outerr error
|
|
||||||
start := time.Now()
|
|
||||||
|
|
||||||
acc := NewAccumulator(plugin.Config, pointChan)
|
|
||||||
acc.SetDebug(a.Config.Agent.Debug)
|
|
||||||
acc.SetPrefix(plugin.Name + "_")
|
|
||||||
acc.SetDefaultTags(a.Config.Tags)
|
|
||||||
|
|
||||||
if err := plugin.Plugin.Gather(acc); err != nil {
|
|
||||||
log.Printf("Error in plugin [%s]: %s", plugin.Name, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
elapsed := time.Since(start)
|
|
||||||
log.Printf("Gathered metrics, (separate %s interval), from %s in %s\n",
|
|
||||||
plugin.Config.Interval, plugin.Name, elapsed)
|
|
||||||
|
|
||||||
if outerr != nil {
|
|
||||||
return outerr
|
|
||||||
}
|
|
||||||
|
|
||||||
select {
|
|
||||||
case <-shutdown:
|
|
||||||
return nil
|
|
||||||
case <-ticker.C:
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Test verifies that we can 'Gather' from all plugins with their configured
|
|
||||||
// Config struct
|
|
||||||
func (a *Agent) Test() error {
|
|
||||||
shutdown := make(chan struct{})
|
|
||||||
defer close(shutdown)
|
|
||||||
pointChan := make(chan *client.Point)
|
|
||||||
|
|
||||||
// dummy receiver for the point channel
|
|
||||||
go func() {
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case <-pointChan:
|
|
||||||
// do nothing
|
|
||||||
case <-shutdown:
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
for _, plugin := range a.Config.Plugins {
|
|
||||||
acc := NewAccumulator(plugin.Config, pointChan)
|
|
||||||
acc.SetDebug(true)
|
|
||||||
acc.SetPrefix(plugin.Name + "_")
|
|
||||||
|
|
||||||
fmt.Printf("* Plugin: %s, Collection 1\n", plugin.Name)
|
|
||||||
if plugin.Config.Interval != 0 {
|
|
||||||
fmt.Printf("* Internal: %s\n", plugin.Config.Interval)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := plugin.Plugin.Gather(acc); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Special instructions for some plugins. cpu, for example, needs to be
|
|
||||||
// run twice in order to return cpu usage percentages.
|
|
||||||
switch plugin.Name {
|
|
||||||
case "cpu", "mongodb":
|
|
||||||
time.Sleep(500 * time.Millisecond)
|
|
||||||
fmt.Printf("* Plugin: %s, Collection 2\n", plugin.Name)
|
|
||||||
if err := plugin.Plugin.Gather(acc); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// writeOutput writes a list of points to a single output, with retries.
|
|
||||||
// Optionally takes a `done` channel to indicate that it is done writing.
|
|
||||||
func (a *Agent) writeOutput(
|
|
||||||
points []*client.Point,
|
|
||||||
ro *config.RunningOutput,
|
|
||||||
shutdown chan struct{},
|
|
||||||
wg *sync.WaitGroup,
|
|
||||||
) {
|
|
||||||
defer wg.Done()
|
|
||||||
if len(points) == 0 {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
retry := 0
|
|
||||||
retries := a.Config.Agent.FlushRetries
|
|
||||||
start := time.Now()
|
|
||||||
|
|
||||||
for {
|
|
||||||
filtered := ro.FilterPoints(points)
|
|
||||||
err := ro.Output.Write(filtered)
|
|
||||||
if err == nil {
|
|
||||||
// Write successful
|
|
||||||
elapsed := time.Since(start)
|
|
||||||
log.Printf("Flushed %d metrics to output %s in %s\n",
|
|
||||||
len(filtered), ro.Name, elapsed)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
select {
|
|
||||||
case <-shutdown:
|
|
||||||
return
|
|
||||||
default:
|
|
||||||
if retry >= retries {
|
|
||||||
// No more retries
|
|
||||||
msg := "FATAL: Write to output [%s] failed %d times, dropping" +
|
|
||||||
" %d metrics\n"
|
|
||||||
log.Printf(msg, ro.Name, retries+1, len(points))
|
|
||||||
return
|
|
||||||
} else if err != nil {
|
|
||||||
// Sleep for a retry
|
|
||||||
log.Printf("Error in output [%s]: %s, retrying in %s",
|
|
||||||
ro.Name, err.Error(), a.Config.Agent.FlushInterval.Duration)
|
|
||||||
time.Sleep(a.Config.Agent.FlushInterval.Duration)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
retry++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// flush writes a list of points to all configured outputs
|
|
||||||
func (a *Agent) flush(
|
|
||||||
points []*client.Point,
|
|
||||||
shutdown chan struct{},
|
|
||||||
wait bool,
|
|
||||||
) {
|
|
||||||
var wg sync.WaitGroup
|
|
||||||
for _, o := range a.Config.Outputs {
|
|
||||||
wg.Add(1)
|
|
||||||
go a.writeOutput(points, o, shutdown, &wg)
|
|
||||||
}
|
|
||||||
if wait {
|
|
||||||
wg.Wait()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// flusher monitors the points input channel and flushes on the minimum interval
|
|
||||||
func (a *Agent) flusher(shutdown chan struct{}, pointChan chan *client.Point) error {
|
|
||||||
// Inelegant, but this sleep is to allow the Gather threads to run, so that
|
|
||||||
// the flusher will flush after metrics are collected.
|
|
||||||
time.Sleep(time.Millisecond * 100)
|
|
||||||
|
|
||||||
ticker := time.NewTicker(a.Config.Agent.FlushInterval.Duration)
|
|
||||||
points := make([]*client.Point, 0)
|
|
||||||
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case <-shutdown:
|
|
||||||
log.Println("Hang on, flushing any cached points before shutdown")
|
|
||||||
a.flush(points, shutdown, true)
|
|
||||||
return nil
|
|
||||||
case <-ticker.C:
|
|
||||||
a.flush(points, shutdown, false)
|
|
||||||
points = make([]*client.Point, 0)
|
|
||||||
case pt := <-pointChan:
|
|
||||||
points = append(points, pt)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// jitterInterval applies the interval jitter to the flush interval using
// the crypto/rand number generator
func jitterInterval(ininterval, injitter time.Duration) time.Duration {
	var jitter int64
	outinterval := ininterval
	if injitter.Nanoseconds() != 0 {
		maxjitter := big.NewInt(injitter.Nanoseconds())
		if j, err := rand.Int(rand.Reader, maxjitter); err == nil {
			jitter = j.Int64()
		}
		outinterval = time.Duration(jitter + ininterval.Nanoseconds())
	}

	if outinterval.Nanoseconds() < time.Duration(500*time.Millisecond).Nanoseconds() {
		log.Printf("Flush interval %s too low, setting to 500ms\n", outinterval)
		outinterval = time.Duration(500 * time.Millisecond)
	}

	return outinterval
}
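
// Worked example (illustrative, not part of the original source): with
// ininterval = 10s and injitter = 2s, rand.Int draws uniformly from [0, 2s),
// so the returned flush interval falls in the half-open range [10s, 12s).
// With both arguments zero, the 500ms floor above applies instead, as the
// jitter tests later in this diff exercise.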

// Run runs the agent daemon, gathering every Interval
func (a *Agent) Run(shutdown chan struct{}) error {
	var wg sync.WaitGroup

	a.Config.Agent.FlushInterval.Duration = jitterInterval(a.Config.Agent.FlushInterval.Duration,
		a.Config.Agent.FlushJitter.Duration)

	log.Printf("Agent Config: Interval:%s, Debug:%#v, Hostname:%#v, "+
		"Flush Interval:%s\n",
		a.Config.Agent.Interval, a.Config.Agent.Debug,
		a.Config.Agent.Hostname, a.Config.Agent.FlushInterval)

	// channel shared between all plugin threads for accumulating points
	pointChan := make(chan *client.Point, 1000)

	// Round collection to nearest interval by sleeping
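	// e.g. with a 10s interval, if the agent starts 7s past a 10s boundary,
	// i-(now%i) sleeps the remaining 3s so the first collection lands on the
	// next boundary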
	if a.Config.Agent.RoundInterval {
		i := int64(a.Config.Agent.Interval.Duration)
		time.Sleep(time.Duration(i - (time.Now().UnixNano() % i)))
	}
	ticker := time.NewTicker(a.Config.Agent.Interval.Duration)

	wg.Add(1)
	go func() {
		defer wg.Done()
		if err := a.flusher(shutdown, pointChan); err != nil {
			log.Printf("Flusher routine failed, exiting: %s\n", err.Error())
			close(shutdown)
		}
	}()

	for _, plugin := range a.Config.Plugins {

		// Start service of any ServicePlugins
		switch p := plugin.Plugin.(type) {
		case plugins.ServicePlugin:
			if err := p.Start(); err != nil {
				log.Printf("Service for plugin %s failed to start, exiting\n%s\n",
					plugin.Name, err.Error())
				return err
			}
			defer p.Stop()
		}

		// Special handling for plugins that have their own collection interval
		// configured. Default intervals are handled below with gatherParallel
		if plugin.Config.Interval != 0 {
			wg.Add(1)
			go func(plugin *config.RunningPlugin) {
				defer wg.Done()
				if err := a.gatherSeparate(shutdown, plugin, pointChan); err != nil {
					log.Printf(err.Error())
				}
			}(plugin)
		}
	}

	defer wg.Wait()

	for {
		if err := a.gatherParallel(pointChan); err != nil {
			log.Printf(err.Error())
		}

		select {
		case <-shutdown:
			return nil
		case <-ticker.C:
			continue
		}
	}
}
185	agent/accumulator.go	Normal file
@@ -0,0 +1,185 @@
package agent

import (
	"fmt"
	"log"
	"math"
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/internal/models"
)

func NewAccumulator(
	inputConfig *internal_models.InputConfig,
	metrics chan telegraf.Metric,
) *accumulator {
	acc := accumulator{}
	acc.metrics = metrics
	acc.inputConfig = inputConfig
	return &acc
}

type accumulator struct {
	metrics chan telegraf.Metric

	defaultTags map[string]string

	debug bool
	// print every point added to the accumulator
	trace bool

	inputConfig *internal_models.InputConfig

	prefix string
}

func (ac *accumulator) Add(
	measurement string,
	value interface{},
	tags map[string]string,
	t ...time.Time,
) {
	fields := make(map[string]interface{})
	fields["value"] = value

	if !ac.inputConfig.Filter.ShouldNamePass(measurement) {
		return
	}

	ac.AddFields(measurement, fields, tags, t...)
}

func (ac *accumulator) AddFields(
	measurement string,
	fields map[string]interface{},
	tags map[string]string,
	t ...time.Time,
) {
	if len(fields) == 0 || len(measurement) == 0 {
		return
	}

	if !ac.inputConfig.Filter.ShouldNamePass(measurement) {
		return
	}

	if !ac.inputConfig.Filter.ShouldTagsPass(tags) {
		return
	}

	// Override measurement name if set
	if len(ac.inputConfig.NameOverride) != 0 {
		measurement = ac.inputConfig.NameOverride
	}
	// Apply measurement prefix and suffix if set
	if len(ac.inputConfig.MeasurementPrefix) != 0 {
		measurement = ac.inputConfig.MeasurementPrefix + measurement
	}
	if len(ac.inputConfig.MeasurementSuffix) != 0 {
		measurement = measurement + ac.inputConfig.MeasurementSuffix
	}

	if tags == nil {
		tags = make(map[string]string)
	}
	// Apply plugin-wide tags if set
	for k, v := range ac.inputConfig.Tags {
		if _, ok := tags[k]; !ok {
			tags[k] = v
		}
	}
	// Apply daemon-wide tags if set
	for k, v := range ac.defaultTags {
		if _, ok := tags[k]; !ok {
			tags[k] = v
		}
	}
	ac.inputConfig.Filter.FilterTags(tags)

	result := make(map[string]interface{})
	for k, v := range fields {
		// Filter out any filtered fields
		if ac.inputConfig != nil {
			if !ac.inputConfig.Filter.ShouldFieldsPass(k) {
				continue
			}
		}

		// Validate uint64 and float64 fields
		switch val := v.(type) {
		case uint64:
			// InfluxDB does not support writing uint64
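			// (values below 2^63 = 9223372036854775808 convert to int64
			// exactly; anything larger is capped at the maximum int64 value)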
			if val < uint64(9223372036854775808) {
				result[k] = int64(val)
			} else {
				result[k] = int64(9223372036854775807)
			}
			continue
		case float64:
			// NaNs are invalid values in influxdb, skip measurement
			if math.IsNaN(val) || math.IsInf(val, 0) {
				if ac.debug {
					log.Printf("Measurement [%s] field [%s] has a NaN or Inf "+
						"field, skipping",
						measurement, k)
				}
				continue
			}
		}

		result[k] = v
	}
	fields = nil
	if len(result) == 0 {
		return
	}

	var timestamp time.Time
	if len(t) > 0 {
		timestamp = t[0]
	} else {
		timestamp = time.Now()
	}

	if ac.prefix != "" {
		measurement = ac.prefix + measurement
	}

	m, err := telegraf.NewMetric(measurement, tags, result, timestamp)
	if err != nil {
		log.Printf("Error adding point [%s]: %s\n", measurement, err.Error())
		return
	}
	if ac.trace {
		fmt.Println("> " + m.String())
	}
	ac.metrics <- m
}

func (ac *accumulator) Debug() bool {
	return ac.debug
}

func (ac *accumulator) SetDebug(debug bool) {
	ac.debug = debug
}

func (ac *accumulator) Trace() bool {
	return ac.trace
}

func (ac *accumulator) SetTrace(trace bool) {
	ac.trace = trace
}

func (ac *accumulator) setDefaultTags(tags map[string]string) {
	ac.defaultTags = tags
}

func (ac *accumulator) addDefaultTag(key, value string) {
	if ac.defaultTags == nil {
		ac.defaultTags = make(map[string]string)
	}
	ac.defaultTags[key] = value
}
334	agent/accumulator_test.go	Normal file
@@ -0,0 +1,334 @@
package agent

import (
	"fmt"
	"math"
	"testing"
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/internal/models"

	"github.com/stretchr/testify/assert"
)

func TestAdd(t *testing.T) {
	a := accumulator{}
	now := time.Now()
	a.metrics = make(chan telegraf.Metric, 10)
	defer close(a.metrics)
	a.inputConfig = &internal_models.InputConfig{}

	a.Add("acctest", float64(101), map[string]string{})
	a.Add("acctest", float64(101), map[string]string{"acc": "test"})
	a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)

	testm := <-a.metrics
	actual := testm.String()
	assert.Contains(t, actual, "acctest value=101")

	testm = <-a.metrics
	actual = testm.String()
	assert.Contains(t, actual, "acctest,acc=test value=101")

	testm = <-a.metrics
	actual = testm.String()
	assert.Equal(t,
		fmt.Sprintf("acctest,acc=test value=101 %d", now.UnixNano()),
		actual)
}

func TestAddDefaultTags(t *testing.T) {
	a := accumulator{}
	a.addDefaultTag("default", "tag")
	now := time.Now()
	a.metrics = make(chan telegraf.Metric, 10)
	defer close(a.metrics)
	a.inputConfig = &internal_models.InputConfig{}

	a.Add("acctest", float64(101), map[string]string{})
	a.Add("acctest", float64(101), map[string]string{"acc": "test"})
	a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)

	testm := <-a.metrics
	actual := testm.String()
	assert.Contains(t, actual, "acctest,default=tag value=101")

	testm = <-a.metrics
	actual = testm.String()
	assert.Contains(t, actual, "acctest,acc=test,default=tag value=101")

	testm = <-a.metrics
	actual = testm.String()
	assert.Equal(t,
		fmt.Sprintf("acctest,acc=test,default=tag value=101 %d", now.UnixNano()),
		actual)
}

func TestAddFields(t *testing.T) {
	a := accumulator{}
	now := time.Now()
	a.metrics = make(chan telegraf.Metric, 10)
	defer close(a.metrics)
	a.inputConfig = &internal_models.InputConfig{}

	fields := map[string]interface{}{
		"usage": float64(99),
	}
	a.AddFields("acctest", fields, map[string]string{})
	a.AddFields("acctest", fields, map[string]string{"acc": "test"})
	a.AddFields("acctest", fields, map[string]string{"acc": "test"}, now)

	testm := <-a.metrics
	actual := testm.String()
	assert.Contains(t, actual, "acctest usage=99")

	testm = <-a.metrics
	actual = testm.String()
	assert.Contains(t, actual, "acctest,acc=test usage=99")

	testm = <-a.metrics
	actual = testm.String()
	assert.Equal(t,
		fmt.Sprintf("acctest,acc=test usage=99 %d", now.UnixNano()),
		actual)
}

// Test that all Inf fields get dropped, and not added to metrics channel
func TestAddInfFields(t *testing.T) {
	inf := math.Inf(1)
	ninf := math.Inf(-1)

	a := accumulator{}
	now := time.Now()
	a.metrics = make(chan telegraf.Metric, 10)
	defer close(a.metrics)
	a.inputConfig = &internal_models.InputConfig{}

	fields := map[string]interface{}{
		"usage":  inf,
		"nusage": ninf,
	}
	a.AddFields("acctest", fields, map[string]string{})
	a.AddFields("acctest", fields, map[string]string{"acc": "test"})
	a.AddFields("acctest", fields, map[string]string{"acc": "test"}, now)

	assert.Len(t, a.metrics, 0)

	// test that non-inf fields are kept and not dropped
	fields["notinf"] = float64(100)
	a.AddFields("acctest", fields, map[string]string{})
	testm := <-a.metrics
	actual := testm.String()
	assert.Contains(t, actual, "acctest notinf=100")
}

// Test that nan fields are dropped and not added
func TestAddNaNFields(t *testing.T) {
	nan := math.NaN()

	a := accumulator{}
	now := time.Now()
	a.metrics = make(chan telegraf.Metric, 10)
	defer close(a.metrics)
	a.inputConfig = &internal_models.InputConfig{}

	fields := map[string]interface{}{
		"usage": nan,
	}
	a.AddFields("acctest", fields, map[string]string{})
	a.AddFields("acctest", fields, map[string]string{"acc": "test"})
	a.AddFields("acctest", fields, map[string]string{"acc": "test"}, now)

	assert.Len(t, a.metrics, 0)

	// test that non-nan fields are kept and not dropped
	fields["notnan"] = float64(100)
	a.AddFields("acctest", fields, map[string]string{})
	testm := <-a.metrics
	actual := testm.String()
	assert.Contains(t, actual, "acctest notnan=100")
}

func TestAddUint64Fields(t *testing.T) {
	a := accumulator{}
	now := time.Now()
	a.metrics = make(chan telegraf.Metric, 10)
	defer close(a.metrics)
	a.inputConfig = &internal_models.InputConfig{}

	fields := map[string]interface{}{
		"usage": uint64(99),
	}
	a.AddFields("acctest", fields, map[string]string{})
	a.AddFields("acctest", fields, map[string]string{"acc": "test"})
	a.AddFields("acctest", fields, map[string]string{"acc": "test"}, now)

	testm := <-a.metrics
	actual := testm.String()
	assert.Contains(t, actual, "acctest usage=99i")

	testm = <-a.metrics
	actual = testm.String()
	assert.Contains(t, actual, "acctest,acc=test usage=99i")

	testm = <-a.metrics
	actual = testm.String()
	assert.Equal(t,
		fmt.Sprintf("acctest,acc=test usage=99i %d", now.UnixNano()),
		actual)
}

func TestAddUint64Overflow(t *testing.T) {
	a := accumulator{}
	now := time.Now()
	a.metrics = make(chan telegraf.Metric, 10)
	defer close(a.metrics)
	a.inputConfig = &internal_models.InputConfig{}

	fields := map[string]interface{}{
		"usage": uint64(9223372036854775808),
	}
	a.AddFields("acctest", fields, map[string]string{})
	a.AddFields("acctest", fields, map[string]string{"acc": "test"})
	a.AddFields("acctest", fields, map[string]string{"acc": "test"}, now)

	testm := <-a.metrics
	actual := testm.String()
	assert.Contains(t, actual, "acctest usage=9223372036854775807i")

	testm = <-a.metrics
	actual = testm.String()
	assert.Contains(t, actual, "acctest,acc=test usage=9223372036854775807i")

	testm = <-a.metrics
	actual = testm.String()
	assert.Equal(t,
		fmt.Sprintf("acctest,acc=test usage=9223372036854775807i %d", now.UnixNano()),
		actual)
}

func TestAddInts(t *testing.T) {
	a := accumulator{}
	a.addDefaultTag("default", "tag")
	now := time.Now()
	a.metrics = make(chan telegraf.Metric, 10)
	defer close(a.metrics)
	a.inputConfig = &internal_models.InputConfig{}

	a.Add("acctest", int(101), map[string]string{})
	a.Add("acctest", int32(101), map[string]string{"acc": "test"})
	a.Add("acctest", int64(101), map[string]string{"acc": "test"}, now)

	testm := <-a.metrics
	actual := testm.String()
	assert.Contains(t, actual, "acctest,default=tag value=101i")

	testm = <-a.metrics
	actual = testm.String()
	assert.Contains(t, actual, "acctest,acc=test,default=tag value=101i")

	testm = <-a.metrics
	actual = testm.String()
	assert.Equal(t,
		fmt.Sprintf("acctest,acc=test,default=tag value=101i %d", now.UnixNano()),
		actual)
}

func TestAddFloats(t *testing.T) {
	a := accumulator{}
	a.addDefaultTag("default", "tag")
	now := time.Now()
	a.metrics = make(chan telegraf.Metric, 10)
	defer close(a.metrics)
	a.inputConfig = &internal_models.InputConfig{}

	a.Add("acctest", float32(101), map[string]string{"acc": "test"})
	a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)

	testm := <-a.metrics
	actual := testm.String()
	assert.Contains(t, actual, "acctest,acc=test,default=tag value=101")

	testm = <-a.metrics
	actual = testm.String()
	assert.Equal(t,
		fmt.Sprintf("acctest,acc=test,default=tag value=101 %d", now.UnixNano()),
		actual)
}

func TestAddStrings(t *testing.T) {
	a := accumulator{}
	a.addDefaultTag("default", "tag")
	now := time.Now()
	a.metrics = make(chan telegraf.Metric, 10)
	defer close(a.metrics)
	a.inputConfig = &internal_models.InputConfig{}

	a.Add("acctest", "test", map[string]string{"acc": "test"})
	a.Add("acctest", "foo", map[string]string{"acc": "test"}, now)

	testm := <-a.metrics
	actual := testm.String()
	assert.Contains(t, actual, "acctest,acc=test,default=tag value=\"test\"")

	testm = <-a.metrics
	actual = testm.String()
	assert.Equal(t,
		fmt.Sprintf("acctest,acc=test,default=tag value=\"foo\" %d", now.UnixNano()),
		actual)
}

func TestAddBools(t *testing.T) {
	a := accumulator{}
	a.addDefaultTag("default", "tag")
	now := time.Now()
	a.metrics = make(chan telegraf.Metric, 10)
	defer close(a.metrics)
	a.inputConfig = &internal_models.InputConfig{}

	a.Add("acctest", true, map[string]string{"acc": "test"})
	a.Add("acctest", false, map[string]string{"acc": "test"}, now)

	testm := <-a.metrics
	actual := testm.String()
	assert.Contains(t, actual, "acctest,acc=test,default=tag value=true")

	testm = <-a.metrics
	actual = testm.String()
	assert.Equal(t,
		fmt.Sprintf("acctest,acc=test,default=tag value=false %d", now.UnixNano()),
		actual)
}

// Test that tag filters get applied to metrics.
func TestAccFilterTags(t *testing.T) {
	a := accumulator{}
	now := time.Now()
	a.metrics = make(chan telegraf.Metric, 10)
	defer close(a.metrics)
	filter := internal_models.Filter{
		TagExclude: []string{"acc"},
	}
	assert.NoError(t, filter.CompileFilter())
	a.inputConfig = &internal_models.InputConfig{}
	a.inputConfig.Filter = filter

	a.Add("acctest", float64(101), map[string]string{})
	a.Add("acctest", float64(101), map[string]string{"acc": "test"})
	a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)

	testm := <-a.metrics
	actual := testm.String()
	assert.Contains(t, actual, "acctest value=101")

	testm = <-a.metrics
	actual = testm.String()
	assert.Contains(t, actual, "acctest value=101")

	testm = <-a.metrics
	actual = testm.String()
	assert.Equal(t,
		fmt.Sprintf("acctest value=101 %d", now.UnixNano()),
		actual)
}
334	agent/agent.go	Normal file
@@ -0,0 +1,334 @@
package agent

import (
	"fmt"
	"log"
	"os"
	"runtime"
	"sync"
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/internal"
	"github.com/influxdata/telegraf/internal/config"
	"github.com/influxdata/telegraf/internal/models"
)

// Agent runs telegraf and collects data based on the given config
type Agent struct {
	Config *config.Config
}

// NewAgent returns an Agent struct based off the given Config
func NewAgent(config *config.Config) (*Agent, error) {
	a := &Agent{
		Config: config,
	}

	if !a.Config.Agent.OmitHostname {
		if a.Config.Agent.Hostname == "" {
			hostname, err := os.Hostname()
			if err != nil {
				return nil, err
			}

			a.Config.Agent.Hostname = hostname
		}

		config.Tags["host"] = a.Config.Agent.Hostname
	}

	return a, nil
}

// Connect connects to all configured outputs
func (a *Agent) Connect() error {
	for _, o := range a.Config.Outputs {
		o.Quiet = a.Config.Agent.Quiet

		switch ot := o.Output.(type) {
		case telegraf.ServiceOutput:
			if err := ot.Start(); err != nil {
				log.Printf("Service for output %s failed to start, exiting\n%s\n",
					o.Name, err.Error())
				return err
			}
		}

		if a.Config.Agent.Debug {
			log.Printf("Attempting connection to output: %s\n", o.Name)
		}
		err := o.Output.Connect()
		if err != nil {
			log.Printf("Failed to connect to output %s, retrying in 15s, "+
				"error was '%s' \n", o.Name, err)
			time.Sleep(15 * time.Second)
			err = o.Output.Connect()
			if err != nil {
				return err
			}
		}
		if a.Config.Agent.Debug {
			log.Printf("Successfully connected to output: %s\n", o.Name)
		}
	}
	return nil
}

// Close closes the connection to all configured outputs
func (a *Agent) Close() error {
	var err error
	for _, o := range a.Config.Outputs {
		err = o.Output.Close()
		switch ot := o.Output.(type) {
		case telegraf.ServiceOutput:
			ot.Stop()
		}
	}
	return err
}

func panicRecover(input *internal_models.RunningInput) {
	if err := recover(); err != nil {
		trace := make([]byte, 2048)
		runtime.Stack(trace, true)
		log.Printf("FATAL: Input [%s] panicked: %s, Stack:\n%s\n",
			input.Name, err, trace)
		log.Println("PLEASE REPORT THIS PANIC ON GITHUB with " +
			"stack trace, configuration, and OS information: " +
			"https://github.com/influxdata/telegraf/issues/new")
	}
}

// gatherer runs the inputs that have been configured with their own
// reporting interval.
func (a *Agent) gatherer(
	shutdown chan struct{},
	input *internal_models.RunningInput,
	interval time.Duration,
	metricC chan telegraf.Metric,
) error {
	defer panicRecover(input)

	ticker := time.NewTicker(interval)
	defer ticker.Stop()

	for {
		var outerr error

		acc := NewAccumulator(input.Config, metricC)
		acc.SetDebug(a.Config.Agent.Debug)
		acc.setDefaultTags(a.Config.Tags)

		internal.RandomSleep(a.Config.Agent.CollectionJitter.Duration, shutdown)

		start := time.Now()
		gatherWithTimeout(shutdown, input, acc, interval)
		elapsed := time.Since(start)

		if outerr != nil {
			return outerr
		}
		if a.Config.Agent.Debug {
			log.Printf("Input [%s] gathered metrics, (%s interval) in %s\n",
				input.Name, interval, elapsed)
		}

		select {
		case <-shutdown:
			return nil
		case <-ticker.C:
			continue
		}
	}
}

// gatherWithTimeout gathers from the given input, with the given timeout.
// when the given timeout is reached, gatherWithTimeout logs an error message
// but continues waiting for it to return. This is to avoid leaving behind
// hung processes, and to prevent re-calling the same hung process over and
// over.
func gatherWithTimeout(
	shutdown chan struct{},
	input *internal_models.RunningInput,
	acc *accumulator,
	timeout time.Duration,
) {
	ticker := time.NewTicker(timeout)
	defer ticker.Stop()
	done := make(chan error)
	go func() {
		done <- input.Input.Gather(acc)
	}()

	for {
		select {
		case err := <-done:
			if err != nil {
				log.Printf("ERROR in input [%s]: %s", input.Name, err)
			}
			return
		case <-ticker.C:
			log.Printf("ERROR: input [%s] took longer to collect than "+
				"collection interval (%s)",
				input.Name, timeout)
			continue
		case <-shutdown:
			return
		}
	}
}
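
// Note: Gather runs in its own goroutine feeding the done channel, so a hung
// input never blocks the select above; on timeout the loop only logs and
// keeps waiting rather than re-invoking the input.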

// Test verifies that we can 'Gather' from all inputs with their configured
// Config struct
func (a *Agent) Test() error {
	shutdown := make(chan struct{})
	defer close(shutdown)
	metricC := make(chan telegraf.Metric)

	// dummy receiver for the point channel
	go func() {
		for {
			select {
			case <-metricC:
				// do nothing
			case <-shutdown:
				return
			}
		}
	}()

	for _, input := range a.Config.Inputs {
		acc := NewAccumulator(input.Config, metricC)
		acc.SetTrace(true)
		acc.setDefaultTags(a.Config.Tags)

		fmt.Printf("* Plugin: %s, Collection 1\n", input.Name)
		if input.Config.Interval != 0 {
			fmt.Printf("* Interval: %s\n", input.Config.Interval)
		}
		if err := input.Input.Gather(acc); err != nil {
			return err
		}

		// Special instructions for some inputs. cpu, for example, needs to be
		// run twice in order to return cpu usage percentages.
		switch input.Name {
		case "cpu", "mongodb", "procstat":
			time.Sleep(500 * time.Millisecond)
			fmt.Printf("* Plugin: %s, Collection 2\n", input.Name)
			if err := input.Input.Gather(acc); err != nil {
				return err
			}
		}

	}
	return nil
}

// flush writes a list of metrics to all configured outputs
func (a *Agent) flush() {
	var wg sync.WaitGroup

	wg.Add(len(a.Config.Outputs))
	for _, o := range a.Config.Outputs {
		go func(output *internal_models.RunningOutput) {
			defer wg.Done()
			err := output.Write()
			if err != nil {
				log.Printf("Error writing to output [%s]: %s\n",
					output.Name, err.Error())
			}
		}(o)
	}

	wg.Wait()
}

// flusher monitors the metrics input channel and flushes on the minimum interval
func (a *Agent) flusher(shutdown chan struct{}, metricC chan telegraf.Metric) error {
	// Inelegant, but this sleep is to allow the Gather threads to run, so that
	// the flusher will flush after metrics are collected.
	time.Sleep(time.Millisecond * 200)

	ticker := time.NewTicker(a.Config.Agent.FlushInterval.Duration)

	for {
		select {
		case <-shutdown:
			log.Println("Hang on, flushing any cached metrics before shutdown")
			a.flush()
			return nil
		case <-ticker.C:
			internal.RandomSleep(a.Config.Agent.FlushJitter.Duration, shutdown)
			a.flush()
		case m := <-metricC:
			for _, o := range a.Config.Outputs {
				o.AddMetric(m)
			}
		}
	}
}

// Run runs the agent daemon, gathering every Interval
func (a *Agent) Run(shutdown chan struct{}) error {
	var wg sync.WaitGroup

	log.Printf("Agent Config: Interval:%s, Debug:%#v, Quiet:%#v, Hostname:%#v, "+
		"Flush Interval:%s \n",
		a.Config.Agent.Interval.Duration, a.Config.Agent.Debug, a.Config.Agent.Quiet,
		a.Config.Agent.Hostname, a.Config.Agent.FlushInterval.Duration)

	// channel shared between all input threads for accumulating metrics
	metricC := make(chan telegraf.Metric, 10000)

	for _, input := range a.Config.Inputs {
		// Start service of any ServicePlugins
		switch p := input.Input.(type) {
		case telegraf.ServiceInput:
			acc := NewAccumulator(input.Config, metricC)
			acc.SetDebug(a.Config.Agent.Debug)
			acc.setDefaultTags(a.Config.Tags)
			if err := p.Start(acc); err != nil {
				log.Printf("Service for input %s failed to start, exiting\n%s\n",
					input.Name, err.Error())
				return err
			}
			defer p.Stop()
		}
	}

	// Round collection to nearest interval by sleeping
	if a.Config.Agent.RoundInterval {
		i := int64(a.Config.Agent.Interval.Duration)
		time.Sleep(time.Duration(i - (time.Now().UnixNano() % i)))
	}

	wg.Add(1)
	go func() {
		defer wg.Done()
		if err := a.flusher(shutdown, metricC); err != nil {
			log.Printf("Flusher routine failed, exiting: %s\n", err.Error())
			close(shutdown)
		}
	}()

	wg.Add(len(a.Config.Inputs))
	for _, input := range a.Config.Inputs {
		interval := a.Config.Agent.Interval.Duration
		// overwrite global interval if this plugin has its own.
		if input.Config.Interval != 0 {
			interval = input.Config.Interval
		}
		go func(in *internal_models.RunningInput, interv time.Duration) {
			defer wg.Done()
			if err := a.gatherer(shutdown, in, interv, metricC); err != nil {
				log.Printf(err.Error())
			}
		}(input, interval)
	}

	wg.Wait()
	return nil
}
111	agent/agent_test.go	Normal file
@@ -0,0 +1,111 @@
package agent

import (
	"testing"

	"github.com/influxdata/telegraf/internal/config"

	// needing to load the plugins
	_ "github.com/influxdata/telegraf/plugins/inputs/all"
	// needing to load the outputs
	_ "github.com/influxdata/telegraf/plugins/outputs/all"

	"github.com/stretchr/testify/assert"
)

func TestAgent_OmitHostname(t *testing.T) {
	c := config.NewConfig()
	c.Agent.OmitHostname = true
	_, err := NewAgent(c)
	assert.NoError(t, err)
	assert.NotContains(t, c.Tags, "host")
}

func TestAgent_LoadPlugin(t *testing.T) {
	c := config.NewConfig()
	c.InputFilters = []string{"mysql"}
	err := c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
	assert.NoError(t, err)
	a, _ := NewAgent(c)
	assert.Equal(t, 1, len(a.Config.Inputs))

	c = config.NewConfig()
	c.InputFilters = []string{"foo"}
	err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
	assert.NoError(t, err)
	a, _ = NewAgent(c)
	assert.Equal(t, 0, len(a.Config.Inputs))

	c = config.NewConfig()
	c.InputFilters = []string{"mysql", "foo"}
	err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
	assert.NoError(t, err)
	a, _ = NewAgent(c)
	assert.Equal(t, 1, len(a.Config.Inputs))

	c = config.NewConfig()
	c.InputFilters = []string{"mysql", "redis"}
	err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
	assert.NoError(t, err)
	a, _ = NewAgent(c)
	assert.Equal(t, 2, len(a.Config.Inputs))

	c = config.NewConfig()
	c.InputFilters = []string{"mysql", "foo", "redis", "bar"}
	err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
	assert.NoError(t, err)
	a, _ = NewAgent(c)
	assert.Equal(t, 2, len(a.Config.Inputs))
}

func TestAgent_LoadOutput(t *testing.T) {
	c := config.NewConfig()
	c.OutputFilters = []string{"influxdb"}
	err := c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
	assert.NoError(t, err)
	a, _ := NewAgent(c)
	assert.Equal(t, 2, len(a.Config.Outputs))

	c = config.NewConfig()
	c.OutputFilters = []string{"kafka"}
	err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
	assert.NoError(t, err)
	a, _ = NewAgent(c)
	assert.Equal(t, 1, len(a.Config.Outputs))

	c = config.NewConfig()
	c.OutputFilters = []string{}
	err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
	assert.NoError(t, err)
	a, _ = NewAgent(c)
	assert.Equal(t, 3, len(a.Config.Outputs))

	c = config.NewConfig()
	c.OutputFilters = []string{"foo"}
	err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
	assert.NoError(t, err)
	a, _ = NewAgent(c)
	assert.Equal(t, 0, len(a.Config.Outputs))

	c = config.NewConfig()
	c.OutputFilters = []string{"influxdb", "foo"}
	err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
	assert.NoError(t, err)
	a, _ = NewAgent(c)
	assert.Equal(t, 2, len(a.Config.Outputs))

	c = config.NewConfig()
	c.OutputFilters = []string{"influxdb", "kafka"}
	err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
	assert.NoError(t, err)
	assert.Equal(t, 3, len(c.Outputs))
	a, _ = NewAgent(c)
	assert.Equal(t, 3, len(a.Config.Outputs))

	c = config.NewConfig()
	c.OutputFilters = []string{"influxdb", "foo", "kafka", "bar"}
	err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
	assert.NoError(t, err)
	a, _ = NewAgent(c)
	assert.Equal(t, 3, len(a.Config.Outputs))
}
156	agent_test.go
@@ -1,156 +0,0 @@
package telegraf

import (
	"github.com/stretchr/testify/assert"
	"testing"
	"time"

	"github.com/influxdb/telegraf/internal/config"

	// needing to load the plugins
	_ "github.com/influxdb/telegraf/plugins/all"
	// needing to load the outputs
	_ "github.com/influxdb/telegraf/outputs/all"
)

func TestAgent_LoadPlugin(t *testing.T) {
	c := config.NewConfig()
	c.PluginFilters = []string{"mysql"}
	c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
	a, _ := NewAgent(c)
	assert.Equal(t, 1, len(a.Config.Plugins))

	c = config.NewConfig()
	c.PluginFilters = []string{"foo"}
	c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
	a, _ = NewAgent(c)
	assert.Equal(t, 0, len(a.Config.Plugins))

	c = config.NewConfig()
	c.PluginFilters = []string{"mysql", "foo"}
	c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
	a, _ = NewAgent(c)
	assert.Equal(t, 1, len(a.Config.Plugins))

	c = config.NewConfig()
	c.PluginFilters = []string{"mysql", "redis"}
	c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
	a, _ = NewAgent(c)
	assert.Equal(t, 2, len(a.Config.Plugins))

	c = config.NewConfig()
	c.PluginFilters = []string{"mysql", "foo", "redis", "bar"}
	c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
	a, _ = NewAgent(c)
	assert.Equal(t, 2, len(a.Config.Plugins))
}

func TestAgent_LoadOutput(t *testing.T) {
	c := config.NewConfig()
	c.OutputFilters = []string{"influxdb"}
	c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
	a, _ := NewAgent(c)
	assert.Equal(t, 2, len(a.Config.Outputs))

	c = config.NewConfig()
	c.OutputFilters = []string{}
	c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
	a, _ = NewAgent(c)
	assert.Equal(t, 3, len(a.Config.Outputs))

	c = config.NewConfig()
	c.OutputFilters = []string{"foo"}
	c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
	a, _ = NewAgent(c)
	assert.Equal(t, 0, len(a.Config.Outputs))

	c = config.NewConfig()
	c.OutputFilters = []string{"influxdb", "foo"}
	c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
	a, _ = NewAgent(c)
	assert.Equal(t, 2, len(a.Config.Outputs))

	c = config.NewConfig()
	c.OutputFilters = []string{"influxdb", "kafka"}
	c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
	a, _ = NewAgent(c)
	assert.Equal(t, 3, len(a.Config.Outputs))

	c = config.NewConfig()
	c.OutputFilters = []string{"influxdb", "foo", "kafka", "bar"}
	c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
	a, _ = NewAgent(c)
	assert.Equal(t, 3, len(a.Config.Outputs))
}

func TestAgent_ZeroJitter(t *testing.T) {
	flushinterval := jitterInterval(time.Duration(10*time.Second),
		time.Duration(0*time.Second))

	actual := flushinterval.Nanoseconds()
	exp := time.Duration(10 * time.Second).Nanoseconds()

	if actual != exp {
		t.Errorf("Actual %v, expected %v", actual, exp)
	}
}

func TestAgent_ZeroInterval(t *testing.T) {
	min := time.Duration(500 * time.Millisecond).Nanoseconds()
	max := time.Duration(5 * time.Second).Nanoseconds()

	for i := 0; i < 1000; i++ {
		flushinterval := jitterInterval(time.Duration(0*time.Second),
			time.Duration(5*time.Second))
		actual := flushinterval.Nanoseconds()

		if actual > max {
			t.Errorf("Didn't expect interval %d to be > %d", actual, max)
			break
		}
		if actual < min {
			t.Errorf("Didn't expect interval %d to be < %d", actual, min)
			break
		}
	}
}

func TestAgent_ZeroBoth(t *testing.T) {
	flushinterval := jitterInterval(time.Duration(0*time.Second),
		time.Duration(0*time.Second))

	actual := flushinterval
	exp := time.Duration(500 * time.Millisecond)

	if actual != exp {
		t.Errorf("Actual %v, expected %v", actual, exp)
	}
}

func TestAgent_JitterMax(t *testing.T) {
	max := time.Duration(32 * time.Second).Nanoseconds()

	for i := 0; i < 1000; i++ {
		flushinterval := jitterInterval(time.Duration(30*time.Second),
			time.Duration(2*time.Second))
		actual := flushinterval.Nanoseconds()
		if actual > max {
			t.Errorf("Didn't expect interval %d to be > %d", actual, max)
			break
		}
	}
}

func TestAgent_JitterMin(t *testing.T) {
	min := time.Duration(30 * time.Second).Nanoseconds()

	for i := 0; i < 1000; i++ {
		flushinterval := jitterInterval(time.Duration(30*time.Second),
			time.Duration(2*time.Second))
		actual := flushinterval.Nanoseconds()
		if actual < min {
			t.Errorf("Didn't expect interval %d to be < %d", actual, min)
			break
		}
	}
}
11	circle.yml
@@ -4,16 +4,17 @@ machine:
   post:
     - sudo service zookeeper stop
     - go version
-    - go version | grep 1.5.1 || sudo rm -rf /usr/local/go
+    - go version | grep 1.6.2 || sudo rm -rf /usr/local/go
-    - wget https://storage.googleapis.com/golang/go1.5.1.linux-amd64.tar.gz
+    - wget https://storage.googleapis.com/golang/go1.6.2.linux-amd64.tar.gz
-    - sudo tar -C /usr/local -xzf go1.5.1.linux-amd64.tar.gz
+    - sudo tar -C /usr/local -xzf go1.6.2.linux-amd64.tar.gz
     - go version
 
 dependencies:
-  cache_directories:
-    - "~/telegraf-build/src"
   override:
     - docker info
+  post:
+    - gem install fpm
+    - sudo apt-get install -y rpm python-boto
 
 test:
   override:
@@ -7,146 +7,275 @@ import (
"os"
|
"os"
|
||||||
"os/signal"
|
"os/signal"
|
||||||
"strings"
|
"strings"
|
||||||
|
"syscall"
|
||||||
|
|
||||||
"github.com/influxdb/telegraf"
|
"github.com/influxdata/telegraf/agent"
|
||||||
"github.com/influxdb/telegraf/internal/config"
|
"github.com/influxdata/telegraf/internal/config"
|
||||||
_ "github.com/influxdb/telegraf/outputs/all"
|
"github.com/influxdata/telegraf/plugins/inputs"
|
||||||
_ "github.com/influxdb/telegraf/plugins/all"
|
_ "github.com/influxdata/telegraf/plugins/inputs/all"
|
||||||
|
"github.com/influxdata/telegraf/plugins/outputs"
|
||||||
|
_ "github.com/influxdata/telegraf/plugins/outputs/all"
|
||||||
)
|
)
|
||||||
|
|
||||||
var fDebug = flag.Bool("debug", false,
|
var fDebug = flag.Bool("debug", false,
|
||||||
"show metrics as they're generated to stdout")
|
"show metrics as they're generated to stdout")
|
||||||
|
var fQuiet = flag.Bool("quiet", false,
|
||||||
|
"run in quiet mode")
|
||||||
var fTest = flag.Bool("test", false, "gather metrics, print them out, and exit")
|
var fTest = flag.Bool("test", false, "gather metrics, print them out, and exit")
|
||||||
var fConfig = flag.String("config", "", "configuration file to load")
|
var fConfig = flag.String("config", "", "configuration file to load")
|
||||||
var fConfigDirectory = flag.String("configdirectory", "",
|
var fConfigDirectory = flag.String("config-directory", "",
|
||||||
"directory containing additional *.conf files")
|
"directory containing additional *.conf files")
|
||||||
var fVersion = flag.Bool("version", false, "display the version")
|
var fVersion = flag.Bool("version", false, "display the version")
|
||||||
var fSampleConfig = flag.Bool("sample-config", false,
|
var fSampleConfig = flag.Bool("sample-config", false,
|
||||||
"print out full sample configuration")
|
"print out full sample configuration")
|
||||||
var fPidfile = flag.String("pidfile", "", "file to write our pid to")
|
var fPidfile = flag.String("pidfile", "", "file to write our pid to")
|
||||||
var fPLuginFilters = flag.String("filter", "",
|
var fInputFilters = flag.String("input-filter", "",
|
||||||
"filter the plugins to enable, separator is :")
|
"filter the inputs to enable, separator is :")
|
||||||
var fOutputFilters = flag.String("outputfilter", "",
|
var fInputList = flag.Bool("input-list", false,
|
||||||
|
"print available input plugins.")
|
||||||
|
var fOutputFilters = flag.String("output-filter", "",
|
||||||
"filter the outputs to enable, separator is :")
|
"filter the outputs to enable, separator is :")
|
||||||
|
var fOutputList = flag.Bool("output-list", false,
|
||||||
|
"print available output plugins.")
|
||||||
var fUsage = flag.String("usage", "",
|
var fUsage = flag.String("usage", "",
|
||||||
"print usage for a plugin, ie, 'telegraf -usage mysql'")
|
"print usage for a plugin, ie, 'telegraf -usage mysql'")
|
||||||
|
var fInputFiltersLegacy = flag.String("filter", "",
|
||||||
|
"filter the inputs to enable, separator is :")
|
||||||
|
var fOutputFiltersLegacy = flag.String("outputfilter", "",
|
||||||
|
"filter the outputs to enable, separator is :")
|
||||||
|
var fConfigDirectoryLegacy = flag.String("configdirectory", "",
|
||||||
|
"directory containing additional *.conf files")
|
||||||
|
|
||||||
// Telegraf version
|
// Telegraf version, populated linker.
|
||||||
// -ldflags "-X main.Version=`git describe --always --tags`"
|
// ie, -ldflags "-X main.version=`git describe --always --tags`"
|
||||||
var Version string
|
var (
|
||||||
|
version string
|
||||||
|
commit string
|
||||||
|
branch string
|
||||||
|
)
|
||||||
|
|
||||||
|
const usage = `Telegraf, The plugin-driven server agent for collecting and reporting metrics.
|
||||||
|
|
||||||
|
Usage:
|
||||||
|
|
||||||
|
telegraf <flags>
|
||||||
|
|
||||||
|
The flags are:
|
||||||
|
|
||||||
|
-config <file> configuration file to load
|
||||||
|
-test gather metrics once, print them to stdout, and exit
|
||||||
|
-sample-config print out full sample configuration to stdout
|
||||||
|
-config-directory directory containing additional *.conf files
|
||||||
|
-input-filter filter the input plugins to enable, separator is :
|
||||||
|
-input-list print all the plugins inputs
|
||||||
|
-output-filter filter the output plugins to enable, separator is :
|
||||||
|
-output-list print all the available outputs
|
||||||
|
-usage print usage for a plugin, ie, 'telegraf -usage mysql'
|
||||||
|
-debug print metrics as they're generated to stdout
|
||||||
|
-quiet run in quiet mode
|
||||||
|
-version print the version to stdout
|
||||||
|
|
||||||
|
In addition to the -config flag, telegraf will also load the config file from
|
||||||
|
an environment variable or default location. Precedence is:
|
||||||
|
1. -config flag
|
||||||
|
2. $TELEGRAF_CONFIG_PATH environment variable
|
||||||
|
3. $HOME/.telegraf/telegraf.conf
|
||||||
|
4. /etc/telegraf/telegraf.conf
|
||||||
|
|
||||||
|
Examples:
|
||||||
|
|
||||||
|
# generate a telegraf config file:
|
||||||
|
telegraf -sample-config > telegraf.conf
|
||||||
|
|
||||||
|
# generate config with only cpu input & influxdb output plugins defined
|
||||||
|
telegraf -sample-config -input-filter cpu -output-filter influxdb
|
||||||
|
|
||||||
|
# run a single telegraf collection, outputing metrics to stdout
|
||||||
|
telegraf -config telegraf.conf -test
|
||||||
|
|
||||||
|
# run telegraf with all plugins defined in config file
|
||||||
|
telegraf -config telegraf.conf
|
||||||
|
|
||||||
|
# run telegraf, enabling the cpu & memory input, and influxdb output plugins
|
||||||
|
telegraf -config telegraf.conf -input-filter cpu:mem -output-filter influxdb
|
||||||
|
`
|
||||||
|
|
||||||
func main() {
|
func main() {
|
||||||
flag.Parse()
|
reload := make(chan bool, 1)
|
||||||
|
reload <- true
|
||||||
|
for <-reload {
|
||||||
|
reload <- false
|
||||||
|
flag.Usage = func() { usageExit(0) }
|
||||||
|
flag.Parse()
|
||||||
|
args := flag.Args()
|
||||||
|
|
||||||
var pluginFilters []string
|
var inputFilters []string
|
||||||
if *fPLuginFilters != "" {
|
if *fInputFiltersLegacy != "" {
|
||||||
pluginsFilter := strings.TrimSpace(*fPLuginFilters)
|
fmt.Printf("WARNING '--filter' flag is deprecated, please use" +
|
||||||
pluginFilters = strings.Split(":"+pluginsFilter+":", ":")
|
" '--input-filter'")
|
||||||
}
|
inputFilter := strings.TrimSpace(*fInputFiltersLegacy)
|
||||||
|
inputFilters = strings.Split(":"+inputFilter+":", ":")
|
||||||
|
}
|
||||||
|
if *fInputFilters != "" {
|
||||||
|
inputFilter := strings.TrimSpace(*fInputFilters)
|
||||||
|
inputFilters = strings.Split(":"+inputFilter+":", ":")
|
||||||
|
}
|
||||||
|
|
||||||
var outputFilters []string
|
var outputFilters []string
|
||||||
if *fOutputFilters != "" {
|
if *fOutputFiltersLegacy != "" {
|
||||||
outputFilter := strings.TrimSpace(*fOutputFilters)
|
fmt.Printf("WARNING '--outputfilter' flag is deprecated, please use" +
|
||||||
outputFilters = strings.Split(":"+outputFilter+":", ":")
|
" '--output-filter'")
|
||||||
}
|
outputFilter := strings.TrimSpace(*fOutputFiltersLegacy)
|
||||||
|
outputFilters = strings.Split(":"+outputFilter+":", ":")
|
||||||
|
}
|
||||||
|
if *fOutputFilters != "" {
|
||||||
|
outputFilter := strings.TrimSpace(*fOutputFilters)
|
||||||
|
outputFilters = strings.Split(":"+outputFilter+":", ":")
|
||||||
|
}
|
||||||
|
|
||||||
if *fVersion {
|
if len(args) > 0 {
|
||||||
v := fmt.Sprintf("Telegraf - Version %s", Version)
|
switch args[0] {
|
||||||
fmt.Println(v)
|
case "version":
|
||||||
return
|
v := fmt.Sprintf("Telegraf - version %s", version)
|
||||||
}
|
fmt.Println(v)
|
||||||
|
return
|
||||||
if *fSampleConfig {
|
case "config":
|
||||||
config.PrintSampleConfig(pluginFilters, outputFilters)
|
config.PrintSampleConfig(inputFilters, outputFilters)
|
||||||
return
|
return
|
||||||
}
|
|
||||||
|
|
||||||
if *fUsage != "" {
|
|
||||||
if err := config.PrintPluginConfig(*fUsage); err != nil {
|
|
||||||
if err2 := config.PrintOutputConfig(*fUsage); err2 != nil {
|
|
||||||
log.Fatalf("%s and %s", err, err2)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
if *fOutputList {
|
||||||
c *config.Config
|
fmt.Println("Available Output Plugins:")
|
||||||
err error
|
for k, _ := range outputs.Outputs {
|
||||||
)
|
fmt.Printf(" %s\n", k)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
if *fConfig != "" {
|
if *fInputList {
|
||||||
c = config.NewConfig()
|
fmt.Println("Available Input Plugins:")
|
||||||
|
for k, _ := range inputs.Inputs {
|
||||||
|
fmt.Printf(" %s\n", k)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if *fVersion {
|
||||||
|
v := fmt.Sprintf("Telegraf - version %s", version)
|
||||||
|
fmt.Println(v)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if *fSampleConfig {
|
||||||
|
config.PrintSampleConfig(inputFilters, outputFilters)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if *fUsage != "" {
|
||||||
|
if err := config.PrintInputConfig(*fUsage); err != nil {
|
||||||
|
if err2 := config.PrintOutputConfig(*fUsage); err2 != nil {
|
||||||
|
log.Fatalf("%s and %s", err, err2)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// If no other options are specified, load the config file and run.
|
||||||
|
c := config.NewConfig()
|
||||||
c.OutputFilters = outputFilters
|
c.OutputFilters = outputFilters
|
||||||
c.PluginFilters = pluginFilters
|
c.InputFilters = inputFilters
|
||||||
err = c.LoadConfig(*fConfig)
|
err := c.LoadConfig(*fConfig)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println(err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
if *fConfigDirectoryLegacy != "" {
|
||||||
|
fmt.Printf("WARNING '--configdirectory' flag is deprecated, please use" +
|
||||||
|
" '--config-directory'")
|
||||||
|
err = c.LoadDirectory(*fConfigDirectoryLegacy)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if *fConfigDirectory != "" {
|
||||||
|
err = c.LoadDirectory(*fConfigDirectory)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if len(c.Outputs) == 0 {
|
||||||
|
log.Fatalf("Error: no outputs found, did you provide a valid config file?")
|
||||||
|
}
|
||||||
|
if len(c.Inputs) == 0 {
|
||||||
|
log.Fatalf("Error: no inputs found, did you provide a valid config file?")
|
||||||
|
}
|
||||||
|
|
||||||
|
ag, err := agent.NewAgent(c)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatal(err)
|
log.Fatal(err)
|
||||||
}
|
}
|
||||||
} else {
|
|
||||||
fmt.Println("Usage: Telegraf")
|
|
||||||
flag.PrintDefaults()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if *fConfigDirectory != "" {
|
if *fDebug {
|
||||||
err = c.LoadDirectory(*fConfigDirectory)
|
ag.Config.Agent.Debug = true
|
||||||
|
}
|
||||||
|
|
||||||
|
if *fQuiet {
|
||||||
|
ag.Config.Agent.Quiet = true
|
||||||
|
}
|
||||||
|
|
||||||
|
if *fTest {
|
||||||
|
err = ag.Test()
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
err = ag.Connect()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatal(err)
|
log.Fatal(err)
|
||||||
}
|
}
|
||||||
}
|
|
||||||
if len(c.Outputs) == 0 {
|
|
||||||
log.Fatalf("Error: no outputs found, did you provide a valid config file?")
|
|
||||||
}
|
|
||||||
if len(c.Plugins) == 0 {
|
|
||||||
log.Fatalf("Error: no plugins found, did you provide a valid config file?")
|
|
||||||
}
|
|
||||||
|
|
||||||
ag, err := telegraf.NewAgent(c)
|
shutdown := make(chan struct{})
|
||||||
if err != nil {
|
signals := make(chan os.Signal)
|
||||||
log.Fatal(err)
|
signal.Notify(signals, os.Interrupt, syscall.SIGHUP)
|
||||||
}
|
go func() {
|
||||||
|
sig := <-signals
|
||||||
|
if sig == os.Interrupt {
|
||||||
|
close(shutdown)
|
||||||
|
}
|
||||||
|
if sig == syscall.SIGHUP {
|
||||||
|
log.Printf("Reloading Telegraf config\n")
|
||||||
|
<-reload
|
||||||
|
reload <- true
|
||||||
|
close(shutdown)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
if *fDebug {
|
log.Printf("Starting Telegraf (version %s)\n", version)
|
||||||
ag.Config.Agent.Debug = true
|
log.Printf("Loaded outputs: %s", strings.Join(c.OutputNames(), " "))
|
||||||
}
|
log.Printf("Loaded inputs: %s", strings.Join(c.InputNames(), " "))
|
||||||
|
log.Printf("Tags enabled: %s", c.ListTags())
|
||||||
|
|
||||||
if *fTest {
|
if *fPidfile != "" {
|
||||||
err = ag.Test()
|
f, err := os.Create(*fPidfile)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatal(err)
|
log.Fatalf("Unable to create pidfile: %s", err)
|
||||||
}
|
}
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
err = ag.Connect()
|
fmt.Fprintf(f, "%d\n", os.Getpid())
|
||||||
if err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
shutdown := make(chan struct{})
|
f.Close()
|
||||||
signals := make(chan os.Signal)
|
|
||||||
signal.Notify(signals, os.Interrupt)
|
|
||||||
go func() {
|
|
||||||
<-signals
|
|
||||||
close(shutdown)
|
|
||||||
}()
|
|
||||||
|
|
||||||
log.Printf("Starting Telegraf (version %s)\n", Version)
|
|
||||||
log.Printf("Loaded outputs: %s", strings.Join(c.OutputNames(), " "))
|
|
||||||
log.Printf("Loaded plugins: %s", strings.Join(c.PluginNames(), " "))
|
|
||||||
log.Printf("Tags enabled: %s", c.ListTags())
|
|
||||||
|
|
||||||
if *fPidfile != "" {
|
|
||||||
f, err := os.Create(*fPidfile)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("Unable to create pidfile: %s", err)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Fprintf(f, "%d\n", os.Getpid())
|
ag.Run(shutdown)
|
||||||
|
|
||||||
f.Close()
|
|
||||||
}
|
}
|
||||||
|
}
|
||||||
ag.Run(shutdown)
|
|
||||||
|
func usageExit(rc int) {
|
||||||
|
fmt.Println(usage)
|
||||||
|
os.Exit(rc)
|
||||||
}
|
}
|
||||||
277	docs/CONFIGURATION.md	Normal file
@@ -0,0 +1,277 @@

# Telegraf Configuration

## Generating a Configuration File

A default Telegraf config file can be generated using the -sample-config flag:

```
telegraf -sample-config > telegraf.conf
```

To generate a file with specific inputs and outputs, you can use the
-input-filter and -output-filter flags:

```
telegraf -sample-config -input-filter cpu:mem:net:swap -output-filter influxdb:kafka
```

You can see the latest config file with all available plugins here:
[telegraf.conf](https://github.com/influxdata/telegraf/blob/master/etc/telegraf.conf)

## Environment Variables

Environment variables can be used anywhere in the config file; simply prepend
them with $. For strings the variable must be within quotes (ie, "$STR_VAR");
for numbers and booleans it should be plain (ie, $INT_VAR, $BOOL_VAR).
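
A minimal sketch of this, using the same variable names that the config tests
later in this diff exercise via `single_plugin_env_vars.toml` (the values are
illustrative):

```toml
# assuming MY_TEST_SERVER=192.168.1.1 and TEST_INTERVAL=10s are set in
# Telegraf's environment
[[inputs.memcached]]
  servers = ["$MY_TEST_SERVER"]  # string value, so the variable is quoted
  interval = "$TEST_INTERVAL"    # durations are strings, so also quoted
```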

## `[global_tags]` Configuration

Global tags can be specified in the `[global_tags]` section of the config file
in key="value" format. All metrics being gathered on this host will be tagged
with the tags specified here.
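
For example, mirroring the commented-out tags in the sample Windows config
later in this diff:

```toml
[global_tags]
  # these tags are applied to every metric gathered on this host
  dc = "us-east-1"
  rack = "1a"
```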

## `[agent]` Configuration

Telegraf has a few options you can configure under the `agent` section of the
config (a sample block follows the list):

* **interval**: Default data collection interval for all inputs.
* **round_interval**: Rounds collection interval to 'interval';
ie, if interval="10s" then always collect on :00, :10, :20, etc.
* **metric_batch_size**: Telegraf will send metrics to outputs in batches of at
most metric_batch_size metrics.
* **metric_buffer_limit**: Telegraf will cache metric_buffer_limit metrics
for each output, and will flush this buffer on a successful write.
This should be a multiple of metric_batch_size and no less
than 2 times metric_batch_size.
* **collection_jitter**: Collection jitter is used to jitter
the collection by a random amount.
Each plugin will sleep for a random time within jitter before collecting.
This can be used to avoid many plugins querying things like sysfs at the
same time, which can have a measurable effect on the system.
* **flush_interval**: Default data flushing interval for all outputs.
You should not set this below interval.
Maximum flush_interval will be flush_interval + flush_jitter.
* **flush_jitter**: Jitter the flush interval by a random amount.
This is primarily to avoid
large write spikes for users running a large number of telegraf instances;
ie, a jitter of 5s and flush_interval 10s means flushes will happen every 10-15s.
* **debug**: Run telegraf in debug mode.
* **quiet**: Run telegraf in quiet mode.
* **hostname**: Override default hostname; if empty, use os.Hostname().
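
A minimal `[agent]` block using these options; the values mirror the defaults
in the sample configs included later in this diff:

```toml
[agent]
  interval = "10s"
  round_interval = true
  metric_buffer_limit = 1000
  collection_jitter = "0s"
  flush_interval = "10s"
  flush_jitter = "0s"
  debug = false
  quiet = false
  hostname = ""
```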

#### Measurement Filtering

Filters can be configured per input or output; see below for examples.

* **namepass**: An array of strings that is used to filter metrics generated by the
current input. Each string in the array is tested as a glob match against
measurement names and, if it matches, the metric is emitted.
* **namedrop**: The inverse of namepass; if a measurement name matches, it is not emitted.
* **fieldpass**: An array of strings that is used to filter metrics generated by the
current input. Each string in the array is tested as a glob match against field names
and, if it matches, the field is emitted. fieldpass is not available for outputs.
* **fielddrop**: The inverse of fieldpass; if a field name matches, it is not emitted.
fielddrop is not available for outputs.
* **tagpass**: tag names and arrays of strings that are used to filter
measurements by the current input. Each string in the array is tested as a glob
match against the tag name and, if it matches, the measurement is emitted.
* **tagdrop**: The inverse of tagpass. If a tag matches, the measurement is not
emitted. This is tested on measurements that have passed the tagpass test.
* **tagexclude**: tagexclude can be used to exclude a tag from measurement(s).
As opposed to tagdrop, which will drop an entire measurement based on its
tags, tagexclude simply strips the given tag keys from the measurement. This
can be used on inputs & outputs, but it is _recommended_ to be used on inputs,
as it is more efficient to filter out tags at the ingestion point.
* **taginclude**: taginclude is the inverse of tagexclude. It will only include
the tag keys in the final measurement.

## Input Configuration

Some configuration options are configurable per input:

* **name_override**: Override the base name of the measurement.
(Default is the name of the input.)
* **name_prefix**: Specifies a prefix to attach to the measurement name.
* **name_suffix**: Specifies a suffix to attach to the measurement name.
* **tags**: A map of tags to apply to a specific input's measurements.
* **interval**: How often to gather this metric. Normal plugins use a single
global interval, but if one particular input should be run less or more often,
you can configure that here.

#### Input Configuration Examples

This is a full working config that will output CPU data to an InfluxDB instance
at 192.168.59.103:8086, tagging measurements with dc="denver-1". It will output
measurements at a 10s interval and will collect per-cpu data, dropping any
fields which begin with `time_`.

```toml
[global_tags]
  dc = "denver-1"

[agent]
  interval = "10s"

# OUTPUTS
[[outputs.influxdb]]
  url = "http://192.168.59.103:8086" # required.
  database = "telegraf" # required.
  precision = "s"

# INPUTS
[[inputs.cpu]]
  percpu = true
  totalcpu = false
  # filter all fields beginning with 'time_'
  fielddrop = ["time_*"]
```

#### Input Config: tagpass and tagdrop

```toml
[[inputs.cpu]]
  percpu = true
  totalcpu = false
  fielddrop = ["cpu_time"]
  # Don't collect CPU data for cpu6 & cpu7
  [inputs.cpu.tagdrop]
    cpu = [ "cpu6", "cpu7" ]

[[inputs.disk]]
  [inputs.disk.tagpass]
    # tagpass conditions are OR, not AND.
    # If the (filesystem is ext4 or xfs) OR (the path is /opt or /home)
    # then the metric passes
    fstype = [ "ext4", "xfs" ]
    # Globs can also be used on the tag values
    path = [ "/opt", "/home*" ]
```

#### Input Config: fieldpass and fielddrop

```toml
# Drop all metrics for guest & steal CPU usage
[[inputs.cpu]]
  percpu = false
  totalcpu = true
  fielddrop = ["usage_guest", "usage_steal"]

# Only store inode related metrics for disks
[[inputs.disk]]
  fieldpass = ["inodes*"]
```

#### Input Config: namepass and namedrop

```toml
# Drop all metrics about containers for kubelet
[[inputs.prometheus]]
  urls = ["http://kube-node-1:4194/metrics"]
  namedrop = ["container_*"]

# Only store rest client related metrics for kubelet
[[inputs.prometheus]]
  urls = ["http://kube-node-1:4194/metrics"]
  namepass = ["rest_client_*"]
```

#### Input Config: taginclude and tagexclude

```toml
# Only include the "cpu" tag in the measurements for the cpu plugin.
[[inputs.cpu]]
  percpu = true
  totalcpu = true
  taginclude = ["cpu"]

# Exclude the "fstype" tag from the measurements for the disk plugin.
[[inputs.disk]]
  tagexclude = ["fstype"]
```

#### Input config: prefix, suffix, and override

This plugin will emit measurements with the name `cpu_total`:

```toml
[[inputs.cpu]]
  name_suffix = "_total"
  percpu = false
  totalcpu = true
```

This will emit measurements with the name `foobar`:

```toml
[[inputs.cpu]]
  name_override = "foobar"
  percpu = false
  totalcpu = true
```

#### Input config: tags

This plugin will emit measurements with two additional tags: `tag1=foo` and
`tag2=bar`.

NOTE: Order matters; the `[inputs.cpu.tags]` table must be at the _end_ of the
plugin definition.

```toml
[[inputs.cpu]]
  percpu = false
  totalcpu = true
  [inputs.cpu.tags]
    tag1 = "foo"
    tag2 = "bar"
```

#### Multiple inputs of the same type

Additional inputs (or outputs) of the same type can be specified by
defining more instances in the config file. It is highly recommended that
you utilize `name_override`, `name_prefix`, or `name_suffix` config options
to avoid measurement collisions:

```toml
[[inputs.cpu]]
  percpu = false
  totalcpu = true

[[inputs.cpu]]
  percpu = true
  totalcpu = false
  name_override = "percpu_usage"
  fielddrop = ["cpu_time*"]
```

## Output Configuration

Telegraf also supports specifying multiple output sinks to send data to.
Configuring each output sink is different, but examples can be
found by running `telegraf -sample-config`.

```toml
[[outputs.influxdb]]
  urls = [ "http://localhost:8086" ]
  database = "telegraf"
  precision = "s"
  # Drop all measurements that start with "aerospike"
  namedrop = ["aerospike*"]

[[outputs.influxdb]]
  urls = [ "http://localhost:8086" ]
  database = "telegraf-aerospike-data"
  precision = "s"
  # Only accept aerospike data:
  namepass = ["aerospike*"]

[[outputs.influxdb]]
  urls = [ "http://localhost:8086" ]
  database = "telegraf-cpu0-data"
  precision = "s"
  # Only store measurements where the tag "cpu" matches the value "cpu0"
  [outputs.influxdb.tagpass]
    cpu = ["cpu0"]
```
374	docs/DATA_FORMATS_INPUT.md	Normal file
@@ -0,0 +1,374 @@

# Telegraf Input Data Formats

Telegraf is able to parse the following input data formats into metrics:

1. [InfluxDB Line Protocol](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#influx)
1. [JSON](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#json)
1. [Graphite](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#graphite)
1. [Value](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#value), ie: 45 or "booyah"
1. [Nagios](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#nagios) (exec input only)

Telegraf metrics, like InfluxDB
[points](https://docs.influxdata.com/influxdb/v0.10/write_protocols/line/),
are a combination of four basic parts:

1. Measurement Name
1. Tags
1. Fields
1. Timestamp

These four parts are easily defined when using InfluxDB line-protocol as a
data format. But there are other data formats that users may want to use which
require more advanced configuration to create usable Telegraf metrics.

Plugins such as `exec` and `kafka_consumer` parse textual data. Up until now,
these plugins were statically configured to parse just a single
data format. `exec` mostly only supported parsing JSON, and `kafka_consumer` only
supported data in InfluxDB line-protocol.

But now we are normalizing the parsing of various data formats across all
plugins that can support it. You will be able to identify a plugin that supports
different data formats by the presence of a `data_format` config option, for
example, in the exec plugin:

```toml
[[inputs.exec]]
  ## Commands array
  commands = ["/tmp/test.sh", "/usr/bin/mycollector --foo=bar"]

  ## measurement name suffix (for separating different commands)
  name_suffix = "_mycollector"

  ## Data format to consume.
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "json"

  ## Additional configuration options go here
```

Each data_format has an additional set of configuration options available, which
I'll go over below.

# Influx:

There are no additional configuration options for InfluxDB line-protocol. The
metrics are parsed directly into Telegraf metrics.

#### Influx Configuration:

```toml
[[inputs.exec]]
  ## Commands array
  commands = ["/tmp/test.sh", "/usr/bin/mycollector --foo=bar"]

  ## measurement name suffix (for separating different commands)
  name_suffix = "_mycollector"

  ## Data format to consume.
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "influx"
```

# JSON:

The JSON data format flattens JSON into metric _fields_.
NOTE: Only numerical values are converted to fields, and they are converted
into a float. Strings are ignored unless specified as a tag_key (see below).

So for example, this JSON:

```json
{
    "a": 5,
    "b": {
        "c": 6
    },
    "ignored": "I'm a string"
}
```

would get translated into _fields_ of a measurement:

```
myjsonmetric a=5,b_c=6
```

The _measurement name_ is usually the name of the plugin,
but can be overridden using the `name_override` config option.

#### JSON Configuration:

The JSON data format supports specifying "tag keys". If specified, keys
will be searched for in the root level of the JSON blob. If the key(s) exist,
they will be applied as tags to the Telegraf metrics.

For example, if you had this configuration:

```toml
[[inputs.exec]]
  ## Commands array
  commands = ["/tmp/test.sh", "/usr/bin/mycollector --foo=bar"]

  ## measurement name suffix (for separating different commands)
  name_suffix = "_mycollector"

  ## Data format to consume.
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "json"

  ## List of tag names to extract from top-level of JSON server response
  tag_keys = [
    "my_tag_1",
    "my_tag_2"
  ]
```

with this JSON output from a command:

```json
{
    "a": 5,
    "b": {
        "c": 6
    },
    "my_tag_1": "foo"
}
```

your Telegraf metrics would get tagged with "my_tag_1":

```
exec_mycollector,my_tag_1=foo a=5,b_c=6
```

# Value:

The "value" data format translates single values into Telegraf metrics. This
is done by assigning a measurement name and setting a single field ("value")
as the parsed metric.

#### Value Configuration:

You **must** tell Telegraf what type of metric to collect by using the
`data_type` configuration option. Available options are:

1. integer
2. float or long
3. string
4. boolean

**Note:** It is also recommended that you set `name_override` to a measurement
name that makes sense for your metric; otherwise it will just be set to the
name of the plugin.

```toml
[[inputs.exec]]
  ## Commands array
  commands = ["cat /proc/sys/kernel/random/entropy_avail"]

  ## override the default metric name of "exec"
  name_override = "entropy_available"

  ## Data format to consume.
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "value"
  data_type = "integer" # required
```

# Graphite:

The Graphite data format translates graphite _dot_ buckets directly into
telegraf measurement names, with a single value field, and without any tags.
By default, the separator is left as ".", but this can be changed using the
"separator" argument. For more advanced options,
Telegraf supports specifying "templates" to translate
graphite buckets into Telegraf metrics.

Templates are of the form:

```
"host.mytag.mytag.measurement.measurement.field*"
```

Where the following keywords exist:

1. `measurement`: specifies that this section of the graphite bucket corresponds
to the measurement name. This can be specified multiple times.
2. `field`: specifies that this section of the graphite bucket corresponds
to the field name. This can be specified multiple times.
3. `measurement*`: specifies that all remaining elements of the graphite bucket
correspond to the measurement name.
4. `field*`: specifies that all remaining elements of the graphite bucket
correspond to the field name.

Any part of the template that is not a keyword is treated as a tag key. This
can also be specified multiple times.

NOTE: `field*` cannot be used in conjunction with `measurement*`!

#### Measurement & Tag Templates:

The most basic template is to specify a single transformation to apply to all
incoming metrics. So the following template:

```toml
templates = [
    "region.region.measurement*"
]
```

would result in the following Graphite -> Telegraf transformation:

```
us.west.cpu.load 100
=> cpu.load,region=us.west value=100
```

#### Field Templates:

The field keyword tells Telegraf to give the metric that field name.
So the following template:

```toml
separator = "_"
templates = [
    "measurement.measurement.field.field.region"
]
```

would result in the following Graphite -> Telegraf transformation:

```
cpu.usage.idle.percent.eu-east 100
=> cpu_usage,region=eu-east idle_percent=100
```

The field key can also be derived from all remaining elements of the graphite
bucket by specifying `field*`:

```toml
separator = "_"
templates = [
    "measurement.measurement.region.field*"
]
```

which would result in the following Graphite -> Telegraf transformation:

```
cpu.usage.eu-east.idle.percentage 100
=> cpu_usage,region=eu-east idle_percentage=100
```

#### Filter Templates:

Users can also filter the template(s) to use based on the name of the bucket,
using glob matching, like so:

```toml
templates = [
    "cpu.* measurement.measurement.region",
    "mem.* measurement.measurement.host"
]
```

which would result in the following transformation:

```
cpu.load.eu-east 100
=> cpu_load,region=eu-east value=100

mem.cached.localhost 256
=> mem_cached,host=localhost value=256
```

#### Adding Tags:

Additional tags can be added to a metric that don't exist on the received metric.
You can add additional tags by specifying them after the pattern.
Tags have the same format as the line protocol.
Multiple tags are separated by commas.

```toml
templates = [
    "measurement.measurement.field.region datacenter=1a"
]
```

would result in the following Graphite -> Telegraf transformation:

```
cpu.usage.idle.eu-east 100
=> cpu_usage,region=eu-east,datacenter=1a idle=100
```

There are many more options available;
[more details can be found here](https://github.com/influxdata/influxdb/tree/master/services/graphite#templates).

#### Graphite Configuration:

```toml
[[inputs.exec]]
  ## Commands array
  commands = ["/tmp/test.sh", "/usr/bin/mycollector --foo=bar"]

  ## measurement name suffix (for separating different commands)
  name_suffix = "_mycollector"

  ## Data format to consume.
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "graphite"

  ## This string will be used to join the matched values.
  separator = "_"

  ## Each template line requires a template pattern. It can have an optional
  ## filter before the template and separated by spaces. It can also have optional extra
  ## tags following the template. Multiple tags should be separated by commas and no spaces
  ## similar to the line protocol format. There can be only one default template.
  ## Templates support below format:
  ## 1. filter + template
  ## 2. filter + template + extra tag(s)
  ## 3. filter + template with field key
  ## 4. default template
  templates = [
    "*.app env.service.resource.measurement",
    "stats.* .host.measurement* region=eu-east,agent=sensu",
    "stats2.* .host.measurement.field",
    "measurement*"
  ]
```

# Nagios:

There are no additional configuration options for Nagios line-protocol. The
metrics are parsed directly into Telegraf metrics.

Note: the Nagios input data format is only supported by the `exec` input plugin.

#### Nagios Configuration:

```toml
[[inputs.exec]]
  ## Commands array
  commands = ["/usr/lib/nagios/plugins/check_load", "-w 5,6,7 -c 7,8,9"]

  ## measurement name suffix (for separating different commands)
  name_suffix = "_mycollector"

  ## Data format to consume.
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "nagios"
```
150	docs/DATA_FORMATS_OUTPUT.md	Normal file
@@ -0,0 +1,150 @@

# Telegraf Output Data Formats

Telegraf is able to serialize metrics into the following output data formats:

1. [InfluxDB Line Protocol](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#influx)
1. [JSON](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#json)
1. [Graphite](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite)

Telegraf metrics, like InfluxDB
[points](https://docs.influxdata.com/influxdb/v0.10/write_protocols/line/),
are a combination of four basic parts:

1. Measurement Name
1. Tags
1. Fields
1. Timestamp

In InfluxDB line protocol, these 4 parts are easily defined in textual form:

```
measurement_name[,tag1=val1,...] field1=val1[,field2=val2,...] [timestamp]
```

For Telegraf outputs that write textual data (such as `kafka`, `mqtt`, and `file`),
InfluxDB line protocol was originally the only available output format. But now
we are normalizing telegraf metric "serializers" into a
[plugin-like interface](https://github.com/influxdata/telegraf/tree/master/plugins/serializers)
across all output plugins that can support it.
You will be able to identify a plugin that supports different data formats
by the presence of a `data_format`
config option, for example, in the `file` output plugin:

```toml
[[outputs.file]]
  ## Files to write to, "stdout" is a specially handled file.
  files = ["stdout"]

  ## Data format to output.
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
  data_format = "influx"

  ## Additional configuration options go here
```

Each data_format has an additional set of configuration options available, which
I'll go over below.

# Influx:

There are no additional configuration options for InfluxDB line-protocol. The
metrics are serialized directly into InfluxDB line-protocol.

### Influx Configuration:

```toml
[[outputs.file]]
  ## Files to write to, "stdout" is a specially handled file.
  files = ["stdout", "/tmp/metrics.out"]

  ## Data format to output.
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
  data_format = "influx"
```

# Graphite:

The Graphite data format translates Telegraf metrics into _dot_ buckets. A
template can be specified for the output of Telegraf metrics into Graphite
buckets. The default template is:

```
template = "host.tags.measurement.field"
```

In the above template, we have four parts:

1. _host_ is a tag key. This can be any tag key that is in the Telegraf
metric(s). If the key doesn't exist, it will be ignored. If it does exist, the
tag value will be filled in.
1. _tags_ is a special keyword that outputs all remaining tag values, separated
by dots and in alphabetical order (by tag key). These will be filled after all
tag keys are filled.
1. _measurement_ is a special keyword that outputs the measurement name.
1. _field_ is a special keyword that outputs the field name.

This means the following influx metric -> graphite conversion would happen:

```
cpu,cpu=cpu-total,dc=us-east-1,host=tars usage_idle=98.09,usage_user=0.89 1455320660004257758
=>
tars.cpu-total.us-east-1.cpu.usage_user 0.89 1455320690
tars.cpu-total.us-east-1.cpu.usage_idle 98.09 1455320690
```

### Graphite Configuration:

```toml
[[outputs.file]]
  ## Files to write to, "stdout" is a specially handled file.
  files = ["stdout", "/tmp/metrics.out"]

  ## Data format to output.
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
  data_format = "graphite"

  # prefix each graphite bucket
  prefix = "telegraf"
  # graphite template
  template = "host.tags.measurement.field"
```

# JSON:

The JSON data format serializes Telegraf metrics in JSON format. The format is:

```json
{
   "fields":{
      "field_1":30,
      "field_2":4,
      "field_N":59,
      "n_images":660
   },
   "name":"docker",
   "tags":{
      "host":"raynor"
   },
   "timestamp":1458229140
}
```

### JSON Configuration:

```toml
[[outputs.file]]
  ## Files to write to, "stdout" is a specially handled file.
  files = ["stdout", "/tmp/metrics.out"]

  ## Data format to output.
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
  data_format = "json"
```
@@ -28,6 +28,5 @@
- github.com/wvanbergen/kazoo-go [MIT LICENSE](https://github.com/wvanbergen/kazoo-go/blob/master/MIT-LICENSE)
- gopkg.in/dancannon/gorethink.v1 [APACHE LICENSE](https://github.com/dancannon/gorethink/blob/v1.1.2/LICENSE)
- gopkg.in/mgo.v2 [BSD LICENSE](https://github.com/go-mgo/mgo/blob/v2/LICENSE)
- golang.org/x/crypto/ [BSD LICENSE](https://github.com/golang/crypto/blob/master/LICENSE)
- internal Glob function [MIT LICENSE](https://github.com/ryanuber/go-glob/blob/master/LICENSE)
36	docs/WINDOWS_SERVICE.md	Normal file
@@ -0,0 +1,36 @@

# Running Telegraf as a Windows Service

If you have tried to install Go binaries as Windows services with the **sc.exe**
tool, you may have seen that the service errors and stops running after a while.

**NSSM** (the Non-Sucking Service Manager) is a tool that helps you in a
[number of scenarios](http://nssm.cc/scenarios), including running Go binaries
that were not specifically designed to run as Windows services.

## NSSM Installation via Chocolatey

You can install [Chocolatey](https://chocolatey.org/) and [NSSM](http://nssm.cc/)
with these commands:

```powershell
iex ((new-object net.webclient).DownloadString('https://chocolatey.org/install.ps1'))
choco install -y nssm
```

## Installing Telegraf as a Windows Service with NSSM

You can download the latest Telegraf Windows binaries (still experimental at
the moment) from [the Telegraf Github repo](https://github.com/influxdata/telegraf).

Then you can create a C:\telegraf folder, unzip the binary there, and modify the
**telegraf.conf** sample to configure the metrics you want to send to **InfluxDB**.

Once you have NSSM installed on your system, the process is quite straightforward.
You only need to type this command in your Windows shell:

```powershell
nssm install Telegraf c:\telegraf\telegraf.exe -config c:\telegraf\telegraf.conf
```

And now your service will be installed in Windows, and you will be able to start and
stop it gracefully.
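For example, once the service is installed, it can be started and stopped from
the same shell using the standard `nssm` subcommands:

```powershell
nssm start Telegraf
nssm stop Telegraf
```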
1681	etc/telegraf.conf	(file diff suppressed because it is too large)
164	etc/telegraf_windows.conf	Normal file
@@ -0,0 +1,164 @@
# Telegraf configuration

# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared inputs, and sent to the declared outputs.

# Plugins must be declared in here to be active.
# To deactivate a plugin, comment out the name and any variables.

# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
# file would generate.

# Global tags can be specified here in key="value" format.
[global_tags]
  # dc = "us-east-1" # will tag all metrics with dc=us-east-1
  # rack = "1a"

# Configuration for telegraf agent
[agent]
  ## Default data collection interval for all inputs
  interval = "10s"
  ## Rounds collection interval to 'interval'
  ## ie, if interval="10s" then always collect on :00, :10, :20, etc.
  round_interval = true

  ## Telegraf will cache metric_buffer_limit metrics for each output, and will
  ## flush this buffer on a successful write.
  metric_buffer_limit = 1000
  ## Flush the buffer whenever full, regardless of flush_interval.
  flush_buffer_when_full = true

  ## Collection jitter is used to jitter the collection by a random amount.
  ## Each plugin will sleep for a random time within jitter before collecting.
  ## This can be used to avoid many plugins querying things like sysfs at the
  ## same time, which can have a measurable effect on the system.
  collection_jitter = "0s"

  ## Default flushing interval for all outputs. You shouldn't set this below
  ## interval. Maximum flush_interval will be flush_interval + flush_jitter
  flush_interval = "10s"
  ## Jitter the flush interval by a random amount. This is primarily to avoid
  ## large write spikes for users running a large number of telegraf instances.
  ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
  flush_jitter = "0s"

  ## Run telegraf in debug mode
  debug = false
  ## Run telegraf in quiet mode
  quiet = false
  ## Override default hostname, if empty use os.Hostname()
  hostname = ""


###############################################################################
#                                  OUTPUTS                                    #
###############################################################################

# Configuration for influxdb server to send metrics to
[[outputs.influxdb]]
  # The full HTTP or UDP endpoint URL for your InfluxDB instance.
  # Multiple urls can be specified but it is assumed that they are part of the same
  # cluster, this means that only ONE of the urls will be written to each interval.
  # urls = ["udp://localhost:8089"] # UDP endpoint example
  urls = ["http://localhost:8086"] # required
  # The target database for metrics (telegraf will create it if not exists)
  database = "telegraf" # required
  # Precision of writes, valid values are "ns", "us" (or "µs"), "ms", "s", "m", "h".
  # note: using second precision greatly helps InfluxDB compression
  precision = "s"

  ## Write timeout (for the InfluxDB client), formatted as a string.
  ## If not provided, will default to 5s. 0s means no timeout (not recommended).
  timeout = "5s"
  # username = "telegraf"
  # password = "metricsmetricsmetricsmetrics"
  # Set the user agent for HTTP POSTs (can be useful for log differentiation)
  # user_agent = "telegraf"
  # Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes)
  # udp_payload = 512


###############################################################################
#                                  INPUTS                                     #
###############################################################################

# Windows Performance Counters plugin.
# This is the recommended method of monitoring system metrics on windows,
# as the regular system plugins (inputs.cpu, inputs.mem, etc.) rely on WMI,
# which utilizes a lot of system resources.
#
# See more configuration examples at:
# https://github.com/influxdata/telegraf/tree/master/plugins/inputs/win_perf_counters

[[inputs.win_perf_counters]]
  [[inputs.win_perf_counters.object]]
    # Processor usage, alternative to native, reports on a per core.
    ObjectName = "Processor"
    Instances = ["*"]
    Counters = ["% Idle Time", "% Interrupt Time", "% Privileged Time", "% User Time", "% Processor Time"]
    Measurement = "win_cpu"
    #IncludeTotal=false #Set to true to include _Total instance when querying for all (*).

  [[inputs.win_perf_counters.object]]
    # Disk times and queues
    ObjectName = "LogicalDisk"
    Instances = ["*"]
    Counters = ["% Idle Time", "% Disk Time","% Disk Read Time", "% Disk Write Time", "% User Time", "Current Disk Queue Length"]
    Measurement = "win_disk"
    #IncludeTotal=false #Set to true to include _Total instance when querying for all (*).

  [[inputs.win_perf_counters.object]]
    ObjectName = "System"
    Counters = ["Context Switches/sec","System Calls/sec"]
    Instances = ["------"]
    Measurement = "win_system"
    #IncludeTotal=false #Set to true to include _Total instance when querying for all (*).

  [[inputs.win_perf_counters.object]]
    # Example query where the Instance portion must be removed to get data back, such as from the Memory object.
    ObjectName = "Memory"
    Counters = ["Available Bytes","Cache Faults/sec","Demand Zero Faults/sec","Page Faults/sec","Pages/sec","Transition Faults/sec","Pool Nonpaged Bytes","Pool Paged Bytes"]
    Instances = ["------"] # Use 6 x - to remove the Instance bit from the query.
    Measurement = "win_mem"
    #IncludeTotal=false #Set to true to include _Total instance when querying for all (*).


# Windows system plugins using WMI (disabled by default, using
# win_perf_counters over WMI is recommended)

# Read metrics about cpu usage
#[[inputs.cpu]]
  ## Whether to report per-cpu stats or not
  #percpu = true
  ## Whether to report total system cpu stats or not
  #totalcpu = true
  ## Comment this line if you want the raw CPU time metrics
  #fielddrop = ["time_*"]

# Read metrics about disk usage by mount point
#[[inputs.disk]]
  ## By default, telegraf gathers stats for all mountpoints.
  ## Setting mountpoints will restrict the stats to the specified mountpoints.
  ## mount_points=["/"]

  ## Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually
  ## present on /run, /var/run, /dev/shm or /dev).
  #ignore_fs = ["tmpfs", "devtmpfs"]

# Read metrics about disk IO by device
#[[inputs.diskio]]
  ## By default, telegraf will gather stats for all devices including
  ## disk partitions.
  ## Setting devices will restrict the stats to the specified devices.
  ## devices = ["sda", "sdb"]
  ## Uncomment the following line if you do not need disk serial numbers.
  ## skip_serial_number = true

# Read metrics about memory usage
#[[inputs.mem]]
  # no configuration

# Read metrics about swap memory usage
#[[inputs.swap]]
  # no configuration
31	input.go	Normal file
@@ -0,0 +1,31 @@

package telegraf

type Input interface {
	// SampleConfig returns the default configuration of the Input
	SampleConfig() string

	// Description returns a one-sentence description of the Input
	Description() string

	// Gather takes in an accumulator and adds the metrics that the Input
	// gathers. This is called every "interval"
	Gather(Accumulator) error
}

type ServiceInput interface {
	// SampleConfig returns the default configuration of the Input
	SampleConfig() string

	// Description returns a one-sentence description of the Input
	Description() string

	// Gather takes in an accumulator and adds the metrics that the Input
	// gathers. This is called every "interval"
	Gather(Accumulator) error

	// Start starts the ServiceInput's service, whatever that may be
	Start(Accumulator) error

	// Stop stops the services and closes any necessary channels and connections
	Stop()
}
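As an illustration of this interface, here is a minimal sketch of a plugin that
satisfies `telegraf.Input`; the `simple` package, its `Ok` field, and the
`state` measurement are hypothetical, not part of this diff:

```go
package simple

import "github.com/influxdata/telegraf"

// Simple is a hypothetical input that reports a single gauge.
type Simple struct {
	Ok bool
}

func (s *Simple) Description() string {
	return "a demonstration input plugin"
}

func (s *Simple) SampleConfig() string {
	return "  ok = true # set the gauge to 1 instead of 0\n"
}

// Gather is called by the agent once per collection interval.
func (s *Simple) Gather(acc telegraf.Accumulator) error {
	value := 0
	if s.Ok {
		value = 1
	}
	acc.AddFields("state", map[string]interface{}{"value": value}, nil)
	return nil
}
```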
77	internal/buffer/buffer.go	Normal file
@@ -0,0 +1,77 @@

package buffer

import (
	"github.com/influxdata/telegraf"
)

// Buffer is an object for storing metrics in a circular buffer.
type Buffer struct {
	buf chan telegraf.Metric
	// total dropped metrics
	drops int
	// total metrics added
	total int
}

// NewBuffer returns a Buffer
// size is the maximum number of metrics that Buffer will cache. If Add is
// called when the buffer is full, then the oldest metric(s) will be dropped.
func NewBuffer(size int) *Buffer {
	return &Buffer{
		buf: make(chan telegraf.Metric, size),
	}
}

// IsEmpty returns true if Buffer is empty.
func (b *Buffer) IsEmpty() bool {
	return len(b.buf) == 0
}

// Len returns the current length of the buffer.
func (b *Buffer) Len() int {
	return len(b.buf)
}

// Drops returns the total number of dropped metrics that have occurred in this
// buffer since instantiation.
func (b *Buffer) Drops() int {
	return b.drops
}

// Total returns the total number of metrics that have been added to this buffer.
func (b *Buffer) Total() int {
	return b.total
}

// Add adds metrics to the buffer.
func (b *Buffer) Add(metrics ...telegraf.Metric) {
	for i, _ := range metrics {
		b.total++
		select {
		case b.buf <- metrics[i]:
		default:
			b.drops++
			<-b.buf
			b.buf <- metrics[i]
		}
	}
}

// Batch returns a batch of metrics of size batchSize.
// The batch will be of maximum length batchSize. It can be less than batchSize
// if the length of Buffer is less than batchSize.
func (b *Buffer) Batch(batchSize int) []telegraf.Metric {
	n := min(len(b.buf), batchSize)
	out := make([]telegraf.Metric, n)
	for i := 0; i < n; i++ {
		out[i] = <-b.buf
	}
	return out
}

func min(a, b int) int {
	if b < a {
		return b
	}
	return a
}
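A quick sketch of the drop-oldest behavior this buffer implements; the capacity
and metric values are illustrative, and the tests in the next file exercise the
same semantics:

```go
package main

import (
	"fmt"

	"github.com/influxdata/telegraf/internal/buffer"
	"github.com/influxdata/telegraf/testutil"
)

func main() {
	// Capacity 2: adding a third metric evicts the oldest one.
	b := buffer.NewBuffer(2)
	b.Add(
		testutil.TestMetric(1, "m1"),
		testutil.TestMetric(2, "m2"),
		testutil.TestMetric(3, "m3"), // m1 is dropped to make room
	)
	fmt.Println(b.Len(), b.Drops(), b.Total()) // 2 1 3

	// Batch drains up to batchSize metrics, oldest first.
	batch := b.Batch(5)
	fmt.Println(len(batch), b.IsEmpty()) // 2 true
}
```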
94	internal/buffer/buffer_test.go	Normal file
@@ -0,0 +1,94 @@

package buffer

import (
	"testing"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/testutil"

	"github.com/stretchr/testify/assert"
)

var metricList = []telegraf.Metric{
	testutil.TestMetric(2, "mymetric1"),
	testutil.TestMetric(1, "mymetric2"),
	testutil.TestMetric(11, "mymetric3"),
	testutil.TestMetric(15, "mymetric4"),
	testutil.TestMetric(8, "mymetric5"),
}

func BenchmarkAddMetrics(b *testing.B) {
	buf := NewBuffer(10000)
	m := testutil.TestMetric(1, "mymetric")
	for n := 0; n < b.N; n++ {
		buf.Add(m)
	}
}

func TestNewBufferBasicFuncs(t *testing.T) {
	b := NewBuffer(10)

	assert.True(t, b.IsEmpty())
	assert.Zero(t, b.Len())
	assert.Zero(t, b.Drops())
	assert.Zero(t, b.Total())

	m := testutil.TestMetric(1, "mymetric")
	b.Add(m)
	assert.False(t, b.IsEmpty())
	assert.Equal(t, b.Len(), 1)
	assert.Equal(t, b.Drops(), 0)
	assert.Equal(t, b.Total(), 1)

	b.Add(metricList...)
	assert.False(t, b.IsEmpty())
	assert.Equal(t, b.Len(), 6)
	assert.Equal(t, b.Drops(), 0)
	assert.Equal(t, b.Total(), 6)
}

func TestDroppingMetrics(t *testing.T) {
	b := NewBuffer(10)

	// Add up to the size of the buffer
	b.Add(metricList...)
	b.Add(metricList...)
	assert.False(t, b.IsEmpty())
	assert.Equal(t, b.Len(), 10)
	assert.Equal(t, b.Drops(), 0)
	assert.Equal(t, b.Total(), 10)

	// Add 5 more and verify they were dropped
	b.Add(metricList...)
	assert.False(t, b.IsEmpty())
	assert.Equal(t, b.Len(), 10)
	assert.Equal(t, b.Drops(), 5)
	assert.Equal(t, b.Total(), 15)
}

func TestGettingBatches(t *testing.T) {
	b := NewBuffer(20)

	// Verify that the buffer returned is smaller than requested when there are
	// not as many items as requested.
	b.Add(metricList...)
	batch := b.Batch(10)
	assert.Len(t, batch, 5)

	// Verify that the buffer is now empty
	assert.True(t, b.IsEmpty())
	assert.Zero(t, b.Len())
	assert.Zero(t, b.Drops())
	assert.Equal(t, b.Total(), 5)

	// Verify that the buffer returned is not more than the size requested
	b.Add(metricList...)
	batch = b.Batch(3)
	assert.Len(t, batch, 3)

	// Verify that buffer is not empty
	assert.False(t, b.IsEmpty())
	assert.Equal(t, b.Len(), 2)
	assert.Equal(t, b.Drops(), 0)
	assert.Equal(t, b.Total(), 10)
}
49	internal/config/aws/credentials.go	Normal file
@@ -0,0 +1,49 @@

package aws

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/client"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
	"github.com/aws/aws-sdk-go/aws/session"
)

type CredentialConfig struct {
	Region    string
	AccessKey string
	SecretKey string
	RoleARN   string
	Profile   string
	Filename  string
	Token     string
}

func (c *CredentialConfig) Credentials() client.ConfigProvider {
	if c.RoleARN != "" {
		return c.assumeCredentials()
	} else {
		return c.rootCredentials()
	}
}

func (c *CredentialConfig) rootCredentials() client.ConfigProvider {
	config := &aws.Config{
		Region: aws.String(c.Region),
	}
	if c.AccessKey != "" || c.SecretKey != "" {
		config.Credentials = credentials.NewStaticCredentials(c.AccessKey, c.SecretKey, c.Token)
	} else if c.Profile != "" || c.Filename != "" {
		config.Credentials = credentials.NewSharedCredentials(c.Filename, c.Profile)
	}

	return session.New(config)
}

func (c *CredentialConfig) assumeCredentials() client.ConfigProvider {
	rootCredentials := c.rootCredentials()
	config := &aws.Config{
		Region: aws.String(c.Region),
	}
	config.Credentials = stscreds.NewCredentials(rootCredentials, c.RoleARN)
	return session.New(config)
}
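A sketch of how a plugin might consume this helper: explicit keys win if set,
otherwise the shared-credentials file is used, and `RoleARN` switches to STS
AssumeRole. The CloudWatch client and the field values shown are illustrative:

```go
package main

import (
	"github.com/aws/aws-sdk-go/service/cloudwatch"

	internalaws "github.com/influxdata/telegraf/internal/config/aws"
)

func main() {
	cfg := internalaws.CredentialConfig{
		Region:  "us-east-1",
		Profile: "telegraf", // read from ~/.aws/credentials
	}
	// Credentials() returns a client.ConfigProvider usable by any AWS client.
	svc := cloudwatch.New(cfg.Credentials())
	_ = svc // pass the client to an input or output plugin
}
```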
File diff suppressed because it is too large
@@ -1,48 +1,101 @@
 package config
 
 import (
+	"os"
 	"testing"
 	"time"
 
-	"github.com/influxdb/telegraf/plugins"
-	"github.com/influxdb/telegraf/plugins/exec"
-	"github.com/influxdb/telegraf/plugins/memcached"
-	"github.com/influxdb/telegraf/plugins/procstat"
+	"github.com/influxdata/telegraf/internal/models"
+	"github.com/influxdata/telegraf/plugins/inputs"
+	"github.com/influxdata/telegraf/plugins/inputs/exec"
+	"github.com/influxdata/telegraf/plugins/inputs/memcached"
+	"github.com/influxdata/telegraf/plugins/inputs/procstat"
+	"github.com/influxdata/telegraf/plugins/parsers"
 
 	"github.com/stretchr/testify/assert"
 )
 
-func TestConfig_LoadSinglePlugin(t *testing.T) {
+func TestConfig_LoadSingleInputWithEnvVars(t *testing.T) {
+	c := NewConfig()
+	err := os.Setenv("MY_TEST_SERVER", "192.168.1.1")
+	assert.NoError(t, err)
+	err = os.Setenv("TEST_INTERVAL", "10s")
+	assert.NoError(t, err)
+	c.LoadConfig("./testdata/single_plugin_env_vars.toml")
+
+	memcached := inputs.Inputs["memcached"]().(*memcached.Memcached)
+	memcached.Servers = []string{"192.168.1.1"}
+
+	filter := internal_models.Filter{
+		NameDrop:  []string{"metricname2"},
+		NamePass:  []string{"metricname1"},
+		FieldDrop: []string{"other", "stuff"},
+		FieldPass: []string{"some", "strings"},
+		TagDrop: []internal_models.TagFilter{
+			internal_models.TagFilter{
+				Name:   "badtag",
+				Filter: []string{"othertag"},
+			},
+		},
+		TagPass: []internal_models.TagFilter{
+			internal_models.TagFilter{
+				Name:   "goodtag",
+				Filter: []string{"mytag"},
+			},
+		},
+		IsActive: true,
+	}
+	assert.NoError(t, filter.CompileFilter())
+	mConfig := &internal_models.InputConfig{
+		Name:     "memcached",
+		Filter:   filter,
+		Interval: 10 * time.Second,
+	}
+	mConfig.Tags = make(map[string]string)
+
+	assert.Equal(t, memcached, c.Inputs[0].Input,
+		"Testdata did not produce a correct memcached struct.")
+	assert.Equal(t, mConfig, c.Inputs[0].Config,
+		"Testdata did not produce correct memcached metadata.")
+}
+
+func TestConfig_LoadSingleInput(t *testing.T) {
 	c := NewConfig()
 	c.LoadConfig("./testdata/single_plugin.toml")
 
-	memcached := plugins.Plugins["memcached"]().(*memcached.Memcached)
+	memcached := inputs.Inputs["memcached"]().(*memcached.Memcached)
 	memcached.Servers = []string{"localhost"}
 
-	mConfig := &PluginConfig{
-		Name: "memcached",
-		Filter: Filter{
-			Drop: []string{"other", "stuff"},
-			Pass: []string{"some", "strings"},
-			TagDrop: []TagFilter{
-				TagFilter{
-					Name:   "badtag",
-					Filter: []string{"othertag"},
-				},
-			},
-			TagPass: []TagFilter{
-				TagFilter{
-					Name:   "goodtag",
-					Filter: []string{"mytag"},
-				},
-			},
-			IsActive: true,
-		},
+	filter := internal_models.Filter{
+		NameDrop:  []string{"metricname2"},
+		NamePass:  []string{"metricname1"},
+		FieldDrop: []string{"other", "stuff"},
+		FieldPass: []string{"some", "strings"},
+		TagDrop: []internal_models.TagFilter{
+			internal_models.TagFilter{
+				Name:   "badtag",
+				Filter: []string{"othertag"},
+			},
+		},
+		TagPass: []internal_models.TagFilter{
+			internal_models.TagFilter{
+				Name:   "goodtag",
+				Filter: []string{"mytag"},
+			},
+		},
+		IsActive: true,
+	}
+	assert.NoError(t, filter.CompileFilter())
+	mConfig := &internal_models.InputConfig{
+		Name:     "memcached",
+		Filter:   filter,
 		Interval: 5 * time.Second,
 	}
+	mConfig.Tags = make(map[string]string)
 
-	assert.Equal(t, memcached, c.Plugins[0].Plugin,
+	assert.Equal(t, memcached, c.Inputs[0].Input,
 		"Testdata did not produce a correct memcached struct.")
-	assert.Equal(t, mConfig, c.Plugins[0].Config,
+	assert.Equal(t, mConfig, c.Inputs[0].Config,
 		"Testdata did not produce correct memcached metadata.")
 }
 
@@ -57,240 +110,70 @@ func TestConfig_LoadDirectory(t *testing.T) {
 		t.Error(err)
 	}
 
-	memcached := plugins.Plugins["memcached"]().(*memcached.Memcached)
+	memcached := inputs.Inputs["memcached"]().(*memcached.Memcached)
 	memcached.Servers = []string{"localhost"}
 
-	mConfig := &PluginConfig{
-		Name: "memcached",
-		Filter: Filter{
-			Drop: []string{"other", "stuff"},
-			Pass: []string{"some", "strings"},
-			TagDrop: []TagFilter{
-				TagFilter{
-					Name:   "badtag",
-					Filter: []string{"othertag"},
-				},
-			},
-			TagPass: []TagFilter{
-				TagFilter{
-					Name:   "goodtag",
-					Filter: []string{"mytag"},
-				},
-			},
-			IsActive: true,
-		},
+	filter := internal_models.Filter{
+		NameDrop:  []string{"metricname2"},
+		NamePass:  []string{"metricname1"},
+		FieldDrop: []string{"other", "stuff"},
+		FieldPass: []string{"some", "strings"},
+		TagDrop: []internal_models.TagFilter{
+			internal_models.TagFilter{
+				Name:   "badtag",
+				Filter: []string{"othertag"},
+			},
+		},
+		TagPass: []internal_models.TagFilter{
+			internal_models.TagFilter{
+				Name:   "goodtag",
+				Filter: []string{"mytag"},
+			},
+		},
+		IsActive: true,
+	}
+	assert.NoError(t, filter.CompileFilter())
+	mConfig := &internal_models.InputConfig{
+		Name:     "memcached",
+		Filter:   filter,
 		Interval: 5 * time.Second,
 	}
+	mConfig.Tags = make(map[string]string)
 
-	assert.Equal(t, memcached, c.Plugins[0].Plugin,
+	assert.Equal(t, memcached, c.Inputs[0].Input,
 		"Testdata did not produce a correct memcached struct.")
-	assert.Equal(t, mConfig, c.Plugins[0].Config,
+	assert.Equal(t, mConfig, c.Inputs[0].Config,
 		"Testdata did not produce correct memcached metadata.")
 
-	ex := plugins.Plugins["exec"]().(*exec.Exec)
-	ex.Commands = []*exec.Command{
-		&exec.Command{
-			Command: "/usr/bin/myothercollector --foo=bar",
-			Name:    "myothercollector",
-		},
-	}
-	eConfig := &PluginConfig{Name: "exec"}
-	assert.Equal(t, ex, c.Plugins[1].Plugin,
+	ex := inputs.Inputs["exec"]().(*exec.Exec)
+	p, err := parsers.NewJSONParser("exec", nil, nil)
+	assert.NoError(t, err)
+	ex.SetParser(p)
+	ex.Command = "/usr/bin/myothercollector --foo=bar"
+	eConfig := &internal_models.InputConfig{
+		Name:              "exec",
+		MeasurementSuffix: "_myothercollector",
+	}
+	eConfig.Tags = make(map[string]string)
+	assert.Equal(t, ex, c.Inputs[1].Input,
 		"Merged Testdata did not produce a correct exec struct.")
-	assert.Equal(t, eConfig, c.Plugins[1].Config,
+	assert.Equal(t, eConfig, c.Inputs[1].Config,
 		"Merged Testdata did not produce correct exec metadata.")
 
 	memcached.Servers = []string{"192.168.1.1"}
-	assert.Equal(t, memcached, c.Plugins[2].Plugin,
+	assert.Equal(t, memcached, c.Inputs[2].Input,
 		"Testdata did not produce a correct memcached struct.")
-	assert.Equal(t, mConfig, c.Plugins[2].Config,
+	assert.Equal(t, mConfig, c.Inputs[2].Config,
 		"Testdata did not produce correct memcached metadata.")
 
-	pstat := plugins.Plugins["procstat"]().(*procstat.Procstat)
-	pstat.Specifications = []*procstat.Specification{
-		&procstat.Specification{
-			PidFile: "/var/run/grafana-server.pid",
-		},
-		&procstat.Specification{
-			PidFile: "/var/run/influxdb/influxd.pid",
-		},
-	}
-
-	pConfig := &PluginConfig{Name: "procstat"}
+	pstat := inputs.Inputs["procstat"]().(*procstat.Procstat)
+	pstat.PidFile = "/var/run/grafana-server.pid"
+
+	pConfig := &internal_models.InputConfig{Name: "procstat"}
+	pConfig.Tags = make(map[string]string)
 
-	assert.Equal(t, pstat, c.Plugins[3].Plugin,
+	assert.Equal(t, pstat, c.Inputs[3].Input,
 		"Merged Testdata did not produce a correct procstat struct.")
-	assert.Equal(t, pConfig, c.Plugins[3].Config,
+	assert.Equal(t, pConfig, c.Inputs[3].Config,
 		"Merged Testdata did not produce correct procstat metadata.")
 }
 
-func TestFilter_Empty(t *testing.T) {
-	f := Filter{}
-
-	measurements := []string{
-		"foo",
-		"bar",
-		"barfoo",
-		"foo_bar",
-		"foo.bar",
-		"foo-bar",
-		"supercalifradjulisticexpialidocious",
-	}
-
-	for _, measurement := range measurements {
-		if !f.ShouldPass(measurement) {
-			t.Errorf("Expected measurement %s to pass", measurement)
-		}
-	}
-}
-
-func TestFilter_Pass(t *testing.T) {
-	f := Filter{
-		Pass: []string{"foo*", "cpu_usage_idle"},
-	}
-
-	passes := []string{
-		"foo",
-		"foo_bar",
-		"foo.bar",
-		"foo-bar",
-		"cpu_usage_idle",
-	}
-
-	drops := []string{
-		"bar",
-		"barfoo",
-		"bar_foo",
-		"cpu_usage_busy",
-	}
-
-	for _, measurement := range passes {
-		if !f.ShouldPass(measurement) {
-			t.Errorf("Expected measurement %s to pass", measurement)
-		}
-	}
-
-	for _, measurement := range drops {
-		if f.ShouldPass(measurement) {
-			t.Errorf("Expected measurement %s to drop", measurement)
-		}
-	}
-}
-
-func TestFilter_Drop(t *testing.T) {
-	f := Filter{
-		Drop: []string{"foo*", "cpu_usage_idle"},
-	}
-
-	drops := []string{
-		"foo",
-		"foo_bar",
-		"foo.bar",
-		"foo-bar",
-		"cpu_usage_idle",
-	}
-
-	passes := []string{
-		"bar",
-		"barfoo",
-		"bar_foo",
-		"cpu_usage_busy",
-	}
-
-	for _, measurement := range passes {
-		if !f.ShouldPass(measurement) {
-			t.Errorf("Expected measurement %s to pass", measurement)
-		}
-	}
-
-	for _, measurement := range drops {
-		if f.ShouldPass(measurement) {
-			t.Errorf("Expected measurement %s to drop", measurement)
-		}
-	}
-}
-
-func TestFilter_TagPass(t *testing.T) {
-	filters := []TagFilter{
-		TagFilter{
-			Name:   "cpu",
-			Filter: []string{"cpu-*"},
-		},
-		TagFilter{
-			Name:   "mem",
-			Filter: []string{"mem_free"},
-		}}
-	f := Filter{
-		TagPass: filters,
-	}
-
-	passes := []map[string]string{
-		{"cpu": "cpu-total"},
-		{"cpu": "cpu-0"},
-		{"cpu": "cpu-1"},
-		{"cpu": "cpu-2"},
-		{"mem": "mem_free"},
-	}
-
-	drops := []map[string]string{
-		{"cpu": "cputotal"},
-		{"cpu": "cpu0"},
-		{"cpu": "cpu1"},
-		{"cpu": "cpu2"},
-		{"mem": "mem_used"},
-	}
-
-	for _, tags := range passes {
-		if !f.ShouldTagsPass(tags) {
-			t.Errorf("Expected tags %v to pass", tags)
-		}
-	}
-
-	for _, tags := range drops {
-		if f.ShouldTagsPass(tags) {
-			t.Errorf("Expected tags %v to drop", tags)
-		}
-	}
-}
-
-func TestFilter_TagDrop(t *testing.T) {
-	filters := []TagFilter{
-		TagFilter{
-			Name:   "cpu",
-			Filter: []string{"cpu-*"},
-		},
-		TagFilter{
-			Name:   "mem",
-			Filter: []string{"mem_free"},
-		}}
-	f := Filter{
-		TagDrop: filters,
-	}
-
-	drops := []map[string]string{
-		{"cpu": "cpu-total"},
-		{"cpu": "cpu-0"},
-		{"cpu": "cpu-1"},
-		{"cpu": "cpu-2"},
-		{"mem": "mem_free"},
-	}
-
-	passes := []map[string]string{
-		{"cpu": "cputotal"},
-		{"cpu": "cpu0"},
-		{"cpu": "cpu1"},
-		{"cpu": "cpu2"},
-		{"mem": "mem_used"},
-	}
-
-	for _, tags := range passes {
-		if !f.ShouldTagsPass(tags) {
-			t.Errorf("Expected tags %v to pass", tags)
-		}
-	}
-
-	for _, tags := range drops {
-		if f.ShouldTagsPass(tags) {
-			t.Errorf("Expected tags %v to drop", tags)
-		}
-	}
-}
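The mechanical change running through these tests is the registry rename from plugins.Plugins to inputs.Inputs. A minimal sketch of the new lookup pattern, with memcached standing in for any registered input:

	// Input factories are registered by name; invoking the factory returns a
	// fresh instance that is type-asserted to the concrete plugin type.
	mc := inputs.Inputs["memcached"]().(*memcached.Memcached)
	mc.Servers = []string{"localhost"}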
12 internal/config/testdata/single_plugin.toml (vendored)
@@ -1,9 +1,11 @@
-[[plugins.memcached]]
+[[inputs.memcached]]
   servers = ["localhost"]
-  pass = ["some", "strings"]
-  drop = ["other", "stuff"]
+  namepass = ["metricname1"]
+  namedrop = ["metricname2"]
+  fieldpass = ["some", "strings"]
+  fielddrop = ["other", "stuff"]
   interval = "5s"
-  [plugins.memcached.tagpass]
+  [inputs.memcached.tagpass]
     goodtag = ["mytag"]
-  [plugins.memcached.tagdrop]
+  [inputs.memcached.tagdrop]
     badtag = ["othertag"]
11 internal/config/testdata/single_plugin_env_vars.toml (vendored, new file)
@@ -0,0 +1,11 @@
+[[inputs.memcached]]
+  servers = ["$MY_TEST_SERVER"]
+  namepass = ["metricname1"]
+  namedrop = ["metricname2"]
+  fieldpass = ["some", "strings"]
+  fielddrop = ["other", "stuff"]
+  interval = "$TEST_INTERVAL"
+  [inputs.memcached.tagpass]
+    goodtag = ["mytag"]
+  [inputs.memcached.tagdrop]
+    badtag = ["othertag"]
8 internal/config/testdata/subconfig/exec.conf (vendored)
@@ -1,8 +1,4 @@
-[[plugins.exec]]
-  # specify commands via an array of tables
-  [[plugins.exec.commands]]
+[[inputs.exec]]
   # the command to run
   command = "/usr/bin/myothercollector --foo=bar"
-
-  # name of the command (used as a prefix for measurements)
-  name = "myothercollector"
+  name_suffix = "_myothercollector"
@@ -1,9 +1,11 @@
-[[plugins.memcached]]
+[[inputs.memcached]]
   servers = ["192.168.1.1"]
+  namepass = ["metricname1"]
+  namedrop = ["metricname2"]
   pass = ["some", "strings"]
   drop = ["other", "stuff"]
   interval = "5s"
-  [plugins.memcached.tagpass]
+  [inputs.memcached.tagpass]
     goodtag = ["mytag"]
-  [plugins.memcached.tagdrop]
+  [inputs.memcached.tagdrop]
     badtag = ["othertag"]
@@ -1,5 +1,2 @@
-[[plugins.procstat]]
-  [[plugins.procstat.specifications]]
+[[inputs.procstat]]
   pid_file = "/var/run/grafana-server.pid"
-  [[plugins.procstat.specifications]]
-  pid_file = "/var/run/influxdb/influxd.pid"
132 internal/config/testdata/telegraf-agent.toml (vendored)
@@ -1,7 +1,7 @@
 # Telegraf configuration
 
 # Telegraf is entirely plugin driven. All metrics are gathered from the
-# declared plugins.
+# declared inputs.
 
 # Even if a plugin has no configuration, it must be declared in here
 # to be active. Declaring a plugin means just specifying the name
@@ -20,21 +20,14 @@
 # with 'required'. Be sure to edit those to make this configuration work.
 
 # Tags can also be specified via a normal map, but only one form at a time:
-[tags]
-  # dc = "us-east-1"
+[global_tags]
+  dc = "us-east-1"
 
 # Configuration for telegraf agent
 [agent]
   # Default data collection interval for all plugins
   interval = "10s"
 
-  # If utc = false, uses local time (utc is highly recommended)
-  utc = true
-
-  # Precision of writes, valid values are n, u, ms, s, m, and h
-  # note: using second precision greatly helps InfluxDB compression
-  precision = "s"
-
   # run telegraf in debug mode
   debug = false
 
@@ -46,8 +39,6 @@
 # OUTPUTS #
 ###############################################################################
 
-[outputs]
-
 # Configuration for influxdb server to send metrics to
 [[outputs.influxdb]]
   # The full HTTP endpoint URL for your InfluxDB instance
@@ -58,17 +49,6 @@
   # The target database for metrics. This database must already exist
   database = "telegraf" # required.
 
-  # Connection timeout (for the connection with InfluxDB), formatted as a string.
-  # Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
-  # If not provided, will default to 0 (no timeout)
-  # timeout = "5s"
-
-  # username = "telegraf"
-  # password = "metricsmetricsmetricsmetrics"
-
-  # Set the user agent for the POSTs (can be useful for log differentiation)
-  # user_agent = "telegraf"
-
 [[outputs.influxdb]]
   urls = ["udp://localhost:8089"]
   database = "udp-telegraf"
@@ -88,15 +68,13 @@
 # PLUGINS #
 ###############################################################################
 
-[plugins]
-
 # Read Apache status information (mod_status)
-[[plugins.apache]]
+[[inputs.apache]]
   # An array of Apache status URI to gather stats.
   urls = ["http://localhost/server-status?auto"]
 
 # Read metrics about cpu usage
-[[plugins.cpu]]
+[[inputs.cpu]]
   # Whether to report per-cpu stats or not
   percpu = true
   # Whether to report total system cpu stats or not
@@ -105,11 +83,11 @@ urls = ["http://localhost/server-status?auto"]
   drop = ["cpu_time"]
 
 # Read metrics about disk usage by mount point
-[[plugins.disk]]
+[[inputs.diskio]]
   # no configuration
 
 # Read metrics from one or many disque servers
-[[plugins.disque]]
+[[inputs.disque]]
   # An array of URI to gather stats about. Specify an ip or hostname
   # with optional port and password. ie disque://localhost, disque://10.10.3.33:18832,
   # 10.0.0.1:10000, etc.
@@ -118,7 +96,7 @@ urls = ["http://localhost/server-status?auto"]
   servers = ["localhost"]
 
 # Read stats from one or more Elasticsearch servers or clusters
-[[plugins.elasticsearch]]
+[[inputs.elasticsearch]]
   # specify a list of one or more Elasticsearch servers
   servers = ["http://localhost:9200"]
 
@@ -127,17 +105,13 @@ urls = ["http://localhost/server-status?auto"]
   local = true
 
 # Read flattened metrics from one or more commands that output JSON to stdout
-[[plugins.exec]]
-  # specify commands via an array of tables
-  [[exec.commands]]
+[[inputs.exec]]
   # the command to run
   command = "/usr/bin/mycollector --foo=bar"
-
-  # name of the command (used as a prefix for measurements)
-  name = "mycollector"
+  name_suffix = "_mycollector"
 
 # Read metrics of haproxy, via socket or csv stats page
-[[plugins.haproxy]]
+[[inputs.haproxy]]
   # An array of address to gather stats about. Specify an ip on hostname
   # with optional port. ie localhost, 10.10.3.33:1936, etc.
   #
@@ -147,33 +121,30 @@ urls = ["http://localhost/server-status?auto"]
   # servers = ["socket:/run/haproxy/admin.sock"]
 
 # Read flattened metrics from one or more JSON HTTP endpoints
-[[plugins.httpjson]]
-  # Specify services via an array of tables
-  [[httpjson.services]]
-
-    # a name for the service being polled
-    name = "webserver_stats"
-
-    # URL of each server in the service's cluster
-    servers = [
-      "http://localhost:9999/stats/",
-      "http://localhost:9998/stats/",
-    ]
-
-    # HTTP method to use (case-sensitive)
-    method = "GET"
-
-    # HTTP parameters (all values must be strings)
-    [httpjson.services.parameters]
-      event_type = "cpu_spike"
-      threshold = "0.75"
+[[inputs.httpjson]]
+  # a name for the service being polled
+  name = "webserver_stats"
+
+  # URL of each server in the service's cluster
+  servers = [
+    "http://localhost:9999/stats/",
+    "http://localhost:9998/stats/",
+  ]
+
+  # HTTP method to use (case-sensitive)
+  method = "GET"
+
+  # HTTP parameters (all values must be strings)
+  [httpjson.parameters]
+    event_type = "cpu_spike"
+    threshold = "0.75"
 
 # Read metrics about disk IO by device
-[[plugins.io]]
+[[inputs.diskio]]
   # no configuration
 
 # read metrics from a Kafka topic
-[[plugins.kafka_consumer]]
+[[inputs.kafka_consumer]]
   # topic(s) to consume
   topics = ["telegraf"]
   # an array of Zookeeper connection strings
@@ -186,7 +157,7 @@ urls = ["http://localhost/server-status?auto"]
   offset = "oldest"
 
 # Read metrics from a LeoFS Server via SNMP
-[[plugins.leofs]]
+[[inputs.leofs]]
   # An array of URI to gather stats about LeoFS.
   # Specify an ip or hostname with port. ie 127.0.0.1:4020
   #
@@ -194,7 +165,7 @@ urls = ["http://localhost/server-status?auto"]
   servers = ["127.0.0.1:4021"]
 
 # Read metrics from local Lustre service on OST, MDS
-[[plugins.lustre2]]
+[[inputs.lustre2]]
   # An array of /proc globs to search for Lustre stats
   # If not specified, the default will work on Lustre 2.5.x
   #
@@ -202,19 +173,28 @@ urls = ["http://localhost/server-status?auto"]
   # mds_procfiles = ["/proc/fs/lustre/mdt/*/md_stats"]
 
 # Read metrics about memory usage
-[[plugins.mem]]
+[[inputs.mem]]
   # no configuration
 
 # Read metrics from one or many memcached servers
-[[plugins.memcached]]
+[[inputs.memcached]]
   # An array of address to gather stats about. Specify an ip on hostname
   # with optional port. ie localhost, 10.0.0.1:11211, etc.
   #
   # If no servers are specified, then localhost is used as the host.
   servers = ["localhost"]
 
+# Telegraf plugin for gathering metrics from N Mesos masters
+[[inputs.mesos]]
+  # Timeout, in ms.
+  timeout = 100
+  # A list of Mesos masters, default value is localhost:5050.
+  masters = ["localhost:5050"]
+  # Metrics groups to be collected, by default, all enabled.
+  master_collections = ["resources","master","system","slaves","frameworks","messages","evqueue","registrar"]
+
 # Read metrics from one or many MongoDB servers
-[[plugins.mongodb]]
+[[inputs.mongodb]]
   # An array of URI to gather stats about. Specify an ip or hostname
   # with optional port add password. ie mongodb://user:auth_key@10.10.3.30:27017,
   # mongodb://10.10.3.33:18832, 10.0.0.1:10000, etc.
@@ -223,7 +203,7 @@ urls = ["http://localhost/server-status?auto"]
   servers = ["127.0.0.1:27017"]
 
 # Read metrics from one or many mysql servers
-[[plugins.mysql]]
+[[inputs.mysql]]
   # specify servers via a url matching:
   #  [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify]]
   #  e.g.
@@ -234,7 +214,7 @@ urls = ["http://localhost/server-status?auto"]
   servers = ["localhost"]
 
 # Read metrics about network interface usage
-[[plugins.net]]
+[[inputs.net]]
   # By default, telegraf gathers stats from any up interface (excluding loopback)
   # Setting interfaces will tell it to gather these explicit interfaces,
   # regardless of status.
@@ -242,12 +222,12 @@ urls = ["http://localhost/server-status?auto"]
   # interfaces = ["eth0", ... ]
 
 # Read Nginx's basic status information (ngx_http_stub_status_module)
-[[plugins.nginx]]
+[[inputs.nginx]]
   # An array of Nginx stub_status URI to gather stats.
   urls = ["http://localhost/status"]
 
 # Ping given url(s) and return statistics
-[[plugins.ping]]
+[[inputs.ping]]
   # urls to ping
   urls = ["www.google.com"] # required
   # number of pings to send (ping -c <COUNT>)
@@ -260,10 +240,7 @@ urls = ["http://localhost/server-status?auto"]
   interface = ""
 
 # Read metrics from one or many postgresql servers
-[[plugins.postgresql]]
-  # specify servers via an array of tables
-  [[postgresql.servers]]
+[[inputs.postgresql]]
 
   # specify address via a url matching:
   #   postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full]
   # or a simple string:
@@ -290,14 +267,13 @@ urls = ["http://localhost/server-status?auto"]
   #   address = "influx@remoteserver"
 
 # Read metrics from one or many prometheus clients
-[[plugins.prometheus]]
+[[inputs.prometheus]]
   # An array of urls to scrape metrics from.
   urls = ["http://localhost:9100/metrics"]
 
 # Read metrics from one or many RabbitMQ servers via the management API
-[[plugins.rabbitmq]]
+[[inputs.rabbitmq]]
   # Specify servers via an array of tables
-  [[rabbitmq.servers]]
   # name = "rmq-server-1" # optional tag
   # url = "http://localhost:15672"
   # username = "guest"
@@ -308,7 +284,7 @@ urls = ["http://localhost/server-status?auto"]
   # nodes = ["rabbit@node1", "rabbit@node2"]
 
 # Read metrics from one or many redis servers
-[[plugins.redis]]
+[[inputs.redis]]
   # An array of URI to gather stats about. Specify an ip or hostname
   # with optional port add password. ie redis://localhost, redis://10.10.3.33:18832,
   # 10.0.0.1:10000, etc.
@@ -317,7 +293,7 @@ urls = ["http://localhost/server-status?auto"]
   servers = ["localhost"]
 
 # Read metrics from one or many RethinkDB servers
-[[plugins.rethinkdb]]
+[[inputs.rethinkdb]]
   # An array of URI to gather stats about. Specify an ip or hostname
   # with optional port add password. ie rethinkdb://user:auth_key@10.10.3.30:28105,
   # rethinkdb://10.10.3.33:18832, 10.0.0.1:10000, etc.
@@ -326,9 +302,9 @@ urls = ["http://localhost/server-status?auto"]
   servers = ["127.0.0.1:28015"]
 
 # Read metrics about swap memory usage
-[[plugins.swap]]
+[[inputs.swap]]
   # no configuration
 
 # Read metrics about system load & uptime
-[[plugins.system]]
+[[inputs.system]]
   # no configuration
37 internal/errchan/errchan.go (new file)
@@ -0,0 +1,37 @@
+package errchan
+
+import (
+	"fmt"
+	"strings"
+)
+
+type ErrChan struct {
+	C chan error
+}
+
+// New returns an error channel of max length 'n'
+// errors can be sent to the ErrChan.C channel, and will be returned when
+// ErrChan.Error() is called.
+func New(n int) *ErrChan {
+	return &ErrChan{
+		C: make(chan error, n),
+	}
+}
+
+// Error closes the ErrChan.C channel and returns an error if there are any
+// non-nil errors, otherwise returns nil.
+func (e *ErrChan) Error() error {
+	close(e.C)
+
+	var out string
+	for err := range e.C {
+		if err != nil {
+			out += "[" + err.Error() + "], "
+		}
+	}
+
+	if out != "" {
+		return fmt.Errorf("Errors encountered: " + strings.TrimRight(out, ", "))
+	}
+	return nil
+}
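A short sketch of the fan-in pattern this package is built for, assuming three concurrent workers (the worker count and error values are illustrative; imports fmt, log, and sync are assumed):

	errs := errchan.New(3) // buffered for one error per worker
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(n int) {
			defer wg.Done()
			errs.C <- fmt.Errorf("worker %d failed", n) // hypothetical failure
		}(i)
	}
	wg.Wait()
	// Error() closes C, drains it, and joins the non-nil errors into one error.
	if err := errs.Error(); err != nil {
		log.Println(err)
	}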
98 internal/globpath/globpath.go (new file)
@@ -0,0 +1,98 @@
+package globpath
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/gobwas/glob"
+)
+
+var sepStr = fmt.Sprintf("%v", string(os.PathSeparator))
+
+type GlobPath struct {
+	path    string
+	hasMeta bool
+	g       glob.Glob
+	root    string
+}
+
+func Compile(path string) (*GlobPath, error) {
+	out := GlobPath{
+		hasMeta: hasMeta(path),
+		path:    path,
+	}
+
+	// if there are no glob meta characters in the path, don't bother compiling
+	// a glob object or finding the root directory. (see short-circuit in Match)
+	if !out.hasMeta {
+		return &out, nil
+	}
+
+	var err error
+	if out.g, err = glob.Compile(path, os.PathSeparator); err != nil {
+		return nil, err
+	}
+	// Get the root directory for this filepath
+	out.root = findRootDir(path)
+	return &out, nil
+}
+
+func (g *GlobPath) Match() map[string]os.FileInfo {
+	if !g.hasMeta {
+		out := make(map[string]os.FileInfo)
+		info, err := os.Stat(g.path)
+		if !os.IsNotExist(err) {
+			out[g.path] = info
+		}
+		return out
+	}
+	return walkFilePath(g.root, g.g)
+}
+
+// walk the filepath from the given root and return a list of files that match
+// the given glob.
+func walkFilePath(root string, g glob.Glob) map[string]os.FileInfo {
+	matchedFiles := make(map[string]os.FileInfo)
+	walkfn := func(path string, info os.FileInfo, _ error) error {
+		if g.Match(path) {
+			matchedFiles[path] = info
+		}
+		return nil
+	}
+	filepath.Walk(root, walkfn)
+	return matchedFiles
+}
+
+// find the root dir of the given path (could include globs).
+// ie:
+//   /var/log/telegraf.conf -> /var/log
+//   /home/** -> /home
+//   /home/*/** -> /home
+//   /lib/share/*/*/**.txt -> /lib/share
+func findRootDir(path string) string {
+	pathItems := strings.Split(path, sepStr)
+	out := sepStr
+	for i, item := range pathItems {
+		if i == len(pathItems)-1 {
+			break
+		}
+		if item == "" {
+			continue
+		}
+		if hasMeta(item) {
+			break
+		}
+		out += item + sepStr
+	}
+	if out != "/" {
+		out = strings.TrimSuffix(out, "/")
+	}
+	return out
+}
+
+// hasMeta reports whether path contains any magic glob characters.
+func hasMeta(path string) bool {
+	return strings.IndexAny(path, "*?[") >= 0
+}
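A minimal sketch of the call pattern; the /var/log pattern is an assumed example, and imports log and fmt are assumed:

	g, err := globpath.Compile("/var/log/**.log") // hypothetical pattern
	if err != nil {
		log.Fatal(err)
	}
	// Match walks from the non-glob root ("/var/log") and returns matching
	// paths keyed to their os.FileInfo.
	for path, info := range g.Match() {
		fmt.Println(path, info.Size())
	}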
62 internal/globpath/globpath_test.go (new file)
@@ -0,0 +1,62 @@
+package globpath
+
+import (
+	"runtime"
+	"strings"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestCompileAndMatch(t *testing.T) {
+	dir := getTestdataDir()
+	// test super asterisk
+	g1, err := Compile(dir + "/**")
+	require.NoError(t, err)
+	// test single asterisk
+	g2, err := Compile(dir + "/*.log")
+	require.NoError(t, err)
+	// test no meta characters (file exists)
+	g3, err := Compile(dir + "/log1.log")
+	require.NoError(t, err)
+	// test file that doesn't exist
+	g4, err := Compile(dir + "/i_dont_exist.log")
+	require.NoError(t, err)
+	// test super asterisk that doesn't exist
+	g5, err := Compile(dir + "/dir_doesnt_exist/**")
+	require.NoError(t, err)
+
+	matches := g1.Match()
+	assert.Len(t, matches, 3)
+	matches = g2.Match()
+	assert.Len(t, matches, 2)
+	matches = g3.Match()
+	assert.Len(t, matches, 1)
+	matches = g4.Match()
+	assert.Len(t, matches, 0)
+	matches = g5.Match()
+	assert.Len(t, matches, 0)
+}
+
+func TestFindRootDir(t *testing.T) {
+	tests := []struct {
+		input  string
+		output string
+	}{
+		{"/var/log/telegraf.conf", "/var/log"},
+		{"/home/**", "/home"},
+		{"/home/*/**", "/home"},
+		{"/lib/share/*/*/**.txt", "/lib/share"},
+	}
+
+	for _, test := range tests {
+		actual := findRootDir(test.input)
+		assert.Equal(t, test.output, actual)
+	}
+}
+
+func getTestdataDir() string {
+	_, filename, _, _ := runtime.Caller(1)
+	return strings.Replace(filename, "globpath_test.go", "testdata", 1)
+}
0 internal/globpath/testdata/log1.log (vendored, new file)
0 internal/globpath/testdata/log2.log (vendored, new file)
5 internal/globpath/testdata/test.conf (vendored, new file)
@@ -0,0 +1,5 @@
+# this is a fake testing config file
+# for testing the filestat plugin
+
+option1 = "foo"
+option2 = "bar"
@@ -2,10 +2,31 @@ package internal
 
 import (
 	"bufio"
+	"bytes"
+	"crypto/rand"
+	"crypto/tls"
+	"crypto/x509"
 	"errors"
+	"fmt"
+	"io/ioutil"
+	"log"
+	"math/big"
 	"os"
+	"os/exec"
+	"strconv"
 	"strings"
 	"time"
+	"unicode"
+
+	"github.com/gobwas/glob"
 )
+
+const alphanum string = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
+
+var (
+	TimeoutErr = errors.New("Command timed out.")
+
+	NotImplementedError = errors.New("not implemented yet")
+)
 
 // Duration just wraps time.Duration
@@ -15,18 +36,29 @@ type Duration struct {
 
 // UnmarshalTOML parses the duration from the TOML config file
 func (d *Duration) UnmarshalTOML(b []byte) error {
-	dur, err := time.ParseDuration(string(b[1 : len(b)-1]))
-	if err != nil {
-		return err
+	var err error
+	// Parse string duration, ie, "1s"
+	d.Duration, err = time.ParseDuration(string(b[1 : len(b)-1]))
+	if err == nil {
+		return nil
 	}
 
-	d.Duration = dur
+	// First try parsing as integer seconds
+	sI, err := strconv.ParseInt(string(b), 10, 64)
+	if err == nil {
+		d.Duration = time.Second * time.Duration(sI)
+		return nil
+	}
+	// Second try parsing as float seconds
+	sF, err := strconv.ParseFloat(string(b), 64)
+	if err == nil {
+		d.Duration = time.Second * time.Duration(sF)
+		return nil
+	}
 
 	return nil
 }
 
-var NotImplementedError = errors.New("not implemented yet")
-
 // ReadLines reads contents from a file and splits them by new lines.
 // A convenience wrapper to ReadLinesOffsetN(filename, 0, -1).
 func ReadLines(filename string) ([]string, error) {
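With this change a TOML interval may be a quoted duration string, a bare integer, or a bare float, the latter two read as seconds. A sketch of the three forms fed straight to UnmarshalTOML (error returns ignored for brevity):

	var d Duration
	d.UnmarshalTOML([]byte(`"1m30s"`)) // quoted string -> 90s
	d.UnmarshalTOML([]byte(`10`))      // bare integer -> 10s
	d.UnmarshalTOML([]byte(`1.5`))     // bare float -> time.Duration truncates to 1s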
@@ -62,58 +94,162 @@ func ReadLinesOffsetN(filename string, offset uint, n int) ([]string, error) {
 	return ret, nil
 }
 
-// Glob will test a string pattern, potentially containing globs, against a
-// subject string. The result is a simple true/false, determining whether or
-// not the glob pattern matched the subject text.
-//
-// Adapted from https://github.com/ryanuber/go-glob/blob/master/glob.go
-// thanks Ryan Uber!
-func Glob(pattern, measurement string) bool {
-	// Empty pattern can only match empty subject
-	if pattern == "" {
-		return measurement == pattern
-	}
-
-	// If the pattern _is_ a glob, it matches everything
-	if pattern == "*" {
-		return true
-	}
-
-	parts := strings.Split(pattern, "*")
-
-	if len(parts) == 1 {
-		// No globs in pattern, so test for match
-		return pattern == measurement
-	}
-
-	leadingGlob := strings.HasPrefix(pattern, "*")
-	trailingGlob := strings.HasSuffix(pattern, "*")
-	end := len(parts) - 1
-
-	for i, part := range parts {
-		switch i {
-		case 0:
-			if leadingGlob {
-				continue
-			}
-			if !strings.HasPrefix(measurement, part) {
-				return false
-			}
-		case end:
-			if len(measurement) > 0 {
-				return trailingGlob || strings.HasSuffix(measurement, part)
-			}
-		default:
-			if !strings.Contains(measurement, part) {
-				return false
-			}
-		}
-
-		// Trim evaluated text from measurement as we loop over the pattern.
-		idx := strings.Index(measurement, part) + len(part)
-		measurement = measurement[idx:]
-	}
-
-	// All parts of the pattern matched
-	return true
-}
+// RandomString returns a random string of alpha-numeric characters
+func RandomString(n int) string {
+	var bytes = make([]byte, n)
+	rand.Read(bytes)
+	for i, b := range bytes {
+		bytes[i] = alphanum[b%byte(len(alphanum))]
+	}
+	return string(bytes)
+}
+
+// GetTLSConfig gets a tls.Config object from the given certs, key, and CA files.
+// you must give the full path to the files.
+// If all files are blank and InsecureSkipVerify=false, returns a nil pointer.
+func GetTLSConfig(
+	SSLCert, SSLKey, SSLCA string,
+	InsecureSkipVerify bool,
+) (*tls.Config, error) {
+	if SSLCert == "" && SSLKey == "" && SSLCA == "" && !InsecureSkipVerify {
+		return nil, nil
+	}
+
+	t := &tls.Config{
+		InsecureSkipVerify: InsecureSkipVerify,
+	}
+
+	if SSLCA != "" {
+		caCert, err := ioutil.ReadFile(SSLCA)
+		if err != nil {
+			return nil, errors.New(fmt.Sprintf("Could not load TLS CA: %s",
+				err))
+		}
+
+		caCertPool := x509.NewCertPool()
+		caCertPool.AppendCertsFromPEM(caCert)
+		t.RootCAs = caCertPool
+	}
+
+	if SSLCert != "" && SSLKey != "" {
+		cert, err := tls.LoadX509KeyPair(SSLCert, SSLKey)
+		if err != nil {
+			return nil, errors.New(fmt.Sprintf(
+				"Could not load TLS client key/certificate: %s",
+				err))
+		}
+
+		t.Certificates = []tls.Certificate{cert}
+		t.BuildNameToCertificate()
+	}
+
+	// will be nil by default if nothing is provided
+	return t, nil
+}
+
+// SnakeCase converts the given string to snake case following the Golang format:
+// acronyms are converted to lower-case and preceded by an underscore.
+func SnakeCase(in string) string {
+	runes := []rune(in)
+	length := len(runes)
+
+	var out []rune
+	for i := 0; i < length; i++ {
+		if i > 0 && unicode.IsUpper(runes[i]) && ((i+1 < length && unicode.IsLower(runes[i+1])) || unicode.IsLower(runes[i-1])) {
+			out = append(out, '_')
+		}
+		out = append(out, unicode.ToLower(runes[i]))
+	}
+
+	return string(out)
+}
+
+// CombinedOutputTimeout runs the given command with the given timeout and
+// returns the combined output of stdout and stderr.
+// If the command times out, it attempts to kill the process.
+func CombinedOutputTimeout(c *exec.Cmd, timeout time.Duration) ([]byte, error) {
+	var b bytes.Buffer
+	c.Stdout = &b
+	c.Stderr = &b
+	if err := c.Start(); err != nil {
+		return nil, err
+	}
+	err := WaitTimeout(c, timeout)
+	return b.Bytes(), err
+}
+
+// RunTimeout runs the given command with the given timeout.
+// If the command times out, it attempts to kill the process.
+func RunTimeout(c *exec.Cmd, timeout time.Duration) error {
+	if err := c.Start(); err != nil {
+		return err
+	}
+	return WaitTimeout(c, timeout)
+}
+
+// WaitTimeout waits for the given command to finish with a timeout.
+// It assumes the command has already been started.
+// If the command times out, it attempts to kill the process.
+func WaitTimeout(c *exec.Cmd, timeout time.Duration) error {
+	timer := time.NewTimer(timeout)
+	done := make(chan error)
+	go func() { done <- c.Wait() }()
+	select {
+	case err := <-done:
+		timer.Stop()
+		return err
+	case <-timer.C:
+		if err := c.Process.Kill(); err != nil {
+			log.Printf("FATAL error killing process: %s", err)
+			return err
+		}
+		// wait for the command to return after killing it
+		<-done
+		return TimeoutErr
+	}
+}
+
+// CompileFilter takes a list of glob "filters", ie:
+//   ["MAIN.*", "CPU.*", "NET"]
+// and compiles them into a glob object. This glob object can
+// then be used to match keys to the filter.
+func CompileFilter(filters []string) (glob.Glob, error) {
+	var out glob.Glob
+
+	// return if there is nothing to compile
+	if len(filters) == 0 {
+		return out, nil
+	}
+
+	var err error
+	if len(filters) == 1 {
+		out, err = glob.Compile(filters[0])
+	} else {
+		out, err = glob.Compile("{" + strings.Join(filters, ",") + "}")
+	}
+	return out, err
+}
+
+// RandomSleep will sleep for a random amount of time up to max.
+// If the shutdown channel is closed, it will return before it has finished
+// sleeping.
+func RandomSleep(max time.Duration, shutdown chan struct{}) {
+	if max == 0 {
+		return
+	}
+	maxSleep := big.NewInt(max.Nanoseconds())
+
+	var sleepns int64
+	if j, err := rand.Int(rand.Reader, maxSleep); err == nil {
+		sleepns = j.Int64()
+	}
+
+	t := time.NewTimer(time.Nanosecond * time.Duration(sleepns))
+	select {
+	case <-t.C:
+		return
+	case <-shutdown:
+		t.Stop()
+		return
+	}
+}
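A brief sketch of two of the new helpers in use, from within the internal package; the filter patterns and the echo command are illustrative (imports fmt, log, os/exec, and time assumed):

	// CompileFilter folds several glob patterns into one matcher.
	f, err := CompileFilter([]string{"cpu", "net*"})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(f.Match("network")) // true
	fmt.Println(f.Match("mem"))     // false

	// CombinedOutputTimeout bounds how long an external command may run.
	out, err := CombinedOutputTimeout(exec.Command("echo", "hi"), 5*time.Second)
	fmt.Printf("%s %v", out, err) // "hi" <nil>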
|||||||
@@ -1,44 +1,164 @@
|
|||||||
package internal
|
package internal
|
||||||
|
|
||||||
import "testing"
|
import (
|
||||||
|
"os/exec"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
func testGlobMatch(t *testing.T, pattern, subj string) {
|
"github.com/stretchr/testify/assert"
|
||||||
if !Glob(pattern, subj) {
|
)
|
||||||
t.Errorf("%s should match %s", pattern, subj)
|
|
||||||
|
type SnakeTest struct {
|
||||||
|
input string
|
||||||
|
output string
|
||||||
|
}
|
||||||
|
|
||||||
|
var tests = []SnakeTest{
|
||||||
|
{"a", "a"},
|
||||||
|
{"snake", "snake"},
|
||||||
|
{"A", "a"},
|
||||||
|
{"ID", "id"},
|
||||||
|
{"MOTD", "motd"},
|
||||||
|
{"Snake", "snake"},
|
||||||
|
{"SnakeTest", "snake_test"},
|
||||||
|
{"APIResponse", "api_response"},
|
||||||
|
{"SnakeID", "snake_id"},
|
||||||
|
{"SnakeIDGoogle", "snake_id_google"},
|
||||||
|
{"LinuxMOTD", "linux_motd"},
|
||||||
|
{"OMGWTFBBQ", "omgwtfbbq"},
|
||||||
|
{"omg_wtf_bbq", "omg_wtf_bbq"},
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSnakeCase(t *testing.T) {
|
||||||
|
for _, test := range tests {
|
||||||
|
if SnakeCase(test.input) != test.output {
|
||||||
|
t.Errorf(`SnakeCase("%s"), wanted "%s", got \%s"`, test.input, test.output, SnakeCase(test.input))
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func testGlobNoMatch(t *testing.T, pattern, subj string) {
|
var (
|
||||||
if Glob(pattern, subj) {
|
sleepbin, _ = exec.LookPath("sleep")
|
||||||
t.Errorf("%s should not match %s", pattern, subj)
|
echobin, _ = exec.LookPath("echo")
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestRunTimeout(t *testing.T) {
|
||||||
|
if sleepbin == "" {
|
||||||
|
t.Skip("'sleep' binary not available on OS, skipping.")
|
||||||
}
|
}
|
||||||
|
cmd := exec.Command(sleepbin, "10")
|
||||||
|
start := time.Now()
|
||||||
|
err := RunTimeout(cmd, time.Millisecond*20)
|
||||||
|
elapsed := time.Since(start)
|
||||||
|
|
||||||
|
assert.Equal(t, TimeoutErr, err)
|
||||||
|
// Verify that command gets killed in 20ms, with some breathing room
|
||||||
|
assert.True(t, elapsed < time.Millisecond*75)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestEmptyPattern(t *testing.T) {
|
func TestCombinedOutputTimeout(t *testing.T) {
|
||||||
testGlobMatch(t, "", "")
|
if sleepbin == "" {
|
||||||
testGlobNoMatch(t, "", "test")
|
t.Skip("'sleep' binary not available on OS, skipping.")
|
||||||
}
|
|
||||||
|
|
||||||
func TestPatternWithoutGlobs(t *testing.T) {
|
	testGlobMatch(t, "test", "test")
}

func TestGlob(t *testing.T) {
	for _, pattern := range []string{
		"*test",           // Leading glob
		"this*",           // Trailing glob
		"*is*a*",          // Lots of globs
		"**test**",        // Double glob characters
		"**is**a***test*", // Varying number of globs
	} {
		testGlobMatch(t, pattern, "this_is_a_test")
	}
}

func TestGlobNoMatch(t *testing.T) {
	for _, pattern := range []string{
		"test*", // Implicit substring match should fail
		"*is",   // Partial match should fail
		"*no*",  // Globs without a match between them should fail
	} {
		testGlobNoMatch(t, pattern, "this_is_a_test")
	}
}

func TestCombinedOutputTimeout(t *testing.T) {
	if sleepbin == "" {
		t.Skip("'sleep' binary not available on OS, skipping.")
	}
	cmd := exec.Command(sleepbin, "10")
	start := time.Now()
	_, err := CombinedOutputTimeout(cmd, time.Millisecond*20)
	elapsed := time.Since(start)

	assert.Equal(t, TimeoutErr, err)
	// Verify that command gets killed in 20ms, with some breathing room
	assert.True(t, elapsed < time.Millisecond*75)
}

func TestCombinedOutput(t *testing.T) {
	if echobin == "" {
		t.Skip("'echo' binary not available on OS, skipping.")
	}
	cmd := exec.Command(echobin, "foo")
	out, err := CombinedOutputTimeout(cmd, time.Second)

	assert.NoError(t, err)
	assert.Equal(t, "foo\n", string(out))
}

// test that CombinedOutputTimeout and exec.Cmd.CombinedOutput return
// the same output from a failed command.
func TestCombinedOutputError(t *testing.T) {
	if sleepbin == "" {
		t.Skip("'sleep' binary not available on OS, skipping.")
	}
	cmd := exec.Command(sleepbin, "foo")
	expected, err := cmd.CombinedOutput()

	cmd2 := exec.Command(sleepbin, "foo")
	actual, err := CombinedOutputTimeout(cmd2, time.Second)

	assert.Error(t, err)
	assert.Equal(t, expected, actual)
}

func TestRunError(t *testing.T) {
	if sleepbin == "" {
		t.Skip("'sleep' binary not available on OS, skipping.")
	}
	cmd := exec.Command(sleepbin, "foo")
	err := RunTimeout(cmd, time.Second)

	assert.Error(t, err)
}

func TestCompileFilter(t *testing.T) {
	f, err := CompileFilter([]string{})
	assert.NoError(t, err)
	assert.Nil(t, f)

	f, err = CompileFilter([]string{"cpu"})
	assert.NoError(t, err)
	assert.True(t, f.Match("cpu"))
	assert.False(t, f.Match("cpu0"))
	assert.False(t, f.Match("mem"))

	f, err = CompileFilter([]string{"cpu*"})
	assert.NoError(t, err)
	assert.True(t, f.Match("cpu"))
	assert.True(t, f.Match("cpu0"))
	assert.False(t, f.Match("mem"))

	f, err = CompileFilter([]string{"cpu", "mem"})
	assert.NoError(t, err)
	assert.True(t, f.Match("cpu"))
	assert.False(t, f.Match("cpu0"))
	assert.True(t, f.Match("mem"))

	f, err = CompileFilter([]string{"cpu", "mem", "net*"})
	assert.NoError(t, err)
	assert.True(t, f.Match("cpu"))
	assert.False(t, f.Match("cpu0"))
	assert.True(t, f.Match("mem"))
	assert.True(t, f.Match("network"))
}

func TestRandomSleep(t *testing.T) {
	// test that zero max returns immediately
	s := time.Now()
	RandomSleep(time.Duration(0), make(chan struct{}))
	elapsed := time.Since(s)
	assert.True(t, elapsed < time.Millisecond)

	// test that max sleep is respected
	s = time.Now()
	RandomSleep(time.Millisecond*50, make(chan struct{}))
	elapsed = time.Since(s)
	assert.True(t, elapsed < time.Millisecond*50)

	// test that shutdown is respected
	s = time.Now()
	shutdown := make(chan struct{})
	go func() {
		time.Sleep(time.Millisecond * 100)
		close(shutdown)
	}()
	RandomSleep(time.Second, shutdown)
	elapsed = time.Since(s)
	assert.True(t, elapsed < time.Millisecond*150)
}
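
For orientation, a minimal sketch of how the CombinedOutputTimeout helper exercised above might be used outside the tests. This is not code from the changeset; the command path is hypothetical:

package main

import (
	"fmt"
	"os/exec"
	"time"

	"github.com/influxdata/telegraf/internal"
)

func main() {
	// Hypothetical command; internal.CombinedOutputTimeout kills the process
	// and returns internal.TimeoutErr if it runs longer than the timeout.
	cmd := exec.Command("/bin/sleep", "10")
	out, err := internal.CombinedOutputTimeout(cmd, 2*time.Second)
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Printf("output: %s", out)
}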

internal/limiter/limiter.go (new file, 59 lines)
@@ -0,0 +1,59 @@
package limiter

import (
	"sync"
	"time"
)

// NewRateLimiter returns a rate limiter that will emit from the C
// channel only 'n' times per 'rate' period.
func NewRateLimiter(n int, rate time.Duration) *rateLimiter {
	r := &rateLimiter{
		C:        make(chan bool),
		rate:     rate,
		n:        n,
		shutdown: make(chan bool),
	}
	r.wg.Add(1)
	go r.limiter()
	return r
}

type rateLimiter struct {
	C    chan bool
	rate time.Duration
	n    int

	shutdown chan bool
	wg       sync.WaitGroup
}

func (r *rateLimiter) Stop() {
	close(r.shutdown)
	r.wg.Wait()
	close(r.C)
}

func (r *rateLimiter) limiter() {
	defer r.wg.Done()
	ticker := time.NewTicker(r.rate)
	defer ticker.Stop()
	counter := 0
	for {
		select {
		case <-r.shutdown:
			return
		case <-ticker.C:
			counter = 0
		default:
			if counter < r.n {
				select {
				case r.C <- true:
					counter++
				case <-r.shutdown:
					return
				}
			}
		}
	}
}
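
A minimal consumption sketch for the limiter above (not part of the changeset): receive from C before each rate-limited operation, and call Stop when finished so the internal goroutine exits.

package main

import (
	"fmt"
	"time"

	"github.com/influxdata/telegraf/internal/limiter"
)

func main() {
	// Allow at most 3 operations per one-second window.
	r := limiter.NewRateLimiter(3, time.Second)
	defer r.Stop()

	for i := 0; i < 6; i++ {
		<-r.C // blocks once 3 tokens have been taken in the current window
		fmt.Println("op", i)
	}
}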

internal/limiter/limiter_test.go (new file, 54 lines)
@@ -0,0 +1,54 @@
package limiter

import (
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

func TestRateLimiter(t *testing.T) {
	r := NewRateLimiter(5, time.Second)
	ticker := time.NewTicker(time.Millisecond * 75)

	// test that we can only get 5 receives from the rate limiter
	counter := 0
outer:
	for {
		select {
		case <-r.C:
			counter++
		case <-ticker.C:
			break outer
		}
	}

	assert.Equal(t, 5, counter)
	r.Stop()
	// verify that the Stop function closes the channel.
	_, ok := <-r.C
	assert.False(t, ok)
}

func TestRateLimiterMultipleIterations(t *testing.T) {
	r := NewRateLimiter(5, time.Millisecond*50)
	ticker := time.NewTicker(time.Millisecond * 250)

	// test that we get more than one window's worth of receives
	counter := 0
outer:
	for {
		select {
		case <-ticker.C:
			break outer
		case <-r.C:
			counter++
		}
	}

	assert.True(t, counter > 10)
	r.Stop()
	// verify that the Stop function closes the channel.
	_, ok := <-r.C
	assert.False(t, ok)
}

internal/models/filter.go (new file, 182 lines)
@@ -0,0 +1,182 @@
package internal_models

import (
	"fmt"

	"github.com/gobwas/glob"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/internal"
)

// TagFilter is the name of a tag, and the values on which to filter
type TagFilter struct {
	Name   string
	Filter []string
	filter glob.Glob
}

// Filter containing drop/pass and tagdrop/tagpass rules
type Filter struct {
	NameDrop []string
	nameDrop glob.Glob
	NamePass []string
	namePass glob.Glob

	FieldDrop []string
	fieldDrop glob.Glob
	FieldPass []string
	fieldPass glob.Glob

	TagDrop []TagFilter
	TagPass []TagFilter

	TagExclude []string
	tagExclude glob.Glob
	TagInclude []string
	tagInclude glob.Glob

	IsActive bool
}

// CompileFilter compiles all Filter lists into glob.Glob objects.
func (f *Filter) CompileFilter() error {
	var err error
	f.nameDrop, err = internal.CompileFilter(f.NameDrop)
	if err != nil {
		return fmt.Errorf("Error compiling 'namedrop', %s", err)
	}
	f.namePass, err = internal.CompileFilter(f.NamePass)
	if err != nil {
		return fmt.Errorf("Error compiling 'namepass', %s", err)
	}

	f.fieldDrop, err = internal.CompileFilter(f.FieldDrop)
	if err != nil {
		return fmt.Errorf("Error compiling 'fielddrop', %s", err)
	}
	f.fieldPass, err = internal.CompileFilter(f.FieldPass)
	if err != nil {
		return fmt.Errorf("Error compiling 'fieldpass', %s", err)
	}

	f.tagExclude, err = internal.CompileFilter(f.TagExclude)
	if err != nil {
		return fmt.Errorf("Error compiling 'tagexclude', %s", err)
	}
	f.tagInclude, err = internal.CompileFilter(f.TagInclude)
	if err != nil {
		return fmt.Errorf("Error compiling 'taginclude', %s", err)
	}

	for i := range f.TagDrop {
		f.TagDrop[i].filter, err = internal.CompileFilter(f.TagDrop[i].Filter)
		if err != nil {
			return fmt.Errorf("Error compiling 'tagdrop', %s", err)
		}
	}
	for i := range f.TagPass {
		f.TagPass[i].filter, err = internal.CompileFilter(f.TagPass[i].Filter)
		if err != nil {
			return fmt.Errorf("Error compiling 'tagpass', %s", err)
		}
	}
	return nil
}

func (f *Filter) ShouldMetricPass(metric telegraf.Metric) bool {
	if f.ShouldNamePass(metric.Name()) && f.ShouldTagsPass(metric.Tags()) {
		return true
	}
	return false
}

// ShouldNamePass returns true if the metric should pass, false if it should drop
// based on the namedrop/namepass filter parameters
func (f *Filter) ShouldNamePass(key string) bool {
	if f.namePass != nil {
		if f.namePass.Match(key) {
			return true
		}
		return false
	}

	if f.nameDrop != nil {
		if f.nameDrop.Match(key) {
			return false
		}
	}
	return true
}

// ShouldFieldsPass returns true if the metric should pass, false if it should drop
// based on the fielddrop/fieldpass filter parameters
func (f *Filter) ShouldFieldsPass(key string) bool {
	if f.fieldPass != nil {
		if f.fieldPass.Match(key) {
			return true
		}
		return false
	}

	if f.fieldDrop != nil {
		if f.fieldDrop.Match(key) {
			return false
		}
	}
	return true
}

// ShouldTagsPass returns true if the metric should pass, false if it should drop
// based on the tagdrop/tagpass filter parameters
func (f *Filter) ShouldTagsPass(tags map[string]string) bool {
	if f.TagPass != nil {
		for _, pat := range f.TagPass {
			if pat.filter == nil {
				continue
			}
			if tagval, ok := tags[pat.Name]; ok {
				if pat.filter.Match(tagval) {
					return true
				}
			}
		}
		return false
	}

	if f.TagDrop != nil {
		for _, pat := range f.TagDrop {
			if pat.filter == nil {
				continue
			}
			if tagval, ok := tags[pat.Name]; ok {
				if pat.filter.Match(tagval) {
					return false
				}
			}
		}
		return true
	}

	return true
}

// FilterTags applies the TagInclude and TagExclude filters and
// modifies the tags map in-place.
func (f *Filter) FilterTags(tags map[string]string) {
	if f.tagInclude != nil {
		for k := range tags {
			if !f.tagInclude.Match(k) {
				delete(tags, k)
			}
		}
	}

	if f.tagExclude != nil {
		for k := range tags {
			if f.tagExclude.Match(k) {
				delete(tags, k)
			}
		}
	}
}
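
One behavior of ShouldNamePass (and its field twin) worth calling out: when a pass list is set, it alone decides the outcome and the drop list is never consulted. A small sketch, not from the changeset:

package main

import (
	"fmt"

	internal_models "github.com/influxdata/telegraf/internal/models"
)

func main() {
	f := internal_models.Filter{
		NamePass: []string{"cpu*"},
		NameDrop: []string{"cpu_guest"}, // never consulted while NamePass is set
	}
	if err := f.CompileFilter(); err != nil {
		panic(err)
	}
	fmt.Println(f.ShouldNamePass("cpu_idle"))  // true: matches "cpu*"
	fmt.Println(f.ShouldNamePass("cpu_guest")) // true: the pass list wins
	fmt.Println(f.ShouldNamePass("mem_free"))  // false: fails the pass list
}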

internal/models/filter_test.go (new file, 366 lines)
@@ -0,0 +1,366 @@
package internal_models

import (
	"testing"

	"github.com/influxdata/telegraf/testutil"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestFilter_Empty(t *testing.T) {
	f := Filter{}

	measurements := []string{
		"foo",
		"bar",
		"barfoo",
		"foo_bar",
		"foo.bar",
		"foo-bar",
		"supercalifradjulisticexpialidocious",
	}

	for _, measurement := range measurements {
		if !f.ShouldFieldsPass(measurement) {
			t.Errorf("Expected measurement %s to pass", measurement)
		}
	}
}

func TestFilter_NamePass(t *testing.T) {
	f := Filter{
		NamePass: []string{"foo*", "cpu_usage_idle"},
	}
	require.NoError(t, f.CompileFilter())

	passes := []string{
		"foo",
		"foo_bar",
		"foo.bar",
		"foo-bar",
		"cpu_usage_idle",
	}

	drops := []string{
		"bar",
		"barfoo",
		"bar_foo",
		"cpu_usage_busy",
	}

	for _, measurement := range passes {
		if !f.ShouldNamePass(measurement) {
			t.Errorf("Expected measurement %s to pass", measurement)
		}
	}

	for _, measurement := range drops {
		if f.ShouldNamePass(measurement) {
			t.Errorf("Expected measurement %s to drop", measurement)
		}
	}
}

func TestFilter_NameDrop(t *testing.T) {
	f := Filter{
		NameDrop: []string{"foo*", "cpu_usage_idle"},
	}
	require.NoError(t, f.CompileFilter())

	drops := []string{
		"foo",
		"foo_bar",
		"foo.bar",
		"foo-bar",
		"cpu_usage_idle",
	}

	passes := []string{
		"bar",
		"barfoo",
		"bar_foo",
		"cpu_usage_busy",
	}

	for _, measurement := range passes {
		if !f.ShouldNamePass(measurement) {
			t.Errorf("Expected measurement %s to pass", measurement)
		}
	}

	for _, measurement := range drops {
		if f.ShouldNamePass(measurement) {
			t.Errorf("Expected measurement %s to drop", measurement)
		}
	}
}

func TestFilter_FieldPass(t *testing.T) {
	f := Filter{
		FieldPass: []string{"foo*", "cpu_usage_idle"},
	}
	require.NoError(t, f.CompileFilter())

	passes := []string{
		"foo",
		"foo_bar",
		"foo.bar",
		"foo-bar",
		"cpu_usage_idle",
	}

	drops := []string{
		"bar",
		"barfoo",
		"bar_foo",
		"cpu_usage_busy",
	}

	for _, measurement := range passes {
		if !f.ShouldFieldsPass(measurement) {
			t.Errorf("Expected measurement %s to pass", measurement)
		}
	}

	for _, measurement := range drops {
		if f.ShouldFieldsPass(measurement) {
			t.Errorf("Expected measurement %s to drop", measurement)
		}
	}
}

func TestFilter_FieldDrop(t *testing.T) {
	f := Filter{
		FieldDrop: []string{"foo*", "cpu_usage_idle"},
	}
	require.NoError(t, f.CompileFilter())

	drops := []string{
		"foo",
		"foo_bar",
		"foo.bar",
		"foo-bar",
		"cpu_usage_idle",
	}

	passes := []string{
		"bar",
		"barfoo",
		"bar_foo",
		"cpu_usage_busy",
	}

	for _, measurement := range passes {
		if !f.ShouldFieldsPass(measurement) {
			t.Errorf("Expected measurement %s to pass", measurement)
		}
	}

	for _, measurement := range drops {
		if f.ShouldFieldsPass(measurement) {
			t.Errorf("Expected measurement %s to drop", measurement)
		}
	}
}

func TestFilter_TagPass(t *testing.T) {
	filters := []TagFilter{
		TagFilter{
			Name:   "cpu",
			Filter: []string{"cpu-*"},
		},
		TagFilter{
			Name:   "mem",
			Filter: []string{"mem_free"},
		}}
	f := Filter{
		TagPass: filters,
	}
	require.NoError(t, f.CompileFilter())

	passes := []map[string]string{
		{"cpu": "cpu-total"},
		{"cpu": "cpu-0"},
		{"cpu": "cpu-1"},
		{"cpu": "cpu-2"},
		{"mem": "mem_free"},
	}

	drops := []map[string]string{
		{"cpu": "cputotal"},
		{"cpu": "cpu0"},
		{"cpu": "cpu1"},
		{"cpu": "cpu2"},
		{"mem": "mem_used"},
	}

	for _, tags := range passes {
		if !f.ShouldTagsPass(tags) {
			t.Errorf("Expected tags %v to pass", tags)
		}
	}

	for _, tags := range drops {
		if f.ShouldTagsPass(tags) {
			t.Errorf("Expected tags %v to drop", tags)
		}
	}
}

func TestFilter_TagDrop(t *testing.T) {
	filters := []TagFilter{
		TagFilter{
			Name:   "cpu",
			Filter: []string{"cpu-*"},
		},
		TagFilter{
			Name:   "mem",
			Filter: []string{"mem_free"},
		}}
	f := Filter{
		TagDrop: filters,
	}
	require.NoError(t, f.CompileFilter())

	drops := []map[string]string{
		{"cpu": "cpu-total"},
		{"cpu": "cpu-0"},
		{"cpu": "cpu-1"},
		{"cpu": "cpu-2"},
		{"mem": "mem_free"},
	}

	passes := []map[string]string{
		{"cpu": "cputotal"},
		{"cpu": "cpu0"},
		{"cpu": "cpu1"},
		{"cpu": "cpu2"},
		{"mem": "mem_used"},
	}

	for _, tags := range passes {
		if !f.ShouldTagsPass(tags) {
			t.Errorf("Expected tags %v to pass", tags)
		}
	}

	for _, tags := range drops {
		if f.ShouldTagsPass(tags) {
			t.Errorf("Expected tags %v to drop", tags)
		}
	}
}

func TestFilter_CompileFilterError(t *testing.T) {
	f := Filter{
		NameDrop: []string{"", ""},
	}
	assert.Error(t, f.CompileFilter())
	f = Filter{
		NamePass: []string{"", ""},
	}
	assert.Error(t, f.CompileFilter())
	f = Filter{
		FieldDrop: []string{"", ""},
	}
	assert.Error(t, f.CompileFilter())
	f = Filter{
		FieldPass: []string{"", ""},
	}
	assert.Error(t, f.CompileFilter())
	f = Filter{
		TagExclude: []string{"", ""},
	}
	assert.Error(t, f.CompileFilter())
	f = Filter{
		TagInclude: []string{"", ""},
	}
	assert.Error(t, f.CompileFilter())
	filters := []TagFilter{
		TagFilter{
			Name:   "cpu",
			Filter: []string{"{foobar}"},
		}}
	f = Filter{
		TagDrop: filters,
	}
	require.Error(t, f.CompileFilter())
	filters = []TagFilter{
		TagFilter{
			Name:   "cpu",
			Filter: []string{"{foobar}"},
		}}
	f = Filter{
		TagPass: filters,
	}
	require.Error(t, f.CompileFilter())
}

func TestFilter_ShouldMetricsPass(t *testing.T) {
	m := testutil.TestMetric(1, "testmetric")
	f := Filter{
		NameDrop: []string{"foobar"},
	}
	require.NoError(t, f.CompileFilter())
	require.True(t, f.ShouldMetricPass(m))

	m = testutil.TestMetric(1, "foobar")
	require.False(t, f.ShouldMetricPass(m))
}

func TestFilter_FilterTagsNoMatches(t *testing.T) {
	pretags := map[string]string{
		"host":  "localhost",
		"mytag": "foobar",
	}
	f := Filter{
		TagExclude: []string{"nomatch"},
	}
	require.NoError(t, f.CompileFilter())

	f.FilterTags(pretags)
	assert.Equal(t, map[string]string{
		"host":  "localhost",
		"mytag": "foobar",
	}, pretags)

	f = Filter{
		TagInclude: []string{"nomatch"},
	}
	require.NoError(t, f.CompileFilter())

	f.FilterTags(pretags)
	assert.Equal(t, map[string]string{}, pretags)
}

func TestFilter_FilterTagsMatches(t *testing.T) {
	pretags := map[string]string{
		"host":  "localhost",
		"mytag": "foobar",
	}
	f := Filter{
		TagExclude: []string{"ho*"},
	}
	require.NoError(t, f.CompileFilter())

	f.FilterTags(pretags)
	assert.Equal(t, map[string]string{
		"mytag": "foobar",
	}, pretags)

	pretags = map[string]string{
		"host":  "localhost",
		"mytag": "foobar",
	}
	f = Filter{
		TagInclude: []string{"my*"},
	}
	require.NoError(t, f.CompileFilter())

	f.FilterTags(pretags)
	assert.Equal(t, map[string]string{
		"mytag": "foobar",
	}, pretags)
}

internal/models/running_input.go (new file, 24 lines)
@@ -0,0 +1,24 @@
package internal_models

import (
	"time"

	"github.com/influxdata/telegraf"
)

type RunningInput struct {
	Name   string
	Input  telegraf.Input
	Config *InputConfig
}

// InputConfig containing a name, interval, and filter
type InputConfig struct {
	Name              string
	NameOverride      string
	MeasurementPrefix string
	MeasurementSuffix string
	Tags              map[string]string
	Filter            Filter
	Interval          time.Duration
}
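
A hedged sketch of how the agent might assemble one of these configs; all field values here are hypothetical and this is not code from the changeset:

package main

import (
	"time"

	internal_models "github.com/influxdata/telegraf/internal/models"
)

func main() {
	conf := &internal_models.InputConfig{
		Name:     "cpu",
		Tags:     map[string]string{"dc": "us-east-1"}, // hypothetical extra tag
		Interval: 10 * time.Second,
		Filter: internal_models.Filter{
			IsActive:  true,
			FieldPass: []string{"usage_*"},
		},
	}
	if err := conf.Filter.CompileFilter(); err != nil {
		panic(err)
	}
	_ = conf // would be attached to a RunningInput alongside the plugin instance
}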

internal/models/running_output.go (new file, 160 lines)
@@ -0,0 +1,160 @@
package internal_models

import (
	"log"
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/internal/buffer"
)

const (
	// Default size of a metrics batch.
	DEFAULT_METRIC_BATCH_SIZE = 1000

	// Default number of metrics kept. It should be a multiple of batch size.
	DEFAULT_METRIC_BUFFER_LIMIT = 10000
)

// RunningOutput contains the output configuration
type RunningOutput struct {
	Name              string
	Output            telegraf.Output
	Config            *OutputConfig
	Quiet             bool
	MetricBufferLimit int
	MetricBatchSize   int

	metrics     *buffer.Buffer
	failMetrics *buffer.Buffer
}

func NewRunningOutput(
	name string,
	output telegraf.Output,
	conf *OutputConfig,
	batchSize int,
	bufferLimit int,
) *RunningOutput {
	if bufferLimit == 0 {
		bufferLimit = DEFAULT_METRIC_BUFFER_LIMIT
	}
	if batchSize == 0 {
		batchSize = DEFAULT_METRIC_BATCH_SIZE
	}
	ro := &RunningOutput{
		Name:              name,
		metrics:           buffer.NewBuffer(batchSize),
		failMetrics:       buffer.NewBuffer(bufferLimit),
		Output:            output,
		Config:            conf,
		MetricBufferLimit: bufferLimit,
		MetricBatchSize:   batchSize,
	}
	return ro
}

// AddMetric adds a metric to the output. It also triggers a write of the
// current batch once the buffer reaches MetricBatchSize.
func (ro *RunningOutput) AddMetric(metric telegraf.Metric) {
	if ro.Config.Filter.IsActive {
		if !ro.Config.Filter.ShouldMetricPass(metric) {
			return
		}
	}

	// Filter any tagexclude/taginclude parameters before adding metric
	if len(ro.Config.Filter.TagExclude) != 0 || len(ro.Config.Filter.TagInclude) != 0 {
		// In order to filter out tags, we need to create a new metric, since
		// metrics are immutable once created.
		tags := metric.Tags()
		fields := metric.Fields()
		t := metric.Time()
		name := metric.Name()
		ro.Config.Filter.FilterTags(tags)
		// error is not possible if creating from another metric, so ignore.
		metric, _ = telegraf.NewMetric(name, tags, fields, t)
	}

	ro.metrics.Add(metric)
	if ro.metrics.Len() == ro.MetricBatchSize {
		batch := ro.metrics.Batch(ro.MetricBatchSize)
		err := ro.write(batch)
		if err != nil {
			ro.failMetrics.Add(batch...)
		}
	}
}

// Write writes all cached points to this output.
func (ro *RunningOutput) Write() error {
	if !ro.Quiet {
		log.Printf("Output [%s] buffer fullness: %d / %d metrics. "+
			"Total gathered metrics: %d. Total dropped metrics: %d.",
			ro.Name,
			ro.failMetrics.Len()+ro.metrics.Len(),
			ro.MetricBufferLimit,
			ro.metrics.Total(),
			ro.metrics.Drops()+ro.failMetrics.Drops())
	}

	var err error
	if !ro.failMetrics.IsEmpty() {
		bufLen := ro.failMetrics.Len()
		// how many batches of failed writes we need to write.
		nBatches := bufLen/ro.MetricBatchSize + 1
		batchSize := ro.MetricBatchSize

		for i := 0; i < nBatches; i++ {
			// If it's the last batch, only grab the metrics that have not had
			// a write attempt already (this is primarily to preserve order).
			if i == nBatches-1 {
				batchSize = bufLen % ro.MetricBatchSize
			}
			batch := ro.failMetrics.Batch(batchSize)
			// If we've already failed previous writes, don't bother trying to
			// write to this output again. We are not exiting the loop just so
			// that we can rotate the metrics to preserve order.
			if err == nil {
				err = ro.write(batch)
			}
			if err != nil {
				ro.failMetrics.Add(batch...)
			}
		}
	}

	batch := ro.metrics.Batch(ro.MetricBatchSize)
	// see comment above about not trying to write to an already failed output.
	// if ro.failMetrics is empty then err will always be nil at this point.
	if err == nil {
		err = ro.write(batch)
	}
	if err != nil {
		ro.failMetrics.Add(batch...)
		return err
	}
	return nil
}

func (ro *RunningOutput) write(metrics []telegraf.Metric) error {
	if len(metrics) == 0 {
		return nil
	}
	start := time.Now()
	err := ro.Output.Write(metrics)
	elapsed := time.Since(start)
	if err == nil {
		if !ro.Quiet {
			log.Printf("Output [%s] wrote batch of %d metrics in %s\n",
				ro.Name, len(metrics), elapsed)
		}
	}
	return err
}

// OutputConfig containing name and filter
type OutputConfig struct {
	Name   string
	Filter Filter
}
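
The rebatching arithmetic in Write is easy to misread, so a quick worked example with hypothetical numbers: with MetricBatchSize = 5 and 12 previously failed metrics, three write attempts are made, and only the final, partial batch contains metrics that have not had a write attempt yet. When bufLen is an exact multiple of the batch size, the last batch is empty and the lower-level write returns immediately.

package main

import "fmt"

func main() {
	bufLen, batchSize := 12, 5
	nBatches := bufLen/batchSize + 1
	fmt.Println(nBatches)           // 3 write attempts: 5, 5, then the remainder
	fmt.Println(bufLen % batchSize) // 2 metrics in the final, partial batch
}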

internal/models/running_output_test.go (new file, 568 lines)
@@ -0,0 +1,568 @@
package internal_models

import (
	"fmt"
	"sync"
	"testing"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/testutil"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

var first5 = []telegraf.Metric{
	testutil.TestMetric(101, "metric1"),
	testutil.TestMetric(101, "metric2"),
	testutil.TestMetric(101, "metric3"),
	testutil.TestMetric(101, "metric4"),
	testutil.TestMetric(101, "metric5"),
}

var next5 = []telegraf.Metric{
	testutil.TestMetric(101, "metric6"),
	testutil.TestMetric(101, "metric7"),
	testutil.TestMetric(101, "metric8"),
	testutil.TestMetric(101, "metric9"),
	testutil.TestMetric(101, "metric10"),
}

// Benchmark adding metrics, writing on every iteration.
func BenchmarkRunningOutputAddWrite(b *testing.B) {
	conf := &OutputConfig{
		Filter: Filter{
			IsActive: false,
		},
	}

	m := &perfOutput{}
	ro := NewRunningOutput("test", m, conf, 1000, 10000)
	ro.Quiet = true

	for n := 0; n < b.N; n++ {
		ro.AddMetric(first5[0])
		ro.Write()
	}
}

// Benchmark adding metrics, writing on every 100th iteration.
func BenchmarkRunningOutputAddWriteEvery100(b *testing.B) {
	conf := &OutputConfig{
		Filter: Filter{
			IsActive: false,
		},
	}

	m := &perfOutput{}
	ro := NewRunningOutput("test", m, conf, 1000, 10000)
	ro.Quiet = true

	for n := 0; n < b.N; n++ {
		ro.AddMetric(first5[0])
		if n%100 == 0 {
			ro.Write()
		}
	}
}

// Benchmark adding metrics to an output whose writes always fail.
func BenchmarkRunningOutputAddFailWrites(b *testing.B) {
	conf := &OutputConfig{
		Filter: Filter{
			IsActive: false,
		},
	}

	m := &perfOutput{}
	m.failWrite = true
	ro := NewRunningOutput("test", m, conf, 1000, 10000)
	ro.Quiet = true

	for n := 0; n < b.N; n++ {
		ro.AddMetric(first5[0])
	}
}

// Test that NameDrop filters get properly applied.
func TestRunningOutput_DropFilter(t *testing.T) {
	conf := &OutputConfig{
		Filter: Filter{
			IsActive: true,
			NameDrop: []string{"metric1", "metric2"},
		},
	}
	assert.NoError(t, conf.Filter.CompileFilter())

	m := &mockOutput{}
	ro := NewRunningOutput("test", m, conf, 1000, 10000)

	for _, metric := range first5 {
		ro.AddMetric(metric)
	}
	for _, metric := range next5 {
		ro.AddMetric(metric)
	}
	assert.Len(t, m.Metrics(), 0)

	err := ro.Write()
	assert.NoError(t, err)
	assert.Len(t, m.Metrics(), 8)
}

// Test that NameDrop filters without a match do nothing.
func TestRunningOutput_PassFilter(t *testing.T) {
	conf := &OutputConfig{
		Filter: Filter{
			IsActive: true,
			NameDrop: []string{"metric1000", "foo*"},
		},
	}
	assert.NoError(t, conf.Filter.CompileFilter())

	m := &mockOutput{}
	ro := NewRunningOutput("test", m, conf, 1000, 10000)

	for _, metric := range first5 {
		ro.AddMetric(metric)
	}
	for _, metric := range next5 {
		ro.AddMetric(metric)
	}
	assert.Len(t, m.Metrics(), 0)

	err := ro.Write()
	assert.NoError(t, err)
	assert.Len(t, m.Metrics(), 10)
}

// Test that a TagInclude filter with no matches drops all tags.
func TestRunningOutput_TagIncludeNoMatch(t *testing.T) {
	conf := &OutputConfig{
		Filter: Filter{
			IsActive:   true,
			TagInclude: []string{"nothing*"},
		},
	}
	assert.NoError(t, conf.Filter.CompileFilter())

	m := &mockOutput{}
	ro := NewRunningOutput("test", m, conf, 1000, 10000)

	ro.AddMetric(first5[0])
	assert.Len(t, m.Metrics(), 0)

	err := ro.Write()
	assert.NoError(t, err)
	assert.Len(t, m.Metrics(), 1)
	assert.Empty(t, m.Metrics()[0].Tags())
}

// Test that matching tags are properly excluded.
func TestRunningOutput_TagExcludeMatch(t *testing.T) {
	conf := &OutputConfig{
		Filter: Filter{
			IsActive:   true,
			TagExclude: []string{"tag*"},
		},
	}
	assert.NoError(t, conf.Filter.CompileFilter())

	m := &mockOutput{}
	ro := NewRunningOutput("test", m, conf, 1000, 10000)

	ro.AddMetric(first5[0])
	assert.Len(t, m.Metrics(), 0)

	err := ro.Write()
	assert.NoError(t, err)
	assert.Len(t, m.Metrics(), 1)
	assert.Len(t, m.Metrics()[0].Tags(), 0)
}

// Test that a TagExclude filter with no matches leaves tags intact.
func TestRunningOutput_TagExcludeNoMatch(t *testing.T) {
	conf := &OutputConfig{
		Filter: Filter{
			IsActive:   true,
			TagExclude: []string{"nothing*"},
		},
	}
	assert.NoError(t, conf.Filter.CompileFilter())

	m := &mockOutput{}
	ro := NewRunningOutput("test", m, conf, 1000, 10000)

	ro.AddMetric(first5[0])
	assert.Len(t, m.Metrics(), 0)

	err := ro.Write()
	assert.NoError(t, err)
	assert.Len(t, m.Metrics(), 1)
	assert.Len(t, m.Metrics()[0].Tags(), 1)
}

// Test that matching tags are properly included.
func TestRunningOutput_TagIncludeMatch(t *testing.T) {
	conf := &OutputConfig{
		Filter: Filter{
			IsActive:   true,
			TagInclude: []string{"tag*"},
		},
	}
	assert.NoError(t, conf.Filter.CompileFilter())

	m := &mockOutput{}
	ro := NewRunningOutput("test", m, conf, 1000, 10000)

	ro.AddMetric(first5[0])
	assert.Len(t, m.Metrics(), 0)

	err := ro.Write()
	assert.NoError(t, err)
	assert.Len(t, m.Metrics(), 1)
	assert.Len(t, m.Metrics()[0].Tags(), 1)
}

// Test that we can write metrics with simple default setup.
func TestRunningOutputDefault(t *testing.T) {
	conf := &OutputConfig{
		Filter: Filter{
			IsActive: false,
		},
	}

	m := &mockOutput{}
	ro := NewRunningOutput("test", m, conf, 1000, 10000)

	for _, metric := range first5 {
		ro.AddMetric(metric)
	}
	for _, metric := range next5 {
		ro.AddMetric(metric)
	}
	assert.Len(t, m.Metrics(), 0)

	err := ro.Write()
	assert.NoError(t, err)
	assert.Len(t, m.Metrics(), 10)
}

// Test that the running output doesn't flush until the batch size is reached.
func TestRunningOutputFlushWhenFull(t *testing.T) {
	conf := &OutputConfig{
		Filter: Filter{
			IsActive: false,
		},
	}

	m := &mockOutput{}
	ro := NewRunningOutput("test", m, conf, 6, 10)

	// Fill buffer to 1 under limit
	for _, metric := range first5 {
		ro.AddMetric(metric)
	}
	// no flush yet
	assert.Len(t, m.Metrics(), 0)

	// add one more metric
	ro.AddMetric(next5[0])
	// now it flushed
	assert.Len(t, m.Metrics(), 6)

	// add one more metric and write it manually
	ro.AddMetric(next5[1])
	err := ro.Write()
	assert.NoError(t, err)
	assert.Len(t, m.Metrics(), 7)
}

// Test that the running output flushes a full batch each time the batch
// size is reached, twice.
func TestRunningOutputMultiFlushWhenFull(t *testing.T) {
	conf := &OutputConfig{
		Filter: Filter{
			IsActive: false,
		},
	}

	m := &mockOutput{}
	ro := NewRunningOutput("test", m, conf, 4, 12)

	// Fill buffer past the batch size twice
	for _, metric := range first5 {
		ro.AddMetric(metric)
	}
	for _, metric := range next5 {
		ro.AddMetric(metric)
	}
	// flushed twice
	assert.Len(t, m.Metrics(), 8)
}

func TestRunningOutputWriteFail(t *testing.T) {
	conf := &OutputConfig{
		Filter: Filter{
			IsActive: false,
		},
	}

	m := &mockOutput{}
	m.failWrite = true
	ro := NewRunningOutput("test", m, conf, 4, 12)

	// Fill buffer past the batch size twice
	for _, metric := range first5 {
		ro.AddMetric(metric)
	}
	for _, metric := range next5 {
		ro.AddMetric(metric)
	}
	// no successful flush yet
	assert.Len(t, m.Metrics(), 0)

	// manual write fails
	err := ro.Write()
	require.Error(t, err)
	// no successful flush yet
	assert.Len(t, m.Metrics(), 0)

	m.failWrite = false
	err = ro.Write()
	require.NoError(t, err)

	assert.Len(t, m.Metrics(), 10)
}

// Verify that the order of points is preserved during a write failure.
func TestRunningOutputWriteFailOrder(t *testing.T) {
	conf := &OutputConfig{
		Filter: Filter{
			IsActive: false,
		},
	}

	m := &mockOutput{}
	m.failWrite = true
	ro := NewRunningOutput("test", m, conf, 100, 1000)

	// add 5 metrics
	for _, metric := range first5 {
		ro.AddMetric(metric)
	}
	// no successful flush yet
	assert.Len(t, m.Metrics(), 0)

	// Write fails
	err := ro.Write()
	require.Error(t, err)
	// no successful flush yet
	assert.Len(t, m.Metrics(), 0)

	m.failWrite = false
	// add 5 more metrics
	for _, metric := range next5 {
		ro.AddMetric(metric)
	}
	err = ro.Write()
	require.NoError(t, err)

	// Verify that 10 metrics were written
	assert.Len(t, m.Metrics(), 10)
	// Verify that they are in order
	expected := append(first5, next5...)
	assert.Equal(t, expected, m.Metrics())
}

// Verify that the order of points is preserved during many write failures.
func TestRunningOutputWriteFailOrder2(t *testing.T) {
	conf := &OutputConfig{
		Filter: Filter{
			IsActive: false,
		},
	}

	m := &mockOutput{}
	m.failWrite = true
	ro := NewRunningOutput("test", m, conf, 5, 100)

	// add 5 metrics
	for _, metric := range first5 {
		ro.AddMetric(metric)
	}
	// Write fails
	err := ro.Write()
	require.Error(t, err)
	// no successful flush yet
	assert.Len(t, m.Metrics(), 0)

	// add 5 metrics
	for _, metric := range next5 {
		ro.AddMetric(metric)
	}
	// Write fails
	err = ro.Write()
	require.Error(t, err)
	// no successful flush yet
	assert.Len(t, m.Metrics(), 0)

	// add 5 metrics
	for _, metric := range first5 {
		ro.AddMetric(metric)
	}
	// Write fails
	err = ro.Write()
	require.Error(t, err)
	// no successful flush yet
	assert.Len(t, m.Metrics(), 0)

	// add 5 metrics
	for _, metric := range next5 {
		ro.AddMetric(metric)
	}
	// Write fails
	err = ro.Write()
	require.Error(t, err)
	// no successful flush yet
	assert.Len(t, m.Metrics(), 0)

	m.failWrite = false
	err = ro.Write()
	require.NoError(t, err)

	// Verify that 20 metrics were written
	assert.Len(t, m.Metrics(), 20)
	// Verify that they are in order
	expected := append(first5, next5...)
	expected = append(expected, first5...)
	expected = append(expected, next5...)
	assert.Equal(t, expected, m.Metrics())
}

// Verify that the order of points is preserved when there is a remainder
// of points for the batch.
//
// ie, with a batch size of 5:
//
//	1 2 3 4 5 6 <-- order, failed points
//	6 1 2 3 4 5 <-- order, after 1st write failure (1 2 3 4 5 was batch)
//	1 2 3 4 5 6 <-- order, after 2nd write failure, (6 was batch)
//
func TestRunningOutputWriteFailOrder3(t *testing.T) {
	conf := &OutputConfig{
		Filter: Filter{
			IsActive: false,
		},
	}

	m := &mockOutput{}
	m.failWrite = true
	ro := NewRunningOutput("test", m, conf, 5, 1000)

	// add 5 metrics
	for _, metric := range first5 {
		ro.AddMetric(metric)
	}
	// no successful flush yet
	assert.Len(t, m.Metrics(), 0)

	// Write fails
	err := ro.Write()
	require.Error(t, err)
	// no successful flush yet
	assert.Len(t, m.Metrics(), 0)

	// add and attempt to write a single metric:
	ro.AddMetric(next5[0])
	err = ro.Write()
	require.Error(t, err)

	// unset fail and write metrics
	m.failWrite = false
	err = ro.Write()
	require.NoError(t, err)

	// Verify that 6 metrics were written
	assert.Len(t, m.Metrics(), 6)
	// Verify that they are in order
	expected := append(first5, next5[0])
	assert.Equal(t, expected, m.Metrics())
}

type mockOutput struct {
	sync.Mutex

	metrics []telegraf.Metric

	// if true, mock a write failure
	failWrite bool
}

func (m *mockOutput) Connect() error {
	return nil
}

func (m *mockOutput) Close() error {
	return nil
}

func (m *mockOutput) Description() string {
	return ""
}

func (m *mockOutput) SampleConfig() string {
	return ""
}

func (m *mockOutput) Write(metrics []telegraf.Metric) error {
	m.Lock()
	defer m.Unlock()
	if m.failWrite {
		return fmt.Errorf("Failed Write!")
	}

	if m.metrics == nil {
		m.metrics = []telegraf.Metric{}
	}

	for _, metric := range metrics {
		m.metrics = append(m.metrics, metric)
	}
	return nil
}

func (m *mockOutput) Metrics() []telegraf.Metric {
	m.Lock()
	defer m.Unlock()
	return m.metrics
}

type perfOutput struct {
	// if true, mock a write failure
	failWrite bool
}

func (m *perfOutput) Connect() error {
	return nil
}

func (m *perfOutput) Close() error {
	return nil
}

func (m *perfOutput) Description() string {
	return ""
}

func (m *perfOutput) SampleConfig() string {
	return ""
}

func (m *perfOutput) Write(metrics []telegraf.Metric) error {
	if m.failWrite {
		return fmt.Errorf("Failed Write!")
	}
	return nil
}

metric.go (new file, 94 lines)
@@ -0,0 +1,94 @@
package telegraf

import (
	"time"

	"github.com/influxdata/influxdb/client/v2"
)

type Metric interface {
	// Name returns the measurement name of the metric
	Name() string

	// Tags returns the tags associated with the metric
	Tags() map[string]string

	// Time returns the timestamp for the metric
	Time() time.Time

	// UnixNano returns the unix nano time of the metric
	UnixNano() int64

	// Fields returns the fields for the metric
	Fields() map[string]interface{}

	// String returns a line-protocol string of the metric
	String() string

	// PrecisionString returns a line-protocol string of the metric, at precision
	PrecisionString(precision string) string

	// Point returns an influxdb client.Point object
	Point() *client.Point
}

// metric is a wrapper of the influxdb client.Point struct
type metric struct {
	pt *client.Point
}

// NewMetric returns a metric with the given timestamp. If a timestamp is not
// given, then data is sent to the database without a timestamp, in which case
// the server will assign local time upon reception. NOTE: it is recommended to
// send data with a timestamp.
func NewMetric(
	name string,
	tags map[string]string,
	fields map[string]interface{},
	t ...time.Time,
) (Metric, error) {
	var T time.Time
	if len(t) > 0 {
		T = t[0]
	}

	pt, err := client.NewPoint(name, tags, fields, T)
	if err != nil {
		return nil, err
	}
	return &metric{
		pt: pt,
	}, nil
}

func (m *metric) Name() string {
	return m.pt.Name()
}

func (m *metric) Tags() map[string]string {
	return m.pt.Tags()
}

func (m *metric) Time() time.Time {
	return m.pt.Time()
}

func (m *metric) UnixNano() int64 {
	return m.pt.UnixNano()
}

func (m *metric) Fields() map[string]interface{} {
	return m.pt.Fields()
}

func (m *metric) String() string {
	return m.pt.String()
}

func (m *metric) PrecisionString(precision string) string {
	return m.pt.PrecisionString(precision)
}

func (m *metric) Point() *client.Point {
	return m.pt
}

metric_test.go (new file, 83 lines)
@@ -0,0 +1,83 @@
package telegraf

import (
	"fmt"
	"math"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

func TestNewMetric(t *testing.T) {
	now := time.Now()

	tags := map[string]string{
		"host":       "localhost",
		"datacenter": "us-east-1",
	}
	fields := map[string]interface{}{
		"usage_idle": float64(99),
		"usage_busy": float64(1),
	}
	m, err := NewMetric("cpu", tags, fields, now)
	assert.NoError(t, err)

	assert.Equal(t, tags, m.Tags())
	assert.Equal(t, fields, m.Fields())
	assert.Equal(t, "cpu", m.Name())
	assert.Equal(t, now, m.Time())
	assert.Equal(t, now.UnixNano(), m.UnixNano())
}

func TestNewMetricString(t *testing.T) {
	now := time.Now()

	tags := map[string]string{
		"host": "localhost",
	}
	fields := map[string]interface{}{
		"usage_idle": float64(99),
	}
	m, err := NewMetric("cpu", tags, fields, now)
	assert.NoError(t, err)

	lineProto := fmt.Sprintf("cpu,host=localhost usage_idle=99 %d",
		now.UnixNano())
	assert.Equal(t, lineProto, m.String())

	lineProtoPrecision := fmt.Sprintf("cpu,host=localhost usage_idle=99 %d",
		now.Unix())
	assert.Equal(t, lineProtoPrecision, m.PrecisionString("s"))
}

func TestNewMetricStringNoTime(t *testing.T) {
	tags := map[string]string{
		"host": "localhost",
	}
	fields := map[string]interface{}{
		"usage_idle": float64(99),
	}
	m, err := NewMetric("cpu", tags, fields)
	assert.NoError(t, err)

	lineProto := "cpu,host=localhost usage_idle=99"
	assert.Equal(t, lineProto, m.String())

	lineProtoPrecision := "cpu,host=localhost usage_idle=99"
	assert.Equal(t, lineProtoPrecision, m.PrecisionString("s"))
}

func TestNewMetricFailNaN(t *testing.T) {
	now := time.Now()

	tags := map[string]string{
		"host": "localhost",
	}
	fields := map[string]interface{}{
		"usage_idle": math.NaN(),
	}

	_, err := NewMetric("cpu", tags, fields, now)
	assert.Error(t, err)
}
@@ -1,8 +1,4 @@
-package outputs
-
-import (
-	"github.com/influxdb/influxdb/client/v2"
-)
+package telegraf

 type Output interface {
 	// Connect to the Output
@@ -14,7 +10,7 @@ type Output interface {
 	// SampleConfig returns the default configuration of the Output
 	SampleConfig() string
 	// Write takes in group of points to be written to the Output
-	Write(points []*client.Point) error
+	Write(metrics []Metric) error
 }

 type ServiceOutput interface {
@@ -27,17 +23,9 @@ type ServiceOutput interface {
 	// SampleConfig returns the default configuration of the Output
 	SampleConfig() string
 	// Write takes in group of points to be written to the Output
-	Write(points []*client.Point) error
+	Write(metrics []Metric) error
 	// Start the "service" that will provide an Output
 	Start() error
 	// Stop the "service" that will provide an Output
 	Stop()
 }
-
-type Creator func() Output
-
-var Outputs = map[string]Creator{}
-
-func Add(name string, creator Creator) {
-	Outputs[name] = creator
-}
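
To illustrate the signature change above, a minimal sketch of an Output against the new interface. The type and its behavior are hypothetical, not part of the changeset:

package main

import (
	"fmt"

	"github.com/influxdata/telegraf"
)

// stdoutOutput is a hypothetical Output that prints metrics in line protocol.
type stdoutOutput struct{}

func (s *stdoutOutput) Connect() error       { return nil }
func (s *stdoutOutput) Close() error         { return nil }
func (s *stdoutOutput) Description() string  { return "print metrics to stdout" }
func (s *stdoutOutput) SampleConfig() string { return "" }

// Write now receives telegraf.Metric values instead of raw client.Point slices.
func (s *stdoutOutput) Write(metrics []telegraf.Metric) error {
	for _, m := range metrics {
		fmt.Println(m.String())
	}
	return nil
}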
@@ -1,16 +0,0 @@
package all

import (
	_ "github.com/influxdb/telegraf/outputs/amon"
	_ "github.com/influxdb/telegraf/outputs/amqp"
	_ "github.com/influxdb/telegraf/outputs/datadog"
	_ "github.com/influxdb/telegraf/outputs/influxdb"
	_ "github.com/influxdb/telegraf/outputs/kafka"
	_ "github.com/influxdb/telegraf/outputs/kinesis"
	_ "github.com/influxdb/telegraf/outputs/librato"
	_ "github.com/influxdb/telegraf/outputs/mqtt"
	_ "github.com/influxdb/telegraf/outputs/nsq"
	_ "github.com/influxdb/telegraf/outputs/opentsdb"
	_ "github.com/influxdb/telegraf/outputs/prometheus_client"
	_ "github.com/influxdb/telegraf/outputs/riemann"
)
@@ -1,161 +0,0 @@
package amqp

import (
	"bytes"
	"fmt"
	"log"
	"sync"
	"time"

	"github.com/influxdb/influxdb/client/v2"
	"github.com/influxdb/telegraf/outputs"
	"github.com/streadway/amqp"
)

type AMQP struct {
	// AMQP brokers to send metrics to
	URL string
	// AMQP exchange
	Exchange string
	// Routing Key Tag
	RoutingTag string `toml:"routing_tag"`
	// InfluxDB database
	Database string
	// InfluxDB retention policy
	RetentionPolicy string
	// InfluxDB precision
	Precision string

	channel *amqp.Channel
	sync.Mutex
	headers amqp.Table
}

const (
	DefaultRetentionPolicy = "default"
	DefaultDatabase        = "telegraf"
	DefaultPrecision       = "s"
)

var sampleConfig = `
  # AMQP url
  url = "amqp://localhost:5672/influxdb"
  # AMQP exchange
  exchange = "telegraf"
  # Telegraf tag to use as a routing key
  # ie, if this tag exists, its value will be used as the routing key
  routing_tag = "host"

  # InfluxDB retention policy
  #retention_policy = "default"
  # InfluxDB database
  #database = "telegraf"
  # InfluxDB precision
  #precision = "s"
`

func (q *AMQP) Connect() error {
	q.Lock()
	defer q.Unlock()

	q.headers = amqp.Table{
		"precision":        q.Precision,
		"database":         q.Database,
		"retention_policy": q.RetentionPolicy,
	}

	connection, err := amqp.Dial(q.URL)
	if err != nil {
		return err
	}
	channel, err := connection.Channel()
	if err != nil {
		return fmt.Errorf("Failed to open a channel: %s", err)
	}

	err = channel.ExchangeDeclare(
		q.Exchange, // name
		"topic",    // type
		true,       // durable
		false,      // delete when unused
		false,      // internal
		false,      // no-wait
		nil,        // arguments
	)
	if err != nil {
		return fmt.Errorf("Failed to declare an exchange: %s", err)
	}
	q.channel = channel
	go func() {
		log.Printf("Closing: %s", <-connection.NotifyClose(make(chan *amqp.Error)))
		log.Printf("Trying to reconnect")
		for err := q.Connect(); err != nil; err = q.Connect() {
			log.Println(err)
			time.Sleep(10 * time.Second)
		}
	}()
	return nil
}

func (q *AMQP) Close() error {
	return q.channel.Close()
}

func (q *AMQP) SampleConfig() string {
	return sampleConfig
}

func (q *AMQP) Description() string {
	return "Configuration for the AMQP server to send metrics to"
}

func (q *AMQP) Write(points []*client.Point) error {
	q.Lock()
	defer q.Unlock()
	if len(points) == 0 {
		return nil
	}
	var outbuf = make(map[string][][]byte)

	for _, p := range points {
		// Combine tags from Point and BatchPoints and grab the resulting
		// line-protocol output string to write to AMQP
		var value, key string
		value = p.String()

		if q.RoutingTag != "" {
			if h, ok := p.Tags()[q.RoutingTag]; ok {
				key = h
			}
		}
		outbuf[key] = append(outbuf[key], []byte(value))
	}
	for key, buf := range outbuf {
		err := q.channel.Publish(
			q.Exchange, // exchange
			key,        // routing key
			false,      // mandatory
			false,      // immediate
			amqp.Publishing{
				Headers:     q.headers,
				ContentType: "text/plain",
				Body:        bytes.Join(buf, []byte("\n")),
			})
		if err != nil {
			return fmt.Errorf("FAILED to send amqp message: %s", err)
		}
	}
	return nil
}

func init() {
	outputs.Add("amqp", func() outputs.Output {
		return &AMQP{
			Database:        DefaultDatabase,
			Precision:       DefaultPrecision,
			RetentionPolicy: DefaultRetentionPolicy,
		}
	})
}
@@ -1,12 +0,0 @@
# InfluxDB Output Plugin

This plugin writes to [InfluxDB](https://www.influxdb.com) via HTTP or UDP.

Required parameters:

* `urls`: List of strings; this is for InfluxDB clustering
support. On each flush interval, Telegraf will randomly choose one of the urls
to write to. Each URL should start with either `http://` or `udp://`
* `database`: The name of the database to write to.
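For orientation, here is a minimal sketch of how these two parameters fit together in a Telegraf config file. The `[outputs.influxdb]` section name and the values shown are illustrative assumptions, not taken from this README:

```toml
# Hypothetical snippet; section name and values are placeholders.
[outputs.influxdb]
  # Telegraf picks ONE of these at random on each flush interval;
  # each entry must start with http:// or udp://
  urls = ["http://localhost:8086", "udp://localhost:8089"]
  # Name of the database the metrics are written to
  database = "telegraf"
```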
@@ -1,160 +0,0 @@
package influxdb

import (
    "errors"
    "fmt"
    "log"
    "math/rand"
    "net/url"
    "strings"

    "github.com/influxdb/influxdb/client/v2"
    "github.com/influxdb/telegraf/internal"
    "github.com/influxdb/telegraf/outputs"
)

type InfluxDB struct {
    // URL is only for backwards compatibility
    URL        string
    URLs       []string `toml:"urls"`
    Username   string
    Password   string
    Database   string
    UserAgent  string
    Precision  string
    Timeout    internal.Duration
    UDPPayload int `toml:"udp_payload"`

    conns []client.Client
}

var sampleConfig = `
  # The full HTTP or UDP endpoint URL for your InfluxDB instance.
  # Multiple urls can be specified, but it is assumed that they are part of the
  # same cluster; this means that only ONE of the urls will be written to each interval.
  # urls = ["udp://localhost:8089"] # UDP endpoint example
  urls = ["http://localhost:8086"] # required
  # The target database for metrics (telegraf will create it if it does not exist)
  database = "telegraf" # required
  # Precision of writes, valid values are n, u, ms, s, m, and h
  # note: using second precision greatly helps InfluxDB compression
  precision = "s"

  # Connection timeout (for the connection with InfluxDB), formatted as a string.
  # If not provided, will default to 0 (no timeout)
  # timeout = "5s"
  # username = "telegraf"
  # password = "metricsmetricsmetricsmetrics"
  # Set the user agent for HTTP POSTs (can be useful for log differentiation)
  # user_agent = "telegraf"
  # Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes)
  # udp_payload = 512
`

func (i *InfluxDB) Connect() error {
    var urls []string
    for _, u := range i.URLs {
        urls = append(urls, u)
    }

    // Backward-compatibility with single Influx URL config files
    // This could eventually be removed in favor of specifying the urls as a list
    if i.URL != "" {
        urls = append(urls, i.URL)
    }

    var conns []client.Client
    for _, u := range urls {
        switch {
        case strings.HasPrefix(u, "udp"):
            parsed_url, err := url.Parse(u)
            if err != nil {
                return err
            }

            if i.UDPPayload == 0 {
                i.UDPPayload = client.UDPPayloadSize
            }
            c, err := client.NewUDPClient(client.UDPConfig{
                Addr:        parsed_url.Host,
                PayloadSize: i.UDPPayload,
            })
            if err != nil {
                return err
            }
            conns = append(conns, c)
        default:
            // If URL doesn't start with "udp", assume HTTP client
            c, err := client.NewHTTPClient(client.HTTPConfig{
                Addr:      u,
                Username:  i.Username,
                Password:  i.Password,
                UserAgent: i.UserAgent,
                Timeout:   i.Timeout.Duration,
            })
            if err != nil {
                return err
            }

            // Create Database if it doesn't exist
            _, e := c.Query(client.Query{
                Command: fmt.Sprintf("CREATE DATABASE IF NOT EXISTS %s", i.Database),
            })

            if e != nil {
                log.Println("Database creation failed: " + e.Error())
            }

            conns = append(conns, c)
        }
    }

    i.conns = conns
    return nil
}

func (i *InfluxDB) Close() error {
    // InfluxDB client does not provide a Close() function
    return nil
}

func (i *InfluxDB) SampleConfig() string {
    return sampleConfig
}

func (i *InfluxDB) Description() string {
    return "Configuration for influxdb server to send metrics to"
}

// Choose a random server in the cluster to write to until a successful write
// occurs, logging each unsuccessful attempt. If all servers fail, return an error.
func (i *InfluxDB) Write(points []*client.Point) error {
    bp, _ := client.NewBatchPoints(client.BatchPointsConfig{
        Database:  i.Database,
        Precision: i.Precision,
    })

    for _, point := range points {
        bp.AddPoint(point)
    }

    // This will get set to nil if a successful write occurs
    err := errors.New("Could not write to any InfluxDB server in cluster")

    p := rand.Perm(len(i.conns))
    for _, n := range p {
        if e := i.conns[n].Write(bp); e != nil {
            log.Println("ERROR: " + e.Error())
        } else {
            err = nil
            break
        }
    }
    return err
}

func init() {
    outputs.Add("influxdb", func() outputs.Output {
        return &InfluxDB{}
    })
}
@@ -1,85 +0,0 @@
package kafka

import (
    "fmt"

    "github.com/Shopify/sarama"
    "github.com/influxdb/influxdb/client/v2"
    "github.com/influxdb/telegraf/outputs"
)

type Kafka struct {
    // Kafka brokers to send metrics to
    Brokers []string
    // Kafka topic
    Topic string
    // Routing Key Tag
    RoutingTag string `toml:"routing_tag"`

    producer sarama.SyncProducer
}

var sampleConfig = `
  # URLs of kafka brokers
  brokers = ["localhost:9092"]
  # Kafka topic for producer messages
  topic = "telegraf"
  # Telegraf tag to use as a routing key
  # ie, if this tag exists, its value will be used as the routing key
  routing_tag = "host"
`

func (k *Kafka) Connect() error {
    producer, err := sarama.NewSyncProducer(k.Brokers, nil)
    if err != nil {
        return err
    }
    k.producer = producer
    return nil
}

func (k *Kafka) Close() error {
    return k.producer.Close()
}

func (k *Kafka) SampleConfig() string {
    return sampleConfig
}

func (k *Kafka) Description() string {
    return "Configuration for the Kafka server to send metrics to"
}

func (k *Kafka) Write(points []*client.Point) error {
    if len(points) == 0 {
        return nil
    }

    for _, p := range points {
        // Combine tags from Point and BatchPoints and grab the resulting
        // line-protocol output string to write to Kafka
        value := p.String()

        m := &sarama.ProducerMessage{
            Topic: k.Topic,
            Value: sarama.StringEncoder(value),
        }
        if h, ok := p.Tags()[k.RoutingTag]; ok {
            m.Key = sarama.StringEncoder(h)
        }

        _, _, err := k.producer.SendMessage(m)
        if err != nil {
            return fmt.Errorf("FAILED to send kafka message: %s", err)
        }
    }
    return nil
}

func init() {
    outputs.Add("kafka", func() outputs.Output {
        return &Kafka{}
    })
}
@@ -1,165 +0,0 @@
package librato

import (
    "bytes"
    "encoding/json"
    "fmt"
    "log"
    "net/http"

    "github.com/influxdb/influxdb/client/v2"
    "github.com/influxdb/telegraf/internal"
    "github.com/influxdb/telegraf/outputs"
)

type Librato struct {
    ApiUser   string
    ApiToken  string
    SourceTag string
    Timeout   internal.Duration

    apiUrl string
    client *http.Client
}

var sampleConfig = `
  # Librato API Docs
  # http://dev.librato.com/v1/metrics-authentication

  # Librato API user
  api_user = "telegraf@influxdb.com" # required.

  # Librato API token
  api_token = "my-secret-token" # required.

  # Tag Field to populate source attribute (optional)
  # This is typically the _hostname_ from which the metric was obtained.
  source_tag = "hostname"

  # Connection timeout.
  # timeout = "5s"
`

type Metrics struct {
    Gauges []*Gauge `json:"gauges"`
}

type Gauge struct {
    Name        string  `json:"name"`
    Value       float64 `json:"value"`
    Source      string  `json:"source"`
    MeasureTime int64   `json:"measure_time"`
}

const librato_api = "https://metrics-api.librato.com/v1/metrics"

func NewLibrato(apiUrl string) *Librato {
    return &Librato{
        apiUrl: apiUrl,
    }
}

func (l *Librato) Connect() error {
    if l.ApiUser == "" || l.ApiToken == "" {
        return fmt.Errorf("api_user and api_token are required fields for librato output")
    }
    l.client = &http.Client{
        Timeout: l.Timeout.Duration,
    }
    return nil
}

func (l *Librato) Write(points []*client.Point) error {
    if len(points) == 0 {
        return nil
    }
    metrics := Metrics{}
    var tempGauges = make([]*Gauge, len(points))
    var acceptablePoints = 0
    for _, pt := range points {
        if gauge, err := l.buildGauge(pt); err == nil {
            tempGauges[acceptablePoints] = gauge
            acceptablePoints += 1
        } else {
            log.Printf("unable to build Gauge for %s, skipping\n", pt.Name())
        }
    }
    metrics.Gauges = make([]*Gauge, acceptablePoints)
    copy(metrics.Gauges, tempGauges[0:])
    metricsBytes, err := json.Marshal(metrics)
    if err != nil {
        return fmt.Errorf("unable to marshal Metrics, %s\n", err.Error())
    }
    req, err := http.NewRequest("POST", l.apiUrl, bytes.NewBuffer(metricsBytes))
    if err != nil {
        return fmt.Errorf("unable to create http.Request, %s\n", err.Error())
    }
    req.Header.Add("Content-Type", "application/json")
    req.SetBasicAuth(l.ApiUser, l.ApiToken)

    resp, err := l.client.Do(req)
    if err != nil {
        return fmt.Errorf("error POSTing metrics, %s\n", err.Error())
    }
    defer resp.Body.Close()

    if resp.StatusCode != 200 {
        return fmt.Errorf("received bad status code, %d\n", resp.StatusCode)
    }

    return nil
}

func (l *Librato) SampleConfig() string {
    return sampleConfig
}

func (l *Librato) Description() string {
    return "Configuration for Librato API to send metrics to."
}

func (l *Librato) buildGauge(pt *client.Point) (*Gauge, error) {
    gauge := &Gauge{
        Name:        pt.Name(),
        MeasureTime: pt.Time().Unix(),
    }
    if err := gauge.setValue(pt.Fields()["value"]); err != nil {
        return gauge, fmt.Errorf("unable to extract value from Fields, %s\n", err.Error())
    }
    if l.SourceTag != "" {
        if source, ok := pt.Tags()[l.SourceTag]; ok {
            gauge.Source = source
        } else {
            return gauge, fmt.Errorf("undeterminable Source type from Field, %s\n", l.SourceTag)
        }
    }
    return gauge, nil
}

func (g *Gauge) setValue(v interface{}) error {
    switch d := v.(type) {
    case int:
        g.Value = float64(d)
    case int32:
        g.Value = float64(d)
    case int64:
        g.Value = float64(d)
    case float32:
        g.Value = float64(d)
    case float64:
        g.Value = d
    default:
        return fmt.Errorf("undeterminable type %+v", d)
    }
    return nil
}

func (l *Librato) Close() error {
    return nil
}

func init() {
    outputs.Add("librato", func() outputs.Output {
        return NewLibrato(librato_api)
    })
}
@@ -1,190 +0,0 @@
package mqtt

import (
    "crypto/rand"
    "crypto/tls"
    "crypto/x509"
    "fmt"
    "io/ioutil"
    "strings"
    "sync"

    paho "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git"
    "github.com/influxdb/influxdb/client/v2"
    "github.com/influxdb/telegraf/internal"
    "github.com/influxdb/telegraf/outputs"
)

const MaxClientIdLen = 8
const MaxRetryCount = 3
const ClientIdPrefix = "telegraf"

type MQTT struct {
    Servers     []string `toml:"servers"`
    Username    string
    Password    string
    Database    string
    Timeout     internal.Duration
    TopicPrefix string

    Client *paho.Client
    Opts   *paho.ClientOptions
    sync.Mutex
}

var sampleConfig = `
  servers = ["localhost:1883"] # required.

  # MQTT outputs send metrics to this topic format
  # "<topic_prefix>/host/<hostname>/<pluginname>/"
  # ex: prefix/host/web01.example.com/mem/available
  # topic_prefix = "prefix"

  # username and password to connect to the MQTT server.
  # username = "telegraf"
  # password = "metricsmetricsmetricsmetrics"
`

func (m *MQTT) Connect() error {
    var err error
    m.Lock()
    defer m.Unlock()

    m.Opts, err = m.CreateOpts()
    if err != nil {
        return err
    }

    m.Client = paho.NewClient(m.Opts)
    if token := m.Client.Connect(); token.Wait() && token.Error() != nil {
        return token.Error()
    }

    return nil
}

func (m *MQTT) Close() error {
    if m.Client.IsConnected() {
        m.Client.Disconnect(20)
    }
    return nil
}

func (m *MQTT) SampleConfig() string {
    return sampleConfig
}

func (m *MQTT) Description() string {
    return "Configuration for MQTT server to send metrics to"
}

func (m *MQTT) Write(points []*client.Point) error {
    m.Lock()
    defer m.Unlock()
    if len(points) == 0 {
        return nil
    }
    hostname, ok := points[0].Tags()["host"]
    if !ok {
        hostname = ""
    }

    for _, p := range points {
        var t []string
        if m.TopicPrefix != "" {
            t = append(t, m.TopicPrefix)
        }
        tm := strings.Split(p.Name(), "_")
        if len(tm) < 2 {
            tm = []string{p.Name(), "stat"}
        }

        t = append(t, "host", hostname, tm[0], tm[1])
        topic := strings.Join(t, "/")

        value := p.String()
        err := m.publish(topic, value)
        if err != nil {
            return fmt.Errorf("Could not write to MQTT server, %s", err)
        }
    }

    return nil
}

func (m *MQTT) publish(topic, body string) error {
    token := m.Client.Publish(topic, 0, false, body)
    token.Wait()
    if token.Error() != nil {
        return token.Error()
    }
    return nil
}

func (m *MQTT) CreateOpts() (*paho.ClientOptions, error) {
    opts := paho.NewClientOptions()

    clientId := getRandomClientId()
    opts.SetClientID(clientId)

    TLSConfig := &tls.Config{InsecureSkipVerify: false}
    ca := "" // TODO
    scheme := "tcp"
    if ca != "" {
        scheme = "ssl"
        certPool, err := getCertPool(ca)
        if err != nil {
            return nil, err
        }
        TLSConfig.RootCAs = certPool
    }
    TLSConfig.InsecureSkipVerify = true // TODO
    opts.SetTLSConfig(TLSConfig)

    user := m.Username
    if user != "" {
        opts.SetUsername(user)
    }
    password := m.Password
    if password != "" {
        opts.SetPassword(password)
    }

    if len(m.Servers) == 0 {
        return opts, fmt.Errorf("could not get host information")
    }
    for _, host := range m.Servers {
        server := fmt.Sprintf("%s://%s", scheme, host)

        opts.AddBroker(server)
    }
    opts.SetAutoReconnect(true)
    return opts, nil
}

func getRandomClientId() string {
    const alphanum = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
    var bytes = make([]byte, MaxClientIdLen)
    rand.Read(bytes)
    for i, b := range bytes {
        bytes[i] = alphanum[b%byte(len(alphanum))]
    }
    return ClientIdPrefix + "-" + string(bytes)
}

func getCertPool(pemPath string) (*x509.CertPool, error) {
    certs := x509.NewCertPool()

    pemData, err := ioutil.ReadFile(pemPath)
    if err != nil {
        return nil, err
    }
    certs.AppendCertsFromPEM(pemData)
    return certs, nil
}

func init() {
    outputs.Add("mqtt", func() outputs.Output {
        return &MQTT{}
    })
}
@@ -1,71 +0,0 @@
package nsq

import (
    "fmt"

    "github.com/influxdb/influxdb/client/v2"
    "github.com/influxdb/telegraf/outputs"
    "github.com/nsqio/go-nsq"
)

type NSQ struct {
    Server   string
    Topic    string
    producer *nsq.Producer
}

var sampleConfig = `
  # Location of nsqd instance listening on TCP
  server = "localhost:4150"
  # NSQ topic for producer messages
  topic = "telegraf"
`

func (n *NSQ) Connect() error {
    config := nsq.NewConfig()
    producer, err := nsq.NewProducer(n.Server, config)

    if err != nil {
        return err
    }

    n.producer = producer
    return nil
}

func (n *NSQ) Close() error {
    n.producer.Stop()
    return nil
}

func (n *NSQ) SampleConfig() string {
    return sampleConfig
}

func (n *NSQ) Description() string {
    return "Send telegraf measurements to NSQD"
}

func (n *NSQ) Write(points []*client.Point) error {
    if len(points) == 0 {
        return nil
    }

    for _, p := range points {
        // Combine tags from Point and BatchPoints and grab the resulting
        // line-protocol output string to write to NSQ
        value := p.String()

        err := n.producer.Publish(n.Topic, []byte(value))

        if err != nil {
            return fmt.Errorf("FAILED to send NSQD message: %s", err)
        }
    }
    return nil
}

func init() {
    outputs.Add("nsq", func() outputs.Output {
        return &NSQ{}
    })
}
@@ -1,71 +0,0 @@
package opentsdb

import (
    "reflect"
    "testing"

    "github.com/influxdb/telegraf/testutil"
    "github.com/stretchr/testify/require"
)

func TestBuildTagsTelnet(t *testing.T) {
    var tagtests = []struct {
        ptIn    map[string]string
        outTags []string
    }{
        {
            map[string]string{"one": "two", "three": "four"},
            []string{"one=two", "three=four"},
        },
        {
            map[string]string{"aaa": "bbb"},
            []string{"aaa=bbb"},
        },
        {
            map[string]string{"one": "two", "aaa": "bbb"},
            []string{"aaa=bbb", "one=two"},
        },
        {
            map[string]string{},
            []string{},
        },
    }
    for _, tt := range tagtests {
        tags := buildTags(tt.ptIn)
        if !reflect.DeepEqual(tags, tt.outTags) {
            t.Errorf("\nexpected %+v\ngot %+v\n", tt.outTags, tags)
        }
    }
}

func TestWrite(t *testing.T) {
    if testing.Short() {
        t.Skip("Skipping integration test in short mode")
    }

    o := &OpenTSDB{
        Host:   testutil.GetLocalHost(),
        Port:   4242,
        Prefix: "prefix.test.",
    }

    // Verify that we can connect to the OpenTSDB instance
    err := o.Connect()
    require.NoError(t, err)

    // Verify that we can successfully write data to OpenTSDB
    err = o.Write(testutil.MockBatchPoints().Points())
    require.NoError(t, err)

    // Verify positive and negative test cases of writing data
    bp := testutil.MockBatchPoints()
    bp.AddPoint(testutil.TestPoint(float64(1.0), "justametric.float"))
    bp.AddPoint(testutil.TestPoint(int64(123456789), "justametric.int"))
    bp.AddPoint(testutil.TestPoint(uint64(123456789012345), "justametric.uint"))
    bp.AddPoint(testutil.TestPoint("Lorem Ipsum", "justametric.string"))
    bp.AddPoint(testutil.TestPoint(float64(42.0), "justametric.anotherfloat"))

    err = o.Write(bp.Points())
    require.NoError(t, err)
}
@@ -1,125 +0,0 @@
package prometheus_client

import (
    "fmt"
    "log"
    "net/http"

    "github.com/influxdb/influxdb/client/v2"
    "github.com/influxdb/telegraf/outputs"
    "github.com/prometheus/client_golang/prometheus"
)

type PrometheusClient struct {
    Listen  string
    metrics map[string]*prometheus.UntypedVec
}

var sampleConfig = `
  # Address to listen on
  # listen = ":9126"
`

func (p *PrometheusClient) Start() error {
    if p.Listen == "" {
        p.Listen = "localhost:9126"
    }

    http.Handle("/metrics", prometheus.Handler())
    server := &http.Server{
        Addr: p.Listen,
    }

    p.metrics = make(map[string]*prometheus.UntypedVec)
    go server.ListenAndServe()
    return nil
}

func (p *PrometheusClient) Stop() {
    // TODO: Use a listener for http.Server that counts active connections
    // that can be stopped and closed gracefully
}

func (p *PrometheusClient) Connect() error {
    // This service output does not need to make any further connections
    return nil
}

func (p *PrometheusClient) Close() error {
    // This service output does not need to close any of its connections
    return nil
}

func (p *PrometheusClient) SampleConfig() string {
    return sampleConfig
}

func (p *PrometheusClient) Description() string {
    return "Configuration for the Prometheus client to spawn"
}

func (p *PrometheusClient) Write(points []*client.Point) error {
    if len(points) == 0 {
        return nil
    }

    for _, point := range points {
        var labels []string
        key := point.Name()

        for k := range point.Tags() {
            if len(k) > 0 {
                labels = append(labels, k)
            }
        }

        if _, ok := p.metrics[key]; !ok {
            p.metrics[key] = prometheus.NewUntypedVec(
                prometheus.UntypedOpts{
                    Name: key,
                    Help: fmt.Sprintf("Telegraf collected point '%s'", key),
                },
                labels,
            )
            prometheus.MustRegister(p.metrics[key])
        }

        l := prometheus.Labels{}
        for tk, tv := range point.Tags() {
            l[tk] = tv
        }

        for _, val := range point.Fields() {
            switch val := val.(type) {
            default:
                log.Printf("Prometheus output, unsupported type. key: %s, type: %T\n",
                    key, val)
            case int64:
                m, err := p.metrics[key].GetMetricWith(l)
                if err != nil {
                    log.Printf("ERROR Getting metric in Prometheus output, "+
                        "key: %s, labels: %v,\nerr: %s\n",
                        key, l, err.Error())
                    continue
                }
                m.Set(float64(val))
            case float64:
                m, err := p.metrics[key].GetMetricWith(l)
                if err != nil {
                    log.Printf("ERROR Getting metric in Prometheus output, "+
                        "key: %s, labels: %v,\nerr: %s\n",
                        key, l, err.Error())
                    continue
                }
                m.Set(val)
            }
        }
    }
    return nil
}

func init() {
    outputs.Add("prometheus_client", func() outputs.Output {
        return &PrometheusClient{}
    })
}
@@ -1,98 +0,0 @@
package prometheus_client

import (
    "testing"

    "github.com/influxdb/influxdb/client/v2"
    "github.com/influxdb/telegraf/plugins/prometheus"
    "github.com/influxdb/telegraf/testutil"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

var pTesting *PrometheusClient

func TestPrometheusWritePointEmptyTag(t *testing.T) {
    if testing.Short() {
        t.Skip("Skipping integration test in short mode")
    }

    p := &prometheus.Prometheus{
        Urls: []string{"http://localhost:9126/metrics"},
    }
    tags := make(map[string]string)
    pt1, _ := client.NewPoint(
        "test_point_1",
        tags,
        map[string]interface{}{"value": 0.0})
    pt2, _ := client.NewPoint(
        "test_point_2",
        tags,
        map[string]interface{}{"value": 1.0})
    var points = []*client.Point{
        pt1,
        pt2,
    }
    require.NoError(t, pTesting.Write(points))

    expected := []struct {
        name  string
        value float64
        tags  map[string]string
    }{
        {"test_point_1", 0.0, tags},
        {"test_point_2", 1.0, tags},
    }

    var acc testutil.Accumulator

    require.NoError(t, p.Gather(&acc))
    for _, e := range expected {
        assert.NoError(t, acc.ValidateValue(e.name, e.value))
    }
}

func TestPrometheusWritePointTag(t *testing.T) {
    if testing.Short() {
        t.Skip("Skipping integration test in short mode")
    }

    p := &prometheus.Prometheus{
        Urls: []string{"http://localhost:9126/metrics"},
    }
    tags := make(map[string]string)
    tags["testtag"] = "testvalue"
    pt1, _ := client.NewPoint(
        "test_point_3",
        tags,
        map[string]interface{}{"value": 0.0})
    pt2, _ := client.NewPoint(
        "test_point_4",
        tags,
        map[string]interface{}{"value": 1.0})
    var points = []*client.Point{
        pt1,
        pt2,
    }
    require.NoError(t, pTesting.Write(points))

    expected := []struct {
        name  string
        value float64
    }{
        {"test_point_3", 0.0},
        {"test_point_4", 1.0},
    }

    var acc testutil.Accumulator

    require.NoError(t, p.Gather(&acc))
    for _, e := range expected {
        assert.True(t, acc.CheckTaggedValue(e.name, e.value, tags))
    }
}

func init() {
    pTesting = &PrometheusClient{Listen: "localhost:9126"}
    pTesting.Start()
}
@@ -1,95 +0,0 @@
package riemann

import (
    "fmt"
    "os"

    "github.com/amir/raidman"
    "github.com/influxdb/influxdb/client/v2"
    "github.com/influxdb/telegraf/outputs"
)

type Riemann struct {
    URL       string
    Transport string

    client *raidman.Client
}

var sampleConfig = `
  # URL of server
  url = "localhost:5555"
  # transport protocol to use, either tcp or udp
  transport = "tcp"
`

func (r *Riemann) Connect() error {
    c, err := raidman.Dial(r.Transport, r.URL)

    if err != nil {
        return err
    }

    r.client = c
    return nil
}

func (r *Riemann) Close() error {
    r.client.Close()
    return nil
}

func (r *Riemann) SampleConfig() string {
    return sampleConfig
}

func (r *Riemann) Description() string {
    return "Configuration for the Riemann server to send metrics to"
}

func (r *Riemann) Write(points []*client.Point) error {
    if len(points) == 0 {
        return nil
    }

    var events []*raidman.Event
    for _, p := range points {
        ev := buildEvent(p)
        events = append(events, ev)
    }

    var senderr = r.client.SendMulti(events)
    if senderr != nil {
        return fmt.Errorf("FAILED to send riemann message: %s", senderr)
    }

    return nil
}

func buildEvent(p *client.Point) *raidman.Event {
    host, ok := p.Tags()["host"]
    if !ok {
        hostname, err := os.Hostname()
        if err != nil {
            host = "unknown"
        } else {
            host = hostname
        }
    }

    var event = &raidman.Event{
        Host:    host,
        Service: p.Name(),
        Metric:  p.Fields()["value"],
    }

    return event
}

func init() {
    outputs.Add("riemann", func() outputs.Output {
        return &Riemann{}
    })
}
@@ -1,37 +0,0 @@
package all

import (
    _ "github.com/influxdb/telegraf/plugins/aerospike"
    _ "github.com/influxdb/telegraf/plugins/apache"
    _ "github.com/influxdb/telegraf/plugins/bcache"
    _ "github.com/influxdb/telegraf/plugins/disque"
    _ "github.com/influxdb/telegraf/plugins/elasticsearch"
    _ "github.com/influxdb/telegraf/plugins/exec"
    _ "github.com/influxdb/telegraf/plugins/haproxy"
    _ "github.com/influxdb/telegraf/plugins/httpjson"
    _ "github.com/influxdb/telegraf/plugins/influxdb"
    _ "github.com/influxdb/telegraf/plugins/jolokia"
    _ "github.com/influxdb/telegraf/plugins/kafka_consumer"
    _ "github.com/influxdb/telegraf/plugins/leofs"
    _ "github.com/influxdb/telegraf/plugins/lustre2"
    _ "github.com/influxdb/telegraf/plugins/mailchimp"
    _ "github.com/influxdb/telegraf/plugins/memcached"
    _ "github.com/influxdb/telegraf/plugins/mongodb"
    _ "github.com/influxdb/telegraf/plugins/mysql"
    _ "github.com/influxdb/telegraf/plugins/nginx"
    _ "github.com/influxdb/telegraf/plugins/phpfpm"
    _ "github.com/influxdb/telegraf/plugins/ping"
    _ "github.com/influxdb/telegraf/plugins/postgresql"
    _ "github.com/influxdb/telegraf/plugins/procstat"
    _ "github.com/influxdb/telegraf/plugins/prometheus"
    _ "github.com/influxdb/telegraf/plugins/puppetagent"
    _ "github.com/influxdb/telegraf/plugins/rabbitmq"
    _ "github.com/influxdb/telegraf/plugins/redis"
    _ "github.com/influxdb/telegraf/plugins/rethinkdb"
    _ "github.com/influxdb/telegraf/plugins/statsd"
    _ "github.com/influxdb/telegraf/plugins/system"
    _ "github.com/influxdb/telegraf/plugins/trig"
    _ "github.com/influxdb/telegraf/plugins/twemproxy"
    _ "github.com/influxdb/telegraf/plugins/zfs"
    _ "github.com/influxdb/telegraf/plugins/zookeeper"
)
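The blank (`_`) imports above are loaded purely for their side effects: each plugin package registers a constructor from its `init` function, the same pattern the output plugins in this diff use via `outputs.Add`. A minimal sketch of such a registry, with illustrative names rather than the actual telegraf API:

```go
package registry

// Creator builds a new plugin instance; the empty interface stands in
// for whatever plugin interface the host application defines.
type Creator func() interface{}

// Registry maps a plugin name to its constructor.
var Registry = make(map[string]Creator)

// Add is called from each plugin package's init(), so a blank import
// of the package is enough to make the plugin available by name.
func Add(name string, creator Creator) {
    Registry[name] = creator
}
```

A plugin package would then self-register with something like `registry.Add("mysql", func() interface{} { return &Mysql{} })` in its `init`, mirroring the `outputs.Add` calls above.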
@@ -1,319 +0,0 @@
# Elasticsearch plugin

#### Plugin arguments:
- **servers** []string: list of one or more Elasticsearch servers
- **local** boolean: If false, it will read the indices stats from all nodes
- **cluster_health** boolean: If true, it will also obtain cluster level stats

#### Description

The [elasticsearch](https://www.elastic.co/) plugin queries endpoints to obtain
[node](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html)
and optionally [cluster](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html) stats.

Example:

```
[elasticsearch]

servers = ["http://localhost:9200"]

local = true

cluster_health = true
```

# Measurements
#### cluster measurements (utilizes fields instead of single values):

- elasticsearch_cluster_health: contains `status`, `timed_out`, `number_of_nodes`,
`number_of_data_nodes`, `active_primary_shards`, `active_shards`,
`relocating_shards`, `initializing_shards`, `unassigned_shards` fields
- elasticsearch_indices: contains `status`, `number_of_shards`, `number_of_replicas`,
`active_primary_shards`, `active_shards`, `relocating_shards`,
`initializing_shards`, `unassigned_shards` fields

#### node measurements:

field data circuit breaker measurement names:
- elasticsearch_breakers_fielddata_estimated_size_in_bytes value=0
- elasticsearch_breakers_fielddata_overhead value=1.03
- elasticsearch_breakers_fielddata_tripped value=0
- elasticsearch_breakers_fielddata_limit_size_in_bytes value=623326003
- elasticsearch_breakers_request_estimated_size_in_bytes value=0
- elasticsearch_breakers_request_overhead value=1.0
- elasticsearch_breakers_request_tripped value=0
- elasticsearch_breakers_request_limit_size_in_bytes value=415550668
- elasticsearch_breakers_parent_overhead value=1.0
- elasticsearch_breakers_parent_tripped value=0
- elasticsearch_breakers_parent_limit_size_in_bytes value=727213670
- elasticsearch_breakers_parent_estimated_size_in_bytes value=0

File system information, data path, free disk space, read/write measurement names:
- elasticsearch_fs_timestamp value=1436460392946
- elasticsearch_fs_total_free_in_bytes value=16909316096
- elasticsearch_fs_total_available_in_bytes value=15894814720
- elasticsearch_fs_total_total_in_bytes value=19507089408

indices size, document count, indexing and deletion times, search times,
field cache size, merges and flushes measurement names:
- elasticsearch_indices_id_cache_memory_size_in_bytes value=0
- elasticsearch_indices_completion_size_in_bytes value=0
- elasticsearch_indices_suggest_total value=0
- elasticsearch_indices_suggest_time_in_millis value=0
- elasticsearch_indices_suggest_current value=0
- elasticsearch_indices_query_cache_memory_size_in_bytes value=0
- elasticsearch_indices_query_cache_evictions value=0
- elasticsearch_indices_query_cache_hit_count value=0
- elasticsearch_indices_query_cache_miss_count value=0
- elasticsearch_indices_store_size_in_bytes value=37715234
- elasticsearch_indices_store_throttle_time_in_millis value=215
- elasticsearch_indices_merges_current_docs value=0
- elasticsearch_indices_merges_current_size_in_bytes value=0
- elasticsearch_indices_merges_total value=133
- elasticsearch_indices_merges_total_time_in_millis value=21060
- elasticsearch_indices_merges_total_docs value=203672
- elasticsearch_indices_merges_total_size_in_bytes value=142900226
- elasticsearch_indices_merges_current value=0
- elasticsearch_indices_filter_cache_memory_size_in_bytes value=7384
- elasticsearch_indices_filter_cache_evictions value=0
- elasticsearch_indices_indexing_index_total value=84790
- elasticsearch_indices_indexing_index_time_in_millis value=29680
- elasticsearch_indices_indexing_index_current value=0
- elasticsearch_indices_indexing_noop_update_total value=0
- elasticsearch_indices_indexing_throttle_time_in_millis value=0
- elasticsearch_indices_indexing_delete_total value=13879
- elasticsearch_indices_indexing_delete_time_in_millis value=1139
- elasticsearch_indices_indexing_delete_current value=0
- elasticsearch_indices_get_exists_time_in_millis value=0
- elasticsearch_indices_get_missing_total value=1
- elasticsearch_indices_get_missing_time_in_millis value=2
- elasticsearch_indices_get_current value=0
- elasticsearch_indices_get_total value=1
- elasticsearch_indices_get_time_in_millis value=2
- elasticsearch_indices_get_exists_total value=0
- elasticsearch_indices_refresh_total value=1076
- elasticsearch_indices_refresh_total_time_in_millis value=20078
- elasticsearch_indices_percolate_current value=0
- elasticsearch_indices_percolate_memory_size_in_bytes value=-1
- elasticsearch_indices_percolate_queries value=0
- elasticsearch_indices_percolate_total value=0
- elasticsearch_indices_percolate_time_in_millis value=0
- elasticsearch_indices_translog_operations value=17702
- elasticsearch_indices_translog_size_in_bytes value=17
- elasticsearch_indices_recovery_current_as_source value=0
- elasticsearch_indices_recovery_current_as_target value=0
- elasticsearch_indices_recovery_throttle_time_in_millis value=0
- elasticsearch_indices_docs_count value=29652
- elasticsearch_indices_docs_deleted value=5229
- elasticsearch_indices_flush_total_time_in_millis value=2401
- elasticsearch_indices_flush_total value=115
- elasticsearch_indices_fielddata_memory_size_in_bytes value=12996
- elasticsearch_indices_fielddata_evictions value=0
- elasticsearch_indices_search_fetch_current value=0
- elasticsearch_indices_search_open_contexts value=0
- elasticsearch_indices_search_query_total value=1452
- elasticsearch_indices_search_query_time_in_millis value=5695
- elasticsearch_indices_search_query_current value=0
- elasticsearch_indices_search_fetch_total value=414
- elasticsearch_indices_search_fetch_time_in_millis value=146
- elasticsearch_indices_warmer_current value=0
- elasticsearch_indices_warmer_total value=2319
- elasticsearch_indices_warmer_total_time_in_millis value=448
- elasticsearch_indices_segments_count value=134
- elasticsearch_indices_segments_memory_in_bytes value=1285212
- elasticsearch_indices_segments_index_writer_memory_in_bytes value=0
- elasticsearch_indices_segments_index_writer_max_memory_in_bytes value=172368955
- elasticsearch_indices_segments_version_map_memory_in_bytes value=611844
- elasticsearch_indices_segments_fixed_bit_set_memory_in_bytes value=0

HTTP connection measurement names:
- elasticsearch_http_current_open value=3
- elasticsearch_http_total_opened value=3

JVM stats, memory pool information, garbage collection, buffer pools measurement names:
- elasticsearch_jvm_timestamp value=1436460392945
- elasticsearch_jvm_uptime_in_millis value=202245
- elasticsearch_jvm_mem_non_heap_used_in_bytes value=39634576
- elasticsearch_jvm_mem_non_heap_committed_in_bytes value=40841216
- elasticsearch_jvm_mem_pools_young_max_in_bytes value=279183360
- elasticsearch_jvm_mem_pools_young_peak_used_in_bytes value=71630848
- elasticsearch_jvm_mem_pools_young_peak_max_in_bytes value=279183360
- elasticsearch_jvm_mem_pools_young_used_in_bytes value=32685760
- elasticsearch_jvm_mem_pools_survivor_peak_used_in_bytes value=8912888
- elasticsearch_jvm_mem_pools_survivor_peak_max_in_bytes value=34865152
- elasticsearch_jvm_mem_pools_survivor_used_in_bytes value=8912880
- elasticsearch_jvm_mem_pools_survivor_max_in_bytes value=34865152
- elasticsearch_jvm_mem_pools_old_peak_max_in_bytes value=724828160
- elasticsearch_jvm_mem_pools_old_used_in_bytes value=11110928
- elasticsearch_jvm_mem_pools_old_max_in_bytes value=724828160
- elasticsearch_jvm_mem_pools_old_peak_used_in_bytes value=14354608
- elasticsearch_jvm_mem_heap_used_in_bytes value=52709568
- elasticsearch_jvm_mem_heap_used_percent value=5
- elasticsearch_jvm_mem_heap_committed_in_bytes value=259522560
- elasticsearch_jvm_mem_heap_max_in_bytes value=1038876672
- elasticsearch_jvm_threads_peak_count value=45
- elasticsearch_jvm_threads_count value=44
- elasticsearch_jvm_gc_collectors_young_collection_count value=2
- elasticsearch_jvm_gc_collectors_young_collection_time_in_millis value=98
- elasticsearch_jvm_gc_collectors_old_collection_count value=1
- elasticsearch_jvm_gc_collectors_old_collection_time_in_millis value=24
- elasticsearch_jvm_buffer_pools_direct_count value=40
- elasticsearch_jvm_buffer_pools_direct_used_in_bytes value=6304239
- elasticsearch_jvm_buffer_pools_direct_total_capacity_in_bytes value=6304239
- elasticsearch_jvm_buffer_pools_mapped_count value=0
- elasticsearch_jvm_buffer_pools_mapped_used_in_bytes value=0
- elasticsearch_jvm_buffer_pools_mapped_total_capacity_in_bytes value=0

TCP information measurement names:
- elasticsearch_network_tcp_in_errs value=0
- elasticsearch_network_tcp_passive_opens value=16
- elasticsearch_network_tcp_curr_estab value=29
- elasticsearch_network_tcp_in_segs value=113
- elasticsearch_network_tcp_out_segs value=97
- elasticsearch_network_tcp_retrans_segs value=0
- elasticsearch_network_tcp_attempt_fails value=0
- elasticsearch_network_tcp_active_opens value=13
- elasticsearch_network_tcp_estab_resets value=0
- elasticsearch_network_tcp_out_rsts value=0

Operating system stats, load average, cpu, mem, swap measurement names:
- elasticsearch_os_swap_used_in_bytes value=0
- elasticsearch_os_swap_free_in_bytes value=487997440
- elasticsearch_os_timestamp value=1436460392944
- elasticsearch_os_uptime_in_millis value=25092
- elasticsearch_os_cpu_sys value=0
- elasticsearch_os_cpu_user value=0
- elasticsearch_os_cpu_idle value=99
- elasticsearch_os_cpu_usage value=0
- elasticsearch_os_cpu_stolen value=0
- elasticsearch_os_mem_free_percent value=74
- elasticsearch_os_mem_used_percent value=25
- elasticsearch_os_mem_actual_free_in_bytes value=1565470720
- elasticsearch_os_mem_actual_used_in_bytes value=534159360
- elasticsearch_os_mem_free_in_bytes value=477761536
- elasticsearch_os_mem_used_in_bytes value=1621868544

Process statistics, memory consumption, cpu usage, open file descriptors measurement names:
- elasticsearch_process_mem_resident_in_bytes value=246382592
- elasticsearch_process_mem_share_in_bytes value=18747392
- elasticsearch_process_mem_total_virtual_in_bytes value=4747890688
- elasticsearch_process_timestamp value=1436460392945
- elasticsearch_process_open_file_descriptors value=160
- elasticsearch_process_cpu_total_in_millis value=15480
- elasticsearch_process_cpu_percent value=2
- elasticsearch_process_cpu_sys_in_millis value=1870
- elasticsearch_process_cpu_user_in_millis value=13610

Statistics about each thread pool, including current size, queue and rejected tasks measurement names:
- elasticsearch_thread_pool_merge_threads value=6
- elasticsearch_thread_pool_merge_queue value=4
- elasticsearch_thread_pool_merge_active value=5
- elasticsearch_thread_pool_merge_rejected value=2
- elasticsearch_thread_pool_merge_largest value=5
- elasticsearch_thread_pool_merge_completed value=1
- elasticsearch_thread_pool_bulk_threads value=4
- elasticsearch_thread_pool_bulk_queue value=5
- elasticsearch_thread_pool_bulk_active value=7
- elasticsearch_thread_pool_bulk_rejected value=3
- elasticsearch_thread_pool_bulk_largest value=1
- elasticsearch_thread_pool_bulk_completed value=4
- elasticsearch_thread_pool_warmer_threads value=2
- elasticsearch_thread_pool_warmer_queue value=7
- elasticsearch_thread_pool_warmer_active value=3
- elasticsearch_thread_pool_warmer_rejected value=2
- elasticsearch_thread_pool_warmer_largest value=3
- elasticsearch_thread_pool_warmer_completed value=1
- elasticsearch_thread_pool_get_largest value=2
- elasticsearch_thread_pool_get_completed value=1
- elasticsearch_thread_pool_get_threads value=1
- elasticsearch_thread_pool_get_queue value=8
- elasticsearch_thread_pool_get_active value=4
- elasticsearch_thread_pool_get_rejected value=3
- elasticsearch_thread_pool_index_threads value=6
- elasticsearch_thread_pool_index_queue value=8
- elasticsearch_thread_pool_index_active value=4
- elasticsearch_thread_pool_index_rejected value=2
- elasticsearch_thread_pool_index_largest value=3
- elasticsearch_thread_pool_index_completed value=6
- elasticsearch_thread_pool_suggest_threads value=2
- elasticsearch_thread_pool_suggest_queue value=7
- elasticsearch_thread_pool_suggest_active value=2
- elasticsearch_thread_pool_suggest_rejected value=1
- elasticsearch_thread_pool_suggest_largest value=8
- elasticsearch_thread_pool_suggest_completed value=3
- elasticsearch_thread_pool_fetch_shard_store_queue value=7
- elasticsearch_thread_pool_fetch_shard_store_active value=4
- elasticsearch_thread_pool_fetch_shard_store_rejected value=2
- elasticsearch_thread_pool_fetch_shard_store_largest value=4
- elasticsearch_thread_pool_fetch_shard_store_completed value=1
- elasticsearch_thread_pool_fetch_shard_store_threads value=1
- elasticsearch_thread_pool_management_threads value=2
- elasticsearch_thread_pool_management_queue value=3
- elasticsearch_thread_pool_management_active value=1
- elasticsearch_thread_pool_management_rejected value=6
- elasticsearch_thread_pool_management_largest value=2
- elasticsearch_thread_pool_management_completed value=22
- elasticsearch_thread_pool_percolate_queue value=23
- elasticsearch_thread_pool_percolate_active value=13
- elasticsearch_thread_pool_percolate_rejected value=235
- elasticsearch_thread_pool_percolate_largest value=23
- elasticsearch_thread_pool_percolate_completed value=33
- elasticsearch_thread_pool_percolate_threads value=123
- elasticsearch_thread_pool_listener_active value=4
- elasticsearch_thread_pool_listener_rejected value=8
- elasticsearch_thread_pool_listener_largest value=1
- elasticsearch_thread_pool_listener_completed value=1
- elasticsearch_thread_pool_listener_threads value=1
- elasticsearch_thread_pool_listener_queue value=2
- elasticsearch_thread_pool_search_rejected value=7
- elasticsearch_thread_pool_search_largest value=2
- elasticsearch_thread_pool_search_completed value=4
- elasticsearch_thread_pool_search_threads value=5
- elasticsearch_thread_pool_search_queue value=7
- elasticsearch_thread_pool_search_active value=2
- elasticsearch_thread_pool_fetch_shard_started_threads value=3
- elasticsearch_thread_pool_fetch_shard_started_queue value=1
- elasticsearch_thread_pool_fetch_shard_started_active value=5
- elasticsearch_thread_pool_fetch_shard_started_rejected value=6
- elasticsearch_thread_pool_fetch_shard_started_largest value=4
- elasticsearch_thread_pool_fetch_shard_started_completed value=54
- elasticsearch_thread_pool_refresh_rejected value=4
- elasticsearch_thread_pool_refresh_largest value=8
- elasticsearch_thread_pool_refresh_completed value=3
- elasticsearch_thread_pool_refresh_threads value=23
- elasticsearch_thread_pool_refresh_queue value=7
- elasticsearch_thread_pool_refresh_active value=3
- elasticsearch_thread_pool_optimize_threads value=3
- elasticsearch_thread_pool_optimize_queue value=4
- elasticsearch_thread_pool_optimize_active value=1
- elasticsearch_thread_pool_optimize_rejected value=2
- elasticsearch_thread_pool_optimize_largest value=7
- elasticsearch_thread_pool_optimize_completed value=3
- elasticsearch_thread_pool_snapshot_largest value=1
- elasticsearch_thread_pool_snapshot_completed value=0
- elasticsearch_thread_pool_snapshot_threads value=8
- elasticsearch_thread_pool_snapshot_queue value=5
- elasticsearch_thread_pool_snapshot_active value=6
- elasticsearch_thread_pool_snapshot_rejected value=2
- elasticsearch_thread_pool_generic_threads value=1
- elasticsearch_thread_pool_generic_queue value=4
- elasticsearch_thread_pool_generic_active value=6
- elasticsearch_thread_pool_generic_rejected value=3
- elasticsearch_thread_pool_generic_largest value=2
- elasticsearch_thread_pool_generic_completed value=27
- elasticsearch_thread_pool_flush_threads value=3
- elasticsearch_thread_pool_flush_queue value=8
- elasticsearch_thread_pool_flush_active value=0
- elasticsearch_thread_pool_flush_rejected value=1
- elasticsearch_thread_pool_flush_largest value=5
- elasticsearch_thread_pool_flush_completed value=3

Transport statistics about sent and received bytes in cluster communication measurement names:
- elasticsearch_transport_server_open value=13
- elasticsearch_transport_rx_count value=6
- elasticsearch_transport_rx_size_in_bytes value=1380
- elasticsearch_transport_tx_count value=6
- elasticsearch_transport_tx_size_in_bytes value=1380
@@ -1,759 +0,0 @@
package elasticsearch

const clusterResponse = `
{
  "cluster_name": "elasticsearch_telegraf",
  "status": "green",
  "timed_out": false,
  "number_of_nodes": 3,
  "number_of_data_nodes": 3,
  "active_primary_shards": 5,
  "active_shards": 15,
  "relocating_shards": 0,
  "initializing_shards": 0,
  "unassigned_shards": 0,
  "indices": {
    "v1": {
      "status": "green",
      "number_of_shards": 10,
      "number_of_replicas": 1,
      "active_primary_shards": 10,
      "active_shards": 20,
      "relocating_shards": 0,
      "initializing_shards": 0,
      "unassigned_shards": 0
    },
    "v2": {
      "status": "red",
      "number_of_shards": 10,
      "number_of_replicas": 1,
      "active_primary_shards": 0,
      "active_shards": 0,
      "relocating_shards": 0,
      "initializing_shards": 0,
      "unassigned_shards": 20
    }
  }
}
`

var clusterHealthExpected = map[string]interface{}{
    "status": "green",
    "timed_out": false,
    "number_of_nodes": 3,
    "number_of_data_nodes": 3,
    "active_primary_shards": 5,
    "active_shards": 15,
    "relocating_shards": 0,
    "initializing_shards": 0,
    "unassigned_shards": 0,
}

var v1IndexExpected = map[string]interface{}{
    "status": "green",
    "number_of_shards": 10,
    "number_of_replicas": 1,
    "active_primary_shards": 10,
    "active_shards": 20,
    "relocating_shards": 0,
    "initializing_shards": 0,
    "unassigned_shards": 0,
}

var v2IndexExpected = map[string]interface{}{
    "status": "red",
    "number_of_shards": 10,
    "number_of_replicas": 1,
    "active_primary_shards": 0,
    "active_shards": 0,
    "relocating_shards": 0,
    "initializing_shards": 0,
    "unassigned_shards": 20,
}

const statsResponse = `
{
  "cluster_name": "es-testcluster",
  "nodes": {
    "SDFsfSDFsdfFSDSDfSFDSDF": {
      "timestamp": 1436365550135,
      "name": "test.host.com",
      "transport_address": "inet[/127.0.0.1:9300]",
      "host": "test",
      "ip": [
        "inet[/127.0.0.1:9300]",
        "NONE"
      ],
      "attributes": {
        "master": "true"
      },
      "indices": {
        "docs": {
          "count": 29652,
          "deleted": 5229
        },
        "store": {
          "size_in_bytes": 37715234,
          "throttle_time_in_millis": 215
        },
        "indexing": {
          "index_total": 84790,
          "index_time_in_millis": 29680,
          "index_current": 0,
          "delete_total": 13879,
          "delete_time_in_millis": 1139,
          "delete_current": 0,
          "noop_update_total": 0,
          "is_throttled": false,
          "throttle_time_in_millis": 0
        },
        "get": {
          "total": 1,
          "time_in_millis": 2,
          "exists_total": 0,
          "exists_time_in_millis": 0,
          "missing_total": 1,
          "missing_time_in_millis": 2,
          "current": 0
        },
        "search": {
          "open_contexts": 0,
          "query_total": 1452,
          "query_time_in_millis": 5695,
          "query_current": 0,
          "fetch_total": 414,
          "fetch_time_in_millis": 146,
          "fetch_current": 0
        },
        "merges": {
          "current": 0,
          "current_docs": 0,
          "current_size_in_bytes": 0,
          "total": 133,
          "total_time_in_millis": 21060,
          "total_docs": 203672,
          "total_size_in_bytes": 142900226
        },
        "refresh": {
          "total": 1076,
          "total_time_in_millis": 20078
        },
        "flush": {
          "total": 115,
          "total_time_in_millis": 2401
        },
        "warmer": {
          "current": 0,
          "total": 2319,
          "total_time_in_millis": 448
        },
        "filter_cache": {
          "memory_size_in_bytes": 7384,
          "evictions": 0
        },
        "id_cache": {
          "memory_size_in_bytes": 0
        },
        "fielddata": {
          "memory_size_in_bytes": 12996,
          "evictions": 0
        },
        "percolate": {
          "total": 0,
          "time_in_millis": 0,
          "current": 0,
          "memory_size_in_bytes": -1,
          "memory_size": "-1b",
          "queries": 0
        },
        "completion": {
          "size_in_bytes": 0
        },
        "segments": {
          "count": 134,
          "memory_in_bytes": 1285212,
          "index_writer_memory_in_bytes": 0,
          "index_writer_max_memory_in_bytes": 172368955,
          "version_map_memory_in_bytes": 611844,
          "fixed_bit_set_memory_in_bytes": 0
        },
        "translog": {
          "operations": 17702,
          "size_in_bytes": 17
        },
        "suggest": {
          "total": 0,
          "time_in_millis": 0,
          "current": 0
        },
        "query_cache": {
          "memory_size_in_bytes": 0,
          "evictions": 0,
          "hit_count": 0,
          "miss_count": 0
        },
        "recovery": {
          "current_as_source": 0,
          "current_as_target": 0,
          "throttle_time_in_millis": 0
        }
      },
      "os": {
        "timestamp": 1436460392944,
        "load_average": [
          0.01,
          0.04,
          0.05
        ],
        "mem": {
          "free_in_bytes": 477761536,
          "used_in_bytes": 1621868544,
          "free_percent": 74,
          "used_percent": 25,
          "actual_free_in_bytes": 1565470720,
          "actual_used_in_bytes": 534159360
        },
        "swap": {
          "used_in_bytes": 0,
          "free_in_bytes": 487997440
        }
      },
      "process": {
        "timestamp": 1436460392945,
        "open_file_descriptors": 160,
        "cpu": {
          "percent": 2,
          "sys_in_millis": 1870,
          "user_in_millis": 13610,
          "total_in_millis": 15480
        },
        "mem": {
          "total_virtual_in_bytes": 4747890688
        }
      },
      "jvm": {
        "timestamp": 1436460392945,
        "uptime_in_millis": 202245,
        "mem": {
          "heap_used_in_bytes": 52709568,
          "heap_used_percent": 5,
          "heap_committed_in_bytes": 259522560,
          "heap_max_in_bytes": 1038876672,
          "non_heap_used_in_bytes": 39634576,
          "non_heap_committed_in_bytes": 40841216,
          "pools": {
            "young": {
              "used_in_bytes": 32685760,
              "max_in_bytes": 279183360,
              "peak_used_in_bytes": 71630848,
              "peak_max_in_bytes": 279183360
            },
            "survivor": {
              "used_in_bytes": 8912880,
              "max_in_bytes": 34865152,
              "peak_used_in_bytes": 8912888,
              "peak_max_in_bytes": 34865152
            },
            "old": {
              "used_in_bytes": 11110928,
              "max_in_bytes": 724828160,
              "peak_used_in_bytes": 14354608,
              "peak_max_in_bytes": 724828160
            }
          }
        },
        "threads": {
          "count": 44,
          "peak_count": 45
        },
        "gc": {
          "collectors": {
            "young": {
              "collection_count": 2,
              "collection_time_in_millis": 98
            },
            "old": {
              "collection_count": 1,
              "collection_time_in_millis": 24
            }
          }
        },
        "buffer_pools": {
          "direct": {
            "count": 40,
            "used_in_bytes": 6304239,
            "total_capacity_in_bytes": 6304239
          },
          "mapped": {
            "count": 0,
            "used_in_bytes": 0,
            "total_capacity_in_bytes": 0
          }
        }
      },
      "thread_pool": {
        "percolate": {
          "threads": 123,
          "queue": 23,
          "active": 13,
          "rejected": 235,
          "largest": 23,
          "completed": 33
        },
        "fetch_shard_started": {
          "threads": 3,
          "queue": 1,
          "active": 5,
          "rejected": 6,
          "largest": 4,
          "completed": 54
        },
        "listener": {
          "threads": 1,
          "queue": 2,
          "active": 4,
          "rejected": 8,
          "largest": 1,
          "completed": 1
        },
        "index": {
          "threads": 6,
          "queue": 8,
          "active": 4,
          "rejected": 2,
          "largest": 3,
          "completed": 6
        },
        "refresh": {
          "threads": 23,
          "queue": 7,
          "active": 3,
          "rejected": 4,
          "largest": 8,
          "completed": 3
        },
        "suggest": {
          "threads": 2,
          "queue": 7,
          "active": 2,
          "rejected": 1,
          "largest": 8,
          "completed": 3
        },
        "generic": {
          "threads": 1,
          "queue": 4,
          "active": 6,
          "rejected": 3,
          "largest": 2,
          "completed": 27
        },
        "warmer": {
          "threads": 2,
          "queue": 7,
          "active": 3,
          "rejected": 2,
          "largest": 3,
          "completed": 1
        },
        "search": {
          "threads": 5,
          "queue": 7,
          "active": 2,
          "rejected": 7,
          "largest": 2,
          "completed": 4
        },
        "flush": {
          "threads": 3,
          "queue": 8,
          "active": 0,
          "rejected": 1,
          "largest": 5,
          "completed": 3
        },
        "optimize": {
          "threads": 3,
          "queue": 4,
          "active": 1,
          "rejected": 2,
          "largest": 7,
          "completed": 3
        },
        "fetch_shard_store": {
          "threads": 1,
          "queue": 7,
          "active": 4,
          "rejected": 2,
          "largest": 4,
          "completed": 1
        },
        "management": {
          "threads": 2,
          "queue": 3,
          "active": 1,
          "rejected": 6,
          "largest": 2,
          "completed": 22
        },
        "get": {
          "threads": 1,
          "queue": 8,
          "active": 4,
          "rejected": 3,
          "largest": 2,
          "completed": 1
        },
        "merge": {
          "threads": 6,
          "queue": 4,
          "active": 5,
          "rejected": 2,
          "largest": 5,
          "completed": 1
        },
        "bulk": {
          "threads": 4,
          "queue": 5,
          "active": 7,
          "rejected": 3,
          "largest": 1,
          "completed": 4
        },
        "snapshot": {
          "threads": 8,
          "queue": 5,
          "active": 6,
          "rejected": 2,
          "largest": 1,
          "completed": 0
        }
      },
      "fs": {
        "timestamp": 1436460392946,
        "total": {
          "total_in_bytes": 19507089408,
          "free_in_bytes": 16909316096,
          "available_in_bytes": 15894814720
        },
        "data": [
          {
            "path": "/usr/share/elasticsearch/data/elasticsearch/nodes/0",
            "mount": "/usr/share/elasticsearch/data",
            "type": "ext4",
            "total_in_bytes": 19507089408,
            "free_in_bytes": 16909316096,
            "available_in_bytes": 15894814720
          }
        ]
      },
      "transport": {
        "server_open": 13,
        "rx_count": 6,
        "rx_size_in_bytes": 1380,
        "tx_count": 6,
        "tx_size_in_bytes": 1380
      },
      "http": {
        "current_open": 3,
        "total_opened": 3
      },
      "breakers": {
        "fielddata": {
          "limit_size_in_bytes": 623326003,
          "limit_size": "594.4mb",
          "estimated_size_in_bytes": 0,
          "estimated_size": "0b",
          "overhead": 1.03,
          "tripped": 0
        },
        "request": {
          "limit_size_in_bytes": 415550668,
          "limit_size": "396.2mb",
          "estimated_size_in_bytes": 0,
          "estimated_size": "0b",
          "overhead": 1.0,
          "tripped": 0
        },
        "parent": {
          "limit_size_in_bytes": 727213670,
          "limit_size": "693.5mb",
          "estimated_size_in_bytes": 0,
          "estimated_size": "0b",
          "overhead": 1.0,
          "tripped": 0
        }
      }
    }
  }
}
`

var indicesExpected = map[string]float64{
    "indices_id_cache_memory_size_in_bytes": 0,
    "indices_completion_size_in_bytes": 0,
    "indices_suggest_total": 0,
    "indices_suggest_time_in_millis": 0,
    "indices_suggest_current": 0,
    "indices_query_cache_memory_size_in_bytes": 0,
    "indices_query_cache_evictions": 0,
    "indices_query_cache_hit_count": 0,
    "indices_query_cache_miss_count": 0,
    "indices_store_size_in_bytes": 37715234,
    "indices_store_throttle_time_in_millis": 215,
    "indices_merges_current_docs": 0,
    "indices_merges_current_size_in_bytes": 0,
    "indices_merges_total": 133,
    "indices_merges_total_time_in_millis": 21060,
    "indices_merges_total_docs": 203672,
    "indices_merges_total_size_in_bytes": 142900226,
    "indices_merges_current": 0,
    "indices_filter_cache_memory_size_in_bytes": 7384,
    "indices_filter_cache_evictions": 0,
    "indices_indexing_index_total": 84790,
    "indices_indexing_index_time_in_millis": 29680,
    "indices_indexing_index_current": 0,
    "indices_indexing_noop_update_total": 0,
    "indices_indexing_throttle_time_in_millis": 0,
    "indices_indexing_delete_total": 13879,
    "indices_indexing_delete_time_in_millis": 1139,
    "indices_indexing_delete_current": 0,
    "indices_get_exists_time_in_millis": 0,
    "indices_get_missing_total": 1,
    "indices_get_missing_time_in_millis": 2,
    "indices_get_current": 0,
    "indices_get_total": 1,
    "indices_get_time_in_millis": 2,
    "indices_get_exists_total": 0,
    "indices_refresh_total": 1076,
    "indices_refresh_total_time_in_millis": 20078,
    "indices_percolate_current": 0,
    "indices_percolate_memory_size_in_bytes": -1,
    "indices_percolate_queries": 0,
    "indices_percolate_total": 0,
    "indices_percolate_time_in_millis": 0,
    "indices_translog_operations": 17702,
    "indices_translog_size_in_bytes": 17,
    "indices_recovery_current_as_source": 0,
    "indices_recovery_current_as_target": 0,
    "indices_recovery_throttle_time_in_millis": 0,
    "indices_docs_count": 29652,
    "indices_docs_deleted": 5229,
    "indices_flush_total_time_in_millis": 2401,
    "indices_flush_total": 115,
    "indices_fielddata_memory_size_in_bytes": 12996,
    "indices_fielddata_evictions": 0,
    "indices_search_fetch_current": 0,
    "indices_search_open_contexts": 0,
    "indices_search_query_total": 1452,
    "indices_search_query_time_in_millis": 5695,
    "indices_search_query_current": 0,
    "indices_search_fetch_total": 414,
    "indices_search_fetch_time_in_millis": 146,
    "indices_warmer_current": 0,
    "indices_warmer_total": 2319,
    "indices_warmer_total_time_in_millis": 448,
    "indices_segments_count": 134,
    "indices_segments_memory_in_bytes": 1285212,
    "indices_segments_index_writer_memory_in_bytes": 0,
    "indices_segments_index_writer_max_memory_in_bytes": 172368955,
    "indices_segments_version_map_memory_in_bytes": 611844,
    "indices_segments_fixed_bit_set_memory_in_bytes": 0,
}

var osExpected = map[string]float64{
    "os_swap_used_in_bytes": 0,
    "os_swap_free_in_bytes": 487997440,
    "os_timestamp": 1436460392944,
    "os_mem_free_percent": 74,
    "os_mem_used_percent": 25,
    "os_mem_actual_free_in_bytes": 1565470720,
    "os_mem_actual_used_in_bytes": 534159360,
    "os_mem_free_in_bytes": 477761536,
    "os_mem_used_in_bytes": 1621868544,
}

var processExpected = map[string]float64{
    "process_mem_total_virtual_in_bytes": 4747890688,
    "process_timestamp": 1436460392945,
    "process_open_file_descriptors": 160,
    "process_cpu_total_in_millis": 15480,
    "process_cpu_percent": 2,
    "process_cpu_sys_in_millis": 1870,
    "process_cpu_user_in_millis": 13610,
}

var jvmExpected = map[string]float64{
    "jvm_timestamp": 1436460392945,
    "jvm_uptime_in_millis": 202245,
    "jvm_mem_non_heap_used_in_bytes": 39634576,
    "jvm_mem_non_heap_committed_in_bytes": 40841216,
    "jvm_mem_pools_young_max_in_bytes": 279183360,
    "jvm_mem_pools_young_peak_used_in_bytes": 71630848,
    "jvm_mem_pools_young_peak_max_in_bytes": 279183360,
    "jvm_mem_pools_young_used_in_bytes": 32685760,
    "jvm_mem_pools_survivor_peak_used_in_bytes": 8912888,
    "jvm_mem_pools_survivor_peak_max_in_bytes": 34865152,
    "jvm_mem_pools_survivor_used_in_bytes": 8912880,
    "jvm_mem_pools_survivor_max_in_bytes": 34865152,
    "jvm_mem_pools_old_peak_max_in_bytes": 724828160,
    "jvm_mem_pools_old_used_in_bytes": 11110928,
    "jvm_mem_pools_old_max_in_bytes": 724828160,
    "jvm_mem_pools_old_peak_used_in_bytes": 14354608,
    "jvm_mem_heap_used_in_bytes": 52709568,
    "jvm_mem_heap_used_percent": 5,
    "jvm_mem_heap_committed_in_bytes": 259522560,
    "jvm_mem_heap_max_in_bytes": 1038876672,
    "jvm_threads_peak_count": 45,
    "jvm_threads_count": 44,
    "jvm_gc_collectors_young_collection_count": 2,
    "jvm_gc_collectors_young_collection_time_in_millis": 98,
    "jvm_gc_collectors_old_collection_count": 1,
    "jvm_gc_collectors_old_collection_time_in_millis": 24,
    "jvm_buffer_pools_direct_count": 40,
    "jvm_buffer_pools_direct_used_in_bytes": 6304239,
    "jvm_buffer_pools_direct_total_capacity_in_bytes": 6304239,
    "jvm_buffer_pools_mapped_count": 0,
    "jvm_buffer_pools_mapped_used_in_bytes": 0,
    "jvm_buffer_pools_mapped_total_capacity_in_bytes": 0,
}

var threadPoolExpected = map[string]float64{
    "thread_pool_merge_threads": 6,
    "thread_pool_merge_queue": 4,
    "thread_pool_merge_active": 5,
    "thread_pool_merge_rejected": 2,
    "thread_pool_merge_largest": 5,
    "thread_pool_merge_completed": 1,
    "thread_pool_bulk_threads": 4,
    "thread_pool_bulk_queue": 5,
    "thread_pool_bulk_active": 7,
    "thread_pool_bulk_rejected": 3,
    "thread_pool_bulk_largest": 1,
    "thread_pool_bulk_completed": 4,
    "thread_pool_warmer_threads": 2,
    "thread_pool_warmer_queue": 7,
    "thread_pool_warmer_active": 3,
    "thread_pool_warmer_rejected": 2,
    "thread_pool_warmer_largest": 3,
    "thread_pool_warmer_completed": 1,
    "thread_pool_get_largest": 2,
    "thread_pool_get_completed": 1,
    "thread_pool_get_threads": 1,
    "thread_pool_get_queue": 8,
    "thread_pool_get_active": 4,
    "thread_pool_get_rejected": 3,
    "thread_pool_index_threads": 6,
    "thread_pool_index_queue": 8,
    "thread_pool_index_active": 4,
    "thread_pool_index_rejected": 2,
    "thread_pool_index_largest": 3,
    "thread_pool_index_completed": 6,
    "thread_pool_suggest_threads": 2,
    "thread_pool_suggest_queue": 7,
    "thread_pool_suggest_active": 2,
    "thread_pool_suggest_rejected": 1,
    "thread_pool_suggest_largest": 8,
    "thread_pool_suggest_completed": 3,
    "thread_pool_fetch_shard_store_queue": 7,
    "thread_pool_fetch_shard_store_active": 4,
    "thread_pool_fetch_shard_store_rejected": 2,
    "thread_pool_fetch_shard_store_largest": 4,
    "thread_pool_fetch_shard_store_completed": 1,
    "thread_pool_fetch_shard_store_threads": 1,
    "thread_pool_management_threads": 2,
    "thread_pool_management_queue": 3,
    "thread_pool_management_active": 1,
    "thread_pool_management_rejected": 6,
    "thread_pool_management_largest": 2,
    "thread_pool_management_completed": 22,
    "thread_pool_percolate_queue": 23,
    "thread_pool_percolate_active": 13,
    "thread_pool_percolate_rejected": 235,
    "thread_pool_percolate_largest": 23,
    "thread_pool_percolate_completed": 33,
    "thread_pool_percolate_threads": 123,
    "thread_pool_listener_active": 4,
    "thread_pool_listener_rejected": 8,
    "thread_pool_listener_largest": 1,
    "thread_pool_listener_completed": 1,
    "thread_pool_listener_threads": 1,
    "thread_pool_listener_queue": 2,
    "thread_pool_search_rejected": 7,
    "thread_pool_search_largest": 2,
    "thread_pool_search_completed": 4,
    "thread_pool_search_threads": 5,
    "thread_pool_search_queue": 7,
    "thread_pool_search_active": 2,
    "thread_pool_fetch_shard_started_threads": 3,
    "thread_pool_fetch_shard_started_queue": 1,
    "thread_pool_fetch_shard_started_active": 5,
    "thread_pool_fetch_shard_started_rejected": 6,
    "thread_pool_fetch_shard_started_largest": 4,
    "thread_pool_fetch_shard_started_completed": 54,
    "thread_pool_refresh_rejected": 4,
    "thread_pool_refresh_largest": 8,
    "thread_pool_refresh_completed": 3,
    "thread_pool_refresh_threads": 23,
    "thread_pool_refresh_queue": 7,
    "thread_pool_refresh_active": 3,
    "thread_pool_optimize_threads": 3,
    "thread_pool_optimize_queue": 4,
    "thread_pool_optimize_active": 1,
    "thread_pool_optimize_rejected": 2,
    "thread_pool_optimize_largest": 7,
    "thread_pool_optimize_completed": 3,
    "thread_pool_snapshot_largest": 1,
    "thread_pool_snapshot_completed": 0,
    "thread_pool_snapshot_threads": 8,
    "thread_pool_snapshot_queue": 5,
    "thread_pool_snapshot_active": 6,
    "thread_pool_snapshot_rejected": 2,
    "thread_pool_generic_threads": 1,
    "thread_pool_generic_queue": 4,
    "thread_pool_generic_active": 6,
    "thread_pool_generic_rejected": 3,
    "thread_pool_generic_largest": 2,
    "thread_pool_generic_completed": 27,
    "thread_pool_flush_threads": 3,
    "thread_pool_flush_queue": 8,
    "thread_pool_flush_active": 0,
    "thread_pool_flush_rejected": 1,
    "thread_pool_flush_largest": 5,
    "thread_pool_flush_completed": 3,
}

var fsExpected = map[string]float64{
    "fs_timestamp": 1436460392946,
    "fs_total_free_in_bytes": 16909316096,
    "fs_total_available_in_bytes": 15894814720,
    "fs_total_total_in_bytes": 19507089408,
}

var transportExpected = map[string]float64{
    "transport_server_open": 13,
    "transport_rx_count": 6,
    "transport_rx_size_in_bytes": 1380,
    "transport_tx_count": 6,
    "transport_tx_size_in_bytes": 1380,
}

var httpExpected = map[string]float64{
    "http_current_open": 3,
    "http_total_opened": 3,
}

var breakersExpected = map[string]float64{
    "breakers_fielddata_estimated_size_in_bytes": 0,
    "breakers_fielddata_overhead": 1.03,
    "breakers_fielddata_tripped": 0,
    "breakers_fielddata_limit_size_in_bytes": 623326003,
    "breakers_request_estimated_size_in_bytes": 0,
    "breakers_request_overhead": 1.0,
    "breakers_request_tripped": 0,
    "breakers_request_limit_size_in_bytes": 415550668,
    "breakers_parent_overhead": 1.0,
    "breakers_parent_tripped": 0,
    "breakers_parent_limit_size_in_bytes": 727213670,
    "breakers_parent_estimated_size_in_bytes": 0,
}
@@ -1,42 +0,0 @@
# Exec Plugin

The exec plugin can execute arbitrary commands which output JSON. It then
flattens the JSON and collects all numeric values, treating them as floats.

For example, if you have a JSON-returning command called mycollector, you could
set up the exec plugin with:

```
[[exec.commands]]
command = "/usr/bin/mycollector --output=json"
name = "mycollector"
interval = 10
```

The name is used as a prefix for the measurements.

The interval is used to determine how often a particular command should be run. Each
time the exec plugin runs, it will only run a particular command if at least
`interval` seconds have passed since the exec plugin last ran that command.
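
Because the interval is tracked per command, one plugin instance can mix collectors
that run at different cadences. A sketch in the same format as above; the command
paths and names are hypothetical:

```
[[exec.commands]]
command = "/usr/bin/fastcollector --output=json"   # hypothetical command
name = "fast"
interval = 10    # eligible to run again 10s after its last run

[[exec.commands]]
command = "/usr/bin/slowcollector --output=json"   # hypothetical command
name = "slow"
interval = 300   # runs at most once every 5 minutes
```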

# Sample

Let's say that we have a command named "mycollector", which gives the following output:

```json
{
    "a": 0.5,
    "b": {
        "c": "some text",
        "d": 0.1,
        "e": 5
    }
}
```

The collected metrics will be:

```
exec_mycollector_a value=0.5
exec_mycollector_b_d value=0.1
exec_mycollector_b_e value=5
```
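
The flattening itself is simple to reproduce. Below is a self-contained sketch of the
same depth-first walk (the plugin's actual implementation is `processResponse`, shown
in the source that follows); `main` and the printed format are illustrative only:

```go
package main

import (
    "encoding/json"
    "fmt"
)

// flatten walks a decoded JSON value depth-first, joining nested keys with "_"
// and keeping only numeric leaves. encoding/json decodes every JSON number as
// float64, which is why the plugin treats all numbers as floats.
func flatten(prefix string, v interface{}, out map[string]float64) {
    switch t := v.(type) {
    case map[string]interface{}:
        for k, child := range t {
            flatten(prefix+"_"+k, child, out)
        }
    case float64:
        out[prefix] = t
    }
}

func main() {
    raw := []byte(`{"a": 0.5, "b": {"c": "some text", "d": 0.1, "e": 5}}`)
    var decoded interface{}
    if err := json.Unmarshal(raw, &decoded); err != nil {
        panic(err)
    }

    metrics := map[string]float64{}
    flatten("exec_mycollector", decoded, metrics)
    for name, value := range metrics {
        fmt.Printf("%s value=%v\n", name, value) // e.g. exec_mycollector_b_d value=0.1
    }
}
```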
@@ -1,162 +0,0 @@
package exec

import (
    "bytes"
    "encoding/json"
    "errors"
    "fmt"
    "github.com/gonuts/go-shellquote"
    "github.com/influxdb/telegraf/plugins"
    "math"
    "os/exec"
    "strings"
    "sync"
    "time"
)

const sampleConfig = `
  # specify commands via an array of tables
  [[plugins.exec.commands]]
  # the command to run
  command = "/usr/bin/mycollector --foo=bar"

  # name of the command (used as a prefix for measurements)
  name = "mycollector"

  # Only run this command if it has been at least this many
  # seconds since it last ran
  interval = 10
`

type Exec struct {
    Commands []*Command
    runner   Runner
    clock    Clock
}

type Command struct {
    Command   string
    Name      string
    Interval  int
    lastRunAt time.Time
}

type Runner interface {
    Run(*Command) ([]byte, error)
}

type Clock interface {
    Now() time.Time
}

type CommandRunner struct{}

type RealClock struct{}

func (c CommandRunner) Run(command *Command) ([]byte, error) {
    command.lastRunAt = time.Now()
    split_cmd, err := shellquote.Split(command.Command)
    if err != nil || len(split_cmd) == 0 {
        return nil, fmt.Errorf("exec: unable to parse command, %s", err)
    }

    cmd := exec.Command(split_cmd[0], split_cmd[1:]...)
    var out bytes.Buffer
    cmd.Stdout = &out

    if err := cmd.Run(); err != nil {
        return nil, fmt.Errorf("exec: %s for command '%s'", err, command.Command)
    }

    return out.Bytes(), nil
}

func (c RealClock) Now() time.Time {
    return time.Now()
}

func NewExec() *Exec {
    return &Exec{runner: CommandRunner{}, clock: RealClock{}}
}

func (e *Exec) SampleConfig() string {
    return sampleConfig
}

func (e *Exec) Description() string {
    return "Read flattened metrics from one or more commands that output JSON to stdout"
}

func (e *Exec) Gather(acc plugins.Accumulator) error {
    var wg sync.WaitGroup

    errorChannel := make(chan error, len(e.Commands))

    for _, c := range e.Commands {
        wg.Add(1)
        go func(c *Command, acc plugins.Accumulator) {
            defer wg.Done()
            err := e.gatherCommand(c, acc)
            if err != nil {
                errorChannel <- err
            }
        }(c, acc)
    }

    wg.Wait()
    close(errorChannel)

    // Get all errors and return them as one giant error
    errorStrings := []string{}
    for err := range errorChannel {
        errorStrings = append(errorStrings, err.Error())
    }

    if len(errorStrings) == 0 {
        return nil
    }
    return errors.New(strings.Join(errorStrings, "\n"))
}

func (e *Exec) gatherCommand(c *Command, acc plugins.Accumulator) error {
    secondsSinceLastRun := 0.0

    if c.lastRunAt.Unix() == 0 { // means time is uninitialized
        secondsSinceLastRun = math.Inf(1)
    } else {
        secondsSinceLastRun = (e.clock.Now().Sub(c.lastRunAt)).Seconds()
    }

    if secondsSinceLastRun >= float64(c.Interval) {
        out, err := e.runner.Run(c)
        if err != nil {
            return err
        }

        var jsonOut interface{}
        err = json.Unmarshal(out, &jsonOut)
        if err != nil {
            return fmt.Errorf("exec: unable to parse output of '%s' as JSON, %s", c.Command, err)
        }

        processResponse(acc, c.Name, map[string]string{}, jsonOut)
    }
    return nil
}

func processResponse(acc plugins.Accumulator, prefix string, tags map[string]string, v interface{}) {
    switch t := v.(type) {
    case map[string]interface{}:
        for k, v := range t {
            processResponse(acc, prefix+"_"+k, tags, v)
        }
    case float64:
        acc.Add(prefix, v, tags)
    }
}

func init() {
    plugins.Add("exec", func() plugins.Plugin {
        return NewExec()
    })
}
@@ -1,262 +0,0 @@
package exec

import (
    "fmt"
    "github.com/influxdb/telegraf/testutil"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
    "math"
    "testing"
    "time"
)

// Midnight 9/22/2015
const baseTimeSeconds = 1442905200

const validJson = `
{
    "status": "green",
    "num_processes": 82,
    "cpu": {
        "status": "red",
        "nil_status": null,
        "used": 8234,
        "free": 32
    },
    "percent": 0.81,
    "users": [0, 1, 2, 3]
}`

const malformedJson = `
{
    "status": "green",
`

type runnerMock struct {
    out []byte
    err error
}

type clockMock struct {
    now time.Time
}

func newRunnerMock(out []byte, err error) Runner {
    return &runnerMock{
        out: out,
        err: err,
    }
}

func (r runnerMock) Run(command *Command) ([]byte, error) {
    if r.err != nil {
        return nil, r.err
    }
    return r.out, nil
}

func newClockMock(now time.Time) Clock {
    return &clockMock{now: now}
}

func (c clockMock) Now() time.Time {
    return c.now
}

func TestExec(t *testing.T) {
    runner := newRunnerMock([]byte(validJson), nil)
    clock := newClockMock(time.Unix(baseTimeSeconds+20, 0))
    command := Command{
        Command:   "testcommand arg1",
        Name:      "mycollector",
        Interval:  10,
        lastRunAt: time.Unix(baseTimeSeconds, 0),
    }

    e := &Exec{
        runner:   runner,
        clock:    clock,
        Commands: []*Command{&command},
    }

    var acc testutil.Accumulator
    initialPoints := len(acc.Points)
    err := e.Gather(&acc)
    deltaPoints := len(acc.Points) - initialPoints
    require.NoError(t, err)

    checkFloat := []struct {
        name  string
        value float64
    }{
        {"mycollector_num_processes", 82},
        {"mycollector_cpu_used", 8234},
        {"mycollector_cpu_free", 32},
        {"mycollector_percent", 0.81},
    }

    for _, c := range checkFloat {
        assert.True(t, acc.CheckValue(c.name, c.value))
    }

    assert.Equal(t, deltaPoints, 4, "non-numeric measurements should be ignored")
}

func TestExecMalformed(t *testing.T) {
    runner := newRunnerMock([]byte(malformedJson), nil)
    clock := newClockMock(time.Unix(baseTimeSeconds+20, 0))
    command := Command{
        Command:   "badcommand arg1",
        Name:      "mycollector",
        Interval:  10,
        lastRunAt: time.Unix(baseTimeSeconds, 0),
    }

    e := &Exec{
        runner:   runner,
        clock:    clock,
        Commands: []*Command{&command},
    }

    var acc testutil.Accumulator
    initialPoints := len(acc.Points)
    err := e.Gather(&acc)
    deltaPoints := len(acc.Points) - initialPoints
    require.Error(t, err)

    assert.Equal(t, deltaPoints, 0, "No new points should have been added")
}

func TestCommandError(t *testing.T) {
    runner := newRunnerMock(nil, fmt.Errorf("exit status code 1"))
    clock := newClockMock(time.Unix(baseTimeSeconds+20, 0))
    command := Command{
        Command:   "badcommand",
        Name:      "mycollector",
        Interval:  10,
        lastRunAt: time.Unix(baseTimeSeconds, 0),
    }

    e := &Exec{
        runner:   runner,
        clock:    clock,
        Commands: []*Command{&command},
    }

    var acc testutil.Accumulator
    initialPoints := len(acc.Points)
    err := e.Gather(&acc)
    deltaPoints := len(acc.Points) - initialPoints
    require.Error(t, err)

    assert.Equal(t, deltaPoints, 0, "No new points should have been added")
}

func TestExecNotEnoughTime(t *testing.T) {
    runner := newRunnerMock([]byte(validJson), nil)
    clock := newClockMock(time.Unix(baseTimeSeconds+5, 0))
    command := Command{
        Command:   "testcommand arg1",
        Name:      "mycollector",
        Interval:  10,
        lastRunAt: time.Unix(baseTimeSeconds, 0),
    }

    e := &Exec{
        runner:   runner,
        clock:    clock,
        Commands: []*Command{&command},
    }

    var acc testutil.Accumulator
    initialPoints := len(acc.Points)
    err := e.Gather(&acc)
    deltaPoints := len(acc.Points) - initialPoints
    require.NoError(t, err)

    assert.Equal(t, deltaPoints, 0, "No new points should have been added")
}

func TestExecUninitializedLastRunAt(t *testing.T) {
    runner := newRunnerMock([]byte(validJson), nil)
    clock := newClockMock(time.Unix(baseTimeSeconds, 0))
    command := Command{
        Command:  "testcommand arg1",
        Name:     "mycollector",
        Interval: math.MaxInt32,
        // Uninitialized lastRunAt should default to time.Unix(0, 0), so this should
        // run no matter what the interval is
    }

    e := &Exec{
        runner:   runner,
        clock:    clock,
        Commands: []*Command{&command},
    }

    var acc testutil.Accumulator
    initialPoints := len(acc.Points)
    err := e.Gather(&acc)
    deltaPoints := len(acc.Points) - initialPoints
    require.NoError(t, err)

    checkFloat := []struct {
        name  string
        value float64
    }{
        {"mycollector_num_processes", 82},
        {"mycollector_cpu_used", 8234},
        {"mycollector_cpu_free", 32},
        {"mycollector_percent", 0.81},
    }

    for _, c := range checkFloat {
        assert.True(t, acc.CheckValue(c.name, c.value))
    }

    assert.Equal(t, deltaPoints, 4, "non-numeric measurements should be ignored")
}

func TestExecOneNotEnoughTimeAndOneEnoughTime(t *testing.T) {
    runner := newRunnerMock([]byte(validJson), nil)
    clock := newClockMock(time.Unix(baseTimeSeconds+5, 0))
    notEnoughTimeCommand := Command{
        Command:   "testcommand arg1",
        Name:      "mycollector",
        Interval:  10,
        lastRunAt: time.Unix(baseTimeSeconds, 0),
    }
    enoughTimeCommand := Command{
        Command:   "testcommand arg1",
        Name:      "mycollector",
        Interval:  3,
        lastRunAt: time.Unix(baseTimeSeconds, 0),
    }

    e := &Exec{
        runner:   runner,
        clock:    clock,
        Commands: []*Command{&notEnoughTimeCommand, &enoughTimeCommand},
    }

    var acc testutil.Accumulator
    initialPoints := len(acc.Points)
    err := e.Gather(&acc)
    deltaPoints := len(acc.Points) - initialPoints
    require.NoError(t, err)

    checkFloat := []struct {
        name  string
        value float64
    }{
        {"mycollector_num_processes", 82},
        {"mycollector_cpu_used", 8234},
        {"mycollector_cpu_free", 32},
        {"mycollector_percent", 0.81},
    }

    for _, c := range checkFloat {
        assert.True(t, acc.CheckValue(c.name, c.value))
    }

    assert.Equal(t, deltaPoints, 4, "Only one command should have been run")
}
@@ -1,233 +0,0 @@
package httpjson

import (
    "encoding/json"
    "errors"
    "fmt"
    "io/ioutil"
    "net/http"
    "net/url"
    "strings"
    "sync"

    "github.com/influxdb/telegraf/plugins"
)

type HttpJson struct {
    Services []Service
    client   HTTPClient
}

type Service struct {
    Name       string
    Servers    []string
    Method     string
    TagKeys    []string
    Parameters map[string]string
}

type HTTPClient interface {
    // Returns the result of an http request
    //
    // Parameters:
    //     req: HTTP request object
    //
    // Returns:
    //     http.Response: HTTP response object
    //     error        : Any error that may have occurred
    MakeRequest(req *http.Request) (*http.Response, error)
}

type RealHTTPClient struct {
    client *http.Client
}

func (c RealHTTPClient) MakeRequest(req *http.Request) (*http.Response, error) {
    return c.client.Do(req)
}

var sampleConfig = `
  # Specify services via an array of tables
  [[plugins.httpjson.services]]

    # a name for the service being polled
    name = "webserver_stats"

    # URL of each server in the service's cluster
    servers = [
      "http://localhost:9999/stats/",
      "http://localhost:9998/stats/",
    ]

    # HTTP method to use (case-sensitive)
    method = "GET"

    # List of tag names to extract from top-level of JSON server response
    # tag_keys = [
    #   "my_tag_1",
    #   "my_tag_2"
    # ]

    # HTTP parameters (all values must be strings)
    [plugins.httpjson.services.parameters]
      event_type = "cpu_spike"
      threshold = "0.75"
`

func (h *HttpJson) SampleConfig() string {
    return sampleConfig
}

func (h *HttpJson) Description() string {
    return "Read flattened metrics from one or more JSON HTTP endpoints"
}

// Gathers data for all servers.
func (h *HttpJson) Gather(acc plugins.Accumulator) error {
    var wg sync.WaitGroup

    totalServers := 0
    for _, service := range h.Services {
        totalServers += len(service.Servers)
    }
    errorChannel := make(chan error, totalServers)

    for _, service := range h.Services {
        for _, server := range service.Servers {
            wg.Add(1)
            go func(service Service, server string) {
                defer wg.Done()
                if err := h.gatherServer(acc, service, server); err != nil {
                    errorChannel <- err
                }
            }(service, server)
        }
    }

    wg.Wait()
    close(errorChannel)

    // Get all errors and return them as one giant error
    errorStrings := []string{}
    for err := range errorChannel {
        errorStrings = append(errorStrings, err.Error())
    }

    if len(errorStrings) == 0 {
        return nil
    }
    return errors.New(strings.Join(errorStrings, "\n"))
}

// Gathers data from a particular server
// Parameters:
//     acc      : The telegraf Accumulator to use
//     serverURL: endpoint to send request to
//     service  : the service being queried
//
// Returns:
//     error: Any error that may have occurred
func (h *HttpJson) gatherServer(
    acc plugins.Accumulator,
    service Service,
    serverURL string,
) error {
    resp, err := h.sendRequest(service, serverURL)
    if err != nil {
        return err
    }

    var jsonOut map[string]interface{}
    if err = json.Unmarshal([]byte(resp), &jsonOut); err != nil {
        return errors.New("Error decoding JSON response")
    }

    tags := map[string]string{
        "server": serverURL,
    }

    for _, tag := range service.TagKeys {
        switch v := jsonOut[tag].(type) {
        case string:
            tags[tag] = v
        }
        delete(jsonOut, tag)
    }

    processResponse(acc, service.Name, tags, jsonOut)
    return nil
}

// Sends an HTTP request to the server using the HttpJson object's HTTPClient
// Parameters:
//     serverURL: endpoint to send request to
//
// Returns:
//     string: body of the response
//     error : Any error that may have occurred
func (h *HttpJson) sendRequest(service Service, serverURL string) (string, error) {
    // Prepare URL
    requestURL, err := url.Parse(serverURL)
    if err != nil {
        return "", fmt.Errorf("Invalid server URL \"%s\"", serverURL)
    }

    params := url.Values{}
    for k, v := range service.Parameters {
        params.Add(k, v)
    }
    requestURL.RawQuery = params.Encode()

    // Create + send request
    req, err := http.NewRequest(service.Method, requestURL.String(), nil)
    if err != nil {
        return "", err
    }

    resp, err := h.client.MakeRequest(req)
    if err != nil {
        return "", err
    }

    defer resp.Body.Close()
    body, err := ioutil.ReadAll(resp.Body)
    if err != nil {
        return string(body), err
    }

    // Process response
    if resp.StatusCode != http.StatusOK {
        err = fmt.Errorf("Response from url \"%s\" has status code %d (%s), expected %d (%s)",
            requestURL.String(),
            resp.StatusCode,
            http.StatusText(resp.StatusCode),
            http.StatusOK,
            http.StatusText(http.StatusOK))
        return string(body), err
    }

    return string(body), err
}

// Flattens the map generated from the JSON object and stores its float values using a
// plugins.Accumulator. It ignores any non-float values.
// Parameters:
//     acc   : the Accumulator to use
//     prefix: prefix to apply to the measurement name
//     tags  : telegraf tags to apply to the measurement
func processResponse(acc plugins.Accumulator, prefix string, tags map[string]string, v interface{}) {
    switch t := v.(type) {
    case map[string]interface{}:
        for k, v := range t {
            processResponse(acc, prefix+"_"+k, tags, v)
        }
    case float64:
        acc.Add(prefix, v, tags)
    }
}

func init() {
    plugins.Add("httpjson", func() plugins.Plugin {
        return &HttpJson{client: RealHTTPClient{client: &http.Client{}}}
    })
}
@@ -1,223 +0,0 @@
package httpjson

import (
    "fmt"
    "io/ioutil"
    "net/http"
    "strings"
    "testing"

    "github.com/influxdb/telegraf/testutil"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

const validJSON = `
{
    "parent": {
        "child": 3,
        "ignored_child": "hi"
    },
    "ignored_null": null,
    "integer": 4,
    "ignored_list": [3, 4],
    "ignored_parent": {
        "another_ignored_list": [4],
        "another_ignored_null": null,
        "ignored_string": "hello, world!"
    }
}`

const validJSONTags = `
{
    "value": 15,
    "role": "master",
    "build": "123"
}`

const invalidJSON = "I don't think this is JSON"

const empty = ""

type mockHTTPClient struct {
    responseBody string
    statusCode   int
}

// Mock implementation of MakeRequest. Usually returns an http.Response with
// hard-coded responseBody and statusCode. However, if the request uses a
// nonstandard method, it uses status code 405 (method not allowed)
func (c mockHTTPClient) MakeRequest(req *http.Request) (*http.Response, error) {
    resp := http.Response{}
    resp.StatusCode = c.statusCode

    // basic error checking on request method
    allowedMethods := []string{"GET", "HEAD", "POST", "PUT", "DELETE", "TRACE", "CONNECT"}
    methodValid := false
    for _, method := range allowedMethods {
        if req.Method == method {
            methodValid = true
            break
        }
    }

    if !methodValid {
        resp.StatusCode = 405 // Method not allowed
    }

    resp.Body = ioutil.NopCloser(strings.NewReader(c.responseBody))
    return &resp, nil
}

// Generates a pointer to an HttpJson object that uses a mock HTTP client.
// Parameters:
//     response  : Body of the response that the mock HTTP client should return
//     statusCode: HTTP status code the mock HTTP client should return
//
// Returns:
//     *HttpJson: Pointer to an HttpJson object that uses the generated mock HTTP client
func genMockHttpJson(response string, statusCode int) *HttpJson {
    return &HttpJson{
        client: mockHTTPClient{responseBody: response, statusCode: statusCode},
        Services: []Service{
            Service{
                Servers: []string{
                    "http://server1.example.com/metrics/",
                    "http://server2.example.com/metrics/",
                },
                Name:   "my_webapp",
                Method: "GET",
                Parameters: map[string]string{
                    "httpParam1": "12",
                    "httpParam2": "the second parameter",
                },
            },
            Service{
                Servers: []string{
                    "http://server3.example.com/metrics/",
                    "http://server4.example.com/metrics/",
                },
                Name:   "other_webapp",
                Method: "POST",
                Parameters: map[string]string{
                    "httpParam1": "12",
                    "httpParam2": "the second parameter",
                },
                TagKeys: []string{
                    "role",
                    "build",
                },
            },
        },
    }
}

// Test that the proper values are ignored or collected
func TestHttpJson200(t *testing.T) {
    httpjson := genMockHttpJson(validJSON, 200)

    var acc testutil.Accumulator
    err := httpjson.Gather(&acc)
    require.NoError(t, err)

    assert.Equal(t, 8, len(acc.Points))

    for _, service := range httpjson.Services {
        for _, srv := range service.Servers {
            require.NoError(t,
                acc.ValidateTaggedValue(
                    fmt.Sprintf("%s_parent_child", service.Name),
                    3.0,
                    map[string]string{"server": srv},
                ),
            )
            require.NoError(t,
                acc.ValidateTaggedValue(
                    fmt.Sprintf("%s_integer", service.Name),
                    4.0,
                    map[string]string{"server": srv},
                ),
            )
        }
    }
}

// Test response to HTTP 500
func TestHttpJson500(t *testing.T) {
    httpjson := genMockHttpJson(validJSON, 500)

    var acc testutil.Accumulator
    err := httpjson.Gather(&acc)

    assert.NotNil(t, err)
    // 4 error lines for (2 urls) * (2 services)
    assert.Equal(t, len(strings.Split(err.Error(), "\n")), 4)
    assert.Equal(t, 0, len(acc.Points))
}

// Test response to HTTP 405
func TestHttpJsonBadMethod(t *testing.T) {
    httpjson := genMockHttpJson(validJSON, 200)
    httpjson.Services[0].Method = "NOT_A_REAL_METHOD"

    var acc testutil.Accumulator
    err := httpjson.Gather(&acc)

    assert.NotNil(t, err)
    // 2 error lines for (2 urls) * (1 failed service)
    assert.Equal(t, len(strings.Split(err.Error(), "\n")), 2)

    // (2 measurements) * (2 servers) * (1 successful service)
    assert.Equal(t, 4, len(acc.Points))
}

// Test response to malformed JSON
func TestHttpJsonBadJson(t *testing.T) {
    httpjson := genMockHttpJson(invalidJSON, 200)

    var acc testutil.Accumulator
    err := httpjson.Gather(&acc)

    assert.NotNil(t, err)
    // 4 error lines for (2 urls) * (2 services)
    assert.Equal(t, len(strings.Split(err.Error(), "\n")), 4)
    assert.Equal(t, 0, len(acc.Points))
}

// Test response to an empty string as the response body
func TestHttpJsonEmptyResponse(t *testing.T) {
    httpjson := genMockHttpJson(empty, 200)

    var acc testutil.Accumulator
    err := httpjson.Gather(&acc)

    assert.NotNil(t, err)
    // 4 error lines for (2 urls) * (2 services)
    assert.Equal(t, len(strings.Split(err.Error(), "\n")), 4)
    assert.Equal(t, 0, len(acc.Points))
}

// Test that the proper values are ignored or collected, including tags
func TestHttpJson200Tags(t *testing.T) {
    httpjson := genMockHttpJson(validJSONTags, 200)

    var acc testutil.Accumulator
    err := httpjson.Gather(&acc)
    require.NoError(t, err)

    assert.Equal(t, 4, len(acc.Points))

    for _, service := range httpjson.Services {
        if service.Name == "other_webapp" {
            for _, srv := range service.Servers {
                require.NoError(t,
                    acc.ValidateTaggedValue(
                        fmt.Sprintf("%s_value", service.Name),
                        15.0,
                        map[string]string{"server": srv, "role": "master", "build": "123"},
                    ),
                )
            }
        }
    }
}
@@ -1,72 +0,0 @@
-# influxdb plugin
-
-The influxdb plugin collects InfluxDB-formatted data from JSON endpoints.
-
-With a configuration of:
-
-```toml
-[[plugins.influxdb]]
-  urls = [
-    "http://127.0.0.1:8086/debug/vars",
-    "http://192.168.2.1:8086/debug/vars"
-  ]
-```
-
-And if 127.0.0.1 responds with this JSON:
-
-```json
-{
-  "k1": {
-    "name": "fruit",
-    "tags": {
-      "kind": "apple"
-    },
-    "values": {
-      "inventory": 371,
-      "sold": 112
-    }
-  },
-  "k2": {
-    "name": "fruit",
-    "tags": {
-      "kind": "banana"
-    },
-    "values": {
-      "inventory": 1000,
-      "sold": 403
-    }
-  }
-}
-```
-
-And if 192.168.2.1 responds like so:
-
-```json
-{
-  "k3": {
-    "name": "transactions",
-    "tags": {},
-    "values": {
-      "total": 100,
-      "balance": 184.75
-    }
-  }
-}
-```
-
-Then the collected metrics will be:
-
-```
-influxdb_fruit,url='http://127.0.0.1:8086/debug/vars',kind='apple' inventory=371.0,sold=112.0
-influxdb_fruit,url='http://127.0.0.1:8086/debug/vars',kind='banana' inventory=1000.0,sold=403.0
-
-influxdb_transactions,url='http://192.168.2.1:8086/debug/vars' total=100.0,balance=184.75
-```
-
-There are two important details to note about the collected metrics:
-
-1. Even though the values in the JSON are displayed as integers, the metrics are reported as floats.
-   JSON encoders usually don't print the fractional part for round floats.
-   Because you cannot change the type of an existing field in InfluxDB, we assume all numbers are floats.
-
-2. The top-level keys' names (in the example above, `"k1"`, `"k2"`, and `"k3"`) are not considered when recording the metrics.
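The float behavior described in point 1 falls straight out of Go's encoding/json defaults: when decoding into `interface{}`, every JSON number becomes a `float64`, so the plugin could not distinguish `371` from `371.0` anyway. A minimal standalone sketch (not plugin code) illustrating the point:

```go
package main

import (
    "encoding/json"
    "fmt"
)

func main() {
    var values map[string]interface{}
    // "inventory" looks like an integer in the JSON text...
    _ = json.Unmarshal([]byte(`{"inventory": 371, "balance": 184.75}`), &values)
    for k, v := range values {
        // ...but both values arrive as float64 after decoding.
        fmt.Printf("%s: %v (%T)\n", k, v, v)
    }
}
```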
@@ -1,146 +0,0 @@
-package influxdb
-
-import (
-    "encoding/json"
-    "errors"
-    "fmt"
-    "net/http"
-    "strings"
-    "sync"
-
-    "github.com/influxdb/telegraf/plugins"
-)
-
-type InfluxDB struct {
-    URLs []string `toml:"urls"`
-}
-
-func (*InfluxDB) Description() string {
-    return "Read InfluxDB-formatted JSON metrics from one or more HTTP endpoints"
-}
-
-func (*InfluxDB) SampleConfig() string {
-    return `
-  # Works with InfluxDB debug endpoints out of the box,
-  # but other services can use this format too.
-  # See the influxdb plugin's README for more details.
-
-  # Multiple URLs from which to read InfluxDB-formatted JSON
-  urls = [
-    "http://localhost:8086/debug/vars"
-  ]
-`
-}
-
-func (i *InfluxDB) Gather(acc plugins.Accumulator) error {
-    errorChannel := make(chan error, len(i.URLs))
-
-    var wg sync.WaitGroup
-    for _, u := range i.URLs {
-        wg.Add(1)
-        go func(url string) {
-            defer wg.Done()
-            if err := i.gatherURL(acc, url); err != nil {
-                errorChannel <- fmt.Errorf("[url=%s]: %s", url, err)
-            }
-        }(u)
-    }
-
-    wg.Wait()
-    close(errorChannel)
-
-    // If there weren't any errors, we can return nil now.
-    if len(errorChannel) == 0 {
-        return nil
-    }
-
-    // There were errors, so join them all together as one big error.
-    errorStrings := make([]string, 0, len(errorChannel))
-    for err := range errorChannel {
-        errorStrings = append(errorStrings, err.Error())
-    }
-
-    return errors.New(strings.Join(errorStrings, "\n"))
-}
-
-type point struct {
-    Name   string                 `json:"name"`
-    Tags   map[string]string      `json:"tags"`
-    Values map[string]interface{} `json:"values"`
-}
-
-// Gathers data from a particular URL
-// Parameters:
-//     acc : The telegraf Accumulator to use
-//     url : endpoint to send request to
-//
-// Returns:
-//     error: Any error that may have occurred
-func (i *InfluxDB) gatherURL(
-    acc plugins.Accumulator,
-    url string,
-) error {
-    resp, err := http.Get(url)
-    if err != nil {
-        return err
-    }
-    defer resp.Body.Close()
-
-    // It would be nice to be able to decode into a map[string]point, but
-    // we'll get a decoder error like:
-    // `json: cannot unmarshal array into Go value of type influxdb.point`
-    // if any of the values aren't objects.
-    // To avoid that error, we decode by hand.
-    dec := json.NewDecoder(resp.Body)
-
-    // Parse beginning of object
-    if t, err := dec.Token(); err != nil {
-        return err
-    } else if t != json.Delim('{') {
-        return errors.New("document root must be a JSON object")
-    }
-
-    // Loop through rest of object
-    for {
-        // Nothing left in this object, we're done
-        if !dec.More() {
-            break
-        }
-
-        // Read in a string key. We don't do anything with the top-level keys, so it's discarded.
-        _, err := dec.Token()
-        if err != nil {
-            return err
-        }
-
-        // Attempt to parse a whole object into a point.
-        // It might be a non-object, like a string or array.
-        // If we fail to decode it into a point, ignore it and move on.
-        var p point
-        if err := dec.Decode(&p); err != nil {
-            continue
-        }
-
-        // If the object was a point, but was not fully initialized, ignore it and move on.
-        if p.Name == "" || p.Tags == nil || p.Values == nil || len(p.Values) == 0 {
-            continue
-        }
-
-        // Add a tag to indicate the source of the data.
-        p.Tags["url"] = url
-
-        acc.AddFields(
-            p.Name,
-            p.Values,
-            p.Tags,
-        )
-    }
-
-    return nil
-}
-
-func init() {
-    plugins.Add("influxdb", func() plugins.Plugin {
-        return &InfluxDB{}
-    })
-}
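The decode-by-hand comment above is the interesting part of this deleted file. A standalone sketch (hypothetical input, not plugin code) of why the token-wise approach works where a single `map[string]point` unmarshal would fail; it relies on encoding/json consuming the whole value even when `Decode` returns a type error, which is what lets the loop `continue` past non-point values:

```go
package main

import (
    "encoding/json"
    "fmt"
    "strings"
)

type point struct {
    Name   string                 `json:"name"`
    Tags   map[string]string      `json:"tags"`
    Values map[string]interface{} `json:"values"`
}

func main() {
    // The "skip" value is an array, so json.Unmarshal into a
    // map[string]point would fail on the whole document at once.
    doc := `{"skip": [1, 2], "ok": {"name": "m", "tags": {}, "values": {"v": 1}}}`

    dec := json.NewDecoder(strings.NewReader(doc))
    dec.Token() // consume the opening '{'
    for dec.More() {
        dec.Token() // consume (and discard) the top-level key
        var p point
        if err := dec.Decode(&p); err != nil {
            continue // not a point-shaped value; the decoder has skipped past it
        }
        if p.Name != "" {
            fmt.Println("decoded point:", p.Name, p.Values)
        }
    }
}
```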
@@ -1,100 +0,0 @@
-package influxdb_test
-
-import (
-    "net/http"
-    "net/http/httptest"
-    "testing"
-
-    "github.com/influxdb/telegraf/plugins/influxdb"
-    "github.com/influxdb/telegraf/testutil"
-    "github.com/stretchr/testify/require"
-)
-
-func TestBasic(t *testing.T) {
-    js := `
-{
-    "_1": {
-        "name": "foo",
-        "tags": {
-            "id": "ex1"
-        },
-        "values": {
-            "i": -1,
-            "f": 0.5,
-            "b": true,
-            "s": "string"
-        }
-    },
-    "ignored": {
-        "willBeRecorded": false
-    },
-    "ignoredAndNested": {
-        "hash": {
-            "is": "nested"
-        }
-    },
-    "array": [
-        "makes parsing more difficult than necessary"
-    ],
-    "string": "makes parsing more difficult than necessary",
-    "_2": {
-        "name": "bar",
-        "tags": {
-            "id": "ex2"
-        },
-        "values": {
-            "x": "x"
-        }
-    },
-    "pointWithoutFields_willNotBeIncluded": {
-        "name": "asdf",
-        "tags": {
-            "id": "ex3"
-        },
-        "values": {}
-    }
-}
-`
-    fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-        if r.URL.Path == "/endpoint" {
-            _, _ = w.Write([]byte(js))
-        } else {
-            w.WriteHeader(http.StatusNotFound)
-        }
-    }))
-    defer fakeServer.Close()
-
-    plugin := &influxdb.InfluxDB{
-        URLs: []string{fakeServer.URL + "/endpoint"},
-    }
-
-    var acc testutil.Accumulator
-    require.NoError(t, plugin.Gather(&acc))
-
-    require.Len(t, acc.Points, 2)
-    require.NoError(t, acc.ValidateTaggedFieldsValue(
-        "foo",
-        map[string]interface{}{
-            // JSON will truncate floats to integer representations.
-            // Since there's no distinction in JSON, we can't assume it's an int.
-            "i": -1.0,
-            "f": 0.5,
-            "b": true,
-            "s": "string",
-        },
-        map[string]string{
-            "id":  "ex1",
-            "url": fakeServer.URL + "/endpoint",
-        },
-    ))
-    require.NoError(t, acc.ValidateTaggedFieldsValue(
-        "bar",
-        map[string]interface{}{
-            "x": "x",
-        },
-        map[string]string{
-            "id":  "ex2",
-            "url": fakeServer.URL + "/endpoint",
-        },
-    ))
-}
37  plugins/inputs/EXAMPLE_README.md  Normal file
@@ -0,0 +1,37 @@
+# Example Input Plugin
+
+The example plugin gathers metrics about example things
+
+### Configuration:
+
+```toml
+# Description
+[[inputs.example]]
+  # SampleConfig
+```
+
+### Measurements & Fields:
+
+<optional description>
+
+- measurement1
+    - field1 (type, unit)
+    - field2 (float, percent)
+- measurement2
+    - field3 (integer, bytes)
+
+### Tags:
+
+- All measurements have the following tags:
+    - tag1 (optional description)
+    - tag2
+- measurement2 has the following tags:
+    - tag3
+
+### Example Output:
+
+```
+$ ./telegraf -config telegraf.conf -input-filter example -test
+measurement1,tag1=foo,tag2=bar field1=1i,field2=2.1 1453831884664956455
+measurement2,tag1=foo,tag2=bar,tag3=baz field3=1i 1453831884664956455
+```
@@ -4,7 +4,8 @@ import (
     "bytes"
     "encoding/binary"
     "fmt"
-    "github.com/influxdb/telegraf/plugins"
+    "github.com/influxdata/telegraf"
+    "github.com/influxdata/telegraf/plugins/inputs"
     "net"
     "strconv"
     "strings"
@@ -103,11 +104,9 @@ type Aerospike struct {
 }

 var sampleConfig = `
-  # Aerospike servers to connect to (with port)
-  # Default: servers = ["localhost:3000"]
-  #
-  # This plugin will query all namespaces the aerospike
-  # server has configured and get stats for them.
+  ## Aerospike servers to connect to (with port)
+  ## This plugin will query all namespaces the aerospike
+  ## server has configured and get stats for them.
   servers = ["localhost:3000"]
 `

@@ -119,7 +118,7 @@ func (a *Aerospike) Description() string {
     return "Read stats from an aerospike server"
 }

-func (a *Aerospike) Gather(acc plugins.Accumulator) error {
+func (a *Aerospike) Gather(acc telegraf.Accumulator) error {
     if len(a.Servers) == 0 {
         return a.gatherServer("127.0.0.1:3000", acc)
     }
@@ -140,7 +139,7 @@ func (a *Aerospike) Gather(acc plugins.Accumulator) error {
     return outerr
 }

-func (a *Aerospike) gatherServer(host string, acc plugins.Accumulator) error {
+func (a *Aerospike) gatherServer(host string, acc telegraf.Accumulator) error {
     aerospikeInfo, err := getMap(STATISTICS_COMMAND, host)
     if err != nil {
         return fmt.Errorf("Aerospike info failed: %s", err)
@@ -247,26 +246,32 @@ func get(key []byte, host string) (map[string]string, error) {
     return data, err
 }

-func readAerospikeStats(stats map[string]string, acc plugins.Accumulator, host, namespace string) {
+func readAerospikeStats(
+    stats map[string]string,
+    acc telegraf.Accumulator,
+    host string,
+    namespace string,
+) {
+    fields := make(map[string]interface{})
+    tags := map[string]string{
+        "aerospike_host": host,
+        "namespace":      "_service",
+    }
+
+    if namespace != "" {
+        tags["namespace"] = namespace
+    }
     for key, value := range stats {
-        tags := map[string]string{
-            "aerospike_host": host,
-            "namespace":      "_service",
-        }
-
-        if namespace != "" {
-            tags["namespace"] = namespace
-        }
-
         // We are going to ignore all string based keys
         val, err := strconv.ParseInt(value, 10, 64)
         if err == nil {
             if strings.Contains(key, "-") {
                 key = strings.Replace(key, "-", "_", -1)
             }
-            acc.Add(key, val, tags)
+            fields[key] = val
         }
     }
+    acc.AddFields("aerospike", fields, tags)
 }

 func unmarshalMapInfo(infoMap map[string]string, key string) (map[string]string, error) {
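The shape of this refactor recurs throughout the 1.0 migration: instead of emitting one measurement per stat key via `acc.Add`, plugins accumulate every key into a single `fields` map and emit one point per gather. A schematic sketch of the new style, with `Accumulator` stubbed out as a minimal stand-in for the telegraf interface (the stub is mine, for illustration only):

```go
package main

import "fmt"

// Accumulator is a minimal stand-in for telegraf.Accumulator,
// just enough to illustrate the 0.x -> 1.0 accumulator change.
type Accumulator interface {
    AddFields(measurement string, fields map[string]interface{}, tags map[string]string)
}

type printAcc struct{}

func (printAcc) AddFields(m string, f map[string]interface{}, t map[string]string) {
    fmt.Println(m, f, t)
}

func main() {
    stats := map[string]string{"stat_write_errs": "0", "stat_read_reqs": "12345"}
    tags := map[string]string{"aerospike_host": "host1", "namespace": "_service"}

    // Pre-1.0 plugins emitted one point per key (acc.Add(key, val, tags));
    // 1.0 plugins collect every key into one fields map...
    fields := make(map[string]interface{})
    for k, v := range stats {
        fields[k] = v
    }

    // ...and emit a single point per measurement per gather cycle.
    var acc Accumulator = printAcc{}
    acc.AddFields("aerospike", fields, tags)
}
```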
@@ -330,7 +335,7 @@ func msgLenFromBytes(buf [6]byte) int64 {
 }

 func init() {
-    plugins.Add("aerospike", func() plugins.Plugin {
+    inputs.Add("aerospike", func() telegraf.Input {
         return &Aerospike{}
     })
 }
@@ -1,11 +1,12 @@
 package aerospike

 import (
-    "github.com/influxdb/telegraf/testutil"
-    "github.com/stretchr/testify/assert"
-    "github.com/stretchr/testify/require"
     "reflect"
     "testing"

+    "github.com/influxdata/telegraf/testutil"
+    "github.com/stretchr/testify/assert"
+    "github.com/stretchr/testify/require"
 )

 func TestAerospikeStatistics(t *testing.T) {
@@ -31,7 +32,7 @@ func TestAerospikeStatistics(t *testing.T) {
     }

     for _, metric := range asMetrics {
-        assert.True(t, acc.HasIntValue(metric), metric)
+        assert.True(t, acc.HasIntField("aerospike", metric), metric)
     }

 }
@@ -49,13 +50,16 @@ func TestReadAerospikeStatsNoNamespace(t *testing.T) {
         "stat_read_reqs": "12345",
     }
     readAerospikeStats(stats, &acc, "host1", "")
-    for k := range stats {
-        if k == "stat-write-errs" {
-            k = "stat_write_errs"
-        }
-        assert.True(t, acc.HasMeasurement(k))
-        assert.True(t, acc.CheckValue(k, int64(12345)))
+
+    fields := map[string]interface{}{
+        "stat_write_errs": int64(12345),
+        "stat_read_reqs":  int64(12345),
     }
+    tags := map[string]string{
+        "aerospike_host": "host1",
+        "namespace":      "_service",
+    }
+    acc.AssertContainsTaggedFields(t, "aerospike", fields, tags)
 }

 func TestReadAerospikeStatsNamespace(t *testing.T) {
@@ -66,13 +70,15 @@ func TestReadAerospikeStatsNamespace(t *testing.T) {
     }
     readAerospikeStats(stats, &acc, "host1", "test")

+    fields := map[string]interface{}{
+        "stat_write_errs": int64(12345),
+        "stat_read_reqs":  int64(12345),
+    }
     tags := map[string]string{
         "aerospike_host": "host1",
         "namespace":      "test",
     }
-    for k := range stats {
-        assert.True(t, acc.ValidateTaggedValue(k, int64(12345), tags) == nil)
-    }
+    acc.AssertContainsTaggedFields(t, "aerospike", fields, tags)
 }

 func TestAerospikeUnmarshalList(t *testing.T) {
75  plugins/inputs/all/all.go  Normal file
@@ -0,0 +1,75 @@
+package all
+
+import (
+    _ "github.com/influxdata/telegraf/plugins/inputs/aerospike"
+    _ "github.com/influxdata/telegraf/plugins/inputs/apache"
+    _ "github.com/influxdata/telegraf/plugins/inputs/bcache"
+    _ "github.com/influxdata/telegraf/plugins/inputs/cassandra"
+    _ "github.com/influxdata/telegraf/plugins/inputs/ceph"
+    _ "github.com/influxdata/telegraf/plugins/inputs/chrony"
+    _ "github.com/influxdata/telegraf/plugins/inputs/cloudwatch"
+    _ "github.com/influxdata/telegraf/plugins/inputs/conntrack"
+    _ "github.com/influxdata/telegraf/plugins/inputs/consul"
+    _ "github.com/influxdata/telegraf/plugins/inputs/couchbase"
+    _ "github.com/influxdata/telegraf/plugins/inputs/couchdb"
+    _ "github.com/influxdata/telegraf/plugins/inputs/disque"
+    _ "github.com/influxdata/telegraf/plugins/inputs/dns_query"
+    _ "github.com/influxdata/telegraf/plugins/inputs/docker"
+    _ "github.com/influxdata/telegraf/plugins/inputs/dovecot"
+    _ "github.com/influxdata/telegraf/plugins/inputs/elasticsearch"
+    _ "github.com/influxdata/telegraf/plugins/inputs/exec"
+    _ "github.com/influxdata/telegraf/plugins/inputs/filestat"
+    _ "github.com/influxdata/telegraf/plugins/inputs/github_webhooks"
+    _ "github.com/influxdata/telegraf/plugins/inputs/graylog"
+    _ "github.com/influxdata/telegraf/plugins/inputs/haproxy"
+    _ "github.com/influxdata/telegraf/plugins/inputs/http_response"
+    _ "github.com/influxdata/telegraf/plugins/inputs/httpjson"
+    _ "github.com/influxdata/telegraf/plugins/inputs/influxdb"
+    _ "github.com/influxdata/telegraf/plugins/inputs/ipmi_sensor"
+    _ "github.com/influxdata/telegraf/plugins/inputs/jolokia"
+    _ "github.com/influxdata/telegraf/plugins/inputs/kafka_consumer"
+    _ "github.com/influxdata/telegraf/plugins/inputs/leofs"
+    _ "github.com/influxdata/telegraf/plugins/inputs/lustre2"
+    _ "github.com/influxdata/telegraf/plugins/inputs/mailchimp"
+    _ "github.com/influxdata/telegraf/plugins/inputs/memcached"
+    _ "github.com/influxdata/telegraf/plugins/inputs/mesos"
+    _ "github.com/influxdata/telegraf/plugins/inputs/mongodb"
+    _ "github.com/influxdata/telegraf/plugins/inputs/mqtt_consumer"
+    _ "github.com/influxdata/telegraf/plugins/inputs/mysql"
+    _ "github.com/influxdata/telegraf/plugins/inputs/nats_consumer"
+    _ "github.com/influxdata/telegraf/plugins/inputs/net_response"
+    _ "github.com/influxdata/telegraf/plugins/inputs/nginx"
+    _ "github.com/influxdata/telegraf/plugins/inputs/nsq"
+    _ "github.com/influxdata/telegraf/plugins/inputs/nstat"
+    _ "github.com/influxdata/telegraf/plugins/inputs/ntpq"
+    _ "github.com/influxdata/telegraf/plugins/inputs/passenger"
+    _ "github.com/influxdata/telegraf/plugins/inputs/phpfpm"
+    _ "github.com/influxdata/telegraf/plugins/inputs/ping"
+    _ "github.com/influxdata/telegraf/plugins/inputs/postgresql"
+    _ "github.com/influxdata/telegraf/plugins/inputs/postgresql_extensible"
+    _ "github.com/influxdata/telegraf/plugins/inputs/powerdns"
+    _ "github.com/influxdata/telegraf/plugins/inputs/procstat"
+    _ "github.com/influxdata/telegraf/plugins/inputs/prometheus"
+    _ "github.com/influxdata/telegraf/plugins/inputs/puppetagent"
+    _ "github.com/influxdata/telegraf/plugins/inputs/rabbitmq"
+    _ "github.com/influxdata/telegraf/plugins/inputs/raindrops"
+    _ "github.com/influxdata/telegraf/plugins/inputs/redis"
+    _ "github.com/influxdata/telegraf/plugins/inputs/rethinkdb"
+    _ "github.com/influxdata/telegraf/plugins/inputs/riak"
+    _ "github.com/influxdata/telegraf/plugins/inputs/rollbar_webhooks"
+    _ "github.com/influxdata/telegraf/plugins/inputs/sensors"
+    _ "github.com/influxdata/telegraf/plugins/inputs/snmp"
+    _ "github.com/influxdata/telegraf/plugins/inputs/sqlserver"
+    _ "github.com/influxdata/telegraf/plugins/inputs/statsd"
+    _ "github.com/influxdata/telegraf/plugins/inputs/sysstat"
+    _ "github.com/influxdata/telegraf/plugins/inputs/system"
+    _ "github.com/influxdata/telegraf/plugins/inputs/tail"
+    _ "github.com/influxdata/telegraf/plugins/inputs/tcp_listener"
+    _ "github.com/influxdata/telegraf/plugins/inputs/trig"
+    _ "github.com/influxdata/telegraf/plugins/inputs/twemproxy"
+    _ "github.com/influxdata/telegraf/plugins/inputs/udp_listener"
+    _ "github.com/influxdata/telegraf/plugins/inputs/varnish"
+    _ "github.com/influxdata/telegraf/plugins/inputs/win_perf_counters"
+    _ "github.com/influxdata/telegraf/plugins/inputs/zfs"
+    _ "github.com/influxdata/telegraf/plugins/inputs/zookeeper"
+)
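This all.go file is the registry trick that makes `-input-filter` work: each plugin package registers itself from an `init()` function, and the blank imports exist solely to trigger those `init()`s. A self-contained sketch of the same pattern, with a hypothetical registry shown inline rather than split across packages as telegraf does:

```go
package main

import "fmt"

// registry mimics what inputs.Add maintains in telegraf: a
// name -> constructor map that plugin packages populate from init().
var registry = map[string]func() string{}

func Add(name string, creator func() string) {
    registry[name] = creator
}

// In telegraf each plugin lives in its own package; importing it with
// `_ "path/to/plugin"` runs this init() and nothing else.
func init() {
    Add("example", func() string { return "example input" })
}

func main() {
    creator := registry["example"]
    fmt.Println(creator())
}
```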
@@ -1,7 +1,7 @@
 # Telegraf plugin: Apache

 #### Plugin arguments:
-- **urls** []string: List of apache-status URLs to collect from.
+- **urls** []string: List of apache-status URLs to collect from. Default is "http://localhost/server-status?auto".

 #### Description

@@ -11,7 +11,8 @@ import (
     "sync"
     "time"

-    "github.com/influxdb/telegraf/plugins"
+    "github.com/influxdata/telegraf"
+    "github.com/influxdata/telegraf/plugins/inputs"
 )

 type Apache struct {
@@ -19,7 +20,8 @@ type Apache struct {
 }

 var sampleConfig = `
-  # An array of Apache status URI to gather stats.
+  ## An array of Apache status URI to gather stats.
+  ## Default is "http://localhost/server-status?auto".
   urls = ["http://localhost/server-status?auto"]
 `

@@ -31,7 +33,11 @@ func (n *Apache) Description() string {
     return "Read Apache status information (mod_status)"
 }

-func (n *Apache) Gather(acc plugins.Accumulator) error {
+func (n *Apache) Gather(acc telegraf.Accumulator) error {
+    if len(n.Urls) == 0 {
+        n.Urls = []string{"http://localhost/server-status?auto"}
+    }
+
     var wg sync.WaitGroup
     var outerr error

@@ -57,9 +63,12 @@ var tr = &http.Transport{
     ResponseHeaderTimeout: time.Duration(3 * time.Second),
 }

-var client = &http.Client{Transport: tr}
+var client = &http.Client{
+    Transport: tr,
+    Timeout:   time.Duration(4 * time.Second),
+}

-func (n *Apache) gatherUrl(addr *url.URL, acc plugins.Accumulator) error {
+func (n *Apache) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error {
     resp, err := client.Get(addr.String())
     if err != nil {
         return fmt.Errorf("error making HTTP request to %s: %s", addr.String(), err)
@@ -72,32 +81,33 @@ func (n *Apache) gatherUrl(addr *url.URL, acc plugins.Accumulator) error {
     tags := getTags(addr)

     sc := bufio.NewScanner(resp.Body)
+    fields := make(map[string]interface{})
     for sc.Scan() {
         line := sc.Text()
         if strings.Contains(line, ":") {

             parts := strings.SplitN(line, ":", 2)
             key, part := strings.Replace(parts[0], " ", "", -1), strings.TrimSpace(parts[1])

             switch key {

             case "Scoreboard":
-                n.gatherScores(part, acc, tags)
+                for field, value := range n.gatherScores(part) {
+                    fields[field] = value
+                }
             default:
                 value, err := strconv.ParseFloat(part, 64)
                 if err != nil {
                     continue
                 }
-                acc.Add(key, value, tags)
+                fields[key] = value
             }
         }
     }
+    acc.AddFields("apache", fields, tags)

     return nil
 }

-func (n *Apache) gatherScores(data string, acc plugins.Accumulator, tags map[string]string) {
+func (n *Apache) gatherScores(data string) map[string]interface{} {

     var waiting, open int = 0, 0
     var S, R, W, K, D, C, L, G, I int = 0, 0, 0, 0, 0, 0, 0, 0, 0

@@ -129,17 +139,20 @@ func (n *Apache) gatherScores(data string, acc plugins.Accumulator, tags map[str
         }
     }

-    acc.Add("scboard_waiting", float64(waiting), tags)
-    acc.Add("scboard_starting", float64(S), tags)
-    acc.Add("scboard_reading", float64(R), tags)
-    acc.Add("scboard_sending", float64(W), tags)
-    acc.Add("scboard_keepalive", float64(K), tags)
-    acc.Add("scboard_dnslookup", float64(D), tags)
-    acc.Add("scboard_closing", float64(C), tags)
-    acc.Add("scboard_logging", float64(L), tags)
-    acc.Add("scboard_finishing", float64(G), tags)
-    acc.Add("scboard_idle_cleanup", float64(I), tags)
-    acc.Add("scboard_open", float64(open), tags)
+    fields := map[string]interface{}{
+        "scboard_waiting":      float64(waiting),
+        "scboard_starting":     float64(S),
+        "scboard_reading":      float64(R),
+        "scboard_sending":      float64(W),
+        "scboard_keepalive":    float64(K),
+        "scboard_dnslookup":    float64(D),
+        "scboard_closing":      float64(C),
+        "scboard_logging":      float64(L),
+        "scboard_finishing":    float64(G),
+        "scboard_idle_cleanup": float64(I),
+        "scboard_open":         float64(open),
+    }
+    return fields
 }

 // Get tag(s) for the apache plugin
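For context on where these counters come from: mod_status reports worker states as a single `Scoreboard` string of status characters, and the elided middle of gatherScores presumably tallies them per character (`_` waiting, `S` starting, `R` reading, `W` sending, `K` keepalive, `D` DNS lookup, `C` closing, `L` logging, `G` finishing, `I` idle cleanup, `.` open slot). A hedged standalone sketch of that counting step, using a map where the real function keeps separate int variables:

```go
package main

import "fmt"

// countScoreboard tallies Apache mod_status scoreboard characters into
// the field names the plugin reports. This is an illustration, not the
// plugin's actual implementation.
func countScoreboard(data string) map[string]int {
    names := map[rune]string{
        '_': "scboard_waiting", 'S': "scboard_starting",
        'R': "scboard_reading", 'W': "scboard_sending",
        'K': "scboard_keepalive", 'D': "scboard_dnslookup",
        'C': "scboard_closing", 'L': "scboard_logging",
        'G': "scboard_finishing", 'I': "scboard_idle_cleanup",
        '.': "scboard_open",
    }
    counts := map[string]int{}
    for _, c := range data {
        if name, ok := names[c]; ok {
            counts[name]++
        }
    }
    return counts
}

func main() {
    fmt.Println(countScoreboard("__RRW_K...."))
}
```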
@@ -160,7 +173,7 @@ func getTags(addr *url.URL) map[string]string {
 }

 func init() {
-    plugins.Add("apache", func() plugins.Plugin {
+    inputs.Add("apache", func() telegraf.Input {
         return &Apache{}
     })
 }
@@ -6,9 +6,8 @@ import (
     "net/http/httptest"
     "testing"

-    "github.com/influxdb/telegraf/testutil"
+    "github.com/influxdata/telegraf/testutil"

-    "github.com/stretchr/testify/assert"
     "github.com/stretchr/testify/require"
 )

@@ -44,37 +43,31 @@ func TestHTTPApache(t *testing.T) {
     err := a.Gather(&acc)
     require.NoError(t, err)

-    testInt := []struct {
-        measurement string
-        value       float64
-    }{
-        {"TotalAccesses", 1.29811861e+08},
-        {"TotalkBytes", 5.213701865e+09},
-        {"CPULoad", 6.51929},
-        {"Uptime", 941553},
-        {"ReqPerSec", 137.87},
-        {"BytesPerSec", 5.67024e+06},
-        {"BytesPerReq", 41127.4},
-        {"BusyWorkers", 270},
-        {"IdleWorkers", 630},
-        {"ConnsTotal", 1451},
-        {"ConnsAsyncWriting", 32},
-        {"ConnsAsyncKeepAlive", 945},
-        {"ConnsAsyncClosing", 205},
-        {"scboard_waiting", 630},
-        {"scboard_starting", 0},
-        {"scboard_reading", 157},
-        {"scboard_sending", 113},
-        {"scboard_keepalive", 0},
-        {"scboard_dnslookup", 0},
-        {"scboard_closing", 0},
-        {"scboard_logging", 0},
-        {"scboard_finishing", 0},
-        {"scboard_idle_cleanup", 0},
-        {"scboard_open", 2850},
-    }
-
-    for _, test := range testInt {
-        assert.True(t, acc.CheckValue(test.measurement, test.value))
-    }
+    fields := map[string]interface{}{
+        "TotalAccesses":        float64(1.29811861e+08),
+        "TotalkBytes":          float64(5.213701865e+09),
+        "CPULoad":              float64(6.51929),
+        "Uptime":               float64(941553),
+        "ReqPerSec":            float64(137.87),
+        "BytesPerSec":          float64(5.67024e+06),
+        "BytesPerReq":          float64(41127.4),
+        "BusyWorkers":          float64(270),
+        "IdleWorkers":          float64(630),
+        "ConnsTotal":           float64(1451),
+        "ConnsAsyncWriting":    float64(32),
+        "ConnsAsyncKeepAlive":  float64(945),
+        "ConnsAsyncClosing":    float64(205),
+        "scboard_waiting":      float64(630),
+        "scboard_starting":     float64(0),
+        "scboard_reading":      float64(157),
+        "scboard_sending":      float64(113),
+        "scboard_keepalive":    float64(0),
+        "scboard_dnslookup":    float64(0),
+        "scboard_closing":      float64(0),
+        "scboard_logging":      float64(0),
+        "scboard_finishing":    float64(0),
+        "scboard_idle_cleanup": float64(0),
+        "scboard_open":         float64(2850),
+    }
+    acc.AssertContainsFields(t, "apache", fields)
 }
@@ -26,27 +26,27 @@ Measurement names:
 dirty_data
     Amount of dirty data for this backing device in the cache. Continuously
     updated unlike the cache set's version, but may be slightly off.

 bypassed
     Amount of IO (both reads and writes) that has bypassed the cache

 cache_bypass_hits
 cache_bypass_misses
     Hits and misses for IO that is intended to skip the cache are still counted,
     but broken out here.

 cache_hits
 cache_misses
 cache_hit_ratio
     Hits and misses are counted per individual IO as bcache sees them; a
     partial hit is counted as a miss.

 cache_miss_collisions
     Counts instances where data was going to be inserted into the cache from a
     cache miss, but raced with a write and data was already present (usually 0
     since the synchronization for cache misses was rewritten)

 cache_readaheads
     Count of times readahead occurred.
 ```
@@ -70,7 +70,7 @@ Using this configuration:
 When run with:

 ```
-./telegraf -config telegraf.conf -filter bcache -test
+./telegraf -config telegraf.conf -input-filter bcache -test
 ```

 It produces:
@@ -8,7 +8,8 @@ import (
     "strconv"
     "strings"

-    "github.com/influxdb/telegraf/plugins"
+    "github.com/influxdata/telegraf"
+    "github.com/influxdata/telegraf/plugins/inputs"
 )

 type Bcache struct {
@@ -17,14 +18,14 @@ type Bcache struct {
 }

 var sampleConfig = `
-  # Bcache sets path
-  # If not specified, then default is:
-  # bcachePath = "/sys/fs/bcache"
-  #
-  # By default, telegraf gather stats for all bcache devices
-  # Setting devices will restrict the stats to the specified
-  # bcache devices.
-  # bcacheDevs = ["bcache0", ...]
+  ## Bcache sets path
+  ## If not specified, then default is:
+  bcachePath = "/sys/fs/bcache"
+
+  ## By default, telegraf gather stats for all bcache devices
+  ## Setting devices will restrict the stats to the specified
+  ## bcache devices.
+  bcacheDevs = ["bcache0"]
 `

 func (b *Bcache) SampleConfig() string {
@@ -69,7 +70,7 @@ func prettyToBytes(v string) uint64 {
     return uint64(result)
 }

-func (b *Bcache) gatherBcache(bdev string, acc plugins.Accumulator) error {
+func (b *Bcache) gatherBcache(bdev string, acc telegraf.Accumulator) error {
     tags := getTags(bdev)
     metrics, err := filepath.Glob(bdev + "/stats_total/*")
     if len(metrics) < 0 {
@@ -81,7 +82,9 @@ func (b *Bcache) gatherBcache(bdev string, acc plugins.Accumulator) error {
     }
     rawValue := strings.TrimSpace(string(file))
     value := prettyToBytes(rawValue)
-    acc.Add("dirty_data", value, tags)
+
+    fields := make(map[string]interface{})
+    fields["dirty_data"] = value

     for _, path := range metrics {
         key := filepath.Base(path)
@@ -92,16 +95,17 @@ func (b *Bcache) gatherBcache(bdev string, acc plugins.Accumulator) error {
         }
         if key == "bypassed" {
             value := prettyToBytes(rawValue)
-            acc.Add(key, value, tags)
+            fields[key] = value
         } else {
             value, _ := strconv.ParseUint(rawValue, 10, 64)
-            acc.Add(key, value, tags)
+            fields[key] = value
         }
     }
+    acc.AddFields("bcache", fields, tags)
     return nil
 }

-func (b *Bcache) Gather(acc plugins.Accumulator) error {
+func (b *Bcache) Gather(acc telegraf.Accumulator) error {
     bcacheDevsChecked := make(map[string]bool)
     var restrictDevs bool
     if len(b.BcacheDevs) != 0 {
@@ -117,7 +121,7 @@ func (b *Bcache) Gather(acc plugins.Accumulator) error {
     }
     bdevs, _ := filepath.Glob(bcachePath + "/*/bdev*")
     if len(bdevs) < 1 {
-        return errors.New("Can't found any bcache device")
+        return errors.New("Can't find any bcache device")
     }
     for _, bdev := range bdevs {
         if restrictDevs {
@@ -132,7 +136,7 @@ func (b *Bcache) Gather(acc plugins.Accumulator) error {
     }

 func init() {
-    plugins.Add("bcache", func() plugins.Plugin {
+    inputs.Add("bcache", func() telegraf.Input {
         return &Bcache{}
     })
 }
@@ -5,8 +5,7 @@ import (
     "os"
     "testing"

-    "github.com/influxdb/telegraf/testutil"
-    "github.com/stretchr/testify/assert"
+    "github.com/influxdata/telegraf/testutil"
     "github.com/stretchr/testify/require"
 )

@@ -29,11 +28,6 @@ var (
     testBcacheBackingDevPath = os.TempDir() + "/telegraf/sys/devices/virtual/block/md10"
 )

-type metrics struct {
-    name  string
-    value uint64
-}
-
 func TestBcacheGeneratesMetrics(t *testing.T) {
     err := os.MkdirAll(testBcacheUuidPath, 0755)
     require.NoError(t, err)
@@ -53,70 +47,52 @@ func TestBcacheGeneratesMetrics(t *testing.T) {
     err = os.MkdirAll(testBcacheUuidPath+"/bdev0/stats_total", 0755)
     require.NoError(t, err)

-    err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/dirty_data", []byte(dirty_data), 0644)
+    err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/dirty_data",
+        []byte(dirty_data), 0644)
     require.NoError(t, err)

-    err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/bypassed", []byte(bypassed), 0644)
+    err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/bypassed",
+        []byte(bypassed), 0644)
     require.NoError(t, err)

-    err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_bypass_hits", []byte(cache_bypass_hits), 0644)
+    err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_bypass_hits",
+        []byte(cache_bypass_hits), 0644)
     require.NoError(t, err)

-    err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_bypass_misses", []byte(cache_bypass_misses), 0644)
+    err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_bypass_misses",
+        []byte(cache_bypass_misses), 0644)
     require.NoError(t, err)

-    err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_hit_ratio", []byte(cache_hit_ratio), 0644)
+    err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_hit_ratio",
+        []byte(cache_hit_ratio), 0644)
     require.NoError(t, err)

-    err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_hits", []byte(cache_hits), 0644)
+    err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_hits",
+        []byte(cache_hits), 0644)
     require.NoError(t, err)

-    err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_miss_collisions", []byte(cache_miss_collisions), 0644)
+    err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_miss_collisions",
+        []byte(cache_miss_collisions), 0644)
     require.NoError(t, err)

-    err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_misses", []byte(cache_misses), 0644)
+    err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_misses",
+        []byte(cache_misses), 0644)
     require.NoError(t, err)

-    err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_readaheads", []byte(cache_readaheads), 0644)
+    err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_readaheads",
+        []byte(cache_readaheads), 0644)
     require.NoError(t, err)

-    intMetrics := []*metrics{
-        {
-            name:  "dirty_data",
-            value: 1610612736,
-        },
-        {
-            name:  "bypassed",
-            value: 5167704440832,
-        },
-        {
-            name:  "cache_bypass_hits",
-            value: 146155333,
-        },
-        {
-            name:  "cache_bypass_misses",
-            value: 0,
-        },
-        {
-            name:  "cache_hit_ratio",
-            value: 90,
-        },
-        {
-            name:  "cache_hits",
-            value: 511469583,
-        },
-        {
-            name:  "cache_miss_collisions",
-            value: 157567,
-        },
-        {
-            name:  "cache_misses",
-            value: 50616331,
-        },
-        {
-            name:  "cache_readaheads",
-            value: 2,
-        },
+    fields := map[string]interface{}{
+        "dirty_data":            uint64(1610612736),
+        "bypassed":              uint64(5167704440832),
+        "cache_bypass_hits":     uint64(146155333),
+        "cache_bypass_misses":   uint64(0),
+        "cache_hit_ratio":       uint64(90),
+        "cache_hits":            uint64(511469583),
+        "cache_miss_collisions": uint64(157567),
+        "cache_misses":          uint64(50616331),
+        "cache_readaheads":      uint64(2),
     }

     tags := map[string]string{
@@ -126,27 +102,19 @@ func TestBcacheGeneratesMetrics(t *testing.T) {

     var acc testutil.Accumulator

-    //all devs
+    // all devs
     b := &Bcache{BcachePath: testBcachePath}

     err = b.Gather(&acc)
     require.NoError(t, err)
+    acc.AssertContainsTaggedFields(t, "bcache", fields, tags)

-    for _, metric := range intMetrics {
-        assert.True(t, acc.HasUIntValue(metric.name), metric.name)
-        assert.True(t, acc.CheckTaggedValue(metric.name, metric.value, tags))
-    }
-
-    //one exist dev
+    // one exist dev
     b = &Bcache{BcachePath: testBcachePath, BcacheDevs: []string{"bcache0"}}

     err = b.Gather(&acc)
     require.NoError(t, err)
-    for _, metric := range intMetrics {
-        assert.True(t, acc.HasUIntValue(metric.name), metric.name)
-        assert.True(t, acc.CheckTaggedValue(metric.name, metric.value, tags))
-    }
+    acc.AssertContainsTaggedFields(t, "bcache", fields, tags)

     err = os.RemoveAll(os.TempDir() + "/telegraf")
     require.NoError(t, err)
125  plugins/inputs/cassandra/README.md  Normal file
@@ -0,0 +1,125 @@
+# Telegraf plugin: Cassandra
+
+#### Plugin arguments:
+- **context** string: Context root used for the jolokia url
+- **servers** []string: List of servers with the format "<user:passwd@><host>:port"
+- **metrics** []string: List of JMX paths that identify mbean attributes
+
+#### Description
+
+The Cassandra plugin collects Cassandra/JVM metrics exposed as MBean attributes through the jolokia REST endpoint. All metrics are collected for each server configured.
+
+See: https://jolokia.org/ and [Cassandra Documentation](http://docs.datastax.com/en/cassandra/3.x/cassandra/operations/monitoringCassandraTOC.html)
+
+# Measurements:
+The Cassandra plugin produces one or more measurements for each metric configured, adding the server's name as a `host` tag. More than one measurement is generated when querying table metrics with a wildcard for the keyspace or table name.
+
+Given a configuration like:
+
+```toml
+[[inputs.cassandra]]
+  context = "/jolokia/read"
+  servers = [":8778"]
+  metrics = ["/java.lang:type=Memory/HeapMemoryUsage"]
+```
+
+The collected metrics will be:
+
+```
+javaMemory,host=myHost,mname=HeapMemoryUsage HeapMemoryUsage_committed=1040187392,HeapMemoryUsage_init=1050673152,HeapMemoryUsage_max=1040187392,HeapMemoryUsage_used=368155000 1459551767230567084
+```
+
+# Useful Metrics:
+
+Here is a list of metrics that might be useful for monitoring your cassandra cluster. It was put together from multiple sources on the web.
+
+- [How to monitor Cassandra performance metrics](https://www.datadoghq.com/blog/how-to-monitor-cassandra-performance-metrics)
+- [Cassandra Documentation](http://docs.datastax.com/en/cassandra/3.x/cassandra/operations/monitoringCassandraTOC.html)
+
+#### measurement = javaGarbageCollector
+
+- /java.lang:type=GarbageCollector,name=ConcurrentMarkSweep/CollectionTime
+- /java.lang:type=GarbageCollector,name=ConcurrentMarkSweep/CollectionCount
+- /java.lang:type=GarbageCollector,name=ParNew/CollectionTime
+- /java.lang:type=GarbageCollector,name=ParNew/CollectionCount
+
+#### measurement = javaMemory
+
+- /java.lang:type=Memory/HeapMemoryUsage
+- /java.lang:type=Memory/NonHeapMemoryUsage
+
+#### measurement = cassandraCache
+
+- /org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=Hit
+- /org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=Requests
+- /org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=Entries
+- /org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=Size
+- /org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=Capacity
+- /org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=Hit
+- /org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=Requests
+- /org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=Entries
+- /org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=Size
+- /org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=Capacity
+
+#### measurement = cassandraClient
+
+- /org.apache.cassandra.metrics:type=Client,name=connectedNativeClients
+
+#### measurement = cassandraClientRequest
+
+- /org.apache.cassandra.metrics:type=ClientRequest,scope=Read,name=TotalLatency
+- /org.apache.cassandra.metrics:type=ClientRequest,scope=Write,name=TotalLatency
+- /org.apache.cassandra.metrics:type=ClientRequest,scope=Read,name=Latency
+- /org.apache.cassandra.metrics:type=ClientRequest,scope=Write,name=Latency
+- /org.apache.cassandra.metrics:type=ClientRequest,scope=Read,name=Timeouts
+- /org.apache.cassandra.metrics:type=ClientRequest,scope=Write,name=Timeouts
+- /org.apache.cassandra.metrics:type=ClientRequest,scope=Read,name=Unavailables
+- /org.apache.cassandra.metrics:type=ClientRequest,scope=Write,name=Unavailables
+- /org.apache.cassandra.metrics:type=ClientRequest,scope=Read,name=Failures
+- /org.apache.cassandra.metrics:type=ClientRequest,scope=Write,name=Failures
+
+#### measurement = cassandraCommitLog
+
+- /org.apache.cassandra.metrics:type=CommitLog,name=PendingTasks
+- /org.apache.cassandra.metrics:type=CommitLog,name=TotalCommitLogSize
+
+#### measurement = cassandraCompaction
+
+- /org.apache.cassandra.metrics:type=Compaction,name=CompletedTask
+- /org.apache.cassandra.metrics:type=Compaction,name=PendingTasks
+- /org.apache.cassandra.metrics:type=Compaction,name=TotalCompactionsCompleted
+- /org.apache.cassandra.metrics:type=Compaction,name=BytesCompacted
+
+#### measurement = cassandraStorage
+
+- /org.apache.cassandra.metrics:type=Storage,name=Load
+- /org.apache.cassandra.metrics:type=Storage,name=Exceptions
+
+#### measurement = cassandraTable
+Using wildcards for "keyspace" and "scope" can create a lot of series, as metrics will be reported for every table and keyspace, including internal system tables. Specify a keyspace name and/or a table name to limit them.
+
+- /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=LiveDiskSpaceUsed
+- /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=TotalDiskSpaceUsed
+- /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=ReadLatency
+- /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=CoordinatorReadLatency
+- /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=WriteLatency
+- /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=ReadTotalLatency
+- /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=WriteTotalLatency
+
+#### measurement = cassandraThreadPools
+
+- /org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=CompactionExecutor,name=ActiveTasks
+- /org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=AntiEntropyStage,name=ActiveTasks
+- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=CounterMutationStage,name=PendingTasks
+- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=CounterMutationStage,name=CurrentlyBlockedTasks
+- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=MutationStage,name=PendingTasks
+- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=MutationStage,name=CurrentlyBlockedTasks
+- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=ReadRepairStage,name=PendingTasks
+- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=ReadRepairStage,name=CurrentlyBlockedTasks
+- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=ReadStage,name=PendingTasks
+- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=ReadStage,name=CurrentlyBlockedTasks
+- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=RequestResponseStage,name=PendingTasks
+- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=RequestResponseStage,name=CurrentlyBlockedTasks
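The context/servers/metrics split in this README maps onto jolokia's read URL scheme: for every server/metric pair, a request URL of roughly `http://<host>:<port><context><metric>` is issued. A hedged sketch of that composition (the helper name `buildJolokiaURL` is mine, not the plugin's; the plugin's `getAttr`, shown below, takes an already-built `*url.URL`):

```go
package main

import "fmt"

// buildJolokiaURL is a hypothetical helper showing how the configured
// context root, server address, and metric path plausibly combine into
// a jolokia read URL.
func buildJolokiaURL(context, server, metric string) string {
    return "http://" + server + context + metric
}

func main() {
    servers := []string{"10.10.10.1:8778", "10.10.10.2:8778"}
    metrics := []string{"/java.lang:type=Memory/HeapMemoryUsage"}
    for _, s := range servers {
        for _, m := range metrics {
            fmt.Println(buildJolokiaURL("/jolokia/read", s, m))
        }
    }
}
```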
309
plugins/inputs/cassandra/cassandra.go
Normal file
309
plugins/inputs/cassandra/cassandra.go
Normal file
@@ -0,0 +1,309 @@
|
|||||||
|
package cassandra
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"github.com/influxdata/telegraf"
|
||||||
|
"github.com/influxdata/telegraf/plugins/inputs"
|
||||||
|
"io/ioutil"
|
||||||
|
"log"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
type JolokiaClient interface {
|
||||||
|
MakeRequest(req *http.Request) (*http.Response, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
type JolokiaClientImpl struct {
|
||||||
|
client *http.Client
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c JolokiaClientImpl) MakeRequest(req *http.Request) (*http.Response, error) {
|
||||||
|
return c.client.Do(req)
|
||||||
|
}
|
||||||
|
|
||||||
|
type Cassandra struct {
|
||||||
|
jClient JolokiaClient
|
||||||
|
Context string
|
||||||
|
Servers []string
|
||||||
|
Metrics []string
|
||||||
|
}
|
||||||
|
|
||||||
|
type javaMetric struct {
|
||||||
|
host string
|
||||||
|
metric string
|
||||||
|
acc telegraf.Accumulator
|
||||||
|
}
|
||||||
|
|
||||||
|
type cassandraMetric struct {
|
||||||
|
host string
|
||||||
|
metric string
|
||||||
|
acc telegraf.Accumulator
|
||||||
|
}
|
||||||
|
|
||||||
|
type jmxMetric interface {
|
||||||
|
addTagsFields(out map[string]interface{})
|
||||||
|
}
|
||||||
|
|
||||||
|
func newJavaMetric(host string, metric string,
|
||||||
|
acc telegraf.Accumulator) *javaMetric {
|
||||||
|
return &javaMetric{host: host, metric: metric, acc: acc}
|
||||||
|
}
|
||||||
|
|
||||||
|
func newCassandraMetric(host string, metric string,
|
||||||
|
acc telegraf.Accumulator) *cassandraMetric {
|
||||||
|
return &cassandraMetric{host: host, metric: metric, acc: acc}
|
||||||
|
}
|
||||||
|
|
||||||
|
func addValuesAsFields(values map[string]interface{}, fields map[string]interface{},
|
||||||
|
mname string) {
|
||||||
|
for k, v := range values {
|
||||||
|
if v != nil {
|
||||||
|
fields[mname+"_"+k] = v
|
||||||
|
}
|
||||||
|
}
|
}

// parseJmxMetricRequest splits a JMX mbean path such as
// "org.apache.cassandra.metrics:type=Table,keyspace=ks,name=ReadLatency"
// into a map of tokens keyed by "class" plus each key=value pair.
func parseJmxMetricRequest(mbean string) map[string]string {
	tokens := make(map[string]string)
	classAndPairs := strings.Split(mbean, ":")
	if classAndPairs[0] == "org.apache.cassandra.metrics" {
		tokens["class"] = "cassandra"
	} else if classAndPairs[0] == "java.lang" {
		tokens["class"] = "java"
	} else {
		return tokens
	}
	pairs := strings.Split(classAndPairs[1], ",")
	for _, pair := range pairs {
		p := strings.Split(pair, "=")
		tokens[p[0]] = p[1]
	}
	return tokens
}

// addTokensToTags copies the parsed mbean tokens into the metric's tag set.
func addTokensToTags(tokens map[string]string, tags map[string]string) {
	for k, v := range tokens {
		if k == "name" {
			tags["mname"] = v // "name" seems to be a reserved word in InfluxDB
		} else if k == "class" || k == "type" {
			continue // class and type are used in the metric name
		} else {
			tags[k] = v
		}
	}
}

func (j javaMetric) addTagsFields(out map[string]interface{}) {
	tags := make(map[string]string)
	fields := make(map[string]interface{})

	a := out["request"].(map[string]interface{})
	attribute := a["attribute"].(string)
	mbean := a["mbean"].(string)

	tokens := parseJmxMetricRequest(mbean)
	addTokensToTags(tokens, tags)
	tags["cassandra_host"] = j.host

	if _, ok := tags["mname"]; !ok {
		// Queries for a single value will not return a "name" tag in the response.
		tags["mname"] = attribute
	}

	if values, ok := out["value"]; ok {
		switch t := values.(type) {
		case map[string]interface{}:
			addValuesAsFields(values.(map[string]interface{}), fields, attribute)
		case interface{}:
			fields[attribute] = t
		}
		j.acc.AddFields(tokens["class"]+tokens["type"], fields, tags)
	} else {
		fmt.Printf("Missing key 'value' in '%s' output response\n%v\n",
			j.metric, out)
	}
}

func addCassandraMetric(mbean string, c cassandraMetric,
	values map[string]interface{}) {

	tags := make(map[string]string)
	fields := make(map[string]interface{})
	tokens := parseJmxMetricRequest(mbean)
	addTokensToTags(tokens, tags)
	tags["cassandra_host"] = c.host
	addValuesAsFields(values, fields, tags["mname"])
	c.acc.AddFields(tokens["class"]+tokens["type"], fields, tags)
}

func (c cassandraMetric) addTagsFields(out map[string]interface{}) {
	r := out["request"]

	tokens := parseJmxMetricRequest(r.(map[string]interface{})["mbean"].(string))
	// Requests with wildcards for keyspace or table names will return nested
	// maps in the JSON response.
	if tokens["type"] == "Table" && (tokens["keyspace"] == "*" ||
		tokens["scope"] == "*") {
		if valuesMap, ok := out["value"]; ok {
			for k, v := range valuesMap.(map[string]interface{}) {
				addCassandraMetric(k, c, v.(map[string]interface{}))
			}
		} else {
			fmt.Printf("Missing key 'value' in '%s' output response\n%v\n",
				c.metric, out)
			return
		}
	} else {
		if values, ok := out["value"]; ok {
			addCassandraMetric(r.(map[string]interface{})["mbean"].(string),
				c, values.(map[string]interface{}))
		} else {
			fmt.Printf("Missing key 'value' in '%s' output response\n%v\n",
				c.metric, out)
			return
		}
	}
}

func (j *Cassandra) SampleConfig() string {
	return `
  ## The context root used to compose the Jolokia URL
  context = "/jolokia/read"
  ## List of Cassandra servers exposing the Jolokia read service
  servers = ["myuser:mypassword@10.10.10.1:8778","10.10.10.2:8778",":8778"]
  ## List of metrics collected on the above servers.
  ## Each metric consists of a JMX path.
  ## This will collect all heap memory usage metrics from the JVM and
  ## ReadLatency metrics for all keyspaces and tables.
  ## "type=Table" in the query works with Cassandra 3.0. Older versions might
  ## need to use "type=ColumnFamily".
  metrics = [
    "/java.lang:type=Memory/HeapMemoryUsage",
    "/org.apache.cassandra.metrics:type=Table,keyspace=*,scope=*,name=ReadLatency"
  ]
`
}
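// For example, given the sample config above, the first server and the first
// metric combine into a request URL like the following (illustrative only;
// see Gather below for how the URL is actually composed):
//
//   http://10.10.10.1:8778/jolokia/read/java.lang:type=Memory/HeapMemoryUsage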

func (j *Cassandra) Description() string {
	return "Read Cassandra metrics through Jolokia"
}

func (j *Cassandra) getAttr(requestUrl *url.URL) (map[string]interface{}, error) {
	// Create + send request
	req, err := http.NewRequest("GET", requestUrl.String(), nil)
	if err != nil {
		return nil, err
	}

	resp, err := j.jClient.MakeRequest(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	// Process response
	if resp.StatusCode != http.StatusOK {
		err = fmt.Errorf("Response from url \"%s\" has status code %d (%s), expected %d (%s)",
			requestUrl,
			resp.StatusCode,
			http.StatusText(resp.StatusCode),
			http.StatusOK,
			http.StatusText(http.StatusOK))
		return nil, err
	}

	// Read body
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}

	// Unmarshal json
	var jsonOut map[string]interface{}
	if err = json.Unmarshal([]byte(body), &jsonOut); err != nil {
		return nil, errors.New("Error decoding JSON response")
	}

	return jsonOut, nil
}

// parseServerTokens splits a server string of the form
// "[user:passwd@]host:port" into its host, port, user, and passwd parts.
func parseServerTokens(server string) map[string]string {
	serverTokens := make(map[string]string)

	hostAndUser := strings.Split(server, "@")
	hostPort := ""
	userPasswd := ""
	if len(hostAndUser) == 2 {
		hostPort = hostAndUser[1]
		userPasswd = hostAndUser[0]
	} else {
		hostPort = hostAndUser[0]
	}
	hostTokens := strings.Split(hostPort, ":")
	serverTokens["host"] = hostTokens[0]
	serverTokens["port"] = hostTokens[1]

	if userPasswd != "" {
		userTokens := strings.Split(userPasswd, ":")
		serverTokens["user"] = userTokens[0]
		serverTokens["passwd"] = userTokens[1]
	}
	return serverTokens
}
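// Illustrative examples of the token map produced by parseServerTokens
// (hypothetical inputs, not part of the plugin code):
//
//   parseServerTokens("myuser:mypassword@10.10.10.1:8778")
//     => {"host": "10.10.10.1", "port": "8778", "user": "myuser", "passwd": "mypassword"}
//   parseServerTokens("10.10.10.2:8778")
//     => {"host": "10.10.10.2", "port": "8778"}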

func (c *Cassandra) Gather(acc telegraf.Accumulator) error {
	context := c.Context
	servers := c.Servers
	metrics := c.Metrics

	for _, server := range servers {
		for _, metric := range metrics {
			serverTokens := parseServerTokens(server)

			var m jmxMetric
			if strings.HasPrefix(metric, "/java.lang:") {
				m = newJavaMetric(serverTokens["host"], metric, acc)
			} else if strings.HasPrefix(metric,
				"/org.apache.cassandra.metrics:") {
				m = newCassandraMetric(serverTokens["host"], metric, acc)
			} else {
				// Unsupported metric type
				log.Printf("Unsupported Cassandra metric [%s], skipping",
					metric)
				continue
			}

			// Prepare URL
			requestUrl, err := url.Parse("http://" + serverTokens["host"] + ":" +
				serverTokens["port"] + context + metric)
			if err != nil {
				return err
			}
			if serverTokens["user"] != "" && serverTokens["passwd"] != "" {
				requestUrl.User = url.UserPassword(serverTokens["user"],
					serverTokens["passwd"])
			}
			fmt.Printf("host %s url %s\n", serverTokens["host"], requestUrl)

			out, err := c.getAttr(requestUrl)
			if err != nil {
				return err
			}
			if out["status"] != 200.0 {
				fmt.Printf("URL returned with status %v\n", out["status"])
				continue
			}
			m.addTagsFields(out)
		}
	}
	return nil
}

func init() {
	inputs.Add("cassandra", func() telegraf.Input {
		return &Cassandra{jClient: &JolokiaClientImpl{client: &http.Client{}}}
	})
}
286 plugins/inputs/cassandra/cassandra_test.go (Normal file)
@@ -0,0 +1,286 @@
package cassandra

import (
	_ "fmt"
	"io/ioutil"
	"net/http"
	"strings"
	"testing"

	"github.com/influxdata/telegraf/testutil"
	"github.com/stretchr/testify/assert"
	_ "github.com/stretchr/testify/require"
)

const validJavaMultiValueJSON = `
{
  "request":{
    "mbean":"java.lang:type=Memory",
    "attribute":"HeapMemoryUsage",
    "type":"read"
  },
  "value":{
    "init":67108864,
    "committed":456130560,
    "max":477626368,
    "used":203288528
  },
  "timestamp":1446129191,
  "status":200
}`

const validCassandraMultiValueJSON = `
{
  "request": {
    "mbean": "org.apache.cassandra.metrics:keyspace=test_keyspace1,name=ReadLatency,scope=test_table,type=Table",
    "type": "read"},
  "status": 200,
  "timestamp": 1458089229,
  "value": {
    "999thPercentile": 20.0,
    "99thPercentile": 10.0,
    "Count": 400,
    "DurationUnit": "microseconds",
    "Max": 30.0,
    "Mean": null,
    "MeanRate": 3.0,
    "Min": 1.0,
    "RateUnit": "events/second",
    "StdDev": null
  }
}`

const validCassandraNestedMultiValueJSON = `
{
  "request": {
    "mbean": "org.apache.cassandra.metrics:keyspace=test_keyspace1,name=ReadLatency,scope=*,type=Table",
    "type": "read"},
  "status": 200,
  "timestamp": 1458089184,
  "value": {
    "org.apache.cassandra.metrics:keyspace=test_keyspace1,name=ReadLatency,scope=test_table1,type=Table":
      { "999thPercentile": 1.0,
        "Count": 100,
        "DurationUnit": "microseconds",
        "OneMinuteRate": 1.0,
        "RateUnit": "events/second",
        "StdDev": null
      },
    "org.apache.cassandra.metrics:keyspace=test_keyspace2,name=ReadLatency,scope=test_table2,type=Table":
      { "999thPercentile": 2.0,
        "Count": 200,
        "DurationUnit": "microseconds",
        "OneMinuteRate": 2.0,
        "RateUnit": "events/second",
        "StdDev": null
      }
  }
}`

const validSingleValueJSON = `
{
  "request":{
    "path":"used",
    "mbean":"java.lang:type=Memory",
    "attribute":"HeapMemoryUsage",
    "type":"read"
  },
  "value":209274376,
  "timestamp":1446129256,
  "status":200
}`

const validJavaMultiTypeJSON = `
{
  "request":{
    "mbean":"java.lang:name=ConcurrentMarkSweep,type=GarbageCollector",
    "attribute":"CollectionCount",
    "type":"read"
  },
  "value":1,
  "timestamp":1459316570,
  "status":200
}`

const invalidJSON = "I don't think this is JSON"

const empty = ""

var Servers = []string{"10.10.10.10:8778"}
var AuthServers = []string{"user:passwd@10.10.10.10:8778"}
var MultipleServers = []string{"10.10.10.10:8778", "10.10.10.11:8778"}
var HeapMetric = "/java.lang:type=Memory/HeapMemoryUsage"
var ReadLatencyMetric = "/org.apache.cassandra.metrics:type=Table,keyspace=test_keyspace1,scope=test_table,name=ReadLatency"
var NestedReadLatencyMetric = "/org.apache.cassandra.metrics:type=Table,keyspace=test_keyspace1,scope=*,name=ReadLatency"
var GarbageCollectorMetric1 = "/java.lang:type=GarbageCollector,name=ConcurrentMarkSweep/CollectionCount"
var GarbageCollectorMetric2 = "/java.lang:type=GarbageCollector,name=ConcurrentMarkSweep/CollectionTime"
var Context = "/jolokia/read"

type jolokiaClientStub struct {
	responseBody string
	statusCode   int
}

func (c jolokiaClientStub) MakeRequest(req *http.Request) (*http.Response, error) {
	resp := http.Response{}
	resp.StatusCode = c.statusCode
	resp.Body = ioutil.NopCloser(strings.NewReader(c.responseBody))
	return &resp, nil
}

// Generates a pointer to a Cassandra object that uses a mock HTTP client.
// Parameters:
//     response  : Body of the response that the mock HTTP client should return
//     statusCode: HTTP status code the mock HTTP client should return
//     servers   : List of servers the plugin should query
//     metrics   : List of metrics the plugin should collect
//
// Returns:
//     *Cassandra: Pointer to a Cassandra object that uses the generated mock HTTP client
func genJolokiaClientStub(response string, statusCode int, servers []string, metrics []string) *Cassandra {
	return &Cassandra{
		jClient: jolokiaClientStub{responseBody: response, statusCode: statusCode},
		Context: Context,
		Servers: servers,
		Metrics: metrics,
	}
}

// Test that the proper values are ignored or collected for class=Java
func TestHttpJsonJavaMultiValue(t *testing.T) {
	cassandra := genJolokiaClientStub(validJavaMultiValueJSON, 200,
		MultipleServers, []string{HeapMetric})

	var acc testutil.Accumulator
	acc.SetDebug(true)
	err := cassandra.Gather(&acc)

	assert.Nil(t, err)
	assert.Equal(t, 2, len(acc.Metrics))

	fields := map[string]interface{}{
		"HeapMemoryUsage_init":      67108864.0,
		"HeapMemoryUsage_committed": 456130560.0,
		"HeapMemoryUsage_max":       477626368.0,
		"HeapMemoryUsage_used":      203288528.0,
	}
	tags1 := map[string]string{
		"cassandra_host": "10.10.10.10",
		"mname":          "HeapMemoryUsage",
	}

	tags2 := map[string]string{
		"cassandra_host": "10.10.10.11",
		"mname":          "HeapMemoryUsage",
	}
	acc.AssertContainsTaggedFields(t, "javaMemory", fields, tags1)
	acc.AssertContainsTaggedFields(t, "javaMemory", fields, tags2)
}

func TestHttpJsonJavaMultiType(t *testing.T) {
	cassandra := genJolokiaClientStub(validJavaMultiTypeJSON, 200, AuthServers, []string{GarbageCollectorMetric1, GarbageCollectorMetric2})

	var acc testutil.Accumulator
	acc.SetDebug(true)
	err := cassandra.Gather(&acc)

	assert.Nil(t, err)
	assert.Equal(t, 2, len(acc.Metrics))

	fields := map[string]interface{}{
		"CollectionCount": 1.0,
	}

	tags := map[string]string{
		"cassandra_host": "10.10.10.10",
		"mname":          "ConcurrentMarkSweep",
	}
	acc.AssertContainsTaggedFields(t, "javaGarbageCollector", fields, tags)
}

// Test that the proper values are ignored or collected
func TestHttpJsonOn404(t *testing.T) {
	jolokia := genJolokiaClientStub(validJavaMultiValueJSON, 404, Servers,
		[]string{HeapMetric})

	var acc testutil.Accumulator
	err := jolokia.Gather(&acc)

	assert.Nil(t, err)
	assert.Equal(t, 0, len(acc.Metrics))
}

// Test that the proper values are ignored or collected for class=Cassandra
func TestHttpJsonCassandraMultiValue(t *testing.T) {
	cassandra := genJolokiaClientStub(validCassandraMultiValueJSON, 200, Servers, []string{ReadLatencyMetric})

	var acc testutil.Accumulator
	err := cassandra.Gather(&acc)

	assert.Nil(t, err)
	assert.Equal(t, 1, len(acc.Metrics))

	fields := map[string]interface{}{
		"ReadLatency_999thPercentile": 20.0,
		"ReadLatency_99thPercentile":  10.0,
		"ReadLatency_Count":           400.0,
		"ReadLatency_DurationUnit":    "microseconds",
		"ReadLatency_Max":             30.0,
		"ReadLatency_MeanRate":        3.0,
		"ReadLatency_Min":             1.0,
		"ReadLatency_RateUnit":        "events/second",
	}

	tags := map[string]string{
		"cassandra_host": "10.10.10.10",
		"mname":          "ReadLatency",
		"keyspace":       "test_keyspace1",
		"scope":          "test_table",
	}
	acc.AssertContainsTaggedFields(t, "cassandraTable", fields, tags)
}

// Test that the proper values are ignored or collected for class=Cassandra with
// nested values
func TestHttpJsonCassandraNestedMultiValue(t *testing.T) {
	cassandra := genJolokiaClientStub(validCassandraNestedMultiValueJSON, 200, Servers, []string{NestedReadLatencyMetric})

	var acc testutil.Accumulator
	acc.SetDebug(true)
	err := cassandra.Gather(&acc)

	assert.Nil(t, err)
	assert.Equal(t, 2, len(acc.Metrics))

	fields1 := map[string]interface{}{
		"ReadLatency_999thPercentile": 1.0,
		"ReadLatency_Count":           100.0,
		"ReadLatency_DurationUnit":    "microseconds",
		"ReadLatency_OneMinuteRate":   1.0,
		"ReadLatency_RateUnit":        "events/second",
	}

	fields2 := map[string]interface{}{
		"ReadLatency_999thPercentile": 2.0,
		"ReadLatency_Count":           200.0,
		"ReadLatency_DurationUnit":    "microseconds",
		"ReadLatency_OneMinuteRate":   2.0,
		"ReadLatency_RateUnit":        "events/second",
	}

	tags1 := map[string]string{
		"cassandra_host": "10.10.10.10",
		"mname":          "ReadLatency",
		"keyspace":       "test_keyspace1",
		"scope":          "test_table1",
	}

	tags2 := map[string]string{
		"cassandra_host": "10.10.10.10",
		"mname":          "ReadLatency",
		"keyspace":       "test_keyspace2",
		"scope":          "test_table2",
	}

	acc.AssertContainsTaggedFields(t, "cassandraTable", fields1, tags1)
	acc.AssertContainsTaggedFields(t, "cassandraTable", fields2, tags2)
}
109 plugins/inputs/ceph/README.md (Normal file)
@@ -0,0 +1,109 @@
# Ceph Storage Input Plugin

Collects performance metrics from the MON and OSD nodes in a Ceph storage cluster.

The plugin works by scanning the configured SocketDir for OSD and MON socket files. When it finds
a MON socket, it runs **ceph --admin-daemon $file perfcounters_dump**. For OSDs it runs **ceph --admin-daemon $file perf dump**.

The resulting JSON is parsed and grouped into collections, based on the top-level key. Top-level keys are
used as collection tags, and all sub-keys are flattened. For example:

```
{
  "paxos": {
    "refresh": 9363435,
    "refresh_latency": {
      "avgcount": 9363435,
      "sum": 5378.794002000
    }
  }
}
```

Would be parsed into the following metrics, all of which would be tagged with collection=paxos:

- refresh = 9363435
- refresh_latency.avgcount = 9363435
- refresh_latency.sum = 5378.794002000
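The sketch below illustrates this flattening step in isolation (a hypothetical helper name; the plugin's actual implementation is the `flatten` function in `ceph.go`):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// flattenKeys joins nested JSON keys with "." so that
// {"refresh_latency": {"sum": 5378.79}} yields {"refresh_latency.sum": 5378.79}.
func flattenKeys(prefix string, v interface{}, out map[string]float64) {
	switch val := v.(type) {
	case float64: // encoding/json decodes every JSON number as float64
		out[prefix] = val
	case map[string]interface{}:
		for k, child := range val {
			key := k
			if prefix != "" {
				key = prefix + "." + k
			}
			flattenKeys(key, child, out)
		}
	}
}

func main() {
	raw := `{"refresh": 9363435, "refresh_latency": {"avgcount": 9363435, "sum": 5378.794002}}`
	var data map[string]interface{}
	if err := json.Unmarshal([]byte(raw), &data); err != nil {
		panic(err)
	}
	fields := make(map[string]float64)
	flattenKeys("", data, fields)
	fmt.Println(fields["refresh_latency.sum"]) // 5378.794002
}
```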
### Configuration:

```
# Collects performance metrics from the MON and OSD nodes in a Ceph storage cluster.
[[inputs.ceph]]
  ## All configuration values are optional, defaults are shown below

  ## location of ceph binary
  ceph_binary = "/usr/bin/ceph"

  ## directory in which to look for socket files
  socket_dir = "/var/run/ceph"

  ## prefix of MON and OSD socket files, used to determine socket type
  mon_prefix = "ceph-mon"
  osd_prefix = "ceph-osd"

  ## suffix used to identify socket files
  socket_suffix = "asok"
```

### Measurements & Fields:

All fields are collected under the **ceph** measurement and stored as float64s. For a full list of fields, see the sample perf dumps in ceph_test.go.

### Tags:

All measurements will have the following tags:

- type: either 'osd' or 'mon' to indicate which type of node was queried
- id: a unique string identifier, parsed from the socket file name for the node
- collection: the top-level key under which these fields were reported. Possible values are:
  - for MON nodes:
    - cluster
    - leveldb
    - mon
    - paxos
    - throttle-mon_client_bytes
    - throttle-mon_daemon_bytes
    - throttle-msgr_dispatch_throttler-mon
  - for OSD nodes:
    - WBThrottle
    - filestore
    - leveldb
    - mutex-FileJournal::completions_lock
    - mutex-FileJournal::finisher_lock
    - mutex-FileJournal::write_lock
    - mutex-FileJournal::writeq_lock
    - mutex-JOS::ApplyManager::apply_lock
    - mutex-JOS::ApplyManager::com_lock
    - mutex-JOS::SubmitManager::lock
    - mutex-WBThrottle::lock
    - objecter
    - osd
    - recoverystate_perf
    - throttle-filestore_bytes
    - throttle-filestore_ops
    - throttle-msgr_dispatch_throttler-client
    - throttle-msgr_dispatch_throttler-cluster
    - throttle-msgr_dispatch_throttler-hb_back_server
    - throttle-msgr_dispatch_throttler-hb_front_server
    - throttle-msgr_dispatch_throttler-hbclient
    - throttle-msgr_dispatch_throttler-ms_objecter
    - throttle-objecter_bytes
    - throttle-objecter_ops
    - throttle-osd_client_bytes
    - throttle-osd_client_messages

### Example Output:

```
telegraf -test -config /etc/telegraf/telegraf.conf -config-directory /etc/telegraf/telegraf.d -input-filter ceph
* Plugin: ceph, Collection 1
> ceph,collection=paxos,id=node-2,role=openstack,type=mon accept_timeout=0,begin=14931264,begin_bytes.avgcount=14931264,begin_bytes.sum=180309683362,begin_keys.avgcount=0,begin_keys.sum=0,begin_latency.avgcount=14931264,begin_latency.sum=9293.29589,collect=1,collect_bytes.avgcount=1,collect_bytes.sum=24,collect_keys.avgcount=1,collect_keys.sum=1,collect_latency.avgcount=1,collect_latency.sum=0.00028,collect_timeout=0,collect_uncommitted=0,commit=14931264,commit_bytes.avgcount=0,commit_bytes.sum=0,commit_keys.avgcount=0,commit_keys.sum=0,commit_latency.avgcount=0,commit_latency.sum=0,lease_ack_timeout=0,lease_timeout=0,new_pn=0,new_pn_latency.avgcount=0,new_pn_latency.sum=0,refresh=14931264,refresh_latency.avgcount=14931264,refresh_latency.sum=8706.98498,restart=4,share_state=0,share_state_bytes.avgcount=0,share_state_bytes.sum=0,share_state_keys.avgcount=0,share_state_keys.sum=0,start_leader=0,start_peon=1,store_state=14931264,store_state_bytes.avgcount=14931264,store_state_bytes.sum=353119959211,store_state_keys.avgcount=14931264,store_state_keys.sum=289807523,store_state_latency.avgcount=14931264,store_state_latency.sum=10952.835724 1462821234814535148
> ceph,collection=throttle-mon_client_bytes,id=node-2,type=mon get=1413017,get_or_fail_fail=0,get_or_fail_success=0,get_sum=71211705,max=104857600,put=1413013,put_sum=71211459,take=0,take_sum=0,val=246,wait.avgcount=0,wait.sum=0 1462821234814737219
> ceph,collection=throttle-mon_daemon_bytes,id=node-2,type=mon get=4058121,get_or_fail_fail=0,get_or_fail_success=0,get_sum=6027348117,max=419430400,put=4058121,put_sum=6027348117,take=0,take_sum=0,val=0,wait.avgcount=0,wait.sum=0 1462821234814815661
> ceph,collection=throttle-msgr_dispatch_throttler-mon,id=node-2,type=mon get=54276277,get_or_fail_fail=0,get_or_fail_success=0,get_sum=370232877040,max=104857600,put=54276277,put_sum=370232877040,take=0,take_sum=0,val=0,wait.avgcount=0,wait.sum=0 1462821234814872064
```
249 plugins/inputs/ceph/ceph.go (Normal file)
@@ -0,0 +1,249 @@
package ceph

import (
	"bytes"
	"encoding/json"
	"fmt"
	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/inputs"
	"io/ioutil"
	"log"
	"os/exec"
	"path/filepath"
	"strings"
)

const (
	measurement = "ceph"
	typeMon     = "monitor"
	typeOsd     = "osd"
	osdPrefix   = "ceph-osd"
	monPrefix   = "ceph-mon"
	sockSuffix  = "asok"
)

type Ceph struct {
	CephBinary   string
	OsdPrefix    string
	MonPrefix    string
	SocketDir    string
	SocketSuffix string
}

// setDefaults fills in any unset configuration values with the package defaults.
func (c *Ceph) setDefaults() {
	if c.CephBinary == "" {
		c.CephBinary = "/usr/bin/ceph"
	}

	if c.OsdPrefix == "" {
		c.OsdPrefix = osdPrefix
	}

	if c.MonPrefix == "" {
		c.MonPrefix = monPrefix
	}

	if c.SocketDir == "" {
		c.SocketDir = "/var/run/ceph"
	}

	if c.SocketSuffix == "" {
		c.SocketSuffix = sockSuffix
	}
}

func (c *Ceph) Description() string {
	return "Collects performance metrics from the MON and OSD nodes in a Ceph storage cluster."
}

var sampleConfig = `
  ## All configuration values are optional, defaults are shown below

  ## location of ceph binary
  ceph_binary = "/usr/bin/ceph"

  ## directory in which to look for socket files
  socket_dir = "/var/run/ceph"

  ## prefix of MON and OSD socket files, used to determine socket type
  mon_prefix = "ceph-mon"
  osd_prefix = "ceph-osd"

  ## suffix used to identify socket files
  socket_suffix = "asok"
`

func (c *Ceph) SampleConfig() string {
	return sampleConfig
}

func (c *Ceph) Gather(acc telegraf.Accumulator) error {
	c.setDefaults()
	sockets, err := findSockets(c)
	if err != nil {
		return fmt.Errorf("failed to find sockets at path '%s': %v", c.SocketDir, err)
	}

	for _, s := range sockets {
		dump, err := perfDump(c.CephBinary, s)
		if err != nil {
			log.Printf("error reading from socket '%s': %v", s.socket, err)
			continue
		}
		data, err := parseDump(dump)
		if err != nil {
			log.Printf("error parsing dump from socket '%s': %v", s.socket, err)
			continue
		}
		for tag, metrics := range *data {
			acc.AddFields(measurement,
				map[string]interface{}(metrics),
				map[string]string{"type": s.sockType, "id": s.sockId, "collection": tag})
		}
	}
	return nil
}

func init() {
	inputs.Add(measurement, func() telegraf.Input { return &Ceph{} })
}

// perfDump runs the appropriate admin-daemon dump command for the socket type
// and returns the raw JSON output.
var perfDump = func(binary string, socket *socket) (string, error) {
	cmdArgs := []string{"--admin-daemon", socket.socket}
	if socket.sockType == typeOsd {
		cmdArgs = append(cmdArgs, "perf", "dump")
	} else if socket.sockType == typeMon {
		cmdArgs = append(cmdArgs, "perfcounters_dump")
	} else {
		return "", fmt.Errorf("ignoring unknown socket type: %s", socket.sockType)
	}

	cmd := exec.Command(binary, cmdArgs...)
	var out bytes.Buffer
	cmd.Stdout = &out
	err := cmd.Run()
	if err != nil {
		return "", fmt.Errorf("error running ceph dump: %s", err)
	}

	return out.String(), nil
}

// findSockets scans the socket directory for MON and OSD admin sockets.
var findSockets = func(c *Ceph) ([]*socket, error) {
	listing, err := ioutil.ReadDir(c.SocketDir)
	if err != nil {
		return []*socket{}, fmt.Errorf("Failed to read socket directory '%s': %v", c.SocketDir, err)
	}
	sockets := make([]*socket, 0, len(listing))
	for _, info := range listing {
		f := info.Name()
		var sockType string
		var sockPrefix string
		if strings.HasPrefix(f, c.MonPrefix) {
			sockType = typeMon
			sockPrefix = monPrefix
		}
		if strings.HasPrefix(f, c.OsdPrefix) {
			sockType = typeOsd
			sockPrefix = osdPrefix
		}
		if sockType == typeOsd || sockType == typeMon {
			path := filepath.Join(c.SocketDir, f)
			sockets = append(sockets, &socket{parseSockId(f, sockPrefix, c.SocketSuffix), sockType, path})
		}
	}
	return sockets, nil
}

// parseSockId strips the prefix, suffix, and separator characters from a
// socket file name, leaving the node's unique id.
func parseSockId(fname, prefix, suffix string) string {
	s := fname
	s = strings.TrimPrefix(s, prefix)
	s = strings.TrimSuffix(s, suffix)
	s = strings.Trim(s, ".-_")
	return s
}
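// Illustrative example (mirrors TestParseSockId in ceph_test.go):
//
//   parseSockId("ceph-osd.1.asok", "ceph-osd", "asok") == "1"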

type socket struct {
	sockId   string
	sockType string
	socket   string
}

type metric struct {
	pathStack []string // lifo stack of name components
	value     float64
}

// Pops names off pathStack to build the flattened name for a metric
func (m *metric) name() string {
	buf := bytes.Buffer{}
	for i := len(m.pathStack) - 1; i >= 0; i-- {
		if buf.Len() > 0 {
			buf.WriteString(".")
		}
		buf.WriteString(m.pathStack[i])
	}
	return buf.String()
}

type metricMap map[string]interface{}

type taggedMetricMap map[string]metricMap

// Parses a raw JSON string into a taggedMetricMap.
// Delegates the actual parsing to newTaggedMetricMap(..)
func parseDump(dump string) (*taggedMetricMap, error) {
	data := make(map[string]interface{})
	err := json.Unmarshal([]byte(dump), &data)
	if err != nil {
		return nil, fmt.Errorf("failed to parse json: '%s': %v", dump, err)
	}

	return newTaggedMetricMap(data), nil
}

// Builds a taggedMetricMap out of a generic string map.
// The top-level key is used as a tag and all sub-keys are flattened into metrics
func newTaggedMetricMap(data map[string]interface{}) *taggedMetricMap {
	tmm := make(taggedMetricMap)
	for tag, datapoints := range data {
		mm := make(metricMap)
		for _, m := range flatten(datapoints) {
			mm[m.name()] = m.value
		}
		tmm[tag] = mm
	}
	return &tmm
}

// Recursively flattens any k-v hierarchy present in data.
// Nested keys are flattened into ordered slices associated with a metric value.
// The key slices are treated as stacks, and are expected to be reversed and concatenated
// when passed as metrics to the accumulator. (see (*metric).name())
func flatten(data interface{}) []*metric {
	var metrics []*metric

	switch val := data.(type) {
	case float64:
		metrics = []*metric{&metric{make([]string, 0, 1), val}}
	case map[string]interface{}:
		metrics = make([]*metric, 0, len(val))
		for k, v := range val {
			for _, m := range flatten(v) {
				m.pathStack = append(m.pathStack, k)
				metrics = append(metrics, m)
			}
		}
	default:
		log.Printf("Ignoring unexpected type '%T' for value %v", val, val)
	}

	return metrics
}
682 plugins/inputs/ceph/ceph_test.go (Normal file)
@@ -0,0 +1,682 @@
package ceph

import (
	"fmt"
	"github.com/influxdata/telegraf/testutil"
	"github.com/stretchr/testify/assert"
	"io/ioutil"
	"os"
	"path"
	"strconv"
	"strings"
	"testing"
)

const (
	epsilon = float64(0.00000001)
)

func TestParseSockId(t *testing.T) {
	s := parseSockId(sockFile(osdPrefix, 1), osdPrefix, sockSuffix)
	assert.Equal(t, s, "1")
}

func TestParseMonDump(t *testing.T) {
	dump, err := parseDump(monPerfDump)
	assert.NoError(t, err)
	assert.InEpsilon(t, 5678670180, (*dump)["cluster"]["osd_kb_used"], epsilon)
	assert.InEpsilon(t, 6866.540527000, (*dump)["paxos"]["store_state_latency.sum"], epsilon)
}

func TestParseOsdDump(t *testing.T) {
	dump, err := parseDump(osdPerfDump)
	assert.NoError(t, err)
	assert.InEpsilon(t, 552132.109360000, (*dump)["filestore"]["commitcycle_interval.sum"], epsilon)
	assert.Equal(t, float64(0), (*dump)["mutex-FileJournal::finisher_lock"]["wait.avgcount"])
}

func TestGather(t *testing.T) {
	saveFind := findSockets
	saveDump := perfDump
	defer func() {
		findSockets = saveFind
		perfDump = saveDump
	}()

	findSockets = func(c *Ceph) ([]*socket, error) {
		return []*socket{&socket{"osd.1", typeOsd, ""}}, nil
	}

	perfDump = func(binary string, s *socket) (string, error) {
		return osdPerfDump, nil
	}

	acc := &testutil.Accumulator{}
	c := &Ceph{}
	c.Gather(acc)
}

func TestFindSockets(t *testing.T) {
	tmpdir, err := ioutil.TempDir("", "socktest")
	assert.NoError(t, err)
	defer func() {
		err := os.Remove(tmpdir)
		assert.NoError(t, err)
	}()
	c := &Ceph{
		CephBinary: "foo",
		SocketDir:  tmpdir,
	}

	c.setDefaults()

	for _, st := range sockTestParams {
		createTestFiles(tmpdir, st)

		sockets, err := findSockets(c)
		assert.NoError(t, err)

		for i := 1; i <= st.osds; i++ {
			assertFoundSocket(t, tmpdir, typeOsd, i, sockets)
		}

		for i := 1; i <= st.mons; i++ {
			assertFoundSocket(t, tmpdir, typeMon, i, sockets)
		}
		cleanupTestFiles(tmpdir, st)
	}
}

func assertFoundSocket(t *testing.T, dir, sockType string, i int, sockets []*socket) {
	var prefix string
	if sockType == typeOsd {
		prefix = osdPrefix
	} else {
		prefix = monPrefix
	}
	expected := path.Join(dir, sockFile(prefix, i))
	found := false
	for _, s := range sockets {
		fmt.Printf("Checking %s\n", s.socket)
		if s.socket == expected {
			found = true
			assert.Equal(t, s.sockType, sockType, "Unexpected socket type for '%s'", s)
			assert.Equal(t, s.sockId, strconv.Itoa(i))
		}
	}
	assert.True(t, found, "Did not find socket: %s", expected)
}

func sockFile(prefix string, i int) string {
	return strings.Join([]string{prefix, strconv.Itoa(i), sockSuffix}, ".")
}

func createTestFiles(dir string, st *SockTest) {
	writeFile := func(prefix string, i int) {
		f := sockFile(prefix, i)
		fpath := path.Join(dir, f)
		ioutil.WriteFile(fpath, []byte(""), 0777)
	}
	tstFileApply(st, writeFile)
}

func cleanupTestFiles(dir string, st *SockTest) {
	rmFile := func(prefix string, i int) {
		f := sockFile(prefix, i)
		fpath := path.Join(dir, f)
		err := os.Remove(fpath)
		if err != nil {
			fmt.Printf("Error removing test file %s: %v\n", fpath, err)
		}
	}
	tstFileApply(st, rmFile)
}

func tstFileApply(st *SockTest, fn func(prefix string, i int)) {
	for i := 1; i <= st.osds; i++ {
		fn(osdPrefix, i)
	}
	for i := 1; i <= st.mons; i++ {
		fn(monPrefix, i)
	}
}

type SockTest struct {
	osds int
	mons int
}

var sockTestParams = []*SockTest{
	&SockTest{
		osds: 2,
		mons: 2,
	},
	&SockTest{
		mons: 1,
	},
	&SockTest{
		osds: 1,
	},
	&SockTest{},
}

var monPerfDump = `
{ "cluster": { "num_mon": 2, "num_mon_quorum": 2, "num_osd": 26,
      "num_osd_up": 26, "num_osd_in": 26, "osd_epoch": 3306,
      "osd_kb": 11487846448, "osd_kb_used": 5678670180, "osd_kb_avail": 5809176268,
      "num_pool": 12, "num_pg": 768, "num_pg_active_clean": 768,
      "num_pg_active": 768, "num_pg_peering": 0, "num_object": 397616,
      "num_object_degraded": 0, "num_object_unfound": 0, "num_bytes": 2917848227467,
      "num_mds_up": 0, "num_mds_in": 0, "num_mds_failed": 0, "mds_epoch": 1},
  "leveldb": { "leveldb_get": 321950312, "leveldb_transaction": 18729922,
      "leveldb_compact": 0, "leveldb_compact_range": 74141,
      "leveldb_compact_queue_merge": 0, "leveldb_compact_queue_len": 0},
  "mon": {},
  "paxos": { "start_leader": 0, "start_peon": 1, "restart": 4,
      "refresh": 9363435,
      "refresh_latency": { "avgcount": 9363435, "sum": 5378.794002000},
      "begin": 9363435,
      "begin_keys": { "avgcount": 0, "sum": 0},
      "begin_bytes": { "avgcount": 9363435, "sum": 110468605489},
      "begin_latency": { "avgcount": 9363435, "sum": 5850.060682000},
      "commit": 9363435,
      "commit_keys": { "avgcount": 0, "sum": 0},
      "commit_bytes": { "avgcount": 0, "sum": 0},
      "commit_latency": { "avgcount": 0, "sum": 0.000000000},
      "collect": 1,
      "collect_keys": { "avgcount": 1, "sum": 1},
      "collect_bytes": { "avgcount": 1, "sum": 24},
      "collect_latency": { "avgcount": 1, "sum": 0.000280000},
      "collect_uncommitted": 0, "collect_timeout": 0, "accept_timeout": 0,
      "lease_ack_timeout": 0, "lease_timeout": 0,
      "store_state": 9363435,
      "store_state_keys": { "avgcount": 9363435, "sum": 176572789},
      "store_state_bytes": { "avgcount": 9363435, "sum": 216355887217},
      "store_state_latency": { "avgcount": 9363435, "sum": 6866.540527000},
      "share_state": 0,
      "share_state_keys": { "avgcount": 0, "sum": 0},
      "share_state_bytes": { "avgcount": 0, "sum": 0},
      "new_pn": 0,
      "new_pn_latency": { "avgcount": 0, "sum": 0.000000000}},
  "throttle-mon_client_bytes": { "val": 246, "max": 104857600,
      "get": 896030, "get_sum": 45854374, "get_or_fail_fail": 0,
      "get_or_fail_success": 0, "take": 0, "take_sum": 0,
      "put": 896026, "put_sum": 45854128,
      "wait": { "avgcount": 0, "sum": 0.000000000}},
  "throttle-mon_daemon_bytes": { "val": 0, "max": 419430400,
      "get": 2773768, "get_sum": 3627676976, "get_or_fail_fail": 0,
      "get_or_fail_success": 0, "take": 0, "take_sum": 0,
      "put": 2773768, "put_sum": 3627676976,
      "wait": { "avgcount": 0, "sum": 0.000000000}},
  "throttle-msgr_dispatch_throttler-mon": { "val": 0, "max": 104857600,
      "get": 34504949, "get_sum": 226860281124, "get_or_fail_fail": 0,
      "get_or_fail_success": 0, "take": 0, "take_sum": 0,
      "put": 34504949, "put_sum": 226860281124,
      "wait": { "avgcount": 0, "sum": 0.000000000}}}
`

var osdPerfDump = `
{ "WBThrottle": { "bytes_dirtied": 28405539, "bytes_wb": 0,
      "ios_dirtied": 93, "ios_wb": 0, "inodes_dirtied": 86, "inodes_wb": 0},
  "filestore": { "journal_queue_max_ops": 0, "journal_queue_ops": 0,
      "journal_ops": 1108008, "journal_queue_max_bytes": 0,
      "journal_queue_bytes": 0, "journal_bytes": 73233416196,
      "journal_latency": { "avgcount": 1108008, "sum": 290.981036000},
      "journal_wr": 1091866,
      "journal_wr_bytes": { "avgcount": 1091866, "sum": 74925682688},
      "journal_full": 0, "committing": 0, "commitcycle": 110389,
      "commitcycle_interval": { "avgcount": 110389, "sum": 552132.109360000},
      "commitcycle_latency": { "avgcount": 110389, "sum": 178.657804000},
      "op_queue_max_ops": 50, "op_queue_ops": 0, "ops": 1108008,
      "op_queue_max_bytes": 104857600, "op_queue_bytes": 0,
      "bytes": 73226768148,
      "apply_latency": { "avgcount": 1108008, "sum": 947.742722000},
      "queue_transaction_latency_avg": { "avgcount": 1108008, "sum": 0.511327000}},
  "leveldb": { "leveldb_get": 4361221, "leveldb_transaction": 4351276,
      "leveldb_compact": 0, "leveldb_compact_range": 0,
      "leveldb_compact_queue_merge": 0, "leveldb_compact_queue_len": 0},
  "mutex-FileJournal::completions_lock": { "wait": { "avgcount": 0, "sum": 0.000000000}},
  "mutex-FileJournal::finisher_lock": { "wait": { "avgcount": 0, "sum": 0.000000000}},
  "mutex-FileJournal::write_lock": { "wait": { "avgcount": 0, "sum": 0.000000000}},
  "mutex-FileJournal::writeq_lock": { "wait": { "avgcount": 0, "sum": 0.000000000}},
  "mutex-JOS::ApplyManager::apply_lock": { "wait": { "avgcount": 0, "sum": 0.000000000}},
  "mutex-JOS::ApplyManager::com_lock": { "wait": { "avgcount": 0, "sum": 0.000000000}},
  "mutex-JOS::SubmitManager::lock": { "wait": { "avgcount": 0, "sum": 0.000000000}},
  "mutex-WBThrottle::lock": { "wait": { "avgcount": 0, "sum": 0.000000000}},
  "objecter": { "op_active": 0, "op_laggy": 0, "op_send": 0,
      "op_send_bytes": 0, "op_resend": 0, "op_ack": 0, "op_commit": 0,
      "op": 0, "op_r": 0, "op_w": 0, "op_rmw": 0, "op_pg": 0,
      "osdop_stat": 0, "osdop_create": 0, "osdop_read": 0, "osdop_write": 0,
      "osdop_writefull": 0, "osdop_append": 0, "osdop_zero": 0,
      "osdop_truncate": 0, "osdop_delete": 0, "osdop_mapext": 0,
      "osdop_sparse_read": 0, "osdop_clonerange": 0, "osdop_getxattr": 0,
      "osdop_setxattr": 0, "osdop_cmpxattr": 0, "osdop_rmxattr": 0,
      "osdop_resetxattrs": 0, "osdop_tmap_up": 0, "osdop_tmap_put": 0,
      "osdop_tmap_get": 0, "osdop_call": 0, "osdop_watch": 0,
      "osdop_notify": 0, "osdop_src_cmpxattr": 0, "osdop_pgls": 0,
      "osdop_pgls_filter": 0, "osdop_other": 0,
      "linger_active": 0, "linger_send": 0, "linger_resend": 0,
      "poolop_active": 0, "poolop_send": 0, "poolop_resend": 0,
      "poolstat_active": 0, "poolstat_send": 0, "poolstat_resend": 0,
      "statfs_active": 0, "statfs_send": 0, "statfs_resend": 0,
      "command_active": 0, "command_send": 0, "command_resend": 0,
      "map_epoch": 3300, "map_full": 0, "map_inc": 3293,
      "osd_sessions": 0, "osd_session_open": 0, "osd_session_close": 0,
      "osd_laggy": 0},
  "osd": { "opq": 0, "op_wip": 0, "op": 23939,
      "op_in_bytes": 1245903961, "op_out_bytes": 29103083856,
      "op_latency": { "avgcount": 23939, "sum": 440.192015000},
      "op_process_latency": { "avgcount": 23939, "sum": 30.170685000},
      "op_r": 23112, "op_r_out_bytes": 29103056146,
      "op_r_latency": { "avgcount": 23112, "sum": 19.373526000},
      "op_r_process_latency": { "avgcount": 23112, "sum": 14.625928000},
      "op_w": 549, "op_w_in_bytes": 1245804358,
      "op_w_rlat": { "avgcount": 549, "sum": 17.022299000},
      "op_w_latency": { "avgcount": 549, "sum": 418.494610000},
      "op_w_process_latency": { "avgcount": 549, "sum": 13.316555000},
      "op_rw": 278, "op_rw_in_bytes": 99603, "op_rw_out_bytes": 27710,
      "op_rw_rlat": { "avgcount": 278, "sum": 2.213785000},
      "op_rw_latency": { "avgcount": 278, "sum": 2.323879000},
      "op_rw_process_latency": { "avgcount": 278, "sum": 2.228202000},
      "subop": 1074774, "subop_in_bytes": 26841811636,
      "subop_latency": { "avgcount": 1074774, "sum": 745.509160000},
      "subop_w": 0, "subop_w_in_bytes": 26841811636,
      "subop_w_latency": { "avgcount": 1074774, "sum": 745.509160000},
      "subop_pull": 0,
      "subop_pull_latency": { "avgcount": 0, "sum": 0.000000000},
      "subop_push": 0, "subop_push_in_bytes": 0,
      "subop_push_latency": { "avgcount": 0, "sum": 0.000000000},
      "pull": 0, "push": 28, "push_out_bytes": 103483392,
      "push_in": 0, "push_in_bytes": 0, "recovery_ops": 15,
      "loadavg": 202, "buffer_bytes": 0, "numpg": 18,
      "numpg_primary": 8, "numpg_replica": 10, "numpg_stray": 0,
      "heartbeat_to_peers": 10, "heartbeat_from_peers": 0,
      "map_messages": 7413, "map_message_epochs": 9792,
      "map_message_epoch_dups": 10105, "messages_delayed_for_map": 83,
      "stat_bytes": 102123175936, "stat_bytes_used": 49961820160,
      "stat_bytes_avail": 52161355776,
      "copyfrom": 0, "tier_promote": 0, "tier_flush": 0,
      "tier_flush_fail": 0, "tier_try_flush": 0, "tier_try_flush_fail": 0,
      "tier_evict": 0, "tier_whiteout": 0, "tier_dirty": 230,
      "tier_clean": 0, "tier_delay": 0,
      "agent_wake": 0, "agent_skip": 0, "agent_flush": 0, "agent_evict": 0},
  "recoverystate_perf": {
      "initial_latency": { "avgcount": 473, "sum": 0.027207000},
      "started_latency": { "avgcount": 1480, "sum": 9854902.397648000},
      "reset_latency": { "avgcount": 1953, "sum": 0.096206000},
      "start_latency": { "avgcount": 1953, "sum": 0.059947000},
      "primary_latency": { "avgcount": 765, "sum": 4688922.186935000},
      "peering_latency": { "avgcount": 704, "sum": 1668.652135000},
      "backfilling_latency": { "avgcount": 0, "sum": 0.000000000},
      "waitremotebackfillreserved_latency": { "avgcount": 0, "sum": 0.000000000},
      "waitlocalbackfillreserved_latency": { "avgcount": 0, "sum": 0.000000000},
      "notbackfilling_latency": { "avgcount": 0, "sum": 0.000000000},
      "repnotrecovering_latency": { "avgcount": 462, "sum": 5158922.114600000},
      "repwaitrecoveryreserved_latency": { "avgcount": 15, "sum": 0.008275000},
      "repwaitbackfillreserved_latency": { "avgcount": 1, "sum": 0.000095000},
      "RepRecovering_latency": { "avgcount": 16, "sum": 2274.944727000},
      "activating_latency": { "avgcount": 514, "sum": 261.008520000},
      "waitlocalrecoveryreserved_latency": { "avgcount": 20, "sum": 0.175422000},
      "waitremoterecoveryreserved_latency": { "avgcount": 20, "sum": 0.682778000},
      "recovering_latency": { "avgcount": 20, "sum": 0.697551000},
      "recovered_latency": { "avgcount": 511, "sum": 0.011038000},
      "clean_latency": { "avgcount": 503, "sum": 4686961.154278000},
      "active_latency": { "avgcount": 506, "sum": 4687223.640464000},
      "replicaactive_latency": { "avgcount": 446, "sum": 5161197.078966000},
      "stray_latency": { "avgcount": 794, "sum": 4805.105128000},
      "getinfo_latency": { "avgcount": 704, "sum": 1138.477937000},
      "getlog_latency": { "avgcount": 678, "sum": 0.036393000},
      "waitactingchange_latency": { "avgcount": 69, "sum": 59.172893000},
      "incomplete_latency": { "avgcount": 0, "sum": 0.000000000},
      "getmissing_latency": { "avgcount": 609, "sum": 0.012288000},
      "waitupthru_latency": { "avgcount": 576, "sum": 530.106999000}},
  "throttle-filestore_bytes": { "val": 0, "max": 0, "get": 0, "get_sum": 0,
      "get_or_fail_fail": 0, "get_or_fail_success": 0, "take": 0, "take_sum": 0,
      "put": 0, "put_sum": 0, "wait": { "avgcount": 0, "sum": 0.000000000}},
  "throttle-filestore_ops": { "val": 0, "max": 0, "get": 0, "get_sum": 0,
      "get_or_fail_fail": 0, "get_or_fail_success": 0, "take": 0, "take_sum": 0,
      "put": 0, "put_sum": 0, "wait": { "avgcount": 0, "sum": 0.000000000}},
  "throttle-msgr_dispatch_throttler-client": { "val": 0, "max": 104857600,
      "get": 130730, "get_sum": 1246039872, "get_or_fail_fail": 0,
      "get_or_fail_success": 0, "take": 0, "take_sum": 0,
      "put": 130730, "put_sum": 1246039872,
      "wait": { "avgcount": 0, "sum": 0.000000000}},
  "throttle-msgr_dispatch_throttler-cluster": { "val": 0, "max": 104857600,
      "get": 1108033, "get_sum": 71277949992, "get_or_fail_fail": 0,
      "get_or_fail_success": 0, "take": 0, "take_sum": 0,
      "put": 1108033, "put_sum": 71277949992,
      "wait": { "avgcount": 0, "sum": 0.000000000}},
  "throttle-msgr_dispatch_throttler-hb_back_server": { "val": 0, "max": 104857600,
      "get": 18320575, "get_sum": 861067025, "get_or_fail_fail": 0,
      "get_or_fail_success": 0, "take": 0, "take_sum": 0,
      "put": 18320575, "put_sum": 861067025,
      "wait": { "avgcount": 0, "sum": 0.000000000}},
  "throttle-msgr_dispatch_throttler-hb_front_server": { "val": 0, "max": 104857600,
      "get": 18320575, "get_sum": 861067025, "get_or_fail_fail": 0,
      "get_or_fail_success": 0, "take": 0, "take_sum": 0,
      "put": 18320575, "put_sum": 861067025,
      "wait": { "avgcount": 0, "sum": 0.000000000}},
  "throttle-msgr_dispatch_throttler-hbclient": { "val": 0, "max": 104857600,
      "get": 40479394, "get_sum": 1902531518, "get_or_fail_fail": 0,
      "get_or_fail_success": 0, "take": 0, "take_sum": 0,
      "put": 40479394, "put_sum": 1902531518,
      "wait": { "avgcount": 0, "sum": 0.000000000}},
  "throttle-msgr_dispatch_throttler-ms_objecter": { "val": 0, "max": 104857600,
      "get": 0, "get_sum": 0, "get_or_fail_fail": 0,
      "get_or_fail_success": 0, "take": 0, "take_sum": 0,
      "put": 0, "put_sum": 0, "wait": { "avgcount": 0, "sum": 0.000000000}},
  "throttle-objecter_bytes": { "val": 0, "max": 104857600,
      "get": 0, "get_sum": 0, "get_or_fail_fail": 0,
      "get_or_fail_success": 0, "take": 0, "take_sum": 0,
      "put": 0, "put_sum": 0, "wait": { "avgcount": 0, "sum": 0.000000000}},
  "throttle-objecter_ops": { "val": 0, "max": 1024,
      "get": 0, "get_sum": 0, "get_or_fail_fail": 0,
      "get_or_fail_success": 0, "take": 0, "take_sum": 0,
      "put": 0, "put_sum": 0, "wait": { "avgcount": 0, "sum": 0.000000000}},
  "throttle-osd_client_bytes": { "val": 0, "max": 524288000,
      "get": 24241, "get_sum": 1241992581, "get_or_fail_fail": 0,
      "get_or_fail_success": 0, "take": 0, "take_sum": 0,
      "put": 25958, "put_sum": 1241992581,
      "wait": { "avgcount": 0, "sum": 0.000000000}},
  "throttle-osd_client_messages": { "val": 0, "max": 100,
      "get": 49214, "get_sum": 49214, "get_or_fail_fail": 0,
      "get_or_fail_success": 0, "take": 0, "take_sum": 0,
      "put": 49214, "put_sum": 49214,
      "wait": { "avgcount": 0, "sum": 0.000000000}}}
`
92 plugins/inputs/chrony/README.md (Normal file)
@@ -0,0 +1,92 @@
# chrony Input Plugin

Get standard chrony metrics; requires the `chronyc` executable.

Below is the documentation of the various headers returned by `chronyc tracking`.

- Reference ID - This is the refid and name (or IP address), if available, of the
server to which the computer is currently synchronised. If this is 127.127.1.1
it means the computer is not synchronised to any external source and that you
have the ‘local’ mode operating (via the local command in chronyc (see section local),
or the local directive in the ‘/etc/chrony.conf’ file (see section local)).
- Stratum - The stratum indicates how many hops away from a computer with an attached
reference clock we are. Such a computer is a stratum-1 computer, so the computer in the
example is two hops away (i.e. a.b.c is a stratum-2 and is synchronised from a stratum-1).
- Ref time - This is the time (UTC) at which the last measurement from the reference
source was processed.
- System time - In normal operation, chronyd never steps the system clock, because any
jump in the timescale can have adverse consequences for certain application programs.
Instead, any error in the system clock is corrected by slightly speeding up or slowing
down the system clock until the error has been removed, and then returning to the system
clock’s normal speed. A consequence of this is that there will be a period when the
system clock (as read by other programs using the gettimeofday() system call, or by the
date command in the shell) will be different from chronyd’s estimate of the current true
time (which it reports to NTP clients when it is operating in server mode). The value
reported on this line is the difference due to this effect.
- Last offset - This is the estimated local offset on the last clock update.
- RMS offset - This is a long-term average of the offset value.
- Frequency - The ‘frequency’ is the rate by which the system’s clock would be
wrong if chronyd was not correcting it. It is expressed in ppm (parts per million).
For example, a value of 1 ppm would mean that when the system’s clock thinks it has
advanced 1 second, it has actually advanced by 1.000001 seconds relative to true time.
- Residual freq - This shows the ‘residual frequency’ for the currently selected
reference source. This reflects any difference between what the measurements from the
reference source indicate the frequency should be and the frequency currently being used.
The reason this is not always zero is that a smoothing procedure is applied to the
frequency. Each time a measurement from the reference source is obtained and a new
residual frequency computed, the estimated accuracy of this residual is compared with the
estimated accuracy (see ‘skew’ below) of the existing frequency value. A weighted average
is computed for the new frequency, with weights depending on these accuracies. If the
measurements from the reference source follow a consistent trend, the residual will be
driven to zero over time.
- Skew - This is the estimated error bound on the frequency.
- Root delay - This is the total of the network path delays to the stratum-1 computer
from which the computer is ultimately synchronised. In certain extreme situations, this
value can be negative. (This can arise in a symmetric peer arrangement where the computers’
frequencies are not tracking each other and the network delay is very short relative to the
turn-around time at each computer.)
- Root dispersion - This is the total dispersion accumulated through all the computers
back to the stratum-1 computer from which the computer is ultimately synchronised.
Dispersion is due to system clock resolution, statistical measurement variations, etc.
- Leap status - This is the leap status, which can be Normal, Insert second,
Delete second or Not synchronised.
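
For reference, here is a representative `chronyc tracking` output of the kind the plugin parses (the same sample used by the plugin's tests; values are illustrative):

```
Reference ID : 192.168.1.22 (ntp.example.com)
Stratum : 3
Ref time (UTC) : Thu May 12 14:27:07 2016
System time : 0.000020390 seconds fast of NTP time
Last offset : +0.000012651 seconds
RMS offset : 0.000025577 seconds
Frequency : 16.001 ppm slow
Residual freq : -0.000 ppm
Skew : 0.006 ppm
Root delay : 0.001655 seconds
Root dispersion : 0.003307 seconds
Update interval : 507.2 seconds
Leap status : Normal
```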

### Configuration:

```toml
# Get standard chrony metrics, requires chronyc executable.
[[inputs.chrony]]
  ## If true, chronyc tries to perform a DNS lookup for the time server.
  # dns_lookup = false
```

### Measurements & Fields:

- chrony
    - last_offset (float, seconds)
    - rms_offset (float, seconds)
    - frequency (float, ppm)
    - residual_freq (float, ppm)
    - skew (float, ppm)
    - root_delay (float, seconds)
    - root_dispersion (float, seconds)
    - update_interval (float, seconds)

### Tags:

- All measurements have the following tags:
    - reference_id
    - stratum
    - leap_status

### Example Output:

```
$ telegraf -config telegraf.conf -input-filter chrony -test
* Plugin: chrony, Collection 1
> chrony,leap_status=normal,reference_id=192.168.1.1,stratum=3 frequency=-35.657,last_offset=-0.000013616,residual_freq=-0,rms_offset=0.000027073,root_delay=0.000644,root_dispersion=0.003444,skew=0.001,update_interval=1031.2 1463750789687639161
```

129
plugins/inputs/chrony/chrony.go
Normal file
@@ -0,0 +1,129 @@
// +build linux

package chrony

import (
	"errors"
	"fmt"
	"os/exec"
	"strconv"
	"strings"
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/internal"
	"github.com/influxdata/telegraf/plugins/inputs"
)

var (
	execCommand = exec.Command // execCommand is used to mock commands in tests.
)

type Chrony struct {
	DNSLookup bool `toml:"dns_lookup"`
	path      string
}

func (*Chrony) Description() string {
	return "Get standard chrony metrics, requires chronyc executable."
}

func (*Chrony) SampleConfig() string {
	return `
  ## If true, chronyc tries to perform a DNS lookup for the time server.
  # dns_lookup = false
`
}

func (c *Chrony) Gather(acc telegraf.Accumulator) error {
	if len(c.path) == 0 {
		return errors.New("chronyc not found: verify that chrony is installed and that chronyc is in your PATH")
	}

	flags := []string{}
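	// chronyc's "-n" flag disables DNS lookups, so the Reference ID is
	// reported as a raw IP address rather than a hostname.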
	if !c.DNSLookup {
		flags = append(flags, "-n")
	}
	flags = append(flags, "tracking")

	cmd := execCommand(c.path, flags...)
	out, err := internal.CombinedOutputTimeout(cmd, time.Second*5)
	if err != nil {
		return fmt.Errorf("failed to run command %s: %s - %s", strings.Join(cmd.Args, " "), err, string(out))
	}
	fields, tags, err := processChronycOutput(string(out))
	if err != nil {
		return err
	}
	acc.AddFields("chrony", fields, tags)
	return nil
}

// processChronycOutput takes in a string output from the chronyc command, like:
//
//	Reference ID : 192.168.1.22 (ntp.example.com)
//	Stratum : 3
//	Ref time (UTC) : Thu May 12 14:27:07 2016
//	System time : 0.000020390 seconds fast of NTP time
//	Last offset : +0.000012651 seconds
//	RMS offset : 0.000025577 seconds
//	Frequency : 16.001 ppm slow
//	Residual freq : -0.000 ppm
//	Skew : 0.006 ppm
//	Root delay : 0.001655 seconds
//	Root dispersion : 0.003307 seconds
//	Update interval : 507.2 seconds
//	Leap status : Normal
//
// The value on the left side of the colon is used as the field name if the
// first token on the right side parses as a float; otherwise that token
// becomes a tag value keyed by the left-side name.
//
// Ref time is ignored, and all names are converted to snake case.
//
// It returns (<fields>, <tags>).
func processChronycOutput(out string) (map[string]interface{}, map[string]string, error) {
	tags := map[string]string{}
	fields := map[string]interface{}{}
	lines := strings.Split(strings.TrimSpace(out), "\n")
	for _, line := range lines {
		stats := strings.Split(line, ":")
		if len(stats) < 2 {
			return nil, nil, fmt.Errorf("unexpected output from chronyc, expected ':' in %s", out)
		}
		name := strings.ToLower(strings.Replace(strings.TrimSpace(stats[0]), " ", "_", -1))
		// ignore reference time
		if strings.Contains(name, "time") {
			continue
		}
		valueFields := strings.Fields(stats[1])
		if len(valueFields) == 0 {
			return nil, nil, fmt.Errorf("unexpected output from chronyc: %s", out)
		}
		if strings.Contains(strings.ToLower(name), "stratum") {
			tags["stratum"] = valueFields[0]
			continue
		}
		value, err := strconv.ParseFloat(valueFields[0], 64)
		if err != nil {
			tags[name] = strings.ToLower(valueFields[0])
			continue
		}
		if strings.Contains(stats[1], "slow") {
			value = -value
		}
		fields[name] = value
	}

	return fields, tags, nil
}
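
// As an example of the rules above: "Frequency : 16.001 ppm slow" yields the
// field frequency=-16.001 (a trailing "slow" negates the parsed value), while
// "Leap status : Normal" does not parse as a float and therefore yields the
// tag leap_status="normal".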

func init() {
	c := Chrony{}
	path, _ := exec.LookPath("chronyc")
	if len(path) > 0 {
		c.path = path
	}
	inputs.Add("chrony", func() telegraf.Input {
		return &c
	})
}
3
plugins/inputs/chrony/chrony_notlinux.go
Normal file
@@ -0,0 +1,3 @@
// +build !linux

package chrony
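
// Note: chrony.go carries a "+build linux" constraint, so on other platforms
// only this empty stub compiles and no chrony input is registered.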
109
plugins/inputs/chrony/chrony_test.go
Normal file
@@ -0,0 +1,109 @@
// +build linux

package chrony

import (
	"fmt"
	"os"
	"os/exec"
	"testing"

	"github.com/influxdata/telegraf/testutil"
)

func TestGather(t *testing.T) {
	c := Chrony{
		path: "chronyc",
	}
	// overwrite exec.Command with the mock command
	execCommand = fakeExecCommand
	defer func() { execCommand = exec.Command }()
	var acc testutil.Accumulator

	err := c.Gather(&acc)
	if err != nil {
		t.Fatal(err)
	}

	tags := map[string]string{
		"reference_id": "192.168.1.22",
		"leap_status":  "normal",
		"stratum":      "3",
	}
	fields := map[string]interface{}{
		"last_offset":     0.000012651,
		"rms_offset":      0.000025577,
		"frequency":       -16.001,
		"residual_freq":   0.0,
		"skew":            0.006,
		"root_delay":      0.001655,
		"root_dispersion": 0.003307,
		"update_interval": 507.2,
	}

	acc.AssertContainsTaggedFields(t, "chrony", fields, tags)

	// test again with DNS lookup enabled
	c.DNSLookup = true
	err = c.Gather(&acc)
	if err != nil {
		t.Fatal(err)
	}
	acc.AssertContainsTaggedFields(t, "chrony", fields, tags)
}

// fakeExecCommand is a helper function that mocks
// the exec.Command call (and calls the test binary instead)
func fakeExecCommand(command string, args ...string) *exec.Cmd {
	cs := []string{"-test.run=TestHelperProcess", "--", command}
	cs = append(cs, args...)
	cmd := exec.Command(os.Args[0], cs...)
	cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"}
	return cmd
}

// TestHelperProcess isn't a real test. It's used to mock exec.Command.
// For example, if you run:
// GO_WANT_HELPER_PROCESS=1 go test -test.run=TestHelperProcess -- chrony tracking
// it prints the mockData below.
func TestHelperProcess(t *testing.T) {
	if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
		return
	}

	lookup := "Reference ID : 192.168.1.22 (ntp.example.com)\n"
	noLookup := "Reference ID : 192.168.1.22 (192.168.1.22)\n"
	mockData := `Stratum : 3
Ref time (UTC) : Thu May 12 14:27:07 2016
System time : 0.000020390 seconds fast of NTP time
Last offset : +0.000012651 seconds
RMS offset : 0.000025577 seconds
Frequency : 16.001 ppm slow
Residual freq : -0.000 ppm
Skew : 0.006 ppm
Root delay : 0.001655 seconds
Root dispersion : 0.003307 seconds
Update interval : 507.2 seconds
Leap status : Normal
`

	args := os.Args

	// The preceding arguments belong to the test binary and look like:
	// /tmp/go-build970079519/…/_test/integration.test -test.run=TestHelperProcess --
	cmd, args := args[3], args[4:]

	if cmd == "chronyc" {
		if args[0] == "tracking" {
			fmt.Fprint(os.Stdout, lookup+mockData)
		} else {
			fmt.Fprint(os.Stdout, noLookup+mockData)
		}
	} else {
		fmt.Fprint(os.Stdout, "command not found")
		os.Exit(1)
	}
	os.Exit(0)
}
Some files were not shown because too many files have changed in this diff.