Compare commits
Comparing v0.1.0...0.13.0-rc2 (1,189 commits)

.gitattributes (vendored, new file, +2)
@@ -0,0 +1,2 @@
+CHANGELOG.md merge=union

.gitignore (vendored, 5 changes)
@@ -1,3 +1,6 @@
 pkg/
-tivan
 .vagrant
+/telegraf
+.idea
+*~
+*#

CHANGELOG.md (new file, +704)
@@ -0,0 +1,704 @@

## v0.13 [2016-05-09]

### Release Notes

- **Breaking change** in jolokia plugin. See https://github.com/influxdata/telegraf/blob/master/plugins/inputs/jolokia/README.md for updated configuration. The plugin will now support proxy mode and will make POST requests.

- New [agent] configuration option: `metric_batch_size`. This option tells telegraf the maximum batch size to accumulate before sending a flush to the configured outputs. `metric_buffer_limit` now refers to the absolute maximum number of metrics that will accumulate before metrics are dropped.

- The `flush_buffer_when_full` option no longer exists; this is now the default and only behavior of telegraf.
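
A minimal `[agent]` sketch of how these two buffer settings fit together (the values are illustrative, not documented defaults):

```
[agent]
  flush_interval = "10s"
  metric_batch_size = 1000    # max metrics accumulated per flush to outputs
  metric_buffer_limit = 10000 # absolute cap; metrics beyond this are dropped
```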

- **Breaking Change**: docker plugin tags. The cont_id tag no longer exists; it will now be a field called container_id. Additionally, cont_image and cont_name are being renamed to container_image and container_name.

- **Breaking Change**: docker plugin measurements. The `docker_cpu`, `docker_mem`, `docker_blkio` and `docker_net` measurements are being renamed to `docker_container_cpu`, `docker_container_mem`, `docker_container_blkio` and `docker_container_net`. Why? Because these metrics are specifically tracking per-container stats. The problem with per-container stats, in some use-cases, is that if containers are short-lived AND names are not kept consistent, then the series cardinality will balloon very quickly. So adding "container" to each metric will: (1) make it more clear that these metrics are per-container, and (2) allow users to easily drop per-container metrics if cardinality is an issue (`namedrop = ["docker_container_*"]`)
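
For example, a user hit by that cardinality problem could drop every per-container series at the input. A sketch (the endpoint shown is the conventional local docker socket):

```
[[inputs.docker]]
  endpoint = "unix:///var/run/docker.sock"
  namedrop = ["docker_container_*"] # drop all per-container measurements
```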

- `tagexclude` and `taginclude` are now available, which can be used to remove tags from measurements on inputs and outputs. See [the configuration doc](https://github.com/influxdata/telegraf/blob/master/docs/CONFIGURATION.md) for more details.
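
A minimal sketch of `tagexclude` on an input (the plugin and tag name are illustrative):

```
[[inputs.cpu]]
  percpu = true
  tagexclude = ["host"] # remove the host tag from all cpu measurements
```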

- **Measurement filtering:** All measurement filters now match based on glob only. Previously there was an undocumented behavior where filters would match based on _prefix_ in addition to globs. This means that a filter like `fielddrop = ["time_"]` will need to be changed to `fielddrop = ["time_*"]`.
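
The same filter, before and after the change:

```
# v0.12 and earlier: also matched any field with the "time_" prefix
fielddrop = ["time_"]

# v0.13 and later: glob matching only, so the wildcard must be explicit
fielddrop = ["time_*"]
```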

- **datadog**: measurement and field names will no longer have `_` replaced by `.`

- The following plugins have changed their tags to _not_ overwrite the host tag:
  - cassandra: `host -> cassandra_host`
  - disque: `host -> disque_host`
  - rethinkdb: `host -> rethinkdb_host`

- **Breaking Change**: The `win_perf_counters` input has been changed to sanitize field names, replacing `/Sec` and `/sec` with `_persec`, as well as spaces with underscores. This is needed because Graphite doesn't like slashes and spaces, and was failing to accept metrics that had them. The `/[sS]ec` -> `_persec` is just to make things clearer and uniform.

- The `disk` input plugin can now be configured with the `HOST_MOUNT_PREFIX` environment variable. This value is prepended to any mountpaths discovered before retrieving stats. It is not included on the report path. This is necessary for reporting host disk stats when running from within a container.

### Features

- [#1031](https://github.com/influxdata/telegraf/pull/1031): Jolokia plugin proxy mode. Thanks @saiello!
- [#1017](https://github.com/influxdata/telegraf/pull/1017): taginclude and tagexclude arguments.
- [#1015](https://github.com/influxdata/telegraf/pull/1015): Docker plugin schema refactor.
- [#889](https://github.com/influxdata/telegraf/pull/889): Improved MySQL plugin. Thanks @maksadbek!
- [#1060](https://github.com/influxdata/telegraf/pull/1060): TTL metrics added to MongoDB input plugin.
- [#1056](https://github.com/influxdata/telegraf/pull/1056): Don't allow inputs to overwrite host tags.
- [#1035](https://github.com/influxdata/telegraf/issues/1035): Add `user`, `exe`, `pidfile` tags to procstat plugin.
- [#1041](https://github.com/influxdata/telegraf/issues/1041): Add `n_cpus` field to the system plugin.
- [#1072](https://github.com/influxdata/telegraf/pull/1072): New Input Plugin: filestat.
- [#1066](https://github.com/influxdata/telegraf/pull/1066): Replication lag metrics for MongoDB input plugin.
- [#1086](https://github.com/influxdata/telegraf/pull/1086): Ability to specify AWS keys in config file. Thanks @johnrengleman!
- [#1096](https://github.com/influxdata/telegraf/pull/1096): Performance refactor of running output buffers.
- [#967](https://github.com/influxdata/telegraf/issues/967): Buffer logging improvements.
- [#1107](https://github.com/influxdata/telegraf/issues/1107): Support lustre2 job stats. Thanks @hanleyja!
- [#1122](https://github.com/influxdata/telegraf/pull/1122): Support setting config path through env variable and default paths.
- [#1128](https://github.com/influxdata/telegraf/pull/1128): MongoDB jumbo chunks metric for MongoDB input plugin.
- [#1146](https://github.com/influxdata/telegraf/pull/1146): HAProxy socket support. Thanks @weshmashian!

### Bugfixes

- [#1050](https://github.com/influxdata/telegraf/issues/1050): jolokia plugin - do not overwrite host tag. Thanks @saiello!
- [#921](https://github.com/influxdata/telegraf/pull/921): mqtt_consumer stops gathering metrics. Thanks @chaton78!
- [#1013](https://github.com/influxdata/telegraf/pull/1013): Close dead riemann output connections. Thanks @echupriyanov!
- [#1012](https://github.com/influxdata/telegraf/pull/1012): Set default tags in test accumulator.
- [#1024](https://github.com/influxdata/telegraf/issues/1024): Don't replace `.` with `_` in datadog output.
- [#1058](https://github.com/influxdata/telegraf/issues/1058): Fix possible leaky TCP connections in influxdb output.
- [#1044](https://github.com/influxdata/telegraf/pull/1044): Fix SNMP OID possible collisions. Thanks @relip!
- [#1022](https://github.com/influxdata/telegraf/issues/1022): Don't error deb/rpm install on systemd errors.
- [#1078](https://github.com/influxdata/telegraf/issues/1078): Use default AWS credential chain.
- [#1070](https://github.com/influxdata/telegraf/issues/1070): SQL Server input: fix datatype conversion.
- [#1089](https://github.com/influxdata/telegraf/issues/1089): Fix leaky TCP connections in phpfpm plugin.
- [#914](https://github.com/influxdata/telegraf/issues/914): Telegraf can drop metrics on full buffers.
- [#1098](https://github.com/influxdata/telegraf/issues/1098): Sanitize invalid OpenTSDB characters.
- [#1110](https://github.com/influxdata/telegraf/pull/1110): Sanitize * to - in graphite serializer. Thanks @goodeggs!
- [#1118](https://github.com/influxdata/telegraf/pull/1118): Sanitize Counter names for `win_perf_counters` input.
- [#1125](https://github.com/influxdata/telegraf/pull/1125): Wrap all exec command runners with a timeout, so hung os processes don't halt Telegraf.
- [#1113](https://github.com/influxdata/telegraf/pull/1113): Set MaxRetry and RequiredAcks defaults in Kafka output.
- [#1090](https://github.com/influxdata/telegraf/issues/1090): [agent] and [global_tags] config sometimes not getting applied.
- [#1133](https://github.com/influxdata/telegraf/issues/1133): Use a timeout for docker list & stat cmds.
- [#1052](https://github.com/influxdata/telegraf/issues/1052): Docker panic fix when decode fails.
- [#1136](https://github.com/influxdata/telegraf/pull/1136): "DELAYED" Inserts were deprecated in MySQL 5.6.6. Thanks @PierreF!

## v0.12.1 [2016-04-14]

### Release Notes

- Breaking change in the dovecot input plugin. See Features section below.
- Graphite output templates are now supported. See https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite
- Possible breaking change for the librato and graphite outputs. Telegraf will no longer insert field names when the field is simply named `value`. This is because the `value` field is redundant in the graphite/librato context.
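
A sketch of a templated graphite output along the lines of the doc linked above (server address and template string are illustrative):

```
[[outputs.graphite]]
  servers = ["localhost:2003"]
  # order of components in the dot-delimited graphite bucket
  template = "host.tags.measurement.field"
```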

### Features

- [#1009](https://github.com/influxdata/telegraf/pull/1009): Cassandra input plugin. Thanks @subhachandrachandra!
- [#976](https://github.com/influxdata/telegraf/pull/976): Reduce allocations in the UDP and statsd inputs.
- [#979](https://github.com/influxdata/telegraf/pull/979): Reduce allocations in the TCP listener.
- [#992](https://github.com/influxdata/telegraf/pull/992): Refactor allocations in TCP/UDP listeners.
- [#935](https://github.com/influxdata/telegraf/pull/935): AWS Cloudwatch input plugin. Thanks @joshhardy & @ljosa!
- [#943](https://github.com/influxdata/telegraf/pull/943): http_response input plugin. Thanks @Lswith!
- [#939](https://github.com/influxdata/telegraf/pull/939): sysstat input plugin. Thanks @zbindenren!
- [#998](https://github.com/influxdata/telegraf/pull/998): **breaking change** enabled global, user and ip queries in dovecot plugin. Thanks @mikif70!
- [#1001](https://github.com/influxdata/telegraf/pull/1001): Graphite serializer templates.
- [#1008](https://github.com/influxdata/telegraf/pull/1008): Adding memstats metrics to the influxdb plugin.

### Bugfixes

- [#968](https://github.com/influxdata/telegraf/issues/968): Processes plugin gets unknown state when spaces are in the command name.
- [#969](https://github.com/influxdata/telegraf/pull/969): ipmi_sensors: allow : in password. Thanks @awaw!
- [#972](https://github.com/influxdata/telegraf/pull/972): dovecot: remove extra newline in dovecot command. Thanks @mrannanj!
- [#645](https://github.com/influxdata/telegraf/issues/645): docker plugin i/o error on closed pipe. Thanks @tripledes!

## v0.12.0 [2016-04-05]

### Features

- [#951](https://github.com/influxdata/telegraf/pull/951): Parse environment variables in the config file (see the sketch after this list).
- [#948](https://github.com/influxdata/telegraf/pull/948): Cleanup config file and make default package version include all plugins (but commented).
- [#927](https://github.com/influxdata/telegraf/pull/927): Adds parsing of tags to the statsd input when using DataDog's dogstatsd extension.
- [#863](https://github.com/influxdata/telegraf/pull/863): AMQP output: allow external auth. Thanks @ekini!
- [#707](https://github.com/influxdata/telegraf/pull/707): Improved prometheus plugin. Thanks @titilambert!
- [#878](https://github.com/influxdata/telegraf/pull/878): Added json serializer. Thanks @ch3lo!
- [#880](https://github.com/influxdata/telegraf/pull/880): Add the ability to specify the bearer token to the prometheus plugin. Thanks @jchauncey!
- [#882](https://github.com/influxdata/telegraf/pull/882): Fixed SQL Server Plugin issues.
- [#849](https://github.com/influxdata/telegraf/issues/849): Adding ability to parse single values as an input data type.
- [#844](https://github.com/influxdata/telegraf/pull/844): postgres_extensible plugin added. Thanks @menardorama!
- [#866](https://github.com/influxdata/telegraf/pull/866): couchbase input plugin. Thanks @ljosa!
- [#789](https://github.com/influxdata/telegraf/pull/789): Support multiple field specification and `field*` in graphite templates. Thanks @chrusty!
- [#762](https://github.com/influxdata/telegraf/pull/762): Nagios parser for the exec plugin. Thanks @titilambert!
- [#848](https://github.com/influxdata/telegraf/issues/848): Provide option to omit host tag from telegraf agent.
- [#928](https://github.com/influxdata/telegraf/pull/928): Deprecating the statsd "convert_names" options, expose separator config.
- [#919](https://github.com/influxdata/telegraf/pull/919): ipmi_sensor input plugin. Thanks @ebookbug!
- [#945](https://github.com/influxdata/telegraf/pull/945): KAFKA output: codec, acks, and retry configuration. Thanks @framiere!
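
A hypothetical fragment using the new environment-variable parsing from [#951]; `INFLUX_URL` and `INFLUX_PASSWORD` are assumed to be exported in telegraf's environment:

```
[[outputs.influxdb]]
  urls = ["$INFLUX_URL"]        # e.g. http://localhost:8086
  password = "$INFLUX_PASSWORD" # resolved when the config is loaded
```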

### Bugfixes

- [#890](https://github.com/influxdata/telegraf/issues/890): Create TLS config even if only ssl_ca is provided.
- [#884](https://github.com/influxdata/telegraf/issues/884): Do not call write method if there are 0 metrics to write.
- [#898](https://github.com/influxdata/telegraf/issues/898): Put database name in quotes, fixes special characters in the database name.
- [#656](https://github.com/influxdata/telegraf/issues/656): No longer run `lsof` on linux to get netstat data, fixes permissions issue.
- [#907](https://github.com/influxdata/telegraf/issues/907): Fix prometheus invalid label/measurement name key.
- [#841](https://github.com/influxdata/telegraf/issues/841): Fix memcached unix socket panic.
- [#873](https://github.com/influxdata/telegraf/issues/873): Fix SNMP plugin sometimes not returning metrics. Thanks @titilambert!
- [#934](https://github.com/influxdata/telegraf/pull/934): phpfpm: Fix fcgi uri path. Thanks @rudenkovk!
- [#805](https://github.com/influxdata/telegraf/issues/805): Kafka consumer stops gathering after i/o timeout.
- [#959](https://github.com/influxdata/telegraf/pull/959): reduce mongodb & prometheus collection timeouts. Thanks @PierreF!

## v0.11.1 [2016-03-17]

### Release Notes

- Primarily this release was cut to fix [#859](https://github.com/influxdata/telegraf/issues/859)

### Features

- [#747](https://github.com/influxdata/telegraf/pull/747): Start telegraf on install & remove on uninstall. Thanks @pierref!
- [#794](https://github.com/influxdata/telegraf/pull/794): Add service reload ability. Thanks @entertainyou!

### Bugfixes

- [#852](https://github.com/influxdata/telegraf/issues/852): Windows zip package fix.
- [#859](https://github.com/influxdata/telegraf/issues/859): httpjson plugin panic.

## v0.11.0 [2016-03-15]

### Release Notes

### Features

- [#692](https://github.com/influxdata/telegraf/pull/770): Support InfluxDB retention policies.
- [#771](https://github.com/influxdata/telegraf/pull/771): Default timeouts for input plugins. Thanks @PierreF!
- [#758](https://github.com/influxdata/telegraf/pull/758): UDP Listener input plugin, thanks @whatyouhide!
- [#769](https://github.com/influxdata/telegraf/issues/769): httpjson plugin: allow specifying SSL configuration.
- [#735](https://github.com/influxdata/telegraf/pull/735): SNMP Table feature. Thanks @titilambert!
- [#754](https://github.com/influxdata/telegraf/pull/754): docker plugin: adding `docker info` metrics to output. Thanks @titilambert!
- [#788](https://github.com/influxdata/telegraf/pull/788): -input-list and -output-list command-line options. Thanks @ebookbug!
- [#778](https://github.com/influxdata/telegraf/pull/778): Adding a TCP input listener.
- [#797](https://github.com/influxdata/telegraf/issues/797): Provide option for persistent MQTT consumer client sessions.
- [#799](https://github.com/influxdata/telegraf/pull/799): Add number of threads for procstat input plugin. Thanks @titilambert!
- [#776](https://github.com/influxdata/telegraf/pull/776): Add Zookeeper chroot option to kafka_consumer. Thanks @prune998!
- [#811](https://github.com/influxdata/telegraf/pull/811): Add processes plugin for classifying total procs on system. Thanks @titilambert!
- [#235](https://github.com/influxdata/telegraf/issues/235): Add number of users to the `system` input plugin.
- [#826](https://github.com/influxdata/telegraf/pull/826): "kernel" linux plugin for /proc/stat metrics (context switches, interrupts, etc.)
- [#847](https://github.com/influxdata/telegraf/pull/847): `ntpq`: Input plugin for running ntp query executable and gathering metrics.

### Bugfixes

- [#748](https://github.com/influxdata/telegraf/issues/748): Fix sensor plugin split on ":"
- [#722](https://github.com/influxdata/telegraf/pull/722): Librato output plugin fixes. Thanks @chrusty!
- [#745](https://github.com/influxdata/telegraf/issues/745): Fix Telegraf toml parse panic on large config files. Thanks @titilambert!
- [#781](https://github.com/influxdata/telegraf/pull/781): Fix mqtt_consumer username not being set. Thanks @chaton78!
- [#786](https://github.com/influxdata/telegraf/pull/786): Fix mqtt output username not being set. Thanks @msangoi!
- [#773](https://github.com/influxdata/telegraf/issues/773): Fix duplicate measurements in snmp plugin. Thanks @titilambert!
- [#708](https://github.com/influxdata/telegraf/issues/708): packaging: build ARM package
- [#713](https://github.com/influxdata/telegraf/issues/713): packaging: insecure permissions error on log directory
- [#816](https://github.com/influxdata/telegraf/issues/816): Fix phpfpm panic if fcgi endpoint unreachable.
- [#828](https://github.com/influxdata/telegraf/issues/828): fix net_response plugin overwriting host tag.
- [#821](https://github.com/influxdata/telegraf/issues/821): Remove postgres password from server tag. Thanks @menardorama!

## v0.10.4.1

### Release Notes

- Bug in the build script broke deb and rpm packages.

### Bugfixes

- [#750](https://github.com/influxdata/telegraf/issues/750): deb package broken
- [#752](https://github.com/influxdata/telegraf/issues/752): rpm package broken

## v0.10.4 [2016-02-24]

### Release Notes

- The pass/drop parameters have been renamed to fielddrop/fieldpass parameters, to more accurately indicate their purpose.
- There are also now namedrop/namepass parameters for passing/dropping based on the metric _name_.
- Experimental windows builds now available.
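
A sketch of the renamed filter parameters side by side (the plugin and patterns are illustrative):

```
[[inputs.mem]]
  fieldpass = ["available*", "used*"] # formerly "pass"
  fielddrop = ["total"]               # formerly "drop"
  namepass  = ["mem*"]                # new: filter on the measurement name
```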

### Features

- [#727](https://github.com/influxdata/telegraf/pull/727): riak input, thanks @jcoene!
- [#694](https://github.com/influxdata/telegraf/pull/694): DNS Query input, thanks @mjasion!
- [#724](https://github.com/influxdata/telegraf/pull/724): username matching for procstat input, thanks @zorel!
- [#736](https://github.com/influxdata/telegraf/pull/736): Ignore dummy filesystems from disk plugin. Thanks @PierreF!
- [#737](https://github.com/influxdata/telegraf/pull/737): Support multiple fields for statsd input. Thanks @mattheath!

### Bugfixes

- [#701](https://github.com/influxdata/telegraf/pull/701): output write count shouldn't print in quiet mode.
- [#746](https://github.com/influxdata/telegraf/pull/746): httpjson plugin: Fix HTTP GET parameters.

## v0.10.3 [2016-02-18]

### Release Notes

- Users of the `exec` and `kafka_consumer` (and the new `nats_consumer` and `mqtt_consumer` plugins) can now specify the incoming data format that they would like to parse. Currently supports: "json", "influx", and "graphite".
- Users of message broker and file output plugins can now choose what data format they would like to output. Currently supports: "influx" and "graphite".
- More info on parsing _incoming_ data formats can be found [here](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md)
- More info on serializing _outgoing_ data formats can be found [here](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md)
- Telegraf now has an option `flush_buffer_when_full` that will flush the metric buffer whenever it fills up for each output, rather than dropping points and only flushing on a set time interval. This will default to `true` and is in the `[agent]` config section.
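
A sketch combining these options (the exec command path is a placeholder):

```
[agent]
  flush_buffer_when_full = true # default: flush instead of dropping points

[[inputs.exec]]
  command = "/usr/local/bin/my_metrics.sh" # hypothetical collector script
  data_format = "json"

[[outputs.file]]
  files = ["stdout"]
  data_format = "graphite"
```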

### Features

- [#652](https://github.com/influxdata/telegraf/pull/652): CouchDB Input Plugin. Thanks @codehate!
- [#655](https://github.com/influxdata/telegraf/pull/655): Support parsing arbitrary data formats. Currently limited to kafka_consumer and exec inputs.
- [#671](https://github.com/influxdata/telegraf/pull/671): Dovecot input plugin. Thanks @mikif70!
- [#680](https://github.com/influxdata/telegraf/pull/680): NATS consumer input plugin. Thanks @netixen!
- [#676](https://github.com/influxdata/telegraf/pull/676): MQTT consumer input plugin.
- [#683](https://github.com/influxdata/telegraf/pull/683): PostGRES input plugin: add pg_stat_bgwriter. Thanks @menardorama!
- [#679](https://github.com/influxdata/telegraf/pull/679): File/stdout output plugin.
- [#679](https://github.com/influxdata/telegraf/pull/679): Support for arbitrary output data formats.
- [#695](https://github.com/influxdata/telegraf/pull/695): raindrops input plugin. Thanks @burdandrei!
- [#650](https://github.com/influxdata/telegraf/pull/650): net_response input plugin. Thanks @titilambert!
- [#699](https://github.com/influxdata/telegraf/pull/699): Flush based on buffer size rather than time.
- [#682](https://github.com/influxdata/telegraf/pull/682): Mesos input plugin. Thanks @tripledes!

### Bugfixes

- [#443](https://github.com/influxdata/telegraf/issues/443): Fix Ping command timeout parameter on Linux.
- [#662](https://github.com/influxdata/telegraf/pull/667): Change `[tags]` to `[global_tags]` to fix multiple-plugin tags bug.
- [#642](https://github.com/influxdata/telegraf/issues/642): Riemann output plugin issues.
- [#394](https://github.com/influxdata/telegraf/issues/394): Support HTTP POST. Thanks @gabelev!
- [#715](https://github.com/influxdata/telegraf/pull/715): Fix influxdb precision config panic. Thanks @netixen!

## v0.10.2 [2016-02-04]

### Release Notes

- Statsd timing measurements are now aggregated into a single measurement with fields.
- Graphite output now inserts tags into the bucket in alphabetical order.
- Normalized TLS/SSL support for output plugins: MQTT, AMQP, Kafka.
- `verify_ssl` config option was removed from Kafka because it was actually doing the opposite of what it claimed to do (yikes). It's been replaced by `insecure_skip_verify`.
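
The renamed option in context (broker address and topic are placeholders):

```
[[outputs.kafka]]
  brokers = ["localhost:9092"]
  topic = "telegraf"
  insecure_skip_verify = false # replaces verify_ssl; the name now matches the behavior
```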

### Features

- [#575](https://github.com/influxdata/telegraf/pull/575): Support for collecting Windows Performance Counters. Thanks @TheFlyingCorpse!
- [#564](https://github.com/influxdata/telegraf/issues/564): features for plugin writing simplification. Internal metric data type.
- [#603](https://github.com/influxdata/telegraf/pull/603): Aggregate statsd timing measurements into fields. Thanks @marcinbunsch!
- [#601](https://github.com/influxdata/telegraf/issues/601): Warn when overwriting cached metrics.
- [#614](https://github.com/influxdata/telegraf/pull/614): PowerDNS input plugin. Thanks @Kasen!
- [#617](https://github.com/influxdata/telegraf/pull/617): exec plugin: parse influx line protocol in addition to JSON.
- [#628](https://github.com/influxdata/telegraf/pull/628): Windows perf counters: pre-vista support.

### Bugfixes

- [#595](https://github.com/influxdata/telegraf/issues/595): graphite output should include tags to separate duplicate measurements.
- [#599](https://github.com/influxdata/telegraf/issues/599): datadog plugin tags not working.
- [#600](https://github.com/influxdata/telegraf/issues/600): datadog measurement/field name parsing is wrong.
- [#602](https://github.com/influxdata/telegraf/issues/602): Fix statsd field name templating.
- [#612](https://github.com/influxdata/telegraf/pull/612): Docker input panic fix if stats received are nil.
- [#634](https://github.com/influxdata/telegraf/pull/634): Properly set host headers in httpjson. Thanks @reginaldosousa!

## v0.10.1 [2016-01-27]

### Release Notes

- Telegraf now keeps a fixed-length buffer of metrics per-output. This buffer defaults to 10,000 metrics, and is adjustable. The buffer is cleared when a successful write to that output occurs.
- The docker plugin has been significantly overhauled to add more metrics and allow for docker-machine (incl OSX) support. [See the readme](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/docker/README.md) for the latest measurements, fields, and tags. There is also now support for specifying a docker endpoint to get metrics from.

### Features

- [#509](https://github.com/influxdata/telegraf/pull/509): Flatten JSON arrays with indices. Thanks @psilva261!
- [#512](https://github.com/influxdata/telegraf/pull/512): Python 3 build script, add lsof dep to package. Thanks @Ormod!
- [#475](https://github.com/influxdata/telegraf/pull/475): Add response time to httpjson plugin. Thanks @titilambert!
- [#519](https://github.com/influxdata/telegraf/pull/519): Added a sensors input based on lm-sensors. Thanks @md14454!
- [#467](https://github.com/influxdata/telegraf/issues/467): Add option to disable statsd measurement name conversion.
- [#534](https://github.com/influxdata/telegraf/pull/534): NSQ input plugin. Thanks @allingeek!
- [#494](https://github.com/influxdata/telegraf/pull/494): Graphite output plugin. Thanks @titilambert!
- AMQP SSL support. Thanks @ekini!
- [#539](https://github.com/influxdata/telegraf/pull/539): Reload config on SIGHUP. Thanks @titilambert!
- [#522](https://github.com/influxdata/telegraf/pull/522): Phusion passenger input plugin. Thanks @kureikain!
- [#541](https://github.com/influxdata/telegraf/pull/541): Kafka output TLS cert support. Thanks @Ormod!
- [#551](https://github.com/influxdata/telegraf/pull/551): Statsd UDP read packet size now defaults to 1500 bytes, and is configurable.
- [#552](https://github.com/influxdata/telegraf/pull/552): Support for collection interval jittering.
- [#484](https://github.com/influxdata/telegraf/issues/484): Include usage percent with procstat metrics.
- [#553](https://github.com/influxdata/telegraf/pull/553): Amazon CloudWatch output. Thanks @skwong2!
- [#503](https://github.com/influxdata/telegraf/pull/503): Support docker endpoint configuration.
- [#563](https://github.com/influxdata/telegraf/pull/563): Docker plugin overhaul.
- [#285](https://github.com/influxdata/telegraf/issues/285): Fixed-size buffer of points.
- [#546](https://github.com/influxdata/telegraf/pull/546): SNMP Input plugin. Thanks @titilambert!
- [#589](https://github.com/influxdata/telegraf/pull/589): Microsoft SQL Server input plugin. Thanks @zensqlmonitor!
- [#573](https://github.com/influxdata/telegraf/pull/573): Github webhooks consumer input. Thanks @jackzampolin!
- [#471](https://github.com/influxdata/telegraf/pull/471): httpjson request headers. Thanks @asosso!

### Bugfixes

- [#506](https://github.com/influxdata/telegraf/pull/506): Ping input doesn't return response time metric when timeout. Thanks @titilambert!
- [#508](https://github.com/influxdata/telegraf/pull/508): Fix prometheus cardinality issue with the `net` plugin.
- [#499](https://github.com/influxdata/telegraf/issues/499) & [#502](https://github.com/influxdata/telegraf/issues/502): php fpm unix socket and other fixes, thanks @kureikain!
- [#543](https://github.com/influxdata/telegraf/issues/543): Statsd Packet size sometimes truncated.
- [#440](https://github.com/influxdata/telegraf/issues/440): Don't query filtered devices for disk stats.
- [#463](https://github.com/influxdata/telegraf/issues/463): Docker plugin not working on AWS Linux.
- [#568](https://github.com/influxdata/telegraf/issues/568): Multiple output race condition.
- [#585](https://github.com/influxdata/telegraf/pull/585): Log stack trace and continue on Telegraf panic. Thanks @wutaizeng!

## v0.10.0 [2016-01-12]

### Release Notes

- Linux packages have been taken out of `opt`; the binary is now in `/usr/bin` and configuration files are in `/etc/telegraf`.
- **breaking change** `plugins` have been renamed to `inputs`. This was done because `plugins` is too generic, as there are now also "output plugins", and will likely be "aggregator plugins" and "filter plugins" in the future. Additionally, `inputs/` and `outputs/` directories have been placed in the root-level `plugins/` directory.
- **breaking change** the `io` plugin has been renamed `diskio`.
- **breaking change** plugin measurements aggregated into a single measurement.
- **breaking change** `jolokia` plugin: must use global tag/drop/pass parameters for configuration.
- **breaking change** `twemproxy` plugin: `prefix` option removed.
- **breaking change** `procstat` cpu measurements are now prepended with `cpu_time_` instead of only `cpu_`.
- **breaking change** some command-line flags have been renamed to separate words: `-configdirectory` -> `-config-directory`, `-filter` -> `-input-filter`, `-outputfilter` -> `-output-filter`.
- The prometheus plugin schema has not been changed (measurements have not been aggregated).

### Packaging change note:

RHEL/CentOS users upgrading from 0.2.x to 0.10.0 will probably have their configurations overwritten by the upgrade. There is a backup stored at /etc/telegraf/telegraf.conf.$(date +%s).backup.

### Features

- Plugin measurements aggregated into a single measurement.
- Added ability to specify per-plugin tags.
- Added ability to specify per-plugin measurement suffix and prefix (`name_prefix` and `name_suffix`).
- Added ability to override base plugin measurement name (`name_override`).
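
A sketch of the new naming and tagging options on a single input (all values are illustrative):

```
[[inputs.cpu]]
  name_override = "host_cpu" # replace the base measurement name entirely
  # or keep the base name and decorate it instead:
  # name_prefix = "dev_"
  # name_suffix = "_east"
  [inputs.cpu.tags]
    region = "us-east-1" # per-plugin tag added to every measurement
```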

### Bugfixes

## v0.2.5 [unreleased]

### Features

- [#427](https://github.com/influxdata/telegraf/pull/427): zfs plugin: pool stats added. Thanks @allenpetersen!
- [#428](https://github.com/influxdata/telegraf/pull/428): Amazon Kinesis output. Thanks @jimmystewpot!
- [#449](https://github.com/influxdata/telegraf/pull/449): influxdb plugin, thanks @mark-rushakoff

### Bugfixes

- [#430](https://github.com/influxdata/telegraf/issues/430): Network statistics removed in elasticsearch 2.1. Thanks @jipperinbham!
- [#452](https://github.com/influxdata/telegraf/issues/452): Elasticsearch open file handles error. Thanks @jipperinbham!

## v0.2.4 [2015-12-08]

### Features

- [#412](https://github.com/influxdata/telegraf/pull/412): Additional memcached stats. Thanks @mgresser!
- [#410](https://github.com/influxdata/telegraf/pull/410): Additional redis metrics. Thanks @vlaadbrain!
- [#414](https://github.com/influxdata/telegraf/issues/414): Jolokia plugin auth parameters.
- [#415](https://github.com/influxdata/telegraf/issues/415): memcached plugin: support unix sockets.
- [#418](https://github.com/influxdata/telegraf/pull/418): memcached plugin additional unit tests.
- [#408](https://github.com/influxdata/telegraf/pull/408): MailChimp plugin.
- [#382](https://github.com/influxdata/telegraf/pull/382): Add system wide network protocol stats to `net` plugin.
- [#401](https://github.com/influxdata/telegraf/pull/401): Support pass/drop/tagpass/tagdrop for outputs. Thanks @oldmantaiter!

### Bugfixes

- [#405](https://github.com/influxdata/telegraf/issues/405): Prometheus output cardinality issue.
- [#388](https://github.com/influxdata/telegraf/issues/388): Fix collection hangup when cpu times decrement.
## v0.2.3 [2015-11-30]
|
||||
|
||||
### Release Notes
|
||||
- **breaking change** The `kafka` plugin has been renamed to `kafka_consumer`.
|
||||
and most of the config option names have changed.
|
||||
This only affects the kafka consumer _plugin_ (not the
|
||||
output). There were a number of problems with the kafka plugin that led to it
|
||||
only collecting data once at startup, so the kafka plugin was basically non-
|
||||
functional.
|
||||
- Plugins can now be specified as a list, and multiple plugin instances of the
|
||||
same type can be specified, like this:

```
[[inputs.cpu]]
percpu = false
totalcpu = true

[[inputs.cpu]]
percpu = true
totalcpu = false
drop = ["cpu_time"]
```

- Riemann output added
- Aerospike plugin: tag changed from `host` -> `aerospike_host`

### Features
- [#379](https://github.com/influxdata/telegraf/pull/379): Riemann output, thanks @allenj!
- [#375](https://github.com/influxdata/telegraf/pull/375): kafka_consumer service plugin.
- [#392](https://github.com/influxdata/telegraf/pull/392): Procstat plugin can now accept pgrep -f pattern, thanks @ecarreras!
- [#383](https://github.com/influxdata/telegraf/pull/383): Specify plugins as a list.
- [#354](https://github.com/influxdata/telegraf/pull/354): Add ability to specify multiple metrics in one statsd line. Thanks @MerlinDMC!

### Bugfixes
- [#371](https://github.com/influxdata/telegraf/issues/371): Kafka consumer plugin not functioning.
- [#389](https://github.com/influxdata/telegraf/issues/389): NaN value panic

## v0.2.2 [2015-11-18]

### Release Notes
- 0.2.1 has a bug where all lists within plugins get duplicated, including
lists of servers/URLs. 0.2.2 is being released solely to fix that bug.

### Bugfixes
- [#377](https://github.com/influxdata/telegraf/pull/377): Fix for duplicate slices in inputs.

## v0.2.1 [2015-11-16]

### Release Notes
- Telegraf will no longer use docker-compose for "long" unit tests; it has been
changed to just run docker commands in the Makefile. See `make docker-run` and
`make docker-kill`. `make test` will still run all unit tests with docker.
- Long unit tests are now run in CircleCI, with docker & race detector
- Redis plugin tag has changed from `host` to `server`
- HAProxy plugin tag has changed from `host` to `server`
- UDP output now supported
- Telegraf will now compile on FreeBSD
- Users can now specify outputs as lists, specifying multiple outputs of the
same type (see the sketch after this list).
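
A minimal sketch of two outputs of the same type, reusing the list syntax from
the `[[inputs.cpu]]` example above; the URLs and option names are placeholders,
not taken from the changelog:

```toml
[[outputs.influxdb]]
  urls = ["http://localhost:8086"]
  database = "telegraf"

[[outputs.influxdb]]
  # a second instance of the same output type, writing to a different server
  urls = ["http://192.168.1.10:8086"]
  database = "telegraf"
```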

### Features
- [#325](https://github.com/influxdata/telegraf/pull/325): NSQ output. Thanks @jrxFive!
- [#318](https://github.com/influxdata/telegraf/pull/318): Prometheus output. Thanks @oldmantaiter!
- [#338](https://github.com/influxdata/telegraf/pull/338): Restart Telegraf on package upgrade. Thanks @linsomniac!
- [#337](https://github.com/influxdata/telegraf/pull/337): Jolokia plugin, thanks @saiello!
- [#350](https://github.com/influxdata/telegraf/pull/350): Amon output.
- [#365](https://github.com/influxdata/telegraf/pull/365): Twemproxy plugin by @codeb2cc
- [#317](https://github.com/influxdata/telegraf/issues/317): ZFS plugin, thanks @cornerot!
- [#364](https://github.com/influxdata/telegraf/pull/364): Support InfluxDB UDP output.
- [#370](https://github.com/influxdata/telegraf/pull/370): Support specifying multiple outputs, as lists.
- [#372](https://github.com/influxdata/telegraf/pull/372): Remove gosigar and update go-dockerclient for FreeBSD support. Thanks @MerlinDMC!

### Bugfixes
- [#331](https://github.com/influxdata/telegraf/pull/331): Don't overwrite host tag in redis plugin.
- [#336](https://github.com/influxdata/telegraf/pull/336): Mongodb plugin should take 2 measurements.
- [#351](https://github.com/influxdata/telegraf/issues/317): Fix continual "CREATE DATABASE" in writes
- [#360](https://github.com/influxdata/telegraf/pull/360): Apply prefix before ShouldPass check. Thanks @sotfo!

## v0.2.0 [2015-10-27]

### Release Notes
- The -test flag will now only output 2 collections for plugins that need it
- There is a new agent configuration option: `flush_interval`. This option tells
Telegraf how often to flush data to InfluxDB and other output sinks. For example,
users can set `interval = "2s"` and `flush_interval = "60s"` for Telegraf to
collect data every 2 seconds, and flush every 60 seconds (see the sketch after
this list).
- `precision` and `utc` are no longer valid agent config values. `precision` has
moved to the `influxdb` output config, where it will continue to default to "s".
- debug and test output will now print the raw line-protocol string
- Telegraf will now, by default, round the collection interval to the nearest
even interval. This means that `interval="10s"` will collect every :00, :10, etc.
To ease scale concerns, flushing will be "jittered" by a random amount so that
all Telegraf instances do not flush at the same time. Both of these options can
be controlled via the `round_interval` and `flush_jitter` config options.
- Telegraf will now retry metric flushes twice
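
A minimal sketch of these agent options together, using the example values from
the notes above (the `flush_jitter` value is an assumption for illustration):

```toml
[agent]
  interval = "2s"        # collect data every 2 seconds
  flush_interval = "60s" # flush to outputs every 60 seconds
  round_interval = true  # align collections to :00, :10, etc.
  flush_jitter = "5s"    # randomly delay flushes by up to this amount
```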

### Features
- [#205](https://github.com/influxdata/telegraf/issues/205): Include per-db redis keyspace info
- [#226](https://github.com/influxdata/telegraf/pull/226): Add timestamps to points in Kafka/AMQP outputs. Thanks @ekini
- [#90](https://github.com/influxdata/telegraf/issues/90): Add Docker labels to tags in docker plugin
- [#223](https://github.com/influxdata/telegraf/pull/223): Add port tag to nginx plugin. Thanks @neezgee!
- [#227](https://github.com/influxdata/telegraf/pull/227): Add command intervals to exec plugin. Thanks @jpalay!
- [#241](https://github.com/influxdata/telegraf/pull/241): MQTT Output. Thanks @shirou!
- Memory plugin: cached and buffered measurements re-added
- Logging: additional logging for each collection interval, track the number
of metrics collected and from how many inputs.
- [#240](https://github.com/influxdata/telegraf/pull/240): procstat plugin, thanks @ranjib!
- [#244](https://github.com/influxdata/telegraf/pull/244): netstat plugin, thanks @shirou!
- [#262](https://github.com/influxdata/telegraf/pull/262): zookeeper plugin, thanks @jrxFive!
- [#237](https://github.com/influxdata/telegraf/pull/237): statsd service plugin, thanks @sparrc
- [#273](https://github.com/influxdata/telegraf/pull/273): puppet agent plugin, thanks @jrxFive!
- [#280](https://github.com/influxdata/telegraf/issues/280): Use InfluxDB client v2.
- [#281](https://github.com/influxdata/telegraf/issues/281): Eliminate need to deep copy Batch Points.
- [#286](https://github.com/influxdata/telegraf/issues/286): bcache plugin, thanks @cornerot!
- [#287](https://github.com/influxdata/telegraf/issues/287): Batch AMQP output, thanks @ekini!
- [#301](https://github.com/influxdata/telegraf/issues/301): Collect on even intervals
- [#298](https://github.com/influxdata/telegraf/pull/298): Support retrying output writes
- [#300](https://github.com/influxdata/telegraf/issues/300): aerospike plugin. Thanks @oldmantaiter!
- [#322](https://github.com/influxdata/telegraf/issues/322): Librato output. Thanks @jipperinbham!

### Bugfixes
- [#228](https://github.com/influxdata/telegraf/pull/228): New version of package will replace old one. Thanks @ekini!
- [#232](https://github.com/influxdata/telegraf/pull/232): Fix bashism run during deb package installation. Thanks @yankcrime!
- [#261](https://github.com/influxdata/telegraf/issues/260): RabbitMQ panics if wrong credentials given. Thanks @ekini!
- [#245](https://github.com/influxdata/telegraf/issues/245): Document Exec plugin example. Thanks @ekini!
- [#264](https://github.com/influxdata/telegraf/issues/264): logrotate config file fixes. Thanks @linsomniac!
- [#290](https://github.com/influxdata/telegraf/issues/290): Fix some plugins sending their values as strings.
- [#289](https://github.com/influxdata/telegraf/issues/289): Fix accumulator panic on nil tags.
- [#302](https://github.com/influxdata/telegraf/issues/302): Fix `[tags]` getting applied, thanks @gotyaoi!

## v0.1.9 [2015-09-22]

### Release Notes
- InfluxDB output config change: `url` is now `urls`, and is a list (see the
sketch after this list). Config files will still be backwards compatible if
only `url` is specified.
- The -test flag will now output two metric collections
- Support for filtering telegraf outputs on the CLI -- Telegraf will now
allow filtering of output sinks on the command-line using the `-outputfilter`
flag, much like how the `-filter` flag works for inputs.
- Support for filtering on config-file creation -- Telegraf now supports
filtering when using the `-sample-config` command. You can now run
`telegraf -sample-config -filter cpu -outputfilter influxdb` to get a config
file with only the cpu plugin defined, and the influxdb output defined.
- **Breaking Change**: The CPU collection plugin has been refactored to fix some
bugs and outdated dependency issues. At the same time, I also decided to fix
a naming consistency issue, so cpu_percentageIdle will become cpu_usage_idle.
Also, all CPU time measurements now have it indicated in their name, so cpu_idle
will become cpu_time_idle. Additionally, cpu_time measurements are going to be
dropped in the default config.
- **Breaking Change**: The memory plugin has been refactored and some measurements
have been renamed for consistency. Some measurements have also been removed from
the output. They are still being collected by gopsutil, and could easily be
re-added in a "verbose" mode if there is demand for it.
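
A minimal sketch of the `urls` change in the influxdb output; the section name
follows the later examples in this changelog, and the server addresses are
placeholders:

```toml
[outputs.influxdb]
  # previously: url = "http://localhost:8086"
  urls = ["http://localhost:8086", "http://influxdb-2:8086"]
```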

### Features
- [#143](https://github.com/influxdata/telegraf/issues/143): InfluxDB clustering support
- [#181](https://github.com/influxdata/telegraf/issues/181): Makefile GOBIN support. Thanks @Vye!
- [#203](https://github.com/influxdata/telegraf/pull/200): AMQP output. Thanks @ekini!
- [#182](https://github.com/influxdata/telegraf/pull/182): OpenTSDB output. Thanks @rplessl!
- [#187](https://github.com/influxdata/telegraf/pull/187): Retry output sink connections on startup.
- [#220](https://github.com/influxdata/telegraf/pull/220): Add port tag to apache plugin. Thanks @neezgee!
- [#217](https://github.com/influxdata/telegraf/pull/217): Add filtering for output sinks
and filtering when specifying a config file.

### Bugfixes
- [#170](https://github.com/influxdata/telegraf/issues/170): Systemd support
- [#175](https://github.com/influxdata/telegraf/issues/175): Set write precision before gathering metrics
- [#178](https://github.com/influxdata/telegraf/issues/178): redis plugin, multiple server thread hang bug
- Fix net plugin on darwin
- [#84](https://github.com/influxdata/telegraf/issues/84): Fix docker plugin on CentOS. Thanks @neezgee!
- [#189](https://github.com/influxdata/telegraf/pull/189): Fix mem_used_perc. Thanks @mced!
- [#192](https://github.com/influxdata/telegraf/issues/192): Increase compatibility of postgresql plugin. Now supports versions 8.1+
- [#203](https://github.com/influxdata/telegraf/issues/203): EL5 rpm support. Thanks @ekini!
- [#206](https://github.com/influxdata/telegraf/issues/206): CPU steal/guest values wrong on linux.
- [#212](https://github.com/influxdata/telegraf/issues/212): Add hashbang to postinstall script. Thanks @ekini!
- [#212](https://github.com/influxdata/telegraf/issues/212): Fix makefile warning. Thanks @ekini!

## v0.1.8 [2015-09-04]

### Release Notes
- Telegraf will now write data in UTC at second precision by default
- Now using Go 1.5 to build telegraf

### Features
- [#150](https://github.com/influxdata/telegraf/pull/150): Add Host Uptime metric to system plugin
- [#158](https://github.com/influxdata/telegraf/pull/158): Apache Plugin. Thanks @KPACHbIuLLIAnO4
- [#159](https://github.com/influxdata/telegraf/pull/159): Use second precision for InfluxDB writes
- [#165](https://github.com/influxdata/telegraf/pull/165): Add additional metrics to mysql plugin. Thanks @nickscript0
- [#162](https://github.com/influxdata/telegraf/pull/162): Write UTC by default, provide option
- [#166](https://github.com/influxdata/telegraf/pull/166): Upload binaries to S3
- [#169](https://github.com/influxdata/telegraf/pull/169): Ping plugin

### Bugfixes

## v0.1.7 [2015-08-28]

### Features
- [#38](https://github.com/influxdata/telegraf/pull/38): Kafka output producer.
- [#133](https://github.com/influxdata/telegraf/pull/133): Add plugin.Gather error logging. Thanks @nickscript0!
- [#136](https://github.com/influxdata/telegraf/issues/136): Add a -usage flag for printing usage of a single plugin.
- [#137](https://github.com/influxdata/telegraf/issues/137): Memcached: fix when a value contains a space
- [#138](https://github.com/influxdata/telegraf/issues/138): MySQL server address tag.
- [#142](https://github.com/influxdata/telegraf/pull/142): Add Description and SampleConfig funcs to output interface
- Indent the toml config file for readability

### Bugfixes
- [#128](https://github.com/influxdata/telegraf/issues/128): system_load measurement missing.
- [#129](https://github.com/influxdata/telegraf/issues/129): Latest pkg url fix.
- [#131](https://github.com/influxdata/telegraf/issues/131): Fix memory reporting on linux & darwin. Thanks @subhachandrachandra!
- [#140](https://github.com/influxdata/telegraf/issues/140): Memory plugin prec->perc typo fix. Thanks @brunoqc!

## v0.1.6 [2015-08-20]

### Features
- [#112](https://github.com/influxdata/telegraf/pull/112): Datadog output. Thanks @jipperinbham!
- [#116](https://github.com/influxdata/telegraf/pull/116): Use godep to vendor all dependencies
- [#120](https://github.com/influxdata/telegraf/pull/120): Httpjson plugin. Thanks @jpalay & @alvaromorales!

### Bugfixes
- [#113](https://github.com/influxdata/telegraf/issues/113): Update README with Telegraf/InfluxDB compatibility
- [#118](https://github.com/influxdata/telegraf/pull/118): Fix for disk usage stats in Windows. Thanks @srfraser!
- [#122](https://github.com/influxdata/telegraf/issues/122): Fix for DiskUsage segv fault. Thanks @srfraser!
- [#126](https://github.com/influxdata/telegraf/issues/126): Nginx plugin not catching net.SplitHostPort error

## v0.1.5 [2015-08-13]

### Features
- [#54](https://github.com/influxdata/telegraf/pull/54): MongoDB plugin. Thanks @jipperinbham!
- [#55](https://github.com/influxdata/telegraf/pull/55): Elasticsearch plugin. Thanks @brocaar!
- [#71](https://github.com/influxdata/telegraf/pull/71): HAProxy plugin. Thanks @kureikain!
- [#72](https://github.com/influxdata/telegraf/pull/72): Adding TokuDB metrics to MySQL. Thanks @vadimtk!
- [#73](https://github.com/influxdata/telegraf/pull/73): RabbitMQ plugin. Thanks @ianunruh!
- [#77](https://github.com/influxdata/telegraf/issues/77): Automatically create database.
- [#79](https://github.com/influxdata/telegraf/pull/56): Nginx plugin. Thanks @codeb2cc!
- [#86](https://github.com/influxdata/telegraf/pull/86): Lustre2 plugin. Thanks @srfraser!
- [#91](https://github.com/influxdata/telegraf/pull/91): Unit testing
- [#92](https://github.com/influxdata/telegraf/pull/92): Exec plugin. Thanks @alvaromorales!
- [#98](https://github.com/influxdata/telegraf/pull/98): LeoFS plugin. Thanks @mocchira!
- [#103](https://github.com/influxdata/telegraf/pull/103): Filter by metric tags. Thanks @srfraser!
- [#106](https://github.com/influxdata/telegraf/pull/106): Options to filter plugins on startup. Thanks @zepouet!
- [#107](https://github.com/influxdata/telegraf/pull/107): Multiple outputs beyond influxdb. Thanks @jipperinbham!
- [#108](https://github.com/influxdata/telegraf/issues/108): Support setting per-CPU and total-CPU gathering.
- [#111](https://github.com/influxdata/telegraf/pull/111): Report CPU Usage in cpu plugin. Thanks @jpalay!

### Bugfixes
- [#85](https://github.com/influxdata/telegraf/pull/85): Fix GetLocalHost testutil function for mac users
- [#89](https://github.com/influxdata/telegraf/pull/89): go fmt fixes
- [#94](https://github.com/influxdata/telegraf/pull/94): Fix for issue #93, explicitly call sarama.v1 -> sarama
- [#101](https://github.com/influxdata/telegraf/issues/101): switch back from master branch if building locally
- [#99](https://github.com/influxdata/telegraf/issues/99): update integer output to new InfluxDB line protocol format

## v0.1.4 [2015-07-09]

### Features
- [#56](https://github.com/influxdata/telegraf/pull/56): Update README for Kafka plugin. Thanks @EmilS!

### Bugfixes
- [#50](https://github.com/influxdata/telegraf/pull/50): Fix init.sh script to use telegraf directory. Thanks @jseriff!
- [#52](https://github.com/influxdata/telegraf/pull/52): Update CHANGELOG to reference updated directory. Thanks @benfb!

## v0.1.3 [2015-07-05]

### Features
- [#35](https://github.com/influxdata/telegraf/pull/35): Add Kafka plugin. Thanks @EmilS!
- [#47](https://github.com/influxdata/telegraf/pull/47): Add RethinkDB plugin. Thanks @jipperinbham!

### Bugfixes
- [#45](https://github.com/influxdata/telegraf/pull/45): Skip disk tags that don't have a value. Thanks @jhofeditz!
- [#43](https://github.com/influxdata/telegraf/pull/43): Fix bug in MySQL plugin. Thanks @marcosnils!

## v0.1.2 [2015-07-01]

### Features
- [#12](https://github.com/influxdata/telegraf/pull/12): Add Linux/ARM to the list of built binaries. Thanks @voxxit!
- [#14](https://github.com/influxdata/telegraf/pull/14): Clarify the S3 buckets that Telegraf is pushed to.
- [#16](https://github.com/influxdata/telegraf/pull/16): Convert Redis to use URI, support Redis AUTH. Thanks @jipperinbham!
- [#21](https://github.com/influxdata/telegraf/pull/21): Add memcached plugin. Thanks @Yukki!

### Bugfixes
- [#13](https://github.com/influxdata/telegraf/pull/13): Fix the packaging script.
- [#19](https://github.com/influxdata/telegraf/pull/19): Add host name to metric tags. Thanks @sherifzain!
- [#20](https://github.com/influxdata/telegraf/pull/20): Fix race condition with accumulator mutex. Thanks @nkatsaros!
- [#23](https://github.com/influxdata/telegraf/pull/23): Change name of folder for packages. Thanks @colinrymer!
- [#32](https://github.com/influxdata/telegraf/pull/32): Fix spelling of memoory -> memory. Thanks @tylernisonoff!

## v0.1.1 [2015-06-19]

### Release Notes

This is the initial release of Telegraf.

# CONTRIBUTING.md (new file)

## Steps for Contributing:

1. [Sign the CLA](http://influxdb.com/community/cla.html)
1. Make changes or write a plugin (see below for details)
1. Add your plugin to `plugins/inputs/all/all.go` or `plugins/outputs/all/all.go`
1. If your plugin requires a new Go package,
[add it](https://github.com/influxdata/telegraf/blob/master/CONTRIBUTING.md#adding-a-dependency)
1. Write a README for your plugin. If it's an input plugin, it should be structured
like the [input example here](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/EXAMPLE_README.md).
Output plugin READMEs are less structured,
but any information you can provide on how the data will look is appreciated.
See the [OpenTSDB output](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/opentsdb)
for a good example.

## GoDoc

Public interfaces for inputs, outputs, metrics, and the accumulator can be found
on the GoDoc:

[GoDoc](https://godoc.org/github.com/influxdata/telegraf)

## Sign the CLA

Before we can merge a pull request, you will need to sign the CLA,
which can be found [on our website](http://influxdb.com/community/cla.html).

## Adding a dependency

Assuming you can already build the project, run these in the telegraf directory:

1. `go get github.com/sparrc/gdm`
1. `gdm restore`
1. `gdm save`

## Input Plugins

This section is for developers who want to create new collection inputs.
Telegraf is entirely plugin driven. This interface allows for operators to
pick and choose what is gathered and makes it easy for developers
to create new ways of generating metrics.

Plugin authorship is kept as simple as possible to encourage people to develop
and submit new inputs.

### Input Plugin Guidelines

* A plugin must conform to the `telegraf.Input` interface.
* Input Plugins should call `inputs.Add` in their `init` function to register themselves.
See below for a quick example.
* Input Plugins must be added to the
`github.com/influxdata/telegraf/plugins/inputs/all/all.go` file.
* The `SampleConfig` function should return valid toml that describes how the
plugin can be configured. This is included in `telegraf -sample-config`.
* The `Description` function should say in one line what this plugin does.

Let's say you've written a plugin that emits metrics about processes on the
current host.

### Input Plugin Example

```go
package simple

// simple.go

import (
    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/plugins/inputs"
)

type Simple struct {
    Ok bool
}

func (s *Simple) Description() string {
    return "a demo plugin"
}

func (s *Simple) SampleConfig() string {
    return "ok = true # indicate if everything is fine"
}

func (s *Simple) Gather(acc telegraf.Accumulator) error {
    if s.Ok {
        acc.Add("state", "pretty good", nil)
    } else {
        acc.Add("state", "not great", nil)
    }

    return nil
}

func init() {
    inputs.Add("simple", func() telegraf.Input { return &Simple{} })
}
```

## Input Plugins Accepting Arbitrary Data Formats

Some input plugins (such as
[exec](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/exec))
accept arbitrary input data formats. An overview of these data formats can
be found
[here](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md).

In order to enable this, you must specify a `SetParser(parser parsers.Parser)`
function on the plugin object (see the exec plugin for an example), as well as
defining `parser` as a field of the object.

You can then utilize the parser internally in your plugin, parsing data as you
see fit. Telegraf's configuration layer will take care of instantiating and
creating the `Parser` object.
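
A minimal sketch of that wiring (the plugin name is hypothetical; only the
`SetParser` signature and the `parser` field come from the description above):

```go
package myplugin

import "github.com/influxdata/telegraf/plugins/parsers"

// MyPlugin is a hypothetical input plugin that accepts arbitrary data formats.
type MyPlugin struct {
    parser parsers.Parser
}

// SetParser is called by Telegraf's configuration layer, which builds the
// Parser from the `data_format` option in this plugin's config section.
func (p *MyPlugin) SetParser(parser parsers.Parser) {
    p.parser = parser
}
```

Inside `Gather`, the plugin can then call `p.parser.Parse(buf)` on whatever raw
bytes it has collected; the `Parser` interface is shown below.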

You should also add the following to your SampleConfig() return:

```toml
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "influx"
```

Below is the `Parser` interface.

```go
// Parser is an interface defining functions that a parser plugin must satisfy.
type Parser interface {
    // Parse takes a byte buffer separated by newlines
    // ie, `cpu.usage.idle 90\ncpu.usage.busy 10`
    // and parses it into telegraf metrics
    Parse(buf []byte) ([]telegraf.Metric, error)

    // ParseLine takes a single string metric
    // ie, "cpu.usage.idle 90"
    // and parses it into a telegraf metric.
    ParseLine(line string) (telegraf.Metric, error)
}
```

And you can view the code
[here](https://github.com/influxdata/telegraf/blob/henrypfhu-master/plugins/parsers/registry.go).

## Service Input Plugins

This section is for developers who want to create new "service" collection
inputs. A service plugin differs from a regular plugin in that it operates
a background service while Telegraf is running. One example would be the `statsd`
plugin, which operates a statsd server.

Service Input Plugins are substantially more complicated than a regular plugin, as they
will require threads and locks to verify data integrity. Service Input Plugins should
be avoided unless there is no way to create their behavior with a regular plugin.

Their interface is quite similar to a regular plugin, with the addition of `Start()`
and `Stop()` methods.
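
Sketched as an interface, assuming the method signatures are exactly the
zero-argument forms described above (check the `telegraf` package source for
the authoritative definition):

```go
// ServiceInput is an Input that also runs a background service.
type ServiceInput interface {
    telegraf.Input

    // Start launches the background service, e.g. opens a listening socket.
    Start() error

    // Stop shuts the service down and releases its resources.
    Stop()
}
```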

### Service Plugin Guidelines

* Same as the `Plugin` guidelines, except that they must conform to the
`inputs.ServiceInput` interface.

## Output Plugins

This section is for developers who want to create a new output sink. Outputs
are created in a similar manner as collection plugins, and their interface has
similar constructs.

### Output Plugin Guidelines

* An output must conform to the `outputs.Output` interface.
* Outputs should call `outputs.Add` in their `init` function to register themselves.
See below for a quick example.
* To be available within Telegraf itself, plugins must add themselves to the
`github.com/influxdata/telegraf/plugins/outputs/all/all.go` file.
* The `SampleConfig` function should return valid toml that describes how the
output can be configured. This is included in `telegraf -sample-config`.
* The `Description` function should say in one line what this output does.

### Output Example

```go
package simpleoutput

// simpleoutput.go

import (
    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/plugins/outputs"
)

type Simple struct {
    Ok bool
}

func (s *Simple) Description() string {
    return "a demo output"
}

func (s *Simple) SampleConfig() string {
    return "url = localhost"
}

func (s *Simple) Connect() error {
    // Make a connection to the URL here
    return nil
}

func (s *Simple) Close() error {
    // Close connection to the URL here
    return nil
}

func (s *Simple) Write(metrics []telegraf.Metric) error {
    for _, metric := range metrics {
        // write `metric` to the output sink here
    }
    return nil
}

func init() {
    outputs.Add("simpleoutput", func() telegraf.Output { return &Simple{} })
}
```

## Output Plugins Writing Arbitrary Data Formats

Some output plugins (such as
[file](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/file))
can write arbitrary output data formats. An overview of these data formats can
be found
[here](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md).

In order to enable this, you must specify a
`SetSerializer(serializer serializers.Serializer)`
function on the plugin object (see the file plugin for an example), as well as
defining `serializer` as a field of the object.

You can then utilize the serializer internally in your plugin, serializing data
before it's written. Telegraf's configuration layer will take care of
instantiating and creating the `Serializer` object.
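
A minimal sketch, mirroring the parser example above (the plugin name is
hypothetical; only the `SetSerializer` signature and the `serializer` field
come from the description):

```go
package myoutput

import "github.com/influxdata/telegraf/plugins/serializers"

// MyOutput is a hypothetical output plugin that writes arbitrary data formats.
type MyOutput struct {
    serializer serializers.Serializer
}

// SetSerializer is called by Telegraf's configuration layer, which builds the
// Serializer from the `data_format` option in this output's config section.
func (o *MyOutput) SetSerializer(serializer serializers.Serializer) {
    o.serializer = serializer
}
```

Each metric can then be serialized in `Write` (for example via
`o.serializer.Serialize(metric)`) before being sent to the sink.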

You should also add the following to your SampleConfig() return:

```toml
## Data format to output.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "influx"
```

## Service Output Plugins

This section is for developers who want to create new "service" outputs. A
service output differs from a regular output in that it operates a background service
while Telegraf is running. One example would be the `prometheus_client` output,
which operates an HTTP server.

Their interface is quite similar to a regular output, with the addition of `Start()`
and `Stop()` methods.
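
Analogous to the service input sketch above (the signatures are assumed to be
the same zero-argument forms):

```go
// ServiceOutput is an Output that also runs a background service.
type ServiceOutput interface {
    telegraf.Output

    Start() error // launch the background service, e.g. an HTTP listener
    Stop()        // shut it down and release its resources
}
```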

### Service Output Guidelines

* Same as the `Output` guidelines, except that they must conform to the
`output.ServiceOutput` interface.

## Unit Tests

### Execute short tests

Execute `make test-short`.

### Execute long tests

As Telegraf collects metrics from several third-party services, it becomes a
difficult task to mock each service, as some of them have complicated protocols
which would take some time to replicate.

To overcome this situation we've decided to use docker containers to provide a
fast and reproducible environment to test those services which require it.
For other situations
(e.g. https://github.com/influxdata/telegraf/blob/master/plugins/inputs/redis/redis_test.go)
a simple mock will suffice.

To execute Telegraf tests follow these simple steps:

- Install docker following [these](https://docs.docker.com/installation/)
instructions
- Execute `make test`

**OSX users**: you will need to install `boot2docker` or `docker-machine`.
The Makefile will assume that you have a `docker-machine` box called `default` to
get the IP address.

### Unit test troubleshooting

Try cleaning up your test environment by executing `make docker-kill` and
re-running the tests.

# Godeps (new file)

github.com/Shopify/sarama 8aadb476e66ca998f2f6bb3c993e9a2daa3666b9
github.com/Sirupsen/logrus 219c8cb75c258c552e999735be6df753ffc7afdc
github.com/amir/raidman 53c1b967405155bfc8758557863bf2e14f814687
github.com/aws/aws-sdk-go 13a12060f716145019378a10e2806c174356b857
github.com/beorn7/perks 3ac7bf7a47d159a033b107610db8a1b6575507a4
github.com/cenkalti/backoff 4dc77674aceaabba2c7e3da25d4c823edfb73f99
github.com/couchbase/go-couchbase cb664315a324d87d19c879d9cc67fda6be8c2ac1
github.com/couchbase/gomemcached a5ea6356f648fec6ab89add00edd09151455b4b2
github.com/couchbase/goutils 5823a0cbaaa9008406021dc5daf80125ea30bba6
github.com/dancannon/gorethink e7cac92ea2bc52638791a021f212145acfedb1fc
github.com/davecgh/go-spew 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d
github.com/docker/engine-api 8924d6900370b4c7e7984be5adc61f50a80d7537
github.com/docker/go-connections f549a9393d05688dff0992ef3efd8bbe6c628aeb
github.com/docker/go-units 5d2041e26a699eaca682e2ea41c8f891e1060444
github.com/eapache/go-resiliency b86b1ec0dd4209a588dc1285cdd471e73525c0b3
github.com/eapache/queue ded5959c0d4e360646dc9e9908cff48666781367
github.com/eclipse/paho.mqtt.golang 0f7a459f04f13a41b7ed752d47944528d4bf9a86
github.com/go-sql-driver/mysql 1fca743146605a172a266e1654e01e5cd5669bee
github.com/gobwas/glob d877f6352135181470c40c73ebb81aefa22115fa
github.com/golang/protobuf 552c7b9542c194800fd493123b3798ef0a832032
github.com/golang/snappy 427fb6fc07997f43afa32f35e850833760e489a7
github.com/gonuts/go-shellquote e842a11b24c6abfb3dd27af69a17f482e4b483c2
github.com/gorilla/context 1ea25387ff6f684839d82767c1733ff4d4d15d0a
github.com/gorilla/mux c9e326e2bdec29039a3761c07bece13133863e1e
github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478
github.com/hpcloud/tail b2940955ab8b26e19d43a43c4da0475dd81bdb56
github.com/influxdata/config b79f6829346b8d6e78ba73544b1e1038f1f1c9da
github.com/influxdata/influxdb e094138084855d444195b252314dfee9eae34cab
github.com/influxdata/toml af4df43894b16e3fd2b788d01bd27ad0776ef2d0
github.com/klauspost/crc32 19b0b332c9e4516a6370a0456e6182c3b5036720
github.com/lib/pq e182dc4027e2ded4b19396d638610f2653295f36
github.com/matttproud/golang_protobuf_extensions d0c3fe89de86839aecf2e0579c40ba3bb336a453
github.com/miekg/dns cce6c130cdb92c752850880fd285bea1d64439dd
github.com/mreiferson/go-snappystream 028eae7ab5c4c9e2d1cb4c4ca1e53259bbe7e504
github.com/naoina/go-stringutil 6b638e95a32d0c1131db0e7fe83775cbea4a0d0b
github.com/nats-io/nats b13fc9d12b0b123ebc374e6b808c6228ae4234a3
github.com/nats-io/nuid 4f84f5f3b2786224e336af2e13dba0a0a80b76fa
github.com/nsqio/go-nsq 0b80d6f05e15ca1930e0c5e1d540ed627e299980
github.com/opencontainers/runc 89ab7f2ccc1e45ddf6485eaa802c35dcf321dfc8
github.com/prometheus/client_golang 18acf9993a863f4c4b40612e19cdd243e7c86831
github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
github.com/prometheus/common e8eabff8812b05acf522b45fdcd725a785188e37
github.com/prometheus/procfs 406e5b7bfd8201a36e2bb5f7bdae0b03380c2ce8
github.com/samuel/go-zookeeper 218e9c81c0dd8b3b18172b2bbfad92cc7d6db55f
github.com/shirou/gopsutil 37d89088411de59a4ef9fc340afa0e89dfcb4ea9
github.com/soniah/gosnmp b1b4f885b12c5dcbd021c5cee1c904110de6db7d
github.com/streadway/amqp b4f3ceab0337f013208d31348b578d83c0064744
github.com/stretchr/testify 1f4a1643a57e798696635ea4c126e9127adb7d3c
github.com/wvanbergen/kafka 46f9a1cf3f670edec492029fadded9c2d9e18866
github.com/wvanbergen/kazoo-go 0f768712ae6f76454f987c3356177e138df258f8
github.com/zensqlmonitor/go-mssqldb ffe5510c6fa5e15e6d983210ab501c815b56b363
golang.org/x/crypto 5dc8cb4b8a8eb076cbb5a06bc3b8682c15bdbbd3
golang.org/x/net 6acef71eb69611914f7a30939ea9f6e194c78172
golang.org/x/text a71fd10341b064c10f4a81ceac72bcf70f26ea34
gopkg.in/dancannon/gorethink.v1 7d1af5be49cb5ecc7b177bf387d232050299d6ef
gopkg.in/fatih/pool.v2 cba550ebf9bce999a02e963296d4bc7a486cb715
gopkg.in/mgo.v2 d90005c5262a3463800497ea5a89aed5fe22c886
gopkg.in/yaml.v2 a83829b6f1293c91addabc89d0571c246397bbf4

# Godeps_windows (new file)

github.com/Microsoft/go-winio 9f57cbbcbcb41dea496528872a4f0e37a4f7ae98
github.com/Shopify/sarama 8aadb476e66ca998f2f6bb3c993e9a2daa3666b9
github.com/Sirupsen/logrus 219c8cb75c258c552e999735be6df753ffc7afdc
github.com/StackExchange/wmi f3e2bae1e0cb5aef83e319133eabfee30013a4a5
github.com/amir/raidman 53c1b967405155bfc8758557863bf2e14f814687
github.com/aws/aws-sdk-go 13a12060f716145019378a10e2806c174356b857
github.com/beorn7/perks 3ac7bf7a47d159a033b107610db8a1b6575507a4
github.com/cenkalti/backoff 4dc77674aceaabba2c7e3da25d4c823edfb73f99
github.com/couchbase/go-couchbase cb664315a324d87d19c879d9cc67fda6be8c2ac1
github.com/couchbase/gomemcached a5ea6356f648fec6ab89add00edd09151455b4b2
github.com/couchbase/goutils 5823a0cbaaa9008406021dc5daf80125ea30bba6
github.com/dancannon/gorethink e7cac92ea2bc52638791a021f212145acfedb1fc
github.com/davecgh/go-spew 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d
github.com/docker/engine-api 8924d6900370b4c7e7984be5adc61f50a80d7537
github.com/docker/go-connections f549a9393d05688dff0992ef3efd8bbe6c628aeb
github.com/docker/go-units 5d2041e26a699eaca682e2ea41c8f891e1060444
github.com/eapache/go-resiliency b86b1ec0dd4209a588dc1285cdd471e73525c0b3
github.com/eapache/queue ded5959c0d4e360646dc9e9908cff48666781367
github.com/eclipse/paho.mqtt.golang 0f7a459f04f13a41b7ed752d47944528d4bf9a86
github.com/go-ole/go-ole 50055884d646dd9434f16bbb5c9801749b9bafe4
github.com/go-sql-driver/mysql 1fca743146605a172a266e1654e01e5cd5669bee
github.com/golang/protobuf 552c7b9542c194800fd493123b3798ef0a832032
github.com/golang/snappy 427fb6fc07997f43afa32f35e850833760e489a7
github.com/gonuts/go-shellquote e842a11b24c6abfb3dd27af69a17f482e4b483c2
github.com/gorilla/context 1ea25387ff6f684839d82767c1733ff4d4d15d0a
github.com/gorilla/mux c9e326e2bdec29039a3761c07bece13133863e1e
github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478
github.com/influxdata/config b79f6829346b8d6e78ba73544b1e1038f1f1c9da
github.com/influxdata/influxdb e3fef5593c21644f2b43af55d6e17e70910b0e48
github.com/influxdata/toml af4df43894b16e3fd2b788d01bd27ad0776ef2d0
github.com/klauspost/crc32 19b0b332c9e4516a6370a0456e6182c3b5036720
github.com/lib/pq e182dc4027e2ded4b19396d638610f2653295f36
github.com/lxn/win 9a7734ea4db26bc593d52f6a8a957afdad39c5c1
github.com/matttproud/golang_protobuf_extensions d0c3fe89de86839aecf2e0579c40ba3bb336a453
github.com/miekg/dns cce6c130cdb92c752850880fd285bea1d64439dd
github.com/mreiferson/go-snappystream 028eae7ab5c4c9e2d1cb4c4ca1e53259bbe7e504
github.com/naoina/go-stringutil 6b638e95a32d0c1131db0e7fe83775cbea4a0d0b
github.com/nats-io/nats b13fc9d12b0b123ebc374e6b808c6228ae4234a3
github.com/nats-io/nuid 4f84f5f3b2786224e336af2e13dba0a0a80b76fa
github.com/nsqio/go-nsq 0b80d6f05e15ca1930e0c5e1d540ed627e299980
github.com/prometheus/client_golang 18acf9993a863f4c4b40612e19cdd243e7c86831
github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
github.com/prometheus/common e8eabff8812b05acf522b45fdcd725a785188e37
github.com/prometheus/procfs 406e5b7bfd8201a36e2bb5f7bdae0b03380c2ce8
github.com/samuel/go-zookeeper 218e9c81c0dd8b3b18172b2bbfad92cc7d6db55f
github.com/shirou/gopsutil 1f32ce1bb380845be7f5d174ac641a2c592c0c42
github.com/shirou/w32 ada3ba68f000aa1b58580e45c9d308fe0b7fc5c5
github.com/soniah/gosnmp b1b4f885b12c5dcbd021c5cee1c904110de6db7d
github.com/streadway/amqp b4f3ceab0337f013208d31348b578d83c0064744
github.com/stretchr/testify 1f4a1643a57e798696635ea4c126e9127adb7d3c
github.com/wvanbergen/kafka 46f9a1cf3f670edec492029fadded9c2d9e18866
github.com/wvanbergen/kazoo-go 0f768712ae6f76454f987c3356177e138df258f8
github.com/zensqlmonitor/go-mssqldb ffe5510c6fa5e15e6d983210ab501c815b56b363
golang.org/x/net 6acef71eb69611914f7a30939ea9f6e194c78172
golang.org/x/text a71fd10341b064c10f4a81ceac72bcf70f26ea34
gopkg.in/dancannon/gorethink.v1 7d1af5be49cb5ecc7b177bf387d232050299d6ef
gopkg.in/fatih/pool.v2 cba550ebf9bce999a02e963296d4bc7a486cb715
gopkg.in/mgo.v2 d90005c5262a3463800497ea5a89aed5fe22c886
gopkg.in/yaml.v2 a83829b6f1293c91addabc89d0571c246397bbf4

# Makefile (new file)

UNAME := $(shell sh -c 'uname')
VERSION := $(shell sh -c 'git describe --always --tags')
ifdef GOBIN
PATH := $(GOBIN):$(PATH)
else
PATH := $(subst :,/bin:,$(GOPATH))/bin:$(PATH)
endif

# Standard Telegraf build
default: prepare build

# Windows build
windows: prepare-windows build-windows

# Only run the build (no dependency grabbing)
build:
	go install -ldflags "-X main.version=$(VERSION)" ./...

build-windows:
	go build -o telegraf.exe -ldflags \
		"-X main.version=$(VERSION)" \
		./cmd/telegraf/telegraf.go

build-for-docker:
	CGO_ENABLED=0 GOOS=linux go build -installsuffix cgo -o telegraf -ldflags \
		"-s -X main.version=$(VERSION)" \
		./cmd/telegraf/telegraf.go

# Build with race detector
dev: prepare
	go build -race -ldflags "-X main.version=$(VERSION)" ./...

# run package script
package:
	./scripts/build.py --package --version="$(VERSION)" --platform=linux --arch=all --upload

# Get dependencies and use gdm to checkout changesets
prepare:
	go get github.com/sparrc/gdm
	gdm restore

# Use the windows godeps file to prepare dependencies
prepare-windows:
	go get github.com/sparrc/gdm
	gdm restore -f Godeps_windows

# Run all docker containers necessary for unit tests
docker-run:
ifeq ($(UNAME), Darwin)
	docker run --name kafka \
		-e ADVERTISED_HOST=$(shell sh -c 'boot2docker ip || docker-machine ip default') \
		-e ADVERTISED_PORT=9092 \
		-p "2181:2181" -p "9092:9092" \
		-d spotify/kafka
endif
ifeq ($(UNAME), Linux)
	docker run --name kafka \
		-e ADVERTISED_HOST=localhost \
		-e ADVERTISED_PORT=9092 \
		-p "2181:2181" -p "9092:9092" \
		-d spotify/kafka
endif
	docker run --name mysql -p "3306:3306" -e MYSQL_ALLOW_EMPTY_PASSWORD=yes -d mysql
	docker run --name memcached -p "11211:11211" -d memcached
	docker run --name postgres -p "5432:5432" -d postgres
	docker run --name rabbitmq -p "15672:15672" -p "5672:5672" -d rabbitmq:3-management
	docker run --name opentsdb -p "4242:4242" -d petergrace/opentsdb-docker
	docker run --name redis -p "6379:6379" -d redis
	docker run --name aerospike -p "3000:3000" -d aerospike
	docker run --name nsq -p "4150:4150" -d nsqio/nsq /nsqd
	docker run --name mqtt -p "1883:1883" -d ncarlier/mqtt
	docker run --name riemann -p "5555:5555" -d blalor/riemann
	docker run --name snmp -p "31161:31161/udp" -d titilambert/snmpsim

# Run docker containers necessary for CircleCI unit tests
docker-run-circle:
	docker run --name kafka \
		-e ADVERTISED_HOST=localhost \
		-e ADVERTISED_PORT=9092 \
		-p "2181:2181" -p "9092:9092" \
		-d spotify/kafka
	docker run --name opentsdb -p "4242:4242" -d petergrace/opentsdb-docker
	docker run --name aerospike -p "3000:3000" -d aerospike
	docker run --name nsq -p "4150:4150" -d nsqio/nsq /nsqd
	docker run --name mqtt -p "1883:1883" -d ncarlier/mqtt
	docker run --name riemann -p "5555:5555" -d blalor/riemann
	docker run --name snmp -p "31161:31161/udp" -d titilambert/snmpsim

# Kill all docker containers, ignore errors
docker-kill:
	-docker kill nsq aerospike redis opentsdb rabbitmq postgres memcached mysql kafka mqtt riemann snmp
	-docker rm nsq aerospike redis opentsdb rabbitmq postgres memcached mysql kafka mqtt riemann snmp

# Run full unit tests using docker containers (includes setup and teardown)
test: vet docker-kill docker-run
	# Sleeping for kafka leadership election, TSDB setup, etc.
	sleep 60
	# SUCCESS, running tests
	go test -race ./...

# Run "short" unit tests
test-short: vet
	go test -short ./...

vet:
	go vet ./...

.PHONY: test test-short vet build default

# PLUGINS.md (deleted file)

Telegraf is entirely plugin driven. This interface allows for operators to
pick and choose what is gathered and makes it easy for developers
to create new ways of generating metrics.

Plugin authorship is kept as simple as possible to encourage people to develop
and submit new plugins.

## Guidelines

* A plugin must conform to the `plugins.Plugin` interface.
* Telegraf promises to run each plugin's Gather function serially. This means
developers don't have to worry about thread safety within these functions.
* Each generated metric automatically has the name of the plugin that generated
it prepended. This is to keep plugins honest.
* Plugins should call `plugins.Add` in their `init` function to register themselves.
See below for a quick example.
* To be available within Telegraf itself, plugins must add themselves to the `github.com/influxdb/telegraf/plugins/all/all.go` file.
* The `SampleConfig` function should return valid toml that describes how the plugin can be configured. This is included in `telegraf -sample-config`.
* The `Description` function should say in one line what this plugin does.

### Plugin interface

```go
type Plugin interface {
    SampleConfig() string
    Description() string
    Gather(Accumulator) error
}

type Accumulator interface {
    Add(measurement string, value interface{}, tags map[string]string)
    AddValuesWithTime(measurement string, values map[string]interface{}, tags map[string]string, timestamp time.Time)
}
```

### Accumulator

The way that a plugin emits metrics is by interacting with the Accumulator.

The `Add` function takes 3 arguments:
* **measurement**: A string description of the metric. For instance `bytes_read` or `faults`.
* **value**: A value for the metric. This accepts 5 different types of value:
  * **int**: The most common type. All int types are accepted but favor using `int64`.
  Useful for counters, etc.
  * **float**: Favor `float64`, useful for gauges, percentages, etc.
  * **bool**: `true` or `false`, useful to indicate the presence of a state. `light_on`, etc.
  * **string**: Typically used to indicate a message, or some kind of freeform information.
  * **time.Time**: Useful for indicating when a state last occurred, for instance `light_on_since`.
* **tags**: This is a map of strings to strings to describe the where or who about the metric. For instance, the `net` plugin adds a tag named `"interface"` set to the name of the network interface, like `"eth0"`.

The `AddValuesWithTime` function allows multiple values for a point to be passed. The values
used are the same type profile as **value** above. The **timestamp** argument
allows a point to be registered as having occurred at an arbitrary time.

Let's say you've written a plugin that emits metrics about processes on the current host.

```go
type Process struct {
    CPUTime     float64
    MemoryBytes int64
    PID         int
}

func Gather(acc plugins.Accumulator) error {
    for _, process := range system.Processes() {
        tags := map[string]string{
            "pid": fmt.Sprintf("%d", process.Pid),
        }

        acc.Add("cpu", process.CPUTime, tags)
        acc.Add("memory", process.MemoryBytes, tags)
    }

    return nil
}
```

### Example

```go
// simple.go

import "github.com/influxdb/telegraf/plugins"

type Simple struct {
    Ok bool
}

func (s *Simple) Description() string {
    return "a demo plugin"
}

func (s *Simple) SampleConfig() string {
    return "ok = true # indicate if everything is fine"
}

func (s *Simple) Gather(acc plugins.Accumulator) error {
    if s.Ok {
        acc.Add("state", "pretty good", nil)
    } else {
        acc.Add("state", "not great", nil)
    }

    return nil
}

func init() {
    plugins.Add("simple", func() plugins.Plugin { return &Simple{} })
}
```

# README.md (modified)

# Telegraf [Circle CI](https://circleci.com/gh/influxdata/telegraf)

Telegraf is an agent written in Go for collecting metrics from the system it's
running on, or from other services, and writing them into InfluxDB or other
[outputs](https://github.com/influxdata/telegraf#supported-output-plugins).

Design goals are to have a minimal memory footprint with a plugin system so
that developers in the community can easily add support for collecting metrics
from well known services (like Hadoop, Postgres, or Redis) and third party
APIs (like Mailchimp, AWS CloudWatch, or Google Analytics).

New input and output plugins are designed to be easy to contribute; we'll
eagerly accept pull requests and will manage the set of plugins that Telegraf
supports. See the [contributing guide](CONTRIBUTING.md) for instructions on
writing new plugins.

## Installation:

### Linux deb and rpm Packages:

Latest:
* https://dl.influxdata.com/telegraf/releases/telegraf_0.13.0-1_amd64.deb
* https://dl.influxdata.com/telegraf/releases/telegraf-0.13.0-1.x86_64.rpm

Latest (arm):
* https://dl.influxdata.com/telegraf/releases/telegraf_0.13.0-1_armhf.deb
* https://dl.influxdata.com/telegraf/releases/telegraf-0.13.0-1.armhf.rpm

##### Package Instructions:

* Telegraf binary is installed in `/usr/bin/telegraf`
* Telegraf daemon configuration file is in `/etc/telegraf/telegraf.conf`
* On sysv systems, the telegraf daemon can be controlled via
`service telegraf [action]`
* On systemd systems (such as Ubuntu 15+), the telegraf daemon can be
controlled via `systemctl [action] telegraf`

### yum/apt Repositories:

There is a yum/apt repo available for the whole InfluxData stack, see
[here](https://docs.influxdata.com/influxdb/v0.10/introduction/installation/#installation)
for instructions on setting up the repo. Once it is configured, you will be able
to use this repo to install & update telegraf.

### Linux tarballs:

Latest:
* https://dl.influxdata.com/telegraf/releases/telegraf-0.13.0-1_linux_amd64.tar.gz
* https://dl.influxdata.com/telegraf/releases/telegraf-0.13.0-1_linux_i386.tar.gz
* https://dl.influxdata.com/telegraf/releases/telegraf-0.13.0-1_linux_armhf.tar.gz

##### tarball Instructions:

To install the full directory structure with config file, run:

```
sudo tar -C / -zxvf ./telegraf-0.13.0-1_linux_amd64.tar.gz
```

To extract only the binary, run:

```
tar -zxvf telegraf-0.13.0-1_linux_amd64.tar.gz --strip-components=3 ./usr/bin/telegraf
```

### FreeBSD tarball:

Latest:
* https://dl.influxdata.com/telegraf/releases/telegraf-0.13.0-1_freebsd_amd64.tar.gz

##### tarball Instructions:

See linux instructions above.

### Ansible Role:

Ansible role: https://github.com/rossmcdonald/telegraf

### OSX via Homebrew:

```
brew update
brew install telegraf
```

### Windows Binaries (EXPERIMENTAL)

Latest:
* https://dl.influxdata.com/telegraf/releases/telegraf-0.13.0-1_windows_amd64.zip
* https://dl.influxdata.com/telegraf/releases/telegraf-0.13.0-1_windows_i386.zip

### From Source:

Telegraf manages dependencies via [gdm](https://github.com/sparrc/gdm),
which gets installed via the Makefile
if you don't have it already. You also must build with golang version 1.5+.

1. [Install Go](https://golang.org/doc/install)
2. [Setup your GOPATH](https://golang.org/doc/code.html#GOPATH)
3. Run `go get github.com/influxdata/telegraf`
4. Run `cd $GOPATH/src/github.com/influxdata/telegraf`
5. Run `make`

## How to use it:

```console
$ telegraf -help
Telegraf, The plugin-driven server agent for collecting and reporting metrics.

Usage:

  telegraf <flags>

The flags are:

  -config <file>     configuration file to load
  -test              gather metrics once, print them to stdout, and exit
  -sample-config     print out full sample configuration to stdout
  -config-directory  directory containing additional *.conf files
  -input-filter      filter the input plugins to enable, separator is :
  -output-filter     filter the output plugins to enable, separator is :
  -usage             print usage for a plugin, ie, 'telegraf -usage mysql'
  -debug             print metrics as they're generated to stdout
  -quiet             run in quiet mode
  -version           print the version to stdout

Examples:

  # generate a telegraf config file:
  telegraf -sample-config > telegraf.conf

  # generate config with only cpu input & influxdb output plugins defined
  telegraf -sample-config -input-filter cpu -output-filter influxdb

  # run a single telegraf collection, outputting metrics to stdout
  telegraf -config telegraf.conf -test

  # run telegraf with all plugins defined in config file
  telegraf -config telegraf.conf

  # run telegraf, enabling the cpu & memory input, and influxdb output plugins
  telegraf -config telegraf.conf -input-filter cpu:mem -output-filter influxdb
```
|
||||
|
||||
## Configuration

See the [configuration guide](docs/CONFIGURATION.md) for a rundown of the more advanced
configuration options.

## Supported Input Plugins

Telegraf currently has support for collecting metrics from many sources. For
more information on each, please look at the directory of the same name in
`plugins/inputs`.

Currently implemented sources:

* [aws cloudwatch](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/cloudwatch)
* [aerospike](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/aerospike)
* [apache](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/apache)
* [bcache](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/bcache)
* [cassandra](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/cassandra)
* [couchbase](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/couchbase)
* [couchdb](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/couchdb)
* [disque](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/disque)
* [dns query time](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/dns_query)
* [docker](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/docker)
* [dovecot](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/dovecot)
* [elasticsearch](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/elasticsearch)
* [exec](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/exec) (generic executable plugin, supports JSON, influx, graphite and nagios)
* [filestat](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/filestat)
* [haproxy](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/haproxy)
* [http_response](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/http_response)
* [httpjson](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/httpjson) (generic JSON-emitting http service plugin)
* [influxdb](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/influxdb)
* [ipmi_sensor](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/ipmi_sensor)
* [jolokia](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia)
* [leofs](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/leofs)
* [lustre2](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/lustre2)
* [mailchimp](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/mailchimp)
* [memcached](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/memcached)
* [mesos](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/mesos)
* [mongodb](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/mongodb)
* [mysql](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/mysql)
* [net_response](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/net_response)
* [nginx](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nginx)
* [nsq](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nsq)
* [ntpq](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/ntpq)
* [phpfpm](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/phpfpm)
* [phusion passenger](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/passenger)
* [ping](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/ping)
* [postgresql](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/postgresql)
* [postgresql_extensible](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/postgresql_extensible)
* [powerdns](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/powerdns)
* [procstat](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/procstat)
* [prometheus](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/prometheus)
* [puppetagent](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/puppetagent)
* [rabbitmq](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/rabbitmq)
* [raindrops](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/raindrops)
* [redis](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/redis)
* [rethinkdb](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/rethinkdb)
* [riak](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/riak)
* [sensors](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/sensors) (only available if built from source)
* [snmp](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/snmp)
* [sql server](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/sqlserver) (microsoft)
* [twemproxy](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/twemproxy)
* [zfs](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/zfs)
* [zookeeper](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/zookeeper)
* [win_perf_counters](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/win_perf_counters) (windows performance counters)
* [sysstat](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/sysstat)
* [system](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/system)
    * cpu
    * mem
    * net
    * netstat
    * disk
    * diskio
    * swap
    * processes
    * kernel (/proc/stat)

Telegraf can also collect metrics via the following service plugins:

* [statsd](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/statsd)
* [udp_listener](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/udp_listener)
* [tcp_listener](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/tcp_listener)
* [mqtt_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/mqtt_consumer)
* [kafka_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/kafka_consumer)
* [nats_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nats_consumer)
* [github_webhooks](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/github_webhooks)

We'll be adding support for many more over the coming months. Read on if you
want to add support for another service or third-party API.

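For orientation, a minimal input-plugin sketch in Go follows. The `Description`/`SampleConfig`/`Gather` shape and the `inputs.Add` registration are inferred from the `plugins/inputs` layout and the `telegraf.Accumulator` usage visible in the diffs further down this page, so treat this as a hedged sketch rather than a confirmed API reference; the plugin name, measurement, field, and tag names are invented for illustration.

```go
package example

import (
    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/plugins/inputs"
)

// Example is a hypothetical input plugin; the type name and the
// reported measurement are made up for this sketch.
type Example struct{}

func (e *Example) Description() string { return "a demo input" }

func (e *Example) SampleConfig() string { return "" }

// Gather is called once per collection interval; it reports one
// measurement with a field map and a tag map via the Accumulator.
func (e *Example) Gather(acc telegraf.Accumulator) error {
    fields := map[string]interface{}{"value": int64(42)}
    tags := map[string]string{"source": "demo"}
    acc.AddFields("example", fields, tags)
    return nil
}

func init() {
    // Register a factory so the agent can instantiate the plugin by name.
    inputs.Add("example", func() telegraf.Input { return &Example{} })
}
```
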
## Supported Output Plugins

* [influxdb](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/influxdb)
* [amon](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/amon)
* [amqp](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/amqp)
* [aws kinesis](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/kinesis)
* [aws cloudwatch](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/cloudwatch)
* [datadog](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/datadog)
* [file](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/file)
* [graphite](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/graphite)
* [kafka](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/kafka)
* [librato](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/librato)
* [mqtt](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/mqtt)
* [nsq](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/nsq)
* [opentsdb](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/opentsdb)
* [prometheus](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/prometheus_client)
* [riemann](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/riemann)

## Contributing

Please see the [contributing guide](CONTRIBUTING.md)
for details on contributing a plugin to Telegraf.

122  Vagrantfile  (vendored)
@@ -1,122 +0,0 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :

# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
VAGRANTFILE_API_VERSION = "2"

Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
  # All Vagrant configuration is done here. The most common configuration
  # options are documented and commented below. For a complete reference,
  # please see the online documentation at vagrantup.com.

  # Every Vagrant virtual environment requires a box to build off of.
  config.vm.box = "ubuntu/trusty64"

  # Disable automatic box update checking. If you disable this, then
  # boxes will only be checked for updates when the user runs
  # `vagrant box outdated`. This is not recommended.
  # config.vm.box_check_update = false

  # Create a forwarded port mapping which allows access to a specific port
  # within the machine from a port on the host machine. In the example below,
  # accessing "localhost:8080" will access port 80 on the guest machine.
  # config.vm.network "forwarded_port", guest: 80, host: 8080

  # Create a private network, which allows host-only access to the machine
  # using a specific IP.
  # config.vm.network "private_network", ip: "192.168.33.10"

  # Create a public network, which generally matched to bridged network.
  # Bridged networks make the machine appear as another physical device on
  # your network.
  # config.vm.network "public_network"

  # If true, then any SSH connections made will enable agent forwarding.
  # Default value: false
  # config.ssh.forward_agent = true

  # Share an additional folder to the guest VM. The first argument is
  # the path on the host to the actual folder. The second argument is
  # the path on the guest to mount the folder. And the optional third
  # argument is a set of non-required options.
  config.vm.synced_folder "~/go", "/home/vagrant/go"

  # Provider-specific configuration so you can fine-tune various
  # backing providers for Vagrant. These expose provider-specific options.
  # Example for VirtualBox:
  #
  config.vm.provider "virtualbox" do |vb|
  #   # Don't boot with headless mode
  #   vb.gui = true
  #
  #   # Use VBoxManage to customize the VM. For example to change memory:
    vb.customize ["modifyvm", :id, "--memory", "1024"]
  end
  #
  # View the documentation for the provider you're using for more
  # information on available options.

  # Enable provisioning with CFEngine. CFEngine Community packages are
  # automatically installed. For example, configure the host as a
  # policy server and optionally a policy file to run:
  #
  # config.vm.provision "cfengine" do |cf|
  #   cf.am_policy_hub = true
  #   # cf.run_file = "motd.cf"
  # end
  #
  # You can also configure and bootstrap a client to an existing
  # policy server:
  #
  # config.vm.provision "cfengine" do |cf|
  #   cf.policy_server_address = "10.0.2.15"
  # end

  # Enable provisioning with Puppet stand alone. Puppet manifests
  # are contained in a directory path relative to this Vagrantfile.
  # You will need to create the manifests directory and a manifest in
  # the file default.pp in the manifests_path directory.
  #
  # config.vm.provision "puppet" do |puppet|
  #   puppet.manifests_path = "manifests"
  #   puppet.manifest_file = "site.pp"
  # end

  # Enable provisioning with chef solo, specifying a cookbooks path, roles
  # path, and data_bags path (all relative to this Vagrantfile), and adding
  # some recipes and/or roles.
  #
  # config.vm.provision "chef_solo" do |chef|
  #   chef.cookbooks_path = "../my-recipes/cookbooks"
  #   chef.roles_path = "../my-recipes/roles"
  #   chef.data_bags_path = "../my-recipes/data_bags"
  #   chef.add_recipe "mysql"
  #   chef.add_role "web"
  #
  #   # You may also specify custom JSON attributes:
  #   chef.json = { mysql_password: "foo" }
  # end

  # Enable provisioning with chef server, specifying the chef server URL,
  # and the path to the validation key (relative to this Vagrantfile).
  #
  # The Opscode Platform uses HTTPS. Substitute your organization for
  # ORGNAME in the URL and validation key.
  #
  # If you have your own Chef Server, use the appropriate URL, which may be
  # HTTP instead of HTTPS depending on your configuration. Also change the
  # validation key to validation.pem.
  #
  # config.vm.provision "chef_client" do |chef|
  #   chef.chef_server_url = "https://api.opscode.com/organizations/ORGNAME"
  #   chef.validation_key_path = "ORGNAME-validator.pem"
  # end
  #
  # If you're using the Opscode platform, your validator client is
  # ORGNAME-validator, replacing ORGNAME with your organization name.
  #
  # If you have your own Chef Server, the default validation client name is
  # chef-validator, unless you changed the configuration.
  #
  #   chef.validation_client_name = "ORGNAME-validator"
end

100  accumulator.go
@@ -1,91 +1,21 @@
 package telegraf

-import (
-    "fmt"
-    "sort"
-    "strings"
-    "time"
+import "time"

-    "github.com/influxdb/influxdb/client"
-)
+type Accumulator interface {
+    // Create a point with a value, decorating it with tags
+    // NOTE: tags is expected to be owned by the caller, don't mutate
+    // it after passing to Add.
+    Add(measurement string,
+        value interface{},
+        tags map[string]string,
+        t ...time.Time)

-type BatchPoints struct {
-    client.BatchPoints
+    AddFields(measurement string,
+        fields map[string]interface{},
+        tags map[string]string,
+        t ...time.Time)

-    Debug bool

-    Prefix string

-    Config *ConfiguredPlugin
-}

-func (bp *BatchPoints) Add(measurement string, val interface{}, tags map[string]string) {
-    measurement = bp.Prefix + measurement

-    if bp.Config != nil {
-        if !bp.Config.ShouldPass(measurement) {
-            return
-        }
-    }

-    if bp.Debug {
-        var tg []string

-        for k, v := range tags {
-            tg = append(tg, fmt.Sprintf("%s=\"%s\"", k, v))
-        }

-        sort.Strings(tg)

-        fmt.Printf("> [%s] %s value=%v\n", strings.Join(tg, " "), measurement, val)
-    }

-    bp.Points = append(bp.Points, client.Point{
-        Measurement: measurement,
-        Tags:        tags,
-        Fields: map[string]interface{}{
-            "value": val,
-        },
-    })
-}

-func (bp *BatchPoints) AddValuesWithTime(
-    measurement string,
-    values map[string]interface{},
-    tags map[string]string,
-    timestamp time.Time,
-) {
-    measurement = bp.Prefix + measurement

-    if bp.Config != nil {
-        if !bp.Config.ShouldPass(measurement) {
-            return
-        }
-    }

-    if bp.Debug {
-        var tg []string

-        for k, v := range tags {
-            tg = append(tg, fmt.Sprintf("%s=\"%s\"", k, v))
-        }

-        var vals []string

-        for k, v := range values {
-            vals = append(vals, fmt.Sprintf("%s=%v", k, v))
-        }

-        sort.Strings(tg)
-        sort.Strings(vals)

-        fmt.Printf("> [%s] %s %s\n", strings.Join(tg, " "), measurement, strings.Join(vals, " "))
-    }

-    bp.Points = append(bp.Points, client.Point{
-        Measurement: measurement,
-        Tags:        tags,
-        Fields:      values,
-        Time:        timestamp,
-    })
+    Debug() bool
+    SetDebug(enabled bool)
 }

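The hunk above replaces the concrete `BatchPoints` accumulator with the `Accumulator` interface. As a hedged illustration of the two entry points it defines (the measurement, field, and tag names here are invented, not from the diff):

```go
package example

import (
    "time"

    "github.com/influxdata/telegraf"
)

// gatherMemory sketches how a plugin would use the two Accumulator
// entry points from the interface above.
func gatherMemory(acc telegraf.Accumulator) {
    tags := map[string]string{"host": "example"}

    // Add wraps a single value into a field named "value".
    acc.Add("mem_free", int64(4096), tags)

    // AddFields writes several named fields at once; the trailing
    // time.Time is optional (the accumulator defaults to time.Now()).
    acc.AddFields("mem", map[string]interface{}{
        "free": int64(4096),
        "used": int64(1024),
    }, tags, time.Now())
}
```
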
292  agent.go
@@ -1,292 +0,0 @@
package telegraf

import (
    "fmt"
    "log"
    "net/url"
    "os"
    "sort"
    "sync"
    "time"

    "github.com/influxdb/influxdb/client"
    "github.com/influxdb/telegraf/plugins"
)

type runningPlugin struct {
    name   string
    plugin plugins.Plugin
    config *ConfiguredPlugin
}

type Agent struct {
    Interval Duration
    Debug    bool
    Hostname string

    Config *Config

    plugins []*runningPlugin

    conn *client.Client
}

func NewAgent(config *Config) (*Agent, error) {
    agent := &Agent{Config: config, Interval: Duration{10 * time.Second}}

    err := config.ApplyAgent(agent)
    if err != nil {
        return nil, err
    }

    if agent.Hostname == "" {
        hostname, err := os.Hostname()
        if err != nil {
            return nil, err
        }

        agent.Hostname = hostname
    }

    if config.Tags == nil {
        config.Tags = map[string]string{}
    }

    config.Tags["host"] = agent.Hostname

    return agent, nil
}

func (agent *Agent) Connect() error {
    config := agent.Config

    u, err := url.Parse(config.URL)
    if err != nil {
        return err
    }

    c, err := client.NewClient(client.Config{
        URL:       *u,
        Username:  config.Username,
        Password:  config.Password,
        UserAgent: config.UserAgent,
    })

    if err != nil {
        return err
    }

    agent.conn = c

    return nil
}

func (a *Agent) LoadPlugins() ([]string, error) {
    var names []string

    for _, name := range a.Config.PluginsDeclared() {
        creator, ok := plugins.Plugins[name]
        if !ok {
            return nil, fmt.Errorf("Undefined but requested plugin: %s", name)
        }

        plugin := creator()

        config, err := a.Config.ApplyPlugin(name, plugin)
        if err != nil {
            return nil, err
        }

        a.plugins = append(a.plugins, &runningPlugin{name, plugin, config})
        names = append(names, name)
    }

    sort.Strings(names)

    return names, nil
}

func (a *Agent) crankParallel() error {
    points := make(chan *BatchPoints, len(a.plugins))

    var wg sync.WaitGroup

    for _, plugin := range a.plugins {
        if plugin.config.Interval != 0 {
            continue
        }

        wg.Add(1)
        go func(plugin *runningPlugin) {
            defer wg.Done()

            var acc BatchPoints
            acc.Debug = a.Debug
            acc.Prefix = plugin.name + "_"
            acc.Config = plugin.config

            plugin.plugin.Gather(&acc)

            points <- &acc
        }(plugin)
    }

    wg.Wait()

    close(points)

    var acc BatchPoints
    acc.Tags = a.Config.Tags
    acc.Time = time.Now()
    acc.Database = a.Config.Database

    for sub := range points {
        acc.Points = append(acc.Points, sub.Points...)
    }

    _, err := a.conn.Write(acc.BatchPoints)
    return err
}

func (a *Agent) crank() error {
    var acc BatchPoints

    acc.Debug = a.Debug

    for _, plugin := range a.plugins {
        acc.Prefix = plugin.name + "_"
        acc.Config = plugin.config
        err := plugin.plugin.Gather(&acc)
        if err != nil {
            return err
        }
    }

    acc.Tags = a.Config.Tags
    acc.Time = time.Now()
    acc.Database = a.Config.Database

    _, err := a.conn.Write(acc.BatchPoints)
    return err
}

func (a *Agent) crankSeparate(shutdown chan struct{}, plugin *runningPlugin) error {
    ticker := time.NewTicker(plugin.config.Interval)

    for {
        var acc BatchPoints

        acc.Debug = a.Debug

        acc.Prefix = plugin.name + "_"
        acc.Config = plugin.config
        err := plugin.plugin.Gather(&acc)
        if err != nil {
            return err
        }

        acc.Tags = a.Config.Tags
        acc.Time = time.Now()
        acc.Database = a.Config.Database

        a.conn.Write(acc.BatchPoints)

        select {
        case <-shutdown:
            return nil
        case <-ticker.C:
            continue
        }
    }
}

func (a *Agent) TestAllPlugins() error {
    var names []string

    for name, _ := range plugins.Plugins {
        names = append(names, name)
    }

    sort.Strings(names)

    var acc BatchPoints
    acc.Debug = true

    fmt.Printf("* Testing all plugins with default configuration\n")

    for _, name := range names {
        plugin := plugins.Plugins[name]()

        fmt.Printf("* Plugin: %s\n", name)

        acc.Prefix = name + "_"
        err := plugin.Gather(&acc)
        if err != nil {
            return err
        }
    }

    return nil
}

func (a *Agent) Test() error {
    var acc BatchPoints

    acc.Debug = true

    for _, plugin := range a.plugins {
        acc.Prefix = plugin.name + "_"
        acc.Config = plugin.config

        fmt.Printf("* Plugin: %s\n", plugin.name)
        if plugin.config.Interval != 0 {
            fmt.Printf("* Internal: %s\n", plugin.config.Interval)
        }

        err := plugin.plugin.Gather(&acc)
        if err != nil {
            return err
        }
    }

    return nil
}

func (a *Agent) Run(shutdown chan struct{}) error {
    if a.conn == nil {
        err := a.Connect()
        if err != nil {
            return err
        }
    }

    var wg sync.WaitGroup

    for _, plugin := range a.plugins {
        if plugin.config.Interval != 0 {
            wg.Add(1)
            go func(plugin *runningPlugin) {
                defer wg.Done()
                a.crankSeparate(shutdown, plugin)
            }(plugin)
        }
    }

    defer wg.Wait()

    ticker := time.NewTicker(a.Interval.Duration)

    for {
        err := a.crankParallel()
        if err != nil {
            log.Printf("Error in plugins: %s", err)
        }

        select {
        case <-shutdown:
            return nil
        case <-ticker.C:
            continue
        }
    }
}

174  agent/accumulator.go  (Normal file)
@@ -0,0 +1,174 @@
package agent

import (
    "fmt"
    "log"
    "math"
    "sync"
    "time"

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/internal/models"
)

func NewAccumulator(
    inputConfig *internal_models.InputConfig,
    metrics chan telegraf.Metric,
) *accumulator {
    acc := accumulator{}
    acc.metrics = metrics
    acc.inputConfig = inputConfig
    return &acc
}

type accumulator struct {
    sync.Mutex

    metrics chan telegraf.Metric

    defaultTags map[string]string

    debug bool

    inputConfig *internal_models.InputConfig

    prefix string
}

func (ac *accumulator) Add(
    measurement string,
    value interface{},
    tags map[string]string,
    t ...time.Time,
) {
    fields := make(map[string]interface{})
    fields["value"] = value

    if !ac.inputConfig.Filter.ShouldNamePass(measurement) {
        return
    }

    ac.AddFields(measurement, fields, tags, t...)
}

func (ac *accumulator) AddFields(
    measurement string,
    fields map[string]interface{},
    tags map[string]string,
    t ...time.Time,
) {
    if len(fields) == 0 || len(measurement) == 0 {
        return
    }

    if !ac.inputConfig.Filter.ShouldNamePass(measurement) {
        return
    }

    if !ac.inputConfig.Filter.ShouldTagsPass(tags) {
        return
    }

    // Override measurement name if set
    if len(ac.inputConfig.NameOverride) != 0 {
        measurement = ac.inputConfig.NameOverride
    }
    // Apply measurement prefix and suffix if set
    if len(ac.inputConfig.MeasurementPrefix) != 0 {
        measurement = ac.inputConfig.MeasurementPrefix + measurement
    }
    if len(ac.inputConfig.MeasurementSuffix) != 0 {
        measurement = measurement + ac.inputConfig.MeasurementSuffix
    }

    if tags == nil {
        tags = make(map[string]string)
    }
    // Apply daemon-wide tags if set
    for k, v := range ac.defaultTags {
        tags[k] = v
    }
    // Apply plugin-wide tags if set
    for k, v := range ac.inputConfig.Tags {
        tags[k] = v
    }
    ac.inputConfig.Filter.FilterTags(tags)

    result := make(map[string]interface{})
    for k, v := range fields {
        // Filter out any filtered fields
        if ac.inputConfig != nil {
            if !ac.inputConfig.Filter.ShouldFieldsPass(k) {
                continue
            }
        }

        // Validate uint64 and float64 fields
        switch val := v.(type) {
        case uint64:
            // InfluxDB does not support writing uint64
            if val < uint64(9223372036854775808) {
                result[k] = int64(val)
            } else {
                result[k] = int64(9223372036854775807)
            }
            continue
        case float64:
            // NaNs are invalid values in influxdb, skip measurement
            if math.IsNaN(val) || math.IsInf(val, 0) {
                if ac.debug {
                    log.Printf("Measurement [%s] field [%s] has a NaN or Inf "+
                        "field, skipping",
                        measurement, k)
                }
                continue
            }
        }

        result[k] = v
    }
    fields = nil
    if len(result) == 0 {
        return
    }

    var timestamp time.Time
    if len(t) > 0 {
        timestamp = t[0]
    } else {
        timestamp = time.Now()
    }

    if ac.prefix != "" {
        measurement = ac.prefix + measurement
    }

    m, err := telegraf.NewMetric(measurement, tags, result, timestamp)
    if err != nil {
        log.Printf("Error adding point [%s]: %s\n", measurement, err.Error())
        return
    }
    if ac.debug {
        fmt.Println("> " + m.String())
    }
    ac.metrics <- m
}

func (ac *accumulator) Debug() bool {
    return ac.debug
}

func (ac *accumulator) SetDebug(debug bool) {
    ac.debug = debug
}

func (ac *accumulator) setDefaultTags(tags map[string]string) {
    ac.defaultTags = tags
}

func (ac *accumulator) addDefaultTag(key, value string) {
    if ac.defaultTags == nil {
        ac.defaultTags = make(map[string]string)
    }
    ac.defaultTags[key] = value
}

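Two rules in `AddFields` above are easy to miss: uint64 fields are converted to int64 and clamped at the int64 maximum (InfluxDB cannot store uint64), and NaN/Inf floats are dropped entirely. A standalone restatement of those rules, for illustration only (the function names here are invented):

```go
package example

import "math"

// clampUint64 mirrors the accumulator's uint64 handling above: values
// that fit in an int64 are converted, anything larger is clamped to
// math.MaxInt64, since InfluxDB does not support writing uint64.
func clampUint64(v uint64) int64 {
    if v <= math.MaxInt64 {
        return int64(v)
    }
    return math.MaxInt64
}

// keepFloat mirrors the NaN/Inf filtering above: such values are
// invalid in InfluxDB, so the field is dropped rather than written.
func keepFloat(v float64) bool {
    return !math.IsNaN(v) && !math.IsInf(v, 0)
}
```
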
334  agent/accumulator_test.go  (Normal file)
@@ -0,0 +1,334 @@
package agent

import (
    "fmt"
    "math"
    "testing"
    "time"

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/internal/models"

    "github.com/stretchr/testify/assert"
)

func TestAdd(t *testing.T) {
    a := accumulator{}
    now := time.Now()
    a.metrics = make(chan telegraf.Metric, 10)
    defer close(a.metrics)
    a.inputConfig = &internal_models.InputConfig{}

    a.Add("acctest", float64(101), map[string]string{})
    a.Add("acctest", float64(101), map[string]string{"acc": "test"})
    a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)

    testm := <-a.metrics
    actual := testm.String()
    assert.Contains(t, actual, "acctest value=101")

    testm = <-a.metrics
    actual = testm.String()
    assert.Contains(t, actual, "acctest,acc=test value=101")

    testm = <-a.metrics
    actual = testm.String()
    assert.Equal(t,
        fmt.Sprintf("acctest,acc=test value=101 %d", now.UnixNano()),
        actual)
}

func TestAddDefaultTags(t *testing.T) {
    a := accumulator{}
    a.addDefaultTag("default", "tag")
    now := time.Now()
    a.metrics = make(chan telegraf.Metric, 10)
    defer close(a.metrics)
    a.inputConfig = &internal_models.InputConfig{}

    a.Add("acctest", float64(101), map[string]string{})
    a.Add("acctest", float64(101), map[string]string{"acc": "test"})
    a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)

    testm := <-a.metrics
    actual := testm.String()
    assert.Contains(t, actual, "acctest,default=tag value=101")

    testm = <-a.metrics
    actual = testm.String()
    assert.Contains(t, actual, "acctest,acc=test,default=tag value=101")

    testm = <-a.metrics
    actual = testm.String()
    assert.Equal(t,
        fmt.Sprintf("acctest,acc=test,default=tag value=101 %d", now.UnixNano()),
        actual)
}

func TestAddFields(t *testing.T) {
    a := accumulator{}
    now := time.Now()
    a.metrics = make(chan telegraf.Metric, 10)
    defer close(a.metrics)
    a.inputConfig = &internal_models.InputConfig{}

    fields := map[string]interface{}{
        "usage": float64(99),
    }
    a.AddFields("acctest", fields, map[string]string{})
    a.AddFields("acctest", fields, map[string]string{"acc": "test"})
    a.AddFields("acctest", fields, map[string]string{"acc": "test"}, now)

    testm := <-a.metrics
    actual := testm.String()
    assert.Contains(t, actual, "acctest usage=99")

    testm = <-a.metrics
    actual = testm.String()
    assert.Contains(t, actual, "acctest,acc=test usage=99")

    testm = <-a.metrics
    actual = testm.String()
    assert.Equal(t,
        fmt.Sprintf("acctest,acc=test usage=99 %d", now.UnixNano()),
        actual)
}

// Test that all Inf fields get dropped, and not added to metrics channel
func TestAddInfFields(t *testing.T) {
    inf := math.Inf(1)
    ninf := math.Inf(-1)

    a := accumulator{}
    now := time.Now()
    a.metrics = make(chan telegraf.Metric, 10)
    defer close(a.metrics)
    a.inputConfig = &internal_models.InputConfig{}

    fields := map[string]interface{}{
        "usage":  inf,
        "nusage": ninf,
    }
    a.AddFields("acctest", fields, map[string]string{})
    a.AddFields("acctest", fields, map[string]string{"acc": "test"})
    a.AddFields("acctest", fields, map[string]string{"acc": "test"}, now)

    assert.Len(t, a.metrics, 0)

    // test that non-inf fields are kept and not dropped
    fields["notinf"] = float64(100)
    a.AddFields("acctest", fields, map[string]string{})
    testm := <-a.metrics
    actual := testm.String()
    assert.Contains(t, actual, "acctest notinf=100")
}

// Test that nan fields are dropped and not added
func TestAddNaNFields(t *testing.T) {
    nan := math.NaN()

    a := accumulator{}
    now := time.Now()
    a.metrics = make(chan telegraf.Metric, 10)
    defer close(a.metrics)
    a.inputConfig = &internal_models.InputConfig{}

    fields := map[string]interface{}{
        "usage": nan,
    }
    a.AddFields("acctest", fields, map[string]string{})
    a.AddFields("acctest", fields, map[string]string{"acc": "test"})
    a.AddFields("acctest", fields, map[string]string{"acc": "test"}, now)

    assert.Len(t, a.metrics, 0)

    // test that non-nan fields are kept and not dropped
    fields["notnan"] = float64(100)
    a.AddFields("acctest", fields, map[string]string{})
    testm := <-a.metrics
    actual := testm.String()
    assert.Contains(t, actual, "acctest notnan=100")
}

func TestAddUint64Fields(t *testing.T) {
    a := accumulator{}
    now := time.Now()
    a.metrics = make(chan telegraf.Metric, 10)
    defer close(a.metrics)
    a.inputConfig = &internal_models.InputConfig{}

    fields := map[string]interface{}{
        "usage": uint64(99),
    }
    a.AddFields("acctest", fields, map[string]string{})
    a.AddFields("acctest", fields, map[string]string{"acc": "test"})
    a.AddFields("acctest", fields, map[string]string{"acc": "test"}, now)

    testm := <-a.metrics
    actual := testm.String()
    assert.Contains(t, actual, "acctest usage=99i")

    testm = <-a.metrics
    actual = testm.String()
    assert.Contains(t, actual, "acctest,acc=test usage=99i")

    testm = <-a.metrics
    actual = testm.String()
    assert.Equal(t,
        fmt.Sprintf("acctest,acc=test usage=99i %d", now.UnixNano()),
        actual)
}

func TestAddUint64Overflow(t *testing.T) {
    a := accumulator{}
    now := time.Now()
    a.metrics = make(chan telegraf.Metric, 10)
    defer close(a.metrics)
    a.inputConfig = &internal_models.InputConfig{}

    fields := map[string]interface{}{
        "usage": uint64(9223372036854775808),
    }
    a.AddFields("acctest", fields, map[string]string{})
    a.AddFields("acctest", fields, map[string]string{"acc": "test"})
    a.AddFields("acctest", fields, map[string]string{"acc": "test"}, now)

    testm := <-a.metrics
    actual := testm.String()
    assert.Contains(t, actual, "acctest usage=9223372036854775807i")

    testm = <-a.metrics
    actual = testm.String()
    assert.Contains(t, actual, "acctest,acc=test usage=9223372036854775807i")

    testm = <-a.metrics
    actual = testm.String()
    assert.Equal(t,
        fmt.Sprintf("acctest,acc=test usage=9223372036854775807i %d", now.UnixNano()),
        actual)
}

func TestAddInts(t *testing.T) {
    a := accumulator{}
    a.addDefaultTag("default", "tag")
    now := time.Now()
    a.metrics = make(chan telegraf.Metric, 10)
    defer close(a.metrics)
    a.inputConfig = &internal_models.InputConfig{}

    a.Add("acctest", int(101), map[string]string{})
    a.Add("acctest", int32(101), map[string]string{"acc": "test"})
    a.Add("acctest", int64(101), map[string]string{"acc": "test"}, now)

    testm := <-a.metrics
    actual := testm.String()
    assert.Contains(t, actual, "acctest,default=tag value=101i")

    testm = <-a.metrics
    actual = testm.String()
    assert.Contains(t, actual, "acctest,acc=test,default=tag value=101i")

    testm = <-a.metrics
    actual = testm.String()
    assert.Equal(t,
        fmt.Sprintf("acctest,acc=test,default=tag value=101i %d", now.UnixNano()),
        actual)
}

func TestAddFloats(t *testing.T) {
    a := accumulator{}
    a.addDefaultTag("default", "tag")
    now := time.Now()
    a.metrics = make(chan telegraf.Metric, 10)
    defer close(a.metrics)
    a.inputConfig = &internal_models.InputConfig{}

    a.Add("acctest", float32(101), map[string]string{"acc": "test"})
    a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)

    testm := <-a.metrics
    actual := testm.String()
    assert.Contains(t, actual, "acctest,acc=test,default=tag value=101")

    testm = <-a.metrics
    actual = testm.String()
    assert.Equal(t,
        fmt.Sprintf("acctest,acc=test,default=tag value=101 %d", now.UnixNano()),
        actual)
}

func TestAddStrings(t *testing.T) {
    a := accumulator{}
    a.addDefaultTag("default", "tag")
    now := time.Now()
    a.metrics = make(chan telegraf.Metric, 10)
    defer close(a.metrics)
    a.inputConfig = &internal_models.InputConfig{}

    a.Add("acctest", "test", map[string]string{"acc": "test"})
    a.Add("acctest", "foo", map[string]string{"acc": "test"}, now)

    testm := <-a.metrics
    actual := testm.String()
    assert.Contains(t, actual, "acctest,acc=test,default=tag value=\"test\"")

    testm = <-a.metrics
    actual = testm.String()
    assert.Equal(t,
        fmt.Sprintf("acctest,acc=test,default=tag value=\"foo\" %d", now.UnixNano()),
        actual)
}

func TestAddBools(t *testing.T) {
    a := accumulator{}
    a.addDefaultTag("default", "tag")
    now := time.Now()
    a.metrics = make(chan telegraf.Metric, 10)
    defer close(a.metrics)
    a.inputConfig = &internal_models.InputConfig{}

    a.Add("acctest", true, map[string]string{"acc": "test"})
    a.Add("acctest", false, map[string]string{"acc": "test"}, now)

    testm := <-a.metrics
    actual := testm.String()
    assert.Contains(t, actual, "acctest,acc=test,default=tag value=true")

    testm = <-a.metrics
    actual = testm.String()
    assert.Equal(t,
        fmt.Sprintf("acctest,acc=test,default=tag value=false %d", now.UnixNano()),
        actual)
}

// Test that tag filters get applied to metrics.
func TestAccFilterTags(t *testing.T) {
    a := accumulator{}
    now := time.Now()
    a.metrics = make(chan telegraf.Metric, 10)
    defer close(a.metrics)
    filter := internal_models.Filter{
        TagExclude: []string{"acc"},
    }
    assert.NoError(t, filter.CompileFilter())
    a.inputConfig = &internal_models.InputConfig{}
    a.inputConfig.Filter = filter

    a.Add("acctest", float64(101), map[string]string{})
    a.Add("acctest", float64(101), map[string]string{"acc": "test"})
    a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)

    testm := <-a.metrics
    actual := testm.String()
    assert.Contains(t, actual, "acctest value=101")

    testm = <-a.metrics
    actual = testm.String()
    assert.Contains(t, actual, "acctest value=101")

    testm = <-a.metrics
    actual = testm.String()
    assert.Equal(t,
        fmt.Sprintf("acctest value=101 %d", now.UnixNano()),
        actual)
}

390  agent/agent.go  (Normal file)
@@ -0,0 +1,390 @@
package agent

import (
    cryptorand "crypto/rand"
    "fmt"
    "log"
    "math/big"
    "math/rand"
    "os"
    "runtime"
    "sync"
    "time"

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/internal/config"
    "github.com/influxdata/telegraf/internal/models"
)

// Agent runs telegraf and collects data based on the given config
type Agent struct {
    Config *config.Config
}

// NewAgent returns an Agent struct based off the given Config
func NewAgent(config *config.Config) (*Agent, error) {
    a := &Agent{
        Config: config,
    }

    if !a.Config.Agent.OmitHostname {
        if a.Config.Agent.Hostname == "" {
            hostname, err := os.Hostname()
            if err != nil {
                return nil, err
            }

            a.Config.Agent.Hostname = hostname
        }

        config.Tags["host"] = a.Config.Agent.Hostname
    }

    return a, nil
}

// Connect connects to all configured outputs
func (a *Agent) Connect() error {
    for _, o := range a.Config.Outputs {
        o.Quiet = a.Config.Agent.Quiet

        switch ot := o.Output.(type) {
        case telegraf.ServiceOutput:
            if err := ot.Start(); err != nil {
                log.Printf("Service for output %s failed to start, exiting\n%s\n",
                    o.Name, err.Error())
                return err
            }
        }

        if a.Config.Agent.Debug {
            log.Printf("Attempting connection to output: %s\n", o.Name)
        }
        err := o.Output.Connect()
        if err != nil {
            log.Printf("Failed to connect to output %s, retrying in 15s, "+
                "error was '%s' \n", o.Name, err)
            time.Sleep(15 * time.Second)
            err = o.Output.Connect()
            if err != nil {
                return err
            }
        }
        if a.Config.Agent.Debug {
            log.Printf("Successfully connected to output: %s\n", o.Name)
        }
    }
    return nil
}

// Close closes the connection to all configured outputs
func (a *Agent) Close() error {
    var err error
    for _, o := range a.Config.Outputs {
        err = o.Output.Close()
        switch ot := o.Output.(type) {
        case telegraf.ServiceOutput:
            ot.Stop()
        }
    }
    return err
}

func panicRecover(input *internal_models.RunningInput) {
    if err := recover(); err != nil {
        trace := make([]byte, 2048)
        runtime.Stack(trace, true)
        log.Printf("FATAL: Input [%s] panicked: %s, Stack:\n%s\n",
            input.Name, err, trace)
        log.Println("PLEASE REPORT THIS PANIC ON GITHUB with " +
            "stack trace, configuration, and OS information: " +
            "https://github.com/influxdata/telegraf/issues/new")
    }
}

// gatherParallel runs the inputs that are using the same reporting interval
// as the telegraf agent.
func (a *Agent) gatherParallel(metricC chan telegraf.Metric) error {
    var wg sync.WaitGroup

    start := time.Now()
    counter := 0
    jitter := a.Config.Agent.CollectionJitter.Duration.Nanoseconds()
    for _, input := range a.Config.Inputs {
        if input.Config.Interval != 0 {
            continue
        }

        wg.Add(1)
        counter++
        go func(input *internal_models.RunningInput) {
            defer panicRecover(input)
            defer wg.Done()

            acc := NewAccumulator(input.Config, metricC)
            acc.SetDebug(a.Config.Agent.Debug)
            acc.setDefaultTags(a.Config.Tags)

            if jitter != 0 {
                nanoSleep := rand.Int63n(jitter)
                d, err := time.ParseDuration(fmt.Sprintf("%dns", nanoSleep))
                if err != nil {
                    log.Printf("Jittering collection interval failed for plugin %s",
                        input.Name)
                } else {
                    time.Sleep(d)
                }
            }

            if err := input.Input.Gather(acc); err != nil {
                log.Printf("Error in input [%s]: %s", input.Name, err)
            }

        }(input)
    }

    if counter == 0 {
        return nil
    }

    wg.Wait()

    elapsed := time.Since(start)
    if !a.Config.Agent.Quiet {
        log.Printf("Gathered metrics, (%s interval), from %d inputs in %s\n",
            a.Config.Agent.Interval.Duration, counter, elapsed)
    }
    return nil
}

// gatherSeparate runs the inputs that have been configured with their own
// reporting interval.
func (a *Agent) gatherSeparate(
    shutdown chan struct{},
    input *internal_models.RunningInput,
    metricC chan telegraf.Metric,
) error {
    defer panicRecover(input)

    ticker := time.NewTicker(input.Config.Interval)

    for {
        var outerr error
        start := time.Now()

        acc := NewAccumulator(input.Config, metricC)
        acc.SetDebug(a.Config.Agent.Debug)
        acc.setDefaultTags(a.Config.Tags)

        if err := input.Input.Gather(acc); err != nil {
            log.Printf("Error in input [%s]: %s", input.Name, err)
        }

        elapsed := time.Since(start)
        if !a.Config.Agent.Quiet {
            log.Printf("Gathered metrics, (separate %s interval), from %s in %s\n",
                input.Config.Interval, input.Name, elapsed)
        }

        if outerr != nil {
            return outerr
        }

        select {
        case <-shutdown:
            return nil
        case <-ticker.C:
            continue
        }
    }
}

// Test verifies that we can 'Gather' from all inputs with their configured
// Config struct
func (a *Agent) Test() error {
    shutdown := make(chan struct{})
    defer close(shutdown)
    metricC := make(chan telegraf.Metric)

    // dummy receiver for the point channel
    go func() {
        for {
            select {
            case <-metricC:
                // do nothing
            case <-shutdown:
                return
            }
        }
    }()

    for _, input := range a.Config.Inputs {
        acc := NewAccumulator(input.Config, metricC)
        acc.SetDebug(true)
        acc.setDefaultTags(a.Config.Tags)

        fmt.Printf("* Plugin: %s, Collection 1\n", input.Name)
        if input.Config.Interval != 0 {
            fmt.Printf("* Interval: %s\n", input.Config.Interval)
        }

        if err := input.Input.Gather(acc); err != nil {
            return err
        }

        // Special instructions for some inputs. cpu, for example, needs to be
        // run twice in order to return cpu usage percentages.
        switch input.Name {
        case "cpu", "mongodb", "procstat":
            time.Sleep(500 * time.Millisecond)
            fmt.Printf("* Plugin: %s, Collection 2\n", input.Name)
            if err := input.Input.Gather(acc); err != nil {
                return err
            }
        }

    }
    return nil
}

// flush writes a list of metrics to all configured outputs
func (a *Agent) flush() {
    var wg sync.WaitGroup

    wg.Add(len(a.Config.Outputs))
    for _, o := range a.Config.Outputs {
        go func(output *internal_models.RunningOutput) {
            defer wg.Done()
            err := output.Write()
            if err != nil {
                log.Printf("Error writing to output [%s]: %s\n",
                    output.Name, err.Error())
            }
        }(o)
    }

    wg.Wait()
}

// flusher monitors the metrics input channel and flushes on the minimum interval
func (a *Agent) flusher(shutdown chan struct{}, metricC chan telegraf.Metric) error {
    // Inelegant, but this sleep is to allow the Gather threads to run, so that
    // the flusher will flush after metrics are collected.
    time.Sleep(time.Millisecond * 200)

    ticker := time.NewTicker(a.Config.Agent.FlushInterval.Duration)

    for {
        select {
        case <-shutdown:
            log.Println("Hang on, flushing any cached metrics before shutdown")
            a.flush()
            return nil
        case <-ticker.C:
            a.flush()
        case m := <-metricC:
            for _, o := range a.Config.Outputs {
                o.AddMetric(m)
            }
        }
    }
}

// jitterInterval applies the interval jitter to the flush interval using
// crypto/rand number generator
func jitterInterval(ininterval, injitter time.Duration) time.Duration {
    var jitter int64
    outinterval := ininterval
    if injitter.Nanoseconds() != 0 {
        maxjitter := big.NewInt(injitter.Nanoseconds())
        if j, err := cryptorand.Int(cryptorand.Reader, maxjitter); err == nil {
            jitter = j.Int64()
        }
        outinterval = time.Duration(jitter + ininterval.Nanoseconds())
    }

    if outinterval.Nanoseconds() < time.Duration(500*time.Millisecond).Nanoseconds() {
        log.Printf("Flush interval %s too low, setting to 500ms\n", outinterval)
        outinterval = time.Duration(500 * time.Millisecond)
    }

    return outinterval
}

// Run runs the agent daemon, gathering every Interval
func (a *Agent) Run(shutdown chan struct{}) error {
    var wg sync.WaitGroup

    a.Config.Agent.FlushInterval.Duration = jitterInterval(
        a.Config.Agent.FlushInterval.Duration,
        a.Config.Agent.FlushJitter.Duration)

    log.Printf("Agent Config: Interval:%s, Debug:%#v, Quiet:%#v, Hostname:%#v, "+
        "Flush Interval:%s \n",
        a.Config.Agent.Interval.Duration, a.Config.Agent.Debug, a.Config.Agent.Quiet,
        a.Config.Agent.Hostname, a.Config.Agent.FlushInterval.Duration)

    // channel shared between all input threads for accumulating metrics
    metricC := make(chan telegraf.Metric, 10000)

    for _, input := range a.Config.Inputs {
        // Start service of any ServicePlugins
        switch p := input.Input.(type) {
        case telegraf.ServiceInput:
            acc := NewAccumulator(input.Config, metricC)
            acc.SetDebug(a.Config.Agent.Debug)
            acc.setDefaultTags(a.Config.Tags)
            if err := p.Start(acc); err != nil {
                log.Printf("Service for input %s failed to start, exiting\n%s\n",
                    input.Name, err.Error())
                return err
            }
            defer p.Stop()
        }
    }

    // Round collection to nearest interval by sleeping
    if a.Config.Agent.RoundInterval {
        i := int64(a.Config.Agent.Interval.Duration)
        time.Sleep(time.Duration(i - (time.Now().UnixNano() % i)))
    }
    ticker := time.NewTicker(a.Config.Agent.Interval.Duration)

    wg.Add(1)
    go func() {
        defer wg.Done()
        if err := a.flusher(shutdown, metricC); err != nil {
            log.Printf("Flusher routine failed, exiting: %s\n", err.Error())
            close(shutdown)
        }
    }()

    for _, input := range a.Config.Inputs {
        // Special handling for inputs that have their own collection interval
        // configured. Default intervals are handled below with gatherParallel
        if input.Config.Interval != 0 {
            wg.Add(1)
            go func(input *internal_models.RunningInput) {
                defer wg.Done()
                if err := a.gatherSeparate(shutdown, input, metricC); err != nil {
                    log.Printf(err.Error())
                }
            }(input)
        }
    }

    defer wg.Wait()

    for {
        if err := a.gatherParallel(metricC); err != nil {
            log.Printf(err.Error())
        }

        select {
        case <-shutdown:
            return nil
        case <-ticker.C:
            continue
        }
    }
}

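`Run` above wires everything through a single buffered metric channel: gather goroutines write into it, and `flusher` drains it into the outputs on a ticker or at shutdown. A stripped-down, runnable sketch of that fan-in pattern follows; a plain string stands in for `telegraf.Metric`, the intervals are shortened, and none of the names below come from the real codebase.

```go
package main

import (
    "fmt"
    "time"
)

func main() {
    metricC := make(chan string, 100) // shared buffer, like the agent's metric channel
    shutdown := make(chan struct{})

    // Two "inputs" on their own collection intervals.
    for _, name := range []string{"cpu", "mem"} {
        go func(name string) {
            ticker := time.NewTicker(50 * time.Millisecond)
            defer ticker.Stop()
            for {
                select {
                case <-shutdown:
                    return
                case <-ticker.C:
                    metricC <- name + " value=1"
                }
            }
        }(name)
    }

    // The "flusher": buffer incoming metrics, write them out on each flush tick.
    var batch []string
    flush := time.NewTicker(200 * time.Millisecond)
    defer flush.Stop()
    timeout := time.After(time.Second) // end the demo after one second
    for {
        select {
        case m := <-metricC:
            batch = append(batch, m)
        case <-flush.C:
            fmt.Printf("flushing %d metrics\n", len(batch))
            batch = batch[:0]
        case <-timeout:
            close(shutdown)
            return
        }
    }
}
```
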
184  agent/agent_test.go  (Normal file)
@@ -0,0 +1,184 @@
package agent
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/telegraf/internal/config"
|
||||
|
||||
// needing to load the plugins
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/all"
|
||||
// needing to load the outputs
|
||||
_ "github.com/influxdata/telegraf/plugins/outputs/all"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestAgent_OmitHostname(t *testing.T) {
|
||||
c := config.NewConfig()
|
||||
c.Agent.OmitHostname = true
|
||||
_, err := NewAgent(c)
|
||||
assert.NoError(t, err)
|
||||
assert.NotContains(t, c.Tags, "host")
|
||||
}
|
||||
|
||||
func TestAgent_LoadPlugin(t *testing.T) {
|
||||
c := config.NewConfig()
|
||||
c.InputFilters = []string{"mysql"}
|
||||
err := c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
|
||||
assert.NoError(t, err)
|
||||
a, _ := NewAgent(c)
|
||||
assert.Equal(t, 1, len(a.Config.Inputs))
|
||||
|
||||
c = config.NewConfig()
|
||||
c.InputFilters = []string{"foo"}
|
||||
err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
|
||||
assert.NoError(t, err)
|
||||
a, _ = NewAgent(c)
|
||||
assert.Equal(t, 0, len(a.Config.Inputs))
|
||||
|
||||
c = config.NewConfig()
|
||||
c.InputFilters = []string{"mysql", "foo"}
|
||||
err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
|
||||
assert.NoError(t, err)
|
||||
a, _ = NewAgent(c)
|
||||
assert.Equal(t, 1, len(a.Config.Inputs))
|
||||
|
||||
c = config.NewConfig()
|
||||
c.InputFilters = []string{"mysql", "redis"}
|
||||
err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
|
||||
assert.NoError(t, err)
|
||||
a, _ = NewAgent(c)
|
||||
assert.Equal(t, 2, len(a.Config.Inputs))
|
||||
|
||||
c = config.NewConfig()
|
||||
c.InputFilters = []string{"mysql", "foo", "redis", "bar"}
|
||||
err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
|
||||
assert.NoError(t, err)
|
||||
a, _ = NewAgent(c)
|
||||
assert.Equal(t, 2, len(a.Config.Inputs))
|
||||
}
|
||||
|
||||
func TestAgent_LoadOutput(t *testing.T) {
|
||||
c := config.NewConfig()
|
||||
c.OutputFilters = []string{"influxdb"}
|
||||
err := c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
|
||||
assert.NoError(t, err)
|
||||
a, _ := NewAgent(c)
|
||||
assert.Equal(t, 2, len(a.Config.Outputs))
|
||||
|
||||
c = config.NewConfig()
|
||||
c.OutputFilters = []string{"kafka"}
|
||||
err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
|
||||
    assert.NoError(t, err)
    a, _ = NewAgent(c)
    assert.Equal(t, 1, len(a.Config.Outputs))

    c = config.NewConfig()
    c.OutputFilters = []string{}
    err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
    assert.NoError(t, err)
    a, _ = NewAgent(c)
    assert.Equal(t, 3, len(a.Config.Outputs))

    c = config.NewConfig()
    c.OutputFilters = []string{"foo"}
    err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
    assert.NoError(t, err)
    a, _ = NewAgent(c)
    assert.Equal(t, 0, len(a.Config.Outputs))

    c = config.NewConfig()
    c.OutputFilters = []string{"influxdb", "foo"}
    err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
    assert.NoError(t, err)
    a, _ = NewAgent(c)
    assert.Equal(t, 2, len(a.Config.Outputs))

    c = config.NewConfig()
    c.OutputFilters = []string{"influxdb", "kafka"}
    err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
    assert.NoError(t, err)
    assert.Equal(t, 3, len(c.Outputs))
    a, _ = NewAgent(c)
    assert.Equal(t, 3, len(a.Config.Outputs))

    c = config.NewConfig()
    c.OutputFilters = []string{"influxdb", "foo", "kafka", "bar"}
    err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
    assert.NoError(t, err)
    a, _ = NewAgent(c)
    assert.Equal(t, 3, len(a.Config.Outputs))
}

func TestAgent_ZeroJitter(t *testing.T) {
    flushinterval := jitterInterval(time.Duration(10*time.Second),
        time.Duration(0*time.Second))

    actual := flushinterval.Nanoseconds()
    exp := time.Duration(10 * time.Second).Nanoseconds()

    if actual != exp {
        t.Errorf("Actual %v, expected %v", actual, exp)
    }
}

func TestAgent_ZeroInterval(t *testing.T) {
    min := time.Duration(500 * time.Millisecond).Nanoseconds()
    max := time.Duration(5 * time.Second).Nanoseconds()

    for i := 0; i < 1000; i++ {
        flushinterval := jitterInterval(time.Duration(0*time.Second),
            time.Duration(5*time.Second))
        actual := flushinterval.Nanoseconds()

        if actual > max {
            t.Errorf("Didn't expect interval %d to be > %d", actual, max)
            break
        }
        if actual < min {
            t.Errorf("Didn't expect interval %d to be < %d", actual, min)
            break
        }
    }
}

func TestAgent_ZeroBoth(t *testing.T) {
    flushinterval := jitterInterval(time.Duration(0*time.Second),
        time.Duration(0*time.Second))

    actual := flushinterval
    exp := time.Duration(500 * time.Millisecond)

    if actual != exp {
        t.Errorf("Actual %v, expected %v", actual, exp)
    }
}

func TestAgent_JitterMax(t *testing.T) {
    max := time.Duration(32 * time.Second).Nanoseconds()

    for i := 0; i < 1000; i++ {
        flushinterval := jitterInterval(time.Duration(30*time.Second),
            time.Duration(2*time.Second))
        actual := flushinterval.Nanoseconds()
        if actual > max {
            t.Errorf("Didn't expect interval %d to be > %d", actual, max)
            break
        }
    }
}

func TestAgent_JitterMin(t *testing.T) {
    min := time.Duration(30 * time.Second).Nanoseconds()

    for i := 0; i < 1000; i++ {
        flushinterval := jitterInterval(time.Duration(30*time.Second),
            time.Duration(2*time.Second))
        actual := flushinterval.Nanoseconds()
        if actual < min {
            t.Errorf("Didn't expect interval %d to be < %d", actual, min)
            break
        }
    }
}
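
For reference, a minimal `jitterInterval` consistent with the behavior these tests pin down (zero jitter returns the interval unchanged, a 500ms floor is enforced, and jitter adds less than `jitter` on top of `interval`) might look like the sketch below. This is an assumption drawn from the tests, not the agent's actual implementation, which may differ, for example in its randomness source.

```go
package agent

import (
    "math/rand"
    "time"
)

// jitterInterval returns interval plus a random duration in [0, jitter),
// clamping the result to a 500ms floor so a zero interval still flushes.
// Sketch only; inferred from the tests above.
func jitterInterval(interval, jitter time.Duration) time.Duration {
    out := interval
    if jitter.Nanoseconds() > 0 {
        out += time.Duration(rand.Int63n(jitter.Nanoseconds()))
    }
    if out < 500*time.Millisecond {
        out = 500 * time.Millisecond
    }
    return out
}
```
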
@@ -1,61 +0,0 @@
package telegraf

/*
func TestAgent_DrivesMetrics(t *testing.T) {
    var (
        plugin plugins.MockPlugin
    )

    defer plugin.AssertExpectations(t)
    defer metrics.AssertExpectations(t)

    a := &Agent{
        plugins: []plugins.Plugin{&plugin},
        Config:  &Config{},
    }

    plugin.On("Add", "foo", 1.2, nil).Return(nil)
    plugin.On("Add", "bar", 888, nil).Return(nil)

    err := a.crank()
    require.NoError(t, err)
}

func TestAgent_AppliesTags(t *testing.T) {
    var (
        plugin  plugins.MockPlugin
        metrics MockMetrics
    )

    defer plugin.AssertExpectations(t)
    defer metrics.AssertExpectations(t)

    a := &Agent{
        plugins: []plugins.Plugin{&plugin},
        metrics: &metrics,
        Config: &Config{
            Tags: map[string]string{
                "dc": "us-west-1",
            },
        },
    }

    m1 := cypress.Metric()
    m1.Add("name", "foo")
    m1.Add("value", 1.2)

    msgs := []*cypress.Message{m1}

    m2 := cypress.Metric()
    m2.Timestamp = m1.Timestamp
    m2.Add("name", "foo")
    m2.Add("value", 1.2)
    m2.AddTag("dc", "us-west-1")

    plugin.On("Read").Return(msgs, nil)
    metrics.On("Receive", m2).Return(nil)

    err := a.crank()
    require.NoError(t, err)
}
*/
21 circle.yml Normal file
@@ -0,0 +1,21 @@
machine:
  services:
    - docker
  post:
    - sudo service zookeeper stop
    - go version
    - go version | grep 1.6.2 || sudo rm -rf /usr/local/go
    - wget https://storage.googleapis.com/golang/go1.6.2.linux-amd64.tar.gz
    - sudo tar -C /usr/local -xzf go1.6.2.linux-amd64.tar.gz
    - go version

dependencies:
  override:
    - docker info
  post:
    - gem install fpm
    - sudo apt-get install -y rpm python-boto

test:
  override:
    - bash scripts/circle-test.sh
@@ -7,102 +7,275 @@ import (
    "os"
    "os/signal"
    "strings"
    "syscall"

    "github.com/influxdb/telegraf"
    _ "github.com/influxdb/telegraf/plugins/all"
    "github.com/influxdata/telegraf/agent"
    "github.com/influxdata/telegraf/internal/config"
    "github.com/influxdata/telegraf/plugins/inputs"
    _ "github.com/influxdata/telegraf/plugins/inputs/all"
    "github.com/influxdata/telegraf/plugins/outputs"
    _ "github.com/influxdata/telegraf/plugins/outputs/all"
)

var fDebug = flag.Bool("debug", false, "show metrics as they're generated to stdout")
var fDebug = flag.Bool("debug", false,
    "show metrics as they're generated to stdout")
var fQuiet = flag.Bool("quiet", false,
    "run in quiet mode")
var fTest = flag.Bool("test", false, "gather metrics, print them out, and exit")
var fConfig = flag.String("config", "", "configuration file to load")
var fConfigDirectory = flag.String("config-directory", "",
    "directory containing additional *.conf files")
var fVersion = flag.Bool("version", false, "display the version")
var fSampleConfig = flag.Bool("sample-config", false, "print out full sample configuration")
var fSampleConfig = flag.Bool("sample-config", false,
    "print out full sample configuration")
var fPidfile = flag.String("pidfile", "", "file to write our pid to")
var fInputFilters = flag.String("input-filter", "",
    "filter the inputs to enable, separator is :")
var fInputList = flag.Bool("input-list", false,
    "print available input plugins.")
var fOutputFilters = flag.String("output-filter", "",
    "filter the outputs to enable, separator is :")
var fOutputList = flag.Bool("output-list", false,
    "print available output plugins.")
var fUsage = flag.String("usage", "",
    "print usage for a plugin, ie, 'telegraf -usage mysql'")
var fInputFiltersLegacy = flag.String("filter", "",
    "filter the inputs to enable, separator is :")
var fOutputFiltersLegacy = flag.String("outputfilter", "",
    "filter the outputs to enable, separator is :")
var fConfigDirectoryLegacy = flag.String("configdirectory", "",
    "directory containing additional *.conf files")

var Version = "unreleased"
// Telegraf version, populated by the linker.
// ie, -ldflags "-X main.version=`git describe --always --tags`"
var (
    version string
    commit  string
    branch  string
)

const usage = `Telegraf, The plugin-driven server agent for collecting and reporting metrics.

Usage:

  telegraf <flags>

The flags are:

  -config <file>      configuration file to load
  -test               gather metrics once, print them to stdout, and exit
  -sample-config      print out full sample configuration to stdout
  -config-directory   directory containing additional *.conf files
  -input-filter       filter the input plugins to enable, separator is :
  -input-list         print all the available inputs
  -output-filter      filter the output plugins to enable, separator is :
  -output-list        print all the available outputs
  -usage              print usage for a plugin, ie, 'telegraf -usage mysql'
  -debug              print metrics as they're generated to stdout
  -quiet              run in quiet mode
  -version            print the version to stdout

In addition to the -config flag, telegraf will also load the config file from
an environment variable or default location. Precedence is:
  1. -config flag
  2. $TELEGRAF_CONFIG_PATH environment variable
  3. $HOME/.telegraf/telegraf.conf
  4. /etc/telegraf/telegraf.conf

Examples:

  # generate a telegraf config file:
  telegraf -sample-config > telegraf.conf

  # generate config with only cpu input & influxdb output plugins defined
  telegraf -sample-config -input-filter cpu -output-filter influxdb

  # run a single telegraf collection, outputting metrics to stdout
  telegraf -config telegraf.conf -test

  # run telegraf with all plugins defined in config file
  telegraf -config telegraf.conf

  # run telegraf, enabling the cpu & memory input, and influxdb output plugins
  telegraf -config telegraf.conf -input-filter cpu:mem -output-filter influxdb
`

func main() {
    flag.Parse()
    reload := make(chan bool, 1)
    reload <- true
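    // The agent runs inside this loop: the SIGHUP handler further down pushes
    // true into reload, so the loop restarts with a freshly loaded config.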
    for <-reload {
        reload <- false
        flag.Usage = func() { usageExit(0) }
        flag.Parse()
        args := flag.Args()

        if *fVersion {
            fmt.Printf("InfluxDB Telegraf agent - Version %s\n", Version)
            return
        }
        var inputFilters []string
        if *fInputFiltersLegacy != "" {
            fmt.Printf("WARNING '--filter' flag is deprecated, please use" +
                " '--input-filter'")
            inputFilter := strings.TrimSpace(*fInputFiltersLegacy)
            inputFilters = strings.Split(":"+inputFilter+":", ":")
        }
        if *fInputFilters != "" {
            inputFilter := strings.TrimSpace(*fInputFilters)
            inputFilters = strings.Split(":"+inputFilter+":", ":")
        }

        if *fSampleConfig {
            telegraf.PrintSampleConfig()
            return
        }
        var outputFilters []string
        if *fOutputFiltersLegacy != "" {
            fmt.Printf("WARNING '--outputfilter' flag is deprecated, please use" +
                " '--output-filter'")
            outputFilter := strings.TrimSpace(*fOutputFiltersLegacy)
            outputFilters = strings.Split(":"+outputFilter+":", ":")
        }
        if *fOutputFilters != "" {
            outputFilter := strings.TrimSpace(*fOutputFilters)
            outputFilters = strings.Split(":"+outputFilter+":", ":")
        }

        var (
            config *telegraf.Config
            err    error
        )
        if len(args) > 0 {
            switch args[0] {
            case "version":
                v := fmt.Sprintf("Telegraf - version %s", version)
                fmt.Println(v)
                return
            case "config":
                config.PrintSampleConfig(inputFilters, outputFilters)
                return
            }
        }

        if *fConfig != "" {
            config, err = telegraf.LoadConfig(*fConfig)
            if *fOutputList {
                fmt.Println("Available Output Plugins:")
                for k, _ := range outputs.Outputs {
                    fmt.Printf(" %s\n", k)
                }
                return
            }

            if *fInputList {
                fmt.Println("Available Input Plugins:")
                for k, _ := range inputs.Inputs {
                    fmt.Printf(" %s\n", k)
                }
                return
            }

            if *fVersion {
                v := fmt.Sprintf("Telegraf - version %s", version)
                fmt.Println(v)
                return
            }

            if *fSampleConfig {
                config.PrintSampleConfig(inputFilters, outputFilters)
                return
            }

            if *fUsage != "" {
                if err := config.PrintInputConfig(*fUsage); err != nil {
                    if err2 := config.PrintOutputConfig(*fUsage); err2 != nil {
                        log.Fatalf("%s and %s", err, err2)
                    }
                }
                return
            }

            // If no other options are specified, load the config file and run.
            c := config.NewConfig()
            c.OutputFilters = outputFilters
            c.InputFilters = inputFilters
            err := c.LoadConfig(*fConfig)
            if err != nil {
                fmt.Println(err)
                os.Exit(1)
            }

            if *fConfigDirectoryLegacy != "" {
                fmt.Printf("WARNING '--configdirectory' flag is deprecated, please use" +
                    " '--config-directory'")
                err = c.LoadDirectory(*fConfigDirectoryLegacy)
                if err != nil {
                    log.Fatal(err)
                }
            }

            if *fConfigDirectory != "" {
                err = c.LoadDirectory(*fConfigDirectory)
                if err != nil {
                    log.Fatal(err)
                }
            }
            if len(c.Outputs) == 0 {
                log.Fatalf("Error: no outputs found, did you provide a valid config file?")
            }
            if len(c.Inputs) == 0 {
                log.Fatalf("Error: no inputs found, did you provide a valid config file?")
            }

            ag, err := agent.NewAgent(c)
            if err != nil {
                log.Fatal(err)
            }
        } else {
            config = telegraf.DefaultConfig()
        }

        ag, err := telegraf.NewAgent(config)
        if err != nil {
            log.Fatal(err)
        }
        if *fDebug {
            ag.Config.Agent.Debug = true
        }

        if *fDebug {
            ag.Debug = true
        }
        if *fQuiet {
            ag.Config.Agent.Quiet = true
        }

        plugins, err := ag.LoadPlugins()
        if err != nil {
            log.Fatal(err)
        }

        if *fTest {
            if *fConfig != "" {
                if *fTest {
                    err = ag.Test()
                } else {
                    err = ag.TestAllPlugins()
                    if err != nil {
                        log.Fatal(err)
                    }
                    return
                }

                err = ag.Connect()
                if err != nil {
                    log.Fatal(err)
                }

                return
                shutdown := make(chan struct{})
                signals := make(chan os.Signal)
                signal.Notify(signals, os.Interrupt, syscall.SIGHUP)
                go func() {
                    sig := <-signals
                    if sig == os.Interrupt {
                        close(shutdown)
                    }
                    if sig == syscall.SIGHUP {
                        log.Printf("Reloading Telegraf config\n")
                        <-reload
                        reload <- true
                        close(shutdown)
                    }
                }()

                log.Printf("Starting Telegraf (version %s)\n", version)
                log.Printf("Loaded outputs: %s", strings.Join(c.OutputNames(), " "))
                log.Printf("Loaded inputs: %s", strings.Join(c.InputNames(), " "))
                log.Printf("Tags enabled: %s", c.ListTags())

                if *fPidfile != "" {
                    f, err := os.Create(*fPidfile)
                    if err != nil {
                        log.Fatalf("Unable to create pidfile: %s", err)
                    }

                    fmt.Fprintf(f, "%d\n", os.Getpid())

                    f.Close()
                }

                ag.Run(shutdown)
        }

        err = ag.Connect()
        if err != nil {
            log.Fatal(err)
        }

        shutdown := make(chan struct{})

        signals := make(chan os.Signal)

        signal.Notify(signals, os.Interrupt)

        go func() {
            <-signals
            close(shutdown)
        }()

        log.Print("InfluxDB Agent running")
        log.Printf("Loaded plugins: %s", strings.Join(plugins, " "))
        if ag.Debug {
            log.Printf("Debug: enabled")
            log.Printf("Agent Config: Interval:%s, Debug:%#v, Hostname:%#v\n",
                ag.Interval, ag.Debug, ag.Hostname)
        }

        if config.URL != "" {
            log.Printf("Sending metrics to: %s", config.URL)
            log.Printf("Tags enabled: %v", config.ListTags())
        }

        ag.Run(shutdown)
    }

func usageExit(rc int) {
    fmt.Println(usage)
    os.Exit(rc)
}

302 config.go
@@ -1,302 +0,0 @@
package telegraf

import (
    "errors"
    "fmt"
    "io/ioutil"
    "sort"
    "strings"
    "time"

    "github.com/influxdb/telegraf/plugins"
    "github.com/naoina/toml"
    "github.com/naoina/toml/ast"
)

type Duration struct {
    time.Duration
}

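// UnmarshalTOML parses a quoted TOML string such as "10s" into the embedded
// time.Duration; the surrounding quotes are stripped before parsing.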
func (d *Duration) UnmarshalTOML(b []byte) error {
    dur, err := time.ParseDuration(string(b[1 : len(b)-1]))
    if err != nil {
        return err
    }

    d.Duration = dur

    return nil
}

type Config struct {
    URL       string
    Username  string
    Password  string
    Database  string
    UserAgent string
    Tags      map[string]string

    agent   *ast.Table
    plugins map[string]*ast.Table
}

func (c *Config) Plugins() map[string]*ast.Table {
    return c.plugins
}

type ConfiguredPlugin struct {
    Name string

    Drop []string
    Pass []string

    Interval time.Duration
}

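// ShouldPass reports whether a measurement passes this plugin's prefix-based
// filters; an explicit Pass list takes precedence over the Drop list.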
func (cp *ConfiguredPlugin) ShouldPass(measurement string) bool {
    if cp.Pass != nil {
        for _, pat := range cp.Pass {
            if strings.HasPrefix(measurement, pat) {
                return true
            }
        }

        return false
    }

    if cp.Drop != nil {
        for _, pat := range cp.Drop {
            if strings.HasPrefix(measurement, pat) {
                return false
            }
        }

        return true
    }

    return true
}

func (c *Config) ApplyAgent(v interface{}) error {
    if c.agent != nil {
        return toml.UnmarshalTable(c.agent, v)
    }

    return nil
}

func (c *Config) ApplyPlugin(name string, v interface{}) (*ConfiguredPlugin, error) {
    cp := &ConfiguredPlugin{Name: name}

    if tbl, ok := c.plugins[name]; ok {

        if node, ok := tbl.Fields["pass"]; ok {
            if kv, ok := node.(*ast.KeyValue); ok {
                if ary, ok := kv.Value.(*ast.Array); ok {
                    for _, elem := range ary.Value {
                        if str, ok := elem.(*ast.String); ok {
                            cp.Pass = append(cp.Pass, str.Value)
                        }
                    }
                }
            }
        }

        if node, ok := tbl.Fields["drop"]; ok {
            if kv, ok := node.(*ast.KeyValue); ok {
                if ary, ok := kv.Value.(*ast.Array); ok {
                    for _, elem := range ary.Value {
                        if str, ok := elem.(*ast.String); ok {
                            cp.Drop = append(cp.Drop, str.Value)
                        }
                    }
                }
            }
        }

        if node, ok := tbl.Fields["interval"]; ok {
            if kv, ok := node.(*ast.KeyValue); ok {
                if str, ok := kv.Value.(*ast.String); ok {
                    dur, err := time.ParseDuration(str.Value)
                    if err != nil {
                        return nil, err
                    }

                    cp.Interval = dur
                }
            }
        }

        delete(tbl.Fields, "drop")
        delete(tbl.Fields, "pass")
        delete(tbl.Fields, "interval")
        return cp, toml.UnmarshalTable(tbl, v)
    }

    return cp, nil
}

func (c *Config) PluginsDeclared() []string {
    var plugins []string

    for name, _ := range c.plugins {
        plugins = append(plugins, name)
    }

    sort.Strings(plugins)

    return plugins
}

func DefaultConfig() *Config {
    return &Config{}
}

var ErrInvalidConfig = errors.New("invalid configuration")

func LoadConfig(path string) (*Config, error) {
    data, err := ioutil.ReadFile(path)
    if err != nil {
        return nil, err
    }

    tbl, err := toml.Parse(data)
    if err != nil {
        return nil, err
    }

    c := &Config{
        plugins: make(map[string]*ast.Table),
    }

    for name, val := range tbl.Fields {
        subtbl, ok := val.(*ast.Table)
        if !ok {
            return nil, ErrInvalidConfig
        }

        switch name {
        case "influxdb":
            err := toml.UnmarshalTable(subtbl, c)
            if err != nil {
                return nil, err
            }
        case "agent":
            c.agent = subtbl
        default:
            c.plugins[name] = subtbl
        }
    }

    return c, nil
}

func (c *Config) ListTags() string {
    var tags []string

    for k, v := range c.Tags {
        tags = append(tags, fmt.Sprintf("%s=%s", k, v))
    }

    sort.Strings(tags)

    return strings.Join(tags, " ")
}

type hasConfig interface {
    BasicConfig() string
}

type hasDescr interface {
    Description() string
}

var header = `# Telegraf configuration

# If this file is missing an [agent] section, you must first generate a
# valid config with 'telegraf -sample-config > telegraf.toml'

# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared plugins.

# Even if a plugin has no configuration, it must be declared in here
# to be active. Declaring a plugin means just specifying the name
# as a section with no variables. To deactivate a plugin, comment
# out the name and any variables.

# Use 'telegraf -config telegraf.toml -test' to see what metrics a config
# file would generate.

# One rule that plugins conform to is wherever a connection string
# can be passed, the values '' and 'localhost' are treated specially.
# They indicate to the plugin to use their own builtin configuration to
# connect to the local system.

# NOTE: The configuration has a few required parameters. They are marked
# with 'required'. Be sure to edit those to make this configuration work.

# Configuration for influxdb server to send metrics to
[influxdb]
# The full HTTP endpoint URL for your InfluxDB instance
url = "http://localhost:8086" # required.

# The target database for metrics. This database must already exist
database = "telegraf" # required.

# username = "telegraf"
# password = "metricsmetricsmetricsmetrics"

# Set the user agent for the POSTs (can be useful for log differentiation)
# user_agent = "telegraf"
# tags = { "dc": "us-east-1" }

# Tags can also be specified via a normal map, but only one form at a time:

# [influxdb.tags]
# dc = "us-east-1"

# Configuration for telegraf itself
# [agent]
# interval = "10s"
# debug = false
# hostname = "prod3241"

# PLUGINS

`

func PrintSampleConfig() {
    fmt.Printf(header)

    var names []string

    for name, _ := range plugins.Plugins {
        names = append(names, name)
    }

    sort.Strings(names)

    for _, name := range names {
        creator := plugins.Plugins[name]

        plugin := creator()

        fmt.Printf("# %s\n[%s]\n", plugin.Description(), name)

        var config string

        config = strings.TrimSpace(plugin.SampleConfig())

        if config == "" {
            fmt.Printf(" # no configuration\n\n")
        } else {
            fmt.Printf("\n")
            lines := strings.Split(config, "\n")
            for _, line := range lines {
                fmt.Printf("%s\n", line)
            }

            fmt.Printf("\n")
        }
    }
}
277 docs/CONFIGURATION.md Normal file
@@ -0,0 +1,277 @@
# Telegraf Configuration

## Generating a Configuration File

A default Telegraf config file can be generated using the -sample-config flag:

```
telegraf -sample-config > telegraf.conf
```

To generate a file with specific inputs and outputs, you can use the
-input-filter and -output-filter flags:

```
telegraf -sample-config -input-filter cpu:mem:net:swap -output-filter influxdb:kafka
```

You can see the latest config file with all available plugins here:
[telegraf.conf](https://github.com/influxdata/telegraf/blob/master/etc/telegraf.conf)

## Environment Variables

Environment variables can be used anywhere in the config file; simply prepend
them with $. For strings the variable must be within quotes (ie, "$STR_VAR");
for numbers and booleans they should be plain (ie, $INT_VAR, $BOOL_VAR).
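
For example, a config might pull an output password and a numeric agent setting
from the environment. The variable names below are illustrative, not required
by Telegraf:

```toml
[agent]
  # numbers are substituted unquoted
  metric_batch_size = $BATCH_SIZE

[[outputs.influxdb]]
  urls = ["http://localhost:8086"]
  database = "telegraf"
  # strings must stay inside quotes
  password = "$INFLUX_PASSWORD"
```
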
## `[global_tags]` Configuration

Global tags can be specified in the `[global_tags]` section of the config file
in key="value" format. All metrics being gathered on this host will be tagged
with the tags specified here.
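
For example (values are illustrative):

```toml
[global_tags]
  dc = "us-east-1"
  rack = "1a"
```
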
## `[agent]` Configuration

Telegraf has a few options you can configure under the `agent` section of the
config; an example section follows the list below.

* **interval**: Default data collection interval for all inputs
* **round_interval**: Rounds collection interval to 'interval'
ie, if interval="10s" then always collect on :00, :10, :20, etc.
* **metric_batch_size**: Telegraf will send metrics to outputs in batches of at
most metric_batch_size metrics.
* **metric_buffer_limit**: Telegraf will cache metric_buffer_limit metrics
for each output, and will flush this buffer on a successful write.
This should be a multiple of metric_batch_size and must not be less
than 2 times metric_batch_size.
* **collection_jitter**: Collection jitter is used to jitter
the collection by a random amount.
Each plugin will sleep for a random time within jitter before collecting.
This can be used to avoid many plugins querying things like sysfs at the
same time, which can have a measurable effect on the system.
* **flush_interval**: Default data flushing interval for all outputs.
You should not set this below interval.
Maximum flush_interval will be flush_interval + flush_jitter.
* **flush_jitter**: Jitter the flush interval by a random amount.
This is primarily to avoid
large write spikes for users running a large number of telegraf instances.
ie, a jitter of 5s and flush_interval 10s means flushes will happen every 10-15s.
* **debug**: Run telegraf in debug mode.
* **quiet**: Run telegraf in quiet mode.
* **hostname**: Override default hostname; if empty, use os.Hostname().
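
Putting those options together, a typical `[agent]` section might look like
this (the values are illustrative, not mandated defaults):

```toml
[agent]
  interval = "10s"
  round_interval = true
  metric_batch_size = 1000
  metric_buffer_limit = 10000
  collection_jitter = "0s"
  flush_interval = "10s"
  flush_jitter = "0s"
  debug = false
  quiet = false
  hostname = ""
```
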
#### Measurement Filtering

Filters can be configured per input or output; see below for examples.

* **namepass**: An array of strings that is used to filter metrics generated by the
current input. Each string in the array is tested as a glob match against
measurement names and if it matches, the measurement is emitted.
* **namedrop**: The inverse of namepass; if a measurement name matches, it is not emitted.
* **fieldpass**: An array of strings that is used to filter metrics generated by the
current input. Each string in the array is tested as a glob match against field names
and if it matches, the field is emitted. fieldpass is not available for outputs.
* **fielddrop**: The inverse of fieldpass; if a field name matches, it is not emitted.
fielddrop is not available for outputs.
* **tagpass**: tag names and arrays of strings that are used to filter
measurements by the current input. Each string in the array is tested as a glob
match against the tag value, and if it matches the measurement is emitted.
* **tagdrop**: The inverse of tagpass. If a tag matches, the measurement is not
emitted. This is tested on measurements that have passed the tagpass test.
* **tagexclude**: tagexclude can be used to exclude a tag from measurement(s).
As opposed to tagdrop, which will drop an entire measurement based on its
tags, tagexclude simply strips the given tag keys from the measurement. This
can be used on inputs & outputs, but it is _recommended_ to be used on inputs,
as it is more efficient to filter out tags at the ingestion point.
* **taginclude**: taginclude is the inverse of tagexclude. It will only include
the tag keys in the final measurement.
## Input Configuration

Some configuration options are configurable per input:

* **name_override**: Override the base name of the measurement.
(Default is the name of the input).
* **name_prefix**: Specifies a prefix to attach to the measurement name.
* **name_suffix**: Specifies a suffix to attach to the measurement name.
* **tags**: A map of tags to apply to a specific input's measurements.
* **interval**: How often to gather this metric. Normal plugins use a single
global interval, but if one particular input should be run less or more often,
you can configure that here.

#### Input Configuration Examples

This is a full working config that will output CPU data to an InfluxDB instance
at 192.168.59.103:8086, tagging measurements with dc="denver-1". It will output
measurements at a 10s interval and will collect per-cpu data, dropping any
fields which begin with `time_`.

```toml
[global_tags]
  dc = "denver-1"

[agent]
  interval = "10s"

# OUTPUTS
[[outputs.influxdb]]
  url = "http://192.168.59.103:8086" # required.
  database = "telegraf" # required.
  precision = "s"

# INPUTS
[[inputs.cpu]]
  percpu = true
  totalcpu = false
  # filter all fields beginning with 'time_'
  fielddrop = ["time_*"]
```

#### Input Config: tagpass and tagdrop

```toml
[[inputs.cpu]]
  percpu = true
  totalcpu = false
  fielddrop = ["cpu_time"]
  # Don't collect CPU data for cpu6 & cpu7
  [inputs.cpu.tagdrop]
    cpu = [ "cpu6", "cpu7" ]

[[inputs.disk]]
  [inputs.disk.tagpass]
    # tagpass conditions are OR, not AND.
    # If the (filesystem is ext4 or xfs) OR (the path is /opt or /home)
    # then the metric passes
    fstype = [ "ext4", "xfs" ]
    # Globs can also be used on the tag values
    path = [ "/opt", "/home*" ]
```

#### Input Config: fieldpass and fielddrop

```toml
# Drop all metrics for guest & steal CPU usage
[[inputs.cpu]]
  percpu = false
  totalcpu = true
  fielddrop = ["usage_guest", "usage_steal"]

# Only store inode related metrics for disks
[[inputs.disk]]
  fieldpass = ["inodes*"]
```

#### Input Config: namepass and namedrop

```toml
# Drop all metrics about containers for kubelet
[[inputs.prometheus]]
  urls = ["http://kube-node-1:4194/metrics"]
  namedrop = ["container_*"]

# Only store rest client related metrics for kubelet
[[inputs.prometheus]]
  urls = ["http://kube-node-1:4194/metrics"]
  namepass = ["rest_client_*"]
```

#### Input Config: taginclude and tagexclude

```toml
# Only include the "cpu" tag in the measurements for the cpu plugin.
[[inputs.cpu]]
  percpu = true
  totalcpu = true
  taginclude = ["cpu"]

# Exclude the "fstype" tag from the measurements for the disk plugin.
[[inputs.disk]]
  tagexclude = ["fstype"]
```

#### Input config: prefix, suffix, and override

This plugin will emit measurements with the name `cpu_total`:

```toml
[[inputs.cpu]]
  name_suffix = "_total"
  percpu = false
  totalcpu = true
```

This will emit measurements with the name `foobar`:

```toml
[[inputs.cpu]]
  name_override = "foobar"
  percpu = false
  totalcpu = true
```

#### Input config: tags

This plugin will emit measurements with two additional tags: `tag1=foo` and
`tag2=bar`.

NOTE: Order matters; the `[inputs.cpu.tags]` table must be at the _end_ of the
plugin definition.

```toml
[[inputs.cpu]]
  percpu = false
  totalcpu = true
  [inputs.cpu.tags]
    tag1 = "foo"
    tag2 = "bar"
```

#### Multiple inputs of the same type

Additional inputs (or outputs) of the same type can be specified; just define
more instances in the config file. It is highly recommended that
you utilize `name_override`, `name_prefix`, or `name_suffix` config options
to avoid measurement collisions:

```toml
[[inputs.cpu]]
  percpu = false
  totalcpu = true

[[inputs.cpu]]
  percpu = true
  totalcpu = false
  name_override = "percpu_usage"
  fielddrop = ["cpu_time*"]
```

## Output Configuration

Telegraf also supports specifying multiple output sinks to send data to.
Configuring each output sink is different, but examples can be
found by running `telegraf -sample-config`.

```toml
[[outputs.influxdb]]
  urls = [ "http://localhost:8086" ]
  database = "telegraf"
  precision = "s"
  # Drop all measurements that start with "aerospike"
  namedrop = ["aerospike*"]

[[outputs.influxdb]]
  urls = [ "http://localhost:8086" ]
  database = "telegraf-aerospike-data"
  precision = "s"
  # Only accept aerospike data:
  namepass = ["aerospike*"]

[[outputs.influxdb]]
  urls = [ "http://localhost:8086" ]
  database = "telegraf-cpu0-data"
  precision = "s"
  # Only store measurements where the tag "cpu" matches the value "cpu0"
  [outputs.influxdb.tagpass]
    cpu = ["cpu0"]
```
362 docs/DATA_FORMATS_INPUT.md Normal file
@@ -0,0 +1,362 @@
# Telegraf Input Data Formats

Telegraf is able to parse the following input data formats into metrics:

1. [InfluxDB Line Protocol](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#influx)
1. [JSON](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#json)
1. [Graphite](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#graphite)
1. [Value](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#value), ie: 45 or "booyah"
1. [Nagios](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#nagios) (exec input only)

Telegraf metrics, like InfluxDB
[points](https://docs.influxdata.com/influxdb/v0.10/write_protocols/line/),
are a combination of four basic parts:

1. Measurement Name
1. Tags
1. Fields
1. Timestamp

These four parts are easily defined when using InfluxDB line-protocol as a
data format. But there are other data formats that users may want to use which
require more advanced configuration to create usable Telegraf metrics.
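
For instance, a single line of InfluxDB line protocol carries all four parts
(an illustrative sample, not output from a real host):

```
cpu,host=tars,cpu=cpu0 usage_idle=98.3,usage_user=0.8 1455320660004257758
```
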
Plugins such as `exec` and `kafka_consumer` parse textual data. Up until now,
these plugins were statically configured to parse just a single
data format. `exec` mostly only supported parsing JSON, and `kafka_consumer` only
supported data in InfluxDB line-protocol.

But now we are normalizing the parsing of various data formats across all
plugins that can support it. You will be able to identify a plugin that supports
different data formats by the presence of a `data_format` config option, for
example, in the exec plugin:

```toml
[[inputs.exec]]
  ## Commands array
  commands = ["/tmp/test.sh", "/usr/bin/mycollector --foo=bar"]

  ## measurement name suffix (for separating different commands)
  name_suffix = "_mycollector"

  ## Data format to consume.
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "json"

  ## Additional configuration options go here
```

Each data_format has an additional set of configuration options available, which
I'll go over below.

# Influx:

There are no additional configuration options for InfluxDB line-protocol. The
metrics are parsed directly into Telegraf metrics.

#### Influx Configuration:

```toml
[[inputs.exec]]
  ## Commands array
  commands = ["/tmp/test.sh", "/usr/bin/mycollector --foo=bar"]

  ## measurement name suffix (for separating different commands)
  name_suffix = "_mycollector"

  ## Data format to consume.
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "influx"
```

# JSON:

The JSON data format flattens JSON into metric _fields_.
NOTE: Only numerical values are converted to fields, and they are converted
into a float. Strings are ignored unless specified as a tag_key (see below).

So for example, this JSON:

```json
{
    "a": 5,
    "b": {
        "c": 6
    },
    "ignored": "I'm a string"
}
```

Would get translated into _fields_ of a measurement:

```
myjsonmetric a=5,b_c=6
```

The _measurement_ _name_ is usually the name of the plugin,
but can be overridden using the `name_override` config option.

#### JSON Configuration:

The JSON data format supports specifying "tag keys". If specified, keys
will be searched for in the root-level of the JSON blob. If the key(s) exist,
they will be applied as tags to the Telegraf metrics.

For example, if you had this configuration:

```toml
[[inputs.exec]]
  ## Commands array
  commands = ["/tmp/test.sh", "/usr/bin/mycollector --foo=bar"]

  ## measurement name suffix (for separating different commands)
  name_suffix = "_mycollector"

  ## Data format to consume.
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "json"

  ## List of tag names to extract from top-level of JSON server response
  tag_keys = [
    "my_tag_1",
    "my_tag_2"
  ]
```

with this JSON output from a command:

```json
{
    "a": 5,
    "b": {
        "c": 6
    },
    "my_tag_1": "foo"
}
```

Your Telegraf metrics would get tagged with "my_tag_1":

```
exec_mycollector,my_tag_1=foo a=5,b_c=6
```

# Value:

The "value" data format translates single values into Telegraf metrics. This
is done by assigning a measurement name and setting a single field ("value")
as the parsed metric.

#### Value Configuration:

You **must** tell Telegraf what type of metric to collect by using the
`data_type` configuration option. Available options are:

1. integer
2. float or long
3. string
4. boolean

**Note:** It is also recommended that you set `name_override` to a measurement
name that makes sense for your metric; otherwise it will just be set to the
name of the plugin.

```toml
[[inputs.exec]]
  ## Commands array
  commands = ["cat /proc/sys/kernel/random/entropy_avail"]

  ## override the default metric name of "exec"
  name_override = "entropy_available"

  ## Data format to consume.
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "value"
  data_type = "integer" # required
```
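
With the configuration above, a command that prints a bare `1024` would be
parsed into a metric roughly like this (illustrative rendering, with the field
type governed by `data_type`):

```
1024
=> entropy_available value=1024
```
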
# Graphite:

The Graphite data format translates graphite _dot_ buckets directly into
telegraf measurement names, with a single value field, and without any tags. For
more advanced options, Telegraf supports specifying "templates" to translate
graphite buckets into Telegraf metrics.

#### Separator:

You can specify a separator to use for the parsed metrics.
By default, it will leave the metrics with a "." separator.
Setting `separator = "_"` will translate:

```
cpu.usage.idle 99
=> cpu_usage_idle value=99
```

#### Measurement/Tag Templates:

The most basic template is to specify a single transformation to apply to all
incoming metrics. _measurement_ is a special keyword that tells Telegraf which
parts of the graphite bucket to combine into the measurement name. It can have a
trailing `*` to indicate that the remainder of the metric should be used.
Other words are considered tag keys. So the following template:

```toml
templates = [
    "region.measurement*"
]
```

would result in the following Graphite -> Telegraf transformation:

```
us-west.cpu.load 100
=> cpu.load,region=us-west value=100
```

#### Field Templates:

There is also a _field_ keyword, which can only be specified once.
The field keyword tells Telegraf to give the metric that field name.
So the following template:

```toml
templates = [
    "measurement.measurement.field.field.region"
]
```

would result in the following Graphite -> Telegraf transformation:

```
cpu.usage.idle.percent.us-west 100
=> cpu_usage,region=us-west idle_percent=100
```

The field key can also be derived from the second "half" of the input metric-name by specifying ```field*```:

```toml
templates = [
    "measurement.measurement.region.field*"
]
```

would result in the following Graphite -> Telegraf transformation:

```
cpu.usage.us-west.idle.percentage 100
=> cpu_usage,region=us-west idle_percentage=100
```

(This cannot be used in conjunction with "measurement*"!)

#### Filter Templates:

Users can also filter the template(s) to use based on the name of the bucket,
using glob matching, like so:

```toml
templates = [
    "cpu.* measurement.measurement.region",
    "mem.* measurement.measurement.host"
]
```

which would result in the following transformation:

```
cpu.load.us-west 100
=> cpu_load,region=us-west value=100

mem.cached.localhost 256
=> mem_cached,host=localhost value=256
```

#### Adding Tags:

Tags that don't exist on the received metric can be added to it.
You can add additional tags by specifying them after the pattern.
Tags have the same format as the line protocol.
Multiple tags are separated by commas.

```toml
templates = [
    "measurement.measurement.field.region datacenter=1a"
]
```

would result in the following Graphite -> Telegraf transformation:

```
cpu.usage.idle.us-west 100
=> cpu_usage,region=us-west,datacenter=1a idle=100
```

There are many more options available;
[more details can be found here](https://github.com/influxdata/influxdb/tree/master/services/graphite#templates).

#### Graphite Configuration:

```toml
[[inputs.exec]]
  ## Commands array
  commands = ["/tmp/test.sh", "/usr/bin/mycollector --foo=bar"]

  ## measurement name suffix (for separating different commands)
  name_suffix = "_mycollector"

  ## Data format to consume.
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "graphite"

  ## This string will be used to join the matched values.
  separator = "_"

  ## Each template line requires a template pattern. It can have an optional
  ## filter before the template and separated by spaces. It can also have optional extra
  ## tags following the template. Multiple tags should be separated by commas and no spaces
  ## similar to the line protocol format. There can be only one default template.
  ## Templates support below format:
  ## 1. filter + template
  ## 2. filter + template + extra tag
  ## 3. filter + template with field key
  ## 4. default template
  templates = [
    "*.app env.service.resource.measurement",
    "stats.* .host.measurement* region=us-west,agent=sensu",
    "stats2.* .host.measurement.field",
    "measurement*"
  ]
```

# Nagios:

There are no additional configuration options for Nagios line-protocol. The
metrics are parsed directly into Telegraf metrics.

Note: the Nagios input data format is only supported in the `exec` input plugin.

#### Nagios Configuration:

```toml
[[inputs.exec]]
  ## Commands array
  commands = ["/usr/lib/nagios/plugins/check_load", "-w 5,6,7 -c 7,8,9"]

  ## measurement name suffix (for separating different commands)
  name_suffix = "_mycollector"

  ## Data format to consume.
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "nagios"
```
150 docs/DATA_FORMATS_OUTPUT.md Normal file
@@ -0,0 +1,150 @@
# Telegraf Output Data Formats

Telegraf is able to serialize metrics into the following output data formats:

1. [InfluxDB Line Protocol](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#influx)
1. [JSON](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#json)
1. [Graphite](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite)

Telegraf metrics, like InfluxDB
[points](https://docs.influxdata.com/influxdb/v0.10/write_protocols/line/),
are a combination of four basic parts:

1. Measurement Name
1. Tags
1. Fields
1. Timestamp

In InfluxDB line protocol, these 4 parts are easily defined in textual form:

```
measurement_name[,tag1=val1,...] field1=val1[,field2=val2,...] [timestamp]
```

For Telegraf outputs that write textual data (such as `kafka`, `mqtt`, and `file`),
InfluxDB line protocol was originally the only available output format. But now
we are normalizing telegraf metric "serializers" into a
[plugin-like interface](https://github.com/influxdata/telegraf/tree/master/plugins/serializers)
across all output plugins that can support it.
You will be able to identify a plugin that supports different data formats
by the presence of a `data_format`
config option, for example, in the `file` output plugin:

```toml
[[outputs.file]]
  ## Files to write to, "stdout" is a specially handled file.
  files = ["stdout"]

  ## Data format to output.
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
  data_format = "influx"

  ## Additional configuration options go here
```

Each data_format has an additional set of configuration options available, which
I'll go over below.

# Influx:

There are no additional configuration options for InfluxDB line-protocol. The
metrics are serialized directly into InfluxDB line-protocol.

### Influx Configuration:

```toml
[[outputs.file]]
  ## Files to write to, "stdout" is a specially handled file.
  files = ["stdout", "/tmp/metrics.out"]

  ## Data format to output.
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
  data_format = "influx"
```

# Graphite:

The Graphite data format translates Telegraf metrics into _dot_ buckets. A
template can be specified for the output of Telegraf metrics into Graphite
buckets. The default template is:

```
template = "host.tags.measurement.field"
```

In the above template, we have four parts:

1. _host_ is a tag key. This can be any tag key that is in the Telegraf
metric(s). If the key doesn't exist, it will be ignored. If it does exist, the
tag value will be filled in.
1. _tags_ is a special keyword that outputs all remaining tag values, separated
by dots and in alphabetical order (by tag key). These will be filled after all
tag keys are filled.
1. _measurement_ is a special keyword that outputs the measurement name.
1. _field_ is a special keyword that outputs the field name.

Which means the following influx metric -> graphite conversion would happen:

```
cpu,cpu=cpu-total,dc=us-east-1,host=tars usage_idle=98.09,usage_user=0.89 1455320660004257758
=>
tars.cpu-total.us-east-1.cpu.usage_user 0.89 1455320690
tars.cpu-total.us-east-1.cpu.usage_idle 98.09 1455320690
```

### Graphite Configuration:

```toml
[[outputs.file]]
  ## Files to write to, "stdout" is a specially handled file.
  files = ["stdout", "/tmp/metrics.out"]

  ## Data format to output.
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
  data_format = "graphite"

  # prefix each graphite bucket
  prefix = "telegraf"
  # graphite template
  template = "host.tags.measurement.field"
```

# JSON:

The JSON data format serializes Telegraf metrics in JSON format. The format is:

```json
{
    "fields":{
        "field_1":30,
        "field_2":4,
        "field_N":59,
        "n_images":660
    },
    "name":"docker",
    "tags":{
        "host":"raynor"
    },
    "timestamp":1458229140
}
```

### JSON Configuration:

```toml
[[outputs.file]]
  ## Files to write to, "stdout" is a specially handled file.
  files = ["stdout", "/tmp/metrics.out"]

  ## Data format to output.
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
  data_format = "json"
```
32 docs/LICENSE_OF_DEPENDENCIES.md Normal file
@@ -0,0 +1,32 @@
# List
- github.com/Shopify/sarama [MIT LICENSE](https://github.com/Shopify/sarama/blob/master/MIT-LICENSE)
- github.com/Sirupsen/logrus [MIT LICENSE](https://github.com/Sirupsen/logrus/blob/master/LICENSE)
- github.com/armon/go-metrics [MIT LICENSE](https://github.com/armon/go-metrics/blob/master/LICENSE)
- github.com/boltdb/bolt [MIT LICENSE](https://github.com/boltdb/bolt/blob/master/LICENSE)
- github.com/cenkalti/backoff [MIT LICENSE](https://github.com/cenkalti/backoff/blob/master/LICENSE)
- github.com/dancannon/gorethink [APACHE LICENSE](https://github.com/dancannon/gorethink/blob/master/LICENSE)
- github.com/eapache/go-resiliency [MIT LICENSE](https://github.com/eapache/go-resiliency/blob/master/LICENSE)
- github.com/eapache/queue [MIT LICENSE](https://github.com/eapache/queue/blob/master/LICENSE)
- github.com/fsouza/go-dockerclient [BSD LICENSE](https://github.com/fsouza/go-dockerclient/blob/master/LICENSE)
- github.com/go-sql-driver/mysql [MPL LICENSE](https://github.com/go-sql-driver/mysql/blob/master/LICENSE)
- github.com/gogo/protobuf [BSD LICENSE](https://github.com/gogo/protobuf/blob/master/LICENSE)
- github.com/golang/protobuf [BSD LICENSE](https://github.com/golang/protobuf/blob/master/LICENSE)
- github.com/golang/snappy [BSD LICENSE](https://github.com/golang/snappy/blob/master/LICENSE)
- github.com/gonuts/go-shellquote (No License, but the project it was forked from https://github.com/kballard/go-shellquote is [MIT](https://github.com/kballard/go-shellquote/blob/master/LICENSE)).
- github.com/hashicorp/go-msgpack [BSD LICENSE](https://github.com/hashicorp/go-msgpack/blob/master/LICENSE)
- github.com/hashicorp/raft [MPL LICENSE](https://github.com/hashicorp/raft/blob/master/LICENSE)
- github.com/hashicorp/raft-boltdb [MPL LICENSE](https://github.com/hashicorp/raft-boltdb/blob/master/LICENSE)
- github.com/lib/pq [MIT LICENSE](https://github.com/lib/pq/blob/master/LICENSE.md)
- github.com/matttproud/golang_protobuf_extensions [APACHE LICENSE](https://github.com/matttproud/golang_protobuf_extensions/blob/master/LICENSE)
- github.com/naoina/go-stringutil [MIT LICENSE](https://github.com/naoina/go-stringutil/blob/master/LICENSE)
- github.com/naoina/toml [MIT LICENSE](https://github.com/naoina/toml/blob/master/LICENSE)
- github.com/prometheus/client_golang [APACHE LICENSE](https://github.com/prometheus/client_golang/blob/master/LICENSE)
- github.com/samuel/go-zookeeper [BSD LICENSE](https://github.com/samuel/go-zookeeper/blob/master/LICENSE)
- github.com/stretchr/objx [MIT LICENSE](https://github.com/stretchr/objx)
- github.com/stretchr/testify [MIT LICENSE](https://github.com/stretchr/testify/blob/master/LICENCE.txt)
- github.com/wvanbergen/kafka [MIT LICENSE](https://github.com/wvanbergen/kafka/blob/master/LICENSE)
- github.com/wvanbergen/kazoo-go [MIT LICENSE](https://github.com/wvanbergen/kazoo-go/blob/master/MIT-LICENSE)
- gopkg.in/dancannon/gorethink.v1 [APACHE LICENSE](https://github.com/dancannon/gorethink/blob/v1.1.2/LICENSE)
- gopkg.in/mgo.v2 [BSD LICENSE](https://github.com/go-mgo/mgo/blob/v2/LICENSE)
- golang.org/x/crypto/ [BSD LICENSE](https://github.com/golang/crypto/blob/master/LICENSE)
36 docs/WINDOWS_SERVICE.md Normal file
@@ -0,0 +1,36 @@
# Running Telegraf as a Windows Service

If you have tried to install Go binaries as Windows Services with the **sc.exe**
tool, you may have seen that the service errors and stops running after a while.

**NSSM** (the Non-Sucking Service Manager) is a tool that helps you in a
[number of scenarios](http://nssm.cc/scenarios), including running Go binaries
that were not specifically designed to run as Windows services.

## NSSM Installation via Chocolatey

You can install [Chocolatey](https://chocolatey.org/) and [NSSM](http://nssm.cc/)
with these commands:

```powershell
iex ((new-object net.webclient).DownloadString('https://chocolatey.org/install.ps1'))
choco install -y nssm
```

## Installing Telegraf as a Windows Service with NSSM

You can download the latest Telegraf Windows binaries (still Experimental at
the moment) from [the Telegraf Github repo](https://github.com/influxdata/telegraf).

Then you can create a C:\telegraf folder, unzip the binary there and modify the
**telegraf.conf** sample to select the metrics you want to send to **InfluxDB**.

Once you have NSSM installed in your system, the process is quite straightforward.
You only need to type this command in your Windows shell:

```powershell
nssm install Telegraf c:\telegraf\telegraf.exe -config c:\telegraf\telegraf.config
```

And now your service will be installed in Windows and you will be able to start
and stop it gracefully.
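
Once installed, the service can be controlled like any other Windows service,
for example with the standard `net` commands (using the service name given to
`nssm install` above):

```powershell
net start Telegraf
net stop Telegraf
```
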
@@ -1,134 +0,0 @@
# Telegraf configuration

# If this file is missing an [agent] section, you must first generate a
# valid config with 'telegraf -sample-config > telegraf.toml'

# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared plugins.

# Even if a plugin has no configuration, it must be declared in here
# to be active. Declaring a plugin means just specifying the name
# as a section with no variables. To deactivate a plugin, comment
# out the name and any variables.

# Use 'telegraf -config telegraf.toml -test' to see what metrics a config
# file would generate.

# One rule that plugins conform to is wherever a connection string
# can be passed, the values '' and 'localhost' are treated specially.
# They indicate to the plugin to use their own builtin configuration to
# connect to the local system.

# NOTE: The configuration has a few required parameters. They are marked
# with 'required'. Be sure to edit those to make this configuration work.

# Configuration for influxdb server to send metrics to
[influxdb]
# The full HTTP endpoint URL for your InfluxDB instance
url = "http://localhost:8086" # required.

# The target database for metrics. This database must already exist
database = "telegraf" # required.

# username = "telegraf"
# password = "metricsmetricsmetricsmetrics"

# Set the user agent for the POSTs (can be useful for log differentiation)
# user_agent = "telegraf"
# tags = { "dc": "us-east-1" }

# Tags can also be specified via a normal map, but only one form at a time:

# [influxdb.tags]
# dc = "us-east-1"

# Configuration for telegraf itself
# [agent]
# interval = "10s"
# debug = false
# hostname = "prod3241"

# PLUGINS

# Read metrics about cpu usage
[cpu]
# no configuration

# Read metrics about disk usage by mount point
[disk]
# no configuration

# Read metrics about docker containers
[docker]
# no configuration

# Read metrics about disk IO by device
[io]
# no configuration

# Read metrics about memory usage
[mem]
# no configuration

# Read metrics from one or many mysql servers
[mysql]

# specify servers via a url matching:
# [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify]]
# e.g. root:root@http://10.0.0.18/?tls=false
#
# If no servers are specified, then localhost is used as the host.
servers = ["localhost"]

# Read metrics about network interface usage
[net]

# By default, telegraf gathers stats from any up interface (excluding loopback)
# Setting interfaces will tell it to gather these explicit interfaces,
# regardless of status.
#
# interfaces = ["eth0", ... ]

# Read metrics from one or many postgresql servers
[postgresql]

# specify servers via an array of tables
[[postgresql.servers]]

# specify address via a url matching:
# postgres://[pqgotest[:password]]@localhost?sslmode=[disable|verify-ca|verify-full]
# or a simple string:
# host=localhost user=pqotest password=... sslmode=...
#
# All connection parameters are optional. By default, the host is localhost
# and the user is the currently running user. For localhost, we default
# to sslmode=disable as well.
#

address = "sslmode=disable"

# A list of databases to pull metrics about. If not specified, metrics for all
# databases are gathered.

# databases = ["app_production", "blah_testing"]

# [[postgresql.servers]]
# address = "influx@remoteserver"

# Read metrics from one or many redis servers
[redis]

# An array of addresses to gather stats about. Specify an ip or hostname
# with optional port. ie localhost, 10.10.3.33:18832, etc.
#
# If no servers are specified, then localhost is used as the host.
servers = ["localhost"]

# Read metrics about swap memory usage
[swap]
# no configuration

# Read metrics about system load
[system]
# no configuration
11
etc/logrotate.d/telegraf
Normal file
@@ -0,0 +1,11 @@
/var/log/telegraf/telegraf.log
{
rotate 6
daily
missingok
dateext
copytruncate
notifempty
compress
}
1507
etc/telegraf.conf
Normal file
File diff suppressed because it is too large
164
etc/telegraf_windows.conf
Normal file
@@ -0,0 +1,164 @@
# Telegraf configuration

# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared inputs, and sent to the declared outputs.

# Plugins must be declared in here to be active.
# To deactivate a plugin, comment out the name and any variables.

# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
# file would generate.

# Global tags can be specified here in key="value" format.
[global_tags]
# dc = "us-east-1" # will tag all metrics with dc=us-east-1
# rack = "1a"

# Configuration for telegraf agent
[agent]
## Default data collection interval for all inputs
interval = "10s"
## Rounds collection interval to 'interval'
## ie, if interval="10s" then always collect on :00, :10, :20, etc.
round_interval = true

## Telegraf will cache metric_buffer_limit metrics for each output, and will
## flush this buffer on a successful write.
metric_buffer_limit = 1000
## Flush the buffer whenever full, regardless of flush_interval.
flush_buffer_when_full = true

## Collection jitter is used to jitter the collection by a random amount.
## Each plugin will sleep for a random time within jitter before collecting.
## This can be used to avoid many plugins querying things like sysfs at the
## same time, which can have a measurable effect on the system.
collection_jitter = "0s"

## Default flushing interval for all outputs. You shouldn't set this below
## interval. Maximum flush_interval will be flush_interval + flush_jitter
flush_interval = "10s"
## Jitter the flush interval by a random amount. This is primarily to avoid
## large write spikes for users running a large number of telegraf instances.
## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
flush_jitter = "0s"

## Run telegraf in debug mode
debug = false
## Run telegraf in quiet mode
quiet = false
## Override default hostname, if empty use os.Hostname()
hostname = ""


###############################################################################
#                                  OUTPUTS                                    #
###############################################################################

# Configuration for influxdb server to send metrics to
[[outputs.influxdb]]
# The full HTTP or UDP endpoint URL for your InfluxDB instance.
# Multiple urls can be specified but it is assumed that they are part of the same
# cluster, this means that only ONE of the urls will be written to each interval.
# urls = ["udp://localhost:8089"] # UDP endpoint example
urls = ["http://localhost:8086"] # required
# The target database for metrics (telegraf will create it if it does not exist)
database = "telegraf" # required
# Precision of writes, valid values are "ns", "us" (or "µs"), "ms", "s", "m", "h".
# note: using second precision greatly helps InfluxDB compression
precision = "s"

## Write timeout (for the InfluxDB client), formatted as a string.
## If not provided, will default to 5s. 0s means no timeout (not recommended).
timeout = "5s"
# username = "telegraf"
# password = "metricsmetricsmetricsmetrics"
# Set the user agent for HTTP POSTs (can be useful for log differentiation)
# user_agent = "telegraf"
# Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes)
# udp_payload = 512


###############################################################################
#                                   INPUTS                                    #
###############################################################################

# Windows Performance Counters plugin.
# This is the recommended method of monitoring system metrics on windows,
# as the regular system plugins (inputs.cpu, inputs.mem, etc.) rely on WMI,
# which utilizes a lot of system resources.
#
# See more configuration examples at:
# https://github.com/influxdata/telegraf/tree/master/plugins/inputs/win_perf_counters

[[inputs.win_perf_counters]]
[[inputs.win_perf_counters.object]]
# Processor usage, alternative to native, reports on a per-core basis.
ObjectName = "Processor"
Instances = ["*"]
Counters = ["% Idle Time", "% Interrupt Time", "% Privileged Time", "% User Time", "% Processor Time"]
Measurement = "win_cpu"
#IncludeTotal=false #Set to true to include _Total instance when querying for all (*).

[[inputs.win_perf_counters.object]]
# Disk times and queues
ObjectName = "LogicalDisk"
Instances = ["*"]
Counters = ["% Idle Time", "% Disk Time","% Disk Read Time", "% Disk Write Time", "% User Time", "Current Disk Queue Length"]
Measurement = "win_disk"
#IncludeTotal=false #Set to true to include _Total instance when querying for all (*).

[[inputs.win_perf_counters.object]]
ObjectName = "System"
Counters = ["Context Switches/sec","System Calls/sec"]
Instances = ["------"]
Measurement = "win_system"
#IncludeTotal=false #Set to true to include _Total instance when querying for all (*).

[[inputs.win_perf_counters.object]]
# Example query where the Instance portion must be removed to get data back, such as from the Memory object.
ObjectName = "Memory"
Counters = ["Available Bytes","Cache Faults/sec","Demand Zero Faults/sec","Page Faults/sec","Pages/sec","Transition Faults/sec","Pool Nonpaged Bytes","Pool Paged Bytes"]
Instances = ["------"] # Use 6 x - to remove the Instance bit from the query.
Measurement = "win_mem"
#IncludeTotal=false #Set to true to include _Total instance when querying for all (*).


# Windows system plugins using WMI (disabled by default, using
# win_perf_counters over WMI is recommended)

# Read metrics about cpu usage
#[[inputs.cpu]]
## Whether to report per-cpu stats or not
#percpu = true
## Whether to report total system cpu stats or not
#totalcpu = true
## Comment this line if you want the raw CPU time metrics
#fielddrop = ["time_*"]

# Read metrics about disk usage by mount point
#[[inputs.disk]]
## By default, telegraf gathers stats for all mountpoints.
## Setting mountpoints will restrict the stats to the specified mountpoints.
## mount_points=["/"]

## Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually
## present on /run, /var/run, /dev/shm or /dev).
#ignore_fs = ["tmpfs", "devtmpfs"]

# Read metrics about disk IO by device
#[[inputs.diskio]]
## By default, telegraf will gather stats for all devices including
## disk partitions.
## Setting devices will restrict the stats to the specified devices.
## devices = ["sda", "sdb"]
## Uncomment the following line if you do not need disk serial numbers.
## skip_serial_number = true

# Read metrics about memory usage
#[[inputs.mem]]
# no configuration

# Read metrics about swap memory usage
#[[inputs.swap]]
# no configuration
31
input.go
Normal file
@@ -0,0 +1,31 @@
package telegraf

type Input interface {
    // SampleConfig returns the default configuration of the Input
    SampleConfig() string

    // Description returns a one-sentence description of the Input
    Description() string

    // Gather takes in an accumulator and adds the metrics that the Input
    // gathers. This is called every "interval"
    Gather(Accumulator) error
}

type ServiceInput interface {
    // SampleConfig returns the default configuration of the Input
    SampleConfig() string

    // Description returns a one-sentence description of the Input
    Description() string

    // Gather takes in an accumulator and adds the metrics that the Input
    // gathers. This is called every "interval"
    Gather(Accumulator) error

    // Start starts the ServiceInput's service, whatever that may be
    Start(Accumulator) error

    // Stop stops the services and closes any necessary channels and connections
    Stop()
}
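As a quick illustration of the plugin contract above, here is a minimal hypothetical input that satisfies the `Input` interface. The plugin name, measurement, and values are invented for the example; the `acc.AddFields(measurement, fields, tags)` call and the `inputs.Add` registration are assumptions based on how the accumulator and the `inputs.Inputs` registry are used elsewhere in this changeset:

```go
package mock

import (
    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/plugins/inputs"
)

// MockInput is a hypothetical input used only to illustrate the interface.
type MockInput struct{}

// SampleConfig returns the default (empty) configuration.
func (m *MockInput) SampleConfig() string { return "" }

// Description returns a one-sentence description of the input.
func (m *MockInput) Description() string { return "Emit a constant value for demonstration" }

// Gather is invoked by the agent once per collection interval.
func (m *MockInput) Gather(acc telegraf.Accumulator) error {
    fields := map[string]interface{}{"value": 42}
    tags := map[string]string{"source": "mock"}
    acc.AddFields("mock", fields, tags)
    return nil
}

// Registering a creator function makes the plugin visible to the config loader.
func init() {
    inputs.Add("mock", func() telegraf.Input { return &MockInput{} })
}
```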
77
internal/buffer/buffer.go
Normal file
@@ -0,0 +1,77 @@
package buffer

import (
    "github.com/influxdata/telegraf"
)

// Buffer is an object for storing metrics in a circular buffer.
type Buffer struct {
    buf chan telegraf.Metric
    // total dropped metrics
    drops int
    // total metrics added
    total int
}

// NewBuffer returns a Buffer.
// size is the maximum number of metrics that Buffer will cache. If Add is
// called when the buffer is full, then the oldest metric(s) will be dropped.
func NewBuffer(size int) *Buffer {
    return &Buffer{
        buf: make(chan telegraf.Metric, size),
    }
}

// IsEmpty returns true if Buffer is empty.
func (b *Buffer) IsEmpty() bool {
    return len(b.buf) == 0
}

// Len returns the current length of the buffer.
func (b *Buffer) Len() int {
    return len(b.buf)
}

// Drops returns the total number of dropped metrics that have occurred in this
// buffer since instantiation.
func (b *Buffer) Drops() int {
    return b.drops
}

// Total returns the total number of metrics that have been added to this buffer.
func (b *Buffer) Total() int {
    return b.total
}

// Add adds metrics to the buffer. When the buffer is full, the oldest
// metric is dropped to make room for the new one.
func (b *Buffer) Add(metrics ...telegraf.Metric) {
    for i := range metrics {
        b.total++
        select {
        case b.buf <- metrics[i]:
        default:
            b.drops++
            <-b.buf
            b.buf <- metrics[i]
        }
    }
}

// Batch returns a batch of metrics of size batchSize.
// The batch will be of maximum length batchSize. It can be less than batchSize
// if the length of Buffer is less than batchSize.
func (b *Buffer) Batch(batchSize int) []telegraf.Metric {
    n := min(len(b.buf), batchSize)
    out := make([]telegraf.Metric, n)
    for i := 0; i < n; i++ {
        out[i] = <-b.buf
    }
    return out
}

func min(a, b int) int {
    if b < a {
        return b
    }
    return a
}
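To make the drop-oldest behaviour of `Add` and the draining behaviour of `Batch` concrete, here is a small usage sketch. It borrows `testutil.TestMetric` from the package's own tests (shown next) to construct metrics; the buffer size and values are arbitrary:

```go
package main

import (
    "fmt"

    "github.com/influxdata/telegraf/internal/buffer"
    "github.com/influxdata/telegraf/testutil"
)

func main() {
    // A buffer that holds at most 2 metrics.
    b := buffer.NewBuffer(2)

    // Adding a third metric evicts the oldest one and counts a drop.
    b.Add(testutil.TestMetric(1, "m1"))
    b.Add(testutil.TestMetric(2, "m2"))
    b.Add(testutil.TestMetric(3, "m3"))

    fmt.Println(b.Len(), b.Drops(), b.Total()) // 2 1 3

    // Batch drains up to batchSize metrics, oldest first.
    batch := b.Batch(10)
    fmt.Println(len(batch), b.IsEmpty()) // 2 true
}
```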
94
internal/buffer/buffer_test.go
Normal file
@@ -0,0 +1,94 @@
package buffer

import (
    "testing"

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/testutil"

    "github.com/stretchr/testify/assert"
)

var metricList = []telegraf.Metric{
    testutil.TestMetric(2, "mymetric1"),
    testutil.TestMetric(1, "mymetric2"),
    testutil.TestMetric(11, "mymetric3"),
    testutil.TestMetric(15, "mymetric4"),
    testutil.TestMetric(8, "mymetric5"),
}

func BenchmarkAddMetrics(b *testing.B) {
    buf := NewBuffer(10000)
    m := testutil.TestMetric(1, "mymetric")
    for n := 0; n < b.N; n++ {
        buf.Add(m)
    }
}

func TestNewBufferBasicFuncs(t *testing.T) {
    b := NewBuffer(10)

    assert.True(t, b.IsEmpty())
    assert.Zero(t, b.Len())
    assert.Zero(t, b.Drops())
    assert.Zero(t, b.Total())

    m := testutil.TestMetric(1, "mymetric")
    b.Add(m)
    assert.False(t, b.IsEmpty())
    assert.Equal(t, b.Len(), 1)
    assert.Equal(t, b.Drops(), 0)
    assert.Equal(t, b.Total(), 1)

    b.Add(metricList...)
    assert.False(t, b.IsEmpty())
    assert.Equal(t, b.Len(), 6)
    assert.Equal(t, b.Drops(), 0)
    assert.Equal(t, b.Total(), 6)
}

func TestDroppingMetrics(t *testing.T) {
    b := NewBuffer(10)

    // Add up to the size of the buffer
    b.Add(metricList...)
    b.Add(metricList...)
    assert.False(t, b.IsEmpty())
    assert.Equal(t, b.Len(), 10)
    assert.Equal(t, b.Drops(), 0)
    assert.Equal(t, b.Total(), 10)

    // Add 5 more and verify they were dropped
    b.Add(metricList...)
    assert.False(t, b.IsEmpty())
    assert.Equal(t, b.Len(), 10)
    assert.Equal(t, b.Drops(), 5)
    assert.Equal(t, b.Total(), 15)
}

func TestGettingBatches(t *testing.T) {
    b := NewBuffer(20)

    // Verify that the buffer returned is smaller than requested when there are
    // not as many items as requested.
    b.Add(metricList...)
    batch := b.Batch(10)
    assert.Len(t, batch, 5)

    // Verify that the buffer is now empty
    assert.True(t, b.IsEmpty())
    assert.Zero(t, b.Len())
    assert.Zero(t, b.Drops())
    assert.Equal(t, b.Total(), 5)

    // Verify that the buffer returned is not more than the size requested
    b.Add(metricList...)
    batch = b.Batch(3)
    assert.Len(t, batch, 3)

    // Verify that buffer is not empty
    assert.False(t, b.IsEmpty())
    assert.Equal(t, b.Len(), 2)
    assert.Equal(t, b.Drops(), 0)
    assert.Equal(t, b.Total(), 10)
}
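If you want to exercise this package directly, the tests and the benchmark above run with the standard Go tooling, e.g. `go test -bench=. ./internal/buffer/` from the repository root.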
971
internal/config/config.go
Normal file
@@ -0,0 +1,971 @@
package config

import (
    "bytes"
    "errors"
    "fmt"
    "io/ioutil"
    "log"
    "os"
    "path/filepath"
    "regexp"
    "sort"
    "strings"
    "time"

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/internal"
    "github.com/influxdata/telegraf/internal/models"
    "github.com/influxdata/telegraf/plugins/inputs"
    "github.com/influxdata/telegraf/plugins/outputs"
    "github.com/influxdata/telegraf/plugins/parsers"
    "github.com/influxdata/telegraf/plugins/serializers"

    "github.com/influxdata/config"
    "github.com/influxdata/toml"
    "github.com/influxdata/toml/ast"
)

var (
    // Default input plugins
    inputDefaults = []string{"cpu", "mem", "swap", "system", "kernel",
        "processes", "disk", "diskio"}

    // Default output plugins
    outputDefaults = []string{"influxdb"}

    // envVarRe is a regex to find environment variables in the config file
    envVarRe = regexp.MustCompile(`\$\w+`)
)

// Config specifies the URL/user/password for the database that telegraf
// will be logging to, as well as all the plugins that the user has
// specified
type Config struct {
    Tags          map[string]string
    InputFilters  []string
    OutputFilters []string

    Agent   *AgentConfig
    Inputs  []*internal_models.RunningInput
    Outputs []*internal_models.RunningOutput
}

func NewConfig() *Config {
    c := &Config{
        // Agent defaults:
        Agent: &AgentConfig{
            Interval:      internal.Duration{Duration: 10 * time.Second},
            RoundInterval: true,
            FlushInterval: internal.Duration{Duration: 10 * time.Second},
            FlushJitter:   internal.Duration{Duration: 5 * time.Second},
        },

        Tags:          make(map[string]string),
        Inputs:        make([]*internal_models.RunningInput, 0),
        Outputs:       make([]*internal_models.RunningOutput, 0),
        InputFilters:  make([]string, 0),
        OutputFilters: make([]string, 0),
    }
    return c
}

type AgentConfig struct {
    // Interval at which to gather information
    Interval internal.Duration

    // RoundInterval rounds collection interval to 'interval'.
    // ie, if Interval=10s then always collect on :00, :10, :20, etc.
    RoundInterval bool

    // CollectionJitter is used to jitter the collection by a random amount.
    // Each plugin will sleep for a random time within jitter before collecting.
    // This can be used to avoid many plugins querying things like sysfs at the
    // same time, which can have a measurable effect on the system.
    CollectionJitter internal.Duration

    // FlushInterval is the Interval at which to flush data
    FlushInterval internal.Duration

    // FlushJitter jitters the flush interval by a random amount.
    // This is primarily to avoid large write spikes for users running a large
    // number of telegraf instances.
    // ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
    FlushJitter internal.Duration

    // MetricBatchSize is the maximum number of metrics that is written to an
    // output plugin in one call.
    MetricBatchSize int

    // MetricBufferLimit is the max number of metrics that each output plugin
    // will cache. The buffer is cleared when a successful write occurs. When
    // full, the oldest metrics will be overwritten. This number should be a
    // multiple of MetricBatchSize. Due to the current implementation, it
    // cannot be less than 2 times MetricBatchSize.
    MetricBufferLimit int

    // FlushBufferWhenFull tells Telegraf to flush the metric buffer whenever
    // it fills up, regardless of FlushInterval. Setting this option to true
    // does _not_ deactivate FlushInterval.
    FlushBufferWhenFull bool

    // TODO(cam): Remove UTC and Precision parameters, they are no longer
    // valid for the agent config. Leaving them here for now for backwards-
    // compatibility
    UTC       bool `toml:"utc"`
    Precision string

    // Debug is the option for running in debug mode
    Debug bool

    // Quiet is the option for running in quiet mode
    Quiet        bool
    Hostname     string
    OmitHostname bool
}

// InputNames returns a list of strings of the configured inputs.
func (c *Config) InputNames() []string {
    var name []string
    for _, input := range c.Inputs {
        name = append(name, input.Name)
    }
    return name
}

// OutputNames returns a list of strings of the configured outputs.
func (c *Config) OutputNames() []string {
    var name []string
    for _, output := range c.Outputs {
        name = append(name, output.Name)
    }
    return name
}

// ListTags returns a string of tags specified in the config,
// line-protocol style
func (c *Config) ListTags() string {
    var tags []string

    for k, v := range c.Tags {
        tags = append(tags, fmt.Sprintf("%s=%s", k, v))
    }

    sort.Strings(tags)

    return strings.Join(tags, " ")
}

var header = `# Telegraf Configuration
#
# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared inputs, and sent to the declared outputs.
#
# Plugins must be declared in here to be active.
# To deactivate a plugin, comment out the name and any variables.
#
# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
# file would generate.
#
# Environment variables can be used anywhere in this config file, simply prepend
# them with $. For strings the variable must be within quotes (ie, "$STR_VAR"),
# for numbers and booleans they should be plain (ie, $INT_VAR, $BOOL_VAR)


# Global tags can be specified here in key="value" format.
[global_tags]
# dc = "us-east-1" # will tag all metrics with dc=us-east-1
# rack = "1a"
## Environment variables can be used as tags, and throughout the config file
# user = "$USER"


# Configuration for telegraf agent
[agent]
## Default data collection interval for all inputs
interval = "10s"
## Rounds collection interval to 'interval'
## ie, if interval="10s" then always collect on :00, :10, :20, etc.
round_interval = true

## Telegraf will send metrics to outputs in batches of at
## most metric_batch_size metrics.
metric_batch_size = 1000
## For failed writes, telegraf will cache metric_buffer_limit metrics for each
## output, and will flush this buffer on a successful write. Oldest metrics
## are dropped first when this buffer fills.
metric_buffer_limit = 10000

## Collection jitter is used to jitter the collection by a random amount.
## Each plugin will sleep for a random time within jitter before collecting.
## This can be used to avoid many plugins querying things like sysfs at the
## same time, which can have a measurable effect on the system.
collection_jitter = "0s"

## Default flushing interval for all outputs. You shouldn't set this below
## interval. Maximum flush_interval will be flush_interval + flush_jitter
flush_interval = "10s"
## Jitter the flush interval by a random amount. This is primarily to avoid
## large write spikes for users running a large number of telegraf instances.
## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
flush_jitter = "0s"

## Run telegraf in debug mode
debug = false
## Run telegraf in quiet mode
quiet = false
## Override default hostname, if empty use os.Hostname()
hostname = ""
## If set to true, do not set the "host" tag in the telegraf agent.
omit_hostname = false


###############################################################################
#                            OUTPUT PLUGINS                                   #
###############################################################################
`

var inputHeader = `

###############################################################################
#                             INPUT PLUGINS                                   #
###############################################################################
`

var serviceInputHeader = `

###############################################################################
#                         SERVICE INPUT PLUGINS                               #
###############################################################################
`

// PrintSampleConfig prints the sample config
func PrintSampleConfig(inputFilters []string, outputFilters []string) {
    fmt.Printf(header)

    if len(outputFilters) != 0 {
        printFilteredOutputs(outputFilters, false)
    } else {
        printFilteredOutputs(outputDefaults, false)
        // Print non-default outputs, commented
        var pnames []string
        for pname := range outputs.Outputs {
            if !sliceContains(pname, outputDefaults) {
                pnames = append(pnames, pname)
            }
        }
        sort.Strings(pnames)
        printFilteredOutputs(pnames, true)
    }

    fmt.Printf(inputHeader)
    if len(inputFilters) != 0 {
        printFilteredInputs(inputFilters, false)
    } else {
        printFilteredInputs(inputDefaults, false)
        // Print non-default inputs, commented
        var pnames []string
        for pname := range inputs.Inputs {
            if !sliceContains(pname, inputDefaults) {
                pnames = append(pnames, pname)
            }
        }
        sort.Strings(pnames)
        printFilteredInputs(pnames, true)
    }
}

func printFilteredInputs(inputFilters []string, commented bool) {
    // Filter inputs
    var pnames []string
    for pname := range inputs.Inputs {
        if sliceContains(pname, inputFilters) {
            pnames = append(pnames, pname)
        }
    }
    sort.Strings(pnames)

    // cache service inputs to print them at the end
    servInputs := make(map[string]telegraf.ServiceInput)
    // for alphabetical looping:
    servInputNames := []string{}

    // Print Inputs
    for _, pname := range pnames {
        creator := inputs.Inputs[pname]
        input := creator()

        switch p := input.(type) {
        case telegraf.ServiceInput:
            servInputs[pname] = p
            servInputNames = append(servInputNames, pname)
            continue
        }

        printConfig(pname, input, "inputs", commented)
    }

    // Print Service Inputs
    if len(servInputs) == 0 {
        return
    }
    sort.Strings(servInputNames)
    fmt.Printf(serviceInputHeader)
    for _, name := range servInputNames {
        printConfig(name, servInputs[name], "inputs", commented)
    }
}

func printFilteredOutputs(outputFilters []string, commented bool) {
    // Filter outputs
    var onames []string
    for oname := range outputs.Outputs {
        if sliceContains(oname, outputFilters) {
            onames = append(onames, oname)
        }
    }
    sort.Strings(onames)

    // Print Outputs
    for _, oname := range onames {
        creator := outputs.Outputs[oname]
        output := creator()
        printConfig(oname, output, "outputs", commented)
    }
}

type printer interface {
    Description() string
    SampleConfig() string
}

func printConfig(name string, p printer, op string, commented bool) {
    comment := ""
    if commented {
        comment = "# "
    }
    fmt.Printf("\n%s# %s\n%s[[%s.%s]]", comment, p.Description(), comment,
        op, name)

    config := p.SampleConfig()
    if config == "" {
        fmt.Printf("\n%s # no configuration\n\n", comment)
    } else {
        lines := strings.Split(config, "\n")
        for i, line := range lines {
            if i == 0 || i == len(lines)-1 {
                fmt.Print("\n")
                continue
            }
            fmt.Print(comment + line + "\n")
        }
    }
}

func sliceContains(name string, list []string) bool {
    for _, b := range list {
        if b == name {
            return true
        }
    }
    return false
}

// PrintInputConfig prints the config usage of a single input.
func PrintInputConfig(name string) error {
    if creator, ok := inputs.Inputs[name]; ok {
        printConfig(name, creator(), "inputs", false)
    } else {
        return errors.New(fmt.Sprintf("Input %s not found", name))
    }
    return nil
}

// PrintOutputConfig prints the config usage of a single output.
func PrintOutputConfig(name string) error {
    if creator, ok := outputs.Outputs[name]; ok {
        printConfig(name, creator(), "outputs", false)
    } else {
        return errors.New(fmt.Sprintf("Output %s not found", name))
    }
    return nil
}

func (c *Config) LoadDirectory(path string) error {
    directoryEntries, err := ioutil.ReadDir(path)
    if err != nil {
        return err
    }
    for _, entry := range directoryEntries {
        if entry.IsDir() {
            continue
        }
        name := entry.Name()
        if len(name) < 6 || name[len(name)-5:] != ".conf" {
            continue
        }
        err := c.LoadConfig(filepath.Join(path, name))
        if err != nil {
            return err
        }
    }
    return nil
}

// Try to find a default config file at these locations (in order):
//   1. $TELEGRAF_CONFIG_PATH
//   2. $HOME/.telegraf/telegraf.conf
//   3. /etc/telegraf/telegraf.conf
func getDefaultConfigPath() (string, error) {
    envfile := os.Getenv("TELEGRAF_CONFIG_PATH")
    homefile := os.ExpandEnv("${HOME}/.telegraf/telegraf.conf")
    etcfile := "/etc/telegraf/telegraf.conf"
    for _, path := range []string{envfile, homefile, etcfile} {
        if _, err := os.Stat(path); err == nil {
            log.Printf("Using config file: %s", path)
            return path, nil
        }
    }

    // if we got here, we didn't find a file in a default location
    return "", fmt.Errorf("No config file specified, and could not find one"+
        " in $TELEGRAF_CONFIG_PATH, %s, or %s", homefile, etcfile)
}

// LoadConfig loads the given config file and applies it to c
func (c *Config) LoadConfig(path string) error {
    var err error
    if path == "" {
        if path, err = getDefaultConfigPath(); err != nil {
            return err
        }
    }
    tbl, err := parseFile(path)
    if err != nil {
        return fmt.Errorf("Error parsing %s, %s", path, err)
    }

    // Parse tags tables first:
    for _, tableName := range []string{"tags", "global_tags"} {
        if val, ok := tbl.Fields[tableName]; ok {
            subTable, ok := val.(*ast.Table)
            if !ok {
                return fmt.Errorf("%s: invalid configuration", path)
            }
            if err = config.UnmarshalTable(subTable, c.Tags); err != nil {
                log.Printf("Could not parse [global_tags] config\n")
                return fmt.Errorf("Error parsing %s, %s", path, err)
            }
        }
    }

    // Parse agent table:
    if val, ok := tbl.Fields["agent"]; ok {
        subTable, ok := val.(*ast.Table)
        if !ok {
            return fmt.Errorf("%s: invalid configuration", path)
        }
        if err = config.UnmarshalTable(subTable, c.Agent); err != nil {
            log.Printf("Could not parse [agent] config\n")
            return fmt.Errorf("Error parsing %s, %s", path, err)
        }
    }

    // Parse all the rest of the plugins:
    for name, val := range tbl.Fields {
        subTable, ok := val.(*ast.Table)
        if !ok {
            return fmt.Errorf("%s: invalid configuration", path)
        }

        switch name {
        case "agent", "global_tags", "tags":
        case "outputs":
            for pluginName, pluginVal := range subTable.Fields {
                switch pluginSubTable := pluginVal.(type) {
                case *ast.Table:
                    if err = c.addOutput(pluginName, pluginSubTable); err != nil {
                        return fmt.Errorf("Error parsing %s, %s", path, err)
                    }
                case []*ast.Table:
                    for _, t := range pluginSubTable {
                        if err = c.addOutput(pluginName, t); err != nil {
                            return fmt.Errorf("Error parsing %s, %s", path, err)
                        }
                    }
                default:
                    return fmt.Errorf("Unsupported config format: %s, file %s",
                        pluginName, path)
                }
            }
        case "inputs", "plugins":
            for pluginName, pluginVal := range subTable.Fields {
                switch pluginSubTable := pluginVal.(type) {
                case *ast.Table:
                    if err = c.addInput(pluginName, pluginSubTable); err != nil {
                        return fmt.Errorf("Error parsing %s, %s", path, err)
                    }
                case []*ast.Table:
                    for _, t := range pluginSubTable {
                        if err = c.addInput(pluginName, t); err != nil {
                            return fmt.Errorf("Error parsing %s, %s", path, err)
                        }
                    }
                default:
                    return fmt.Errorf("Unsupported config format: %s, file %s",
                        pluginName, path)
                }
            }
        // Assume it's an input for legacy config file support if no other
        // identifiers are present
        default:
            if err = c.addInput(name, subTable); err != nil {
                return fmt.Errorf("Error parsing %s, %s", path, err)
            }
        }
    }
    return nil
}

// parseFile loads a TOML configuration from a provided path and
// returns the AST produced from the TOML parser. When loading the file, it
// will find environment variables and replace them.
func parseFile(fpath string) (*ast.Table, error) {
    contents, err := ioutil.ReadFile(fpath)
    if err != nil {
        return nil, err
    }

    env_vars := envVarRe.FindAll(contents, -1)
    for _, env_var := range env_vars {
        env_val := os.Getenv(strings.TrimPrefix(string(env_var), "$"))
        if env_val != "" {
            contents = bytes.Replace(contents, env_var, []byte(env_val), 1)
        }
    }

    return toml.Parse(contents)
}

func (c *Config) addOutput(name string, table *ast.Table) error {
    if len(c.OutputFilters) > 0 && !sliceContains(name, c.OutputFilters) {
        return nil
    }
    creator, ok := outputs.Outputs[name]
    if !ok {
        return fmt.Errorf("Undefined but requested output: %s", name)
    }
    output := creator()

    // If the output has a SetSerializer function, then this means it can write
    // arbitrary types of output, so build the serializer and set it.
    switch t := output.(type) {
    case serializers.SerializerOutput:
        serializer, err := buildSerializer(name, table)
        if err != nil {
            return err
        }
        t.SetSerializer(serializer)
    }

    outputConfig, err := buildOutput(name, table)
    if err != nil {
        return err
    }

    if err := config.UnmarshalTable(table, output); err != nil {
        return err
    }

    ro := internal_models.NewRunningOutput(name, output, outputConfig,
        c.Agent.MetricBatchSize, c.Agent.MetricBufferLimit)
    c.Outputs = append(c.Outputs, ro)
    return nil
}

func (c *Config) addInput(name string, table *ast.Table) error {
    if len(c.InputFilters) > 0 && !sliceContains(name, c.InputFilters) {
        return nil
    }
    // Legacy support renaming io input to diskio
    if name == "io" {
        name = "diskio"
    }

    creator, ok := inputs.Inputs[name]
    if !ok {
        return fmt.Errorf("Undefined but requested input: %s", name)
    }
    input := creator()

    // If the input has a SetParser function, then this means it can accept
    // arbitrary types of input, so build the parser and set it.
    switch t := input.(type) {
    case parsers.ParserInput:
        parser, err := buildParser(name, table)
        if err != nil {
            return err
        }
        t.SetParser(parser)
    }

    pluginConfig, err := buildInput(name, table)
    if err != nil {
        return err
    }

    if err := config.UnmarshalTable(table, input); err != nil {
        return err
    }

    rp := &internal_models.RunningInput{
        Name:   name,
        Input:  input,
        Config: pluginConfig,
    }
    c.Inputs = append(c.Inputs, rp)
    return nil
}

// buildFilter builds a Filter
// (tagpass/tagdrop/namepass/namedrop/fieldpass/fielddrop) to
// be inserted into the internal_models.OutputConfig/internal_models.InputConfig
// to be used for glob filtering on tags and measurements
func buildFilter(tbl *ast.Table) (internal_models.Filter, error) {
    f := internal_models.Filter{}

    if node, ok := tbl.Fields["namepass"]; ok {
        if kv, ok := node.(*ast.KeyValue); ok {
            if ary, ok := kv.Value.(*ast.Array); ok {
                for _, elem := range ary.Value {
                    if str, ok := elem.(*ast.String); ok {
                        f.NamePass = append(f.NamePass, str.Value)
                        f.IsActive = true
                    }
                }
            }
        }
    }

    if node, ok := tbl.Fields["namedrop"]; ok {
        if kv, ok := node.(*ast.KeyValue); ok {
            if ary, ok := kv.Value.(*ast.Array); ok {
                for _, elem := range ary.Value {
                    if str, ok := elem.(*ast.String); ok {
                        f.NameDrop = append(f.NameDrop, str.Value)
                        f.IsActive = true
                    }
                }
            }
        }
    }

    fields := []string{"pass", "fieldpass"}
    for _, field := range fields {
        if node, ok := tbl.Fields[field]; ok {
            if kv, ok := node.(*ast.KeyValue); ok {
                if ary, ok := kv.Value.(*ast.Array); ok {
                    for _, elem := range ary.Value {
                        if str, ok := elem.(*ast.String); ok {
                            f.FieldPass = append(f.FieldPass, str.Value)
                            f.IsActive = true
                        }
                    }
                }
            }
        }
    }

    fields = []string{"drop", "fielddrop"}
    for _, field := range fields {
        if node, ok := tbl.Fields[field]; ok {
            if kv, ok := node.(*ast.KeyValue); ok {
                if ary, ok := kv.Value.(*ast.Array); ok {
                    for _, elem := range ary.Value {
                        if str, ok := elem.(*ast.String); ok {
                            f.FieldDrop = append(f.FieldDrop, str.Value)
                            f.IsActive = true
                        }
                    }
                }
            }
        }
    }

    if node, ok := tbl.Fields["tagpass"]; ok {
        if subtbl, ok := node.(*ast.Table); ok {
            for name, val := range subtbl.Fields {
                if kv, ok := val.(*ast.KeyValue); ok {
                    tagfilter := &internal_models.TagFilter{Name: name}
                    if ary, ok := kv.Value.(*ast.Array); ok {
                        for _, elem := range ary.Value {
                            if str, ok := elem.(*ast.String); ok {
                                tagfilter.Filter = append(tagfilter.Filter, str.Value)
                            }
                        }
                    }
                    f.TagPass = append(f.TagPass, *tagfilter)
                    f.IsActive = true
                }
            }
        }
    }

    if node, ok := tbl.Fields["tagdrop"]; ok {
        if subtbl, ok := node.(*ast.Table); ok {
            for name, val := range subtbl.Fields {
                if kv, ok := val.(*ast.KeyValue); ok {
                    tagfilter := &internal_models.TagFilter{Name: name}
                    if ary, ok := kv.Value.(*ast.Array); ok {
                        for _, elem := range ary.Value {
                            if str, ok := elem.(*ast.String); ok {
                                tagfilter.Filter = append(tagfilter.Filter, str.Value)
                            }
                        }
                    }
                    f.TagDrop = append(f.TagDrop, *tagfilter)
                    f.IsActive = true
                }
            }
        }
    }

    if node, ok := tbl.Fields["tagexclude"]; ok {
        if kv, ok := node.(*ast.KeyValue); ok {
            if ary, ok := kv.Value.(*ast.Array); ok {
                for _, elem := range ary.Value {
                    if str, ok := elem.(*ast.String); ok {
                        f.TagExclude = append(f.TagExclude, str.Value)
                    }
                }
            }
        }
    }

    if node, ok := tbl.Fields["taginclude"]; ok {
        if kv, ok := node.(*ast.KeyValue); ok {
            if ary, ok := kv.Value.(*ast.Array); ok {
                for _, elem := range ary.Value {
                    if str, ok := elem.(*ast.String); ok {
                        f.TagInclude = append(f.TagInclude, str.Value)
                    }
                }
            }
        }
    }
    if err := f.CompileFilter(); err != nil {
        return f, err
    }

    delete(tbl.Fields, "namedrop")
    delete(tbl.Fields, "namepass")
    delete(tbl.Fields, "fielddrop")
    delete(tbl.Fields, "fieldpass")
    delete(tbl.Fields, "drop")
    delete(tbl.Fields, "pass")
    delete(tbl.Fields, "tagdrop")
    delete(tbl.Fields, "tagpass")
    delete(tbl.Fields, "tagexclude")
    delete(tbl.Fields, "taginclude")
    return f, nil
}

// buildInput parses input specific items from the ast.Table,
// builds the filter and returns an
// internal_models.InputConfig to be inserted into internal_models.RunningInput
func buildInput(name string, tbl *ast.Table) (*internal_models.InputConfig, error) {
    cp := &internal_models.InputConfig{Name: name}
    if node, ok := tbl.Fields["interval"]; ok {
        if kv, ok := node.(*ast.KeyValue); ok {
            if str, ok := kv.Value.(*ast.String); ok {
                dur, err := time.ParseDuration(str.Value)
                if err != nil {
                    return nil, err
                }

                cp.Interval = dur
            }
        }
    }

    if node, ok := tbl.Fields["name_prefix"]; ok {
        if kv, ok := node.(*ast.KeyValue); ok {
            if str, ok := kv.Value.(*ast.String); ok {
                cp.MeasurementPrefix = str.Value
            }
        }
    }

    if node, ok := tbl.Fields["name_suffix"]; ok {
        if kv, ok := node.(*ast.KeyValue); ok {
            if str, ok := kv.Value.(*ast.String); ok {
                cp.MeasurementSuffix = str.Value
            }
        }
    }

    if node, ok := tbl.Fields["name_override"]; ok {
        if kv, ok := node.(*ast.KeyValue); ok {
            if str, ok := kv.Value.(*ast.String); ok {
                cp.NameOverride = str.Value
            }
        }
    }

    cp.Tags = make(map[string]string)
    if node, ok := tbl.Fields["tags"]; ok {
        if subtbl, ok := node.(*ast.Table); ok {
            if err := config.UnmarshalTable(subtbl, cp.Tags); err != nil {
                log.Printf("Could not parse tags for input %s\n", name)
            }
        }
    }

    delete(tbl.Fields, "name_prefix")
    delete(tbl.Fields, "name_suffix")
    delete(tbl.Fields, "name_override")
    delete(tbl.Fields, "interval")
    delete(tbl.Fields, "tags")
    var err error
    cp.Filter, err = buildFilter(tbl)
    if err != nil {
        return cp, err
    }
    return cp, nil
}

// buildParser grabs the necessary entries from the ast.Table for creating
// a parsers.Parser object, and creates it, which can then be added onto
// an Input object.
func buildParser(name string, tbl *ast.Table) (parsers.Parser, error) {
    c := &parsers.Config{}

    if node, ok := tbl.Fields["data_format"]; ok {
        if kv, ok := node.(*ast.KeyValue); ok {
            if str, ok := kv.Value.(*ast.String); ok {
                c.DataFormat = str.Value
            }
        }
    }

    // Legacy support, exec plugin originally parsed JSON by default.
    if name == "exec" && c.DataFormat == "" {
        c.DataFormat = "json"
    } else if c.DataFormat == "" {
        c.DataFormat = "influx"
    }

    if node, ok := tbl.Fields["separator"]; ok {
        if kv, ok := node.(*ast.KeyValue); ok {
            if str, ok := kv.Value.(*ast.String); ok {
                c.Separator = str.Value
            }
        }
    }

    if node, ok := tbl.Fields["templates"]; ok {
        if kv, ok := node.(*ast.KeyValue); ok {
            if ary, ok := kv.Value.(*ast.Array); ok {
                for _, elem := range ary.Value {
                    if str, ok := elem.(*ast.String); ok {
                        c.Templates = append(c.Templates, str.Value)
                    }
                }
            }
        }
    }

    if node, ok := tbl.Fields["tag_keys"]; ok {
        if kv, ok := node.(*ast.KeyValue); ok {
            if ary, ok := kv.Value.(*ast.Array); ok {
                for _, elem := range ary.Value {
                    if str, ok := elem.(*ast.String); ok {
                        c.TagKeys = append(c.TagKeys, str.Value)
                    }
                }
            }
        }
    }

    if node, ok := tbl.Fields["data_type"]; ok {
        if kv, ok := node.(*ast.KeyValue); ok {
            if str, ok := kv.Value.(*ast.String); ok {
                c.DataType = str.Value
            }
        }
    }

    c.MetricName = name

    delete(tbl.Fields, "data_format")
    delete(tbl.Fields, "separator")
    delete(tbl.Fields, "templates")
    delete(tbl.Fields, "tag_keys")
    delete(tbl.Fields, "data_type")

    return parsers.NewParser(c)
}

// buildSerializer grabs the necessary entries from the ast.Table for creating
// a serializers.Serializer object, and creates it, which can then be added onto
// an Output object.
func buildSerializer(name string, tbl *ast.Table) (serializers.Serializer, error) {
    c := &serializers.Config{}

    if node, ok := tbl.Fields["data_format"]; ok {
        if kv, ok := node.(*ast.KeyValue); ok {
            if str, ok := kv.Value.(*ast.String); ok {
                c.DataFormat = str.Value
            }
        }
    }

    if c.DataFormat == "" {
        c.DataFormat = "influx"
    }

    if node, ok := tbl.Fields["prefix"]; ok {
        if kv, ok := node.(*ast.KeyValue); ok {
            if str, ok := kv.Value.(*ast.String); ok {
                c.Prefix = str.Value
            }
        }
    }

    if node, ok := tbl.Fields["template"]; ok {
        if kv, ok := node.(*ast.KeyValue); ok {
            if str, ok := kv.Value.(*ast.String); ok {
                c.Template = str.Value
            }
        }
    }

    delete(tbl.Fields, "data_format")
    delete(tbl.Fields, "prefix")
    delete(tbl.Fields, "template")
    return serializers.NewSerializer(c)
}

// buildOutput parses output specific items from the ast.Table,
// builds the filter and returns an
// internal_models.OutputConfig to be inserted into internal_models.RunningOutput
// Note: error exists in the return for future calls that might require error
func buildOutput(name string, tbl *ast.Table) (*internal_models.OutputConfig, error) {
    filter, err := buildFilter(tbl)
    if err != nil {
        return nil, err
    }
    oc := &internal_models.OutputConfig{
        Name:   name,
        Filter: filter,
    }
    // Outputs don't support FieldDrop/FieldPass, so set to NameDrop/NamePass
    if len(oc.Filter.FieldDrop) > 0 {
        oc.Filter.NameDrop = oc.Filter.FieldDrop
    }
    if len(oc.Filter.FieldPass) > 0 {
        oc.Filter.NamePass = oc.Filter.FieldPass
    }
    return oc, nil
}
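As a usage sketch of the loader above: an embedding program builds a `Config`, loads a single file (passing `""` falls back to the default search path from `getDefaultConfigPath`), and can merge a directory of additional `*.conf` files via `LoadDirectory`. The paths here are illustrative:

```go
package main

import (
    "log"

    "github.com/influxdata/telegraf/internal/config"
)

func main() {
    c := config.NewConfig()

    // Load the main config file; "" would trigger the default search path.
    if err := c.LoadConfig("/etc/telegraf/telegraf.conf"); err != nil {
        log.Fatal(err)
    }

    // Merge any *.conf files from a drop-in directory (hypothetical path).
    if err := c.LoadDirectory("/etc/telegraf/telegraf.d"); err != nil {
        log.Fatal(err)
    }

    log.Printf("inputs: %v, outputs: %v", c.InputNames(), c.OutputNames())
}
```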
179
internal/config/config_test.go
Normal file
@@ -0,0 +1,179 @@
package config

import (
    "os"
    "testing"
    "time"

    "github.com/influxdata/telegraf/internal/models"
    "github.com/influxdata/telegraf/plugins/inputs"
    "github.com/influxdata/telegraf/plugins/inputs/exec"
    "github.com/influxdata/telegraf/plugins/inputs/memcached"
    "github.com/influxdata/telegraf/plugins/inputs/procstat"
    "github.com/influxdata/telegraf/plugins/parsers"

    "github.com/stretchr/testify/assert"
)

func TestConfig_LoadSingleInputWithEnvVars(t *testing.T) {
    c := NewConfig()
    err := os.Setenv("MY_TEST_SERVER", "192.168.1.1")
    assert.NoError(t, err)
    err = os.Setenv("TEST_INTERVAL", "10s")
    assert.NoError(t, err)
    c.LoadConfig("./testdata/single_plugin_env_vars.toml")

    memcached := inputs.Inputs["memcached"]().(*memcached.Memcached)
    memcached.Servers = []string{"192.168.1.1"}

    filter := internal_models.Filter{
        NameDrop:  []string{"metricname2"},
        NamePass:  []string{"metricname1"},
        FieldDrop: []string{"other", "stuff"},
        FieldPass: []string{"some", "strings"},
        TagDrop: []internal_models.TagFilter{
            internal_models.TagFilter{
                Name:   "badtag",
                Filter: []string{"othertag"},
            },
        },
        TagPass: []internal_models.TagFilter{
            internal_models.TagFilter{
                Name:   "goodtag",
                Filter: []string{"mytag"},
            },
        },
        IsActive: true,
    }
    assert.NoError(t, filter.CompileFilter())
    mConfig := &internal_models.InputConfig{
        Name:     "memcached",
        Filter:   filter,
        Interval: 10 * time.Second,
    }
    mConfig.Tags = make(map[string]string)

    assert.Equal(t, memcached, c.Inputs[0].Input,
        "Testdata did not produce a correct memcached struct.")
    assert.Equal(t, mConfig, c.Inputs[0].Config,
        "Testdata did not produce correct memcached metadata.")
}

func TestConfig_LoadSingleInput(t *testing.T) {
    c := NewConfig()
    c.LoadConfig("./testdata/single_plugin.toml")

    memcached := inputs.Inputs["memcached"]().(*memcached.Memcached)
    memcached.Servers = []string{"localhost"}

    filter := internal_models.Filter{
        NameDrop:  []string{"metricname2"},
        NamePass:  []string{"metricname1"},
        FieldDrop: []string{"other", "stuff"},
        FieldPass: []string{"some", "strings"},
        TagDrop: []internal_models.TagFilter{
            internal_models.TagFilter{
                Name:   "badtag",
                Filter: []string{"othertag"},
            },
        },
        TagPass: []internal_models.TagFilter{
            internal_models.TagFilter{
                Name:   "goodtag",
                Filter: []string{"mytag"},
            },
        },
        IsActive: true,
    }
    assert.NoError(t, filter.CompileFilter())
    mConfig := &internal_models.InputConfig{
        Name:     "memcached",
        Filter:   filter,
        Interval: 5 * time.Second,
    }
    mConfig.Tags = make(map[string]string)

    assert.Equal(t, memcached, c.Inputs[0].Input,
        "Testdata did not produce a correct memcached struct.")
    assert.Equal(t, mConfig, c.Inputs[0].Config,
        "Testdata did not produce correct memcached metadata.")
}

func TestConfig_LoadDirectory(t *testing.T) {
    c := NewConfig()
    err := c.LoadConfig("./testdata/single_plugin.toml")
    if err != nil {
        t.Error(err)
    }
    err = c.LoadDirectory("./testdata/subconfig")
    if err != nil {
        t.Error(err)
    }

    memcached := inputs.Inputs["memcached"]().(*memcached.Memcached)
    memcached.Servers = []string{"localhost"}

    filter := internal_models.Filter{
        NameDrop:  []string{"metricname2"},
        NamePass:  []string{"metricname1"},
        FieldDrop: []string{"other", "stuff"},
        FieldPass: []string{"some", "strings"},
        TagDrop: []internal_models.TagFilter{
            internal_models.TagFilter{
                Name:   "badtag",
                Filter: []string{"othertag"},
            },
        },
        TagPass: []internal_models.TagFilter{
            internal_models.TagFilter{
                Name:   "goodtag",
                Filter: []string{"mytag"},
            },
        },
        IsActive: true,
    }
    assert.NoError(t, filter.CompileFilter())
    mConfig := &internal_models.InputConfig{
        Name:     "memcached",
        Filter:   filter,
        Interval: 5 * time.Second,
    }
    mConfig.Tags = make(map[string]string)

    assert.Equal(t, memcached, c.Inputs[0].Input,
        "Testdata did not produce a correct memcached struct.")
    assert.Equal(t, mConfig, c.Inputs[0].Config,
        "Testdata did not produce correct memcached metadata.")

    ex := inputs.Inputs["exec"]().(*exec.Exec)
    p, err := parsers.NewJSONParser("exec", nil, nil)
    assert.NoError(t, err)
    ex.SetParser(p)
    ex.Command = "/usr/bin/myothercollector --foo=bar"
    eConfig := &internal_models.InputConfig{
        Name:              "exec",
        MeasurementSuffix: "_myothercollector",
    }
    eConfig.Tags = make(map[string]string)
    assert.Equal(t, ex, c.Inputs[1].Input,
        "Merged Testdata did not produce a correct exec struct.")
    assert.Equal(t, eConfig, c.Inputs[1].Config,
        "Merged Testdata did not produce correct exec metadata.")

    memcached.Servers = []string{"192.168.1.1"}
    assert.Equal(t, memcached, c.Inputs[2].Input,
        "Testdata did not produce a correct memcached struct.")
    assert.Equal(t, mConfig, c.Inputs[2].Config,
        "Testdata did not produce correct memcached metadata.")

    pstat := inputs.Inputs["procstat"]().(*procstat.Procstat)
    pstat.PidFile = "/var/run/grafana-server.pid"

    pConfig := &internal_models.InputConfig{Name: "procstat"}
    pConfig.Tags = make(map[string]string)

    assert.Equal(t, pstat, c.Inputs[3].Input,
        "Merged Testdata did not produce a correct procstat struct.")
    assert.Equal(t, pConfig, c.Inputs[3].Config,
        "Merged Testdata did not produce correct procstat metadata.")
}
11
internal/config/testdata/single_plugin.toml
vendored
Normal file
@@ -0,0 +1,11 @@
[[inputs.memcached]]
servers = ["localhost"]
namepass = ["metricname1"]
namedrop = ["metricname2"]
fieldpass = ["some", "strings"]
fielddrop = ["other", "stuff"]
interval = "5s"
[inputs.memcached.tagpass]
goodtag = ["mytag"]
[inputs.memcached.tagdrop]
badtag = ["othertag"]
11
internal/config/testdata/single_plugin_env_vars.toml
vendored
Normal file
@@ -0,0 +1,11 @@
[[inputs.memcached]]
servers = ["$MY_TEST_SERVER"]
namepass = ["metricname1"]
namedrop = ["metricname2"]
fieldpass = ["some", "strings"]
fielddrop = ["other", "stuff"]
interval = "$TEST_INTERVAL"
[inputs.memcached.tagpass]
goodtag = ["mytag"]
[inputs.memcached.tagdrop]
badtag = ["othertag"]
4
internal/config/testdata/subconfig/exec.conf
vendored
Normal file
@@ -0,0 +1,4 @@
|
||||
[[inputs.exec]]
|
||||
# the command to run
|
||||
command = "/usr/bin/myothercollector --foo=bar"
|
||||
name_suffix = "_myothercollector"
|
||||
11 internal/config/testdata/subconfig/memcached.conf vendored Normal file
@@ -0,0 +1,11 @@
[[inputs.memcached]]
  servers = ["192.168.1.1"]
  namepass = ["metricname1"]
  namedrop = ["metricname2"]
  pass = ["some", "strings"]
  drop = ["other", "stuff"]
  interval = "5s"
  [inputs.memcached.tagpass]
    goodtag = ["mytag"]
  [inputs.memcached.tagdrop]
    badtag = ["othertag"]
2 internal/config/testdata/subconfig/procstat.conf vendored Normal file
@@ -0,0 +1,2 @@
[[inputs.procstat]]
  pid_file = "/var/run/grafana-server.pid"
310 internal/config/testdata/telegraf-agent.toml vendored Normal file
@@ -0,0 +1,310 @@
# Telegraf configuration

# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared inputs.

# Even if a plugin has no configuration, it must be declared in here
# to be active. Declaring a plugin means just specifying the name
# as a section with no variables. To deactivate a plugin, comment
# out the name and any variables.

# Use 'telegraf -config telegraf.toml -test' to see what metrics a config
# file would generate.

# One rule that plugins conform to is wherever a connection string
# can be passed, the values '' and 'localhost' are treated specially.
# They indicate to the plugin to use their own builtin configuration to
# connect to the local system.

# NOTE: The configuration has a few required parameters. They are marked
# with 'required'. Be sure to edit those to make this configuration work.

# Tags can also be specified via a normal map, but only one form at a time:
[global_tags]
  dc = "us-east-1"

# Configuration for telegraf agent
[agent]
  # Default data collection interval for all plugins
  interval = "10s"

  # run telegraf in debug mode
  debug = false

  # Override default hostname, if empty use os.Hostname()
  hostname = ""


###############################################################################
#                                   OUTPUTS                                   #
###############################################################################

# Configuration for influxdb server to send metrics to
[[outputs.influxdb]]
  # The full HTTP endpoint URL for your InfluxDB instance
  # Multiple urls can be specified for InfluxDB cluster support. The server to
  # write to will be randomly chosen each interval.
  urls = ["http://localhost:8086"] # required.

  # The target database for metrics. This database must already exist.
  database = "telegraf" # required.

[[outputs.influxdb]]
  urls = ["udp://localhost:8089"]
  database = "udp-telegraf"

# Configuration for the Kafka server to send metrics to
[[outputs.kafka]]
  # URLs of kafka brokers
  brokers = ["localhost:9092"]
  # Kafka topic for producer messages
  topic = "telegraf"
  # Telegraf tag to use as a routing key
  # i.e., if this tag exists, its value will be used as the routing key
  routing_tag = "host"


###############################################################################
#                                   PLUGINS                                   #
###############################################################################

# Read Apache status information (mod_status)
[[inputs.apache]]
  # An array of Apache status URIs to gather stats from.
  urls = ["http://localhost/server-status?auto"]

# Read metrics about cpu usage
[[inputs.cpu]]
  # Whether to report per-cpu stats or not
  percpu = true
  # Whether to report total system cpu stats or not
  totalcpu = true
  # Comment this line if you want the raw CPU time metrics
  drop = ["cpu_time"]

# Read metrics about disk usage by mount point
[[inputs.diskio]]
  # no configuration

# Read metrics from one or many disque servers
[[inputs.disque]]
  # An array of URIs to gather stats about. Specify an ip or hostname
  # with optional port and password. i.e. disque://localhost, disque://10.10.3.33:18832,
  # 10.0.0.1:10000, etc.
  #
  # If no servers are specified, then localhost is used as the host.
  servers = ["localhost"]

# Read stats from one or more Elasticsearch servers or clusters
[[inputs.elasticsearch]]
  # specify a list of one or more Elasticsearch servers
  servers = ["http://localhost:9200"]

  # set local to false when you want to read the indices stats from all nodes
  # within the cluster
  local = true

# Read flattened metrics from one or more commands that output JSON to stdout
[[inputs.exec]]
  # the command to run
  command = "/usr/bin/mycollector --foo=bar"
  name_suffix = "_mycollector"

# Read metrics of haproxy, via socket or csv stats page
[[inputs.haproxy]]
  # An array of addresses to gather stats about. Specify an ip or hostname
  # with optional port. i.e. localhost, 10.10.3.33:1936, etc.
  #
  # If no servers are specified, then default to 127.0.0.1:1936
  servers = ["http://myhaproxy.com:1936", "http://anotherhaproxy.com:1936"]
  # Or you can also use a local socket (not working yet)
  # servers = ["socket:/run/haproxy/admin.sock"]

# Read flattened metrics from one or more JSON HTTP endpoints
[[inputs.httpjson]]
  # a name for the service being polled
  name = "webserver_stats"

  # URL of each server in the service's cluster
  servers = [
    "http://localhost:9999/stats/",
    "http://localhost:9998/stats/",
  ]

  # HTTP method to use (case-sensitive)
  method = "GET"

  # HTTP parameters (all values must be strings)
  [httpjson.parameters]
    event_type = "cpu_spike"
    threshold = "0.75"

# Read metrics about disk IO by device
[[inputs.diskio]]
  # no configuration

# read metrics from a Kafka topic
[[inputs.kafka_consumer]]
  # topic(s) to consume
  topics = ["telegraf"]
  # an array of Zookeeper connection strings
  zookeeper_peers = ["localhost:2181"]
  # the name of the consumer group
  consumer_group = "telegraf_metrics_consumers"
  # Maximum number of points to buffer between collection intervals
  point_buffer = 100000
  # Offset (must be either "oldest" or "newest")
  offset = "oldest"

# Read metrics from a LeoFS Server via SNMP
[[inputs.leofs]]
  # An array of URIs to gather stats about LeoFS.
  # Specify an ip or hostname with port. i.e. 127.0.0.1:4020
  #
  # If no servers are specified, then 127.0.0.1 is used as the host and 4020 as the port.
  servers = ["127.0.0.1:4021"]

# Read metrics from local Lustre service on OST, MDS
[[inputs.lustre2]]
  # An array of /proc globs to search for Lustre stats
  # If not specified, the default will work on Lustre 2.5.x
  #
  # ost_procfiles = ["/proc/fs/lustre/obdfilter/*/stats", "/proc/fs/lustre/osd-ldiskfs/*/stats"]
  # mds_procfiles = ["/proc/fs/lustre/mdt/*/md_stats"]

# Read metrics about memory usage
[[inputs.mem]]
  # no configuration

# Read metrics from one or many memcached servers
[[inputs.memcached]]
  # An array of addresses to gather stats about. Specify an ip or hostname
  # with optional port. i.e. localhost, 10.0.0.1:11211, etc.
  #
  # If no servers are specified, then localhost is used as the host.
  servers = ["localhost"]

# Telegraf plugin for gathering metrics from N Mesos masters
[[inputs.mesos]]
  # Timeout, in ms.
  timeout = 100
  # A list of Mesos masters, default value is localhost:5050.
  masters = ["localhost:5050"]
  # Metrics groups to be collected, by default, all enabled.
  master_collections = ["resources","master","system","slaves","frameworks","messages","evqueue","registrar"]

# Read metrics from one or many MongoDB servers
[[inputs.mongodb]]
  # An array of URIs to gather stats about. Specify an ip or hostname
  # with optional port and password. i.e. mongodb://user:auth_key@10.10.3.30:27017,
  # mongodb://10.10.3.33:18832, 10.0.0.1:10000, etc.
  #
  # If no servers are specified, then 127.0.0.1 is used as the host and 27017 as the port.
  servers = ["127.0.0.1:27017"]

# Read metrics from one or many mysql servers
[[inputs.mysql]]
  # specify servers via a url matching:
  #  [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify]]
  # e.g.
  #  servers = ["root:root@http://10.0.0.18/?tls=false"]
  #  servers = ["root:passwd@tcp(127.0.0.1:3306)/"]
  #
  # If no servers are specified, then localhost is used as the host.
  servers = ["localhost"]

# Read metrics about network interface usage
[[inputs.net]]
  # By default, telegraf gathers stats from any up interface (excluding loopback)
  # Setting interfaces will tell it to gather these explicit interfaces,
  # regardless of status.
  #
  # interfaces = ["eth0", ... ]

# Read Nginx's basic status information (ngx_http_stub_status_module)
[[inputs.nginx]]
  # An array of Nginx stub_status URIs to gather stats from.
  urls = ["http://localhost/status"]

# Ping given url(s) and return statistics
[[inputs.ping]]
  # urls to ping
  urls = ["www.google.com"] # required
  # number of pings to send (ping -c <COUNT>)
  count = 1 # required
  # interval, in s, at which to ping. 0 == default (ping -i <PING_INTERVAL>)
  ping_interval = 0.0
  # ping timeout, in s. 0 == no timeout (ping -t <TIMEOUT>)
  timeout = 0.0
  # interface to send ping from (ping -I <INTERFACE>)
  interface = ""

# Read metrics from one or many postgresql servers
[[inputs.postgresql]]
  # specify address via a url matching:
  #   postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full]
  # or a simple string:
  #   host=localhost user=pqotest password=... sslmode=... dbname=app_production
  #
  # All connection parameters are optional. By default, the host is localhost
  # and the user is the currently running user. For localhost, we default
  # to sslmode=disable as well.
  #
  # Without the dbname parameter, the driver will default to a database
  # with the same name as the user. This dbname is just for instantiating a
  # connection with the server and doesn't restrict the databases we are trying
  # to grab metrics for.
  #

  address = "sslmode=disable"

  # A list of databases to pull metrics about. If not specified, metrics for all
  # databases are gathered.

  # databases = ["app_production", "blah_testing"]

  # [[postgresql.servers]]
  # address = "influx@remoteserver"

# Read metrics from one or many prometheus clients
[[inputs.prometheus]]
  # An array of urls to scrape metrics from.
  urls = ["http://localhost:9100/metrics"]

# Read metrics from one or many RabbitMQ servers via the management API
[[inputs.rabbitmq]]
  # Specify servers via an array of tables
  # name = "rmq-server-1" # optional tag
  # url = "http://localhost:15672"
  # username = "guest"
  # password = "guest"

  # A list of nodes to pull metrics about. If not specified, metrics for
  # all nodes are gathered.
  # nodes = ["rabbit@node1", "rabbit@node2"]

# Read metrics from one or many redis servers
[[inputs.redis]]
  # An array of URIs to gather stats about. Specify an ip or hostname
  # with optional port and password. i.e. redis://localhost, redis://10.10.3.33:18832,
  # 10.0.0.1:10000, etc.
  #
  # If no servers are specified, then localhost is used as the host.
  servers = ["localhost"]

# Read metrics from one or many RethinkDB servers
[[inputs.rethinkdb]]
  # An array of URIs to gather stats about. Specify an ip or hostname
  # with optional port and password. i.e. rethinkdb://user:auth_key@10.10.3.30:28105,
  # rethinkdb://10.10.3.33:18832, 10.0.0.1:10000, etc.
  #
  # If no servers are specified, then 127.0.0.1 is used as the host and 28015 as the port.
  servers = ["127.0.0.1:28015"]

# Read metrics about swap memory usage
[[inputs.swap]]
  # no configuration

# Read metrics about system load & uptime
[[inputs.system]]
  # no configuration
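The agent and plugin interval strings above (e.g. "10s", "5s") are decoded by the internal.Duration TOML wrapper added later in this diff. A minimal sketch (not part of the diff); note that UnmarshalTOML expects the raw quoted TOML bytes:

package main

import (
    "fmt"

    "github.com/influxdata/telegraf/internal"
)

func main() {
    var d internal.Duration
    // The leading and trailing bytes (the quotes) are stripped before
    // time.ParseDuration is applied, as in the wrapper's implementation.
    if err := d.UnmarshalTOML([]byte(`"10s"`)); err != nil {
        panic(err)
    }
    fmt.Println(d.Duration) // 10s
}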
98 internal/globpath/globpath.go Normal file
@@ -0,0 +1,98 @@
package globpath

import (
    "fmt"
    "os"
    "path/filepath"
    "strings"

    "github.com/gobwas/glob"
)

var sepStr = fmt.Sprintf("%v", string(os.PathSeparator))

type GlobPath struct {
    path    string
    hasMeta bool
    g       glob.Glob
    root    string
}

func Compile(path string) (*GlobPath, error) {
    out := GlobPath{
        hasMeta: hasMeta(path),
        path:    path,
    }

    // if there are no glob meta characters in the path, don't bother compiling
    // a glob object or finding the root directory. (see short-circuit in Match)
    if !out.hasMeta {
        return &out, nil
    }

    var err error
    if out.g, err = glob.Compile(path, os.PathSeparator); err != nil {
        return nil, err
    }
    // Get the root directory for this filepath
    out.root = findRootDir(path)
    return &out, nil
}

func (g *GlobPath) Match() map[string]os.FileInfo {
    if !g.hasMeta {
        out := make(map[string]os.FileInfo)
        info, err := os.Stat(g.path)
        if !os.IsNotExist(err) {
            out[g.path] = info
        }
        return out
    }
    return walkFilePath(g.root, g.g)
}

// walk the filepath from the given root and return a list of files that match
// the given glob.
func walkFilePath(root string, g glob.Glob) map[string]os.FileInfo {
    matchedFiles := make(map[string]os.FileInfo)
    walkfn := func(path string, info os.FileInfo, _ error) error {
        if g.Match(path) {
            matchedFiles[path] = info
        }
        return nil
    }
    filepath.Walk(root, walkfn)
    return matchedFiles
}

// find the root dir of the given path (could include globs).
// ie:
//   /var/log/telegraf.conf -> /var/log
//   /home/**               -> /home
//   /home/*/**             -> /home
//   /lib/share/*/*/**.txt  -> /lib/share
func findRootDir(path string) string {
    pathItems := strings.Split(path, sepStr)
    out := sepStr
    for i, item := range pathItems {
        if i == len(pathItems)-1 {
            break
        }
        if item == "" {
            continue
        }
        if hasMeta(item) {
            break
        }
        out += item + sepStr
    }
    if out != "/" {
        out = strings.TrimSuffix(out, "/")
    }
    return out
}

// hasMeta reports whether path contains any magic glob characters.
func hasMeta(path string) bool {
    return strings.IndexAny(path, "*?[") >= 0
}
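A minimal usage sketch of the package above (not part of the diff); the pattern is illustrative:

package main

import (
    "fmt"

    "github.com/influxdata/telegraf/internal/globpath"
)

func main() {
    g, err := globpath.Compile("/var/log/**.log")
    if err != nil {
        panic(err)
    }
    // Match returns matched paths keyed to their os.FileInfo; for a pattern
    // with no meta characters it simply stats the literal path instead.
    for path, info := range g.Match() {
        fmt.Println(path, info.Size())
    }
}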
62 internal/globpath/globpath_test.go Normal file
@@ -0,0 +1,62 @@
package globpath

import (
    "runtime"
    "strings"
    "testing"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

func TestCompileAndMatch(t *testing.T) {
    dir := getTestdataDir()
    // test super asterisk
    g1, err := Compile(dir + "/**")
    require.NoError(t, err)
    // test single asterisk
    g2, err := Compile(dir + "/*.log")
    require.NoError(t, err)
    // test no meta characters (file exists)
    g3, err := Compile(dir + "/log1.log")
    require.NoError(t, err)
    // test file that doesn't exist
    g4, err := Compile(dir + "/i_dont_exist.log")
    require.NoError(t, err)
    // test super asterisk that doesn't exist
    g5, err := Compile(dir + "/dir_doesnt_exist/**")
    require.NoError(t, err)

    matches := g1.Match()
    assert.Len(t, matches, 3)
    matches = g2.Match()
    assert.Len(t, matches, 2)
    matches = g3.Match()
    assert.Len(t, matches, 1)
    matches = g4.Match()
    assert.Len(t, matches, 0)
    matches = g5.Match()
    assert.Len(t, matches, 0)
}

func TestFindRootDir(t *testing.T) {
    tests := []struct {
        input  string
        output string
    }{
        {"/var/log/telegraf.conf", "/var/log"},
        {"/home/**", "/home"},
        {"/home/*/**", "/home"},
        {"/lib/share/*/*/**.txt", "/lib/share"},
    }

    for _, test := range tests {
        actual := findRootDir(test.input)
        assert.Equal(t, test.output, actual)
    }
}

func getTestdataDir() string {
    _, filename, _, _ := runtime.Caller(1)
    return strings.Replace(filename, "globpath_test.go", "testdata", 1)
}
0 internal/globpath/testdata/log1.log vendored Normal file
0 internal/globpath/testdata/log2.log vendored Normal file
5 internal/globpath/testdata/test.conf vendored Normal file
@@ -0,0 +1,5 @@
# this is a fake testing config file
# for testing the filestat plugin

option1 = "foo"
option2 = "bar"
193 internal/internal.go Normal file
@@ -0,0 +1,193 @@
package internal

import (
    "bufio"
    "bytes"
    "crypto/rand"
    "crypto/tls"
    "crypto/x509"
    "errors"
    "fmt"
    "io/ioutil"
    "log"
    "os"
    "os/exec"
    "strings"
    "time"
    "unicode"
)

const alphanum string = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"

var (
    TimeoutErr = errors.New("Command timed out.")

    NotImplementedError = errors.New("not implemented yet")
)

// Duration just wraps time.Duration
type Duration struct {
    Duration time.Duration
}

// UnmarshalTOML parses the duration from the TOML config file
func (d *Duration) UnmarshalTOML(b []byte) error {
    dur, err := time.ParseDuration(string(b[1 : len(b)-1]))
    if err != nil {
        return err
    }

    d.Duration = dur

    return nil
}

// ReadLines reads contents from a file and splits them by new lines.
// A convenience wrapper to ReadLinesOffsetN(filename, 0, -1).
func ReadLines(filename string) ([]string, error) {
    return ReadLinesOffsetN(filename, 0, -1)
}

// ReadLinesOffsetN reads contents from file and splits them by new line.
// The offset tells at which line number to start.
// The count determines the number of lines to read (starting from offset):
//   n >= 0: at most n lines
//   n < 0:  whole file
func ReadLinesOffsetN(filename string, offset uint, n int) ([]string, error) {
    f, err := os.Open(filename)
    if err != nil {
        return []string{""}, err
    }
    defer f.Close()

    var ret []string

    r := bufio.NewReader(f)
    for i := 0; i < n+int(offset) || n < 0; i++ {
        line, err := r.ReadString('\n')
        if err != nil {
            break
        }
        if i < int(offset) {
            continue
        }
        ret = append(ret, strings.Trim(line, "\n"))
    }

    return ret, nil
}

// RandomString returns a random string of alpha-numeric characters
func RandomString(n int) string {
    var bytes = make([]byte, n)
    rand.Read(bytes)
    for i, b := range bytes {
        bytes[i] = alphanum[b%byte(len(alphanum))]
    }
    return string(bytes)
}

// GetTLSConfig gets a tls.Config object from the given certs, key, and CA files.
// You must give the full path to the files.
// If all files are blank and InsecureSkipVerify=false, returns a nil pointer.
func GetTLSConfig(
    SSLCert, SSLKey, SSLCA string,
    InsecureSkipVerify bool,
) (*tls.Config, error) {
    if SSLCert == "" && SSLKey == "" && SSLCA == "" && !InsecureSkipVerify {
        return nil, nil
    }

    t := &tls.Config{
        InsecureSkipVerify: InsecureSkipVerify,
    }

    if SSLCA != "" {
        caCert, err := ioutil.ReadFile(SSLCA)
        if err != nil {
            return nil, errors.New(fmt.Sprintf("Could not load TLS CA: %s",
                err))
        }

        caCertPool := x509.NewCertPool()
        caCertPool.AppendCertsFromPEM(caCert)
        t.RootCAs = caCertPool
    }

    if SSLCert != "" && SSLKey != "" {
        cert, err := tls.LoadX509KeyPair(SSLCert, SSLKey)
        if err != nil {
            return nil, errors.New(fmt.Sprintf(
                "Could not load TLS client key/certificate: %s",
                err))
        }

        t.Certificates = []tls.Certificate{cert}
        t.BuildNameToCertificate()
    }

    // will be nil by default if nothing is provided
    return t, nil
}

// SnakeCase converts the given string to snake case following the Golang format:
// acronyms are converted to lower-case and preceded by an underscore.
func SnakeCase(in string) string {
    runes := []rune(in)
    length := len(runes)

    var out []rune
    for i := 0; i < length; i++ {
        if i > 0 && unicode.IsUpper(runes[i]) && ((i+1 < length && unicode.IsLower(runes[i+1])) || unicode.IsLower(runes[i-1])) {
            out = append(out, '_')
        }
        out = append(out, unicode.ToLower(runes[i]))
    }

    return string(out)
}

// CombinedOutputTimeout runs the given command with the given timeout and
// returns the combined output of stdout and stderr.
// If the command times out, it attempts to kill the process.
func CombinedOutputTimeout(c *exec.Cmd, timeout time.Duration) ([]byte, error) {
    var b bytes.Buffer
    c.Stdout = &b
    c.Stderr = &b
    if err := c.Start(); err != nil {
        return nil, err
    }
    err := WaitTimeout(c, timeout)
    return b.Bytes(), err
}

// RunTimeout runs the given command with the given timeout.
// If the command times out, it attempts to kill the process.
func RunTimeout(c *exec.Cmd, timeout time.Duration) error {
    if err := c.Start(); err != nil {
        return err
    }
    return WaitTimeout(c, timeout)
}

// WaitTimeout waits for the given command to finish with a timeout.
// It assumes the command has already been started.
// If the command times out, it attempts to kill the process.
func WaitTimeout(c *exec.Cmd, timeout time.Duration) error {
    timer := time.NewTimer(timeout)
    done := make(chan error)
    go func() { done <- c.Wait() }()
    select {
    case err := <-done:
        timer.Stop()
        return err
    case <-timer.C:
        if err := c.Process.Kill(); err != nil {
            log.Printf("FATAL error killing process: %s", err)
            return err
        }
        // wait for the command to return after killing it
        <-done
        return TimeoutErr
    }
}
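A minimal usage sketch for two of the helpers above (not part of the diff); the sleep command mirrors the package's own tests:

package main

import (
    "fmt"
    "os/exec"
    "time"

    "github.com/influxdata/telegraf/internal"
)

func main() {
    fmt.Println(internal.SnakeCase("APIResponse")) // api_response

    // The child process is killed once the 20ms deadline passes and
    // TimeoutErr is returned.
    cmd := exec.Command("sleep", "10")
    _, err := internal.CombinedOutputTimeout(cmd, 20*time.Millisecond)
    fmt.Println(err)
}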
108 internal/internal_test.go Normal file
@@ -0,0 +1,108 @@
package internal

import (
    "os/exec"
    "testing"
    "time"

    "github.com/stretchr/testify/assert"
)

type SnakeTest struct {
    input  string
    output string
}

var tests = []SnakeTest{
    {"a", "a"},
    {"snake", "snake"},
    {"A", "a"},
    {"ID", "id"},
    {"MOTD", "motd"},
    {"Snake", "snake"},
    {"SnakeTest", "snake_test"},
    {"APIResponse", "api_response"},
    {"SnakeID", "snake_id"},
    {"SnakeIDGoogle", "snake_id_google"},
    {"LinuxMOTD", "linux_motd"},
    {"OMGWTFBBQ", "omgwtfbbq"},
    {"omg_wtf_bbq", "omg_wtf_bbq"},
}

func TestSnakeCase(t *testing.T) {
    for _, test := range tests {
        if SnakeCase(test.input) != test.output {
            t.Errorf(`SnakeCase("%s"), wanted "%s", got "%s"`, test.input, test.output, SnakeCase(test.input))
        }
    }
}

var (
    sleepbin, _ = exec.LookPath("sleep")
    echobin, _  = exec.LookPath("echo")
)

func TestRunTimeout(t *testing.T) {
    if sleepbin == "" {
        t.Skip("'sleep' binary not available on OS, skipping.")
    }
    cmd := exec.Command(sleepbin, "10")
    start := time.Now()
    err := RunTimeout(cmd, time.Millisecond*20)
    elapsed := time.Since(start)

    assert.Equal(t, TimeoutErr, err)
    // Verify that command gets killed in 20ms, with some breathing room
    assert.True(t, elapsed < time.Millisecond*75)
}

func TestCombinedOutputTimeout(t *testing.T) {
    if sleepbin == "" {
        t.Skip("'sleep' binary not available on OS, skipping.")
    }
    cmd := exec.Command(sleepbin, "10")
    start := time.Now()
    _, err := CombinedOutputTimeout(cmd, time.Millisecond*20)
    elapsed := time.Since(start)

    assert.Equal(t, TimeoutErr, err)
    // Verify that command gets killed in 20ms, with some breathing room
    assert.True(t, elapsed < time.Millisecond*75)
}

func TestCombinedOutput(t *testing.T) {
    if echobin == "" {
        t.Skip("'echo' binary not available on OS, skipping.")
    }
    cmd := exec.Command(echobin, "foo")
    out, err := CombinedOutputTimeout(cmd, time.Second)

    assert.NoError(t, err)
    assert.Equal(t, "foo\n", string(out))
}

// test that CombinedOutputTimeout and exec.Cmd.CombinedOutput return
// the same output from a failed command.
func TestCombinedOutputError(t *testing.T) {
    if sleepbin == "" {
        t.Skip("'sleep' binary not available on OS, skipping.")
    }
    cmd := exec.Command(sleepbin, "foo")
    expected, err := cmd.CombinedOutput()

    cmd2 := exec.Command(sleepbin, "foo")
    actual, err := CombinedOutputTimeout(cmd2, time.Second)

    assert.Error(t, err)
    assert.Equal(t, expected, actual)
}

func TestRunError(t *testing.T) {
    if sleepbin == "" {
        t.Skip("'sleep' binary not available on OS, skipping.")
    }
    cmd := exec.Command(sleepbin, "foo")
    err := RunTimeout(cmd, time.Second)

    assert.Error(t, err)
}
196 internal/models/filter.go Normal file
@@ -0,0 +1,196 @@
package internal_models

import (
    "fmt"
    "strings"

    "github.com/gobwas/glob"

    "github.com/influxdata/telegraf"
)

// TagFilter is the name of a tag, and the values on which to filter
type TagFilter struct {
    Name   string
    Filter []string
    filter glob.Glob
}

// Filter containing drop/pass and tagdrop/tagpass rules
type Filter struct {
    NameDrop []string
    nameDrop glob.Glob
    NamePass []string
    namePass glob.Glob

    FieldDrop []string
    fieldDrop glob.Glob
    FieldPass []string
    fieldPass glob.Glob

    TagDrop []TagFilter
    TagPass []TagFilter

    TagExclude []string
    tagExclude glob.Glob
    TagInclude []string
    tagInclude glob.Glob

    IsActive bool
}

// CompileFilter compiles all Filter lists into glob.Glob objects.
func (f *Filter) CompileFilter() error {
    var err error
    f.nameDrop, err = compileFilter(f.NameDrop)
    if err != nil {
        return fmt.Errorf("Error compiling 'namedrop', %s", err)
    }
    f.namePass, err = compileFilter(f.NamePass)
    if err != nil {
        return fmt.Errorf("Error compiling 'namepass', %s", err)
    }

    f.fieldDrop, err = compileFilter(f.FieldDrop)
    if err != nil {
        return fmt.Errorf("Error compiling 'fielddrop', %s", err)
    }
    f.fieldPass, err = compileFilter(f.FieldPass)
    if err != nil {
        return fmt.Errorf("Error compiling 'fieldpass', %s", err)
    }

    f.tagExclude, err = compileFilter(f.TagExclude)
    if err != nil {
        return fmt.Errorf("Error compiling 'tagexclude', %s", err)
    }
    f.tagInclude, err = compileFilter(f.TagInclude)
    if err != nil {
        return fmt.Errorf("Error compiling 'taginclude', %s", err)
    }

    for i := range f.TagDrop {
        f.TagDrop[i].filter, err = compileFilter(f.TagDrop[i].Filter)
        if err != nil {
            return fmt.Errorf("Error compiling 'tagdrop', %s", err)
        }
    }
    for i := range f.TagPass {
        f.TagPass[i].filter, err = compileFilter(f.TagPass[i].Filter)
        if err != nil {
            return fmt.Errorf("Error compiling 'tagpass', %s", err)
        }
    }
    return nil
}

func compileFilter(filter []string) (glob.Glob, error) {
    if len(filter) == 0 {
        return nil, nil
    }
    var g glob.Glob
    var err error
    if len(filter) == 1 {
        g, err = glob.Compile(filter[0])
    } else {
        g, err = glob.Compile("{" + strings.Join(filter, ",") + "}")
    }
    return g, err
}

func (f *Filter) ShouldMetricPass(metric telegraf.Metric) bool {
    if f.ShouldNamePass(metric.Name()) && f.ShouldTagsPass(metric.Tags()) {
        return true
    }
    return false
}

// ShouldNamePass returns true if the metric should pass, false if it should drop
// based on the namedrop/namepass filter parameters
func (f *Filter) ShouldNamePass(key string) bool {
    if f.namePass != nil {
        if f.namePass.Match(key) {
            return true
        }
        return false
    }

    if f.nameDrop != nil {
        if f.nameDrop.Match(key) {
            return false
        }
    }
    return true
}

// ShouldFieldsPass returns true if the metric should pass, false if it should drop
// based on the fielddrop/fieldpass filter parameters
func (f *Filter) ShouldFieldsPass(key string) bool {
    if f.fieldPass != nil {
        if f.fieldPass.Match(key) {
            return true
        }
        return false
    }

    if f.fieldDrop != nil {
        if f.fieldDrop.Match(key) {
            return false
        }
    }
    return true
}

// ShouldTagsPass returns true if the metric should pass, false if it should drop
// based on the tagdrop/tagpass filter parameters
func (f *Filter) ShouldTagsPass(tags map[string]string) bool {
    if f.TagPass != nil {
        for _, pat := range f.TagPass {
            if pat.filter == nil {
                continue
            }
            if tagval, ok := tags[pat.Name]; ok {
                if pat.filter.Match(tagval) {
                    return true
                }
            }
        }
        return false
    }

    if f.TagDrop != nil {
        for _, pat := range f.TagDrop {
            if pat.filter == nil {
                continue
            }
            if tagval, ok := tags[pat.Name]; ok {
                if pat.filter.Match(tagval) {
                    return false
                }
            }
        }
        return true
    }

    return true
}

// FilterTags applies the TagInclude and TagExclude filters.
// It modifies the tags map in-place.
func (f *Filter) FilterTags(tags map[string]string) {
    if f.tagInclude != nil {
        for k := range tags {
            if !f.tagInclude.Match(k) {
                delete(tags, k)
            }
        }
    }

    if f.tagExclude != nil {
        for k := range tags {
            if f.tagExclude.Match(k) {
                delete(tags, k)
            }
        }
    }
}
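A minimal usage sketch (not part of the diff): compile a Filter and test measurement names against it, mirroring the tests below. The import path assumes the internal/models directory with package name internal_models, as used elsewhere in this diff:

package main

import (
    "fmt"

    "github.com/influxdata/telegraf/internal/models"
)

func main() {
    f := internal_models.Filter{
        NamePass: []string{"cpu*"},
        IsActive: true,
    }
    // CompileFilter must run before any Should*Pass call so the glob
    // matchers are built.
    if err := f.CompileFilter(); err != nil {
        panic(err)
    }
    fmt.Println(f.ShouldNamePass("cpu_usage_idle")) // true
    fmt.Println(f.ShouldNamePass("mem_free"))       // false
}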
366 internal/models/filter_test.go Normal file
@@ -0,0 +1,366 @@
package internal_models

import (
    "testing"

    "github.com/influxdata/telegraf/testutil"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

func TestFilter_Empty(t *testing.T) {
    f := Filter{}

    measurements := []string{
        "foo",
        "bar",
        "barfoo",
        "foo_bar",
        "foo.bar",
        "foo-bar",
        "supercalifradjulisticexpialidocious",
    }

    for _, measurement := range measurements {
        if !f.ShouldFieldsPass(measurement) {
            t.Errorf("Expected measurement %s to pass", measurement)
        }
    }
}

func TestFilter_NamePass(t *testing.T) {
    f := Filter{
        NamePass: []string{"foo*", "cpu_usage_idle"},
    }
    require.NoError(t, f.CompileFilter())

    passes := []string{
        "foo",
        "foo_bar",
        "foo.bar",
        "foo-bar",
        "cpu_usage_idle",
    }

    drops := []string{
        "bar",
        "barfoo",
        "bar_foo",
        "cpu_usage_busy",
    }

    for _, measurement := range passes {
        if !f.ShouldNamePass(measurement) {
            t.Errorf("Expected measurement %s to pass", measurement)
        }
    }

    for _, measurement := range drops {
        if f.ShouldNamePass(measurement) {
            t.Errorf("Expected measurement %s to drop", measurement)
        }
    }
}

func TestFilter_NameDrop(t *testing.T) {
    f := Filter{
        NameDrop: []string{"foo*", "cpu_usage_idle"},
    }
    require.NoError(t, f.CompileFilter())

    drops := []string{
        "foo",
        "foo_bar",
        "foo.bar",
        "foo-bar",
        "cpu_usage_idle",
    }

    passes := []string{
        "bar",
        "barfoo",
        "bar_foo",
        "cpu_usage_busy",
    }

    for _, measurement := range passes {
        if !f.ShouldNamePass(measurement) {
            t.Errorf("Expected measurement %s to pass", measurement)
        }
    }

    for _, measurement := range drops {
        if f.ShouldNamePass(measurement) {
            t.Errorf("Expected measurement %s to drop", measurement)
        }
    }
}

func TestFilter_FieldPass(t *testing.T) {
    f := Filter{
        FieldPass: []string{"foo*", "cpu_usage_idle"},
    }
    require.NoError(t, f.CompileFilter())

    passes := []string{
        "foo",
        "foo_bar",
        "foo.bar",
        "foo-bar",
        "cpu_usage_idle",
    }

    drops := []string{
        "bar",
        "barfoo",
        "bar_foo",
        "cpu_usage_busy",
    }

    for _, measurement := range passes {
        if !f.ShouldFieldsPass(measurement) {
            t.Errorf("Expected measurement %s to pass", measurement)
        }
    }

    for _, measurement := range drops {
        if f.ShouldFieldsPass(measurement) {
            t.Errorf("Expected measurement %s to drop", measurement)
        }
    }
}

func TestFilter_FieldDrop(t *testing.T) {
    f := Filter{
        FieldDrop: []string{"foo*", "cpu_usage_idle"},
    }
    require.NoError(t, f.CompileFilter())

    drops := []string{
        "foo",
        "foo_bar",
        "foo.bar",
        "foo-bar",
        "cpu_usage_idle",
    }

    passes := []string{
        "bar",
        "barfoo",
        "bar_foo",
        "cpu_usage_busy",
    }

    for _, measurement := range passes {
        if !f.ShouldFieldsPass(measurement) {
            t.Errorf("Expected measurement %s to pass", measurement)
        }
    }

    for _, measurement := range drops {
        if f.ShouldFieldsPass(measurement) {
            t.Errorf("Expected measurement %s to drop", measurement)
        }
    }
}

func TestFilter_TagPass(t *testing.T) {
    filters := []TagFilter{
        TagFilter{
            Name:   "cpu",
            Filter: []string{"cpu-*"},
        },
        TagFilter{
            Name:   "mem",
            Filter: []string{"mem_free"},
        }}
    f := Filter{
        TagPass: filters,
    }
    require.NoError(t, f.CompileFilter())

    passes := []map[string]string{
        {"cpu": "cpu-total"},
        {"cpu": "cpu-0"},
        {"cpu": "cpu-1"},
        {"cpu": "cpu-2"},
        {"mem": "mem_free"},
    }

    drops := []map[string]string{
        {"cpu": "cputotal"},
        {"cpu": "cpu0"},
        {"cpu": "cpu1"},
        {"cpu": "cpu2"},
        {"mem": "mem_used"},
    }

    for _, tags := range passes {
        if !f.ShouldTagsPass(tags) {
            t.Errorf("Expected tags %v to pass", tags)
        }
    }

    for _, tags := range drops {
        if f.ShouldTagsPass(tags) {
            t.Errorf("Expected tags %v to drop", tags)
        }
    }
}

func TestFilter_TagDrop(t *testing.T) {
    filters := []TagFilter{
        TagFilter{
            Name:   "cpu",
            Filter: []string{"cpu-*"},
        },
        TagFilter{
            Name:   "mem",
            Filter: []string{"mem_free"},
        }}
    f := Filter{
        TagDrop: filters,
    }
    require.NoError(t, f.CompileFilter())

    drops := []map[string]string{
        {"cpu": "cpu-total"},
        {"cpu": "cpu-0"},
        {"cpu": "cpu-1"},
        {"cpu": "cpu-2"},
        {"mem": "mem_free"},
    }

    passes := []map[string]string{
        {"cpu": "cputotal"},
        {"cpu": "cpu0"},
        {"cpu": "cpu1"},
        {"cpu": "cpu2"},
        {"mem": "mem_used"},
    }

    for _, tags := range passes {
        if !f.ShouldTagsPass(tags) {
            t.Errorf("Expected tags %v to pass", tags)
        }
    }

    for _, tags := range drops {
        if f.ShouldTagsPass(tags) {
            t.Errorf("Expected tags %v to drop", tags)
        }
    }
}

func TestFilter_CompileFilterError(t *testing.T) {
    f := Filter{
        NameDrop: []string{"", ""},
    }
    assert.Error(t, f.CompileFilter())
    f = Filter{
        NamePass: []string{"", ""},
    }
    assert.Error(t, f.CompileFilter())
    f = Filter{
        FieldDrop: []string{"", ""},
    }
    assert.Error(t, f.CompileFilter())
    f = Filter{
        FieldPass: []string{"", ""},
    }
    assert.Error(t, f.CompileFilter())
    f = Filter{
        TagExclude: []string{"", ""},
    }
    assert.Error(t, f.CompileFilter())
    f = Filter{
        TagInclude: []string{"", ""},
    }
    assert.Error(t, f.CompileFilter())
    filters := []TagFilter{
        TagFilter{
            Name:   "cpu",
            Filter: []string{"{foobar}"},
        }}
    f = Filter{
        TagDrop: filters,
    }
    require.Error(t, f.CompileFilter())
    filters = []TagFilter{
        TagFilter{
            Name:   "cpu",
            Filter: []string{"{foobar}"},
        }}
    f = Filter{
        TagPass: filters,
    }
    require.Error(t, f.CompileFilter())
}

func TestFilter_ShouldMetricsPass(t *testing.T) {
    m := testutil.TestMetric(1, "testmetric")
    f := Filter{
        NameDrop: []string{"foobar"},
    }
    require.NoError(t, f.CompileFilter())
    require.True(t, f.ShouldMetricPass(m))

    m = testutil.TestMetric(1, "foobar")
    require.False(t, f.ShouldMetricPass(m))
}

func TestFilter_FilterTagsNoMatches(t *testing.T) {
    pretags := map[string]string{
        "host":  "localhost",
        "mytag": "foobar",
    }
    f := Filter{
        TagExclude: []string{"nomatch"},
    }
    require.NoError(t, f.CompileFilter())

    f.FilterTags(pretags)
    assert.Equal(t, map[string]string{
        "host":  "localhost",
        "mytag": "foobar",
    }, pretags)

    f = Filter{
        TagInclude: []string{"nomatch"},
    }
    require.NoError(t, f.CompileFilter())

    f.FilterTags(pretags)
    assert.Equal(t, map[string]string{}, pretags)
}

func TestFilter_FilterTagsMatches(t *testing.T) {
    pretags := map[string]string{
        "host":  "localhost",
        "mytag": "foobar",
    }
    f := Filter{
        TagExclude: []string{"ho*"},
    }
    require.NoError(t, f.CompileFilter())

    f.FilterTags(pretags)
    assert.Equal(t, map[string]string{
        "mytag": "foobar",
    }, pretags)

    pretags = map[string]string{
        "host":  "localhost",
        "mytag": "foobar",
    }
    f = Filter{
        TagInclude: []string{"my*"},
    }
    require.NoError(t, f.CompileFilter())

    f.FilterTags(pretags)
    assert.Equal(t, map[string]string{
        "mytag": "foobar",
    }, pretags)
}
24 internal/models/running_input.go Normal file
@@ -0,0 +1,24 @@
package internal_models

import (
    "time"

    "github.com/influxdata/telegraf"
)

type RunningInput struct {
    Name   string
    Input  telegraf.Input
    Config *InputConfig
}

// InputConfig containing a name, interval, and filter
type InputConfig struct {
    Name              string
    NameOverride      string
    MeasurementPrefix string
    MeasurementSuffix string
    Tags              map[string]string
    Filter            Filter
    Interval          time.Duration
}
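RunningInput simply pairs a constructed plugin with its per-plugin InputConfig. A minimal wiring sketch (not part of the diff); the *memcached.Memcached struct name and its Servers field are assumed from the config test earlier in this diff:

package main

import (
    "time"

    "github.com/influxdata/telegraf/internal/models"
    "github.com/influxdata/telegraf/plugins/inputs/memcached"
)

func main() {
    // Wrap a plugin instance together with the config the agent uses to
    // schedule and filter it.
    ri := &internal_models.RunningInput{
        Name:  "memcached",
        Input: &memcached.Memcached{Servers: []string{"localhost"}},
        Config: &internal_models.InputConfig{
            Name:     "memcached",
            Interval: 10 * time.Second,
        },
    }
    _ = ri
}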
160 internal/models/running_output.go Normal file
@@ -0,0 +1,160 @@
package internal_models

import (
    "log"
    "time"

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/internal/buffer"
)

const (
    // Default size of metrics batch size.
    DEFAULT_METRIC_BATCH_SIZE = 1000

    // Default number of metrics kept. It should be a multiple of batch size.
    DEFAULT_METRIC_BUFFER_LIMIT = 10000
)

// RunningOutput contains the output configuration
type RunningOutput struct {
    Name              string
    Output            telegraf.Output
    Config            *OutputConfig
    Quiet             bool
    MetricBufferLimit int
    MetricBatchSize   int

    metrics     *buffer.Buffer
    failMetrics *buffer.Buffer
}

func NewRunningOutput(
    name string,
    output telegraf.Output,
    conf *OutputConfig,
    batchSize int,
    bufferLimit int,
) *RunningOutput {
    if bufferLimit == 0 {
        bufferLimit = DEFAULT_METRIC_BUFFER_LIMIT
    }
    if batchSize == 0 {
        batchSize = DEFAULT_METRIC_BATCH_SIZE
    }
    ro := &RunningOutput{
        Name:              name,
        metrics:           buffer.NewBuffer(batchSize),
        failMetrics:       buffer.NewBuffer(bufferLimit),
        Output:            output,
        Config:            conf,
        MetricBufferLimit: bufferLimit,
        MetricBatchSize:   batchSize,
    }
    return ro
}

// AddMetric adds a metric to the output. This function can also write cached
// points if FlushBufferWhenFull is true.
func (ro *RunningOutput) AddMetric(metric telegraf.Metric) {
    if ro.Config.Filter.IsActive {
        if !ro.Config.Filter.ShouldMetricPass(metric) {
            return
        }
    }

    // Filter any tagexclude/taginclude parameters before adding metric
    if len(ro.Config.Filter.TagExclude) != 0 || len(ro.Config.Filter.TagInclude) != 0 {
        // In order to filter out tags, we need to create a new metric, since
        // metrics are immutable once created.
        tags := metric.Tags()
        fields := metric.Fields()
        t := metric.Time()
        name := metric.Name()
        ro.Config.Filter.FilterTags(tags)
        // error is not possible if creating from another metric, so ignore.
        metric, _ = telegraf.NewMetric(name, tags, fields, t)
    }

    ro.metrics.Add(metric)
    if ro.metrics.Len() == ro.MetricBatchSize {
        batch := ro.metrics.Batch(ro.MetricBatchSize)
        err := ro.write(batch)
        if err != nil {
            ro.failMetrics.Add(batch...)
        }
    }
}

// Write writes all cached points to this output.
func (ro *RunningOutput) Write() error {
    if !ro.Quiet {
        log.Printf("Output [%s] buffer fullness: %d / %d metrics. "+
            "Total gathered metrics: %d. Total dropped metrics: %d.",
            ro.Name,
            ro.failMetrics.Len()+ro.metrics.Len(),
            ro.MetricBufferLimit,
            ro.metrics.Total(),
            ro.metrics.Drops()+ro.failMetrics.Drops())
    }

    var err error
    if !ro.failMetrics.IsEmpty() {
        bufLen := ro.failMetrics.Len()
        // how many batches of failed writes we need to write.
        nBatches := bufLen/ro.MetricBatchSize + 1
        batchSize := ro.MetricBatchSize

        for i := 0; i < nBatches; i++ {
            // If it's the last batch, only grab the metrics that have not had
            // a write attempt already (this is primarily to preserve order).
            if i == nBatches-1 {
                batchSize = bufLen % ro.MetricBatchSize
            }
            batch := ro.failMetrics.Batch(batchSize)
            // If we've already failed previous writes, don't bother trying to
            // write to this output again. We are not exiting the loop just so
            // that we can rotate the metrics to preserve order.
            if err == nil {
                err = ro.write(batch)
            }
            if err != nil {
                ro.failMetrics.Add(batch...)
            }
        }
    }

    batch := ro.metrics.Batch(ro.MetricBatchSize)
    // see comment above about not trying to write to an already failed output.
    // if ro.failMetrics is empty then err will always be nil at this point.
    if err == nil {
        err = ro.write(batch)
    }
    if err != nil {
        ro.failMetrics.Add(batch...)
        return err
    }
    return nil
}

func (ro *RunningOutput) write(metrics []telegraf.Metric) error {
    if len(metrics) == 0 {
        return nil
    }
    start := time.Now()
    err := ro.Output.Write(metrics)
    elapsed := time.Since(start)
    if err == nil {
        if !ro.Quiet {
            log.Printf("Output [%s] wrote batch of %d metrics in %s\n",
                ro.Name, len(metrics), elapsed)
        }
    }
    return err
}

// OutputConfig containing name and filter
type OutputConfig struct {
    Name   string
    Filter Filter
}
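A minimal sketch (not part of the diff) of driving RunningOutput with a throwaway output. The full telegraf.Output interface is assumed here (Connect/Close/Description/SampleConfig/Write); only Write's signature is confirmed by the code above, and nullOutput is a hypothetical no-op implementation used purely for illustration:

package main

import (
    "time"

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/internal/models"
)

// nullOutput discards every batch it is asked to write.
type nullOutput struct{}

func (n *nullOutput) Connect() error                        { return nil }
func (n *nullOutput) Close() error                          { return nil }
func (n *nullOutput) Description() string                   { return "discard metrics" }
func (n *nullOutput) SampleConfig() string                  { return "" }
func (n *nullOutput) Write(metrics []telegraf.Metric) error { return nil }

func main() {
    ro := internal_models.NewRunningOutput(
        "null", &nullOutput{}, &internal_models.OutputConfig{}, 1000, 10000)
    m, _ := telegraf.NewMetric("cpu",
        map[string]string{"host": "localhost"},
        map[string]interface{}{"usage_idle": 99.5},
        time.Now())
    ro.AddMetric(m) // buffered; flushes automatically at the batch size
    ro.Write()      // or flush manually
}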
568 internal/models/running_output_test.go Normal file
@@ -0,0 +1,568 @@
package internal_models

import (
    "fmt"
    "sync"
    "testing"

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/testutil"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

var first5 = []telegraf.Metric{
    testutil.TestMetric(101, "metric1"),
    testutil.TestMetric(101, "metric2"),
    testutil.TestMetric(101, "metric3"),
    testutil.TestMetric(101, "metric4"),
    testutil.TestMetric(101, "metric5"),
}

var next5 = []telegraf.Metric{
    testutil.TestMetric(101, "metric6"),
    testutil.TestMetric(101, "metric7"),
    testutil.TestMetric(101, "metric8"),
    testutil.TestMetric(101, "metric9"),
    testutil.TestMetric(101, "metric10"),
}

// Benchmark adding metrics.
func BenchmarkRunningOutputAddWrite(b *testing.B) {
    conf := &OutputConfig{
        Filter: Filter{
            IsActive: false,
        },
    }

    m := &perfOutput{}
    ro := NewRunningOutput("test", m, conf, 1000, 10000)
    ro.Quiet = true

    for n := 0; n < b.N; n++ {
        ro.AddMetric(first5[0])
        ro.Write()
    }
}

// Benchmark adding metrics.
func BenchmarkRunningOutputAddWriteEvery100(b *testing.B) {
    conf := &OutputConfig{
        Filter: Filter{
            IsActive: false,
        },
    }

    m := &perfOutput{}
    ro := NewRunningOutput("test", m, conf, 1000, 10000)
    ro.Quiet = true

    for n := 0; n < b.N; n++ {
        ro.AddMetric(first5[0])
        if n%100 == 0 {
            ro.Write()
        }
    }
}

// Benchmark adding metrics.
func BenchmarkRunningOutputAddFailWrites(b *testing.B) {
    conf := &OutputConfig{
        Filter: Filter{
            IsActive: false,
        },
    }

    m := &perfOutput{}
    m.failWrite = true
    ro := NewRunningOutput("test", m, conf, 1000, 10000)
    ro.Quiet = true

    for n := 0; n < b.N; n++ {
        ro.AddMetric(first5[0])
    }
}

// Test that NameDrop filters get properly applied.
func TestRunningOutput_DropFilter(t *testing.T) {
    conf := &OutputConfig{
        Filter: Filter{
            IsActive: true,
            NameDrop: []string{"metric1", "metric2"},
        },
    }
    assert.NoError(t, conf.Filter.CompileFilter())

    m := &mockOutput{}
    ro := NewRunningOutput("test", m, conf, 1000, 10000)

    for _, metric := range first5 {
        ro.AddMetric(metric)
    }
    for _, metric := range next5 {
        ro.AddMetric(metric)
    }
    assert.Len(t, m.Metrics(), 0)

    err := ro.Write()
    assert.NoError(t, err)
    assert.Len(t, m.Metrics(), 8)
}

// Test that NameDrop filters without a match do nothing.
func TestRunningOutput_PassFilter(t *testing.T) {
    conf := &OutputConfig{
        Filter: Filter{
            IsActive: true,
            NameDrop: []string{"metric1000", "foo*"},
        },
    }
    assert.NoError(t, conf.Filter.CompileFilter())

    m := &mockOutput{}
    ro := NewRunningOutput("test", m, conf, 1000, 10000)

    for _, metric := range first5 {
        ro.AddMetric(metric)
    }
    for _, metric := range next5 {
        ro.AddMetric(metric)
    }
    assert.Len(t, m.Metrics(), 0)

    err := ro.Write()
    assert.NoError(t, err)
    assert.Len(t, m.Metrics(), 10)
}

// Test that tags are properly included
func TestRunningOutput_TagIncludeNoMatch(t *testing.T) {
    conf := &OutputConfig{
        Filter: Filter{
            IsActive:   true,
            TagInclude: []string{"nothing*"},
        },
    }
    assert.NoError(t, conf.Filter.CompileFilter())

    m := &mockOutput{}
    ro := NewRunningOutput("test", m, conf, 1000, 10000)

    ro.AddMetric(first5[0])
    assert.Len(t, m.Metrics(), 0)

    err := ro.Write()
    assert.NoError(t, err)
    assert.Len(t, m.Metrics(), 1)
    assert.Empty(t, m.Metrics()[0].Tags())
}

// Test that tags are properly excluded
func TestRunningOutput_TagExcludeMatch(t *testing.T) {
    conf := &OutputConfig{
        Filter: Filter{
            IsActive:   true,
            TagExclude: []string{"tag*"},
        },
    }
    assert.NoError(t, conf.Filter.CompileFilter())

    m := &mockOutput{}
    ro := NewRunningOutput("test", m, conf, 1000, 10000)

    ro.AddMetric(first5[0])
    assert.Len(t, m.Metrics(), 0)

    err := ro.Write()
    assert.NoError(t, err)
    assert.Len(t, m.Metrics(), 1)
    assert.Len(t, m.Metrics()[0].Tags(), 0)
}

// Test that tags are properly excluded (no match)
func TestRunningOutput_TagExcludeNoMatch(t *testing.T) {
    conf := &OutputConfig{
        Filter: Filter{
            IsActive:   true,
            TagExclude: []string{"nothing*"},
        },
    }
    assert.NoError(t, conf.Filter.CompileFilter())

    m := &mockOutput{}
    ro := NewRunningOutput("test", m, conf, 1000, 10000)

    ro.AddMetric(first5[0])
    assert.Len(t, m.Metrics(), 0)

    err := ro.Write()
    assert.NoError(t, err)
    assert.Len(t, m.Metrics(), 1)
    assert.Len(t, m.Metrics()[0].Tags(), 1)
}

// Test that tags are properly included
func TestRunningOutput_TagIncludeMatch(t *testing.T) {
    conf := &OutputConfig{
        Filter: Filter{
            IsActive:   true,
            TagInclude: []string{"tag*"},
        },
    }
    assert.NoError(t, conf.Filter.CompileFilter())

    m := &mockOutput{}
    ro := NewRunningOutput("test", m, conf, 1000, 10000)

    ro.AddMetric(first5[0])
    assert.Len(t, m.Metrics(), 0)

    err := ro.Write()
    assert.NoError(t, err)
    assert.Len(t, m.Metrics(), 1)
    assert.Len(t, m.Metrics()[0].Tags(), 1)
}

// Test that we can write metrics with simple default setup.
func TestRunningOutputDefault(t *testing.T) {
    conf := &OutputConfig{
        Filter: Filter{
            IsActive: false,
        },
    }

    m := &mockOutput{}
    ro := NewRunningOutput("test", m, conf, 1000, 10000)

    for _, metric := range first5 {
        ro.AddMetric(metric)
    }
    for _, metric := range next5 {
        ro.AddMetric(metric)
    }
    assert.Len(t, m.Metrics(), 0)

    err := ro.Write()
    assert.NoError(t, err)
    assert.Len(t, m.Metrics(), 10)
}

// Test that running output doesn't flush until it's full when
// FlushBufferWhenFull is set.
func TestRunningOutputFlushWhenFull(t *testing.T) {
    conf := &OutputConfig{
        Filter: Filter{
            IsActive: false,
        },
    }

    m := &mockOutput{}
    ro := NewRunningOutput("test", m, conf, 6, 10)

    // Fill buffer to 1 under limit
    for _, metric := range first5 {
        ro.AddMetric(metric)
    }
    // no flush yet
    assert.Len(t, m.Metrics(), 0)

    // add one more metric
    ro.AddMetric(next5[0])
    // now it flushed
    assert.Len(t, m.Metrics(), 6)

    // add one more metric and write it manually
    ro.AddMetric(next5[1])
    err := ro.Write()
    assert.NoError(t, err)
    assert.Len(t, m.Metrics(), 7)
}

// Test that running output doesn't flush until it's full when
// FlushBufferWhenFull is set, twice.
func TestRunningOutputMultiFlushWhenFull(t *testing.T) {
    conf := &OutputConfig{
        Filter: Filter{
            IsActive: false,
        },
    }

    m := &mockOutput{}
    ro := NewRunningOutput("test", m, conf, 4, 12)

    // Fill buffer past limit twice
    for _, metric := range first5 {
        ro.AddMetric(metric)
    }
    for _, metric := range next5 {
        ro.AddMetric(metric)
    }
    // flushed twice
    assert.Len(t, m.Metrics(), 8)
}

func TestRunningOutputWriteFail(t *testing.T) {
    conf := &OutputConfig{
        Filter: Filter{
            IsActive: false,
        },
    }

    m := &mockOutput{}
    m.failWrite = true
    ro := NewRunningOutput("test", m, conf, 4, 12)

    // Fill buffer to limit twice
    for _, metric := range first5 {
        ro.AddMetric(metric)
    }
    for _, metric := range next5 {
        ro.AddMetric(metric)
    }
    // no successful flush yet
    assert.Len(t, m.Metrics(), 0)

    // manual write fails
    err := ro.Write()
    require.Error(t, err)
    // no successful flush yet
    assert.Len(t, m.Metrics(), 0)

    m.failWrite = false
    err = ro.Write()
    require.NoError(t, err)

    assert.Len(t, m.Metrics(), 10)
}

// Verify that the order of points is preserved during a write failure.
func TestRunningOutputWriteFailOrder(t *testing.T) {
    conf := &OutputConfig{
        Filter: Filter{
            IsActive: false,
        },
    }

    m := &mockOutput{}
    m.failWrite = true
    ro := NewRunningOutput("test", m, conf, 100, 1000)

    // add 5 metrics
    for _, metric := range first5 {
        ro.AddMetric(metric)
    }
    // no successful flush yet
    assert.Len(t, m.Metrics(), 0)

    // Write fails
    err := ro.Write()
    require.Error(t, err)
    // no successful flush yet
    assert.Len(t, m.Metrics(), 0)

    m.failWrite = false
    // add 5 more metrics
    for _, metric := range next5 {
        ro.AddMetric(metric)
    }
    err = ro.Write()
    require.NoError(t, err)

    // Verify that 10 metrics were written
    assert.Len(t, m.Metrics(), 10)
    // Verify that they are in order
    expected := append(first5, next5...)
    assert.Equal(t, expected, m.Metrics())
}

// Verify that the order of points is preserved during many write failures.
func TestRunningOutputWriteFailOrder2(t *testing.T) {
    conf := &OutputConfig{
        Filter: Filter{
            IsActive: false,
        },
    }

    m := &mockOutput{}
    m.failWrite = true
    ro := NewRunningOutput("test", m, conf, 5, 100)

    // add 5 metrics
    for _, metric := range first5 {
        ro.AddMetric(metric)
    }
    // Write fails
    err := ro.Write()
    require.Error(t, err)
    // no successful flush yet
    assert.Len(t, m.Metrics(), 0)

    // add 5 metrics
    for _, metric := range next5 {
        ro.AddMetric(metric)
    }
    // Write fails
    err = ro.Write()
    require.Error(t, err)
    // no successful flush yet
    assert.Len(t, m.Metrics(), 0)

    // add 5 metrics
    for _, metric := range first5 {
        ro.AddMetric(metric)
    }
    // Write fails
    err = ro.Write()
    require.Error(t, err)
    // no successful flush yet
    assert.Len(t, m.Metrics(), 0)

    // add 5 metrics
    for _, metric := range next5 {
        ro.AddMetric(metric)
    }
    // Write fails
    err = ro.Write()
    require.Error(t, err)
    // no successful flush yet
    assert.Len(t, m.Metrics(), 0)

    m.failWrite = false
    err = ro.Write()
    require.NoError(t, err)

    // Verify that 20 metrics were written
    assert.Len(t, m.Metrics(), 20)
    // Verify that they are in order
    expected := append(first5, next5...)
    expected = append(expected, first5...)
    expected = append(expected, next5...)
    assert.Equal(t, expected, m.Metrics())
}

// Verify that the order of points is preserved when there is a remainder
// of points for the batch.
|
||||
//
|
||||
// ie, with a batch size of 5:
|
||||
//
|
||||
// 1 2 3 4 5 6 <-- order, failed points
|
||||
// 6 1 2 3 4 5 <-- order, after 1st write failure (1 2 3 4 5 was batch)
|
||||
// 1 2 3 4 5 6 <-- order, after 2nd write failure, (6 was batch)
|
||||
//
|
||||
func TestRunningOutputWriteFailOrder3(t *testing.T) {
|
||||
conf := &OutputConfig{
|
||||
Filter: Filter{
|
||||
IsActive: false,
|
||||
},
|
||||
}
|
||||
|
||||
m := &mockOutput{}
|
||||
m.failWrite = true
|
||||
ro := NewRunningOutput("test", m, conf, 5, 1000)
|
||||
|
||||
// add 5 metrics
|
||||
for _, metric := range first5 {
|
||||
ro.AddMetric(metric)
|
||||
}
|
||||
// no successful flush yet
|
||||
assert.Len(t, m.Metrics(), 0)
|
||||
|
||||
// Write fails
|
||||
err := ro.Write()
|
||||
require.Error(t, err)
|
||||
// no successful flush yet
|
||||
assert.Len(t, m.Metrics(), 0)
|
||||
|
||||
// add and attempt to write a single metric:
|
||||
ro.AddMetric(next5[0])
|
||||
err = ro.Write()
|
||||
require.Error(t, err)
|
||||
|
||||
// unset fail and write metrics
|
||||
m.failWrite = false
|
||||
err = ro.Write()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify that 6 metrics were written
|
||||
assert.Len(t, m.Metrics(), 6)
|
||||
// Verify that they are in order
|
||||
expected := append(first5, next5[0])
|
||||
assert.Equal(t, expected, m.Metrics())
|
||||
}
|
||||
|
||||
type mockOutput struct {
|
||||
sync.Mutex
|
||||
|
||||
metrics []telegraf.Metric
|
||||
|
||||
// if true, mock a write failure
|
||||
failWrite bool
|
||||
}
|
||||
|
||||
func (m *mockOutput) Connect() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockOutput) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockOutput) Description() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *mockOutput) SampleConfig() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *mockOutput) Write(metrics []telegraf.Metric) error {
|
||||
m.Lock()
|
||||
defer m.Unlock()
|
||||
if m.failWrite {
|
||||
return fmt.Errorf("Failed Write!")
|
||||
}
|
||||
|
||||
if m.metrics == nil {
|
||||
m.metrics = []telegraf.Metric{}
|
||||
}
|
||||
|
||||
for _, metric := range metrics {
|
||||
m.metrics = append(m.metrics, metric)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockOutput) Metrics() []telegraf.Metric {
|
||||
m.Lock()
|
||||
defer m.Unlock()
|
||||
return m.metrics
|
||||
}
|
||||
|
||||
type perfOutput struct {
|
||||
// if true, mock a write failure
|
||||
failWrite bool
|
||||
}
|
||||
|
||||
func (m *perfOutput) Connect() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *perfOutput) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *perfOutput) Description() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *perfOutput) SampleConfig() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *perfOutput) Write(metrics []telegraf.Metric) error {
|
||||
if m.failWrite {
|
||||
return fmt.Errorf("Failed Write!")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
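The flush-when-full behavior exercised above can also be driven directly; a minimal sketch, assuming the `NewRunningOutput(name, output, conf, batchSize, bufferLimit)` argument order inferred from these tests (the names of the two integer parameters are assumptions, not confirmed by this diff):

```go
// Illustrative only: mirrors the code path of TestRunningOutputFlushWhenFull.
out := &mockOutput{}
ro := NewRunningOutput("example", out, conf, 6, 10)
for _, m := range first5 {
	ro.AddMetric(m) // buffered; no write happens yet
}
ro.AddMetric(next5[0]) // sixth metric hits the batch size and triggers a flush
_ = ro.Write()         // any remainder can be flushed manually
```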
94 metric.go Normal file
@@ -0,0 +1,94 @@
|
||||
package telegraf
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/influxdb/client/v2"
|
||||
)
|
||||
|
||||
type Metric interface {
|
||||
// Name returns the measurement name of the metric
|
||||
Name() string
|
||||
|
||||
// Name returns the tags associated with the metric
|
||||
Tags() map[string]string
|
||||
|
||||
// Time return the timestamp for the metric
|
||||
Time() time.Time
|
||||
|
||||
// UnixNano returns the unix nano time of the metric
|
||||
UnixNano() int64
|
||||
|
||||
// Fields returns the fields for the metric
|
||||
Fields() map[string]interface{}
|
||||
|
||||
// String returns a line-protocol string of the metric
|
||||
String() string
|
||||
|
||||
// PrecisionString returns a line-protocol string of the metric, at precision
|
||||
PrecisionString(precison string) string
|
||||
|
||||
// Point returns a influxdb client.Point object
|
||||
Point() *client.Point
|
||||
}
|
||||
|
||||
// metric is a wrapper of the influxdb client.Point struct
|
||||
type metric struct {
|
||||
pt *client.Point
|
||||
}
|
||||
|
||||
// NewMetric returns a metric with the given timestamp. If a timestamp is not
|
||||
// given, then data is sent to the database without a timestamp, in which case
|
||||
// the server will assign local time upon reception. NOTE: it is recommended to
|
||||
// send data with a timestamp.
|
||||
func NewMetric(
|
||||
name string,
|
||||
tags map[string]string,
|
||||
fields map[string]interface{},
|
||||
t ...time.Time,
|
||||
) (Metric, error) {
|
||||
var T time.Time
|
||||
if len(t) > 0 {
|
||||
T = t[0]
|
||||
}
|
||||
|
||||
pt, err := client.NewPoint(name, tags, fields, T)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &metric{
|
||||
pt: pt,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (m *metric) Name() string {
|
||||
return m.pt.Name()
|
||||
}
|
||||
|
||||
func (m *metric) Tags() map[string]string {
|
||||
return m.pt.Tags()
|
||||
}
|
||||
|
||||
func (m *metric) Time() time.Time {
|
||||
return m.pt.Time()
|
||||
}
|
||||
|
||||
func (m *metric) UnixNano() int64 {
|
||||
return m.pt.UnixNano()
|
||||
}
|
||||
|
||||
func (m *metric) Fields() map[string]interface{} {
|
||||
return m.pt.Fields()
|
||||
}
|
||||
|
||||
func (m *metric) String() string {
|
||||
return m.pt.String()
|
||||
}
|
||||
|
||||
func (m *metric) PrecisionString(precison string) string {
|
||||
return m.pt.PrecisionString(precison)
|
||||
}
|
||||
|
||||
func (m *metric) Point() *client.Point {
|
||||
return m.pt
|
||||
}
|
||||
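Since the timestamp parameter is variadic, callers may omit it entirely; a short usage sketch (mirroring the tests that follow):

```go
// With an explicit timestamp: the line-protocol string ends with UnixNano.
m1, _ := NewMetric("cpu", map[string]string{"host": "a"},
	map[string]interface{}{"usage_idle": float64(99)}, time.Now())
// Without one: the string has no trailing timestamp; the server assigns time.
m2, _ := NewMetric("cpu", map[string]string{"host": "a"},
	map[string]interface{}{"usage_idle": float64(99)})
fmt.Println(m1.String())
fmt.Println(m2.String())
```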
83 metric_test.go Normal file
@@ -0,0 +1,83 @@
|
||||
package telegraf
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestNewMetric(t *testing.T) {
|
||||
now := time.Now()
|
||||
|
||||
tags := map[string]string{
|
||||
"host": "localhost",
|
||||
"datacenter": "us-east-1",
|
||||
}
|
||||
fields := map[string]interface{}{
|
||||
"usage_idle": float64(99),
|
||||
"usage_busy": float64(1),
|
||||
}
|
||||
m, err := NewMetric("cpu", tags, fields, now)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.Equal(t, tags, m.Tags())
|
||||
assert.Equal(t, fields, m.Fields())
|
||||
assert.Equal(t, "cpu", m.Name())
|
||||
assert.Equal(t, now, m.Time())
|
||||
assert.Equal(t, now.UnixNano(), m.UnixNano())
|
||||
}
|
||||
|
||||
func TestNewMetricString(t *testing.T) {
|
||||
now := time.Now()
|
||||
|
||||
tags := map[string]string{
|
||||
"host": "localhost",
|
||||
}
|
||||
fields := map[string]interface{}{
|
||||
"usage_idle": float64(99),
|
||||
}
|
||||
m, err := NewMetric("cpu", tags, fields, now)
|
||||
assert.NoError(t, err)
|
||||
|
||||
lineProto := fmt.Sprintf("cpu,host=localhost usage_idle=99 %d",
|
||||
now.UnixNano())
|
||||
assert.Equal(t, lineProto, m.String())
|
||||
|
||||
lineProtoPrecision := fmt.Sprintf("cpu,host=localhost usage_idle=99 %d",
|
||||
now.Unix())
|
||||
assert.Equal(t, lineProtoPrecision, m.PrecisionString("s"))
|
||||
}
|
||||
|
||||
func TestNewMetricStringNoTime(t *testing.T) {
|
||||
tags := map[string]string{
|
||||
"host": "localhost",
|
||||
}
|
||||
fields := map[string]interface{}{
|
||||
"usage_idle": float64(99),
|
||||
}
|
||||
m, err := NewMetric("cpu", tags, fields)
|
||||
assert.NoError(t, err)
|
||||
|
||||
lineProto := fmt.Sprintf("cpu,host=localhost usage_idle=99")
|
||||
assert.Equal(t, lineProto, m.String())
|
||||
|
||||
lineProtoPrecision := fmt.Sprintf("cpu,host=localhost usage_idle=99")
|
||||
assert.Equal(t, lineProtoPrecision, m.PrecisionString("s"))
|
||||
}
|
||||
|
||||
func TestNewMetricFailNaN(t *testing.T) {
|
||||
now := time.Now()
|
||||
|
||||
tags := map[string]string{
|
||||
"host": "localhost",
|
||||
}
|
||||
fields := map[string]interface{}{
|
||||
"usage_idle": math.NaN(),
|
||||
}
|
||||
|
||||
_, err := NewMetric("cpu", tags, fields, now)
|
||||
assert.Error(t, err)
|
||||
}
|
||||
31 output.go Normal file
@@ -0,0 +1,31 @@
|
||||
package telegraf
|
||||
|
||||
type Output interface {
|
||||
// Connect to the Output
|
||||
Connect() error
|
||||
// Close any connections to the Output
|
||||
Close() error
|
||||
// Description returns a one-sentence description on the Output
|
||||
Description() string
|
||||
// SampleConfig returns the default configuration of the Output
|
||||
SampleConfig() string
|
||||
// Write takes in group of points to be written to the Output
|
||||
Write(metrics []Metric) error
|
||||
}
|
||||
|
||||
type ServiceOutput interface {
|
||||
// Connect to the Output
|
||||
Connect() error
|
||||
// Close any connections to the Output
|
||||
Close() error
|
||||
// Description returns a one-sentence description on the Output
|
||||
Description() string
|
||||
// SampleConfig returns the default configuration of the Output
|
||||
SampleConfig() string
|
||||
// Write takes in group of points to be written to the Output
|
||||
Write(metrics []Metric) error
|
||||
// Start the "service" that will provide an Output
|
||||
Start() error
|
||||
// Stop the "service" that will provide an Output
|
||||
Stop()
|
||||
}
|
||||
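Any type with these five methods satisfies `Output`; a minimal sketch (the `discardOutput` name is hypothetical, but compare `mockOutput` in the tests above):

```go
// discardOutput is a hypothetical Output that accepts and drops every metric.
type discardOutput struct{}

func (d *discardOutput) Connect() error               { return nil }
func (d *discardOutput) Close() error                 { return nil }
func (d *discardOutput) Description() string          { return "Discard all metrics" }
func (d *discardOutput) SampleConfig() string         { return "" }
func (d *discardOutput) Write(metrics []Metric) error { return nil }
```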
@@ -1,8 +0,0 @@
package all

import (
	_ "github.com/influxdb/telegraf/plugins/mysql"
	_ "github.com/influxdb/telegraf/plugins/postgresql"
	_ "github.com/influxdb/telegraf/plugins/redis"
	_ "github.com/influxdb/telegraf/plugins/system"
)
37 plugins/inputs/EXAMPLE_README.md Normal file
@@ -0,0 +1,37 @@
|
||||
# Example Input Plugin
|
||||
|
||||
The example plugin gathers metrics about example things
|
||||
|
||||
### Configuration:
|
||||
|
||||
```toml
|
||||
# Description
|
||||
[[inputs.example]]
|
||||
# SampleConfig
|
||||
```
|
||||
|
||||
### Measurements & Fields:
|
||||
|
||||
<optional description>
|
||||
|
||||
- measurement1
|
||||
- field1 (type, unit)
|
||||
- field2 (float, percent)
|
||||
- measurement2
|
||||
- field3 (integer, bytes)
|
||||
|
||||
### Tags:
|
||||
|
||||
- All measurements have the following tags:
|
||||
- tag1 (optional description)
|
||||
- tag2
|
||||
- measurement2 has the following tags:
|
||||
- tag3
|
||||
|
||||
### Example Output:
|
||||
|
||||
```
|
||||
$ ./telegraf -config telegraf.conf -input-filter example -test
|
||||
measurement1,tag1=foo,tag2=bar field1=1i,field2=2.1 1453831884664956455
|
||||
measurement2,tag1=foo,tag2=bar,tag3=baz field3=1i 1453831884664956455
|
||||
```
|
||||
265 plugins/inputs/aerospike/README.md Normal file
@@ -0,0 +1,265 @@
|
||||
## Telegraf Plugin: Aerospike
|
||||
|
||||
#### Plugin arguments:
|
||||
- **servers** string array: List of aerospike servers to query (def: 127.0.0.1:3000)
|
||||
|
||||
#### Description
|
||||
|
||||
The aerospike plugin queries aerospike server(s) and get node statistics. It also collects stats for
|
||||
all the configured namespaces.
|
||||
|
||||
For what the measurements mean, please consult the [Aerospike Metrics Reference Docs](http://www.aerospike.com/docs/reference/metrics).
|
||||
|
||||
The metric names, to make it less complicated in querying, have replaced all `-` with `_` as Aerospike metrics come in both forms (no idea why).
|
||||
|
||||
# Measurements:
|
||||
#### Aerospike Statistics [values]:
|
||||
|
||||
Meta:
|
||||
- units: Integer
|
||||
|
||||
Measurement names:
|
||||
- batch_index_queue
|
||||
- batch_index_unused_buffers
|
||||
- batch_queue
|
||||
- batch_tree_count
|
||||
- client_connections
|
||||
- data_used_bytes_memory
|
||||
- index_used_bytes_memory
|
||||
- info_queue
|
||||
- migrate_progress_recv
|
||||
- migrate_progress_send
|
||||
- migrate_rx_objs
|
||||
- migrate_tx_objs
|
||||
- objects
|
||||
- ongoing_write_reqs
|
||||
- partition_absent
|
||||
- partition_actual
|
||||
- partition_desync
|
||||
- partition_object_count
|
||||
- partition_ref_count
|
||||
- partition_replica
|
||||
- proxy_in_progress
|
||||
- query_agg_avg_rec_count
|
||||
- query_avg_rec_count
|
||||
- query_lookup_avg_rec_count
|
||||
- queue
|
||||
- record_locks
|
||||
- record_refs
|
||||
- sindex_used_bytes_memory
|
||||
- sindex_gc_garbage_cleaned
|
||||
- system_free_mem_pct
|
||||
- total_bytes_disk
|
||||
- total_bytes_memory
|
||||
- tree_count
|
||||
- scans_active
|
||||
- uptime
|
||||
- used_bytes_disk
|
||||
- used_bytes_memory
|
||||
- cluster_size
|
||||
- waiting_transactions
|
||||
|
||||
#### Aerospike Statistics [cumulative]:
|
||||
|
||||
Meta:
|
||||
- units: Integer
|
||||
|
||||
Measurement names:
|
||||
- batch_errors
|
||||
- batch_index_complete
|
||||
- batch_index_errors
|
||||
- batch_index_initiate
|
||||
- batch_index_timeout
|
||||
- batch_initiate
|
||||
- batch_timeout
|
||||
- err_duplicate_proxy_request
|
||||
- err_out_of_space
|
||||
- err_replica_non_null_node
|
||||
- err_replica_null_node
|
||||
- err_rw_cant_put_unique
|
||||
- err_rw_pending_limit
|
||||
- err_rw_request_not_found
|
||||
- err_storage_queue_full
|
||||
- err_sync_copy_null_master
|
||||
- err_sync_copy_null_node
|
||||
- err_tsvc_requests
|
||||
- err_write_fail_bin_exists
|
||||
- err_write_fail_generation
|
||||
- err_write_fail_generation_xdr
|
||||
- err_write_fail_incompatible_type
|
||||
- err_write_fail_key_exists
|
||||
- err_write_fail_key_mismatch
|
||||
- err_write_fail_not_found
|
||||
- err_write_fail_noxdr
|
||||
- err_write_fail_parameter
|
||||
- err_write_fail_prole_delete
|
||||
- err_write_fail_prole_generation
|
||||
- err_write_fail_prole_unknown
|
||||
- err_write_fail_unknown
|
||||
- fabric_msgs_rcvd
|
||||
- fabric_msgs_sent
|
||||
- heartbeat_received_foreign
|
||||
- heartbeat_received_self
|
||||
- migrate_msgs_recv
|
||||
- migrate_msgs_sent
|
||||
- migrate_num_incoming_accepted
|
||||
- migrate_num_incoming_refused
|
||||
- proxy_action
|
||||
- proxy_initiate
|
||||
- proxy_retry
|
||||
- proxy_retry_new_dest
|
||||
- proxy_retry_q_full
|
||||
- proxy_retry_same_dest
|
||||
- proxy_unproxy
|
||||
- query_abort
|
||||
- query_agg
|
||||
- query_agg_abort
|
||||
- query_agg_err
|
||||
- query_agg_success
|
||||
- query_bad_records
|
||||
- query_fail
|
||||
- query_long_queue_full
|
||||
- query_long_running
|
||||
- query_lookup_abort
|
||||
- query_lookup_err
|
||||
- query_lookups
|
||||
- query_lookup_success
|
||||
- query_reqs
|
||||
- query_short_queue_full
|
||||
- query_short_running
|
||||
- query_success
|
||||
- query_tracked
|
||||
- read_dup_prole
|
||||
- reaped_fds
|
||||
- rw_err_ack_badnode
|
||||
- rw_err_ack_internal
|
||||
- rw_err_ack_nomatch
|
||||
- rw_err_dup_cluster_key
|
||||
- rw_err_dup_internal
|
||||
- rw_err_dup_send
|
||||
- rw_err_write_cluster_key
|
||||
- rw_err_write_internal
|
||||
- rw_err_write_send
|
||||
- sindex_ucgarbage_found
|
||||
- sindex_gc_locktimedout
|
||||
- sindex_gc_inactivity_dur
|
||||
- sindex_gc_activity_dur
|
||||
- sindex_gc_list_creation_time
|
||||
- sindex_gc_list_deletion_time
|
||||
- sindex_gc_objects_validated
|
||||
- sindex_gc_garbage_found
|
||||
- stat_cluster_key_err_ack_dup_trans_reenqueue
|
||||
- stat_cluster_key_err_ack_rw_trans_reenqueue
|
||||
- stat_cluster_key_prole_retry
|
||||
- stat_cluster_key_regular_processed
|
||||
- stat_cluster_key_trans_to_proxy_retry
|
||||
- stat_deleted_set_object
|
||||
- stat_delete_success
|
||||
- stat_duplicate_operation
|
||||
- stat_evicted_objects
|
||||
- stat_evicted_objects_time
|
||||
- stat_evicted_set_objects
|
||||
- stat_expired_objects
|
||||
- stat_nsup_deletes_not_shipped
|
||||
- stat_proxy_errs
|
||||
- stat_proxy_reqs
|
||||
- stat_proxy_reqs_xdr
|
||||
- stat_proxy_success
|
||||
- stat_read_errs_notfound
|
||||
- stat_read_errs_other
|
||||
- stat_read_reqs
|
||||
- stat_read_reqs_xdr
|
||||
- stat_read_success
|
||||
- stat_rw_timeout
|
||||
- stat_slow_trans_queue_batch_pop
|
||||
- stat_slow_trans_queue_pop
|
||||
- stat_slow_trans_queue_push
|
||||
- stat_write_errs
|
||||
- stat_write_errs_notfound
|
||||
- stat_write_errs_other
|
||||
- stat_write_reqs
|
||||
- stat_write_reqs_xdr
|
||||
- stat_write_success
|
||||
- stat_xdr_pipe_miss
|
||||
- stat_xdr_pipe_writes
|
||||
- stat_zero_bin_records
|
||||
- storage_defrag_corrupt_record
|
||||
- storage_defrag_wait
|
||||
- transactions
|
||||
- basic_scans_succeeded
|
||||
- basic_scans_failed
|
||||
- aggr_scans_succeeded
|
||||
- aggr_scans_failed
|
||||
- udf_bg_scans_succeeded
|
||||
- udf_bg_scans_failed
|
||||
- udf_delete_err_others
|
||||
- udf_delete_reqs
|
||||
- udf_delete_success
|
||||
- udf_lua_errs
|
||||
- udf_query_rec_reqs
|
||||
- udf_read_errs_other
|
||||
- udf_read_reqs
|
||||
- udf_read_success
|
||||
- udf_replica_writes
|
||||
- udf_scan_rec_reqs
|
||||
- udf_write_err_others
|
||||
- udf_write_reqs
|
||||
- udf_write_success
|
||||
- write_master
|
||||
- write_prole
|
||||
|
||||
#### Aerospike Statistics [percentage]:
|
||||
|
||||
Meta:
|
||||
- units: percent (out of 100)
|
||||
|
||||
Measurement names:
|
||||
- free_pct_disk
|
||||
- free_pct_memory
|
||||
|
||||
# Measurements:
|
||||
#### Aerospike Namespace Statistics [values]:
|
||||
|
||||
Meta:
|
||||
- units: Integer
|
||||
- tags: `namespace=<namespace>`
|
||||
|
||||
Measurement names:
|
||||
- available_bin_names
|
||||
- available_pct
|
||||
- current_time
|
||||
- data_used_bytes_memory
|
||||
- index_used_bytes_memory
|
||||
- master_objects
|
||||
- max_evicted_ttl
|
||||
- max_void_time
|
||||
- non_expirable_objects
|
||||
- objects
|
||||
- prole_objects
|
||||
- sindex_used_bytes_memory
|
||||
- total_bytes_disk
|
||||
- total_bytes_memory
|
||||
- used_bytes_disk
|
||||
- used_bytes_memory
|
||||
|
||||
#### Aerospike Namespace Statistics [cumulative]:
|
||||
|
||||
Meta:
|
||||
- units: Integer
|
||||
- tags: `namespace=<namespace>`
|
||||
|
||||
Measurement names:
|
||||
- evicted_objects
|
||||
- expired_objects
|
||||
- set_deleted_objects
|
||||
- set_evicted_objects
|
||||
|
||||
#### Aerospike Namespace Statistics [percentage]:
|
||||
|
||||
Meta:
|
||||
- units: percent (out of 100)
|
||||
- tags: `namespace=<namespace>`
|
||||
|
||||
Measurement names:
|
||||
- free_pct_disk
|
||||
- free_pct_memory
|
||||
341 plugins/inputs/aerospike/aerospike.go Normal file
@@ -0,0 +1,341 @@
|
||||
package aerospike
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
"net"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
const (
|
||||
MSG_HEADER_SIZE = 8
|
||||
MSG_TYPE = 1 // Info is 1
|
||||
MSG_VERSION = 2
|
||||
)
|
||||
|
||||
var (
|
||||
STATISTICS_COMMAND = []byte("statistics\n")
|
||||
NAMESPACES_COMMAND = []byte("namespaces\n")
|
||||
)
|
||||
|
||||
type aerospikeMessageHeader struct {
|
||||
Version uint8
|
||||
Type uint8
|
||||
DataLen [6]byte
|
||||
}
|
||||
|
||||
type aerospikeMessage struct {
|
||||
aerospikeMessageHeader
|
||||
Data []byte
|
||||
}
|
||||
|
||||
// Taken from aerospike-client-go/types/message.go
|
||||
func (msg *aerospikeMessage) Serialize() []byte {
|
||||
msg.DataLen = msgLenToBytes(int64(len(msg.Data)))
|
||||
buf := bytes.NewBuffer([]byte{})
|
||||
binary.Write(buf, binary.BigEndian, msg.aerospikeMessageHeader)
|
||||
binary.Write(buf, binary.BigEndian, msg.Data[:])
|
||||
return buf.Bytes()
|
||||
}
|
||||
|
||||
type aerospikeInfoCommand struct {
|
||||
msg *aerospikeMessage
|
||||
}
|
||||
|
||||
// Taken from aerospike-client-go/info.go
|
||||
func (nfo *aerospikeInfoCommand) parseMultiResponse() (map[string]string, error) {
|
||||
responses := make(map[string]string)
|
||||
offset := int64(0)
|
||||
begin := int64(0)
|
||||
|
||||
dataLen := int64(len(nfo.msg.Data))
|
||||
|
||||
// Create reusable StringBuilder for performance.
|
||||
for offset < dataLen {
|
||||
b := nfo.msg.Data[offset]
|
||||
|
||||
if b == '\t' {
|
||||
name := nfo.msg.Data[begin:offset]
|
||||
offset++
|
||||
begin = offset
|
||||
|
||||
// Parse field value.
|
||||
for offset < dataLen {
|
||||
if nfo.msg.Data[offset] == '\n' {
|
||||
break
|
||||
}
|
||||
offset++
|
||||
}
|
||||
|
||||
if offset > begin {
|
||||
value := nfo.msg.Data[begin:offset]
|
||||
responses[string(name)] = string(value)
|
||||
} else {
|
||||
responses[string(name)] = ""
|
||||
}
|
||||
offset++
|
||||
begin = offset
|
||||
} else if b == '\n' {
|
||||
if offset > begin {
|
||||
name := nfo.msg.Data[begin:offset]
|
||||
responses[string(name)] = ""
|
||||
}
|
||||
offset++
|
||||
begin = offset
|
||||
} else {
|
||||
offset++
|
||||
}
|
||||
}
|
||||
|
||||
if offset > begin {
|
||||
name := nfo.msg.Data[begin:offset]
|
||||
responses[string(name)] = ""
|
||||
}
|
||||
return responses, nil
|
||||
}
|
||||
|
||||
type Aerospike struct {
|
||||
Servers []string
|
||||
}
|
||||
|
||||
var sampleConfig = `
|
||||
## Aerospike servers to connect to (with port)
|
||||
## This plugin will query all namespaces the aerospike
|
||||
## server has configured and get stats for them.
|
||||
servers = ["localhost:3000"]
|
||||
`
|
||||
|
||||
func (a *Aerospike) SampleConfig() string {
|
||||
return sampleConfig
|
||||
}
|
||||
|
||||
func (a *Aerospike) Description() string {
|
||||
return "Read stats from an aerospike server"
|
||||
}
|
||||
|
||||
func (a *Aerospike) Gather(acc telegraf.Accumulator) error {
|
||||
if len(a.Servers) == 0 {
|
||||
return a.gatherServer("127.0.0.1:3000", acc)
|
||||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
|
||||
var outerr error
|
||||
|
||||
for _, server := range a.Servers {
|
||||
wg.Add(1)
|
||||
go func(server string) {
|
||||
defer wg.Done()
|
||||
outerr = a.gatherServer(server, acc)
|
||||
}(server)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
return outerr
|
||||
}
|
||||
|
||||
func (a *Aerospike) gatherServer(host string, acc telegraf.Accumulator) error {
|
||||
aerospikeInfo, err := getMap(STATISTICS_COMMAND, host)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Aerospike info failed: %s", err)
|
||||
}
|
||||
readAerospikeStats(aerospikeInfo, acc, host, "")
|
||||
namespaces, err := getList(NAMESPACES_COMMAND, host)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Aerospike namespace list failed: %s", err)
|
||||
}
|
||||
for ix := range namespaces {
|
||||
nsInfo, err := getMap([]byte("namespace/"+namespaces[ix]+"\n"), host)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Aerospike namespace '%s' query failed: %s", namespaces[ix], err)
|
||||
}
|
||||
readAerospikeStats(nsInfo, acc, host, namespaces[ix])
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func getMap(key []byte, host string) (map[string]string, error) {
|
||||
data, err := get(key, host)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to get data: %s", err)
|
||||
}
|
||||
parsed, err := unmarshalMapInfo(data, string(key))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to unmarshal data: %s", err)
|
||||
}
|
||||
|
||||
return parsed, nil
|
||||
}
|
||||
|
||||
func getList(key []byte, host string) ([]string, error) {
|
||||
data, err := get(key, host)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to get data: %s", err)
|
||||
}
|
||||
parsed, err := unmarshalListInfo(data, string(key))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to unmarshal data: %s", err)
|
||||
}
|
||||
|
||||
return parsed, nil
|
||||
}
|
||||
|
||||
func get(key []byte, host string) (map[string]string, error) {
|
||||
var err error
|
||||
var data map[string]string
|
||||
|
||||
asInfo := &aerospikeInfoCommand{
|
||||
msg: &aerospikeMessage{
|
||||
aerospikeMessageHeader: aerospikeMessageHeader{
|
||||
Version: uint8(MSG_VERSION),
|
||||
Type: uint8(MSG_TYPE),
|
||||
DataLen: msgLenToBytes(int64(len(key))),
|
||||
},
|
||||
Data: key,
|
||||
},
|
||||
}
|
||||
|
||||
cmd := asInfo.msg.Serialize()
|
||||
addr, err := net.ResolveTCPAddr("tcp", host)
|
||||
if err != nil {
|
||||
return data, fmt.Errorf("Lookup failed for '%s': %s", host, err)
|
||||
}
|
||||
|
||||
conn, err := net.DialTCP("tcp", nil, addr)
|
||||
if err != nil {
|
||||
return data, fmt.Errorf("Connection failed for '%s': %s", host, err)
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
_, err = conn.Write(cmd)
|
||||
if err != nil {
|
||||
return data, fmt.Errorf("Failed to send to '%s': %s", host, err)
|
||||
}
|
||||
|
||||
msgHeader := bytes.NewBuffer(make([]byte, MSG_HEADER_SIZE))
|
||||
_, err = readLenFromConn(conn, msgHeader.Bytes(), MSG_HEADER_SIZE)
|
||||
if err != nil {
|
||||
return data, fmt.Errorf("Failed to read header: %s", err)
|
||||
}
|
||||
err = binary.Read(msgHeader, binary.BigEndian, &asInfo.msg.aerospikeMessageHeader)
|
||||
if err != nil {
|
||||
return data, fmt.Errorf("Failed to unmarshal header: %s", err)
|
||||
}
|
||||
|
||||
msgLen := msgLenFromBytes(asInfo.msg.aerospikeMessageHeader.DataLen)
|
||||
|
||||
if int64(len(asInfo.msg.Data)) != msgLen {
|
||||
asInfo.msg.Data = make([]byte, msgLen)
|
||||
}
|
||||
|
||||
_, err = readLenFromConn(conn, asInfo.msg.Data, len(asInfo.msg.Data))
|
||||
if err != nil {
|
||||
return data, fmt.Errorf("Failed to read from connection to '%s': %s", host, err)
|
||||
}
|
||||
|
||||
data, err = asInfo.parseMultiResponse()
|
||||
if err != nil {
|
||||
return data, fmt.Errorf("Failed to parse response from '%s': %s", host, err)
|
||||
}
|
||||
|
||||
return data, err
|
||||
}
|
||||
|
||||
func readAerospikeStats(
|
||||
stats map[string]string,
|
||||
acc telegraf.Accumulator,
|
||||
host string,
|
||||
namespace string,
|
||||
) {
|
||||
fields := make(map[string]interface{})
|
||||
tags := map[string]string{
|
||||
"aerospike_host": host,
|
||||
"namespace": "_service",
|
||||
}
|
||||
|
||||
if namespace != "" {
|
||||
tags["namespace"] = namespace
|
||||
}
|
||||
for key, value := range stats {
|
||||
// We are going to ignore all string based keys
|
||||
val, err := strconv.ParseInt(value, 10, 64)
|
||||
if err == nil {
|
||||
if strings.Contains(key, "-") {
|
||||
key = strings.Replace(key, "-", "_", -1)
|
||||
}
|
||||
fields[key] = val
|
||||
}
|
||||
}
|
||||
acc.AddFields("aerospike", fields, tags)
|
||||
}
|
||||
|
||||
func unmarshalMapInfo(infoMap map[string]string, key string) (map[string]string, error) {
|
||||
key = strings.TrimSuffix(key, "\n")
|
||||
res := map[string]string{}
|
||||
|
||||
v, exists := infoMap[key]
|
||||
if !exists {
|
||||
return res, fmt.Errorf("Key '%s' missing from info", key)
|
||||
}
|
||||
|
||||
values := strings.Split(v, ";")
|
||||
for i := range values {
|
||||
kv := strings.Split(values[i], "=")
|
||||
if len(kv) > 1 {
|
||||
res[kv[0]] = kv[1]
|
||||
}
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func unmarshalListInfo(infoMap map[string]string, key string) ([]string, error) {
|
||||
key = strings.TrimSuffix(key, "\n")
|
||||
|
||||
v, exists := infoMap[key]
|
||||
if !exists {
|
||||
return []string{}, fmt.Errorf("Key '%s' missing from info", key)
|
||||
}
|
||||
|
||||
values := strings.Split(v, ";")
|
||||
return values, nil
|
||||
}
|
||||
|
||||
func readLenFromConn(c net.Conn, buffer []byte, length int) (total int, err error) {
|
||||
var r int
|
||||
for total < length {
|
||||
r, err = c.Read(buffer[total:length])
|
||||
total += r
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Taken from aerospike-client-go/types/message.go
|
||||
func msgLenToBytes(DataLen int64) [6]byte {
|
||||
b := make([]byte, 8)
|
||||
binary.BigEndian.PutUint64(b, uint64(DataLen))
|
||||
res := [6]byte{}
|
||||
copy(res[:], b[2:])
|
||||
return res
|
||||
}
|
||||
|
||||
// Taken from aerospike-client-go/types/message.go
|
||||
func msgLenFromBytes(buf [6]byte) int64 {
|
||||
nbytes := append([]byte{0, 0}, buf[:]...)
|
||||
DataLen := binary.BigEndian.Uint64(nbytes)
|
||||
return int64(DataLen)
|
||||
}
|
||||
|
||||
func init() {
|
||||
inputs.Add("aerospike", func() telegraf.Input {
|
||||
return &Aerospike{}
|
||||
})
|
||||
}
|
||||
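The info protocol parsed above frames each response as a tab-separated `name\tvalue` pair terminated by a newline; a sketch of the parse path, using an illustrative payload rather than a captured server response:

```go
// Illustrative: what parseMultiResponse and unmarshalMapInfo do with one entry.
nfo := &aerospikeInfoCommand{
	msg: &aerospikeMessage{
		Data: []byte("statistics\tcluster_size=3;uptime=42\n"),
	},
}
parsed, _ := nfo.parseMultiResponse()
// parsed["statistics"] == "cluster_size=3;uptime=42"
stats, _ := unmarshalMapInfo(parsed, "statistics")
// stats["cluster_size"] == "3", stats["uptime"] == "42"
```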
118 plugins/inputs/aerospike/aerospike_test.go Normal file
@@ -0,0 +1,118 @@
|
||||
package aerospike
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestAerospikeStatistics(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("Skipping integration test in short mode")
|
||||
}
|
||||
|
||||
a := &Aerospike{
|
||||
Servers: []string{testutil.GetLocalHost() + ":3000"},
|
||||
}
|
||||
|
||||
var acc testutil.Accumulator
|
||||
|
||||
err := a.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Only use a few of the metrics
|
||||
asMetrics := []string{
|
||||
"transactions",
|
||||
"stat_write_errs",
|
||||
"stat_read_reqs",
|
||||
"stat_write_reqs",
|
||||
}
|
||||
|
||||
for _, metric := range asMetrics {
|
||||
assert.True(t, acc.HasIntField("aerospike", metric), metric)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestAerospikeMsgLenFromToBytes(t *testing.T) {
|
||||
var i int64 = 8
|
||||
assert.True(t, i == msgLenFromBytes(msgLenToBytes(i)))
|
||||
}
|
||||
|
||||
func TestReadAerospikeStatsNoNamespace(t *testing.T) {
|
||||
// Also test for re-writing
|
||||
var acc testutil.Accumulator
|
||||
stats := map[string]string{
|
||||
"stat-write-errs": "12345",
|
||||
"stat_read_reqs": "12345",
|
||||
}
|
||||
readAerospikeStats(stats, &acc, "host1", "")
|
||||
|
||||
fields := map[string]interface{}{
|
||||
"stat_write_errs": int64(12345),
|
||||
"stat_read_reqs": int64(12345),
|
||||
}
|
||||
tags := map[string]string{
|
||||
"aerospike_host": "host1",
|
||||
"namespace": "_service",
|
||||
}
|
||||
acc.AssertContainsTaggedFields(t, "aerospike", fields, tags)
|
||||
}
|
||||
|
||||
func TestReadAerospikeStatsNamespace(t *testing.T) {
|
||||
var acc testutil.Accumulator
|
||||
stats := map[string]string{
|
||||
"stat_write_errs": "12345",
|
||||
"stat_read_reqs": "12345",
|
||||
}
|
||||
readAerospikeStats(stats, &acc, "host1", "test")
|
||||
|
||||
fields := map[string]interface{}{
|
||||
"stat_write_errs": int64(12345),
|
||||
"stat_read_reqs": int64(12345),
|
||||
}
|
||||
tags := map[string]string{
|
||||
"aerospike_host": "host1",
|
||||
"namespace": "test",
|
||||
}
|
||||
acc.AssertContainsTaggedFields(t, "aerospike", fields, tags)
|
||||
}
|
||||
|
||||
func TestAerospikeUnmarshalList(t *testing.T) {
|
||||
i := map[string]string{
|
||||
"test": "one;two;three",
|
||||
}
|
||||
|
||||
expected := []string{"one", "two", "three"}
|
||||
|
||||
list, err := unmarshalListInfo(i, "test2")
|
||||
assert.True(t, err != nil)
|
||||
|
||||
list, err = unmarshalListInfo(i, "test")
|
||||
assert.True(t, err == nil)
|
||||
equal := true
|
||||
for ix := range expected {
|
||||
if list[ix] != expected[ix] {
|
||||
equal = false
|
||||
break
|
||||
}
|
||||
}
|
||||
assert.True(t, equal)
|
||||
}
|
||||
|
||||
func TestAerospikeUnmarshalMap(t *testing.T) {
|
||||
i := map[string]string{
|
||||
"test": "key1=value1;key2=value2",
|
||||
}
|
||||
|
||||
expected := map[string]string{
|
||||
"key1": "value1",
|
||||
"key2": "value2",
|
||||
}
|
||||
m, err := unmarshalMapInfo(i, "test")
|
||||
assert.True(t, err == nil)
|
||||
assert.True(t, reflect.DeepEqual(m, expected))
|
||||
}
|
||||
67 plugins/inputs/all/all.go Normal file
@@ -0,0 +1,67 @@
|
||||
package all
|
||||
|
||||
import (
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/aerospike"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/apache"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/bcache"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/cassandra"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/cloudwatch"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/couchbase"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/couchdb"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/disque"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/dns_query"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/docker"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/dovecot"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/elasticsearch"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/exec"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/filestat"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/github_webhooks"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/haproxy"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/http_response"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/httpjson"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/influxdb"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/ipmi_sensor"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/jolokia"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/kafka_consumer"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/leofs"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/lustre2"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/mailchimp"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/memcached"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/mesos"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/mongodb"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/mqtt_consumer"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/mysql"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/nats_consumer"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/net_response"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/nginx"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/nsq"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/ntpq"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/passenger"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/phpfpm"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/ping"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/postgresql"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/postgresql_extensible"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/powerdns"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/procstat"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/prometheus"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/puppetagent"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/rabbitmq"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/raindrops"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/redis"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/rethinkdb"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/riak"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/sensors"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/snmp"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/sqlserver"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/statsd"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/sysstat"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/system"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/tail"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/tcp_listener"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/trig"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/twemproxy"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/udp_listener"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/win_perf_counters"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/zfs"
|
||||
_ "github.com/influxdata/telegraf/plugins/inputs/zookeeper"
|
||||
)
|
||||
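Each blank import above exists only for its side effect: the package's `init` function registers a plugin factory with the input registry. A sketch of the pattern, as it appears verbatim in the plugin files later in this diff (`Example` is a placeholder name, not a real plugin):

```go
// In a plugin package: register a constructor under the plugin's config name.
func init() {
	inputs.Add("example", func() telegraf.Input {
		return &Example{} // Example is a hypothetical plugin struct
	})
}
```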
45 plugins/inputs/apache/README.md Normal file
@@ -0,0 +1,45 @@
|
||||
# Telegraf plugin: Apache
|
||||
|
||||
#### Plugin arguments:
|
||||
- **urls** []string: List of apache-status URLs to collect from.
|
||||
|
||||
#### Description
|
||||
|
||||
The Apache plugin collects from the /server-status?auto URL. See
|
||||
[apache.org/server-status?auto](http://www.apache.org/server-status?auto) for an
|
||||
example. And
|
||||
[here](http://httpd.apache.org/docs/2.2/mod/mod_status.html) for the apache
|
||||
mod_status documentation.
|
||||
|
||||
# Measurements:
|
||||
|
||||
Meta:
|
||||
- tags: `port=<port>`, `server=url`
|
||||
|
||||
- apache_TotalAccesses
|
||||
- apache_TotalkBytes
|
||||
- apache_CPULoad
|
||||
- apache_Uptime
|
||||
- apache_ReqPerSec
|
||||
- apache_BytesPerSec
|
||||
- apache_BytesPerReq
|
||||
- apache_BusyWorkers
|
||||
- apache_IdleWorkers
|
||||
- apache_ConnsTotal
|
||||
- apache_ConnsAsyncWriting
|
||||
- apache_ConnsAsyncKeepAlive
|
||||
- apache_ConnsAsyncClosing
|
||||
|
||||
### Scoreboard measurements
|
||||
|
||||
- apache_scboard_waiting
|
||||
- apache_scboard_starting
|
||||
- apache_scboard_reading
|
||||
- apache_scboard_sending
|
||||
- apache_scboard_keepalive
|
||||
- apache_scboard_dnslookup
|
||||
- apache_scboard_closing
|
||||
- apache_scboard_logging
|
||||
- apache_scboard_finishing
|
||||
- apache_scboard_idle_cleanup
|
||||
- apache_scboard_open
|
||||
174 plugins/inputs/apache/apache.go Normal file
@@ -0,0 +1,174 @@
|
||||
package apache
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
)
|
||||
|
||||
type Apache struct {
|
||||
Urls []string
|
||||
}
|
||||
|
||||
var sampleConfig = `
|
||||
## An array of Apache status URI to gather stats.
|
||||
urls = ["http://localhost/server-status?auto"]
|
||||
`
|
||||
|
||||
func (n *Apache) SampleConfig() string {
|
||||
return sampleConfig
|
||||
}
|
||||
|
||||
func (n *Apache) Description() string {
|
||||
return "Read Apache status information (mod_status)"
|
||||
}
|
||||
|
||||
func (n *Apache) Gather(acc telegraf.Accumulator) error {
|
||||
var wg sync.WaitGroup
|
||||
var outerr error
|
||||
|
||||
for _, u := range n.Urls {
|
||||
addr, err := url.Parse(u)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Unable to parse address '%s': %s", u, err)
|
||||
}
|
||||
|
||||
wg.Add(1)
|
||||
go func(addr *url.URL) {
|
||||
defer wg.Done()
|
||||
outerr = n.gatherUrl(addr, acc)
|
||||
}(addr)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
|
||||
return outerr
|
||||
}
|
||||
|
||||
var tr = &http.Transport{
|
||||
ResponseHeaderTimeout: time.Duration(3 * time.Second),
|
||||
}
|
||||
|
||||
var client = &http.Client{
|
||||
Transport: tr,
|
||||
Timeout: time.Duration(4 * time.Second),
|
||||
}
|
||||
|
||||
func (n *Apache) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error {
|
||||
resp, err := client.Get(addr.String())
|
||||
if err != nil {
|
||||
return fmt.Errorf("error making HTTP request to %s: %s", addr.String(), err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return fmt.Errorf("%s returned HTTP status %s", addr.String(), resp.Status)
|
||||
}
|
||||
|
||||
tags := getTags(addr)
|
||||
|
||||
sc := bufio.NewScanner(resp.Body)
|
||||
fields := make(map[string]interface{})
|
||||
for sc.Scan() {
|
||||
line := sc.Text()
|
||||
if strings.Contains(line, ":") {
|
||||
parts := strings.SplitN(line, ":", 2)
|
||||
key, part := strings.Replace(parts[0], " ", "", -1), strings.TrimSpace(parts[1])
|
||||
|
||||
switch key {
|
||||
case "Scoreboard":
|
||||
for field, value := range n.gatherScores(part) {
|
||||
fields[field] = value
|
||||
}
|
||||
default:
|
||||
value, err := strconv.ParseFloat(part, 64)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
fields[key] = value
|
||||
}
|
||||
}
|
||||
}
|
||||
acc.AddFields("apache", fields, tags)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n *Apache) gatherScores(data string) map[string]interface{} {
|
||||
var waiting, open int = 0, 0
|
||||
var S, R, W, K, D, C, L, G, I int = 0, 0, 0, 0, 0, 0, 0, 0, 0
|
||||
|
||||
for _, s := range strings.Split(data, "") {
|
||||
|
||||
switch s {
|
||||
case "_":
|
||||
waiting++
|
||||
case "S":
|
||||
S++
|
||||
case "R":
|
||||
R++
|
||||
case "W":
|
||||
W++
|
||||
case "K":
|
||||
K++
|
||||
case "D":
|
||||
D++
|
||||
case "C":
|
||||
C++
|
||||
case "L":
|
||||
L++
|
||||
case "G":
|
||||
G++
|
||||
case "I":
|
||||
I++
|
||||
case ".":
|
||||
open++
|
||||
}
|
||||
}
|
||||
|
||||
fields := map[string]interface{}{
|
||||
"scboard_waiting": float64(waiting),
|
||||
"scboard_starting": float64(S),
|
||||
"scboard_reading": float64(R),
|
||||
"scboard_sending": float64(W),
|
||||
"scboard_keepalive": float64(K),
|
||||
"scboard_dnslookup": float64(D),
|
||||
"scboard_closing": float64(C),
|
||||
"scboard_logging": float64(L),
|
||||
"scboard_finishing": float64(G),
|
||||
"scboard_idle_cleanup": float64(I),
|
||||
"scboard_open": float64(open),
|
||||
}
|
||||
return fields
|
||||
}
|
||||
|
||||
// Get tag(s) for the apache plugin
|
||||
func getTags(addr *url.URL) map[string]string {
|
||||
h := addr.Host
|
||||
host, port, err := net.SplitHostPort(h)
|
||||
if err != nil {
|
||||
host = addr.Host
|
||||
if addr.Scheme == "http" {
|
||||
port = "80"
|
||||
} else if addr.Scheme == "https" {
|
||||
port = "443"
|
||||
} else {
|
||||
port = ""
|
||||
}
|
||||
}
|
||||
return map[string]string{"server": host, "port": port}
|
||||
}
|
||||
|
||||
func init() {
|
||||
inputs.Add("apache", func() telegraf.Input {
|
||||
return &Apache{}
|
||||
})
|
||||
}
|
||||
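For a concrete sense of gatherScores, here is what it yields for a tiny, made-up scoreboard string:

```go
a := &Apache{}
fields := a.gatherScores("__SRW.")
// fields["scboard_waiting"]  == float64(2) // two '_'
// fields["scboard_starting"] == float64(1) // one 'S'
// fields["scboard_reading"]  == float64(1) // one 'R'
// fields["scboard_sending"]  == float64(1) // one 'W'
// fields["scboard_open"]     == float64(1) // one '.'
```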
73 plugins/inputs/apache/apache_test.go Normal file
@@ -0,0 +1,73 @@
|
||||
package apache
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
var apacheStatus = `
|
||||
Total Accesses: 129811861
|
||||
Total kBytes: 5213701865
|
||||
CPULoad: 6.51929
|
||||
Uptime: 941553
|
||||
ReqPerSec: 137.87
|
||||
BytesPerSec: 5670240
|
||||
BytesPerReq: 41127.4
|
||||
BusyWorkers: 270
|
||||
IdleWorkers: 630
|
||||
ConnsTotal: 1451
|
||||
ConnsAsyncWriting: 32
|
||||
ConnsAsyncKeepAlive: 945
|
||||
ConnsAsyncClosing: 205
|
||||
Scoreboard: WW_____W_RW_R_W__RRR____WR_W___WW________W_WW_W_____R__R_WR__WRWR_RRRW___R_RWW__WWWRW__R_RW___RR_RW_R__W__WR_WWW______WWR__R___R_WR_W___RW______RR________________W______R__RR______W________________R____R__________________________RW_W____R_____W_R_________________R____RR__W___R_R____RW______R____W______W_W_R_R______R__R_R__________R____W_______WW____W____RR__W_____W_R_______W__________W___W____________W_______WRR_R_W____W_____R____W_WW_R____RRW__W............................................................................................................................................................................................................................................................................................................WRRWR____WR__RR_R___RWR_________W_R____RWRRR____R_R__RW_R___WWW_RW__WR_RRR____W___R____WW_R__R___RR_W_W_RRRRWR__RRWR__RRW_W_RRRW_R_RR_W__RR_RWRR_R__R___RR_RR______R__RR____R_____W_R_R_R__R__R__________W____WW_R___R_R___R_________RR__RR____RWWWW___W_R________R_R____R_W___W___R___W_WRRWW_______R__W_RW_______R________RR__R________W_______________________W_W______________RW_________WR__R___R__R_______________WR_R_________W___RW_____R____________W____.......................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................
...............................................................................................................................................................................................................
|
||||
`
|
||||
|
||||
func TestHTTPApache(t *testing.T) {
|
||||
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
fmt.Fprintln(w, apacheStatus)
|
||||
}))
|
||||
defer ts.Close()
|
||||
|
||||
a := Apache{
|
||||
Urls: []string{ts.URL},
|
||||
}
|
||||
|
||||
var acc testutil.Accumulator
|
||||
err := a.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
|
||||
fields := map[string]interface{}{
|
||||
"TotalAccesses": float64(1.29811861e+08),
|
||||
"TotalkBytes": float64(5.213701865e+09),
|
||||
"CPULoad": float64(6.51929),
|
||||
"Uptime": float64(941553),
|
||||
"ReqPerSec": float64(137.87),
|
||||
"BytesPerSec": float64(5.67024e+06),
|
||||
"BytesPerReq": float64(41127.4),
|
||||
"BusyWorkers": float64(270),
|
||||
"IdleWorkers": float64(630),
|
||||
"ConnsTotal": float64(1451),
|
||||
"ConnsAsyncWriting": float64(32),
|
||||
"ConnsAsyncKeepAlive": float64(945),
|
||||
"ConnsAsyncClosing": float64(205),
|
||||
"scboard_waiting": float64(630),
|
||||
"scboard_starting": float64(0),
|
||||
"scboard_reading": float64(157),
|
||||
"scboard_sending": float64(113),
|
||||
"scboard_keepalive": float64(0),
|
||||
"scboard_dnslookup": float64(0),
|
||||
"scboard_closing": float64(0),
|
||||
"scboard_logging": float64(0),
|
||||
"scboard_finishing": float64(0),
|
||||
"scboard_idle_cleanup": float64(0),
|
||||
"scboard_open": float64(2850),
|
||||
}
|
||||
acc.AssertContainsFields(t, "apache", fields)
|
||||
}
|
||||
89 plugins/inputs/bcache/README.md Normal file
@@ -0,0 +1,89 @@
|
||||
# Telegraf plugin: bcache
|
||||
|
||||
Get bcache stat from stats_total directory and dirty_data file.
|
||||
|
||||
# Measurements
|
||||
|
||||
Meta:
|
||||
|
||||
- tags: `backing_dev=dev bcache_dev=dev`
|
||||
|
||||
Measurement names:
|
||||
|
||||
- dirty_data
|
||||
- bypassed
|
||||
- cache_bypass_hits
|
||||
- cache_bypass_misses
|
||||
- cache_hit_ratio
|
||||
- cache_hits
|
||||
- cache_miss_collisions
|
||||
- cache_misses
|
||||
- cache_readaheads
|
||||
|
||||
### Description
|
||||
|
||||
```
|
||||
dirty_data
|
||||
Amount of dirty data for this backing device in the cache. Continuously
|
||||
updated unlike the cache set's version, but may be slightly off.
|
||||
|
||||
bypassed
|
||||
Amount of IO (both reads and writes) that has bypassed the cache
|
||||
|
||||
|
||||
cache_bypass_hits
|
||||
cache_bypass_misses
|
||||
Hits and misses for IO that is intended to skip the cache are still counted,
|
||||
but broken out here.
|
||||
|
||||
cache_hits
|
||||
cache_misses
|
||||
cache_hit_ratio
|
||||
Hits and misses are counted per individual IO as bcache sees them; a
|
||||
partial hit is counted as a miss.
|
||||
|
||||
cache_miss_collisions
|
||||
Counts instances where data was going to be inserted into the cache from a
|
||||
cache miss, but raced with a write and data was already present (usually 0
|
||||
since the synchronization for cache misses was rewritten)
|
||||
|
||||
cache_readaheads
|
||||
Count of times readahead occurred.
|
||||
```
|
||||
|
||||
# Example output
|
||||
|
||||
Using this configuration:
|
||||
|
||||
```
|
||||
[bcache]
|
||||
# Bcache sets path
|
||||
# If not specified, then default is:
|
||||
# bcachePath = "/sys/fs/bcache"
|
||||
#
|
||||
# By default, telegraf gather stats for all bcache devices
|
||||
# Setting devices will restrict the stats to the specified
|
||||
# bcache devices.
|
||||
# bcacheDevs = ["bcache0", ...]
|
||||
```
|
||||
|
||||
When run with:
|
||||
|
||||
```
|
||||
./telegraf -config telegraf.conf -input-filter bcache -test
|
||||
```
|
||||
|
||||
It produces:
|
||||
|
||||
```
|
||||
* Plugin: bcache, Collection 1
|
||||
> [backing_dev="md10" bcache_dev="bcache0"] bcache_dirty_data value=11639194
|
||||
> [backing_dev="md10" bcache_dev="bcache0"] bcache_bypassed value=5167704440832
|
||||
> [backing_dev="md10" bcache_dev="bcache0"] bcache_cache_bypass_hits value=146270986
|
||||
> [backing_dev="md10" bcache_dev="bcache0"] bcache_cache_bypass_misses value=0
|
||||
> [backing_dev="md10" bcache_dev="bcache0"] bcache_cache_hit_ratio value=90
|
||||
> [backing_dev="md10" bcache_dev="bcache0"] bcache_cache_hits value=511941651
|
||||
> [backing_dev="md10" bcache_dev="bcache0"] bcache_cache_miss_collisions value=157678
|
||||
> [backing_dev="md10" bcache_dev="bcache0"] bcache_cache_misses value=50647396
|
||||
> [backing_dev="md10" bcache_dev="bcache0"] bcache_cache_readaheads value=0
|
||||
```
|
||||
142 plugins/inputs/bcache/bcache.go Normal file
@@ -0,0 +1,142 @@
|
||||
package bcache
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
)
|
||||
|
||||
type Bcache struct {
|
||||
BcachePath string
|
||||
BcacheDevs []string
|
||||
}
|
||||
|
||||
var sampleConfig = `
|
||||
## Bcache sets path
|
||||
## If not specified, then default is:
|
||||
bcachePath = "/sys/fs/bcache"
|
||||
|
||||
## By default, telegraf gather stats for all bcache devices
|
||||
## Setting devices will restrict the stats to the specified
|
||||
## bcache devices.
|
||||
bcacheDevs = ["bcache0"]
|
||||
`
|
||||
|
||||
func (b *Bcache) SampleConfig() string {
|
||||
return sampleConfig
|
||||
}
|
||||
|
||||
func (b *Bcache) Description() string {
|
||||
return "Read metrics of bcache from stats_total and dirty_data"
|
||||
}
|
||||
|
||||
func getTags(bdev string) map[string]string {
|
||||
backingDevFile, _ := os.Readlink(bdev)
|
||||
backingDevPath := strings.Split(backingDevFile, "/")
|
||||
backingDev := backingDevPath[len(backingDevPath)-2]
|
||||
|
||||
bcacheDevFile, _ := os.Readlink(bdev + "/dev")
|
||||
bcacheDevPath := strings.Split(bcacheDevFile, "/")
|
||||
bcacheDev := bcacheDevPath[len(bcacheDevPath)-1]
|
||||
|
||||
return map[string]string{"backing_dev": backingDev, "bcache_dev": bcacheDev}
|
||||
}
|
||||
|
||||
func prettyToBytes(v string) uint64 {
|
||||
var factors = map[string]uint64{
|
||||
"k": 1 << 10,
|
||||
"M": 1 << 20,
|
||||
"G": 1 << 30,
|
||||
"T": 1 << 40,
|
||||
"P": 1 << 50,
|
||||
"E": 1 << 60,
|
||||
}
|
||||
var factor uint64
|
||||
factor = 1
|
||||
prefix := v[len(v)-1 : len(v)]
|
||||
if factors[prefix] != 0 {
|
||||
v = v[:len(v)-1]
|
||||
factor = factors[prefix]
|
||||
}
|
||||
result, _ := strconv.ParseFloat(v, 32)
|
||||
result = result * float64(factor)
|
||||
|
||||
return uint64(result)
|
||||
}
|
||||
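For example, the suffix multipliers give (values computed from the function above):

```go
prettyToBytes("1.5G") // 1610612736, i.e. 1.5 * 2^30
prettyToBytes("512")  // 512: no recognized suffix, so the factor stays 1
```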
|
||||
func (b *Bcache) gatherBcache(bdev string, acc telegraf.Accumulator) error {
|
||||
tags := getTags(bdev)
|
||||
metrics, err := filepath.Glob(bdev + "/stats_total/*")
|
||||
if len(metrics) < 0 {
|
||||
return errors.New("Can't read any stats file")
|
||||
}
|
||||
file, err := ioutil.ReadFile(bdev + "/dirty_data")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rawValue := strings.TrimSpace(string(file))
|
||||
value := prettyToBytes(rawValue)
|
||||
|
||||
fields := make(map[string]interface{})
|
||||
fields["dirty_data"] = value
|
||||
|
||||
for _, path := range metrics {
|
||||
key := filepath.Base(path)
|
||||
file, err := ioutil.ReadFile(path)
|
||||
rawValue := strings.TrimSpace(string(file))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if key == "bypassed" {
|
||||
value := prettyToBytes(rawValue)
|
||||
fields[key] = value
|
||||
} else {
|
||||
value, _ := strconv.ParseUint(rawValue, 10, 64)
|
||||
fields[key] = value
|
||||
}
|
||||
}
|
||||
acc.AddFields("bcache", fields, tags)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *Bcache) Gather(acc telegraf.Accumulator) error {
|
||||
bcacheDevsChecked := make(map[string]bool)
|
||||
var restrictDevs bool
|
||||
if len(b.BcacheDevs) != 0 {
|
||||
restrictDevs = true
|
||||
for _, bcacheDev := range b.BcacheDevs {
|
||||
bcacheDevsChecked[bcacheDev] = true
|
||||
}
|
||||
}
|
||||
|
||||
bcachePath := b.BcachePath
|
||||
if len(bcachePath) == 0 {
|
||||
bcachePath = "/sys/fs/bcache"
|
||||
}
|
||||
bdevs, _ := filepath.Glob(bcachePath + "/*/bdev*")
|
||||
if len(bdevs) < 1 {
|
||||
return errors.New("Can't find any bcache device")
|
||||
}
|
||||
for _, bdev := range bdevs {
|
||||
if restrictDevs {
|
||||
bcacheDev := getTags(bdev)["bcache_dev"]
|
||||
if !bcacheDevsChecked[bcacheDev] {
|
||||
continue
|
||||
}
|
||||
}
|
||||
b.gatherBcache(bdev, acc)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
inputs.Add("bcache", func() telegraf.Input {
|
||||
return &Bcache{}
|
||||
})
|
||||
}
|
||||
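A side note on the conversion above (my sketch, not part of the diff): because `prettyToBytes` parses the numeric part with `strconv.ParseFloat(v, 32)`, large values are rounded to float32 precision. That is why the test fixture below expects `5167704440832` for `"4.7T"` rather than the exact 4.7 * 2^40 ≈ 5167704650547:

```go
package main

import (
	"fmt"
	"strconv"
)

// Standalone illustration of the float32 rounding in prettyToBytes.
func main() {
	f, _ := strconv.ParseFloat("4.7", 32) // nearest float32: 4.699999809265137
	fmt.Println(uint64(f * (1 << 40)))    // 5167704440832, matching the test below
}
```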
121
plugins/inputs/bcache/bcache_test.go
Normal file
@@ -0,0 +1,121 @@
package bcache

import (
	"io/ioutil"
	"os"
	"testing"

	"github.com/influxdata/telegraf/testutil"
	"github.com/stretchr/testify/require"
)

const (
	dirty_data            = "1.5G"
	bypassed              = "4.7T"
	cache_bypass_hits     = "146155333"
	cache_bypass_misses   = "0"
	cache_hit_ratio       = "90"
	cache_hits            = "511469583"
	cache_miss_collisions = "157567"
	cache_misses          = "50616331"
	cache_readaheads      = "2"
)

var (
	testBcachePath           = os.TempDir() + "/telegraf/sys/fs/bcache"
	testBcacheUuidPath       = testBcachePath + "/663955a3-765a-4737-a9fd-8250a7a78411"
	testBcacheDevPath        = os.TempDir() + "/telegraf/sys/devices/virtual/block/bcache0"
	testBcacheBackingDevPath = os.TempDir() + "/telegraf/sys/devices/virtual/block/md10"
)

func TestBcacheGeneratesMetrics(t *testing.T) {
	err := os.MkdirAll(testBcacheUuidPath, 0755)
	require.NoError(t, err)

	err = os.MkdirAll(testBcacheDevPath, 0755)
	require.NoError(t, err)

	err = os.MkdirAll(testBcacheBackingDevPath+"/bcache", 0755)
	require.NoError(t, err)

	err = os.Symlink(testBcacheBackingDevPath+"/bcache", testBcacheUuidPath+"/bdev0")
	require.NoError(t, err)

	err = os.Symlink(testBcacheDevPath, testBcacheUuidPath+"/bdev0/dev")
	require.NoError(t, err)

	err = os.MkdirAll(testBcacheUuidPath+"/bdev0/stats_total", 0755)
	require.NoError(t, err)

	err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/dirty_data",
		[]byte(dirty_data), 0644)
	require.NoError(t, err)

	err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/bypassed",
		[]byte(bypassed), 0644)
	require.NoError(t, err)

	err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_bypass_hits",
		[]byte(cache_bypass_hits), 0644)
	require.NoError(t, err)

	err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_bypass_misses",
		[]byte(cache_bypass_misses), 0644)
	require.NoError(t, err)

	err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_hit_ratio",
		[]byte(cache_hit_ratio), 0644)
	require.NoError(t, err)

	err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_hits",
		[]byte(cache_hits), 0644)
	require.NoError(t, err)

	err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_miss_collisions",
		[]byte(cache_miss_collisions), 0644)
	require.NoError(t, err)

	err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_misses",
		[]byte(cache_misses), 0644)
	require.NoError(t, err)

	err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_readaheads",
		[]byte(cache_readaheads), 0644)
	require.NoError(t, err)

	fields := map[string]interface{}{
		"dirty_data":            uint64(1610612736),
		"bypassed":              uint64(5167704440832),
		"cache_bypass_hits":     uint64(146155333),
		"cache_bypass_misses":   uint64(0),
		"cache_hit_ratio":       uint64(90),
		"cache_hits":            uint64(511469583),
		"cache_miss_collisions": uint64(157567),
		"cache_misses":          uint64(50616331),
		"cache_readaheads":      uint64(2),
	}

	tags := map[string]string{
		"backing_dev": "md10",
		"bcache_dev":  "bcache0",
	}

	var acc testutil.Accumulator

	// all devs
	b := &Bcache{BcachePath: testBcachePath}

	err = b.Gather(&acc)
	require.NoError(t, err)
	acc.AssertContainsTaggedFields(t, "bcache", fields, tags)

	// one existing dev
	b = &Bcache{BcachePath: testBcachePath, BcacheDevs: []string{"bcache0"}}

	err = b.Gather(&acc)
	require.NoError(t, err)
	acc.AssertContainsTaggedFields(t, "bcache", fields, tags)

	err = os.RemoveAll(os.TempDir() + "/telegraf")
	require.NoError(t, err)
}
125
plugins/inputs/cassandra/README.md
Normal file
@@ -0,0 +1,125 @@
# Telegraf plugin: Cassandra

#### Plugin arguments:
- **context** string: Context root used for the jolokia url
- **servers** []string: List of servers with the format "<user:passwd@><host>:port"
- **metrics** []string: List of JMX paths that identify MBean attributes

#### Description

The Cassandra plugin collects Cassandra/JVM metrics exposed as MBean attributes through the jolokia REST endpoint. All metrics are collected for each server configured.

See: https://jolokia.org/ and [Cassandra Documentation](http://docs.datastax.com/en/cassandra/3.x/cassandra/operations/monitoringCassandraTOC.html)

# Measurements:
The Cassandra plugin produces one or more measurements for each metric configured, adding the server's name as the `host` tag. More than one measurement is generated when querying table metrics with a wildcard for the keyspace or table name.

Given a configuration like:

```toml
[[inputs.cassandra]]
  context = "/jolokia/read"
  servers = [":8778"]
  metrics = ["/java.lang:type=Memory/HeapMemoryUsage"]
```

The collected metrics will be:

```
javaMemory,host=myHost,mname=HeapMemoryUsage HeapMemoryUsage_committed=1040187392,HeapMemoryUsage_init=1050673152,HeapMemoryUsage_max=1040187392,HeapMemoryUsage_used=368155000 1459551767230567084
```

# Useful Metrics:

Here is a list of metrics that might be useful for monitoring your Cassandra cluster. It was put together from multiple sources on the web.

- [How to monitor Cassandra performance metrics](https://www.datadoghq.com/blog/how-to-monitor-cassandra-performance-metrics)
- [Cassandra Documentation](http://docs.datastax.com/en/cassandra/3.x/cassandra/operations/monitoringCassandraTOC.html)

#### measurement = javaGarbageCollector

- /java.lang:type=GarbageCollector,name=ConcurrentMarkSweep/CollectionTime
- /java.lang:type=GarbageCollector,name=ConcurrentMarkSweep/CollectionCount
- /java.lang:type=GarbageCollector,name=ParNew/CollectionTime
- /java.lang:type=GarbageCollector,name=ParNew/CollectionCount

#### measurement = javaMemory

- /java.lang:type=Memory/HeapMemoryUsage
- /java.lang:type=Memory/NonHeapMemoryUsage

#### measurement = cassandraCache

- /org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=Hit
- /org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=Requests
- /org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=Entries
- /org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=Size
- /org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=Capacity
- /org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=Hit
- /org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=Requests
- /org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=Entries
- /org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=Size
- /org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=Capacity

#### measurement = cassandraClient

- /org.apache.cassandra.metrics:type=Client,name=connectedNativeClients

#### measurement = cassandraClientRequest

- /org.apache.cassandra.metrics:type=ClientRequest,scope=Read,name=TotalLatency
- /org.apache.cassandra.metrics:type=ClientRequest,scope=Write,name=TotalLatency
- /org.apache.cassandra.metrics:type=ClientRequest,scope=Read,name=Latency
- /org.apache.cassandra.metrics:type=ClientRequest,scope=Write,name=Latency
- /org.apache.cassandra.metrics:type=ClientRequest,scope=Read,name=Timeouts
- /org.apache.cassandra.metrics:type=ClientRequest,scope=Write,name=Timeouts
- /org.apache.cassandra.metrics:type=ClientRequest,scope=Read,name=Unavailables
- /org.apache.cassandra.metrics:type=ClientRequest,scope=Write,name=Unavailables
- /org.apache.cassandra.metrics:type=ClientRequest,scope=Read,name=Failures
- /org.apache.cassandra.metrics:type=ClientRequest,scope=Write,name=Failures

#### measurement = cassandraCommitLog

- /org.apache.cassandra.metrics:type=CommitLog,name=PendingTasks
- /org.apache.cassandra.metrics:type=CommitLog,name=TotalCommitLogSize

#### measurement = cassandraCompaction

- /org.apache.cassandra.metrics:type=Compaction,name=CompletedTask
- /org.apache.cassandra.metrics:type=Compaction,name=PendingTasks
- /org.apache.cassandra.metrics:type=Compaction,name=TotalCompactionsCompleted
- /org.apache.cassandra.metrics:type=Compaction,name=BytesCompacted

#### measurement = cassandraStorage

- /org.apache.cassandra.metrics:type=Storage,name=Load
- /org.apache.cassandra.metrics:type=Storage,name=Exceptions

#### measurement = cassandraTable
Using wildcards for "keyspace" and "scope" can create a lot of series, as metrics will be reported for every table and keyspace, including internal system tables. Specify a keyspace name and/or a table name to limit them; a restricted example follows this list.

- /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=LiveDiskSpaceUsed
- /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=TotalDiskSpaceUsed
- /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=ReadLatency
- /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=CoordinatorReadLatency
- /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=WriteLatency
- /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=ReadTotalLatency
- /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=WriteTotalLatency
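As a hedged illustration (the keyspace and table names below are placeholders, not from the original README), restricting the wildcard to a single keyspace and table looks like:

```toml
[[inputs.cassandra]]
  context = "/jolokia/read"
  servers = [":8778"]
  metrics = [
    "/org.apache.cassandra.metrics:type=Table,keyspace=my_keyspace,scope=my_table,name=ReadLatency"
  ]
```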
#### measurement = cassandraThreadPools

- /org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=CompactionExecutor,name=ActiveTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=AntiEntropyStage,name=ActiveTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=CounterMutationStage,name=PendingTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=CounterMutationStage,name=CurrentlyBlockedTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=MutationStage,name=PendingTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=MutationStage,name=CurrentlyBlockedTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=ReadRepairStage,name=PendingTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=ReadRepairStage,name=CurrentlyBlockedTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=ReadStage,name=PendingTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=ReadStage,name=CurrentlyBlockedTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=RequestResponseStage,name=PendingTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=RequestResponseStage,name=CurrentlyBlockedTasks
309
plugins/inputs/cassandra/cassandra.go
Normal file
@@ -0,0 +1,309 @@
package cassandra

import (
	"encoding/json"
	"errors"
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
	"net/url"
	"strings"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/inputs"
)

type JolokiaClient interface {
	MakeRequest(req *http.Request) (*http.Response, error)
}

type JolokiaClientImpl struct {
	client *http.Client
}

func (c JolokiaClientImpl) MakeRequest(req *http.Request) (*http.Response, error) {
	return c.client.Do(req)
}

type Cassandra struct {
	jClient JolokiaClient
	Context string
	Servers []string
	Metrics []string
}

type javaMetric struct {
	host   string
	metric string
	acc    telegraf.Accumulator
}

type cassandraMetric struct {
	host   string
	metric string
	acc    telegraf.Accumulator
}

type jmxMetric interface {
	addTagsFields(out map[string]interface{})
}

func newJavaMetric(host string, metric string,
	acc telegraf.Accumulator) *javaMetric {
	return &javaMetric{host: host, metric: metric, acc: acc}
}

func newCassandraMetric(host string, metric string,
	acc telegraf.Accumulator) *cassandraMetric {
	return &cassandraMetric{host: host, metric: metric, acc: acc}
}

func addValuesAsFields(values map[string]interface{}, fields map[string]interface{},
	mname string) {
	for k, v := range values {
		if v != nil {
			fields[mname+"_"+k] = v
		}
	}
}

// parseJmxMetricRequest splits an mbean name such as
// "org.apache.cassandra.metrics:type=Table,keyspace=ks,..." into tokens.
func parseJmxMetricRequest(mbean string) map[string]string {
	tokens := make(map[string]string)
	classAndPairs := strings.Split(mbean, ":")
	if classAndPairs[0] == "org.apache.cassandra.metrics" {
		tokens["class"] = "cassandra"
	} else if classAndPairs[0] == "java.lang" {
		tokens["class"] = "java"
	} else {
		return tokens
	}
	pairs := strings.Split(classAndPairs[1], ",")
	for _, pair := range pairs {
		p := strings.Split(pair, "=")
		tokens[p[0]] = p[1]
	}
	return tokens
}
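
// Example for parseJmxMetricRequest above (illustrative, hypothetical values):
// parseJmxMetricRequest("org.apache.cassandra.metrics:keyspace=ks1,name=ReadLatency,scope=t1,type=Table")
// returns {"class": "cassandra", "keyspace": "ks1", "name": "ReadLatency",
// "scope": "t1", "type": "Table"}.
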
func addTokensToTags(tokens map[string]string, tags map[string]string) {
	for k, v := range tokens {
		if k == "name" {
			tags["mname"] = v // "name" seems to be a reserved word in influxdb
		} else if k == "class" || k == "type" {
			continue // class and type are used in the metric name
		} else {
			tags[k] = v
		}
	}
}

func (j javaMetric) addTagsFields(out map[string]interface{}) {
	tags := make(map[string]string)
	fields := make(map[string]interface{})

	a := out["request"].(map[string]interface{})
	attribute := a["attribute"].(string)
	mbean := a["mbean"].(string)

	tokens := parseJmxMetricRequest(mbean)
	addTokensToTags(tokens, tags)
	tags["cassandra_host"] = j.host

	if _, ok := tags["mname"]; !ok {
		// Queries for a single value will not return a "name" tag in the response.
		tags["mname"] = attribute
	}

	if values, ok := out["value"]; ok {
		switch t := values.(type) {
		case map[string]interface{}:
			addValuesAsFields(t, fields, attribute)
		case interface{}:
			fields[attribute] = t
		}
		j.acc.AddFields(tokens["class"]+tokens["type"], fields, tags)
	} else {
		fmt.Printf("Missing key 'value' in '%s' output response\n%v\n",
			j.metric, out)
	}
}

func addCassandraMetric(mbean string, c cassandraMetric,
	values map[string]interface{}) {
	tags := make(map[string]string)
	fields := make(map[string]interface{})
	tokens := parseJmxMetricRequest(mbean)
	addTokensToTags(tokens, tags)
	tags["cassandra_host"] = c.host
	addValuesAsFields(values, fields, tags["mname"])
	c.acc.AddFields(tokens["class"]+tokens["type"], fields, tags)
}

func (c cassandraMetric) addTagsFields(out map[string]interface{}) {
	r := out["request"]

	tokens := parseJmxMetricRequest(r.(map[string]interface{})["mbean"].(string))
	// Requests with wildcards for keyspace or table names will return nested
	// maps in the json response.
	if tokens["type"] == "Table" && (tokens["keyspace"] == "*" ||
		tokens["scope"] == "*") {
		if valuesMap, ok := out["value"]; ok {
			for k, v := range valuesMap.(map[string]interface{}) {
				addCassandraMetric(k, c, v.(map[string]interface{}))
			}
		} else {
			fmt.Printf("Missing key 'value' in '%s' output response\n%v\n",
				c.metric, out)
			return
		}
	} else {
		if values, ok := out["value"]; ok {
			addCassandraMetric(r.(map[string]interface{})["mbean"].(string),
				c, values.(map[string]interface{}))
		} else {
			fmt.Printf("Missing key 'value' in '%s' output response\n%v\n",
				c.metric, out)
			return
		}
	}
}

func (j *Cassandra) SampleConfig() string {
	return `
  ## This is the context root used to compose the jolokia url
  context = "/jolokia/read"
  ## List of cassandra servers exposing the jolokia read service
  servers = ["myuser:mypassword@10.10.10.1:8778","10.10.10.2:8778",":8778"]
  ## List of metrics collected on the above servers
  ## Each metric consists of a jmx path.
  ## This will collect all heap memory usage metrics from the jvm and
  ## ReadLatency metrics for all keyspaces and tables.
  ## "type=Table" in the query works with Cassandra 3.0. Older versions might
  ## need to use "type=ColumnFamily"
  metrics = [
    "/java.lang:type=Memory/HeapMemoryUsage",
    "/org.apache.cassandra.metrics:type=Table,keyspace=*,scope=*,name=ReadLatency"
  ]
`
}

func (j *Cassandra) Description() string {
	return "Read Cassandra metrics through Jolokia"
}

func (j *Cassandra) getAttr(requestUrl *url.URL) (map[string]interface{}, error) {
	// Create + send request
	req, err := http.NewRequest("GET", requestUrl.String(), nil)
	if err != nil {
		return nil, err
	}

	resp, err := j.jClient.MakeRequest(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	// Process response
	if resp.StatusCode != http.StatusOK {
		err = fmt.Errorf("Response from url \"%s\" has status code %d (%s), expected %d (%s)",
			requestUrl,
			resp.StatusCode,
			http.StatusText(resp.StatusCode),
			http.StatusOK,
			http.StatusText(http.StatusOK))
		return nil, err
	}

	// Read the body
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}

	// Unmarshal json
	var jsonOut map[string]interface{}
	if err = json.Unmarshal(body, &jsonOut); err != nil {
		return nil, errors.New("Error decoding JSON response")
	}

	return jsonOut, nil
}

// parseServerTokens splits a "<user:passwd@><host>:port" server string into
// its host, port, user, and passwd components.
func parseServerTokens(server string) map[string]string {
	serverTokens := make(map[string]string)

	hostAndUser := strings.Split(server, "@")
	hostPort := ""
	userPasswd := ""
	if len(hostAndUser) == 2 {
		hostPort = hostAndUser[1]
		userPasswd = hostAndUser[0]
	} else {
		hostPort = hostAndUser[0]
	}
	hostTokens := strings.Split(hostPort, ":")
	serverTokens["host"] = hostTokens[0]
	serverTokens["port"] = hostTokens[1]

	if userPasswd != "" {
		userTokens := strings.Split(userPasswd, ":")
		serverTokens["user"] = userTokens[0]
		serverTokens["passwd"] = userTokens[1]
	}
	return serverTokens
}
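
// Example for parseServerTokens above (illustrative, hypothetical values):
// parseServerTokens("user:passwd@10.0.0.1:8778") returns
// {"user": "user", "passwd": "passwd", "host": "10.0.0.1", "port": "8778"};
// parseServerTokens(":8778") returns {"host": "", "port": "8778"}.
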
func (c *Cassandra) Gather(acc telegraf.Accumulator) error {
	context := c.Context
	servers := c.Servers
	metrics := c.Metrics

	for _, server := range servers {
		for _, metric := range metrics {
			serverTokens := parseServerTokens(server)

			var m jmxMetric
			if strings.HasPrefix(metric, "/java.lang:") {
				m = newJavaMetric(serverTokens["host"], metric, acc)
			} else if strings.HasPrefix(metric,
				"/org.apache.cassandra.metrics:") {
				m = newCassandraMetric(serverTokens["host"], metric, acc)
			} else {
				// unsupported metric type
				log.Printf("Unsupported Cassandra metric [%s], skipping",
					metric)
				continue
			}

			// Prepare URL
			requestUrl, err := url.Parse("http://" + serverTokens["host"] + ":" +
				serverTokens["port"] + context + metric)
			if err != nil {
				return err
			}
			if serverTokens["user"] != "" && serverTokens["passwd"] != "" {
				requestUrl.User = url.UserPassword(serverTokens["user"],
					serverTokens["passwd"])
			}
			fmt.Printf("host %s url %s\n", serverTokens["host"], requestUrl)

			// Errors from getAttr are deliberately not fatal: a failed
			// request leaves out nil, fails the status check below, and the
			// metric is skipped (see the 404 test).
			out, err := c.getAttr(requestUrl)
			if out["status"] != 200.0 {
				fmt.Printf("URL returned with status %v\n", out["status"])
				continue
			}
			m.addTagsFields(out)
		}
	}
	return nil
}

func init() {
	inputs.Add("cassandra", func() telegraf.Input {
		return &Cassandra{jClient: &JolokiaClientImpl{client: &http.Client{}}}
	})
}
286
plugins/inputs/cassandra/cassandra_test.go
Normal file
@@ -0,0 +1,286 @@
package cassandra

import (
	"io/ioutil"
	"net/http"
	"strings"
	"testing"

	"github.com/influxdata/telegraf/testutil"
	"github.com/stretchr/testify/assert"
)

const validJavaMultiValueJSON = `
{
  "request":{
    "mbean":"java.lang:type=Memory",
    "attribute":"HeapMemoryUsage",
    "type":"read"
  },
  "value":{
    "init":67108864,
    "committed":456130560,
    "max":477626368,
    "used":203288528
  },
  "timestamp":1446129191,
  "status":200
}`

const validCassandraMultiValueJSON = `
{
  "request": {
    "mbean": "org.apache.cassandra.metrics:keyspace=test_keyspace1,name=ReadLatency,scope=test_table,type=Table",
    "type": "read"},
  "status": 200,
  "timestamp": 1458089229,
  "value": {
    "999thPercentile": 20.0,
    "99thPercentile": 10.0,
    "Count": 400,
    "DurationUnit": "microseconds",
    "Max": 30.0,
    "Mean": null,
    "MeanRate": 3.0,
    "Min": 1.0,
    "RateUnit": "events/second",
    "StdDev": null
  }
}`

const validCassandraNestedMultiValueJSON = `
{
  "request": {
    "mbean": "org.apache.cassandra.metrics:keyspace=test_keyspace1,name=ReadLatency,scope=*,type=Table",
    "type": "read"},
  "status": 200,
  "timestamp": 1458089184,
  "value": {
    "org.apache.cassandra.metrics:keyspace=test_keyspace1,name=ReadLatency,scope=test_table1,type=Table":
      { "999thPercentile": 1.0,
        "Count": 100,
        "DurationUnit": "microseconds",
        "OneMinuteRate": 1.0,
        "RateUnit": "events/second",
        "StdDev": null
      },
    "org.apache.cassandra.metrics:keyspace=test_keyspace2,name=ReadLatency,scope=test_table2,type=Table":
      { "999thPercentile": 2.0,
        "Count": 200,
        "DurationUnit": "microseconds",
        "OneMinuteRate": 2.0,
        "RateUnit": "events/second",
        "StdDev": null
      }
  }
}`

const validSingleValueJSON = `
{
  "request":{
    "path":"used",
    "mbean":"java.lang:type=Memory",
    "attribute":"HeapMemoryUsage",
    "type":"read"
  },
  "value":209274376,
  "timestamp":1446129256,
  "status":200
}`

const validJavaMultiTypeJSON = `
{
  "request":{
    "mbean":"java.lang:name=ConcurrentMarkSweep,type=GarbageCollector",
    "attribute":"CollectionCount",
    "type":"read"
  },
  "value":1,
  "timestamp":1459316570,
  "status":200
}`

const invalidJSON = "I don't think this is JSON"

const empty = ""

var Servers = []string{"10.10.10.10:8778"}
var AuthServers = []string{"user:passwd@10.10.10.10:8778"}
var MultipleServers = []string{"10.10.10.10:8778", "10.10.10.11:8778"}
var HeapMetric = "/java.lang:type=Memory/HeapMemoryUsage"
var ReadLatencyMetric = "/org.apache.cassandra.metrics:type=Table,keyspace=test_keyspace1,scope=test_table,name=ReadLatency"
var NestedReadLatencyMetric = "/org.apache.cassandra.metrics:type=Table,keyspace=test_keyspace1,scope=*,name=ReadLatency"
var GarbageCollectorMetric1 = "/java.lang:type=GarbageCollector,name=ConcurrentMarkSweep/CollectionCount"
var GarbageCollectorMetric2 = "/java.lang:type=GarbageCollector,name=ConcurrentMarkSweep/CollectionTime"
var Context = "/jolokia/read"

type jolokiaClientStub struct {
	responseBody string
	statusCode   int
}

func (c jolokiaClientStub) MakeRequest(req *http.Request) (*http.Response, error) {
	resp := http.Response{}
	resp.StatusCode = c.statusCode
	resp.Body = ioutil.NopCloser(strings.NewReader(c.responseBody))
	return &resp, nil
}

// genJolokiaClientStub generates a pointer to a Cassandra object that uses a
// mock Jolokia HTTP client.
// Parameters:
//     response  : Body of the response that the mock HTTP client should return
//     statusCode: HTTP status code the mock HTTP client should return
//
// Returns:
//     *Cassandra: Pointer to a Cassandra object that uses the generated mock HTTP client
func genJolokiaClientStub(response string, statusCode int, servers []string, metrics []string) *Cassandra {
	return &Cassandra{
		jClient: jolokiaClientStub{responseBody: response, statusCode: statusCode},
		Context: Context,
		Servers: servers,
		Metrics: metrics,
	}
}

// Test that the proper values are ignored or collected for class=Java
func TestHttpJsonJavaMultiValue(t *testing.T) {
	cassandra := genJolokiaClientStub(validJavaMultiValueJSON, 200,
		MultipleServers, []string{HeapMetric})

	var acc testutil.Accumulator
	acc.SetDebug(true)
	err := cassandra.Gather(&acc)

	assert.Nil(t, err)
	assert.Equal(t, 2, len(acc.Metrics))

	fields := map[string]interface{}{
		"HeapMemoryUsage_init":      67108864.0,
		"HeapMemoryUsage_committed": 456130560.0,
		"HeapMemoryUsage_max":       477626368.0,
		"HeapMemoryUsage_used":      203288528.0,
	}
	tags1 := map[string]string{
		"cassandra_host": "10.10.10.10",
		"mname":          "HeapMemoryUsage",
	}

	tags2 := map[string]string{
		"cassandra_host": "10.10.10.11",
		"mname":          "HeapMemoryUsage",
	}
	acc.AssertContainsTaggedFields(t, "javaMemory", fields, tags1)
	acc.AssertContainsTaggedFields(t, "javaMemory", fields, tags2)
}

func TestHttpJsonJavaMultiType(t *testing.T) {
	cassandra := genJolokiaClientStub(validJavaMultiTypeJSON, 200, AuthServers, []string{GarbageCollectorMetric1, GarbageCollectorMetric2})

	var acc testutil.Accumulator
	acc.SetDebug(true)
	err := cassandra.Gather(&acc)

	assert.Nil(t, err)
	assert.Equal(t, 2, len(acc.Metrics))

	fields := map[string]interface{}{
		"CollectionCount": 1.0,
	}

	tags := map[string]string{
		"cassandra_host": "10.10.10.10",
		"mname":          "ConcurrentMarkSweep",
	}
	acc.AssertContainsTaggedFields(t, "javaGarbageCollector", fields, tags)
}

// Test that a 404 response produces no accumulated metrics and no error
func TestHttpJsonOn404(t *testing.T) {
	jolokia := genJolokiaClientStub(validJavaMultiValueJSON, 404, Servers,
		[]string{HeapMetric})

	var acc testutil.Accumulator
	err := jolokia.Gather(&acc)

	assert.Nil(t, err)
	assert.Equal(t, 0, len(acc.Metrics))
}

// Test that the proper values are ignored or collected for class=Cassandra
func TestHttpJsonCassandraMultiValue(t *testing.T) {
	cassandra := genJolokiaClientStub(validCassandraMultiValueJSON, 200, Servers, []string{ReadLatencyMetric})

	var acc testutil.Accumulator
	err := cassandra.Gather(&acc)

	assert.Nil(t, err)
	assert.Equal(t, 1, len(acc.Metrics))

	fields := map[string]interface{}{
		"ReadLatency_999thPercentile": 20.0,
		"ReadLatency_99thPercentile":  10.0,
		"ReadLatency_Count":           400.0,
		"ReadLatency_DurationUnit":    "microseconds",
		"ReadLatency_Max":             30.0,
		"ReadLatency_MeanRate":        3.0,
		"ReadLatency_Min":             1.0,
		"ReadLatency_RateUnit":        "events/second",
	}

	tags := map[string]string{
		"cassandra_host": "10.10.10.10",
		"mname":          "ReadLatency",
		"keyspace":       "test_keyspace1",
		"scope":          "test_table",
	}
	acc.AssertContainsTaggedFields(t, "cassandraTable", fields, tags)
}

// Test that the proper values are ignored or collected for class=Cassandra with
// nested values
func TestHttpJsonCassandraNestedMultiValue(t *testing.T) {
	cassandra := genJolokiaClientStub(validCassandraNestedMultiValueJSON, 200, Servers, []string{NestedReadLatencyMetric})

	var acc testutil.Accumulator
	acc.SetDebug(true)
	err := cassandra.Gather(&acc)

	assert.Nil(t, err)
	assert.Equal(t, 2, len(acc.Metrics))

	fields1 := map[string]interface{}{
		"ReadLatency_999thPercentile": 1.0,
		"ReadLatency_Count":           100.0,
		"ReadLatency_DurationUnit":    "microseconds",
		"ReadLatency_OneMinuteRate":   1.0,
		"ReadLatency_RateUnit":        "events/second",
	}

	fields2 := map[string]interface{}{
		"ReadLatency_999thPercentile": 2.0,
		"ReadLatency_Count":           200.0,
		"ReadLatency_DurationUnit":    "microseconds",
		"ReadLatency_OneMinuteRate":   2.0,
		"ReadLatency_RateUnit":        "events/second",
	}

	tags1 := map[string]string{
		"cassandra_host": "10.10.10.10",
		"mname":          "ReadLatency",
		"keyspace":       "test_keyspace1",
		"scope":          "test_table1",
	}

	tags2 := map[string]string{
		"cassandra_host": "10.10.10.10",
		"mname":          "ReadLatency",
		"keyspace":       "test_keyspace2",
		"scope":          "test_table2",
	}

	acc.AssertContainsTaggedFields(t, "cassandraTable", fields1, tags1)
	acc.AssertContainsTaggedFields(t, "cassandraTable", fields2, tags2)
}
86
plugins/inputs/cloudwatch/README.md
Normal file
@@ -0,0 +1,86 @@
# Amazon CloudWatch Statistics Input

This plugin will pull Metric Statistics from Amazon CloudWatch.

### Amazon Authentication

This plugin uses a credential chain for Authentication with the CloudWatch
API endpoint. The plugin will attempt to authenticate in the following order:
1. [IAM Role](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html)
2. [Environment Variables](https://github.com/aws/aws-sdk-go/wiki/configuring-sdk#environment-variables)
3. [Shared Credentials](https://github.com/aws/aws-sdk-go/wiki/configuring-sdk#shared-credentials-file)

### Configuration:

```toml
[[inputs.cloudwatch]]
  ## Amazon Region (required)
  region = 'us-east-1'

  ## Requested CloudWatch aggregation Period (required - must be a multiple of 60s)
  period = '1m'

  ## Collection Delay (required - must account for metrics availability via CloudWatch API)
  delay = '1m'

  ## Override global run interval (optional - defaults to global interval)
  ## Recommended: use a metric 'interval' that is a multiple of 'period' to avoid
  ## gaps or overlap in pulled data
  interval = '1m'

  ## Metric Statistic Namespace (required)
  namespace = 'AWS/ELB'

  ## Metrics to Pull (optional)
  ## Defaults to all Metrics in Namespace if nothing is provided
  ## Refreshes Namespace available metrics every 1h
  [[inputs.cloudwatch.metrics]]
    names = ['Latency', 'RequestCount']

    ## Dimension filters for Metric (optional)
    [[inputs.cloudwatch.metrics.dimensions]]
      name = 'LoadBalancerName'
      value = 'p-example'
```
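The Go source below also accepts explicit credentials via `access_key` and `secret_key`; a minimal hedged sketch of combining them with the required options (values are placeholders):

```toml
[[inputs.cloudwatch]]
  region = 'us-east-1'
  ## Explicit credentials (optional); when unset, the chain above is used
  # access_key = ""
  # secret_key = ""
  period = '1m'
  delay = '1m'
  namespace = 'AWS/ELB'
```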
#### Requirements and Terminology

Plugin Configuration utilizes [CloudWatch concepts](http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html) and access patterns to allow monitoring of any CloudWatch Metric.

- `region` must be a valid AWS [Region](http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#CloudWatchRegions) value
- `period` must be a valid CloudWatch [Period](http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#CloudWatchPeriods) value
- `namespace` must be a valid CloudWatch [Namespace](http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#Namespace) value
- `names` must be valid CloudWatch [Metric](http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#Metric) names
- `dimensions` must be valid CloudWatch [Dimension](http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#Dimension) name/value pairs

#### Restrictions and Limitations
- CloudWatch metrics are not available instantly via the CloudWatch API. You should adjust your collection `delay` to account for this lag in metrics availability based on your [monitoring subscription level](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-cloudwatch-new.html)
- CloudWatch API usage incurs cost - see [GetMetricStatistics Pricing](https://aws.amazon.com/cloudwatch/pricing/)

### Measurements & Fields:

Each CloudWatch Namespace monitored records a measurement with fields for each available Metric Statistic.
Namespace and Metrics are represented in [snake case](https://en.wikipedia.org/wiki/Snake_case)

- cloudwatch_{namespace}
  - {metric}_sum (metric Sum value)
  - {metric}_average (metric Average value)
  - {metric}_minimum (metric Minimum value)
  - {metric}_maximum (metric Maximum value)
  - {metric}_sample_count (metric SampleCount value)

### Tags:
Each measurement is tagged with the following identifiers to uniquely identify the associated metric.
Tag Dimension names are represented in [snake case](https://en.wikipedia.org/wiki/Snake_case)

- All measurements have the following tags:
  - region (CloudWatch Region)
  - unit (CloudWatch Metric Unit)
  - {dimension-name} (CloudWatch Dimension value - one for each metric dimension)

### Example Output:

```
$ ./telegraf -config telegraf.conf -input-filter cloudwatch -test
> cloudwatch_aws_elb,load_balancer_name=p-example,region=us-east-1,unit=seconds latency_average=0.004810798017284538,latency_maximum=0.1100282669067383,latency_minimum=0.0006084442138671875,latency_sample_count=4029,latency_sum=19.382705211639404 1459542420000000000
```
311
plugins/inputs/cloudwatch/cloudwatch.go
Normal file
@@ -0,0 +1,311 @@
package cloudwatch

import (
	"fmt"
	"strings"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"

	"github.com/aws/aws-sdk-go/service/cloudwatch"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/internal"
	"github.com/influxdata/telegraf/plugins/inputs"
)

type (
	CloudWatch struct {
		Region      string            `toml:"region"`
		AccessKey   string            `toml:"access_key"`
		SecretKey   string            `toml:"secret_key"`
		Period      internal.Duration `toml:"period"`
		Delay       internal.Duration `toml:"delay"`
		Namespace   string            `toml:"namespace"`
		Metrics     []*Metric         `toml:"metrics"`
		client      cloudwatchClient
		metricCache *MetricCache
	}

	Metric struct {
		MetricNames []string     `toml:"names"`
		Dimensions  []*Dimension `toml:"dimensions"`
	}

	Dimension struct {
		Name  string `toml:"name"`
		Value string `toml:"value"`
	}

	MetricCache struct {
		TTL     time.Duration
		Fetched time.Time
		Metrics []*cloudwatch.Metric
	}

	cloudwatchClient interface {
		ListMetrics(*cloudwatch.ListMetricsInput) (*cloudwatch.ListMetricsOutput, error)
		GetMetricStatistics(*cloudwatch.GetMetricStatisticsInput) (*cloudwatch.GetMetricStatisticsOutput, error)
	}
)

func (c *CloudWatch) SampleConfig() string {
	return `
  ## Amazon Region
  region = 'us-east-1'

  ## Amazon Credentials
  ## Credentials are loaded in the following order
  ## 1) explicit credentials from 'access_key' and 'secret_key'
  ## 2) environment variables
  ## 3) shared credentials file
  ## 4) EC2 Instance Profile
  #access_key = ""
  #secret_key = ""

  ## Requested CloudWatch aggregation Period (required - must be a multiple of 60s)
  period = '1m'

  ## Collection Delay (required - must account for metrics availability via CloudWatch API)
  delay = '1m'

  ## Recommended: use a metric 'interval' that is a multiple of 'period' to avoid
  ## gaps or overlap in pulled data
  interval = '1m'

  ## Metric Statistic Namespace (required)
  namespace = 'AWS/ELB'

  ## Metrics to Pull (optional)
  ## Defaults to all Metrics in Namespace if nothing is provided
  ## Refreshes Namespace available metrics every 1h
  #[[inputs.cloudwatch.metrics]]
  #  names = ['Latency', 'RequestCount']
  #
  #  ## Dimension filters for Metric (optional)
  #  [[inputs.cloudwatch.metrics.dimensions]]
  #    name = 'LoadBalancerName'
  #    value = 'p-example'
`
}

func (c *CloudWatch) Description() string {
	return "Pull Metric Statistics from Amazon CloudWatch"
}

func (c *CloudWatch) Gather(acc telegraf.Accumulator) error {
	if c.client == nil {
		c.initializeCloudWatch()
	}

	var metrics []*cloudwatch.Metric

	// check for provided metric filter
	if c.Metrics != nil {
		metrics = []*cloudwatch.Metric{}
		for _, m := range c.Metrics {
			dimensions := make([]*cloudwatch.Dimension, len(m.Dimensions))
			for k, d := range m.Dimensions {
				dimensions[k] = &cloudwatch.Dimension{
					Name:  aws.String(d.Name),
					Value: aws.String(d.Value),
				}
			}
			for _, name := range m.MetricNames {
				metrics = append(metrics, &cloudwatch.Metric{
					Namespace:  aws.String(c.Namespace),
					MetricName: aws.String(name),
					Dimensions: dimensions,
				})
			}
		}
	} else {
		var err error
		metrics, err = c.fetchNamespaceMetrics()
		if err != nil {
			return err
		}
	}

	metricCount := len(metrics)
	var errChan = make(chan error, metricCount)

	now := time.Now()

	// limit concurrency or we can easily exhaust user connection limit
	semaphore := make(chan byte, 64)

	for _, m := range metrics {
		semaphore <- 0x1
		go c.gatherMetric(acc, m, now, semaphore, errChan)
	}

	for i := 1; i <= metricCount; i++ {
		err := <-errChan
		if err != nil {
			return err
		}
	}
	return nil
}

func init() {
	inputs.Add("cloudwatch", func() telegraf.Input {
		return &CloudWatch{}
	})
}

/*
 * Initialize CloudWatch client
 */
func (c *CloudWatch) initializeCloudWatch() error {
	config := &aws.Config{
		Region: aws.String(c.Region),
	}
	if c.AccessKey != "" || c.SecretKey != "" {
		config.Credentials = credentials.NewStaticCredentials(c.AccessKey, c.SecretKey, "")
	}

	c.client = cloudwatch.New(session.New(config))
	return nil
}

/*
 * Fetch available metrics for given CloudWatch Namespace
 */
func (c *CloudWatch) fetchNamespaceMetrics() (metrics []*cloudwatch.Metric, err error) {
	if c.metricCache != nil && c.metricCache.IsValid() {
		metrics = c.metricCache.Metrics
		return
	}

	metrics = []*cloudwatch.Metric{}

	var token *string
	for more := true; more; {
		params := &cloudwatch.ListMetricsInput{
			Namespace:  aws.String(c.Namespace),
			Dimensions: []*cloudwatch.DimensionFilter{},
			NextToken:  token,
			MetricName: nil,
		}

		resp, err := c.client.ListMetrics(params)
		if err != nil {
			return nil, err
		}

		metrics = append(metrics, resp.Metrics...)

		token = resp.NextToken
		more = token != nil
	}

	// "1h" is the correct ParseDuration syntax; "1hr" would silently yield a
	// zero TTL and defeat the cache
	cacheTTL, _ := time.ParseDuration("1h")
	c.metricCache = &MetricCache{
		Metrics: metrics,
		Fetched: time.Now(),
		TTL:     cacheTTL,
	}

	return
}

/*
 * Gather given Metric and emit any error
 */
func (c *CloudWatch) gatherMetric(acc telegraf.Accumulator, metric *cloudwatch.Metric, now time.Time, semaphore chan byte, errChan chan error) {
	params := c.getStatisticsInput(metric, now)
	resp, err := c.client.GetMetricStatistics(params)
	if err != nil {
		errChan <- err
		<-semaphore
		return
	}

	for _, point := range resp.Datapoints {
		tags := map[string]string{
			"region": c.Region,
			"unit":   snakeCase(*point.Unit),
		}

		for _, d := range metric.Dimensions {
			tags[snakeCase(*d.Name)] = *d.Value
		}

		// record field for each statistic
		fields := map[string]interface{}{}

		if point.Average != nil {
			fields[formatField(*metric.MetricName, cloudwatch.StatisticAverage)] = *point.Average
		}
		if point.Maximum != nil {
			fields[formatField(*metric.MetricName, cloudwatch.StatisticMaximum)] = *point.Maximum
		}
		if point.Minimum != nil {
			fields[formatField(*metric.MetricName, cloudwatch.StatisticMinimum)] = *point.Minimum
		}
		if point.SampleCount != nil {
			fields[formatField(*metric.MetricName, cloudwatch.StatisticSampleCount)] = *point.SampleCount
		}
		if point.Sum != nil {
			fields[formatField(*metric.MetricName, cloudwatch.StatisticSum)] = *point.Sum
		}

		acc.AddFields(formatMeasurement(c.Namespace), fields, tags, *point.Timestamp)
	}

	errChan <- nil
	<-semaphore
}

/*
 * Formatting helpers
 */
func formatField(metricName string, statistic string) string {
	return fmt.Sprintf("%s_%s", snakeCase(metricName), snakeCase(statistic))
}

func formatMeasurement(namespace string) string {
	namespace = strings.Replace(namespace, "/", "_", -1)
	namespace = snakeCase(namespace)
	return fmt.Sprintf("cloudwatch_%s", namespace)
}

func snakeCase(s string) string {
	s = internal.SnakeCase(s)
	s = strings.Replace(s, "__", "_", -1)
	return s
}
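// Illustrative examples of the helpers above (outputs match the plugin's
// test and README): formatMeasurement("AWS/ELB") -> "cloudwatch_aws_elb";
// formatField("Latency", cloudwatch.StatisticAverage) -> "latency_average".
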
/*
 * Map Metric to *cloudwatch.GetMetricStatisticsInput for given timeframe
 */
func (c *CloudWatch) getStatisticsInput(metric *cloudwatch.Metric, now time.Time) *cloudwatch.GetMetricStatisticsInput {
	end := now.Add(-c.Delay.Duration)

	input := &cloudwatch.GetMetricStatisticsInput{
		StartTime:  aws.Time(end.Add(-c.Period.Duration)),
		EndTime:    aws.Time(end),
		MetricName: metric.MetricName,
		Namespace:  metric.Namespace,
		Period:     aws.Int64(int64(c.Period.Duration.Seconds())),
		Dimensions: metric.Dimensions,
		Statistics: []*string{
			aws.String(cloudwatch.StatisticAverage),
			aws.String(cloudwatch.StatisticMaximum),
			aws.String(cloudwatch.StatisticMinimum),
			aws.String(cloudwatch.StatisticSum),
			aws.String(cloudwatch.StatisticSampleCount)},
	}
	return input
}
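// Illustrative example for getStatisticsInput above: with delay = 1m and
// period = 1m, a request issued at time T covers the window [T-2m, T-1m],
// keeping the query clear of datapoints CloudWatch has not yet published.
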
/*
 * Check Metric Cache validity
 */
func (c *MetricCache) IsValid() bool {
	return c.Metrics != nil && time.Since(c.Fetched) < c.TTL
}
131
plugins/inputs/cloudwatch/cloudwatch_test.go
Normal file
@@ -0,0 +1,131 @@
package cloudwatch

import (
	"testing"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/cloudwatch"
	"github.com/influxdata/telegraf/internal"
	"github.com/influxdata/telegraf/testutil"
	"github.com/stretchr/testify/assert"
)

type mockCloudWatchClient struct{}

func (m *mockCloudWatchClient) ListMetrics(params *cloudwatch.ListMetricsInput) (*cloudwatch.ListMetricsOutput, error) {
	metric := &cloudwatch.Metric{
		Namespace:  params.Namespace,
		MetricName: aws.String("Latency"),
		Dimensions: []*cloudwatch.Dimension{
			{
				Name:  aws.String("LoadBalancerName"),
				Value: aws.String("p-example"),
			},
		},
	}

	result := &cloudwatch.ListMetricsOutput{
		Metrics: []*cloudwatch.Metric{metric},
	}
	return result, nil
}

func (m *mockCloudWatchClient) GetMetricStatistics(params *cloudwatch.GetMetricStatisticsInput) (*cloudwatch.GetMetricStatisticsOutput, error) {
	dataPoint := &cloudwatch.Datapoint{
		Timestamp:   params.EndTime,
		Minimum:     aws.Float64(0.1),
		Maximum:     aws.Float64(0.3),
		Average:     aws.Float64(0.2),
		Sum:         aws.Float64(123),
		SampleCount: aws.Float64(100),
		Unit:        aws.String("Seconds"),
	}
	result := &cloudwatch.GetMetricStatisticsOutput{
		Label:      aws.String("Latency"),
		Datapoints: []*cloudwatch.Datapoint{dataPoint},
	}
	return result, nil
}

func TestGather(t *testing.T) {
	duration, _ := time.ParseDuration("1m")
	internalDuration := internal.Duration{
		Duration: duration,
	}
	c := &CloudWatch{
		Region:    "us-east-1",
		Namespace: "AWS/ELB",
		Delay:     internalDuration,
		Period:    internalDuration,
	}

	var acc testutil.Accumulator
	c.client = &mockCloudWatchClient{}

	c.Gather(&acc)

	fields := map[string]interface{}{}
	fields["latency_minimum"] = 0.1
	fields["latency_maximum"] = 0.3
	fields["latency_average"] = 0.2
	fields["latency_sum"] = 123.0
	fields["latency_sample_count"] = 100.0

	tags := map[string]string{}
	tags["unit"] = "seconds"
	tags["region"] = "us-east-1"
	tags["load_balancer_name"] = "p-example"

	assert.True(t, acc.HasMeasurement("cloudwatch_aws_elb"))
	acc.AssertContainsTaggedFields(t, "cloudwatch_aws_elb", fields, tags)
}

func TestGenerateStatisticsInputParams(t *testing.T) {
	d := &cloudwatch.Dimension{
		Name:  aws.String("LoadBalancerName"),
		Value: aws.String("p-example"),
	}

	m := &cloudwatch.Metric{
		MetricName: aws.String("Latency"),
		Dimensions: []*cloudwatch.Dimension{d},
	}

	duration, _ := time.ParseDuration("1m")
	internalDuration := internal.Duration{
		Duration: duration,
	}

	c := &CloudWatch{
		Namespace: "AWS/ELB",
		Delay:     internalDuration,
		Period:    internalDuration,
	}

	c.initializeCloudWatch()

	now := time.Now()

	params := c.getStatisticsInput(m, now)

	assert.EqualValues(t, *params.EndTime, now.Add(-c.Delay.Duration))
	assert.EqualValues(t, *params.StartTime, now.Add(-c.Period.Duration).Add(-c.Delay.Duration))
	assert.Len(t, params.Dimensions, 1)
	assert.Len(t, params.Statistics, 5)
	assert.EqualValues(t, *params.Period, 60)
}

func TestMetricsCacheTimeout(t *testing.T) {
	ttl, _ := time.ParseDuration("5ms")
	cache := &MetricCache{
		Metrics: []*cloudwatch.Metric{},
		Fetched: time.Now(),
		TTL:     ttl,
	}

	assert.True(t, cache.IsValid())
	time.Sleep(ttl)
	assert.False(t, cache.IsValid())
}
63
plugins/inputs/couchbase/README.md
Normal file
@@ -0,0 +1,63 @@
# Telegraf Plugin: Couchbase

## Configuration:

```
# Read per-node and per-bucket metrics from Couchbase
[[inputs.couchbase]]
  ## specify servers via a url matching:
  ##  [protocol://][:password]@address[:port]
  ##  e.g.
  ##    http://couchbase-0.example.com/
  ##    http://admin:secret@couchbase-0.example.com:8091/
  ##
  ## If no servers are specified, then localhost is used as the host.
  ## If no protocol is specified, HTTP is used.
  ## If no port is specified, 8091 is used.
  servers = ["http://localhost:8091"]
```

## Measurements:

### couchbase_node

Tags:
- cluster: whatever you called it in `servers` in the configuration, e.g.: `http://couchbase-0.example.com/`
- hostname: Couchbase's name for the node and port, e.g., `172.16.10.187:8091`

Fields:
- memory_free (unit: bytes, example: 23181365248.0)
- memory_total (unit: bytes, example: 64424656896.0)

### couchbase_bucket

Tags:
- cluster: whatever you called it in `servers` in the configuration, e.g.: `http://couchbase-0.example.com/`
- bucket: the name of the couchbase bucket, e.g., `blastro-df`

Fields:
- quota_percent_used (unit: percent, example: 68.85424936294555)
- ops_per_sec (unit: count, example: 5686.789686789687)
- disk_fetches (unit: count, example: 0.0)
- item_count (unit: count, example: 943239752.0)
- disk_used (unit: bytes, example: 409178772321.0)
- data_used (unit: bytes, example: 212179309111.0)
- mem_used (unit: bytes, example: 202156957464.0)

## Example output

```
$ telegraf -config telegraf.conf -input-filter couchbase -test
* Plugin: couchbase, Collection 1
> couchbase_node,cluster=https://couchbase-0.example.com/,hostname=172.16.10.187:8091 memory_free=22927384576,memory_total=64424656896 1458381183695864929
> couchbase_node,cluster=https://couchbase-0.example.com/,hostname=172.16.10.65:8091 memory_free=23520161792,memory_total=64424656896 1458381183695972112
> couchbase_node,cluster=https://couchbase-0.example.com/,hostname=172.16.13.105:8091 memory_free=23531704320,memory_total=64424656896 1458381183695995259
> couchbase_node,cluster=https://couchbase-0.example.com/,hostname=172.16.13.173:8091 memory_free=23628767232,memory_total=64424656896 1458381183696010870
> couchbase_node,cluster=https://couchbase-0.example.com/,hostname=172.16.15.120:8091 memory_free=23616692224,memory_total=64424656896 1458381183696027406
> couchbase_node,cluster=https://couchbase-0.example.com/,hostname=172.16.8.127:8091 memory_free=23431770112,memory_total=64424656896 1458381183696041040
> couchbase_node,cluster=https://couchbase-0.example.com/,hostname=172.16.8.148:8091 memory_free=23811371008,memory_total=64424656896 1458381183696059060
> couchbase_bucket,bucket=default,cluster=https://couchbase-0.example.com/ data_used=25743360,disk_fetches=0,disk_used=31744886,item_count=0,mem_used=77729224,ops_per_sec=0,quota_percent_used=10.58976636614118 1458381183696210074
> couchbase_bucket,bucket=demoncat,cluster=https://couchbase-0.example.com/ data_used=38157584951,disk_fetches=0,disk_used=62730302441,item_count=14662532,mem_used=24015304256,ops_per_sec=1207.753207753208,quota_percent_used=79.87855353525707 1458381183696242695
> couchbase_bucket,bucket=blastro-df,cluster=https://couchbase-0.example.com/ data_used=212552491622,disk_fetches=0,disk_used=413323157621,item_count=944655680,mem_used=202421103760,ops_per_sec=1692.176692176692,quota_percent_used=68.9442170551845 1458381183696272206
```
104
plugins/inputs/couchbase/couchbase.go
Normal file
@@ -0,0 +1,104 @@
package couchbase
|
||||
|
||||
import (
|
||||
couchbase "github.com/couchbase/go-couchbase"
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type Couchbase struct {
|
||||
Servers []string
|
||||
}
|
||||
|
||||
var sampleConfig = `
|
||||
## specify servers via a url matching:
|
||||
## [protocol://][:password]@address[:port]
|
||||
## e.g.
|
||||
## http://couchbase-0.example.com/
|
||||
## http://admin:secret@couchbase-0.example.com:8091/
|
||||
##
|
||||
## If no servers are specified, then localhost is used as the host.
|
||||
## If no protocol is specifed, HTTP is used.
|
||||
## If no port is specified, 8091 is used.
|
||||
servers = ["http://localhost:8091"]
|
||||
`
|
||||
|
||||
func (r *Couchbase) SampleConfig() string {
|
||||
return sampleConfig
|
||||
}
|
||||
|
||||
func (r *Couchbase) Description() string {
|
||||
return "Read metrics from one or many couchbase clusters"
|
||||
}
|
||||
|
||||
// Reads stats from all configured clusters and accumulates them.
|
||||
// Returns one of the errors encountered while gathering stats (if any).
|
||||
func (r *Couchbase) Gather(acc telegraf.Accumulator) error {
|
||||
if len(r.Servers) == 0 {
|
||||
r.gatherServer("http://localhost:8091/", acc, nil)
|
||||
return nil
|
||||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
|
||||
var outerr error
|
||||
|
||||
for _, serv := range r.Servers {
|
||||
wg.Add(1)
|
||||
go func(serv string) {
|
||||
defer wg.Done()
|
||||
outerr = r.gatherServer(serv, acc, nil)
|
||||
}(serv)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
|
||||
return outerr
|
||||
}
|
||||
|
||||
func (r *Couchbase) gatherServer(addr string, acc telegraf.Accumulator, pool *couchbase.Pool) error {
|
||||
if pool == nil {
|
||||
client, err := couchbase.Connect(addr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// `default` is the only possible pool name. It's a
|
||||
// placeholder for a possible future Couchbase feature. See
|
||||
// http://stackoverflow.com/a/16990911/17498.
|
||||
p, err := client.GetPool("default")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pool = &p
|
||||
}
|
||||
for i := 0; i < len(pool.Nodes); i++ {
|
||||
node := pool.Nodes[i]
|
||||
tags := map[string]string{"cluster": addr, "hostname": node.Hostname}
|
||||
fields := make(map[string]interface{})
|
||||
fields["memory_free"] = node.MemoryFree
|
||||
fields["memory_total"] = node.MemoryTotal
|
||||
acc.AddFields("couchbase_node", fields, tags)
|
||||
}
|
||||
for bucketName := range pool.BucketMap {
|
||||
tags := map[string]string{"cluster": addr, "bucket": bucketName}
|
||||
bs := pool.BucketMap[bucketName].BasicStats
|
||||
fields := make(map[string]interface{})
|
||||
fields["quota_percent_used"] = bs["quotaPercentUsed"]
|
||||
fields["ops_per_sec"] = bs["opsPerSec"]
|
||||
fields["disk_fetches"] = bs["diskFetches"]
|
||||
fields["item_count"] = bs["itemCount"]
|
||||
fields["disk_used"] = bs["diskUsed"]
|
||||
fields["data_used"] = bs["dataUsed"]
|
||||
fields["mem_used"] = bs["memUsed"]
|
||||
acc.AddFields("couchbase_bucket", fields, tags)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
inputs.Add("couchbase", func() telegraf.Input {
|
||||
return &Couchbase{}
|
||||
})
|
||||
}
|
||||
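The `Gather` fan-out above writes `outerr` from every goroutine with no synchronization, so concurrent failures race and whichever write lands last wins. A minimal race-free sketch of the same pattern, with a mutex guarding the shared error (`gatherAll` and its callback are illustrative names, not part of the plugin):

```
package example

import "sync"

// gatherAll fans out one goroutine per server and guards the shared
// error with a mutex instead of writing it unsynchronized.
func gatherAll(servers []string, gather func(server string) error) error {
	var (
		wg     sync.WaitGroup
		mu     sync.Mutex
		outerr error
	)
	for _, serv := range servers {
		wg.Add(1)
		go func(serv string) {
			defer wg.Done()
			if err := gather(serv); err != nil {
				mu.Lock()
				outerr = err // last error wins, matching the original behavior
				mu.Unlock()
			}
		}(serv)
	}
	wg.Wait()
	return outerr
}
```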
50
plugins/inputs/couchbase/couchbase_test.go
Normal file
File diff suppressed because one or more lines are too long
255
plugins/inputs/couchdb/README.md
Normal file
@@ -0,0 +1,255 @@
|
||||
# CouchDB Input Plugin
|
||||
---
|
||||
|
||||
The CouchDB plugin gathers metrics from CouchDB using the [_stats](http://docs.couchdb.org/en/1.6.1/api/server/common.html?highlight=stats#get--_stats) endpoint.
|
||||
|
||||
### Configuration:
|
||||
|
||||
```
|
||||
# Sample Config:
|
||||
[[inputs.couchdb]]
|
||||
hosts = ["http://localhost:5984/_stats"]
|
||||
```
|
||||
|
||||
### Measurements & Fields:
|
||||
|
||||
Statistics specific to the internals of CouchDB:
|
||||
|
||||
- couchdb_auth_cache_misses
|
||||
- couchdb_database_writes
|
||||
- couchdb_open_databases
|
||||
- couchdb_auth_cache_hits
|
||||
- couchdb_request_time
|
||||
- couchdb_database_reads
|
||||
- couchdb_open_os_files
|
||||
|
||||
Statistics of HTTP requests by method:
|
||||
|
||||
- httpd_request_methods_put
|
||||
- httpd_request_methods_get
|
||||
- httpd_request_methods_copy
|
||||
- httpd_request_methods_delete
|
||||
- httpd_request_methods_post
|
||||
- httpd_request_methods_head
|
||||
|
||||
Statistics of HTTP requests by response code:
|
||||
|
||||
- httpd_status_codes_200
|
||||
- httpd_status_codes_201
|
||||
- httpd_status_codes_202
|
||||
- httpd_status_codes_301
|
||||
- httpd_status_codes_304
|
||||
- httpd_status_codes_400
|
||||
- httpd_status_codes_401
|
||||
- httpd_status_codes_403
|
||||
- httpd_status_codes_404
|
||||
- httpd_status_codes_405
|
||||
- httpd_status_codes_409
|
||||
- httpd_status_codes_412
|
||||
- httpd_status_codes_500
|
||||
|
||||
httpd statistics:
|
||||
|
||||
- httpd_clients_requesting_changes
|
||||
- httpd_temporary_view_reads
|
||||
- httpd_requests
|
||||
- httpd_bulk_requests
|
||||
- httpd_view_reads
|
||||
|
||||
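Each statistic above is flattened into six numeric fields with the suffixes `_current`, `_sum`, `_mean`, `_stddev`, `_min`, and `_max`, as the example output below shows. A sketch of that expansion, mirroring `generateFields` in couchdb.go further down (the helper name is ours):

```
package example

// expandMetric flattens one CouchDB statistic into the six suffixed
// numeric fields the plugin reports per metric.
func expandMetric(prefix string, current, sum, mean, stddev, min, max float64) map[string]interface{} {
	return map[string]interface{}{
		prefix + "_current": current,
		prefix + "_sum":     sum,
		prefix + "_mean":    mean,
		prefix + "_stddev":  stddev,
		prefix + "_min":     min,
		prefix + "_max":     max,
	}
}
```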
### Tags:
|
||||
|
||||
- server (URL of the CouchDB _stats endpoint)
|
||||
|
||||
### Example output:
|
||||
|
||||
```
|
||||
➜ telegraf git:(master) ✗ ./telegraf -config ./config.conf -input-filter couchdb -test
|
||||
* Plugin: couchdb,
|
||||
Collection 1
|
||||
> couchdb,server=http://localhost:5984/_stats couchdb_auth_cache_hits_current=0,
|
||||
couchdb_auth_cache_hits_max=0,
|
||||
couchdb_auth_cache_hits_mean=0,
|
||||
couchdb_auth_cache_hits_min=0,
|
||||
couchdb_auth_cache_hits_stddev=0,
|
||||
couchdb_auth_cache_hits_sum=0,
|
||||
couchdb_auth_cache_misses_current=0,
|
||||
couchdb_auth_cache_misses_max=0,
|
||||
couchdb_auth_cache_misses_mean=0,
|
||||
couchdb_auth_cache_misses_min=0,
|
||||
couchdb_auth_cache_misses_stddev=0,
|
||||
couchdb_auth_cache_misses_sum=0,
|
||||
couchdb_database_reads_current=0,
|
||||
couchdb_database_reads_max=0,
|
||||
couchdb_database_reads_mean=0,
|
||||
couchdb_database_reads_min=0,
|
||||
couchdb_database_reads_stddev=0,
|
||||
couchdb_database_reads_sum=0,
|
||||
couchdb_database_writes_current=1102,
|
||||
couchdb_database_writes_max=131,
|
||||
couchdb_database_writes_mean=0.116,
|
||||
couchdb_database_writes_min=0,
|
||||
couchdb_database_writes_stddev=3.536,
|
||||
couchdb_database_writes_sum=1102,
|
||||
couchdb_open_databases_current=1,
|
||||
couchdb_open_databases_max=1,
|
||||
couchdb_open_databases_mean=0,
|
||||
couchdb_open_databases_min=0,
|
||||
couchdb_open_databases_stddev=0.01,
|
||||
couchdb_open_databases_sum=1,
|
||||
couchdb_open_os_files_current=2,
|
||||
couchdb_open_os_files_max=2,
|
||||
couchdb_open_os_files_mean=0,
|
||||
couchdb_open_os_files_min=0,
|
||||
couchdb_open_os_files_stddev=0.02,
|
||||
couchdb_open_os_files_sum=2,
|
||||
couchdb_request_time_current=242.21,
|
||||
couchdb_request_time_max=102,
|
||||
couchdb_request_time_mean=5.767,
|
||||
couchdb_request_time_min=1,
|
||||
couchdb_request_time_stddev=17.369,
|
||||
couchdb_request_time_sum=242.21,
|
||||
httpd_bulk_requests_current=0,
|
||||
httpd_bulk_requests_max=0,
|
||||
httpd_bulk_requests_mean=0,
|
||||
httpd_bulk_requests_min=0,
|
||||
httpd_bulk_requests_stddev=0,
|
||||
httpd_bulk_requests_sum=0,
|
||||
httpd_clients_requesting_changes_current=0,
|
||||
httpd_clients_requesting_changes_max=0,
|
||||
httpd_clients_requesting_changes_mean=0,
|
||||
httpd_clients_requesting_changes_min=0,
|
||||
httpd_clients_requesting_changes_stddev=0,
|
||||
httpd_clients_requesting_changes_sum=0,
|
||||
httpd_request_methods_copy_current=0,
|
||||
httpd_request_methods_copy_max=0,
|
||||
httpd_request_methods_copy_mean=0,
|
||||
httpd_request_methods_copy_min=0,
|
||||
httpd_request_methods_copy_stddev=0,
|
||||
httpd_request_methods_copy_sum=0,
|
||||
httpd_request_methods_delete_current=0,
|
||||
httpd_request_methods_delete_max=0,
|
||||
httpd_request_methods_delete_mean=0,
|
||||
httpd_request_methods_delete_min=0,
|
||||
httpd_request_methods_delete_stddev=0,
|
||||
httpd_request_methods_delete_sum=0,
|
||||
httpd_request_methods_get_current=31,
|
||||
httpd_request_methods_get_max=1,
|
||||
httpd_request_methods_get_mean=0.003,
|
||||
httpd_request_methods_get_min=0,
|
||||
httpd_request_methods_get_stddev=0.057,
|
||||
httpd_request_methods_get_sum=31,
|
||||
httpd_request_methods_head_current=0,
|
||||
httpd_request_methods_head_max=0,
|
||||
httpd_request_methods_head_mean=0,
|
||||
httpd_request_methods_head_min=0,
|
||||
httpd_request_methods_head_stddev=0,
|
||||
httpd_request_methods_head_sum=0,
|
||||
httpd_request_methods_post_current=1102,
|
||||
httpd_request_methods_post_max=131,
|
||||
httpd_request_methods_post_mean=0.116,
|
||||
httpd_request_methods_post_min=0,
|
||||
httpd_request_methods_post_stddev=3.536,
|
||||
httpd_request_methods_post_sum=1102,
|
||||
httpd_request_methods_put_current=1,
|
||||
httpd_request_methods_put_max=1,
|
||||
httpd_request_methods_put_mean=0,
|
||||
httpd_request_methods_put_min=0,
|
||||
httpd_request_methods_put_stddev=0.01,
|
||||
httpd_request_methods_put_sum=1,
|
||||
httpd_requests_current=1133,
|
||||
httpd_requests_max=130,
|
||||
httpd_requests_mean=0.118,
|
||||
httpd_requests_min=0,
|
||||
httpd_requests_stddev=3.512,
|
||||
httpd_requests_sum=1133,
|
||||
httpd_status_codes_200_current=31,
|
||||
httpd_status_codes_200_max=1,
|
||||
httpd_status_codes_200_mean=0.003,
|
||||
httpd_status_codes_200_min=0,
|
||||
httpd_status_codes_200_stddev=0.057,
|
||||
httpd_status_codes_200_sum=31,
|
||||
httpd_status_codes_201_current=1103,
|
||||
httpd_status_codes_201_max=130,
|
||||
httpd_status_codes_201_mean=0.116,
|
||||
httpd_status_codes_201_min=0,
|
||||
httpd_status_codes_201_stddev=3.532,
|
||||
httpd_status_codes_201_sum=1103,
|
||||
httpd_status_codes_202_current=0,
|
||||
httpd_status_codes_202_max=0,
|
||||
httpd_status_codes_202_mean=0,
|
||||
httpd_status_codes_202_min=0,
|
||||
httpd_status_codes_202_stddev=0,
|
||||
httpd_status_codes_202_sum=0,
|
||||
httpd_status_codes_301_current=0,
|
||||
httpd_status_codes_301_max=0,
|
||||
httpd_status_codes_301_mean=0,
|
||||
httpd_status_codes_301_min=0,
|
||||
httpd_status_codes_301_stddev=0,
|
||||
httpd_status_codes_301_sum=0,
|
||||
httpd_status_codes_304_current=0,
|
||||
httpd_status_codes_304_max=0,
|
||||
httpd_status_codes_304_mean=0,
|
||||
httpd_status_codes_304_min=0,
|
||||
httpd_status_codes_304_stddev=0,
|
||||
httpd_status_codes_304_sum=0,
|
||||
httpd_status_codes_400_current=0,
|
||||
httpd_status_codes_400_max=0,
|
||||
httpd_status_codes_400_mean=0,
|
||||
httpd_status_codes_400_min=0,
|
||||
httpd_status_codes_400_stddev=0,
|
||||
httpd_status_codes_400_sum=0,
|
||||
httpd_status_codes_401_current=0,
|
||||
httpd_status_codes_401_max=0,
|
||||
httpd_status_codes_401_mean=0,
|
||||
httpd_status_codes_401_min=0,
|
||||
httpd_status_codes_401_stddev=0,
|
||||
httpd_status_codes_401_sum=0,
|
||||
httpd_status_codes_403_current=0,
|
||||
httpd_status_codes_403_max=0,
|
||||
httpd_status_codes_403_mean=0,
|
||||
httpd_status_codes_403_min=0,
|
||||
httpd_status_codes_403_stddev=0,
|
||||
httpd_status_codes_403_sum=0,
|
||||
httpd_status_codes_404_current=0,
|
||||
httpd_status_codes_404_max=0,
|
||||
httpd_status_codes_404_mean=0,
|
||||
httpd_status_codes_404_min=0,
|
||||
httpd_status_codes_404_stddev=0,
|
||||
httpd_status_codes_404_sum=0,
|
||||
httpd_status_codes_405_current=0,
|
||||
httpd_status_codes_405_max=0,
|
||||
httpd_status_codes_405_mean=0,
|
||||
httpd_status_codes_405_min=0,
|
||||
httpd_status_codes_405_stddev=0,
|
||||
httpd_status_codes_405_sum=0,
|
||||
httpd_status_codes_409_current=0,
|
||||
httpd_status_codes_409_max=0,
|
||||
httpd_status_codes_409_mean=0,
|
||||
httpd_status_codes_409_min=0,
|
||||
httpd_status_codes_409_stddev=0,
|
||||
httpd_status_codes_409_sum=0,
|
||||
httpd_status_codes_412_current=0,
|
||||
httpd_status_codes_412_max=0,
|
||||
httpd_status_codes_412_mean=0,
|
||||
httpd_status_codes_412_min=0,
|
||||
httpd_status_codes_412_stddev=0,
|
||||
httpd_status_codes_412_sum=0,
|
||||
httpd_status_codes_500_current=0,
|
||||
httpd_status_codes_500_max=0,
|
||||
httpd_status_codes_500_mean=0,
|
||||
httpd_status_codes_500_min=0,
|
||||
httpd_status_codes_500_stddev=0,
|
||||
httpd_status_codes_500_sum=0,
|
||||
httpd_temporary_view_reads_current=0,
|
||||
httpd_temporary_view_reads_max=0,
|
||||
httpd_temporary_view_reads_mean=0,
|
||||
httpd_temporary_view_reads_min=0,
|
||||
httpd_temporary_view_reads_stddev=0,
|
||||
httpd_temporary_view_reads_sum=0,
|
||||
httpd_view_reads_current=0,
|
||||
httpd_view_reads_max=0,
|
||||
httpd_view_reads_mean=0,
|
||||
httpd_view_reads_min=0,
|
||||
httpd_view_reads_stddev=0,
|
||||
httpd_view_reads_sum=0 1454692257621938169
|
||||
```
|
||||
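The payload above is exactly what CouchDB serves from `_stats`; fetching the endpoint directly is a quick way to see what the plugin will parse. A minimal sketch, assuming CouchDB on its default port 5984:

```
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

// Fetch and print the raw /_stats document the plugin consumes.
func main() {
	resp, err := http.Get("http://localhost:5984/_stats")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body))
}
```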
215
plugins/inputs/couchdb/couchdb.go
Normal file
@@ -0,0 +1,215 @@
|
||||
package couchdb
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
"net/http"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Schema:
|
||||
type metaData struct {
|
||||
Description string `json:"description"`
|
||||
Current float64 `json:"current"`
|
||||
Sum float64 `json:"sum"`
|
||||
Mean float64 `json:"mean"`
|
||||
Stddev float64 `json:"stddev"`
|
||||
Min float64 `json:"min"`
|
||||
Max float64 `json:"max"`
|
||||
}
|
||||
|
||||
type Stats struct {
|
||||
Couchdb struct {
|
||||
AuthCacheMisses metaData `json:"auth_cache_misses"`
|
||||
DatabaseWrites metaData `json:"database_writes"`
|
||||
OpenDatabases metaData `json:"open_databases"`
|
||||
AuthCacheHits metaData `json:"auth_cache_hits"`
|
||||
RequestTime metaData `json:"request_time"`
|
||||
DatabaseReads metaData `json:"database_reads"`
|
||||
OpenOsFiles metaData `json:"open_os_files"`
|
||||
} `json:"couchdb"`
|
||||
HttpdRequestMethods struct {
|
||||
Put metaData `json:"PUT"`
|
||||
Get metaData `json:"GET"`
|
||||
Copy metaData `json:"COPY"`
|
||||
Delete metaData `json:"DELETE"`
|
||||
Post metaData `json:"POST"`
|
||||
Head metaData `json:"HEAD"`
|
||||
} `json:"httpd_request_methods"`
|
||||
HttpdStatusCodes struct {
|
||||
Status200 metaData `json:"200"`
|
||||
Status201 metaData `json:"201"`
|
||||
Status202 metaData `json:"202"`
|
||||
Status301 metaData `json:"301"`
|
||||
Status304 metaData `json:"304"`
|
||||
Status400 metaData `json:"400"`
|
||||
Status401 metaData `json:"401"`
|
||||
Status403 metaData `json:"403"`
|
||||
Status404 metaData `json:"404"`
|
||||
Status405 metaData `json:"405"`
|
||||
Status409 metaData `json:"409"`
|
||||
Status412 metaData `json:"412"`
|
||||
Status500 metaData `json:"500"`
|
||||
} `json:"httpd_status_codes"`
|
||||
Httpd struct {
|
||||
ClientsRequestingChanges metaData `json:"clients_requesting_changes"`
|
||||
TemporaryViewReads metaData `json:"temporary_view_reads"`
|
||||
Requests metaData `json:"requests"`
|
||||
BulkRequests metaData `json:"bulk_requests"`
|
||||
ViewReads metaData `json:"view_reads"`
|
||||
} `json:"httpd"`
|
||||
}
|
||||
|
||||
type CouchDB struct {
|
||||
HOSTs []string `toml:"hosts"`
|
||||
}
|
||||
|
||||
func (*CouchDB) Description() string {
|
||||
return "Read CouchDB Stats from one or more servers"
|
||||
}
|
||||
|
||||
func (*CouchDB) SampleConfig() string {
|
||||
return `
|
||||
## Works with CouchDB stats endpoints out of the box
|
||||
## Multiple HOSTs from which to read CouchDB stats:
|
||||
hosts = ["http://localhost:8086/_stats"]
|
||||
`
|
||||
}
|
||||
|
||||
func (c *CouchDB) Gather(accumulator telegraf.Accumulator) error {
|
||||
errorChannel := make(chan error, len(c.HOSTs))
|
||||
var wg sync.WaitGroup
|
||||
for _, u := range c.HOSTs {
|
||||
wg.Add(1)
|
||||
go func(host string) {
|
||||
defer wg.Done()
|
||||
if err := c.fetchAndInsertData(accumulator, host); err != nil {
|
||||
errorChannel <- fmt.Errorf("[host=%s]: %s", host, err)
|
||||
}
|
||||
}(u)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
close(errorChannel)
|
||||
|
||||
// If there weren't any errors, we can return nil now.
|
||||
if len(errorChannel) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// There were errors, so join them all together as one big error.
|
||||
errorStrings := make([]string, 0, len(errorChannel))
|
||||
for err := range errorChannel {
|
||||
errorStrings = append(errorStrings, err.Error())
|
||||
}
|
||||
|
||||
return errors.New(strings.Join(errorStrings, "\n"))
|
||||
|
||||
}
|
||||
|
||||
var tr = &http.Transport{
|
||||
ResponseHeaderTimeout: 3 * time.Second,
|
||||
}
|
||||
|
||||
var client = &http.Client{
|
||||
Transport: tr,
|
||||
Timeout: 4 * time.Second,
|
||||
}
|
||||
|
||||
func (c *CouchDB) fetchAndInsertData(accumulator telegraf.Accumulator, host string) error {
|
||||
|
||||
response, err := client.Get(host)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer response.Body.Close()
|
||||
|
||||
var stats Stats
|
||||
decoder := json.NewDecoder(response.Body)
|
||||
if err := decoder.Decode(&stats); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fields := map[string]interface{}{}
|
||||
|
||||
// CouchDB meta stats:
|
||||
c.MapCopy(fields, c.generateFields("couchdb_auth_cache_misses", stats.Couchdb.AuthCacheMisses))
|
||||
c.MapCopy(fields, c.generateFields("couchdb_database_writes", stats.Couchdb.DatabaseWrites))
|
||||
c.MapCopy(fields, c.generateFields("couchdb_open_databases", stats.Couchdb.OpenDatabases))
|
||||
c.MapCopy(fields, c.generateFields("couchdb_auth_cache_hits", stats.Couchdb.AuthCacheHits))
|
||||
c.MapCopy(fields, c.generateFields("couchdb_request_time", stats.Couchdb.RequestTime))
|
||||
c.MapCopy(fields, c.generateFields("couchdb_database_reads", stats.Couchdb.DatabaseReads))
|
||||
c.MapCopy(fields, c.generateFields("couchdb_open_os_files", stats.Couchdb.OpenOsFiles))
|
||||
|
||||
// http request methods stats:
|
||||
c.MapCopy(fields, c.generateFields("httpd_request_methods_put", stats.HttpdRequestMethods.Put))
|
||||
c.MapCopy(fields, c.generateFields("httpd_request_methods_get", stats.HttpdRequestMethods.Get))
|
||||
c.MapCopy(fields, c.generateFields("httpd_request_methods_copy", stats.HttpdRequestMethods.Copy))
|
||||
c.MapCopy(fields, c.generateFields("httpd_request_methods_delete", stats.HttpdRequestMethods.Delete))
|
||||
c.MapCopy(fields, c.generateFields("httpd_request_methods_post", stats.HttpdRequestMethods.Post))
|
||||
c.MapCopy(fields, c.generateFields("httpd_request_methods_head", stats.HttpdRequestMethods.Head))
|
||||
|
||||
// status code stats:
|
||||
c.MapCopy(fields, c.generateFields("httpd_status_codes_200", stats.HttpdStatusCodes.Status200))
|
||||
c.MapCopy(fields, c.generateFields("httpd_status_codes_201", stats.HttpdStatusCodes.Status201))
|
||||
c.MapCopy(fields, c.generateFields("httpd_status_codes_202", stats.HttpdStatusCodes.Status202))
|
||||
c.MapCopy(fields, c.generateFields("httpd_status_codes_301", stats.HttpdStatusCodes.Status301))
|
||||
c.MapCopy(fields, c.generateFields("httpd_status_codes_304", stats.HttpdStatusCodes.Status304))
|
||||
c.MapCopy(fields, c.generateFields("httpd_status_codes_400", stats.HttpdStatusCodes.Status400))
|
||||
c.MapCopy(fields, c.generateFields("httpd_status_codes_401", stats.HttpdStatusCodes.Status401))
|
||||
c.MapCopy(fields, c.generateFields("httpd_status_codes_403", stats.HttpdStatusCodes.Status403))
|
||||
c.MapCopy(fields, c.generateFields("httpd_status_codes_404", stats.HttpdStatusCodes.Status404))
|
||||
c.MapCopy(fields, c.generateFields("httpd_status_codes_405", stats.HttpdStatusCodes.Status405))
|
||||
c.MapCopy(fields, c.generateFields("httpd_status_codes_409", stats.HttpdStatusCodes.Status409))
|
||||
c.MapCopy(fields, c.generateFields("httpd_status_codes_412", stats.HttpdStatusCodes.Status412))
|
||||
c.MapCopy(fields, c.generateFields("httpd_status_codes_500", stats.HttpdStatusCodes.Status500))
|
||||
|
||||
// httpd stats:
|
||||
c.MapCopy(fields, c.generateFields("httpd_clients_requesting_changes", stats.Httpd.ClientsRequestingChanges))
|
||||
c.MapCopy(fields, c.generateFields("httpd_temporary_view_reads", stats.Httpd.TemporaryViewReads))
|
||||
c.MapCopy(fields, c.generateFields("httpd_requests", stats.Httpd.Requests))
|
||||
c.MapCopy(fields, c.generateFields("httpd_bulk_requests", stats.Httpd.BulkRequests))
|
||||
c.MapCopy(fields, c.generateFields("httpd_view_reads", stats.Httpd.ViewReads))
|
||||
|
||||
tags := map[string]string{
|
||||
"server": host,
|
||||
}
|
||||
accumulator.AddFields("couchdb", fields, tags)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (*CouchDB) MapCopy(dst, src interface{}) {
|
||||
dv, sv := reflect.ValueOf(dst), reflect.ValueOf(src)
|
||||
for _, k := range sv.MapKeys() {
|
||||
dv.SetMapIndex(k, sv.MapIndex(k))
|
||||
}
|
||||
}
|
||||
|
||||
func (*CouchDB) safeCheck(value interface{}) interface{} {
|
||||
if value == nil {
|
||||
return 0.0
|
||||
}
|
||||
return value
|
||||
}
|
||||
|
||||
func (c *CouchDB) generateFields(prefix string, obj metaData) map[string]interface{} {
|
||||
fields := map[string]interface{}{
|
||||
prefix + "_current": c.safeCheck(obj.Current),
|
||||
prefix + "_sum": c.safeCheck(obj.Sum),
|
||||
prefix + "_mean": c.safeCheck(obj.Mean),
|
||||
prefix + "_stddev": c.safeCheck(obj.Stddev),
|
||||
prefix + "_min": c.safeCheck(obj.Min),
|
||||
prefix + "_max": c.safeCheck(obj.Max),
|
||||
}
|
||||
return fields
|
||||
}
|
||||
|
||||
func init() {
|
||||
inputs.Add("couchdb", func() telegraf.Input {
|
||||
return &CouchDB{}
|
||||
})
|
||||
}
|
||||
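Every map that `MapCopy` above touches is the concrete type `map[string]interface{}`, so the reflection is avoidable; a typed sketch of the same copy (illustrative, not the plugin's code):

```
package example

// mergeFields copies src into dst without reflection; both maps are
// already map[string]interface{} throughout the plugin.
func mergeFields(dst, src map[string]interface{}) {
	for k, v := range src {
		dst[k] = v
	}
}
```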
320
plugins/inputs/couchdb/couchdb_test.go
Normal file
@@ -0,0 +1,320 @@
|
||||
package couchdb_test
|
||||
|
||||
import (
|
||||
"github.com/influxdata/telegraf/plugins/inputs/couchdb"
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
"github.com/stretchr/testify/require"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestBasic(t *testing.T) {
|
||||
js := `
|
||||
{
|
||||
"couchdb": {
|
||||
"auth_cache_misses": {
|
||||
"description": "number of authentication cache misses",
|
||||
"current": null,
|
||||
"sum": null,
|
||||
"mean": null,
|
||||
"stddev": null,
|
||||
"min": null,
|
||||
"max": null
|
||||
},
|
||||
"database_writes": {
|
||||
"description": "number of times a database was changed",
|
||||
"current": null,
|
||||
"sum": null,
|
||||
"mean": null,
|
||||
"stddev": null,
|
||||
"min": null,
|
||||
"max": null
|
||||
},
|
||||
"open_databases": {
|
||||
"description": "number of open databases",
|
||||
"current": null,
|
||||
"sum": null,
|
||||
"mean": null,
|
||||
"stddev": null,
|
||||
"min": null,
|
||||
"max": null
|
||||
},
|
||||
"auth_cache_hits": {
|
||||
"description": "number of authentication cache hits",
|
||||
"current": null,
|
||||
"sum": null,
|
||||
"mean": null,
|
||||
"stddev": null,
|
||||
"min": null,
|
||||
"max": null
|
||||
},
|
||||
"request_time": {
|
||||
"description": "length of a request inside CouchDB without MochiWeb",
|
||||
"current": 18.0,
|
||||
"sum": 18.0,
|
||||
"mean": 18.0,
|
||||
"stddev": null,
|
||||
"min": 18.0,
|
||||
"max": 18.0
|
||||
},
|
||||
"database_reads": {
|
||||
"description": "number of times a document was read from a database",
|
||||
"current": null,
|
||||
"sum": null,
|
||||
"mean": null,
|
||||
"stddev": null,
|
||||
"min": null,
|
||||
"max": null
|
||||
},
|
||||
"open_os_files": {
|
||||
"description": "number of file descriptors CouchDB has open",
|
||||
"current": null,
|
||||
"sum": null,
|
||||
"mean": null,
|
||||
"stddev": null,
|
||||
"min": null,
|
||||
"max": null
|
||||
}
|
||||
},
|
||||
"httpd_request_methods": {
|
||||
"PUT": {
|
||||
"description": "number of HTTP PUT requests",
|
||||
"current": null,
|
||||
"sum": null,
|
||||
"mean": null,
|
||||
"stddev": null,
|
||||
"min": null,
|
||||
"max": null
|
||||
},
|
||||
"GET": {
|
||||
"description": "number of HTTP GET requests",
|
||||
"current": 2.0,
|
||||
"sum": 2.0,
|
||||
"mean": 0.25,
|
||||
"stddev": 0.70699999999999996181,
|
||||
"min": 0,
|
||||
"max": 2
|
||||
},
|
||||
"COPY": {
|
||||
"description": "number of HTTP COPY requests",
|
||||
"current": null,
|
||||
"sum": null,
|
||||
"mean": null,
|
||||
"stddev": null,
|
||||
"min": null,
|
||||
"max": null
|
||||
},
|
||||
"DELETE": {
|
||||
"description": "number of HTTP DELETE requests",
|
||||
"current": null,
|
||||
"sum": null,
|
||||
"mean": null,
|
||||
"stddev": null,
|
||||
"min": null,
|
||||
"max": null
|
||||
},
|
||||
"POST": {
|
||||
"description": "number of HTTP POST requests",
|
||||
"current": null,
|
||||
"sum": null,
|
||||
"mean": null,
|
||||
"stddev": null,
|
||||
"min": null,
|
||||
"max": null
|
||||
},
|
||||
"HEAD": {
|
||||
"description": "number of HTTP HEAD requests",
|
||||
"current": null,
|
||||
"sum": null,
|
||||
"mean": null,
|
||||
"stddev": null,
|
||||
"min": null,
|
||||
"max": null
|
||||
}
|
||||
},
|
||||
"httpd_status_codes": {
|
||||
"403": {
|
||||
"description": "number of HTTP 403 Forbidden responses",
|
||||
"current": null,
|
||||
"sum": null,
|
||||
"mean": null,
|
||||
"stddev": null,
|
||||
"min": null,
|
||||
"max": null
|
||||
},
|
||||
"202": {
|
||||
"description": "number of HTTP 202 Accepted responses",
|
||||
"current": null,
|
||||
"sum": null,
|
||||
"mean": null,
|
||||
"stddev": null,
|
||||
"min": null,
|
||||
"max": null
|
||||
},
|
||||
"401": {
|
||||
"description": "number of HTTP 401 Unauthorized responses",
|
||||
"current": null,
|
||||
"sum": null,
|
||||
"mean": null,
|
||||
"stddev": null,
|
||||
"min": null,
|
||||
"max": null
|
||||
},
|
||||
"409": {
|
||||
"description": "number of HTTP 409 Conflict responses",
|
||||
"current": null,
|
||||
"sum": null,
|
||||
"mean": null,
|
||||
"stddev": null,
|
||||
"min": null,
|
||||
"max": null
|
||||
},
|
||||
"200": {
|
||||
"description": "number of HTTP 200 OK responses",
|
||||
"current": 1.0,
|
||||
"sum": 1.0,
|
||||
"mean": 0.125,
|
||||
"stddev": 0.35399999999999998135,
|
||||
"min": 0,
|
||||
"max": 1
|
||||
},
|
||||
"405": {
|
||||
"description": "number of HTTP 405 Method Not Allowed responses",
|
||||
"current": null,
|
||||
"sum": null,
|
||||
"mean": null,
|
||||
"stddev": null,
|
||||
"min": null,
|
||||
"max": null
|
||||
},
|
||||
"400": {
|
||||
"description": "number of HTTP 400 Bad Request responses",
|
||||
"current": null,
|
||||
"sum": null,
|
||||
"mean": null,
|
||||
"stddev": null,
|
||||
"min": null,
|
||||
"max": null
|
||||
},
|
||||
"201": {
|
||||
"description": "number of HTTP 201 Created responses",
|
||||
"current": null,
|
||||
"sum": null,
|
||||
"mean": null,
|
||||
"stddev": null,
|
||||
"min": null,
|
||||
"max": null
|
||||
},
|
||||
"404": {
|
||||
"description": "number of HTTP 404 Not Found responses",
|
||||
"current": null,
|
||||
"sum": null,
|
||||
"mean": null,
|
||||
"stddev": null,
|
||||
"min": null,
|
||||
"max": null
|
||||
},
|
||||
"500": {
|
||||
"description": "number of HTTP 500 Internal Server Error responses",
|
||||
"current": null,
|
||||
"sum": null,
|
||||
"mean": null,
|
||||
"stddev": null,
|
||||
"min": null,
|
||||
"max": null
|
||||
},
|
||||
"412": {
|
||||
"description": "number of HTTP 412 Precondition Failed responses",
|
||||
"current": null,
|
||||
"sum": null,
|
||||
"mean": null,
|
||||
"stddev": null,
|
||||
"min": null,
|
||||
"max": null
|
||||
},
|
||||
"301": {
|
||||
"description": "number of HTTP 301 Moved Permanently responses",
|
||||
"current": null,
|
||||
"sum": null,
|
||||
"mean": null,
|
||||
"stddev": null,
|
||||
"min": null,
|
||||
"max": null
|
||||
},
|
||||
"304": {
|
||||
"description": "number of HTTP 304 Not Modified responses",
|
||||
"current": null,
|
||||
"sum": null,
|
||||
"mean": null,
|
||||
"stddev": null,
|
||||
"min": null,
|
||||
"max": null
|
||||
}
|
||||
},
|
||||
"httpd": {
|
||||
"clients_requesting_changes": {
|
||||
"description": "number of clients for continuous _changes",
|
||||
"current": null,
|
||||
"sum": null,
|
||||
"mean": null,
|
||||
"stddev": null,
|
||||
"min": null,
|
||||
"max": null
|
||||
},
|
||||
"temporary_view_reads": {
|
||||
"description": "number of temporary view reads",
|
||||
"current": null,
|
||||
"sum": null,
|
||||
"mean": null,
|
||||
"stddev": null,
|
||||
"min": null,
|
||||
"max": null
|
||||
},
|
||||
"requests": {
|
||||
"description": "number of HTTP requests",
|
||||
"current": 2.0,
|
||||
"sum": 2.0,
|
||||
"mean": 0.25,
|
||||
"stddev": 0.70699999999999996181,
|
||||
"min": 0,
|
||||
"max": 2
|
||||
},
|
||||
"bulk_requests": {
|
||||
"description": "number of bulk requests",
|
||||
"current": null,
|
||||
"sum": null,
|
||||
"mean": null,
|
||||
"stddev": null,
|
||||
"min": null,
|
||||
"max": null
|
||||
},
|
||||
"view_reads": {
|
||||
"description": "number of view reads",
|
||||
"current": null,
|
||||
"sum": null,
|
||||
"mean": null,
|
||||
"stddev": null,
|
||||
"min": null,
|
||||
"max": null
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
`
|
||||
fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.URL.Path == "/_stats" {
|
||||
_, _ = w.Write([]byte(js))
|
||||
} else {
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
}
|
||||
}))
|
||||
defer fakeServer.Close()
|
||||
|
||||
plugin := &couchdb.CouchDB{
|
||||
HOSTs: []string{fakeServer.URL + "/_stats"},
|
||||
}
|
||||
|
||||
var acc testutil.Accumulator
|
||||
require.NoError(t, plugin.Gather(&acc))
|
||||
}
|
||||
210
plugins/inputs/disque/disque.go
Normal file
@@ -0,0 +1,210 @@
|
||||
package disque
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
)
|
||||
|
||||
type Disque struct {
|
||||
Servers []string
|
||||
|
||||
c net.Conn
|
||||
buf []byte
|
||||
}
|
||||
|
||||
var sampleConfig = `
|
||||
## An array of URIs to gather stats about. Specify an IP or hostname
|
||||
## with optional port and password.
|
||||
## e.g. disque://localhost, disque://10.10.3.33:18832, 10.0.0.1:10000, etc.
|
||||
## If no servers are specified, then localhost is used as the host.
|
||||
servers = ["localhost"]
|
||||
`
|
||||
|
||||
var defaultTimeout = 5 * time.Second
|
||||
|
||||
func (r *Disque) SampleConfig() string {
|
||||
return sampleConfig
|
||||
}
|
||||
|
||||
func (r *Disque) Description() string {
|
||||
return "Read metrics from one or many disque servers"
|
||||
}
|
||||
|
||||
var Tracking = map[string]string{
|
||||
"uptime_in_seconds": "uptime",
|
||||
"connected_clients": "clients",
|
||||
"blocked_clients": "blocked_clients",
|
||||
"used_memory": "used_memory",
|
||||
"used_memory_rss": "used_memory_rss",
|
||||
"used_memory_peak": "used_memory_peak",
|
||||
"total_connections_received": "total_connections_received",
|
||||
"total_commands_processed": "total_commands_processed",
|
||||
"instantaneous_ops_per_sec": "instantaneous_ops_per_sec",
|
||||
"latest_fork_usec": "latest_fork_usec",
|
||||
"mem_fragmentation_ratio": "mem_fragmentation_ratio",
|
||||
"used_cpu_sys": "used_cpu_sys",
|
||||
"used_cpu_user": "used_cpu_user",
|
||||
"used_cpu_sys_children": "used_cpu_sys_children",
|
||||
"used_cpu_user_children": "used_cpu_user_children",
|
||||
"registered_jobs": "registered_jobs",
|
||||
"registered_queues": "registered_queues",
|
||||
}
|
||||
|
||||
var ErrProtocolError = errors.New("disque protocol error")
|
||||
|
||||
// Reads stats from all configured servers and accumulates stats.
|
||||
// Returns one of the errors encountered while gathering stats (if any).
|
||||
func (g *Disque) Gather(acc telegraf.Accumulator) error {
|
||||
if len(g.Servers) == 0 {
|
||||
url := &url.URL{
|
||||
Host: ":7711",
|
||||
}
|
||||
return g.gatherServer(url, acc)
|
||||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
|
||||
var outerr error
|
||||
|
||||
for _, serv := range g.Servers {
|
||||
u, err := url.Parse(serv)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Unable to parse to address '%s': %s", serv, err)
|
||||
} else if u.Scheme == "" {
|
||||
// fall back to a simple string-based address (e.g. "10.0.0.1:10000")
|
||||
u.Scheme = "tcp"
|
||||
u.Host = serv
|
||||
u.Path = ""
|
||||
}
|
||||
wg.Add(1)
|
||||
go func(serv string) {
|
||||
defer wg.Done()
|
||||
outerr = g.gatherServer(u, acc)
|
||||
}(serv)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
|
||||
return outerr
|
||||
}
|
||||
|
||||
const defaultPort = "7711"
|
||||
|
||||
func (g *Disque) gatherServer(addr *url.URL, acc telegraf.Accumulator) error {
|
||||
if g.c == nil {
|
||||
|
||||
_, _, err := net.SplitHostPort(addr.Host)
|
||||
if err != nil {
|
||||
addr.Host = addr.Host + ":" + defaultPort
|
||||
}
|
||||
|
||||
c, err := net.DialTimeout("tcp", addr.Host, defaultTimeout)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Unable to connect to disque server '%s': %s", addr.Host, err)
|
||||
}
|
||||
|
||||
if addr.User != nil {
|
||||
pwd, set := addr.User.Password()
|
||||
if set && pwd != "" {
|
||||
c.Write([]byte(fmt.Sprintf("AUTH %s\r\n", pwd)))
|
||||
|
||||
r := bufio.NewReader(c)
|
||||
|
||||
line, err := r.ReadString('\n')
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if line[0] != '+' {
|
||||
return fmt.Errorf("%s", strings.TrimSpace(line)[1:])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
g.c = c
|
||||
}
|
||||
|
||||
// Extend connection
|
||||
g.c.SetDeadline(time.Now().Add(defaultTimeout))
|
||||
|
||||
g.c.Write([]byte("info\r\n"))
|
||||
|
||||
r := bufio.NewReader(g.c)
|
||||
|
||||
line, err := r.ReadString('\n')
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if line[0] != '$' {
|
||||
return fmt.Errorf("bad line start: %s", ErrProtocolError)
|
||||
}
|
||||
|
||||
line = strings.TrimSpace(line)
|
||||
|
||||
szStr := line[1:]
|
||||
|
||||
sz, err := strconv.Atoi(szStr)
|
||||
if err != nil {
|
||||
return fmt.Errorf("bad size string <<%s>>: %s", szStr, ErrProtocolError)
|
||||
}
|
||||
|
||||
var read int
|
||||
|
||||
fields := make(map[string]interface{})
|
||||
tags := map[string]string{"disque_host": addr.String()}
|
||||
for read < sz {
|
||||
line, err := r.ReadString('\n')
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
read += len(line)
|
||||
|
||||
if len(line) == 1 || line[0] == '#' {
|
||||
continue
|
||||
}
|
||||
|
||||
parts := strings.SplitN(line, ":", 2)
|
||||
|
||||
name := parts[0]
|
||||
|
||||
metric, ok := Tracking[name]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
val := strings.TrimSpace(parts[1])
|
||||
|
||||
ival, err := strconv.ParseUint(val, 10, 64)
|
||||
if err == nil {
|
||||
fields[metric] = ival
|
||||
continue
|
||||
}
|
||||
|
||||
fval, err := strconv.ParseFloat(val, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fields[metric] = fval
|
||||
}
|
||||
acc.AddFields("disque", fields, tags)
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
inputs.Add("disque", func() telegraf.Input {
|
||||
return &Disque{}
|
||||
})
|
||||
}
|
||||
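Two notes on the code above: the `outerr` fan-out in `Gather` has the same unsynchronized write discussed for the couchbase plugin, and the per-line value conversion tries an unsigned integer before falling back to a float. The conversion, condensed into a standalone helper for clarity (hypothetical name, same logic):

```
package example

import "strconv"

// parseInfoValue converts one INFO value the way the plugin does:
// integers become uint64 fields, decimals become float64.
func parseInfoValue(val string) (interface{}, error) {
	if ival, err := strconv.ParseUint(val, 10, 64); err == nil {
		return ival, nil
	}
	fval, err := strconv.ParseFloat(val, 64)
	if err != nil {
		return nil, err
	}
	return fval, nil
}
```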
217
plugins/inputs/disque/disque_test.go
Normal file
@@ -0,0 +1,217 @@
|
||||
package disque
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"net"
|
||||
"testing"
|
||||
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestDisqueGeneratesMetrics(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("Skipping integration test in short mode")
|
||||
}
|
||||
|
||||
l, err := net.Listen("tcp", "localhost:0")
|
||||
require.NoError(t, err)
|
||||
|
||||
defer l.Close()
|
||||
|
||||
go func() {
|
||||
c, err := l.Accept()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
buf := bufio.NewReader(c)
|
||||
|
||||
for {
|
||||
line, err := buf.ReadString('\n')
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if line != "info\r\n" {
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Fprintf(c, "$%d\n", len(testOutput))
|
||||
c.Write([]byte(testOutput))
|
||||
}
|
||||
}()
|
||||
|
||||
addr := fmt.Sprintf("disque://%s", l.Addr().String())
|
||||
|
||||
r := &Disque{
|
||||
Servers: []string{addr},
|
||||
}
|
||||
|
||||
var acc testutil.Accumulator
|
||||
|
||||
err = r.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
|
||||
fields := map[string]interface{}{
|
||||
"uptime": uint64(1452705),
|
||||
"clients": uint64(31),
|
||||
"blocked_clients": uint64(13),
|
||||
"used_memory": uint64(1840104),
|
||||
"used_memory_rss": uint64(3227648),
|
||||
"used_memory_peak": uint64(89603656),
|
||||
"total_connections_received": uint64(5062777),
|
||||
"total_commands_processed": uint64(12308396),
|
||||
"instantaneous_ops_per_sec": uint64(18),
|
||||
"latest_fork_usec": uint64(1644),
|
||||
"registered_jobs": uint64(360),
|
||||
"registered_queues": uint64(12),
|
||||
"mem_fragmentation_ratio": float64(1.75),
|
||||
"used_cpu_sys": float64(19585.73),
|
||||
"used_cpu_user": float64(11255.96),
|
||||
"used_cpu_sys_children": float64(1.75),
|
||||
"used_cpu_user_children": float64(1.91),
|
||||
}
|
||||
acc.AssertContainsFields(t, "disque", fields)
|
||||
}
|
||||
|
||||
func TestDisqueCanPullStatsFromMultipleServers(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("Skipping integration test in short mode")
|
||||
}
|
||||
|
||||
l, err := net.Listen("tcp", "localhost:0")
|
||||
require.NoError(t, err)
|
||||
|
||||
defer l.Close()
|
||||
|
||||
go func() {
|
||||
c, err := l.Accept()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
buf := bufio.NewReader(c)
|
||||
|
||||
for {
|
||||
line, err := buf.ReadString('\n')
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if line != "info\r\n" {
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Fprintf(c, "$%d\n", len(testOutput))
|
||||
c.Write([]byte(testOutput))
|
||||
}
|
||||
}()
|
||||
|
||||
addr := fmt.Sprintf("disque://%s", l.Addr().String())
|
||||
|
||||
r := &Disque{
|
||||
Servers: []string{addr},
|
||||
}
|
||||
|
||||
var acc testutil.Accumulator
|
||||
|
||||
err = r.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
|
||||
fields := map[string]interface{}{
|
||||
"uptime": uint64(1452705),
|
||||
"clients": uint64(31),
|
||||
"blocked_clients": uint64(13),
|
||||
"used_memory": uint64(1840104),
|
||||
"used_memory_rss": uint64(3227648),
|
||||
"used_memory_peak": uint64(89603656),
|
||||
"total_connections_received": uint64(5062777),
|
||||
"total_commands_processed": uint64(12308396),
|
||||
"instantaneous_ops_per_sec": uint64(18),
|
||||
"latest_fork_usec": uint64(1644),
|
||||
"registered_jobs": uint64(360),
|
||||
"registered_queues": uint64(12),
|
||||
"mem_fragmentation_ratio": float64(1.75),
|
||||
"used_cpu_sys": float64(19585.73),
|
||||
"used_cpu_user": float64(11255.96),
|
||||
"used_cpu_sys_children": float64(1.75),
|
||||
"used_cpu_user_children": float64(1.91),
|
||||
}
|
||||
acc.AssertContainsFields(t, "disque", fields)
|
||||
}
|
||||
|
||||
const testOutput = `# Server
|
||||
disque_version:0.0.1
|
||||
disque_git_sha1:b5247598
|
||||
disque_git_dirty:0
|
||||
disque_build_id:379fda78983a60c6
|
||||
os:Linux 3.13.0-44-generic x86_64
|
||||
arch_bits:64
|
||||
multiplexing_api:epoll
|
||||
gcc_version:4.8.2
|
||||
process_id:32420
|
||||
run_id:1cfdfa4c6bc3f285182db5427522a8a4c16e42e4
|
||||
tcp_port:7711
|
||||
uptime_in_seconds:1452705
|
||||
uptime_in_days:16
|
||||
hz:10
|
||||
config_file:/usr/local/etc/disque/disque.conf
|
||||
|
||||
# Clients
|
||||
connected_clients:31
|
||||
client_longest_output_list:0
|
||||
client_biggest_input_buf:0
|
||||
blocked_clients:13
|
||||
|
||||
# Memory
|
||||
used_memory:1840104
|
||||
used_memory_human:1.75M
|
||||
used_memory_rss:3227648
|
||||
used_memory_peak:89603656
|
||||
used_memory_peak_human:85.45M
|
||||
mem_fragmentation_ratio:1.75
|
||||
mem_allocator:jemalloc-3.6.0
|
||||
|
||||
# Jobs
|
||||
registered_jobs:360
|
||||
|
||||
# Queues
|
||||
registered_queues:12
|
||||
|
||||
# Persistence
|
||||
loading:0
|
||||
aof_enabled:1
|
||||
aof_state:on
|
||||
aof_rewrite_in_progress:0
|
||||
aof_rewrite_scheduled:0
|
||||
aof_last_rewrite_time_sec:0
|
||||
aof_current_rewrite_time_sec:-1
|
||||
aof_last_bgrewrite_status:ok
|
||||
aof_last_write_status:ok
|
||||
aof_current_size:41952430
|
||||
aof_base_size:9808
|
||||
aof_pending_rewrite:0
|
||||
aof_buffer_length:0
|
||||
aof_rewrite_buffer_length:0
|
||||
aof_pending_bio_fsync:0
|
||||
aof_delayed_fsync:1
|
||||
|
||||
# Stats
|
||||
total_connections_received:5062777
|
||||
total_commands_processed:12308396
|
||||
instantaneous_ops_per_sec:18
|
||||
total_net_input_bytes:1346996528
|
||||
total_net_output_bytes:1967551763
|
||||
instantaneous_input_kbps:1.38
|
||||
instantaneous_output_kbps:1.78
|
||||
rejected_connections:0
|
||||
latest_fork_usec:1644
|
||||
|
||||
# CPU
|
||||
used_cpu_sys:19585.73
|
||||
used_cpu_user:11255.96
|
||||
used_cpu_sys_children:1.75
|
||||
used_cpu_user_children:1.91
|
||||
`
|
||||
51
plugins/inputs/dns_query/README.md
Normal file
@@ -0,0 +1,51 @@
|
||||
# DNS Query Input Plugin
|
||||
|
||||
The DNS plugin gathers DNS query times in milliseconds, similar to [Dig](https://en.wikipedia.org/wiki/Dig_\(command\)).
|
||||
|
||||
### Configuration:
|
||||
|
||||
```
|
||||
# Sample Config:
|
||||
[[inputs.dns_query]]
|
||||
## servers to query
|
||||
servers = ["8.8.8.8"] # required
|
||||
|
||||
## Domains or subdomains to query. "." (root) is default
|
||||
domains = ["."] # optional
|
||||
|
||||
## Query record type. Possible values: A, AAAA, ANY, CNAME, MX, NS, PTR, SOA, SPF, SRV, TXT. Default is "NS"
|
||||
record_type = "A" # optional
|
||||
|
||||
## DNS server port. 53 is the default.
|
||||
port = 53 # optional
|
||||
|
||||
## Query timeout in seconds. Default is 2 seconds
|
||||
timeout = 2 # optional
|
||||
```
|
||||
|
||||
To query more than one record type, define a separate plugin instance for each type:
|
||||
|
||||
```
|
||||
[[inputs.dns_query]]
|
||||
domains = ["mjasion.pl"]
|
||||
servers = ["8.8.8.8", "8.8.4.4"]
|
||||
record_type = "A"
|
||||
|
||||
[[inputs.dns_query]]
|
||||
domains = ["mjasion.pl"]
|
||||
servers = ["8.8.8.8", "8.8.4.4"]
|
||||
record_type = "MX"
|
||||
```
|
||||
|
||||
### Tags:
|
||||
|
||||
- server
|
||||
- domain
|
||||
- record_type
|
||||
|
||||
### Example output:
|
||||
|
||||
```
|
||||
./telegraf -config telegraf.conf -input-filter dns_query -test
|
||||
> dns_query,domain=mjasion.pl,record_type=A,server=8.8.8.8 query_time_ms=67.189842 1456082743585760680
|
||||
```
|
||||
160
plugins/inputs/dns_query/dns_query.go
Normal file
@@ -0,0 +1,160 @@
|
||||
package dns_query
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
"github.com/miekg/dns"
|
||||
"net"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
type DnsQuery struct {
|
||||
// Domains or subdomains to query
|
||||
Domains []string
|
||||
|
||||
// Server to query
|
||||
Servers []string
|
||||
|
||||
// Record type
|
||||
RecordType string `toml:"record_type"`
|
||||
|
||||
// DNS server port number
|
||||
Port int
|
||||
|
||||
// DNS query timeout in seconds. 0 means no timeout.
|
||||
Timeout int
|
||||
}
|
||||
|
||||
var sampleConfig = `
|
||||
## servers to query
|
||||
servers = ["8.8.8.8"] # required
|
||||
|
||||
## Domains or subdomains to query. "."(root) is default
|
||||
domains = ["."] # optional
|
||||
|
||||
## Query record type. Default is "A"
|
||||
## Possible values: A, AAAA, ANY, CNAME, MX, NS, PTR, SOA, SPF, SRV, TXT.
|
||||
record_type = "A" # optional
|
||||
|
||||
## DNS server port. 53 is the default.
|
||||
port = 53 # optional
|
||||
|
||||
## Query timeout in seconds. Default is 2 seconds
|
||||
timeout = 2 # optional
|
||||
`
|
||||
|
||||
func (d *DnsQuery) SampleConfig() string {
|
||||
return sampleConfig
|
||||
}
|
||||
|
||||
func (d *DnsQuery) Description() string {
|
||||
return "Query given DNS server and gives statistics"
|
||||
}
|
||||
func (d *DnsQuery) Gather(acc telegraf.Accumulator) error {
|
||||
d.setDefaultValues()
|
||||
for _, domain := range d.Domains {
|
||||
for _, server := range d.Servers {
|
||||
dnsQueryTime, err := d.getDnsQueryTime(domain, server)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tags := map[string]string{
|
||||
"server": server,
|
||||
"domain": domain,
|
||||
"record_type": d.RecordType,
|
||||
}
|
||||
|
||||
fields := map[string]interface{}{"query_time_ms": dnsQueryTime}
|
||||
acc.AddFields("dns_query", fields, tags)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *DnsQuery) setDefaultValues() {
|
||||
if len(d.RecordType) == 0 {
|
||||
d.RecordType = "NS"
|
||||
}
|
||||
|
||||
if len(d.Domains) == 0 {
|
||||
d.Domains = []string{"."}
|
||||
d.RecordType = "NS"
|
||||
}
|
||||
|
||||
if d.Port == 0 {
|
||||
d.Port = 53
|
||||
}
|
||||
|
||||
if d.Timeout == 0 {
|
||||
d.Timeout = 2
|
||||
}
|
||||
}
|
||||
|
||||
func (d *DnsQuery) getDnsQueryTime(domain string, server string) (float64, error) {
|
||||
dnsQueryTime := float64(0)
|
||||
|
||||
c := new(dns.Client)
|
||||
c.ReadTimeout = time.Duration(d.Timeout) * time.Second
|
||||
|
||||
m := new(dns.Msg)
|
||||
recordType, err := d.parseRecordType()
|
||||
if err != nil {
|
||||
return dnsQueryTime, err
|
||||
}
|
||||
m.SetQuestion(dns.Fqdn(domain), recordType)
|
||||
m.RecursionDesired = true
|
||||
|
||||
r, rtt, err := c.Exchange(m, net.JoinHostPort(server, strconv.Itoa(d.Port)))
|
||||
if err != nil {
|
||||
return dnsQueryTime, err
|
||||
}
|
||||
if r.Rcode != dns.RcodeSuccess {
|
||||
return dnsQueryTime, errors.New(fmt.Sprintf("Invalid answer name %s after %s query for %s\n", domain, d.RecordType, domain))
|
||||
}
|
||||
dnsQueryTime = float64(rtt.Nanoseconds()) / 1e6
|
||||
return dnsQueryTime, nil
|
||||
}
|
||||
|
||||
func (d *DnsQuery) parseRecordType() (uint16, error) {
|
||||
var recordType uint16
|
||||
var err error
|
||||
|
||||
switch d.RecordType {
|
||||
case "A":
|
||||
recordType = dns.TypeA
|
||||
case "AAAA":
|
||||
recordType = dns.TypeAAAA
|
||||
case "ANY":
|
||||
recordType = dns.TypeANY
|
||||
case "CNAME":
|
||||
recordType = dns.TypeCNAME
|
||||
case "MX":
|
||||
recordType = dns.TypeMX
|
||||
case "NS":
|
||||
recordType = dns.TypeNS
|
||||
case "PTR":
|
||||
recordType = dns.TypePTR
|
||||
case "SOA":
|
||||
recordType = dns.TypeSOA
|
||||
case "SPF":
|
||||
recordType = dns.TypeSPF
|
||||
case "SRV":
|
||||
recordType = dns.TypeSRV
|
||||
case "TXT":
|
||||
recordType = dns.TypeTXT
|
||||
default:
|
||||
err = fmt.Errorf("record type %s not recognized", d.RecordType)
|
||||
}
|
||||
|
||||
return recordType, err
|
||||
}
|
||||
|
||||
func init() {
|
||||
inputs.Add("dns_query", func() telegraf.Input {
|
||||
return &DnsQuery{}
|
||||
})
|
||||
}
|
||||
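The switch in `parseRecordType` above is a straight name-to-constant mapping, so it can also be written as a table lookup; a sketch of that alternative (illustrative, not the plugin's code):

```
package example

import (
	"fmt"

	"github.com/miekg/dns"
)

// recordTypes maps configuration strings to miekg/dns constants.
var recordTypes = map[string]uint16{
	"A": dns.TypeA, "AAAA": dns.TypeAAAA, "ANY": dns.TypeANY,
	"CNAME": dns.TypeCNAME, "MX": dns.TypeMX, "NS": dns.TypeNS,
	"PTR": dns.TypePTR, "SOA": dns.TypeSOA, "SPF": dns.TypeSPF,
	"SRV": dns.TypeSRV, "TXT": dns.TypeTXT,
}

func lookupRecordType(name string) (uint16, error) {
	rt, ok := recordTypes[name]
	if !ok {
		return 0, fmt.Errorf("record type %s not recognized", name)
	}
	return rt, nil
}
```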
210
plugins/inputs/dns_query/dns_query_test.go
Normal file
@@ -0,0 +1,210 @@
|
||||
package dns_query
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
|
||||
"github.com/miekg/dns"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
var servers = []string{"8.8.8.8"}
|
||||
var domains = []string{"google.com"}
|
||||
|
||||
func TestGathering(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("Skipping network-dependent test in short mode.")
|
||||
}
|
||||
var dnsConfig = DnsQuery{
|
||||
Servers: servers,
|
||||
Domains: domains,
|
||||
}
|
||||
var acc testutil.Accumulator
|
||||
|
||||
err := dnsConfig.Gather(&acc)
|
||||
assert.NoError(t, err)
|
||||
metric, ok := acc.Get("dns_query")
|
||||
require.True(t, ok)
|
||||
queryTime, _ := metric.Fields["query_time_ms"].(float64)
|
||||
|
||||
assert.NotEqual(t, 0, queryTime)
|
||||
}
|
||||
|
||||
func TestGatheringMxRecord(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("Skipping network-dependent test in short mode.")
|
||||
}
|
||||
var dnsConfig = DnsQuery{
|
||||
Servers: servers,
|
||||
Domains: domains,
|
||||
}
|
||||
var acc testutil.Accumulator
|
||||
dnsConfig.RecordType = "MX"
|
||||
|
||||
err := dnsConfig.Gather(&acc)
|
||||
assert.NoError(t, err)
|
||||
metric, ok := acc.Get("dns_query")
|
||||
require.True(t, ok)
|
||||
queryTime, _ := metric.Fields["query_time_ms"].(float64)
|
||||
|
||||
assert.NotEqual(t, 0, queryTime)
|
||||
}
|
||||
|
||||
func TestGatheringRootDomain(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("Skipping network-dependent test in short mode.")
|
||||
}
|
||||
var dnsConfig = DnsQuery{
|
||||
Servers: servers,
|
||||
Domains: []string{"."},
|
||||
RecordType: "MX",
|
||||
}
|
||||
var acc testutil.Accumulator
|
||||
tags := map[string]string{
|
||||
"server": "8.8.8.8",
|
||||
"domain": ".",
|
||||
"record_type": "MX",
|
||||
}
|
||||
fields := map[string]interface{}{}
|
||||
|
||||
err := dnsConfig.Gather(&acc)
|
||||
assert.NoError(t, err)
|
||||
metric, ok := acc.Get("dns_query")
|
||||
require.True(t, ok)
|
||||
queryTime, _ := metric.Fields["query_time_ms"].(float64)
|
||||
|
||||
fields["query_time_ms"] = queryTime
|
||||
acc.AssertContainsTaggedFields(t, "dns_query", fields, tags)
|
||||
}
|
||||
|
||||
func TestMetricContainsServerAndDomainAndRecordTypeTags(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("Skipping network-dependent test in short mode.")
|
||||
}
|
||||
var dnsConfig = DnsQuery{
|
||||
Servers: servers,
|
||||
Domains: domains,
|
||||
}
|
||||
var acc testutil.Accumulator
|
||||
tags := map[string]string{
|
||||
"server": "8.8.8.8",
|
||||
"domain": "google.com",
|
||||
"record_type": "NS",
|
||||
}
|
||||
fields := map[string]interface{}{}
|
||||
|
||||
err := dnsConfig.Gather(&acc)
|
||||
assert.NoError(t, err)
|
||||
metric, ok := acc.Get("dns_query")
|
||||
require.True(t, ok)
|
||||
queryTime, _ := metric.Fields["query_time_ms"].(float64)
|
||||
|
||||
fields["query_time_ms"] = queryTime
|
||||
acc.AssertContainsTaggedFields(t, "dns_query", fields, tags)
|
||||
}
|
||||
|
||||
func TestGatheringTimeout(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("Skipping network-dependent test in short mode.")
|
||||
}
|
||||
var dnsConfig = DnsQuery{
|
||||
Servers: servers,
|
||||
Domains: domains,
|
||||
}
|
||||
var acc testutil.Accumulator
|
||||
dnsConfig.Port = 60054
|
||||
dnsConfig.Timeout = 1
|
||||
var err error
|
||||
|
||||
channel := make(chan error, 1)
|
||||
go func() {
|
||||
channel <- dnsConfig.Gather(&acc)
|
||||
}()
|
||||
select {
|
||||
case res := <-channel:
|
||||
err = res
|
||||
case <-time.After(time.Second * 2):
|
||||
err = nil
|
||||
}
|
||||
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "i/o timeout")
|
||||
}
|
||||
|
||||
func TestSettingDefaultValues(t *testing.T) {
|
||||
dnsConfig := DnsQuery{}
|
||||
|
||||
dnsConfig.setDefaultValues()
|
||||
|
||||
assert.Equal(t, []string{"."}, dnsConfig.Domains, "Default domain not equal \".\"")
|
||||
assert.Equal(t, "NS", dnsConfig.RecordType, "Default record type not equal 'NS'")
|
||||
assert.Equal(t, 53, dnsConfig.Port, "Default port number not equal 53")
|
||||
assert.Equal(t, 2, dnsConfig.Timeout, "Default timeout not equal 2")
|
||||
|
||||
dnsConfig = DnsQuery{Domains: []string{"."}}
|
||||
|
||||
dnsConfig.setDefaultValues()
|
||||
|
||||
assert.Equal(t, "NS", dnsConfig.RecordType, "Default record type not equal 'NS'")
|
||||
}
|
||||
|
||||
func TestRecordTypeParser(t *testing.T) {
|
||||
var dnsConfig = DnsQuery{}
|
||||
var recordType uint16
|
||||
|
||||
dnsConfig.RecordType = "A"
|
||||
recordType, _ = dnsConfig.parseRecordType()
|
||||
assert.Equal(t, dns.TypeA, recordType)
|
||||
|
||||
dnsConfig.RecordType = "AAAA"
|
||||
recordType, _ = dnsConfig.parseRecordType()
|
||||
assert.Equal(t, dns.TypeAAAA, recordType)
|
||||
|
||||
dnsConfig.RecordType = "ANY"
|
||||
recordType, _ = dnsConfig.parseRecordType()
|
||||
assert.Equal(t, dns.TypeANY, recordType)
|
||||
|
||||
dnsConfig.RecordType = "CNAME"
|
||||
recordType, _ = dnsConfig.parseRecordType()
|
||||
assert.Equal(t, dns.TypeCNAME, recordType)
|
||||
|
||||
dnsConfig.RecordType = "MX"
|
||||
recordType, _ = dnsConfig.parseRecordType()
|
||||
assert.Equal(t, dns.TypeMX, recordType)
|
||||
|
||||
dnsConfig.RecordType = "NS"
|
||||
recordType, _ = dnsConfig.parseRecordType()
|
||||
assert.Equal(t, dns.TypeNS, recordType)
|
||||
|
||||
dnsConfig.RecordType = "PTR"
|
||||
recordType, _ = dnsConfig.parseRecordType()
|
||||
assert.Equal(t, dns.TypePTR, recordType)
|
||||
|
||||
dnsConfig.RecordType = "SOA"
|
||||
recordType, _ = dnsConfig.parseRecordType()
|
||||
assert.Equal(t, dns.TypeSOA, recordType)
|
||||
|
||||
dnsConfig.RecordType = "SPF"
|
||||
recordType, _ = dnsConfig.parseRecordType()
|
||||
assert.Equal(t, dns.TypeSPF, recordType)
|
||||
|
||||
dnsConfig.RecordType = "SRV"
|
||||
recordType, _ = dnsConfig.parseRecordType()
|
||||
assert.Equal(t, dns.TypeSRV, recordType)
|
||||
|
||||
dnsConfig.RecordType = "TXT"
|
||||
recordType, _ = dnsConfig.parseRecordType()
|
||||
assert.Equal(t, dns.TypeTXT, recordType)
|
||||
}
|
||||
|
||||
func TestRecordTypeParserError(t *testing.T) {
|
||||
var dnsConfig = DnsQuery{}
|
||||
var err error
|
||||
|
||||
dnsConfig.RecordType = "nil"
|
||||
_, err = dnsConfig.parseRecordType()
|
||||
assert.Error(t, err)
|
||||
}
|
||||
195
plugins/inputs/docker/README.md
Normal file
@@ -0,0 +1,195 @@
|
||||
# Docker Input Plugin
|
||||
|
||||
The docker plugin uses the docker remote API to gather metrics on running
|
||||
docker containers. You can read Docker's documentation for their remote API
|
||||
[here](https://docs.docker.com/engine/reference/api/docker_remote_api_v1.20/#get-container-stats-based-on-resource-usage)
|
||||
|
||||
The docker plugin uses the excellent
|
||||
[docker engine-api](https://github.com/docker/engine-api) library to
|
||||
gather stats. Documentation for the library can be found
|
||||
[here](https://godoc.org/github.com/docker/engine-api) and documentation
|
||||
for the stat structure can be found
|
||||
[here](https://godoc.org/github.com/docker/engine-api/types#Stats)
|
||||
|
||||
### Configuration:
|
||||
|
||||
```
|
||||
# Read metrics about docker containers
|
||||
[[inputs.docker]]
|
||||
# Docker Endpoint
|
||||
# To use TCP, set endpoint = "tcp://[ip]:[port]"
|
||||
# To use environment variables (ie, docker-machine), set endpoint = "ENV"
|
||||
endpoint = "unix:///var/run/docker.sock"
|
||||
# Only collect metrics for these containers, collect all if empty
|
||||
container_names = []
|
||||
```
|
||||
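With `endpoint = "ENV"`, the connection details come from the standard Docker environment variables (DOCKER_HOST, DOCKER_CERT_PATH, DOCKER_TLS_VERIFY), matching a docker-machine shell. A sketch of how that branch could be resolved with engine-api (`newDockerClient` is our name; the plugin's actual constructor may differ):

```
package example

import "github.com/docker/engine-api/client"

// newDockerClient resolves the configured endpoint: "ENV" builds the
// client from the Docker environment variables, anything else is used
// as an explicit endpoint address.
func newDockerClient(endpoint string) (*client.Client, error) {
	if endpoint == "ENV" {
		return client.NewEnvClient()
	}
	headers := map[string]string{"User-Agent": "telegraf-example"}
	return client.NewClient(endpoint, "", nil, headers)
}
```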
|
||||
### Measurements & Fields:
|
||||
|
||||
Every effort was made to preserve the names based on the JSON response from the
|
||||
docker API.
|
||||
|
||||
Note that the docker_container_cpu metric may appear multiple times per collection,
|
||||
based on the availability of per-cpu stats on your system.
|
||||
|
||||
- docker_container_mem
|
||||
- total_pgmafault
|
||||
- cache
|
||||
- mapped_file
|
||||
- total_inactive_file
|
||||
- pgpgout
|
||||
- rss
|
||||
- total_mapped_file
|
||||
- writeback
|
||||
- unevictable
|
||||
- pgpgin
|
||||
- total_unevictable
|
||||
- pgmajfault
|
||||
- total_rss
|
||||
- total_rss_huge
|
||||
- total_writeback
|
||||
- total_inactive_anon
|
||||
- rss_huge
|
||||
- hierarchical_memory_limit
|
||||
- total_pgfault
|
||||
- total_active_file
|
||||
- active_anon
|
||||
- total_active_anon
|
||||
- total_pgpgout
|
||||
- total_cache
|
||||
- inactive_anon
|
||||
- active_file
|
||||
- pgfault
|
||||
- inactive_file
|
||||
- total_pgpgin
|
||||
- max_usage
|
||||
- usage
|
||||
- failcnt
|
||||
- limit
|
||||
- container_id
|
||||
- docker_container_cpu
|
||||
- throttling_periods
|
||||
- throttling_throttled_periods
|
||||
- throttling_throttled_time
|
||||
- usage_in_kernelmode
|
||||
- usage_in_usermode
|
||||
- usage_system
|
||||
- usage_total
|
||||
- usage_percent
|
||||
- container_id
|
||||
- docker_container_net
|
||||
- rx_dropped
|
||||
- rx_bytes
|
||||
- rx_errors
|
||||
- tx_packets
|
||||
- tx_dropped
|
||||
- rx_packets
|
||||
- tx_errors
|
||||
- tx_bytes
|
||||
- container_id
|
||||
- docker_container_blkio
|
||||
- io_service_bytes_recursive_async
|
||||
- io_service_bytes_recursive_read
|
||||
- io_service_bytes_recursive_sync
|
||||
- io_service_bytes_recursive_total
|
||||
- io_service_bytes_recursive_write
|
||||
- io_serviced_recursive_async
|
||||
- io_serviced_recursive_read
|
||||
- io_serviced_recursive_sync
|
||||
- io_serviced_recursive_total
|
||||
- io_serviced_recursive_write
|
||||
- container_id
|
||||
- docker_
|
||||
- n_used_file_descriptors
|
||||
- n_cpus
|
||||
- n_containers
|
||||
- n_images
|
||||
- n_goroutines
|
||||
- n_listener_events
|
||||
- memory_total
|
||||
- pool_blocksize
|
||||
- docker_data
|
||||
- available
|
||||
- total
|
||||
- used
|
||||
- docker_metadata
|
||||
- available
|
||||
- total
|
||||
- used
|
||||
|
||||
|
||||
### Tags:

- docker (memory_total)
    - unit=bytes
- docker (pool_blocksize)
    - unit=bytes
- docker_data
    - unit=bytes
- docker_metadata
    - unit=bytes

- docker_container_mem specific:
    - container_image
    - container_name
- docker_container_cpu specific:
    - container_image
    - container_name
    - cpu
- docker_container_net specific:
    - container_image
    - container_name
    - network
- docker_container_blkio specific:
    - container_image
    - container_name
    - device

### Example Output:

```
% ./telegraf -config ~/ws/telegraf.conf -input-filter docker -test
* Plugin: docker, Collection 1
> docker n_cpus=8i 1456926671065383978
> docker n_used_file_descriptors=15i 1456926671065383978
> docker n_containers=7i 1456926671065383978
> docker n_images=152i 1456926671065383978
> docker n_goroutines=36i 1456926671065383978
> docker n_listener_events=0i 1456926671065383978
> docker,unit=bytes memory_total=18935443456i 1456926671065383978
> docker,unit=bytes pool_blocksize=65540i 1456926671065383978
> docker_data,unit=bytes available=24340000000i,total=107400000000i,used=14820000000i 1456926671065383978
> docker_metadata,unit=bytes available=2126999999i,total=2146999999i,used=20420000i 145692667106538
> docker_container_mem,\
container_image=spotify/kafka,container_name=kafka \
active_anon=52568064i,active_file=6926336i,cache=12038144i,fail_count=0i,\
hierarchical_memory_limit=9223372036854771712i,inactive_anon=52707328i,\
inactive_file=5111808i,limit=1044578304i,mapped_file=10301440i,\
max_usage=140656640i,pgfault=63762i,pgmajfault=2837i,pgpgin=73355i,\
pgpgout=45736i,rss=105275392i,rss_huge=4194304i,total_active_anon=52568064i,\
total_active_file=6926336i,total_cache=12038144i,total_inactive_anon=52707328i,\
total_inactive_file=5111808i,total_mapped_file=10301440i,total_pgfault=63762i,\
total_pgmajfault=0i,total_pgpgin=73355i,total_pgpgout=45736i,\
total_rss=105275392i,total_rss_huge=4194304i,total_unevictable=0i,\
total_writeback=0i,unevictable=0i,usage=117440512i,writeback=0i 1453409536840126713
> docker_container_cpu,\
container_image=spotify/kafka,container_name=kafka,cpu=cpu-total \
throttling_periods=0i,throttling_throttled_periods=0i,\
throttling_throttled_time=0i,usage_in_kernelmode=440000000i,\
usage_in_usermode=2290000000i,usage_system=84795360000000i,\
usage_total=6628208865i 1453409536840126713
> docker_container_cpu,\
container_image=spotify/kafka,container_name=kafka,cpu=cpu0 \
usage_total=6628208865i 1453409536840126713
> docker_container_net,\
container_image=spotify/kafka,container_name=kafka,network=eth0 \
rx_bytes=7468i,rx_dropped=0i,rx_errors=0i,rx_packets=94i,tx_bytes=946i,\
tx_dropped=0i,tx_errors=0i,tx_packets=13i 1453409536840126713
> docker_container_blkio,\
container_image=spotify/kafka,container_name=kafka,device=8:0 \
io_service_bytes_recursive_async=80216064i,io_service_bytes_recursive_read=79925248i,\
io_service_bytes_recursive_sync=77824i,io_service_bytes_recursive_total=80293888i,\
io_service_bytes_recursive_write=368640i,io_serviced_recursive_async=6562i,\
io_serviced_recursive_read=6492i,io_serviced_recursive_sync=37i,\
io_serviced_recursive_total=6599i,io_serviced_recursive_write=107i 1453409536840126713
```

471 plugins/inputs/docker/docker.go Normal file
@@ -0,0 +1,471 @@
package system

import (
	"encoding/json"
	"fmt"
	"io"
	"log"
	"regexp"
	"strconv"
	"strings"
	"sync"
	"time"

	"golang.org/x/net/context"

	"github.com/docker/engine-api/client"
	"github.com/docker/engine-api/types"
	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/internal"
	"github.com/influxdata/telegraf/plugins/inputs"
)

// Docker object
type Docker struct {
	Endpoint       string
	ContainerNames []string
	Timeout        internal.Duration

	client DockerClient
}

// DockerClient interface, useful for testing
type DockerClient interface {
	Info(ctx context.Context) (types.Info, error)
	ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error)
	ContainerStats(ctx context.Context, containerID string, stream bool) (io.ReadCloser, error)
}

// KB, MB, GB, TB, PB...human friendly units
const (
	KB = 1000
	MB = 1000 * KB
	GB = 1000 * MB
	TB = 1000 * GB
	PB = 1000 * TB
)

var (
	sizeRegex = regexp.MustCompile(`^(\d+(\.\d+)*) ?([kKmMgGtTpP])?[bB]?$`)
)

var sampleConfig = `
  ## Docker Endpoint
  ##   To use TCP, set endpoint = "tcp://[ip]:[port]"
  ##   To use environment variables (ie, docker-machine), set endpoint = "ENV"
  endpoint = "unix:///var/run/docker.sock"
  ## Only collect metrics for these containers, collect all if empty
  container_names = []
  ## Timeout for docker list, info, and stats commands
  timeout = "5s"
`

// Description returns the input description
func (d *Docker) Description() string {
	return "Read metrics about docker containers"
}

// SampleConfig prints sampleConfig
func (d *Docker) SampleConfig() string { return sampleConfig }

// Gather starts stats collection
func (d *Docker) Gather(acc telegraf.Accumulator) error {
	if d.client == nil {
		var c *client.Client
		var err error
		defaultHeaders := map[string]string{"User-Agent": "engine-api-cli-1.0"}
		if d.Endpoint == "ENV" {
			c, err = client.NewEnvClient()
			if err != nil {
				return err
			}
		} else if d.Endpoint == "" {
			c, err = client.NewClient("unix:///var/run/docker.sock", "", nil, defaultHeaders)
			if err != nil {
				return err
			}
		} else {
			c, err = client.NewClient(d.Endpoint, "", nil, defaultHeaders)
			if err != nil {
				return err
			}
		}
		d.client = c
	}

	// Get daemon info
	err := d.gatherInfo(acc)
	if err != nil {
		fmt.Println(err.Error())
	}

	// List containers
	opts := types.ContainerListOptions{}
	ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration)
	defer cancel()
	containers, err := d.client.ContainerList(ctx, opts)
	if err != nil {
		return err
	}

	// Get container data
	var wg sync.WaitGroup
	wg.Add(len(containers))
	for _, container := range containers {
		go func(c types.Container) {
			defer wg.Done()
			err := d.gatherContainer(c, acc)
			if err != nil {
				log.Printf("Error gathering container %s stats: %s\n",
					c.Names, err.Error())
			}
		}(container)
	}
	wg.Wait()

	return nil
}

func (d *Docker) gatherInfo(acc telegraf.Accumulator) error {
	// Init vars
	dataFields := make(map[string]interface{})
	metadataFields := make(map[string]interface{})
	now := time.Now()
	// Get info from docker daemon
	ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration)
	defer cancel()
	info, err := d.client.Info(ctx)
	if err != nil {
		return err
	}

	fields := map[string]interface{}{
		"n_cpus":                  info.NCPU,
		"n_used_file_descriptors": info.NFd,
		"n_containers":            info.Containers,
		"n_images":                info.Images,
		"n_goroutines":            info.NGoroutines,
		"n_listener_events":       info.NEventsListener,
	}
	// Add metrics
	acc.AddFields("docker",
		fields,
		nil,
		now)
	acc.AddFields("docker",
		map[string]interface{}{"memory_total": info.MemTotal},
		map[string]string{"unit": "bytes"},
		now)
	// Get storage metrics
	for _, rawData := range info.DriverStatus {
		// Try to convert string to int (bytes)
		value, err := parseSize(rawData[1])
		if err != nil {
			continue
		}
		name := strings.ToLower(strings.Replace(rawData[0], " ", "_", -1))
		if name == "pool_blocksize" {
			// pool blocksize
			acc.AddFields("docker",
				map[string]interface{}{"pool_blocksize": value},
				map[string]string{"unit": "bytes"},
				now)
		} else if strings.HasPrefix(name, "data_space_") {
			// data space
			fieldName := strings.TrimPrefix(name, "data_space_")
			dataFields[fieldName] = value
		} else if strings.HasPrefix(name, "metadata_space_") {
			// metadata space
			fieldName := strings.TrimPrefix(name, "metadata_space_")
			metadataFields[fieldName] = value
		}
	}
	if len(dataFields) > 0 {
		acc.AddFields("docker_data",
			dataFields,
			map[string]string{"unit": "bytes"},
			now)
	}
	if len(metadataFields) > 0 {
		acc.AddFields("docker_metadata",
			metadataFields,
			map[string]string{"unit": "bytes"},
			now)
	}
	return nil
}

func (d *Docker) gatherContainer(
	container types.Container,
	acc telegraf.Accumulator,
) error {
	var v *types.StatsJSON
	// Parse container name
	cname := "unknown"
	if len(container.Names) > 0 {
		// Not sure what to do with other names, just take the first.
		cname = strings.TrimPrefix(container.Names[0], "/")
	}

	tags := map[string]string{
		"container_name":  cname,
		"container_image": container.Image,
	}
	if len(d.ContainerNames) > 0 {
		if !sliceContains(cname, d.ContainerNames) {
			return nil
		}
	}

	ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration)
	defer cancel()
	r, err := d.client.ContainerStats(ctx, container.ID, false)
	if err != nil {
		// Return here: continuing with a nil reader would panic on Close.
		log.Printf("Error getting docker stats: %s\n", err.Error())
		return err
	}
	defer r.Close()
	dec := json.NewDecoder(r)
	if err = dec.Decode(&v); err != nil {
		if err == io.EOF {
			return nil
		}
		return fmt.Errorf("Error decoding: %s", err.Error())
	}

	// Add labels to tags
	for k, label := range container.Labels {
		tags[k] = label
	}

	gatherContainerStats(v, acc, tags, container.ID)

	return nil
}

func gatherContainerStats(
	stat *types.StatsJSON,
	acc telegraf.Accumulator,
	tags map[string]string,
	id string,
) {
	now := stat.Read

	memfields := map[string]interface{}{
		"max_usage":                 stat.MemoryStats.MaxUsage,
		"usage":                     stat.MemoryStats.Usage,
		"fail_count":                stat.MemoryStats.Failcnt,
		"limit":                     stat.MemoryStats.Limit,
		"total_pgmajfault":          stat.MemoryStats.Stats["total_pgmajfault"],
		"cache":                     stat.MemoryStats.Stats["cache"],
		"mapped_file":               stat.MemoryStats.Stats["mapped_file"],
		"total_inactive_file":       stat.MemoryStats.Stats["total_inactive_file"],
		"pgpgout":                   stat.MemoryStats.Stats["pgpgout"],
		"rss":                       stat.MemoryStats.Stats["rss"],
		"total_mapped_file":         stat.MemoryStats.Stats["total_mapped_file"],
		"writeback":                 stat.MemoryStats.Stats["writeback"],
		"unevictable":               stat.MemoryStats.Stats["unevictable"],
		"pgpgin":                    stat.MemoryStats.Stats["pgpgin"],
		"total_unevictable":         stat.MemoryStats.Stats["total_unevictable"],
		"pgmajfault":                stat.MemoryStats.Stats["pgmajfault"],
		"total_rss":                 stat.MemoryStats.Stats["total_rss"],
		"total_rss_huge":            stat.MemoryStats.Stats["total_rss_huge"],
		"total_writeback":           stat.MemoryStats.Stats["total_writeback"],
		"total_inactive_anon":       stat.MemoryStats.Stats["total_inactive_anon"],
		"rss_huge":                  stat.MemoryStats.Stats["rss_huge"],
		"hierarchical_memory_limit": stat.MemoryStats.Stats["hierarchical_memory_limit"],
		"total_pgfault":             stat.MemoryStats.Stats["total_pgfault"],
		"total_active_file":         stat.MemoryStats.Stats["total_active_file"],
		"active_anon":               stat.MemoryStats.Stats["active_anon"],
		"total_active_anon":         stat.MemoryStats.Stats["total_active_anon"],
		"total_pgpgout":             stat.MemoryStats.Stats["total_pgpgout"],
		"total_cache":               stat.MemoryStats.Stats["total_cache"],
		"inactive_anon":             stat.MemoryStats.Stats["inactive_anon"],
		"active_file":               stat.MemoryStats.Stats["active_file"],
		"pgfault":                   stat.MemoryStats.Stats["pgfault"],
		"inactive_file":             stat.MemoryStats.Stats["inactive_file"],
		"total_pgpgin":              stat.MemoryStats.Stats["total_pgpgin"],
		"usage_percent":             calculateMemPercent(stat),
		"container_id":              id,
	}
	acc.AddFields("docker_container_mem", memfields, tags, now)

	cpufields := map[string]interface{}{
		"usage_total":                  stat.CPUStats.CPUUsage.TotalUsage,
		"usage_in_usermode":            stat.CPUStats.CPUUsage.UsageInUsermode,
		"usage_in_kernelmode":          stat.CPUStats.CPUUsage.UsageInKernelmode,
		"usage_system":                 stat.CPUStats.SystemUsage,
		"throttling_periods":           stat.CPUStats.ThrottlingData.Periods,
		"throttling_throttled_periods": stat.CPUStats.ThrottlingData.ThrottledPeriods,
		"throttling_throttled_time":    stat.CPUStats.ThrottlingData.ThrottledTime,
		"usage_percent":                calculateCPUPercent(stat),
		"container_id":                 id,
	}
	cputags := copyTags(tags)
	cputags["cpu"] = "cpu-total"
	acc.AddFields("docker_container_cpu", cpufields, cputags, now)

	for i, percpu := range stat.CPUStats.CPUUsage.PercpuUsage {
		percputags := copyTags(tags)
		percputags["cpu"] = fmt.Sprintf("cpu%d", i)
		acc.AddFields("docker_container_cpu", map[string]interface{}{"usage_total": percpu}, percputags, now)
	}

	for network, netstats := range stat.Networks {
		netfields := map[string]interface{}{
			"rx_dropped":   netstats.RxDropped,
			"rx_bytes":     netstats.RxBytes,
			"rx_errors":    netstats.RxErrors,
			"tx_packets":   netstats.TxPackets,
			"tx_dropped":   netstats.TxDropped,
			"rx_packets":   netstats.RxPackets,
			"tx_errors":    netstats.TxErrors,
			"tx_bytes":     netstats.TxBytes,
			"container_id": id,
		}
		// Create a new network tag dictionary for the "network" tag
		nettags := copyTags(tags)
		nettags["network"] = network
		acc.AddFields("docker_container_net", netfields, nettags, now)
	}

	gatherBlockIOMetrics(stat, acc, tags, now, id)
}

func calculateMemPercent(stat *types.StatsJSON) float64 {
	var memPercent = 0.0
	if stat.MemoryStats.Limit > 0 {
		memPercent = float64(stat.MemoryStats.Usage) / float64(stat.MemoryStats.Limit) * 100.0
	}
	return memPercent
}

func calculateCPUPercent(stat *types.StatsJSON) float64 {
	var cpuPercent = 0.0
	// calculate the change for the cpu and system usage of the container in between readings
	cpuDelta := float64(stat.CPUStats.CPUUsage.TotalUsage) - float64(stat.PreCPUStats.CPUUsage.TotalUsage)
	systemDelta := float64(stat.CPUStats.SystemUsage) - float64(stat.PreCPUStats.SystemUsage)

	if systemDelta > 0.0 && cpuDelta > 0.0 {
		cpuPercent = (cpuDelta / systemDelta) * float64(len(stat.CPUStats.CPUUsage.PercpuUsage)) * 100.0
	}
	return cpuPercent
}

func gatherBlockIOMetrics(
	stat *types.StatsJSON,
	acc telegraf.Accumulator,
	tags map[string]string,
	now time.Time,
	id string,
) {
	blkioStats := stat.BlkioStats
	// Make a map of devices to their block io stats
	deviceStatMap := make(map[string]map[string]interface{})

	for _, metric := range blkioStats.IoServiceBytesRecursive {
		device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
		if _, ok := deviceStatMap[device]; !ok {
			deviceStatMap[device] = make(map[string]interface{})
		}

		field := fmt.Sprintf("io_service_bytes_recursive_%s", strings.ToLower(metric.Op))
		deviceStatMap[device][field] = metric.Value
	}

	for _, metric := range blkioStats.IoServicedRecursive {
		device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
		if _, ok := deviceStatMap[device]; !ok {
			deviceStatMap[device] = make(map[string]interface{})
		}

		field := fmt.Sprintf("io_serviced_recursive_%s", strings.ToLower(metric.Op))
		deviceStatMap[device][field] = metric.Value
	}

	for _, metric := range blkioStats.IoQueuedRecursive {
		device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
		// Guard against writing into a nil inner map for devices that did
		// not appear in the loops above.
		if _, ok := deviceStatMap[device]; !ok {
			deviceStatMap[device] = make(map[string]interface{})
		}
		field := fmt.Sprintf("io_queue_recursive_%s", strings.ToLower(metric.Op))
		deviceStatMap[device][field] = metric.Value
	}

	for _, metric := range blkioStats.IoServiceTimeRecursive {
		device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
		if _, ok := deviceStatMap[device]; !ok {
			deviceStatMap[device] = make(map[string]interface{})
		}
		field := fmt.Sprintf("io_service_time_recursive_%s", strings.ToLower(metric.Op))
		deviceStatMap[device][field] = metric.Value
	}

	for _, metric := range blkioStats.IoWaitTimeRecursive {
		device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
		if _, ok := deviceStatMap[device]; !ok {
			deviceStatMap[device] = make(map[string]interface{})
		}
		field := fmt.Sprintf("io_wait_time_%s", strings.ToLower(metric.Op))
		deviceStatMap[device][field] = metric.Value
	}

	for _, metric := range blkioStats.IoMergedRecursive {
		device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
		if _, ok := deviceStatMap[device]; !ok {
			deviceStatMap[device] = make(map[string]interface{})
		}
		field := fmt.Sprintf("io_merged_recursive_%s", strings.ToLower(metric.Op))
		deviceStatMap[device][field] = metric.Value
	}

	for _, metric := range blkioStats.IoTimeRecursive {
		device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
		if _, ok := deviceStatMap[device]; !ok {
			deviceStatMap[device] = make(map[string]interface{})
		}
		deviceStatMap[device]["io_time_recursive"] = metric.Value
	}

	for _, metric := range blkioStats.SectorsRecursive {
		device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
		if _, ok := deviceStatMap[device]; !ok {
			deviceStatMap[device] = make(map[string]interface{})
		}
		deviceStatMap[device]["sectors_recursive"] = metric.Value
	}

	for device, fields := range deviceStatMap {
		iotags := copyTags(tags)
		iotags["device"] = device
		fields["container_id"] = id
		acc.AddFields("docker_container_blkio", fields, iotags, now)
	}
}

func copyTags(in map[string]string) map[string]string {
	out := make(map[string]string)
	for k, v := range in {
		out[k] = v
	}
	return out
}

func sliceContains(in string, sl []string) bool {
	for _, str := range sl {
		if str == in {
			return true
		}
	}
	return false
}

// parseSize parses the human-readable size string into the amount it represents.
func parseSize(sizeStr string) (int64, error) {
	matches := sizeRegex.FindStringSubmatch(sizeStr)
	if len(matches) != 4 {
		return -1, fmt.Errorf("invalid size: '%s'", sizeStr)
	}

	size, err := strconv.ParseFloat(matches[1], 64)
	if err != nil {
		return -1, err
	}

	uMap := map[string]int64{"k": KB, "m": MB, "g": GB, "t": TB, "p": PB}
	unitPrefix := strings.ToLower(matches[3])
	if mul, ok := uMap[unitPrefix]; ok {
		size *= float64(mul)
	}

	return int64(size), nil
}

func init() {
	inputs.Add("docker", func() telegraf.Input {
		return &Docker{}
	})
}

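The CPU percentage above uses the same delta-based formula as `docker stats`:
the container's CPU-time delta between two readings, divided by the whole
machine's delta, scaled by the number of CPUs. A quick worked check with
made-up readings (the numbers here are illustrative, not real output):

```
package main

import "fmt"

func main() {
	var (
		totalUsage    = 6628208865.0     // container cpu ns, current reading
		preTotalUsage = 6528208865.0     // container cpu ns, previous reading
		systemUsage   = 84795360000000.0 // machine cpu ns, current reading
		preSystem     = 84794560000000.0 // machine cpu ns, previous reading
		ncpus         = 8.0
	)
	cpuDelta := totalUsage - preTotalUsage
	systemDelta := systemUsage - preSystem
	// (container delta / machine delta) scaled by CPU count, as a percent.
	fmt.Printf("%.2f%%\n", cpuDelta/systemDelta*ncpus*100) // prints 100.00%
}
```
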
429 plugins/inputs/docker/docker_test.go Normal file
@@ -0,0 +1,429 @@
package system

import (
	"io"
	"io/ioutil"
	"strings"
	"testing"
	"time"

	"golang.org/x/net/context"

	"github.com/docker/engine-api/types"
	"github.com/docker/engine-api/types/registry"
	"github.com/influxdata/telegraf/testutil"

	"github.com/stretchr/testify/require"
)

func TestDockerGatherContainerStats(t *testing.T) {
	var acc testutil.Accumulator
	stats := testStats()

	tags := map[string]string{
		"container_name":  "redis",
		"container_image": "redis/image",
	}
	gatherContainerStats(stats, &acc, tags, "123456789")

	// test docker_container_net measurement
	netfields := map[string]interface{}{
		"rx_dropped":   uint64(1),
		"rx_bytes":     uint64(2),
		"rx_errors":    uint64(3),
		"tx_packets":   uint64(4),
		"tx_dropped":   uint64(1),
		"rx_packets":   uint64(2),
		"tx_errors":    uint64(3),
		"tx_bytes":     uint64(4),
		"container_id": "123456789",
	}
	nettags := copyTags(tags)
	nettags["network"] = "eth0"
	acc.AssertContainsTaggedFields(t, "docker_container_net", netfields, nettags)

	// test docker_container_blkio measurement
	blkiotags := copyTags(tags)
	blkiotags["device"] = "6:0"
	blkiofields := map[string]interface{}{
		"io_service_bytes_recursive_read": uint64(100),
		"io_serviced_recursive_write":     uint64(101),
		"container_id":                    "123456789",
	}
	acc.AssertContainsTaggedFields(t, "docker_container_blkio", blkiofields, blkiotags)

	// test docker_container_mem measurement
	memfields := map[string]interface{}{
		"max_usage":                 uint64(1001),
		"usage":                     uint64(1111),
		"fail_count":                uint64(1),
		"limit":                     uint64(2000),
		"total_pgmajfault":          uint64(0),
		"cache":                     uint64(0),
		"mapped_file":               uint64(0),
		"total_inactive_file":       uint64(0),
		"pgpgout":                   uint64(0),
		"rss":                       uint64(0),
		"total_mapped_file":         uint64(0),
		"writeback":                 uint64(0),
		"unevictable":               uint64(0),
		"pgpgin":                    uint64(0),
		"total_unevictable":         uint64(0),
		"pgmajfault":                uint64(0),
		"total_rss":                 uint64(44),
		"total_rss_huge":            uint64(444),
		"total_writeback":           uint64(55),
		"total_inactive_anon":       uint64(0),
		"rss_huge":                  uint64(0),
		"hierarchical_memory_limit": uint64(0),
		"total_pgfault":             uint64(0),
		"total_active_file":         uint64(0),
		"active_anon":               uint64(0),
		"total_active_anon":         uint64(0),
		"total_pgpgout":             uint64(0),
		"total_cache":               uint64(0),
		"inactive_anon":             uint64(0),
		"active_file":               uint64(1),
		"pgfault":                   uint64(2),
		"inactive_file":             uint64(3),
		"total_pgpgin":              uint64(4),
		"usage_percent":             float64(55.55),
		"container_id":              "123456789",
	}

	acc.AssertContainsTaggedFields(t, "docker_container_mem", memfields, tags)

	// test docker_container_cpu measurement
	cputags := copyTags(tags)
	cputags["cpu"] = "cpu-total"
	cpufields := map[string]interface{}{
		"usage_total":                  uint64(500),
		"usage_in_usermode":            uint64(100),
		"usage_in_kernelmode":          uint64(200),
		"usage_system":                 uint64(100),
		"throttling_periods":           uint64(1),
		"throttling_throttled_periods": uint64(0),
		"throttling_throttled_time":    uint64(0),
		"usage_percent":                float64(400.0),
		"container_id":                 "123456789",
	}
	acc.AssertContainsTaggedFields(t, "docker_container_cpu", cpufields, cputags)

	cputags["cpu"] = "cpu0"
	cpu0fields := map[string]interface{}{
		"usage_total": uint64(1),
	}
	acc.AssertContainsTaggedFields(t, "docker_container_cpu", cpu0fields, cputags)

	cputags["cpu"] = "cpu1"
	cpu1fields := map[string]interface{}{
		"usage_total": uint64(1002),
	}
	acc.AssertContainsTaggedFields(t, "docker_container_cpu", cpu1fields, cputags)
}

func testStats() *types.StatsJSON {
	stats := &types.StatsJSON{}
	stats.Read = time.Now()
	stats.Networks = make(map[string]types.NetworkStats)

	stats.CPUStats.CPUUsage.PercpuUsage = []uint64{1, 1002}
	stats.CPUStats.CPUUsage.UsageInUsermode = 100
	stats.CPUStats.CPUUsage.TotalUsage = 500
	stats.CPUStats.CPUUsage.UsageInKernelmode = 200
	stats.CPUStats.SystemUsage = 100
	stats.CPUStats.ThrottlingData.Periods = 1

	stats.PreCPUStats.CPUUsage.TotalUsage = 400
	stats.PreCPUStats.SystemUsage = 50

	stats.MemoryStats.Stats = make(map[string]uint64)
	stats.MemoryStats.Stats["total_pgmajfault"] = 0
	stats.MemoryStats.Stats["cache"] = 0
	stats.MemoryStats.Stats["mapped_file"] = 0
	stats.MemoryStats.Stats["total_inactive_file"] = 0
	stats.MemoryStats.Stats["pgpgout"] = 0
	stats.MemoryStats.Stats["rss"] = 0
	stats.MemoryStats.Stats["total_mapped_file"] = 0
	stats.MemoryStats.Stats["writeback"] = 0
	stats.MemoryStats.Stats["unevictable"] = 0
	stats.MemoryStats.Stats["pgpgin"] = 0
	stats.MemoryStats.Stats["total_unevictable"] = 0
	stats.MemoryStats.Stats["pgmajfault"] = 0
	stats.MemoryStats.Stats["total_rss"] = 44
	stats.MemoryStats.Stats["total_rss_huge"] = 444
	stats.MemoryStats.Stats["total_writeback"] = 55
	stats.MemoryStats.Stats["total_inactive_anon"] = 0
	stats.MemoryStats.Stats["rss_huge"] = 0
	stats.MemoryStats.Stats["hierarchical_memory_limit"] = 0
	stats.MemoryStats.Stats["total_pgfault"] = 0
	stats.MemoryStats.Stats["total_active_file"] = 0
	stats.MemoryStats.Stats["active_anon"] = 0
	stats.MemoryStats.Stats["total_active_anon"] = 0
	stats.MemoryStats.Stats["total_pgpgout"] = 0
	stats.MemoryStats.Stats["total_cache"] = 0
	stats.MemoryStats.Stats["inactive_anon"] = 0
	stats.MemoryStats.Stats["active_file"] = 1
	stats.MemoryStats.Stats["pgfault"] = 2
	stats.MemoryStats.Stats["inactive_file"] = 3
	stats.MemoryStats.Stats["total_pgpgin"] = 4

	stats.MemoryStats.MaxUsage = 1001
	stats.MemoryStats.Usage = 1111
	stats.MemoryStats.Failcnt = 1
	stats.MemoryStats.Limit = 2000

	stats.Networks["eth0"] = types.NetworkStats{
		RxDropped: 1,
		RxBytes:   2,
		RxErrors:  3,
		TxPackets: 4,
		TxDropped: 1,
		RxPackets: 2,
		TxErrors:  3,
		TxBytes:   4,
	}

	sbr := types.BlkioStatEntry{
		Major: 6,
		Minor: 0,
		Op:    "read",
		Value: 100,
	}
	sr := types.BlkioStatEntry{
		Major: 6,
		Minor: 0,
		Op:    "write",
		Value: 101,
	}

	stats.BlkioStats.IoServiceBytesRecursive = append(
		stats.BlkioStats.IoServiceBytesRecursive, sbr)
	stats.BlkioStats.IoServicedRecursive = append(
		stats.BlkioStats.IoServicedRecursive, sr)

	return stats
}

type FakeDockerClient struct {
}

func (d FakeDockerClient) Info(ctx context.Context) (types.Info, error) {
	env := types.Info{
		Containers:         108,
		OomKillDisable:     false,
		SystemTime:         "2016-02-24T00:55:09.15073105-05:00",
		NEventsListener:    0,
		ID:                 "5WQQ:TFWR:FDNG:OKQ3:37Y4:FJWG:QIKK:623T:R3ME:QTKB:A7F7:OLHD",
		Debug:              false,
		LoggingDriver:      "json-file",
		KernelVersion:      "4.3.0-1-amd64",
		IndexServerAddress: "https://index.docker.io/v1/",
		MemTotal:           3840757760,
		Images:             199,
		CPUCfsQuota:        true,
		Name:               "absol",
		SwapLimit:          false,
		IPv4Forwarding:     true,
		ExecutionDriver:    "native-0.2",
		ExperimentalBuild:  false,
		CPUCfsPeriod:       true,
		RegistryConfig: &registry.ServiceConfig{
			IndexConfigs: map[string]*registry.IndexInfo{
				"docker.io": {
					Name:     "docker.io",
					Mirrors:  []string{},
					Official: true,
					Secure:   true,
				},
			},
			InsecureRegistryCIDRs: []*registry.NetIPNet{{IP: []byte{127, 0, 0, 0}, Mask: []byte{255, 0, 0, 0}}},
			Mirrors:               []string{},
		},
		OperatingSystem:   "Linux Mint LMDE (containerized)",
		BridgeNfIptables:  true,
		HTTPSProxy:        "",
		Labels:            []string{},
		MemoryLimit:       false,
		DriverStatus:      [][2]string{{"Pool Name", "docker-8:1-1182287-pool"}, {"Pool Blocksize", "65.54 kB"}, {"Backing Filesystem", "extfs"}, {"Data file", "/dev/loop0"}, {"Metadata file", "/dev/loop1"}, {"Data Space Used", "17.3 GB"}, {"Data Space Total", "107.4 GB"}, {"Data Space Available", "36.53 GB"}, {"Metadata Space Used", "20.97 MB"}, {"Metadata Space Total", "2.147 GB"}, {"Metadata Space Available", "2.127 GB"}, {"Udev Sync Supported", "true"}, {"Deferred Removal Enabled", "false"}, {"Data loop file", "/var/lib/docker/devicemapper/devicemapper/data"}, {"Metadata loop file", "/var/lib/docker/devicemapper/devicemapper/metadata"}, {"Library Version", "1.02.115 (2016-01-25)"}},
		NFd:               19,
		HTTPProxy:         "",
		Driver:            "devicemapper",
		NGoroutines:       39,
		NCPU:              4,
		DockerRootDir:     "/var/lib/docker",
		NoProxy:           "",
		BridgeNfIP6tables: true,
	}
	return env, nil
}

func (d FakeDockerClient) ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) {
	container1 := types.Container{
		ID:      "e2173b9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296b7dfb",
		Names:   []string{"/etcd"},
		Image:   "quay.io/coreos/etcd:v2.2.2",
		Command: "/etcd -name etcd0 -advertise-client-urls http://localhost:2379 -listen-client-urls http://0.0.0.0:2379",
		Created: 1455941930,
		Status:  "Up 4 hours",
		Ports: []types.Port{
			types.Port{
				PrivatePort: 7001,
				PublicPort:  0,
				Type:        "tcp",
			},
			types.Port{
				PrivatePort: 4001,
				PublicPort:  0,
				Type:        "tcp",
			},
			types.Port{
				PrivatePort: 2380,
				PublicPort:  0,
				Type:        "tcp",
			},
			types.Port{
				PrivatePort: 2379,
				PublicPort:  2379,
				Type:        "tcp",
				IP:          "0.0.0.0",
			},
		},
		SizeRw:     0,
		SizeRootFs: 0,
	}
	container2 := types.Container{
		ID:      "b7dfbb9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296e2173",
		Names:   []string{"/etcd2"},
		Image:   "quay.io/coreos/etcd:v2.2.2",
		Command: "/etcd -name etcd2 -advertise-client-urls http://localhost:2379 -listen-client-urls http://0.0.0.0:2379",
		Created: 1455941933,
		Status:  "Up 4 hours",
		Ports: []types.Port{
			types.Port{
				PrivatePort: 7002,
				PublicPort:  0,
				Type:        "tcp",
			},
			types.Port{
				PrivatePort: 4002,
				PublicPort:  0,
				Type:        "tcp",
			},
			types.Port{
				PrivatePort: 2381,
				PublicPort:  0,
				Type:        "tcp",
			},
			types.Port{
				PrivatePort: 2382,
				PublicPort:  2382,
				Type:        "tcp",
				IP:          "0.0.0.0",
			},
		},
		SizeRw:     0,
		SizeRootFs: 0,
	}

	containers := []types.Container{container1, container2}
	return containers, nil
}

func (d FakeDockerClient) ContainerStats(ctx context.Context, containerID string, stream bool) (io.ReadCloser, error) {
	var stat io.ReadCloser
	jsonStat := `{"read":"2016-02-24T11:42:27.472459608-05:00","memory_stats":{"stats":{},"limit":18935443456},"blkio_stats":{"io_service_bytes_recursive":[{"major":252,"minor":1,"op":"Read","value":753664},{"major":252,"minor":1,"op":"Write"},{"major":252,"minor":1,"op":"Sync"},{"major":252,"minor":1,"op":"Async","value":753664},{"major":252,"minor":1,"op":"Total","value":753664}],"io_serviced_recursive":[{"major":252,"minor":1,"op":"Read","value":26},{"major":252,"minor":1,"op":"Write"},{"major":252,"minor":1,"op":"Sync"},{"major":252,"minor":1,"op":"Async","value":26},{"major":252,"minor":1,"op":"Total","value":26}]},"cpu_stats":{"cpu_usage":{"percpu_usage":[17871,4959158,1646137,1231652,11829401,244656,369972,0],"usage_in_usermode":10000000,"total_usage":20298847},"system_cpu_usage":24052607520000000,"throttling_data":{}},"precpu_stats":{"cpu_usage":{"percpu_usage":[17871,4959158,1646137,1231652,11829401,244656,369972,0],"usage_in_usermode":10000000,"total_usage":20298847},"system_cpu_usage":24052599550000000,"throttling_data":{}}}`
	stat = ioutil.NopCloser(strings.NewReader(jsonStat))
	return stat, nil
}

func TestDockerGatherInfo(t *testing.T) {
	var acc testutil.Accumulator
	client := FakeDockerClient{}
	d := Docker{client: client}

	err := d.Gather(&acc)

	require.NoError(t, err)

	acc.AssertContainsTaggedFields(t,
		"docker",
		map[string]interface{}{
			"n_listener_events":       int(0),
			"n_cpus":                  int(4),
			"n_used_file_descriptors": int(19),
			"n_containers":            int(108),
			"n_images":                int(199),
			"n_goroutines":            int(39),
		},
		map[string]string{},
	)

	acc.AssertContainsTaggedFields(t,
		"docker_data",
		map[string]interface{}{
			"used":      int64(17300000000),
			"total":     int64(107400000000),
			"available": int64(36530000000),
		},
		map[string]string{
			"unit": "bytes",
		},
	)
	acc.AssertContainsTaggedFields(t,
		"docker_container_cpu",
		map[string]interface{}{
			"usage_total": uint64(1231652),
		},
		map[string]string{
			"container_name":  "etcd2",
			"container_image": "quay.io/coreos/etcd:v2.2.2",
			"cpu":             "cpu3",
		},
	)
	acc.AssertContainsTaggedFields(t,
		"docker_container_mem",
		map[string]interface{}{
			"total_pgpgout":             uint64(0),
			"usage_percent":             float64(0),
			"rss":                       uint64(0),
			"total_writeback":           uint64(0),
			"active_anon":               uint64(0),
			"total_pgmajfault":          uint64(0),
			"total_rss":                 uint64(0),
			"total_unevictable":         uint64(0),
			"active_file":               uint64(0),
			"total_mapped_file":         uint64(0),
			"pgpgin":                    uint64(0),
			"total_active_file":         uint64(0),
			"total_active_anon":         uint64(0),
			"total_cache":               uint64(0),
			"inactive_anon":             uint64(0),
			"pgmajfault":                uint64(0),
			"total_inactive_anon":       uint64(0),
			"total_rss_huge":            uint64(0),
			"rss_huge":                  uint64(0),
			"hierarchical_memory_limit": uint64(0),
			"pgpgout":                   uint64(0),
			"unevictable":               uint64(0),
			"total_inactive_file":       uint64(0),
			"writeback":                 uint64(0),
			"total_pgfault":             uint64(0),
			"total_pgpgin":              uint64(0),
			"cache":                     uint64(0),
			"mapped_file":               uint64(0),
			"inactive_file":             uint64(0),
			"max_usage":                 uint64(0),
			"fail_count":                uint64(0),
			"pgfault":                   uint64(0),
			"usage":                     uint64(0),
			"limit":                     uint64(18935443456),
			"container_id":              "b7dfbb9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296e2173",
		},
		map[string]string{
			"container_name":  "etcd2",
			"container_image": "quay.io/coreos/etcd:v2.2.2",
		},
	)
}

74 plugins/inputs/dovecot/README.md Normal file
@@ -0,0 +1,74 @@
# Dovecot Input Plugin

The dovecot plugin uses the dovecot Stats protocol to gather metrics on configured
domains. You can read Dovecot's documentation
[here](http://wiki2.dovecot.org/Statistics).

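The exchange behind the plugin is a single tab-separated EXPORT query over the
stats socket; a minimal sketch, assuming a stats listener on localhost:24242
(the default used below):

```
package main

import (
	"bytes"
	"fmt"
	"io"
	"net"
	"time"
)

func main() {
	c, err := net.DialTimeout("tcp", "localhost:24242", 5*time.Second)
	if err != nil {
		panic(err)
	}
	defer c.Close()
	c.SetDeadline(time.Now().Add(5 * time.Second))

	// The query is one tab-separated EXPORT line; dovecot answers with a
	// tab-separated header row, one row per user/domain/ip, then EOF.
	fmt.Fprintf(c, "EXPORT\tglobal\n")

	var buf bytes.Buffer
	io.Copy(&buf, c)
	fmt.Println(buf.String())
}
```
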
### Configuration:

```
# Read metrics about dovecot servers
[[inputs.dovecot]]
  ## specify dovecot servers via an address:port list
  ##  e.g.
  ##    localhost:24242
  ##
  ## If no servers are specified, then localhost is used as the host.
  servers = ["localhost:24242"]
  ## Type is one of "user", "domain", "ip", or "global"
  type = "global"
  ## Wildcard matches like "*.com". An empty string "" is same as "*"
  ## If type = "ip" filters should be <IP/network>
  filters = [""]
```

### Tags:

- server: hostname
- type: query type
- ip: ip addr
- user: username
- domain: domain name

### Fields:

    reset_timestamp          time.Time
    last_update              time.Time
    num_logins               int64
    num_cmds                 int64
    num_connected_sessions   int64   ## not in <user> type
    user_cpu                 float32
    sys_cpu                  float32
    clock_time               float64
    min_faults               int64
    maj_faults               int64
    vol_cs                   int64
    invol_cs                 int64
    disk_input               int64
    disk_output              int64
    read_count               int64
    read_bytes               int64
    write_count              int64
    write_bytes              int64
    mail_lookup_path         int64
    mail_lookup_attr         int64
    mail_read_count          int64
    mail_read_bytes          int64
    mail_cache_hits          int64

### Example Output:

```
telegraf -config t.cfg -input-filter dovecot -test
* Plugin: dovecot, Collection 1
> dovecot,ip=192.168.0.1,server=dovecot-1.domain.test,type=ip clock_time=0,disk_input=0i,disk_output=0i,invol_cs=0i,last_update="2016-04-08 10:59:47.000208479 +0200 CEST",mail_cache_hits=0i,mail_lookup_attr=0i,mail_lookup_path=0i,mail_read_bytes=0i,mail_read_count=0i,maj_faults=0i,min_faults=0i,num_cmds=12i,num_connected_sessions=0i,num_logins=6i,read_bytes=0i,read_count=0i,reset_timestamp="2016-04-08 10:33:34 +0200 CEST",sys_cpu=0,user_cpu=0,vol_cs=0i,write_bytes=0i,write_count=0i 1460106251633824223
* Plugin: dovecot, Collection 1
> dovecot,server=dovecot-1.domain.test,type=user,user=user-1@domain.test clock_time=0.00006,disk_input=405504i,disk_output=77824i,invol_cs=67i,last_update="2016-04-08 11:02:55.000111634 +0200 CEST",mail_cache_hits=26i,mail_lookup_attr=0i,mail_lookup_path=6i,mail_read_bytes=86233i,mail_read_count=5i,maj_faults=0i,min_faults=975i,num_cmds=41i,num_logins=3i,read_bytes=368833i,read_count=394i,reset_timestamp="2016-04-08 11:01:32 +0200 CEST",sys_cpu=0.008,user_cpu=0.004,vol_cs=323i,write_bytes=105086i,write_count=176i 1460106256637049167
* Plugin: dovecot, Collection 1
> dovecot,domain=domain.test,server=dovecot-1.domain.test,type=domain clock_time=100896189179847.7,disk_input=6467588263936i,disk_output=17933680439296i,invol_cs=1194808498i,last_update="2016-04-08 11:04:08.000377367 +0200 CEST",mail_cache_hits=46455781i,mail_lookup_attr=0i,mail_lookup_path=571490i,mail_read_bytes=79287033067i,mail_read_count=491243i,maj_faults=16992i,min_faults=1278442541i,num_cmds=606005i,num_connected_sessions=6597i,num_logins=166381i,read_bytes=30231409780721i,read_count=1624912080i,reset_timestamp="2016-04-08 10:28:45 +0200 CEST",sys_cpu=156440.372,user_cpu=216676.476,vol_cs=2749291157i,write_bytes=17097106707594i,write_count=944448998i 1460106261639672622
* Plugin: dovecot, Collection 1
> dovecot,server=dovecot-1.domain.test,type=global clock_time=101196971074203.94,disk_input=6493168218112i,disk_output=17978638815232i,invol_cs=1198855447i,last_update="2016-04-08 11:04:13.000379245 +0200 CEST",mail_cache_hits=68192209i,mail_lookup_attr=0i,mail_lookup_path=653861i,mail_read_bytes=86705151847i,mail_read_count=566125i,maj_faults=17208i,min_faults=1286179702i,num_cmds=917469i,num_connected_sessions=8896i,num_logins=174827i,read_bytes=30327690466186i,read_count=1772396430i,reset_timestamp="2016-04-08 10:28:45 +0200 CEST",sys_cpu=157965.692,user_cpu=219337.48,vol_cs=2827615787i,write_bytes=17150837661940i,write_count=992653220i 1460106266642153907
```

193 plugins/inputs/dovecot/dovecot.go Normal file
@@ -0,0 +1,193 @@
package dovecot

import (
	"bytes"
	"fmt"
	"io"
	"net"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/inputs"
)

type Dovecot struct {
	Type    string
	Filters []string
	Servers []string
}

func (d *Dovecot) Description() string {
	return "Read statistics from one or many dovecot servers"
}

var sampleConfig = `
  ## specify dovecot servers via an address:port list
  ##  e.g.
  ##    localhost:24242
  ##
  ## If no servers are specified, then localhost is used as the host.
  servers = ["localhost:24242"]
  ## Type is one of "user", "domain", "ip", or "global"
  type = "global"
  ## Wildcard matches like "*.com". An empty string "" is same as "*"
  ## If type = "ip" filters should be <IP/network>
  filters = [""]
`

var defaultTimeout = time.Second * time.Duration(5)

var validQuery = map[string]bool{
	"user": true, "domain": true, "global": true, "ip": true,
}

func (d *Dovecot) SampleConfig() string { return sampleConfig }

const defaultPort = "24242"

// Gather reads stats from all configured servers.
func (d *Dovecot) Gather(acc telegraf.Accumulator) error {
	if !validQuery[d.Type] {
		return fmt.Errorf("Error: %s is not a valid query type\n",
			d.Type)
	}

	if len(d.Servers) == 0 {
		d.Servers = append(d.Servers, "127.0.0.1:24242")
	}

	var wg sync.WaitGroup

	var outerr error

	if len(d.Filters) <= 0 {
		d.Filters = append(d.Filters, "")
	}

	for _, serv := range d.Servers {
		for _, filter := range d.Filters {
			wg.Add(1)
			go func(serv string, filter string) {
				defer wg.Done()
				outerr = d.gatherServer(serv, acc, d.Type, filter)
			}(serv, filter)
		}
	}

	wg.Wait()

	return outerr
}

func (d *Dovecot) gatherServer(addr string, acc telegraf.Accumulator, qtype string, filter string) error {
	_, _, err := net.SplitHostPort(addr)
	if err != nil {
		return fmt.Errorf("Error: %s on url %s\n", err, addr)
	}

	c, err := net.DialTimeout("tcp", addr, defaultTimeout)
	if err != nil {
		return fmt.Errorf("Unable to connect to dovecot server '%s': %s", addr, err)
	}
	defer c.Close()

	// Extend connection
	c.SetDeadline(time.Now().Add(defaultTimeout))

	// Build the EXPORT query; an optional filter narrows it to a single
	// user/domain/ip, e.g. "EXPORT\tdomain\tdomain=example.com".
	msg := fmt.Sprintf("EXPORT\t%s", qtype)
	if len(filter) > 0 {
		msg += fmt.Sprintf("\t%s=%s", qtype, filter)
	}
	msg += "\n"

	c.Write([]byte(msg))
	var buf bytes.Buffer
	io.Copy(&buf, c)

	host, _, _ := net.SplitHostPort(addr)

	return gatherStats(&buf, acc, host, qtype)
}

func gatherStats(buf *bytes.Buffer, acc telegraf.Accumulator, host string, qtype string) error {
	// The first line is a tab-separated header; each following line is one
	// row of values for a user/domain/ip (or a single global row).
	lines := strings.Split(buf.String(), "\n")
	head := strings.Split(lines[0], "\t")
	vals := lines[1:]

	for i := range vals {
		if vals[i] == "" {
			continue
		}
		val := strings.Split(vals[i], "\t")

		fields := make(map[string]interface{})
		tags := map[string]string{"server": host, "type": qtype}

		if qtype != "global" {
			tags[qtype] = val[0]
		}

		for n := range val {
			switch head[n] {
			case qtype:
				continue
			case "user_cpu", "sys_cpu", "clock_time":
				fields[head[n]] = secParser(val[n])
			case "reset_timestamp", "last_update":
				fields[head[n]] = timeParser(val[n])
			default:
				ival, _ := splitSec(val[n])
				fields[head[n]] = ival
			}
		}

		acc.AddFields("dovecot", fields, tags)
	}

	return nil
}

func splitSec(tm string) (sec int64, msec int64) {
	var err error
	ss := strings.Split(tm, ".")

	sec, err = strconv.ParseInt(ss[0], 10, 64)
	if err != nil {
		sec = 0
	}
	if len(ss) > 1 {
		msec, err = strconv.ParseInt(ss[1], 10, 64)
		if err != nil {
			msec = 0
		}
	} else {
		msec = 0
	}

	return sec, msec
}

func timeParser(tm string) time.Time {
	sec, msec := splitSec(tm)
	return time.Unix(sec, msec)
}

func secParser(tm string) float64 {
	sec, msec := splitSec(tm)
	return float64(sec) + (float64(msec) / 1000000.0)
}

func init() {
	inputs.Add("dovecot", func() telegraf.Input {
		return &Dovecot{}
	})
}

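The stats protocol encodes timestamps as "sec.frac" strings; splitSec splits
on the dot and timeParser hands both parts straight to time.Unix. A quick
check of the mechanics, with a value from the test data below (note the
fractional part is passed through as-is):

```
package main

import (
	"fmt"
	"time"
)

func main() {
	// "1454603963.039864" -> seconds 1454603963, fractional part 39864,
	// which timeParser feeds to time.Unix unchanged.
	fmt.Println(time.Unix(1454603963, 39864).UTC())
}
```
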
119 plugins/inputs/dovecot/dovecot_test.go Normal file
@@ -0,0 +1,119 @@
package dovecot

import (
	"bytes"
	"testing"
	"time"

	"github.com/influxdata/telegraf/testutil"
	"github.com/stretchr/testify/require"
)

func TestDovecot(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}

	fields := map[string]interface{}{
		"reset_timestamp":        time.Unix(1453969886, 0),
		"last_update":            time.Unix(1454603963, 39864),
		"num_logins":             int64(7503897),
		"num_cmds":               int64(52595715),
		"num_connected_sessions": int64(1204),
		"user_cpu":               1.00831175372e+08,
		"sys_cpu":                8.3849071112e+07,
		"clock_time":             4.3260019315281835e+15,
		"min_faults":             int64(763950011),
		"maj_faults":             int64(1112443),
		"vol_cs":                 int64(4120386897),
		"invol_cs":               int64(3685239306),
		"disk_input":             int64(41679480946688),
		"disk_output":            int64(1819070669176832),
		"read_count":             int64(2368906465),
		"read_bytes":             int64(2957928122981169),
		"write_count":            int64(3545389615),
		"write_bytes":            int64(1666822498251286),
		"mail_lookup_path":       int64(24396105),
		"mail_lookup_attr":       int64(302845),
		"mail_read_count":        int64(20155768),
		"mail_read_bytes":        int64(669946617705),
		"mail_cache_hits":        int64(1557255080),
	}

	var acc testutil.Accumulator

	// Test type=global
	tags := map[string]string{"server": "dovecot.test", "type": "global"}
	buf := bytes.NewBufferString(sampleGlobal)

	err := gatherStats(buf, &acc, "dovecot.test", "global")
	require.NoError(t, err)

	acc.AssertContainsTaggedFields(t, "dovecot", fields, tags)

	// Test type=domain
	tags = map[string]string{"server": "dovecot.test", "type": "domain", "domain": "domain.test"}
	buf = bytes.NewBufferString(sampleDomain)

	err = gatherStats(buf, &acc, "dovecot.test", "domain")
	require.NoError(t, err)

	acc.AssertContainsTaggedFields(t, "dovecot", fields, tags)

	// Test type=ip
	tags = map[string]string{"server": "dovecot.test", "type": "ip", "ip": "192.168.0.100"}
	buf = bytes.NewBufferString(sampleIp)

	err = gatherStats(buf, &acc, "dovecot.test", "ip")
	require.NoError(t, err)

	acc.AssertContainsTaggedFields(t, "dovecot", fields, tags)

	// Test type=user (user rows carry no num_connected_sessions column)
	fields = map[string]interface{}{
		"reset_timestamp":  time.Unix(1453969886, 0),
		"last_update":      time.Unix(1454603963, 39864),
		"num_logins":       int64(7503897),
		"num_cmds":         int64(52595715),
		"user_cpu":         1.00831175372e+08,
		"sys_cpu":          8.3849071112e+07,
		"clock_time":       4.3260019315281835e+15,
		"min_faults":       int64(763950011),
		"maj_faults":       int64(1112443),
		"vol_cs":           int64(4120386897),
		"invol_cs":         int64(3685239306),
		"disk_input":       int64(41679480946688),
		"disk_output":      int64(1819070669176832),
		"read_count":       int64(2368906465),
		"read_bytes":       int64(2957928122981169),
		"write_count":      int64(3545389615),
		"write_bytes":      int64(1666822498251286),
		"mail_lookup_path": int64(24396105),
		"mail_lookup_attr": int64(302845),
		"mail_read_count":  int64(20155768),
		"mail_read_bytes":  int64(669946617705),
		"mail_cache_hits":  int64(1557255080),
	}

	tags = map[string]string{"server": "dovecot.test", "type": "user", "user": "user.1@domain.test"}
	buf = bytes.NewBufferString(sampleUser)

	err = gatherStats(buf, &acc, "dovecot.test", "user")
	require.NoError(t, err)

	acc.AssertContainsTaggedFields(t, "dovecot", fields, tags)
}

const sampleGlobal = `reset_timestamp	last_update	num_logins	num_cmds	num_connected_sessions	user_cpu	sys_cpu	clock_time	min_faults	maj_faults	vol_cs	invol_cs	disk_input	disk_output	read_count	read_bytes	write_count	write_bytes	mail_lookup_path	mail_lookup_attr	mail_read_count	mail_read_bytes	mail_cache_hits
1453969886	1454603963.039864	7503897	52595715	1204	100831175.372000	83849071.112000	4326001931528183.495762	763950011	1112443	4120386897	3685239306	41679480946688	1819070669176832	2368906465	2957928122981169	3545389615	1666822498251286	24396105	302845	20155768	669946617705	1557255080`

const sampleDomain = `domain	reset_timestamp	last_update	num_logins	num_cmds	num_connected_sessions	user_cpu	sys_cpu	clock_time	min_faults	maj_faults	vol_cs	invol_cs	disk_input	disk_output	read_count	read_bytes	write_count	write_bytes	mail_lookup_path	mail_lookup_attr	mail_read_count	mail_read_bytes	mail_cache_hits
domain.test	1453969886	1454603963.039864	7503897	52595715	1204	100831175.372000	83849071.112000	4326001931528183.495762	763950011	1112443	4120386897	3685239306	41679480946688	1819070669176832	2368906465	2957928122981169	3545389615	1666822498251286	24396105	302845	20155768	669946617705	1557255080`

const sampleIp = `ip	reset_timestamp	last_update	num_logins	num_cmds	num_connected_sessions	user_cpu	sys_cpu	clock_time	min_faults	maj_faults	vol_cs	invol_cs	disk_input	disk_output	read_count	read_bytes	write_count	write_bytes	mail_lookup_path	mail_lookup_attr	mail_read_count	mail_read_bytes	mail_cache_hits
192.168.0.100	1453969886	1454603963.039864	7503897	52595715	1204	100831175.372000	83849071.112000	4326001931528183.495762	763950011	1112443	4120386897	3685239306	41679480946688	1819070669176832	2368906465	2957928122981169	3545389615	1666822498251286	24396105	302845	20155768	669946617705	1557255080`

const sampleUser = `user	reset_timestamp	last_update	num_logins	num_cmds	user_cpu	sys_cpu	clock_time	min_faults	maj_faults	vol_cs	invol_cs	disk_input	disk_output	read_count	read_bytes	write_count	write_bytes	mail_lookup_path	mail_lookup_attr	mail_read_count	mail_read_bytes	mail_cache_hits
user.1@domain.test	1453969886	1454603963.039864	7503897	52595715	100831175.372000	83849071.112000	4326001931528183.495762	763950011	1112443	4120386897	3685239306	41679480946688	1819070669176832	2368906465	2957928122981169	3545389615	1666822498251286	24396105	302845	20155768	669946617705	1557255080`

320 plugins/inputs/elasticsearch/README.md Normal file
@@ -0,0 +1,320 @@
# Elasticsearch plugin

#### Plugin arguments:
- **servers** []string: list of one or more Elasticsearch servers
- **local** boolean: If false, it will read the indices stats from all nodes
- **cluster_health** boolean: If true, it will also obtain cluster level stats

#### Description:

The [elasticsearch](https://www.elastic.co/) plugin queries endpoints to obtain
[node](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html)
and optionally [cluster](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html) stats.

Example:

```
[elasticsearch]

servers = ["http://localhost:9200"]

local = true

cluster_health = true
```

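Under the hood these are plain HTTP GETs against the stats endpoints; a
minimal sketch (the URL path and the printed field are illustrative
assumptions, not the plugin's exact code):

```
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// "_nodes/_local/stats" limits the response to the local node,
	// mirroring what local = true asks for.
	resp, err := http.Get("http://localhost:9200/_nodes/_local/stats")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var stats map[string]interface{}
	if err := json.NewDecoder(resp.Body).Decode(&stats); err != nil {
		panic(err)
	}
	fmt.Println(stats["cluster_name"])
}
```
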
# Measurements
|
||||
#### cluster measurements (utilizes fields instead of single values):
|
||||
|
||||
contains `status`, `timed_out`, `number_of_nodes`, `number_of_data_nodes`,
|
||||
`active_primary_shards`, `active_shards`, `relocating_shards`,
|
||||
`initializing_shards`, `unassigned_shards` fields
|
||||
- elasticsearch_cluster_health
|
||||
|
||||
contains `status`, `number_of_shards`, `number_of_replicas`,
|
||||
`active_primary_shards`, `active_shards`, `relocating_shards`,
|
||||
`initializing_shards`, `unassigned_shards` fields
|
||||
- elasticsearch_indices
|
||||
|
||||
#### node measurements:
|
||||
|
||||
field data circuit breaker measurement names:
|
||||
- elasticsearch_breakers_fielddata_estimated_size_in_bytes value=0
|
||||
- elasticsearch_breakers_fielddata_overhead value=1.03
|
||||
- elasticsearch_breakers_fielddata_tripped value=0
|
||||
- elasticsearch_breakers_fielddata_limit_size_in_bytes value=623326003
|
||||
- elasticsearch_breakers_request_estimated_size_in_bytes value=0
|
||||
- elasticsearch_breakers_request_overhead value=1.0
|
||||
- elasticsearch_breakers_request_tripped value=0
|
||||
- elasticsearch_breakers_request_limit_size_in_bytes value=415550668
|
||||
- elasticsearch_breakers_parent_overhead value=1.0
|
||||
- elasticsearch_breakers_parent_tripped value=0
|
||||
- elasticsearch_breakers_parent_limit_size_in_bytes value=727213670
|
||||
- elasticsearch_breakers_parent_estimated_size_in_bytes value=0
|
||||
|
||||
File system information, data path, free disk space, read/write measurement names:
|
||||
- elasticsearch_fs_timestamp value=1436460392946
|
||||
- elasticsearch_fs_total_free_in_bytes value=16909316096
|
||||
- elasticsearch_fs_total_available_in_bytes value=15894814720
|
||||
- elasticsearch_fs_total_total_in_bytes value=19507089408
|
||||
|
||||
indices size, document count, indexing and deletion times, search times,
|
||||
field cache size, merges and flushes measurement names:
|
||||
- elasticsearch_indices_id_cache_memory_size_in_bytes value=0
|
||||
- elasticsearch_indices_completion_size_in_bytes value=0
|
||||
- elasticsearch_indices_suggest_total value=0
|
||||
- elasticsearch_indices_suggest_time_in_millis value=0
|
||||
- elasticsearch_indices_suggest_current value=0
|
||||
- elasticsearch_indices_query_cache_memory_size_in_bytes value=0
|
||||
- elasticsearch_indices_query_cache_evictions value=0
|
||||
- elasticsearch_indices_query_cache_hit_count value=0
|
||||
- elasticsearch_indices_query_cache_miss_count value=0
|
||||
- elasticsearch_indices_store_size_in_bytes value=37715234
|
||||
- elasticsearch_indices_store_throttle_time_in_millis value=215
|
||||
- elasticsearch_indices_merges_current_docs value=0
|
||||
- elasticsearch_indices_merges_current_size_in_bytes value=0
|
||||
- elasticsearch_indices_merges_total value=133
|
||||
- elasticsearch_indices_merges_total_time_in_millis value=21060
|
||||
- elasticsearch_indices_merges_total_docs value=203672
|
||||
- elasticsearch_indices_merges_total_size_in_bytes value=142900226
|
||||
- elasticsearch_indices_merges_current value=0
|
||||
- elasticsearch_indices_filter_cache_memory_size_in_bytes value=7384
|
||||
- elasticsearch_indices_filter_cache_evictions value=0
|
||||
- elasticsearch_indices_indexing_index_total value=84790
|
||||
- elasticsearch_indices_indexing_index_time_in_millis value=29680
|
||||
- elasticsearch_indices_indexing_index_current value=0
|
||||
- elasticsearch_indices_indexing_noop_update_total value=0
|
||||
- elasticsearch_indices_indexing_throttle_time_in_millis value=0
|
||||
- elasticsearch_indices_indexing_delete_tota value=13879
- elasticsearch_indices_indexing_delete_time_in_millis value=1139
- elasticsearch_indices_indexing_delete_current value=0
- elasticsearch_indices_get_exists_time_in_millis value=0
- elasticsearch_indices_get_missing_total value=1
- elasticsearch_indices_get_missing_time_in_millis value=2
- elasticsearch_indices_get_current value=0
- elasticsearch_indices_get_total value=1
- elasticsearch_indices_get_time_in_millis value=2
- elasticsearch_indices_get_exists_total value=0
- elasticsearch_indices_refresh_total value=1076
- elasticsearch_indices_refresh_total_time_in_millis value=20078
- elasticsearch_indices_percolate_current value=0
- elasticsearch_indices_percolate_memory_size_in_bytes value=-1
- elasticsearch_indices_percolate_queries value=0
- elasticsearch_indices_percolate_total value=0
- elasticsearch_indices_percolate_time_in_millis value=0
- elasticsearch_indices_translog_operations value=17702
- elasticsearch_indices_translog_size_in_bytes value=17
- elasticsearch_indices_recovery_current_as_source value=0
- elasticsearch_indices_recovery_current_as_target value=0
- elasticsearch_indices_recovery_throttle_time_in_millis value=0
- elasticsearch_indices_docs_count value=29652
- elasticsearch_indices_docs_deleted value=5229
- elasticsearch_indices_flush_total_time_in_millis value=2401
- elasticsearch_indices_flush_total value=115
- elasticsearch_indices_fielddata_memory_size_in_bytes value=12996
- elasticsearch_indices_fielddata_evictions value=0
- elasticsearch_indices_search_fetch_current value=0
- elasticsearch_indices_search_open_contexts value=0
- elasticsearch_indices_search_query_total value=1452
- elasticsearch_indices_search_query_time_in_millis value=5695
- elasticsearch_indices_search_query_current value=0
- elasticsearch_indices_search_fetch_total value=414
- elasticsearch_indices_search_fetch_time_in_millis value=146
- elasticsearch_indices_warmer_current value=0
- elasticsearch_indices_warmer_total value=2319
- elasticsearch_indices_warmer_total_time_in_millis value=448
- elasticsearch_indices_segments_count value=134
- elasticsearch_indices_segments_memory_in_bytes value=1285212
- elasticsearch_indices_segments_index_writer_memory_in_bytes value=0
- elasticsearch_indices_segments_index_writer_max_memory_in_bytes value=172368955
- elasticsearch_indices_segments_version_map_memory_in_bytes value=611844
- elasticsearch_indices_segments_fixed_bit_set_memory_in_bytes value=0

HTTP connection measurement names:
- elasticsearch_http_current_open value=3
- elasticsearch_http_total_opened value=3

JVM stats, memory pool information, garbage collection, buffer pools measurement names:
- elasticsearch_jvm_timestamp value=1436460392945
- elasticsearch_jvm_uptime_in_millis value=202245
- elasticsearch_jvm_mem_non_heap_used_in_bytes value=39634576
- elasticsearch_jvm_mem_non_heap_committed_in_bytes value=40841216
- elasticsearch_jvm_mem_pools_young_max_in_bytes value=279183360
- elasticsearch_jvm_mem_pools_young_peak_used_in_bytes value=71630848
- elasticsearch_jvm_mem_pools_young_peak_max_in_bytes value=279183360
- elasticsearch_jvm_mem_pools_young_used_in_bytes value=32685760
- elasticsearch_jvm_mem_pools_survivor_peak_used_in_bytes value=8912888
- elasticsearch_jvm_mem_pools_survivor_peak_max_in_bytes value=34865152
- elasticsearch_jvm_mem_pools_survivor_used_in_bytes value=8912880
- elasticsearch_jvm_mem_pools_survivor_max_in_bytes value=34865152
- elasticsearch_jvm_mem_pools_old_peak_max_in_bytes value=724828160
- elasticsearch_jvm_mem_pools_old_used_in_bytes value=11110928
- elasticsearch_jvm_mem_pools_old_max_in_bytes value=724828160
- elasticsearch_jvm_mem_pools_old_peak_used_in_bytes value=14354608
- elasticsearch_jvm_mem_heap_used_in_bytes value=52709568
- elasticsearch_jvm_mem_heap_used_percent value=5
- elasticsearch_jvm_mem_heap_committed_in_bytes value=259522560
- elasticsearch_jvm_mem_heap_max_in_bytes value=1038876672
- elasticsearch_jvm_threads_peak_count value=45
- elasticsearch_jvm_threads_count value=44
- elasticsearch_jvm_gc_collectors_young_collection_count value=2
- elasticsearch_jvm_gc_collectors_young_collection_time_in_millis value=98
- elasticsearch_jvm_gc_collectors_old_collection_count value=1
- elasticsearch_jvm_gc_collectors_old_collection_time_in_millis value=24
- elasticsearch_jvm_buffer_pools_direct_count value=40
- elasticsearch_jvm_buffer_pools_direct_used_in_bytes value=6304239
- elasticsearch_jvm_buffer_pools_direct_total_capacity_in_bytes value=6304239
- elasticsearch_jvm_buffer_pools_mapped_count value=0
- elasticsearch_jvm_buffer_pools_mapped_used_in_bytes value=0
- elasticsearch_jvm_buffer_pools_mapped_total_capacity_in_bytes value=0

TCP information measurement names:
- elasticsearch_network_tcp_in_errs value=0
- elasticsearch_network_tcp_passive_opens value=16
- elasticsearch_network_tcp_curr_estab value=29
- elasticsearch_network_tcp_in_segs value=113
- elasticsearch_network_tcp_out_segs value=97
- elasticsearch_network_tcp_retrans_segs value=0
- elasticsearch_network_tcp_attempt_fails value=0
- elasticsearch_network_tcp_active_opens value=13
- elasticsearch_network_tcp_estab_resets value=0
- elasticsearch_network_tcp_out_rsts value=0

Operating system stats, load average, cpu, mem, swap measurement names:
- elasticsearch_os_swap_used_in_bytes value=0
- elasticsearch_os_swap_free_in_bytes value=487997440
- elasticsearch_os_timestamp value=1436460392944
- elasticsearch_os_uptime_in_millis value=25092
- elasticsearch_os_cpu_sys value=0
- elasticsearch_os_cpu_user value=0
- elasticsearch_os_cpu_idle value=99
- elasticsearch_os_cpu_usage value=0
- elasticsearch_os_cpu_stolen value=0
- elasticsearch_os_mem_free_percent value=74
- elasticsearch_os_mem_used_percent value=25
- elasticsearch_os_mem_actual_free_in_bytes value=1565470720
- elasticsearch_os_mem_actual_used_in_bytes value=534159360
- elasticsearch_os_mem_free_in_bytes value=477761536
- elasticsearch_os_mem_used_in_bytes value=1621868544

Process statistics, memory consumption, cpu usage, open file descriptors measurement names:
- elasticsearch_process_mem_resident_in_bytes value=246382592
- elasticsearch_process_mem_share_in_bytes value=18747392
- elasticsearch_process_mem_total_virtual_in_bytes value=4747890688
- elasticsearch_process_timestamp value=1436460392945
- elasticsearch_process_open_file_descriptors value=160
- elasticsearch_process_cpu_total_in_millis value=15480
- elasticsearch_process_cpu_percent value=2
- elasticsearch_process_cpu_sys_in_millis value=1870
- elasticsearch_process_cpu_user_in_millis value=13610

Statistics about each thread pool, including current size, queue and rejected tasks measurement names:
- elasticsearch_thread_pool_merge_threads value=6
- elasticsearch_thread_pool_merge_queue value=4
- elasticsearch_thread_pool_merge_active value=5
- elasticsearch_thread_pool_merge_rejected value=2
- elasticsearch_thread_pool_merge_largest value=5
- elasticsearch_thread_pool_merge_completed value=1
- elasticsearch_thread_pool_bulk_threads value=4
- elasticsearch_thread_pool_bulk_queue value=5
- elasticsearch_thread_pool_bulk_active value=7
- elasticsearch_thread_pool_bulk_rejected value=3
- elasticsearch_thread_pool_bulk_largest value=1
- elasticsearch_thread_pool_bulk_completed value=4
- elasticsearch_thread_pool_warmer_threads value=2
- elasticsearch_thread_pool_warmer_queue value=7
- elasticsearch_thread_pool_warmer_active value=3
- elasticsearch_thread_pool_warmer_rejected value=2
- elasticsearch_thread_pool_warmer_largest value=3
- elasticsearch_thread_pool_warmer_completed value=1
- elasticsearch_thread_pool_get_largest value=2
- elasticsearch_thread_pool_get_completed value=1
- elasticsearch_thread_pool_get_threads value=1
- elasticsearch_thread_pool_get_queue value=8
- elasticsearch_thread_pool_get_active value=4
- elasticsearch_thread_pool_get_rejected value=3
- elasticsearch_thread_pool_index_threads value=6
- elasticsearch_thread_pool_index_queue value=8
- elasticsearch_thread_pool_index_active value=4
- elasticsearch_thread_pool_index_rejected value=2
- elasticsearch_thread_pool_index_largest value=3
- elasticsearch_thread_pool_index_completed value=6
- elasticsearch_thread_pool_suggest_threads value=2
- elasticsearch_thread_pool_suggest_queue value=7
- elasticsearch_thread_pool_suggest_active value=2
- elasticsearch_thread_pool_suggest_rejected value=1
- elasticsearch_thread_pool_suggest_largest value=8
- elasticsearch_thread_pool_suggest_completed value=3
- elasticsearch_thread_pool_fetch_shard_store_queue value=7
- elasticsearch_thread_pool_fetch_shard_store_active value=4
- elasticsearch_thread_pool_fetch_shard_store_rejected value=2
- elasticsearch_thread_pool_fetch_shard_store_largest value=4
- elasticsearch_thread_pool_fetch_shard_store_completed value=1
- elasticsearch_thread_pool_fetch_shard_store_threads value=1
- elasticsearch_thread_pool_management_threads value=2
- elasticsearch_thread_pool_management_queue value=3
- elasticsearch_thread_pool_management_active value=1
- elasticsearch_thread_pool_management_rejected value=6
- elasticsearch_thread_pool_management_largest value=2
- elasticsearch_thread_pool_management_completed value=22
- elasticsearch_thread_pool_percolate_queue value=23
- elasticsearch_thread_pool_percolate_active value=13
- elasticsearch_thread_pool_percolate_rejected value=235
- elasticsearch_thread_pool_percolate_largest value=23
- elasticsearch_thread_pool_percolate_completed value=33
- elasticsearch_thread_pool_percolate_threads value=123
- elasticsearch_thread_pool_listener_active value=4
- elasticsearch_thread_pool_listener_rejected value=8
- elasticsearch_thread_pool_listener_largest value=1
- elasticsearch_thread_pool_listener_completed value=1
- elasticsearch_thread_pool_listener_threads value=1
- elasticsearch_thread_pool_listener_queue value=2
- elasticsearch_thread_pool_search_rejected value=7
- elasticsearch_thread_pool_search_largest value=2
- elasticsearch_thread_pool_search_completed value=4
- elasticsearch_thread_pool_search_threads value=5
- elasticsearch_thread_pool_search_queue value=7
- elasticsearch_thread_pool_search_active value=2
- elasticsearch_thread_pool_fetch_shard_started_threads value=3
- elasticsearch_thread_pool_fetch_shard_started_queue value=1
- elasticsearch_thread_pool_fetch_shard_started_active value=5
- elasticsearch_thread_pool_fetch_shard_started_rejected value=6
- elasticsearch_thread_pool_fetch_shard_started_largest value=4
- elasticsearch_thread_pool_fetch_shard_started_completed value=54
- elasticsearch_thread_pool_refresh_rejected value=4
- elasticsearch_thread_pool_refresh_largest value=8
- elasticsearch_thread_pool_refresh_completed value=3
- elasticsearch_thread_pool_refresh_threads value=23
- elasticsearch_thread_pool_refresh_queue value=7
- elasticsearch_thread_pool_refresh_active value=3
- elasticsearch_thread_pool_optimize_threads value=3
- elasticsearch_thread_pool_optimize_queue value=4
- elasticsearch_thread_pool_optimize_active value=1
- elasticsearch_thread_pool_optimize_rejected value=2
- elasticsearch_thread_pool_optimize_largest value=7
- elasticsearch_thread_pool_optimize_completed value=3
- elasticsearch_thread_pool_snapshot_largest value=1
- elasticsearch_thread_pool_snapshot_completed value=0
- elasticsearch_thread_pool_snapshot_threads value=8
- elasticsearch_thread_pool_snapshot_queue value=5
- elasticsearch_thread_pool_snapshot_active value=6
- elasticsearch_thread_pool_snapshot_rejected value=2
- elasticsearch_thread_pool_generic_threads value=1
- elasticsearch_thread_pool_generic_queue value=4
- elasticsearch_thread_pool_generic_active value=6
- elasticsearch_thread_pool_generic_rejected value=3
- elasticsearch_thread_pool_generic_largest value=2
- elasticsearch_thread_pool_generic_completed value=27
- elasticsearch_thread_pool_flush_threads value=3
- elasticsearch_thread_pool_flush_queue value=8
- elasticsearch_thread_pool_flush_active value=0
- elasticsearch_thread_pool_flush_rejected value=1
- elasticsearch_thread_pool_flush_largest value=5
- elasticsearch_thread_pool_flush_completed value=3

Transport statistics about sent and received bytes in cluster communication measurement names:
- elasticsearch_transport_server_open value=13
- elasticsearch_transport_rx_count value=6
- elasticsearch_transport_rx_size_in_bytes value=1380
- elasticsearch_transport_tx_count value=6
- elasticsearch_transport_tx_size_in_bytes value=1380
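The node measurement names above are simply the JSON paths of the `/_nodes/stats` (or `/_nodes/_local/stats`) response, joined with underscores and prefixed with the section name. To see the raw values behind them you can query the endpoint directly; here is a minimal standalone Go sketch of that, assuming a node on localhost:9200 (it is not part of the plugin, just an illustration):

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// nodeStats mirrors just enough of the /_nodes/_local/stats payload
// to read two of the fields listed above.
type nodeStats struct {
	Nodes map[string]struct {
		JVM struct {
			Mem struct {
				HeapUsedInBytes int64 `json:"heap_used_in_bytes"`
			} `json:"mem"`
		} `json:"jvm"`
		Process struct {
			OpenFileDescriptors int64 `json:"open_file_descriptors"`
		} `json:"process"`
	} `json:"nodes"`
}

func main() {
	resp, err := http.Get("http://localhost:9200/_nodes/_local/stats")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var stats nodeStats
	if err := json.NewDecoder(resp.Body).Decode(&stats); err != nil {
		panic(err)
	}
	for _, n := range stats.Nodes {
		// the same values the plugin reports as
		// elasticsearch_jvm_mem_heap_used_in_bytes and
		// elasticsearch_process_open_file_descriptors
		fmt.Println("jvm_mem_heap_used_in_bytes:", n.JVM.Mem.HeapUsedInBytes)
		fmt.Println("process_open_file_descriptors:", n.Process.OpenFileDescriptors)
	}
}
```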
255
plugins/inputs/elasticsearch/elasticsearch.go
Normal file
@@ -0,0 +1,255 @@
package elasticsearch

import (
    "encoding/json"
    "errors"
    "fmt"
    "net/http"
    "strings"
    "sync"
    "time"

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/plugins/inputs"
    jsonparser "github.com/influxdata/telegraf/plugins/parsers/json"
)

const statsPath = "/_nodes/stats"
const statsPathLocal = "/_nodes/_local/stats"
const healthPath = "/_cluster/health"

type node struct {
    Host       string            `json:"host"`
    Name       string            `json:"name"`
    Attributes map[string]string `json:"attributes"`
    Indices    interface{}       `json:"indices"`
    OS         interface{}       `json:"os"`
    Process    interface{}       `json:"process"`
    JVM        interface{}       `json:"jvm"`
    ThreadPool interface{}       `json:"thread_pool"`
    FS         interface{}       `json:"fs"`
    Transport  interface{}       `json:"transport"`
    HTTP       interface{}       `json:"http"`
    Breakers   interface{}       `json:"breakers"`
}

type clusterHealth struct {
    ClusterName         string                 `json:"cluster_name"`
    Status              string                 `json:"status"`
    TimedOut            bool                   `json:"timed_out"`
    NumberOfNodes       int                    `json:"number_of_nodes"`
    NumberOfDataNodes   int                    `json:"number_of_data_nodes"`
    ActivePrimaryShards int                    `json:"active_primary_shards"`
    ActiveShards        int                    `json:"active_shards"`
    RelocatingShards    int                    `json:"relocating_shards"`
    InitializingShards  int                    `json:"initializing_shards"`
    UnassignedShards    int                    `json:"unassigned_shards"`
    Indices             map[string]indexHealth `json:"indices"`
}

type indexHealth struct {
    Status              string `json:"status"`
    NumberOfShards      int    `json:"number_of_shards"`
    NumberOfReplicas    int    `json:"number_of_replicas"`
    ActivePrimaryShards int    `json:"active_primary_shards"`
    ActiveShards        int    `json:"active_shards"`
    RelocatingShards    int    `json:"relocating_shards"`
    InitializingShards  int    `json:"initializing_shards"`
    UnassignedShards    int    `json:"unassigned_shards"`
}

const sampleConfig = `
  ## specify a list of one or more Elasticsearch servers
  servers = ["http://localhost:9200"]

  ## set local to false when you want to read the indices stats from all nodes
  ## within the cluster
  local = true

  ## set cluster_health to true when you want to also obtain cluster level stats
  cluster_health = false
`

// Elasticsearch is a plugin to read stats from one or many Elasticsearch
// servers.
type Elasticsearch struct {
    Local         bool
    Servers       []string
    ClusterHealth bool
    client        *http.Client
}

// NewElasticsearch returns a new instance of Elasticsearch.
func NewElasticsearch() *Elasticsearch {
    tr := &http.Transport{ResponseHeaderTimeout: time.Duration(3 * time.Second)}
    client := &http.Client{
        Transport: tr,
        Timeout:   time.Duration(4 * time.Second),
    }
    return &Elasticsearch{client: client}
}

// SampleConfig returns sample configuration for this plugin.
func (e *Elasticsearch) SampleConfig() string {
    return sampleConfig
}

// Description returns the plugin description.
func (e *Elasticsearch) Description() string {
    return "Read stats from one or more Elasticsearch servers or clusters"
}

// Gather reads the stats from Elasticsearch and writes them to the
// Accumulator.
func (e *Elasticsearch) Gather(acc telegraf.Accumulator) error {
    errChan := make(chan error, len(e.Servers))
    var wg sync.WaitGroup
    wg.Add(len(e.Servers))

    for _, serv := range e.Servers {
        go func(s string, acc telegraf.Accumulator) {
            defer wg.Done()
            var url string
            if e.Local {
                url = s + statsPathLocal
            } else {
                url = s + statsPath
            }
            if err := e.gatherNodeStats(url, acc); err != nil {
                errChan <- err
                return
            }
            if e.ClusterHealth {
                // report cluster-health errors alongside node-stat errors
                if err := e.gatherClusterStats(s+healthPath+"?level=indices", acc); err != nil {
                    errChan <- err
                }
            }
        }(serv, acc)
    }

    wg.Wait()
    close(errChan)
    // Get all errors and return them as one giant error
    errStrings := []string{}
    for err := range errChan {
        errStrings = append(errStrings, err.Error())
    }

    if len(errStrings) == 0 {
        return nil
    }
    return errors.New(strings.Join(errStrings, "\n"))
}

func (e *Elasticsearch) gatherNodeStats(url string, acc telegraf.Accumulator) error {
    nodeStats := &struct {
        ClusterName string           `json:"cluster_name"`
        Nodes       map[string]*node `json:"nodes"`
    }{}
    if err := e.gatherData(url, nodeStats); err != nil {
        return err
    }
    for id, n := range nodeStats.Nodes {
        tags := map[string]string{
            "node_id":      id,
            "node_host":    n.Host,
            "node_name":    n.Name,
            "cluster_name": nodeStats.ClusterName,
        }

        for k, v := range n.Attributes {
            tags["node_attribute_"+k] = v
        }

        stats := map[string]interface{}{
            "indices":     n.Indices,
            "os":          n.OS,
            "process":     n.Process,
            "jvm":         n.JVM,
            "thread_pool": n.ThreadPool,
            "fs":          n.FS,
            "transport":   n.Transport,
            "http":        n.HTTP,
            "breakers":    n.Breakers,
        }

        now := time.Now()
        for p, s := range stats {
            f := jsonparser.JSONFlattener{}
            err := f.FlattenJSON("", s)
            if err != nil {
                return err
            }
            acc.AddFields("elasticsearch_"+p, f.Fields, tags, now)
        }
    }
    return nil
}

func (e *Elasticsearch) gatherClusterStats(url string, acc telegraf.Accumulator) error {
    clusterStats := &clusterHealth{}
    if err := e.gatherData(url, clusterStats); err != nil {
        return err
    }
    measurementTime := time.Now()
    clusterFields := map[string]interface{}{
        "status":                clusterStats.Status,
        "timed_out":             clusterStats.TimedOut,
        "number_of_nodes":       clusterStats.NumberOfNodes,
        "number_of_data_nodes":  clusterStats.NumberOfDataNodes,
        "active_primary_shards": clusterStats.ActivePrimaryShards,
        "active_shards":         clusterStats.ActiveShards,
        "relocating_shards":     clusterStats.RelocatingShards,
        "initializing_shards":   clusterStats.InitializingShards,
        "unassigned_shards":     clusterStats.UnassignedShards,
    }
    acc.AddFields(
        "elasticsearch_cluster_health",
        clusterFields,
        map[string]string{"name": clusterStats.ClusterName},
        measurementTime,
    )

    for name, health := range clusterStats.Indices {
        indexFields := map[string]interface{}{
            "status":                health.Status,
            "number_of_shards":      health.NumberOfShards,
            "number_of_replicas":    health.NumberOfReplicas,
            "active_primary_shards": health.ActivePrimaryShards,
            "active_shards":         health.ActiveShards,
            "relocating_shards":     health.RelocatingShards,
            "initializing_shards":   health.InitializingShards,
            "unassigned_shards":     health.UnassignedShards,
        }
        acc.AddFields(
            "elasticsearch_indices",
            indexFields,
            map[string]string{"index": name},
            measurementTime,
        )
    }
    return nil
}

func (e *Elasticsearch) gatherData(url string, v interface{}) error {
    r, err := e.client.Get(url)
    if err != nil {
        return err
    }
    defer r.Body.Close()
    if r.StatusCode != http.StatusOK {
        // NOTE: we are not going to read/discard r.Body under the assumption we'd prefer
        // to let the underlying transport close the connection and re-establish a new one for
        // future calls.
        return fmt.Errorf("elasticsearch: API responded with status-code %d, expected %d",
            r.StatusCode, http.StatusOK)
    }
    if err = json.NewDecoder(r.Body).Decode(v); err != nil {
        return err
    }
    return nil
}

func init() {
    inputs.Add("elasticsearch", func() telegraf.Input {
        return NewElasticsearch()
    })
}
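`Gather` above uses a common Go fan-out/fan-in shape: one goroutine per server, failures funneled through an error channel buffered to the number of producers, and everything joined into a single error after `wg.Wait()`. A stripped-down sketch of just that pattern (the names here are illustrative, not Telegraf APIs):

```go
package main

import (
	"errors"
	"fmt"
	"strings"
	"sync"
)

// gatherAll runs one worker per target and joins every failure into a
// single error, the same shape Gather uses above. The buffer must be at
// least len(targets) so no worker blocks after the others have finished.
func gatherAll(targets []string, work func(string) error) error {
	errChan := make(chan error, len(targets))
	var wg sync.WaitGroup
	wg.Add(len(targets))
	for _, t := range targets {
		go func(t string) {
			defer wg.Done()
			if err := work(t); err != nil {
				errChan <- err
			}
		}(t)
	}
	wg.Wait()
	close(errChan)

	var msgs []string
	for err := range errChan {
		msgs = append(msgs, err.Error())
	}
	if len(msgs) == 0 {
		return nil
	}
	return errors.New(strings.Join(msgs, "\n"))
}

func main() {
	err := gatherAll([]string{"a", "b"}, func(t string) error {
		return fmt.Errorf("failed to reach %s", t)
	})
	fmt.Println(err)
}
```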
89
plugins/inputs/elasticsearch/elasticsearch_test.go
Normal file
@@ -0,0 +1,89 @@
package elasticsearch

import (
    "io/ioutil"
    "net/http"
    "strings"
    "testing"

    "github.com/influxdata/telegraf/testutil"

    "github.com/stretchr/testify/require"
)

type transportMock struct {
    statusCode int
    body       string
}

func newTransportMock(statusCode int, body string) http.RoundTripper {
    return &transportMock{
        statusCode: statusCode,
        body:       body,
    }
}

func (t *transportMock) RoundTrip(r *http.Request) (*http.Response, error) {
    res := &http.Response{
        Header:     make(http.Header),
        Request:    r,
        StatusCode: t.statusCode,
    }
    res.Header.Set("Content-Type", "application/json")
    res.Body = ioutil.NopCloser(strings.NewReader(t.body))
    return res, nil
}

func (t *transportMock) CancelRequest(_ *http.Request) {
}

func TestElasticsearch(t *testing.T) {
    es := NewElasticsearch()
    es.Servers = []string{"http://example.com:9200"}
    es.client.Transport = newTransportMock(http.StatusOK, statsResponse)

    var acc testutil.Accumulator
    if err := es.Gather(&acc); err != nil {
        t.Fatal(err)
    }

    tags := map[string]string{
        "cluster_name":          "es-testcluster",
        "node_attribute_master": "true",
        "node_id":               "SDFsfSDFsdfFSDSDfSFDSDF",
        "node_name":             "test.host.com",
        "node_host":             "test",
    }

    acc.AssertContainsTaggedFields(t, "elasticsearch_indices", indicesExpected, tags)
    acc.AssertContainsTaggedFields(t, "elasticsearch_os", osExpected, tags)
    acc.AssertContainsTaggedFields(t, "elasticsearch_process", processExpected, tags)
    acc.AssertContainsTaggedFields(t, "elasticsearch_jvm", jvmExpected, tags)
    acc.AssertContainsTaggedFields(t, "elasticsearch_thread_pool", threadPoolExpected, tags)
    acc.AssertContainsTaggedFields(t, "elasticsearch_fs", fsExpected, tags)
    acc.AssertContainsTaggedFields(t, "elasticsearch_transport", transportExpected, tags)
    acc.AssertContainsTaggedFields(t, "elasticsearch_http", httpExpected, tags)
    acc.AssertContainsTaggedFields(t, "elasticsearch_breakers", breakersExpected, tags)
}

func TestGatherClusterStats(t *testing.T) {
    es := NewElasticsearch()
    es.Servers = []string{"http://example.com:9200"}
    es.ClusterHealth = true
    es.client.Transport = newTransportMock(http.StatusOK, clusterResponse)

    var acc testutil.Accumulator
    require.NoError(t, es.Gather(&acc))

    acc.AssertContainsTaggedFields(t, "elasticsearch_cluster_health",
        clusterHealthExpected,
        map[string]string{"name": "elasticsearch_telegraf"})

    acc.AssertContainsTaggedFields(t, "elasticsearch_indices",
        v1IndexExpected,
        map[string]string{"index": "v1"})

    acc.AssertContainsTaggedFields(t, "elasticsearch_indices",
        v2IndexExpected,
        map[string]string{"index": "v2"})
}
765
plugins/inputs/elasticsearch/testdata_test.go
Normal file
@@ -0,0 +1,765 @@
package elasticsearch

const clusterResponse = `
{
  "cluster_name": "elasticsearch_telegraf",
  "status": "green",
  "timed_out": false,
  "number_of_nodes": 3,
  "number_of_data_nodes": 3,
  "active_primary_shards": 5,
  "active_shards": 15,
  "relocating_shards": 0,
  "initializing_shards": 0,
  "unassigned_shards": 0,
  "indices": {
    "v1": {
      "status": "green",
      "number_of_shards": 10,
      "number_of_replicas": 1,
      "active_primary_shards": 10,
      "active_shards": 20,
      "relocating_shards": 0,
      "initializing_shards": 0,
      "unassigned_shards": 0
    },
    "v2": {
      "status": "red",
      "number_of_shards": 10,
      "number_of_replicas": 1,
      "active_primary_shards": 0,
      "active_shards": 0,
      "relocating_shards": 0,
      "initializing_shards": 0,
      "unassigned_shards": 20
    }
  }
}
`

var clusterHealthExpected = map[string]interface{}{
    "status": "green",
    "timed_out": false,
    "number_of_nodes": 3,
    "number_of_data_nodes": 3,
    "active_primary_shards": 5,
    "active_shards": 15,
    "relocating_shards": 0,
    "initializing_shards": 0,
    "unassigned_shards": 0,
}

var v1IndexExpected = map[string]interface{}{
    "status": "green",
    "number_of_shards": 10,
    "number_of_replicas": 1,
    "active_primary_shards": 10,
    "active_shards": 20,
    "relocating_shards": 0,
    "initializing_shards": 0,
    "unassigned_shards": 0,
}

var v2IndexExpected = map[string]interface{}{
    "status": "red",
    "number_of_shards": 10,
    "number_of_replicas": 1,
    "active_primary_shards": 0,
    "active_shards": 0,
    "relocating_shards": 0,
    "initializing_shards": 0,
    "unassigned_shards": 20,
}

const statsResponse = `
{
  "cluster_name": "es-testcluster",
  "nodes": {
    "SDFsfSDFsdfFSDSDfSFDSDF": {
      "timestamp": 1436365550135,
      "name": "test.host.com",
      "transport_address": "inet[/127.0.0.1:9300]",
      "host": "test",
      "ip": [
        "inet[/127.0.0.1:9300]",
        "NONE"
      ],
      "attributes": {
        "master": "true"
      },
      "indices": {
        "docs": {
          "count": 29652,
          "deleted": 5229
        },
        "store": {
          "size_in_bytes": 37715234,
          "throttle_time_in_millis": 215
        },
        "indexing": {
          "index_total": 84790,
          "index_time_in_millis": 29680,
          "index_current": 0,
          "delete_total": 13879,
          "delete_time_in_millis": 1139,
          "delete_current": 0,
          "noop_update_total": 0,
          "is_throttled": false,
          "throttle_time_in_millis": 0
        },
        "get": {
          "total": 1,
          "time_in_millis": 2,
          "exists_total": 0,
          "exists_time_in_millis": 0,
          "missing_total": 1,
          "missing_time_in_millis": 2,
          "current": 0
        },
        "search": {
          "open_contexts": 0,
          "query_total": 1452,
          "query_time_in_millis": 5695,
          "query_current": 0,
          "fetch_total": 414,
          "fetch_time_in_millis": 146,
          "fetch_current": 0
        },
        "merges": {
          "current": 0,
          "current_docs": 0,
          "current_size_in_bytes": 0,
          "total": 133,
          "total_time_in_millis": 21060,
          "total_docs": 203672,
          "total_size_in_bytes": 142900226
        },
        "refresh": {
          "total": 1076,
          "total_time_in_millis": 20078
        },
        "flush": {
          "total": 115,
          "total_time_in_millis": 2401
        },
        "warmer": {
          "current": 0,
          "total": 2319,
          "total_time_in_millis": 448
        },
        "filter_cache": {
          "memory_size_in_bytes": 7384,
          "evictions": 0
        },
        "id_cache": {
          "memory_size_in_bytes": 0
        },
        "fielddata": {
          "memory_size_in_bytes": 12996,
          "evictions": 0
        },
        "percolate": {
          "total": 0,
          "time_in_millis": 0,
          "current": 0,
          "memory_size_in_bytes": -1,
          "memory_size": "-1b",
          "queries": 0
        },
        "completion": {
          "size_in_bytes": 0
        },
        "segments": {
          "count": 134,
          "memory_in_bytes": 1285212,
          "index_writer_memory_in_bytes": 0,
          "index_writer_max_memory_in_bytes": 172368955,
          "version_map_memory_in_bytes": 611844,
          "fixed_bit_set_memory_in_bytes": 0
        },
        "translog": {
          "operations": 17702,
          "size_in_bytes": 17
        },
        "suggest": {
          "total": 0,
          "time_in_millis": 0,
          "current": 0
        },
        "query_cache": {
          "memory_size_in_bytes": 0,
          "evictions": 0,
          "hit_count": 0,
          "miss_count": 0
        },
        "recovery": {
          "current_as_source": 0,
          "current_as_target": 0,
          "throttle_time_in_millis": 0
        }
      },
      "os": {
        "timestamp": 1436460392944,
        "load_average": [
          0.01,
          0.04,
          0.05
        ],
        "mem": {
          "free_in_bytes": 477761536,
          "used_in_bytes": 1621868544,
          "free_percent": 74,
          "used_percent": 25,
          "actual_free_in_bytes": 1565470720,
          "actual_used_in_bytes": 534159360
        },
        "swap": {
          "used_in_bytes": 0,
          "free_in_bytes": 487997440
        }
      },
      "process": {
        "timestamp": 1436460392945,
        "open_file_descriptors": 160,
        "cpu": {
          "percent": 2,
          "sys_in_millis": 1870,
          "user_in_millis": 13610,
          "total_in_millis": 15480
        },
        "mem": {
          "total_virtual_in_bytes": 4747890688
        }
      },
      "jvm": {
        "timestamp": 1436460392945,
        "uptime_in_millis": 202245,
        "mem": {
          "heap_used_in_bytes": 52709568,
          "heap_used_percent": 5,
          "heap_committed_in_bytes": 259522560,
          "heap_max_in_bytes": 1038876672,
          "non_heap_used_in_bytes": 39634576,
          "non_heap_committed_in_bytes": 40841216,
          "pools": {
            "young": {
              "used_in_bytes": 32685760,
              "max_in_bytes": 279183360,
              "peak_used_in_bytes": 71630848,
              "peak_max_in_bytes": 279183360
            },
            "survivor": {
              "used_in_bytes": 8912880,
              "max_in_bytes": 34865152,
              "peak_used_in_bytes": 8912888,
              "peak_max_in_bytes": 34865152
            },
            "old": {
              "used_in_bytes": 11110928,
              "max_in_bytes": 724828160,
              "peak_used_in_bytes": 14354608,
              "peak_max_in_bytes": 724828160
            }
          }
        },
        "threads": {
          "count": 44,
          "peak_count": 45
        },
        "gc": {
          "collectors": {
            "young": {
              "collection_count": 2,
              "collection_time_in_millis": 98
            },
            "old": {
              "collection_count": 1,
              "collection_time_in_millis": 24
            }
          }
        },
        "buffer_pools": {
          "direct": {
            "count": 40,
            "used_in_bytes": 6304239,
            "total_capacity_in_bytes": 6304239
          },
          "mapped": {
            "count": 0,
            "used_in_bytes": 0,
            "total_capacity_in_bytes": 0
          }
        }
      },
      "thread_pool": {
        "percolate": {
          "threads": 123,
          "queue": 23,
          "active": 13,
          "rejected": 235,
          "largest": 23,
          "completed": 33
        },
        "fetch_shard_started": {
          "threads": 3,
          "queue": 1,
          "active": 5,
          "rejected": 6,
          "largest": 4,
          "completed": 54
        },
        "listener": {
          "threads": 1,
          "queue": 2,
          "active": 4,
          "rejected": 8,
          "largest": 1,
          "completed": 1
        },
        "index": {
          "threads": 6,
          "queue": 8,
          "active": 4,
          "rejected": 2,
          "largest": 3,
          "completed": 6
        },
        "refresh": {
          "threads": 23,
          "queue": 7,
          "active": 3,
          "rejected": 4,
          "largest": 8,
          "completed": 3
        },
        "suggest": {
          "threads": 2,
          "queue": 7,
          "active": 2,
          "rejected": 1,
          "largest": 8,
          "completed": 3
        },
        "generic": {
          "threads": 1,
          "queue": 4,
          "active": 6,
          "rejected": 3,
          "largest": 2,
          "completed": 27
        },
        "warmer": {
          "threads": 2,
          "queue": 7,
          "active": 3,
          "rejected": 2,
          "largest": 3,
          "completed": 1
        },
        "search": {
          "threads": 5,
          "queue": 7,
          "active": 2,
          "rejected": 7,
          "largest": 2,
          "completed": 4
        },
        "flush": {
          "threads": 3,
          "queue": 8,
          "active": 0,
          "rejected": 1,
          "largest": 5,
          "completed": 3
        },
        "optimize": {
          "threads": 3,
          "queue": 4,
          "active": 1,
          "rejected": 2,
          "largest": 7,
          "completed": 3
        },
        "fetch_shard_store": {
          "threads": 1,
          "queue": 7,
          "active": 4,
          "rejected": 2,
          "largest": 4,
          "completed": 1
        },
        "management": {
          "threads": 2,
          "queue": 3,
          "active": 1,
          "rejected": 6,
          "largest": 2,
          "completed": 22
        },
        "get": {
          "threads": 1,
          "queue": 8,
          "active": 4,
          "rejected": 3,
          "largest": 2,
          "completed": 1
        },
        "merge": {
          "threads": 6,
          "queue": 4,
          "active": 5,
          "rejected": 2,
          "largest": 5,
          "completed": 1
        },
        "bulk": {
          "threads": 4,
          "queue": 5,
          "active": 7,
          "rejected": 3,
          "largest": 1,
          "completed": 4
        },
        "snapshot": {
          "threads": 8,
          "queue": 5,
          "active": 6,
          "rejected": 2,
          "largest": 1,
          "completed": 0
        }
      },
      "fs": {
        "timestamp": 1436460392946,
        "total": {
          "total_in_bytes": 19507089408,
          "free_in_bytes": 16909316096,
          "available_in_bytes": 15894814720
        },
        "data": [
          {
            "path": "/usr/share/elasticsearch/data/elasticsearch/nodes/0",
            "mount": "/usr/share/elasticsearch/data",
            "type": "ext4",
            "total_in_bytes": 19507089408,
            "free_in_bytes": 16909316096,
            "available_in_bytes": 15894814720
          }
        ]
      },
      "transport": {
        "server_open": 13,
        "rx_count": 6,
        "rx_size_in_bytes": 1380,
        "tx_count": 6,
        "tx_size_in_bytes": 1380
      },
      "http": {
        "current_open": 3,
        "total_opened": 3
      },
      "breakers": {
        "fielddata": {
          "limit_size_in_bytes": 623326003,
          "limit_size": "594.4mb",
          "estimated_size_in_bytes": 0,
          "estimated_size": "0b",
          "overhead": 1.03,
          "tripped": 0
        },
        "request": {
          "limit_size_in_bytes": 415550668,
          "limit_size": "396.2mb",
          "estimated_size_in_bytes": 0,
          "estimated_size": "0b",
          "overhead": 1.0,
          "tripped": 0
        },
        "parent": {
          "limit_size_in_bytes": 727213670,
          "limit_size": "693.5mb",
          "estimated_size_in_bytes": 0,
          "estimated_size": "0b",
          "overhead": 1.0,
          "tripped": 0
        }
      }
    }
  }
}
`

var indicesExpected = map[string]interface{}{
    "id_cache_memory_size_in_bytes": float64(0),
    "completion_size_in_bytes": float64(0),
    "suggest_total": float64(0),
    "suggest_time_in_millis": float64(0),
    "suggest_current": float64(0),
    "query_cache_memory_size_in_bytes": float64(0),
    "query_cache_evictions": float64(0),
    "query_cache_hit_count": float64(0),
    "query_cache_miss_count": float64(0),
    "store_size_in_bytes": float64(37715234),
    "store_throttle_time_in_millis": float64(215),
    "merges_current_docs": float64(0),
    "merges_current_size_in_bytes": float64(0),
    "merges_total": float64(133),
    "merges_total_time_in_millis": float64(21060),
    "merges_total_docs": float64(203672),
    "merges_total_size_in_bytes": float64(142900226),
    "merges_current": float64(0),
    "filter_cache_memory_size_in_bytes": float64(7384),
    "filter_cache_evictions": float64(0),
    "indexing_index_total": float64(84790),
    "indexing_index_time_in_millis": float64(29680),
    "indexing_index_current": float64(0),
    "indexing_noop_update_total": float64(0),
    "indexing_throttle_time_in_millis": float64(0),
    "indexing_delete_total": float64(13879),
    "indexing_delete_time_in_millis": float64(1139),
    "indexing_delete_current": float64(0),
    "get_exists_time_in_millis": float64(0),
    "get_missing_total": float64(1),
    "get_missing_time_in_millis": float64(2),
    "get_current": float64(0),
    "get_total": float64(1),
    "get_time_in_millis": float64(2),
    "get_exists_total": float64(0),
    "refresh_total": float64(1076),
    "refresh_total_time_in_millis": float64(20078),
    "percolate_current": float64(0),
    "percolate_memory_size_in_bytes": float64(-1),
    "percolate_queries": float64(0),
    "percolate_total": float64(0),
    "percolate_time_in_millis": float64(0),
    "translog_operations": float64(17702),
    "translog_size_in_bytes": float64(17),
    "recovery_current_as_source": float64(0),
    "recovery_current_as_target": float64(0),
    "recovery_throttle_time_in_millis": float64(0),
    "docs_count": float64(29652),
    "docs_deleted": float64(5229),
    "flush_total_time_in_millis": float64(2401),
    "flush_total": float64(115),
    "fielddata_memory_size_in_bytes": float64(12996),
    "fielddata_evictions": float64(0),
    "search_fetch_current": float64(0),
    "search_open_contexts": float64(0),
    "search_query_total": float64(1452),
    "search_query_time_in_millis": float64(5695),
    "search_query_current": float64(0),
    "search_fetch_total": float64(414),
    "search_fetch_time_in_millis": float64(146),
    "warmer_current": float64(0),
    "warmer_total": float64(2319),
    "warmer_total_time_in_millis": float64(448),
    "segments_count": float64(134),
    "segments_memory_in_bytes": float64(1285212),
    "segments_index_writer_memory_in_bytes": float64(0),
    "segments_index_writer_max_memory_in_bytes": float64(172368955),
    "segments_version_map_memory_in_bytes": float64(611844),
    "segments_fixed_bit_set_memory_in_bytes": float64(0),
}

var osExpected = map[string]interface{}{
    "load_average_0": float64(0.01),
    "load_average_1": float64(0.04),
    "load_average_2": float64(0.05),
    "swap_used_in_bytes": float64(0),
    "swap_free_in_bytes": float64(487997440),
    "timestamp": float64(1436460392944),
    "mem_free_percent": float64(74),
    "mem_used_percent": float64(25),
    "mem_actual_free_in_bytes": float64(1565470720),
    "mem_actual_used_in_bytes": float64(534159360),
    "mem_free_in_bytes": float64(477761536),
    "mem_used_in_bytes": float64(1621868544),
}

var processExpected = map[string]interface{}{
    "mem_total_virtual_in_bytes": float64(4747890688),
    "timestamp": float64(1436460392945),
    "open_file_descriptors": float64(160),
    "cpu_total_in_millis": float64(15480),
    "cpu_percent": float64(2),
    "cpu_sys_in_millis": float64(1870),
    "cpu_user_in_millis": float64(13610),
}

var jvmExpected = map[string]interface{}{
    "timestamp": float64(1436460392945),
    "uptime_in_millis": float64(202245),
    "mem_non_heap_used_in_bytes": float64(39634576),
    "mem_non_heap_committed_in_bytes": float64(40841216),
    "mem_pools_young_max_in_bytes": float64(279183360),
    "mem_pools_young_peak_used_in_bytes": float64(71630848),
    "mem_pools_young_peak_max_in_bytes": float64(279183360),
    "mem_pools_young_used_in_bytes": float64(32685760),
    "mem_pools_survivor_peak_used_in_bytes": float64(8912888),
    "mem_pools_survivor_peak_max_in_bytes": float64(34865152),
    "mem_pools_survivor_used_in_bytes": float64(8912880),
    "mem_pools_survivor_max_in_bytes": float64(34865152),
    "mem_pools_old_peak_max_in_bytes": float64(724828160),
    "mem_pools_old_used_in_bytes": float64(11110928),
    "mem_pools_old_max_in_bytes": float64(724828160),
    "mem_pools_old_peak_used_in_bytes": float64(14354608),
    "mem_heap_used_in_bytes": float64(52709568),
    "mem_heap_used_percent": float64(5),
    "mem_heap_committed_in_bytes": float64(259522560),
    "mem_heap_max_in_bytes": float64(1038876672),
    "threads_peak_count": float64(45),
    "threads_count": float64(44),
    "gc_collectors_young_collection_count": float64(2),
    "gc_collectors_young_collection_time_in_millis": float64(98),
    "gc_collectors_old_collection_count": float64(1),
    "gc_collectors_old_collection_time_in_millis": float64(24),
    "buffer_pools_direct_count": float64(40),
    "buffer_pools_direct_used_in_bytes": float64(6304239),
    "buffer_pools_direct_total_capacity_in_bytes": float64(6304239),
    "buffer_pools_mapped_count": float64(0),
    "buffer_pools_mapped_used_in_bytes": float64(0),
    "buffer_pools_mapped_total_capacity_in_bytes": float64(0),
}

var threadPoolExpected = map[string]interface{}{
    "merge_threads": float64(6),
    "merge_queue": float64(4),
    "merge_active": float64(5),
    "merge_rejected": float64(2),
    "merge_largest": float64(5),
    "merge_completed": float64(1),
    "bulk_threads": float64(4),
    "bulk_queue": float64(5),
    "bulk_active": float64(7),
    "bulk_rejected": float64(3),
    "bulk_largest": float64(1),
    "bulk_completed": float64(4),
    "warmer_threads": float64(2),
    "warmer_queue": float64(7),
    "warmer_active": float64(3),
    "warmer_rejected": float64(2),
    "warmer_largest": float64(3),
    "warmer_completed": float64(1),
    "get_largest": float64(2),
    "get_completed": float64(1),
    "get_threads": float64(1),
    "get_queue": float64(8),
    "get_active": float64(4),
    "get_rejected": float64(3),
    "index_threads": float64(6),
    "index_queue": float64(8),
    "index_active": float64(4),
    "index_rejected": float64(2),
    "index_largest": float64(3),
    "index_completed": float64(6),
    "suggest_threads": float64(2),
    "suggest_queue": float64(7),
    "suggest_active": float64(2),
    "suggest_rejected": float64(1),
    "suggest_largest": float64(8),
    "suggest_completed": float64(3),
    "fetch_shard_store_queue": float64(7),
    "fetch_shard_store_active": float64(4),
    "fetch_shard_store_rejected": float64(2),
    "fetch_shard_store_largest": float64(4),
    "fetch_shard_store_completed": float64(1),
    "fetch_shard_store_threads": float64(1),
    "management_threads": float64(2),
    "management_queue": float64(3),
    "management_active": float64(1),
    "management_rejected": float64(6),
    "management_largest": float64(2),
    "management_completed": float64(22),
    "percolate_queue": float64(23),
    "percolate_active": float64(13),
    "percolate_rejected": float64(235),
    "percolate_largest": float64(23),
    "percolate_completed": float64(33),
    "percolate_threads": float64(123),
    "listener_active": float64(4),
    "listener_rejected": float64(8),
    "listener_largest": float64(1),
    "listener_completed": float64(1),
    "listener_threads": float64(1),
    "listener_queue": float64(2),
    "search_rejected": float64(7),
    "search_largest": float64(2),
    "search_completed": float64(4),
    "search_threads": float64(5),
    "search_queue": float64(7),
    "search_active": float64(2),
    "fetch_shard_started_threads": float64(3),
    "fetch_shard_started_queue": float64(1),
    "fetch_shard_started_active": float64(5),
    "fetch_shard_started_rejected": float64(6),
    "fetch_shard_started_largest": float64(4),
    "fetch_shard_started_completed": float64(54),
    "refresh_rejected": float64(4),
    "refresh_largest": float64(8),
    "refresh_completed": float64(3),
    "refresh_threads": float64(23),
    "refresh_queue": float64(7),
    "refresh_active": float64(3),
    "optimize_threads": float64(3),
    "optimize_queue": float64(4),
    "optimize_active": float64(1),
    "optimize_rejected": float64(2),
    "optimize_largest": float64(7),
    "optimize_completed": float64(3),
    "snapshot_largest": float64(1),
    "snapshot_completed": float64(0),
    "snapshot_threads": float64(8),
    "snapshot_queue": float64(5),
    "snapshot_active": float64(6),
    "snapshot_rejected": float64(2),
    "generic_threads": float64(1),
    "generic_queue": float64(4),
    "generic_active": float64(6),
    "generic_rejected": float64(3),
    "generic_largest": float64(2),
    "generic_completed": float64(27),
    "flush_threads": float64(3),
    "flush_queue": float64(8),
    "flush_active": float64(0),
    "flush_rejected": float64(1),
    "flush_largest": float64(5),
    "flush_completed": float64(3),
}

var fsExpected = map[string]interface{}{
    "data_0_total_in_bytes": float64(19507089408),
    "data_0_free_in_bytes": float64(16909316096),
    "data_0_available_in_bytes": float64(15894814720),
    "timestamp": float64(1436460392946),
    "total_free_in_bytes": float64(16909316096),
    "total_available_in_bytes": float64(15894814720),
    "total_total_in_bytes": float64(19507089408),
}

var transportExpected = map[string]interface{}{
    "server_open": float64(13),
    "rx_count": float64(6),
    "rx_size_in_bytes": float64(1380),
    "tx_count": float64(6),
    "tx_size_in_bytes": float64(1380),
}

var httpExpected = map[string]interface{}{
    "current_open": float64(3),
    "total_opened": float64(3),
}

var breakersExpected = map[string]interface{}{
    "fielddata_estimated_size_in_bytes": float64(0),
    "fielddata_overhead": float64(1.03),
    "fielddata_tripped": float64(0),
    "fielddata_limit_size_in_bytes": float64(623326003),
    "request_estimated_size_in_bytes": float64(0),
    "request_overhead": float64(1.0),
    "request_tripped": float64(0),
    "request_limit_size_in_bytes": float64(415550668),
    "parent_overhead": float64(1.0),
    "parent_tripped": float64(0),
    "parent_limit_size_in_bytes": float64(727213670),
    "parent_estimated_size_in_bytes": float64(0),
}
183
plugins/inputs/exec/README.md
Normal file
@@ -0,0 +1,183 @@
# Exec Input Plugin

Please also see: [Telegraf Input Data Formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md)

### Example 1 - JSON

#### Configuration

In this example a script called `/tmp/test.sh` and a script called `/tmp/test2.sh`
are configured for `[[inputs.exec]]` in JSON format.

```
# Read flattened metrics from one or more commands that output JSON to stdout
[[inputs.exec]]
  # Shell/commands array
  commands = ["/tmp/test.sh", "/tmp/test2.sh"]

  # Data format to consume.
  # NOTE json only reads numerical measurements, strings and booleans are ignored.
  data_format = "json"

  # measurement name suffix (for separating different commands)
  name_suffix = "_mycollector"

  ## The following configuration is used for data_format = "graphite" and can
  ## be ignored for other data formats.
  ## If matching multiple measurement files, this string will be used to join the matched values.
  #separator = "."

  ## Each template line requires a template pattern. It can have an optional
  ## filter before the template, separated by spaces. It can also have optional extra
  ## tags following the template. Multiple tags should be separated by commas and no spaces
  ## similar to the line protocol format. There can be only one default template.
  ## Templates support the following formats:
  ## 1. filter + template
  ## 2. filter + template + extra tag
  ## 3. filter + template with field key
  ## 4. default template
  #templates = [
  #  "*.app env.service.resource.measurement",
  #  "stats.* .host.measurement* region=us-west,agent=sensu",
  #  "stats2.* .host.measurement.field",
  #  "measurement*"
  #]
```

Other options for modifying the measurement names are:

```
name_prefix = "prefix_"
```

Let's say that we have the above configuration, and mycollector outputs the
following JSON:

```json
{
    "a": 0.5,
    "b": {
        "c": 0.1,
        "d": 5
    }
}
```

The collected metrics will be stored as fields under the measurement
"exec_mycollector":

```
exec_mycollector a=0.5,b_c=0.1,b_d=5 1452815002357578567
```

If using JSON, only numeric values are parsed and turned into floats. Booleans
and strings will be ignored.
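To make the flattening concrete, here is a minimal, self-contained Go sketch (not the plugin's actual JSON parser) that reproduces the field set above and shows why booleans and strings drop out:

```go
package main

import (
	"encoding/json"
	"fmt"
	"sort"
	"strings"
)

// flattenNumeric keeps only numeric leaves, joining nested keys with "_",
// which is how "b": {"c": 0.1} becomes the field b_c=0.1.
func flattenNumeric(prefix string, v interface{}, fields map[string]float64) {
	switch t := v.(type) {
	case map[string]interface{}:
		for k, child := range t {
			key := k
			if prefix != "" {
				key = prefix + "_" + k
			}
			flattenNumeric(key, child, fields)
		}
	case float64: // every JSON number decodes to float64
		fields[prefix] = t
	default:
		// strings, booleans and nulls are dropped
	}
}

func main() {
	raw := `{"a": 0.5, "b": {"c": 0.1, "d": 5}}`
	var v interface{}
	if err := json.Unmarshal([]byte(raw), &v); err != nil {
		panic(err)
	}
	fields := map[string]float64{}
	flattenNumeric("", v, fields)

	pairs := make([]string, 0, len(fields))
	for k, val := range fields {
		pairs = append(pairs, fmt.Sprintf("%s=%v", k, val))
	}
	sort.Strings(pairs)
	fmt.Println("exec_mycollector " + strings.Join(pairs, ","))
	// Output: exec_mycollector a=0.5,b_c=0.1,b_d=5
}
```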
### Example 2 - Influx Line-Protocol

In this example an application called `/usr/bin/line_protocol_collector`
and a script called `/tmp/test2.sh` are configured for `[[inputs.exec]]`
in influx line-protocol format.

#### Configuration

```
[[inputs.exec]]
  # Shell/commands array
  # compatible with old version
  # we can still use the old command configuration
  # command = "/usr/bin/line_protocol_collector"
  commands = ["/usr/bin/line_protocol_collector","/tmp/test2.sh"]

  # Data format to consume.
  # NOTE json only reads numerical measurements, strings and booleans are ignored.
  data_format = "influx"
```

The line_protocol_collector application outputs the following line protocol:

```
cpu,cpu=cpu0,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu1,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu2,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu3,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu4,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu5,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu6,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
```

You will get data in InfluxDB exactly as it is defined above: the tags are
cpu=cpuN, host=foo, and datacenter=us-east, and the fields are usage_idle
and usage_busy. Each point receives a timestamp at collection time.
Each line must end in \n, as the Influx line protocol requires.
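A collector only has to print well-formed lines to stdout. Here is a minimal Go sketch of an emitter producing the output above (illustrative only; it is not shipped with Telegraf):

```go
package main

import "fmt"

func main() {
	tags := "host=foo,datacenter=us-east"
	for cpu := 0; cpu < 7; cpu++ {
		// measurement,tag_set field_set — a trailing newline ends each
		// line, as the "influx" data format requires. The timestamp is
		// omitted here; Telegraf assigns collection time when it is absent.
		fmt.Printf("cpu,cpu=cpu%d,%s usage_idle=99,usage_busy=1\n", cpu, tags)
	}
}
```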

### Example 3 - Graphite

We can also change the data_format to "graphite" to collect metrics from graphite-compatible metrics scripts, such as:

* Nagios [Metrics Plugins](https://exchange.nagios.org/directory/Plugins)
* Sensu [Metrics Plugins](https://github.com/sensu-plugins)

In this example a script called /tmp/test.sh and a script called /tmp/test2.sh are configured for [[inputs.exec]] in graphite format.

#### Configuration
```
# Read metrics from one or more commands that output graphite line protocol to stdout
[[inputs.exec]]
  # Shell/commands array
  commands = ["/tmp/test.sh","/tmp/test2.sh"]

  # Data format to consume.
  # NOTE json only reads numerical measurements, strings and booleans are ignored.
  data_format = "graphite"

  # measurement name suffix (for separating different commands)
  name_suffix = "_mycollector"

  ## The following configuration is used for data_format = "graphite" and can
  ## be ignored for other data formats.
  ## If matching multiple measurement files, this string will be used to join the matched values.
  separator = "."

  ## Each template line requires a template pattern. It can have an optional
  ## filter before the template, separated by spaces. It can also have optional extra
  ## tags following the template. Multiple tags should be separated by commas and no spaces
  ## similar to the line protocol format. There can be only one default template.
  ## Templates support the following formats:
  ## 1. filter + template
  ## 2. filter + template + extra tag
  ## 3. filter + template with field key
  ## 4. default template
  templates = [
    "*.app env.service.resource.measurement",
    "stats.* .host.measurement* region=us-west,agent=sensu",
    "stats2.* .host.measurement.field",
    "measurement*"
  ]
```
Graphite messages are in this format:

```
metric_path value timestamp\n
```

__metric_path__ is the metric namespace that you want to populate.

__value__ is the value that you want to assign to the metric at this time.

__timestamp__ is the unix epoch time.

And test.sh/test2.sh will output:

```
sensu.metric.net.server0.eth0.rx_packets 461295119435 1444234982
sensu.metric.net.server0.eth0.tx_bytes 1093086493388480 1444234982
sensu.metric.net.server0.eth0.rx_bytes 1015633926034834 1444234982
sensu.metric.net.server0.eth0.tx_errors 0 1444234982
sensu.metric.net.server0.eth0.rx_errors 0 1444234982
sensu.metric.net.server0.eth0.tx_dropped 0 1444234982
sensu.metric.net.server0.eth0.rx_dropped 0 1444234982
```

The templates configuration is used to parse the graphite metrics and map them onto the InfluxDB/OpenTSDB tagged data model; a sketch of the idea follows below.

For more detailed information about templates, please refer to [The Graphite Input](https://github.com/influxdata/influxdb/blob/master/services/graphite/README.md)
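To make the template mechanics concrete, here is a small Go sketch that splits one of the graphite lines above and maps its dot-separated path segments onto a measurement and tags, the way a template does. The template role names used here are invented for illustration; the real parser additionally supports filters, `measurement*`, extra tags, and field keys as described above:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// applyTemplate maps the dot-separated segments of a graphite metric path
// onto tag names and the measurement. This is a toy sketch of the idea,
// not the parser the plugin actually uses.
func applyTemplate(template, line string) (measurement string, tags map[string]string, value float64, ts int64, err error) {
	parts := strings.Fields(line) // metric_path value timestamp
	if len(parts) != 3 {
		return "", nil, 0, 0, fmt.Errorf("expected 3 fields, got %d", len(parts))
	}
	value, err = strconv.ParseFloat(parts[1], 64)
	if err != nil {
		return "", nil, 0, 0, err
	}
	ts, err = strconv.ParseInt(parts[2], 10, 64)
	if err != nil {
		return "", nil, 0, 0, err
	}

	roles := strings.Split(template, ".")
	segs := strings.Split(parts[0], ".")
	tags = map[string]string{}
	for i, role := range roles {
		if i >= len(segs) {
			break
		}
		if role == "measurement" {
			measurement = segs[i]
		} else {
			tags[role] = segs[i] // any other role name becomes a tag key
		}
	}
	return measurement, tags, value, ts, nil
}

func main() {
	line := "sensu.metric.net.server0.eth0.rx_packets 461295119435 1444234982"
	m, tags, v, ts, err := applyTemplate("source.group.family.host.interface.measurement", line)
	if err != nil {
		panic(err)
	}
	fmt.Println(m, tags, v, ts)
	// rx_packets map[family:net group:metric host:server0 interface:eth0 source:sensu] 4.61295119435e+11 1444234982
}
```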
176
plugins/inputs/exec/exec.go
Normal file
@@ -0,0 +1,176 @@
package exec

import (
    "bytes"
    "fmt"
    "os/exec"
    "sync"
    "syscall"
    "time"

    "github.com/gonuts/go-shellquote"

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/internal"
    "github.com/influxdata/telegraf/plugins/inputs"
    "github.com/influxdata/telegraf/plugins/parsers"
    "github.com/influxdata/telegraf/plugins/parsers/nagios"
)

const sampleConfig = `
  ## Commands array
  commands = ["/tmp/test.sh", "/usr/bin/mycollector --foo=bar"]

  ## Timeout for each command to complete.
  timeout = "5s"

  ## measurement name suffix (for separating different commands)
  name_suffix = "_mycollector"

  ## Data format to consume.
## Each data format has it's own unique set of configuration options, read
|
||||
## more about them here:
|
||||
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
|
||||
data_format = "influx"
|
||||
`
|
||||
|
||||
type Exec struct {
|
||||
Commands []string
|
||||
Command string
|
||||
Timeout internal.Duration
|
||||
|
||||
parser parsers.Parser
|
||||
|
||||
wg sync.WaitGroup
|
||||
|
||||
runner Runner
|
||||
errChan chan error
|
||||
}
|
||||
|
||||
func NewExec() *Exec {
|
||||
return &Exec{
|
||||
runner: CommandRunner{},
|
||||
Timeout: internal.Duration{Duration: time.Second * 5},
|
||||
}
|
||||
}
|
||||
|
||||
type Runner interface {
|
||||
Run(*Exec, string, telegraf.Accumulator) ([]byte, error)
|
||||
}
|
||||
|
||||
type CommandRunner struct{}
|
||||
|
||||
func AddNagiosState(exitCode error, acc telegraf.Accumulator) error {
|
||||
nagiosState := 0
|
||||
if exitCode != nil {
|
||||
exiterr, ok := exitCode.(*exec.ExitError)
|
||||
if ok {
|
||||
status, ok := exiterr.Sys().(syscall.WaitStatus)
|
||||
if ok {
|
||||
nagiosState = status.ExitStatus()
|
||||
} else {
|
||||
return fmt.Errorf("exec: unable to get nagios plugin exit code")
|
||||
}
|
||||
} else {
|
||||
return fmt.Errorf("exec: unable to get nagios plugin exit code")
|
||||
}
|
||||
}
|
||||
fields := map[string]interface{}{"state": nagiosState}
|
||||
acc.AddFields("nagios_state", fields, nil)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c CommandRunner) Run(
|
||||
e *Exec,
|
||||
command string,
|
||||
acc telegraf.Accumulator,
|
||||
) ([]byte, error) {
|
||||
split_cmd, err := shellquote.Split(command)
|
||||
if err != nil || len(split_cmd) == 0 {
|
||||
return nil, fmt.Errorf("exec: unable to parse command, %s", err)
|
||||
}
|
||||
|
||||
cmd := exec.Command(split_cmd[0], split_cmd[1:]...)
|
||||
|
||||
var out bytes.Buffer
|
||||
cmd.Stdout = &out
|
||||
|
||||
if err := internal.RunTimeout(cmd, e.Timeout.Duration); err != nil {
|
||||
switch e.parser.(type) {
|
||||
case *nagios.NagiosParser:
|
||||
AddNagiosState(err, acc)
|
||||
default:
|
||||
return nil, fmt.Errorf("exec: %s for command '%s'", err, command)
|
||||
}
|
||||
} else {
|
||||
switch e.parser.(type) {
|
||||
case *nagios.NagiosParser:
|
||||
AddNagiosState(nil, acc)
|
||||
}
|
||||
}
|
||||
|
||||
return out.Bytes(), nil
|
||||
}
|
||||
|
||||
func (e *Exec) ProcessCommand(command string, acc telegraf.Accumulator) {
|
||||
defer e.wg.Done()
|
||||
|
||||
out, err := e.runner.Run(e, command, acc)
|
||||
if err != nil {
|
||||
e.errChan <- err
|
||||
return
|
||||
}
|
||||
|
||||
metrics, err := e.parser.Parse(out)
|
||||
if err != nil {
|
||||
e.errChan <- err
|
||||
} else {
|
||||
for _, metric := range metrics {
|
||||
acc.AddFields(metric.Name(), metric.Fields(), metric.Tags(), metric.Time())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (e *Exec) SampleConfig() string {
|
||||
return sampleConfig
|
||||
}
|
||||
|
||||
func (e *Exec) Description() string {
|
||||
return "Read metrics from one or more commands that can output to stdout"
|
||||
}
|
||||
|
||||
func (e *Exec) SetParser(parser parsers.Parser) {
|
||||
e.parser = parser
|
||||
}
|
||||
|
||||
func (e *Exec) Gather(acc telegraf.Accumulator) error {
|
||||
// Legacy single command support
|
||||
if e.Command != "" {
|
||||
e.Commands = append(e.Commands, e.Command)
|
||||
e.Command = ""
|
||||
}
|
||||
|
||||
e.errChan = make(chan error, len(e.Commands))
|
||||
|
||||
e.wg.Add(len(e.Commands))
|
||||
for _, command := range e.Commands {
|
||||
go e.ProcessCommand(command, acc)
|
||||
}
|
||||
e.wg.Wait()
|
||||
|
||||
select {
|
||||
default:
|
||||
close(e.errChan)
|
||||
return nil
|
||||
case err := <-e.errChan:
|
||||
close(e.errChan)
|
||||
return err
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func init() {
|
||||
inputs.Add("exec", func() telegraf.Input {
|
||||
return NewExec()
|
||||
})
|
||||
}
|
||||
171
plugins/inputs/exec/exec_test.go
Normal file
@@ -0,0 +1,171 @@
package exec

import (
	"fmt"
	"testing"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/parsers"

	"github.com/influxdata/telegraf/testutil"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// Midnight 9/22/2015
const baseTimeSeconds = 1442905200

const validJson = `
{
    "status": "green",
    "num_processes": 82,
    "cpu": {
        "status": "red",
        "nil_status": null,
        "used": 8234,
        "free": 32
    },
    "percent": 0.81,
    "users": [0, 1, 2, 3]
}`

const malformedJson = `
{
    "status": "green",
`

const lineProtocol = "cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1"

const lineProtocolMulti = `
cpu,cpu=cpu0,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu1,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu2,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu3,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu4,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu5,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu6,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
`

// runnerMock substitutes canned output/errors for real command execution.
type runnerMock struct {
	out []byte
	err error
}

func newRunnerMock(out []byte, err error) Runner {
	return &runnerMock{
		out: out,
		err: err,
	}
}

func (r runnerMock) Run(e *Exec, command string, acc telegraf.Accumulator) ([]byte, error) {
	if r.err != nil {
		return nil, r.err
	}
	return r.out, nil
}

func TestExec(t *testing.T) {
	parser, _ := parsers.NewJSONParser("exec", []string{}, nil)
	e := &Exec{
		runner:   newRunnerMock([]byte(validJson), nil),
		Commands: []string{"testcommand arg1"},
		parser:   parser,
	}

	var acc testutil.Accumulator
	err := e.Gather(&acc)
	require.NoError(t, err)
	assert.Equal(t, acc.NFields(), 8, "non-numeric measurements should be ignored")

	fields := map[string]interface{}{
		"num_processes": float64(82),
		"cpu_used":      float64(8234),
		"cpu_free":      float64(32),
		"percent":       float64(0.81),
		"users_0":       float64(0),
		"users_1":       float64(1),
		"users_2":       float64(2),
		"users_3":       float64(3),
	}
	acc.AssertContainsFields(t, "exec", fields)
}

func TestExecMalformed(t *testing.T) {
	parser, _ := parsers.NewJSONParser("exec", []string{}, nil)
	e := &Exec{
		runner:   newRunnerMock([]byte(malformedJson), nil),
		Commands: []string{"badcommand arg1"},
		parser:   parser,
	}

	var acc testutil.Accumulator
	err := e.Gather(&acc)
	require.Error(t, err)
	assert.Equal(t, acc.NFields(), 0, "No new points should have been added")
}

func TestCommandError(t *testing.T) {
	parser, _ := parsers.NewJSONParser("exec", []string{}, nil)
	e := &Exec{
		runner:   newRunnerMock(nil, fmt.Errorf("exit status code 1")),
		Commands: []string{"badcommand"},
		parser:   parser,
	}

	var acc testutil.Accumulator
	err := e.Gather(&acc)
	require.Error(t, err)
	assert.Equal(t, acc.NFields(), 0, "No new points should have been added")
}

func TestLineProtocolParse(t *testing.T) {
	parser, _ := parsers.NewInfluxParser()
	e := &Exec{
		runner:   newRunnerMock([]byte(lineProtocol), nil),
		Commands: []string{"line-protocol"},
		parser:   parser,
	}

	var acc testutil.Accumulator
	err := e.Gather(&acc)
	require.NoError(t, err)

	fields := map[string]interface{}{
		"usage_idle": float64(99),
		"usage_busy": float64(1),
	}
	tags := map[string]string{
		"host":       "foo",
		"datacenter": "us-east",
	}
	acc.AssertContainsTaggedFields(t, "cpu", fields, tags)
}

func TestLineProtocolParseMultiple(t *testing.T) {
	parser, _ := parsers.NewInfluxParser()
	e := &Exec{
		runner:   newRunnerMock([]byte(lineProtocolMulti), nil),
		Commands: []string{"line-protocol"},
		parser:   parser,
	}

	var acc testutil.Accumulator
	err := e.Gather(&acc)
	require.NoError(t, err)

	fields := map[string]interface{}{
		"usage_idle": float64(99),
		"usage_busy": float64(1),
	}
	tags := map[string]string{
		"host":       "foo",
		"datacenter": "us-east",
	}
	cpuTags := []string{"cpu0", "cpu1", "cpu2", "cpu3", "cpu4", "cpu5", "cpu6"}

	for _, cpu := range cpuTags {
		tags["cpu"] = cpu
		acc.AssertContainsTaggedFields(t, "cpu", fields, tags)
	}
}
37
plugins/inputs/filestat/README.md
Normal file
@@ -0,0 +1,37 @@
# filestat Input Plugin

The filestat plugin gathers metrics about file existence, size, and other stats.

### Configuration:

```toml
# Read stats about given file(s)
[[inputs.filestat]]
  ## Files to gather stats about.
  ## These accept standard unix glob matching rules, but with the addition of
  ## ** as a "super asterisk". See https://github.com/gobwas/glob.
  files = ["/etc/telegraf/telegraf.conf", "/var/log/**.log"]
  ## If true, read the entire file and calculate an md5 checksum.
  md5 = false
```
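As a rough illustration of those glob rules (the matched paths are hypothetical examples, consistent with the plugin's own sample config):

```
/var/log/**.log   -> recursively matches all .log files under /var/log,
                     e.g. /var/log/syslog.log and /var/log/nginx/access.log
/var/log/*/*.log  -> matches .log files exactly one directory below /var/log
```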
### Measurements & Fields:

- filestat
    - exists (int, 0 | 1)
    - size_bytes (int, bytes)
    - md5_sum (optional, string)

### Tags:

- All measurements have the following tags:
    - file (the path to the file, as specified in the config)

### Example Output:

```
$ telegraf -config /etc/telegraf/telegraf.conf -input-filter filestat -test
* Plugin: filestat, Collection 1
> filestat,file=/tmp/foo/bar,host=tyrion exists=0i 1461203374493128216
> filestat,file=/Users/sparrc/ws/telegraf.conf,host=tyrion exists=1i,size_bytes=47894i 1461203374493199335
```
125
plugins/inputs/filestat/filestat.go
Normal file
@@ -0,0 +1,125 @@
package filestat

import (
	"crypto/md5"
	"errors"
	"fmt"
	"io"
	"os"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/internal/globpath"
	"github.com/influxdata/telegraf/plugins/inputs"
)

const sampleConfig = `
  ## Files to gather stats about.
  ## These accept standard unix glob matching rules, but with the addition of
  ## ** as a "super asterisk". ie:
  ##   "/var/log/**.log"     -> recursively find all .log files in /var/log
  ##   "/var/log/*/*.log"    -> find all .log files with a parent dir in /var/log
  ##   "/var/log/apache.log" -> just tail the apache log file
  ##
  ## See https://github.com/gobwas/glob for more examples
  ##
  files = ["/var/log/**.log"]
  ## If true, read the entire file and calculate an md5 checksum.
  md5 = false
`

type FileStat struct {
	Md5   bool
	Files []string

	// maps full file paths to globmatch obj
	globs map[string]*globpath.GlobPath
}

func NewFileStat() *FileStat {
	return &FileStat{
		globs: make(map[string]*globpath.GlobPath),
	}
}

func (_ *FileStat) Description() string {
	return "Read stats about given file(s)"
}

func (_ *FileStat) SampleConfig() string { return sampleConfig }

func (f *FileStat) Gather(acc telegraf.Accumulator) error {
	var errS string
	var err error

	for _, filepath := range f.Files {
		// Get the compiled glob object for this filepath
		g, ok := f.globs[filepath]
		if !ok {
			if g, err = globpath.Compile(filepath); err != nil {
				errS += err.Error() + " "
				continue
			}
			f.globs[filepath] = g
		}

		files := g.Match()
		if len(files) == 0 {
			// No match: report the configured path as non-existent.
			acc.AddFields("filestat",
				map[string]interface{}{
					"exists": int64(0),
				},
				map[string]string{
					"file": filepath,
				})
			continue
		}

		for fileName, fileInfo := range files {
			tags := map[string]string{
				"file": fileName,
			}
			fields := map[string]interface{}{
				"exists":     int64(1),
				"size_bytes": fileInfo.Size(),
			}

			if f.Md5 {
				md5, err := getMd5(fileName)
				if err != nil {
					errS += err.Error() + " "
				} else {
					fields["md5_sum"] = md5
				}
			}

			acc.AddFields("filestat", fields, tags)
		}
	}

	if errS != "" {
		// errors.New avoids misinterpreting any '%' in the collected error
		// text (fmt.Errorf(errS) would treat errS as a format string).
		return errors.New(errS)
	}
	return nil
}

// Read given file and calculate an md5 hash.
func getMd5(file string) (string, error) {
	of, err := os.Open(file)
	if err != nil {
		return "", err
	}
	defer of.Close()

	hash := md5.New()
	_, err = io.Copy(hash, of)
	if err != nil {
		// fatal error
		return "", err
	}
	return fmt.Sprintf("%x", hash.Sum(nil)), nil
}

func init() {
	inputs.Add("filestat", func() telegraf.Input {
		return NewFileStat()
	})
}
180
plugins/inputs/filestat/filestat_test.go
Normal file
@@ -0,0 +1,180 @@
package filestat

import (
	"runtime"
	"strings"
	"testing"

	"github.com/influxdata/telegraf/testutil"
	"github.com/stretchr/testify/assert"
)

func TestGatherNoMd5(t *testing.T) {
	dir := getTestdataDir()
	fs := NewFileStat()
	fs.Files = []string{
		dir + "log1.log",
		dir + "log2.log",
		"/non/existant/file",
	}

	acc := testutil.Accumulator{}
	fs.Gather(&acc)

	tags1 := map[string]string{
		"file": dir + "log1.log",
	}
	fields1 := map[string]interface{}{
		"size_bytes": int64(0),
		"exists":     int64(1),
	}
	acc.AssertContainsTaggedFields(t, "filestat", fields1, tags1)

	tags2 := map[string]string{
		"file": dir + "log2.log",
	}
	fields2 := map[string]interface{}{
		"size_bytes": int64(0),
		"exists":     int64(1),
	}
	acc.AssertContainsTaggedFields(t, "filestat", fields2, tags2)

	tags3 := map[string]string{
		"file": "/non/existant/file",
	}
	fields3 := map[string]interface{}{
		"exists": int64(0),
	}
	acc.AssertContainsTaggedFields(t, "filestat", fields3, tags3)
}

func TestGatherExplicitFiles(t *testing.T) {
	dir := getTestdataDir()
	fs := NewFileStat()
	fs.Md5 = true
	fs.Files = []string{
		dir + "log1.log",
		dir + "log2.log",
		"/non/existant/file",
	}

	acc := testutil.Accumulator{}
	fs.Gather(&acc)

	tags1 := map[string]string{
		"file": dir + "log1.log",
	}
	fields1 := map[string]interface{}{
		"size_bytes": int64(0),
		"exists":     int64(1),
		// the md5 checksum of an empty file
		"md5_sum": "d41d8cd98f00b204e9800998ecf8427e",
	}
	acc.AssertContainsTaggedFields(t, "filestat", fields1, tags1)

	tags2 := map[string]string{
		"file": dir + "log2.log",
	}
	fields2 := map[string]interface{}{
		"size_bytes": int64(0),
		"exists":     int64(1),
		"md5_sum":    "d41d8cd98f00b204e9800998ecf8427e",
	}
	acc.AssertContainsTaggedFields(t, "filestat", fields2, tags2)

	tags3 := map[string]string{
		"file": "/non/existant/file",
	}
	fields3 := map[string]interface{}{
		"exists": int64(0),
	}
	acc.AssertContainsTaggedFields(t, "filestat", fields3, tags3)
}

func TestGatherGlob(t *testing.T) {
	dir := getTestdataDir()
	fs := NewFileStat()
	fs.Md5 = true
	fs.Files = []string{
		dir + "*.log",
	}

	acc := testutil.Accumulator{}
	fs.Gather(&acc)

	tags1 := map[string]string{
		"file": dir + "log1.log",
	}
	fields1 := map[string]interface{}{
		"size_bytes": int64(0),
		"exists":     int64(1),
		"md5_sum":    "d41d8cd98f00b204e9800998ecf8427e",
	}
	acc.AssertContainsTaggedFields(t, "filestat", fields1, tags1)

	tags2 := map[string]string{
		"file": dir + "log2.log",
	}
	fields2 := map[string]interface{}{
		"size_bytes": int64(0),
		"exists":     int64(1),
		"md5_sum":    "d41d8cd98f00b204e9800998ecf8427e",
	}
	acc.AssertContainsTaggedFields(t, "filestat", fields2, tags2)
}

func TestGatherSuperAsterisk(t *testing.T) {
	dir := getTestdataDir()
	fs := NewFileStat()
	fs.Md5 = true
	fs.Files = []string{
		dir + "**",
	}

	acc := testutil.Accumulator{}
	fs.Gather(&acc)

	tags1 := map[string]string{
		"file": dir + "log1.log",
	}
	fields1 := map[string]interface{}{
		"size_bytes": int64(0),
		"exists":     int64(1),
		"md5_sum":    "d41d8cd98f00b204e9800998ecf8427e",
	}
	acc.AssertContainsTaggedFields(t, "filestat", fields1, tags1)

	tags2 := map[string]string{
		"file": dir + "log2.log",
	}
	fields2 := map[string]interface{}{
		"size_bytes": int64(0),
		"exists":     int64(1),
		"md5_sum":    "d41d8cd98f00b204e9800998ecf8427e",
	}
	acc.AssertContainsTaggedFields(t, "filestat", fields2, tags2)

	tags3 := map[string]string{
		"file": dir + "test.conf",
	}
	fields3 := map[string]interface{}{
		"size_bytes": int64(104),
		"exists":     int64(1),
		"md5_sum":    "5a7e9b77fa25e7bb411dbd17cf403c1f",
	}
	acc.AssertContainsTaggedFields(t, "filestat", fields3, tags3)
}

func TestGetMd5(t *testing.T) {
	dir := getTestdataDir()
	md5, err := getMd5(dir + "test.conf")
	assert.NoError(t, err)
	assert.Equal(t, "5a7e9b77fa25e7bb411dbd17cf403c1f", md5)

	md5, err = getMd5("/tmp/foo/bar/fooooo")
	assert.Error(t, err)
}

func getTestdataDir() string {
	_, filename, _, _ := runtime.Caller(1)
	return strings.Replace(filename, "filestat_test.go", "testdata/", 1)
}
Some files were not shown because too many files have changed in this diff.