Compare commits
839 Commits
*(Commit table: 839 commits, fc4cb1654c through e34c52402f; only the SHA1 column survived extraction; author, date, and message data were not captured.)*
.gitignore (vendored): 5 changes
@@ -1,3 +1,6 @@
 pkg/
-tivan
 .vagrant
+/telegraf
+.idea
+*~
+*#
CHANGELOG.md: 490 changes
@@ -1,11 +1,493 @@
-## v0.1.2 [unreleased]
+## v0.10.5 [unreleased]

### Release Notes

### Features
-- [#12](https://github.com/influxdb/influxdb/pull/12): Add Linux/ARM to the list of built binaries. Thanks @voxxit!
-- [#14](https://github.com/influxdb/influxdb/pull/14): Clarify the S3 buckets that Telegraf is pushed to.

### Bugfixes
-- [#13](https://github.com/influxdb/influxdb/pull/13): Fix the packaging script.

## v0.10.4 [2016-02-24]

### Release Notes
- The pass/drop parameters have been renamed to fielddrop/fieldpass parameters, to more accurately indicate their purpose.
- There are also now namedrop/namepass parameters for passing/dropping based on the metric _name_.
- Experimental windows builds now available.

### Features
- [#727](https://github.com/influxdata/telegraf/pull/727): riak input, thanks @jcoene!
- [#694](https://github.com/influxdata/telegraf/pull/694): DNS Query input, thanks @mjasion!
- [#724](https://github.com/influxdata/telegraf/pull/724): username matching for procstat input, thanks @zorel!
- [#736](https://github.com/influxdata/telegraf/pull/736): Ignore dummy filesystems from disk plugin. Thanks @PierreF!
- [#737](https://github.com/influxdata/telegraf/pull/737): Support multiple fields for statsd input. Thanks @mattheath!

### Bugfixes
- [#701](https://github.com/influxdata/telegraf/pull/701): output write count shouldn't print in quiet mode.
- [#746](https://github.com/influxdata/telegraf/pull/746): httpjson plugin: Fix HTTP GET parameters.

## v0.10.3 [2016-02-18]

### Release Notes
- Users of the `exec` and `kafka_consumer` (and the new `nats_consumer` and `mqtt_consumer` plugins) can now specify the incoming data format that they would like to parse. Currently supports: "json", "influx", and "graphite"
- Users of message broker and file output plugins can now choose what data format they would like to output. Currently supports: "influx" and "graphite"
- More info on parsing _incoming_ data formats can be found [here](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md)
- More info on serializing _outgoing_ data formats can be found [here](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md)
- Telegraf now has an option `flush_buffer_when_full` that will flush the metric buffer whenever it fills up for each output, rather than dropping points and only flushing on a set time interval. This will default to `true` and is in the `[agent]` config section.

### Features
- [#652](https://github.com/influxdata/telegraf/pull/652): CouchDB Input Plugin. Thanks @codehate!
- [#655](https://github.com/influxdata/telegraf/pull/655): Support parsing arbitrary data formats. Currently limited to kafka_consumer and exec inputs.
- [#671](https://github.com/influxdata/telegraf/pull/671): Dovecot input plugin. Thanks @mikif70!
- [#680](https://github.com/influxdata/telegraf/pull/680): NATS consumer input plugin. Thanks @netixen!
- [#676](https://github.com/influxdata/telegraf/pull/676): MQTT consumer input plugin.
- [#683](https://github.com/influxdata/telegraf/pull/683): PostGRES input plugin: add pg_stat_bgwriter. Thanks @menardorama!
- [#679](https://github.com/influxdata/telegraf/pull/679): File/stdout output plugin.
- [#679](https://github.com/influxdata/telegraf/pull/679): Support for arbitrary output data formats.
- [#695](https://github.com/influxdata/telegraf/pull/695): raindrops input plugin. Thanks @burdandrei!
- [#650](https://github.com/influxdata/telegraf/pull/650): net_response input plugin. Thanks @titilambert!
- [#699](https://github.com/influxdata/telegraf/pull/699): Flush based on buffer size rather than time.
- [#682](https://github.com/influxdata/telegraf/pull/682): Mesos input plugin. Thanks @tripledes!

### Bugfixes
- [#443](https://github.com/influxdata/telegraf/issues/443): Fix Ping command timeout parameter on Linux.
- [#662](https://github.com/influxdata/telegraf/pull/667): Change `[tags]` to `[global_tags]` to fix multiple-plugin tags bug.
- [#642](https://github.com/influxdata/telegraf/issues/642): Riemann output plugin issues.
- [#394](https://github.com/influxdata/telegraf/issues/394): Support HTTP POST. Thanks @gabelev!
- [#715](https://github.com/influxdata/telegraf/pull/715): Fix influxdb precision config panic. Thanks @netixen!

## v0.10.2 [2016-02-04]

### Release Notes
- Statsd timing measurements are now aggregated into a single measurement with fields.
- Graphite output now inserts tags into the bucket in alphabetical order.
- Normalized TLS/SSL support for output plugins: MQTT, AMQP, Kafka
- `verify_ssl` config option was removed from Kafka because it was actually doing the opposite of what it claimed to do (yikes). It's been replaced by `insecure_skip_verify`

### Features
- [#575](https://github.com/influxdata/telegraf/pull/575): Support for collecting Windows Performance Counters. Thanks @TheFlyingCorpse!
- [#564](https://github.com/influxdata/telegraf/issues/564): features for plugin writing simplification. Internal metric data type.
- [#603](https://github.com/influxdata/telegraf/pull/603): Aggregate statsd timing measurements into fields. Thanks @marcinbunsch!
- [#601](https://github.com/influxdata/telegraf/issues/601): Warn when overwriting cached metrics.
- [#614](https://github.com/influxdata/telegraf/pull/614): PowerDNS input plugin. Thanks @Kasen!
- [#617](https://github.com/influxdata/telegraf/pull/617): exec plugin: parse influx line protocol in addition to JSON.
- [#628](https://github.com/influxdata/telegraf/pull/628): Windows perf counters: pre-vista support

### Bugfixes
- [#595](https://github.com/influxdata/telegraf/issues/595): graphite output should include tags to separate duplicate measurements.
- [#599](https://github.com/influxdata/telegraf/issues/599): datadog plugin tags not working.
- [#600](https://github.com/influxdata/telegraf/issues/600): datadog measurement/field name parsing is wrong.
- [#602](https://github.com/influxdata/telegraf/issues/602): Fix statsd field name templating.
- [#612](https://github.com/influxdata/telegraf/pull/612): Docker input panic fix if stats received are nil.
- [#634](https://github.com/influxdata/telegraf/pull/634): Properly set host headers in httpjson. Thanks @reginaldosousa!

## v0.10.1 [2016-01-27]

### Release Notes

- Telegraf now keeps a fixed-length buffer of metrics per-output. This buffer defaults to 10,000 metrics, and is adjustable. The buffer is cleared when a successful write to that output occurs.
- The docker plugin has been significantly overhauled to add more metrics and allow for docker-machine (incl OSX) support. [See the readme](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/docker/README.md) for the latest measurements, fields, and tags. There is also now support for specifying a docker endpoint to get metrics from.

### Features
- [#509](https://github.com/influxdata/telegraf/pull/509): Flatten JSON arrays with indices. Thanks @psilva261!
- [#512](https://github.com/influxdata/telegraf/pull/512): Python 3 build script, add lsof dep to package. Thanks @Ormod!
- [#475](https://github.com/influxdata/telegraf/pull/475): Add response time to httpjson plugin. Thanks @titilambert!
- [#519](https://github.com/influxdata/telegraf/pull/519): Added a sensors input based on lm-sensors. Thanks @md14454!
- [#467](https://github.com/influxdata/telegraf/issues/467): Add option to disable statsd measurement name conversion.
- [#534](https://github.com/influxdata/telegraf/pull/534): NSQ input plugin. Thanks @allingeek!
- [#494](https://github.com/influxdata/telegraf/pull/494): Graphite output plugin. Thanks @titilambert!
- AMQP SSL support. Thanks @ekini!
- [#539](https://github.com/influxdata/telegraf/pull/539): Reload config on SIGHUP. Thanks @titilambert!
- [#522](https://github.com/influxdata/telegraf/pull/522): Phusion passenger input plugin. Thanks @kureikain!
- [#541](https://github.com/influxdata/telegraf/pull/541): Kafka output TLS cert support. Thanks @Ormod!
- [#551](https://github.com/influxdata/telegraf/pull/551): Statsd UDP read packet size now defaults to 1500 bytes, and is configurable.
- [#552](https://github.com/influxdata/telegraf/pull/552): Support for collection interval jittering.
- [#484](https://github.com/influxdata/telegraf/issues/484): Include usage percent with procstat metrics.
- [#553](https://github.com/influxdata/telegraf/pull/553): Amazon CloudWatch output. thanks @skwong2!
- [#503](https://github.com/influxdata/telegraf/pull/503): Support docker endpoint configuration.
- [#563](https://github.com/influxdata/telegraf/pull/563): Docker plugin overhaul.
- [#285](https://github.com/influxdata/telegraf/issues/285): Fixed-size buffer of points.
- [#546](https://github.com/influxdata/telegraf/pull/546): SNMP Input plugin. Thanks @titilambert!
- [#589](https://github.com/influxdata/telegraf/pull/589): Microsoft SQL Server input plugin. Thanks @zensqlmonitor!
- [#573](https://github.com/influxdata/telegraf/pull/573): Github webhooks consumer input. Thanks @jackzampolin!
- [#471](https://github.com/influxdata/telegraf/pull/471): httpjson request headers. Thanks @asosso!

### Bugfixes
- [#506](https://github.com/influxdata/telegraf/pull/506): Ping input doesn't return response time metric when it times out. Thanks @titilambert!
- [#508](https://github.com/influxdata/telegraf/pull/508): Fix prometheus cardinality issue with the `net` plugin
- [#499](https://github.com/influxdata/telegraf/issues/499) & [#502](https://github.com/influxdata/telegraf/issues/502): php fpm unix socket and other fixes, thanks @kureikain!
- [#543](https://github.com/influxdata/telegraf/issues/543): Statsd packet size sometimes truncated.
- [#440](https://github.com/influxdata/telegraf/issues/440): Don't query filtered devices for disk stats.
- [#463](https://github.com/influxdata/telegraf/issues/463): Docker plugin not working on AWS Linux
- [#568](https://github.com/influxdata/telegraf/issues/568): Multiple output race condition.
- [#585](https://github.com/influxdata/telegraf/pull/585): Log stack trace and continue on Telegraf panic. Thanks @wutaizeng!

## v0.10.0 [2016-01-12]

### Release Notes
- Linux packages have been taken out of `opt`; the binary is now in `/usr/bin` and configuration files are in `/etc/telegraf`
- **breaking change** `plugins` have been renamed to `inputs`. This was done because `plugins` is too generic, as there are now also "output plugins", and will likely be "aggregator plugins" and "filter plugins" in the future. Additionally, `inputs/` and `outputs/` directories have been placed in the root-level `plugins/` directory.
- **breaking change** the `io` plugin has been renamed `diskio`
- **breaking change** plugin measurements aggregated into a single measurement.
- **breaking change** `jolokia` plugin: must use global tag/drop/pass parameters for configuration.
- **breaking change** `twemproxy` plugin: `prefix` option removed.
- **breaking change** `procstat` cpu measurements are now prepended with `cpu_time_` instead of only `cpu_`
- **breaking change** some command-line flags have been renamed to separate words: `-configdirectory` -> `-config-directory`, `-filter` -> `-input-filter`, `-outputfilter` -> `-output-filter`
- The prometheus plugin schema has not been changed (measurements have not been aggregated).

### Packaging change note:

RHEL/CentOS users upgrading from 0.2.x to 0.10.0 will probably have their configurations overwritten by the upgrade. There is a backup stored at /etc/telegraf/telegraf.conf.$(date +%s).backup.

### Features
- Plugin measurements aggregated into a single measurement.
- Added ability to specify per-plugin tags
- Added ability to specify per-plugin measurement suffix and prefix. (`name_prefix` and `name_suffix`)
- Added ability to override base plugin measurement name. (`name_override`)

### Bugfixes

## v0.2.5 [unreleased]

### Features
- [#427](https://github.com/influxdata/telegraf/pull/427): zfs plugin: pool stats added. Thanks @allenpetersen!
- [#428](https://github.com/influxdata/telegraf/pull/428): Amazon Kinesis output. Thanks @jimmystewpot!
- [#449](https://github.com/influxdata/telegraf/pull/449): influxdb plugin, thanks @mark-rushakoff

### Bugfixes
- [#430](https://github.com/influxdata/telegraf/issues/430): Network statistics removed in elasticsearch 2.1. Thanks @jipperinbham!
- [#452](https://github.com/influxdata/telegraf/issues/452): Elasticsearch open file handles error. Thanks @jipperinbham!

## v0.2.4 [2015-12-08]

### Features
- [#412](https://github.com/influxdata/telegraf/pull/412): Additional memcached stats. Thanks @mgresser!
- [#410](https://github.com/influxdata/telegraf/pull/410): Additional redis metrics. Thanks @vlaadbrain!
- [#414](https://github.com/influxdata/telegraf/issues/414): Jolokia plugin auth parameters
- [#415](https://github.com/influxdata/telegraf/issues/415): memcached plugin: support unix sockets
- [#418](https://github.com/influxdata/telegraf/pull/418): memcached plugin additional unit tests.
- [#408](https://github.com/influxdata/telegraf/pull/408): MailChimp plugin.
- [#382](https://github.com/influxdata/telegraf/pull/382): Add system wide network protocol stats to `net` plugin.
- [#401](https://github.com/influxdata/telegraf/pull/401): Support pass/drop/tagpass/tagdrop for outputs. Thanks @oldmantaiter!

### Bugfixes
- [#405](https://github.com/influxdata/telegraf/issues/405): Prometheus output cardinality issue
- [#388](https://github.com/influxdata/telegraf/issues/388): Fix collection hangup when cpu times decrement.

## v0.2.3 [2015-11-30]

### Release Notes
- **breaking change** The `kafka` plugin has been renamed to `kafka_consumer`, and most of the config option names have changed. This only affects the kafka consumer _plugin_ (not the output). There were a number of problems with the kafka plugin that led to it only collecting data once at startup, so the kafka plugin was basically non-functional.
- Plugins can now be specified as a list, and multiple plugin instances of the same type can be specified, like this:

```
[[inputs.cpu]]
  percpu = false
  totalcpu = true

[[inputs.cpu]]
  percpu = true
  totalcpu = false
  drop = ["cpu_time"]
```

- Riemann output added
- Aerospike plugin: tag changed from `host` -> `aerospike_host`

### Features
- [#379](https://github.com/influxdata/telegraf/pull/379): Riemann output, thanks @allenj!
- [#375](https://github.com/influxdata/telegraf/pull/375): kafka_consumer service plugin.
- [#392](https://github.com/influxdata/telegraf/pull/392): Procstat plugin can now accept pgrep -f pattern, thanks @ecarreras!
- [#383](https://github.com/influxdata/telegraf/pull/383): Specify plugins as a list.
- [#354](https://github.com/influxdata/telegraf/pull/354): Add ability to specify multiple metrics in one statsd line. Thanks @MerlinDMC!

### Bugfixes
- [#371](https://github.com/influxdata/telegraf/issues/371): Kafka consumer plugin not functioning.
- [#389](https://github.com/influxdata/telegraf/issues/389): NaN value panic

## v0.2.2 [2015-11-18]

### Release Notes
- 0.2.1 has a bug where all lists within plugins get duplicated, this includes lists of servers/URLs. 0.2.2 is being released solely to fix that bug

### Bugfixes
- [#377](https://github.com/influxdata/telegraf/pull/377): Fix for duplicate slices in inputs.

## v0.2.1 [2015-11-16]

### Release Notes
- Telegraf will no longer use docker-compose for "long" unit test, it has been changed to just run docker commands in the Makefile. See `make docker-run` and `make docker-kill`. `make test` will still run all unit tests with docker.
- Long unit tests are now run in CircleCI, with docker & race detector
- Redis plugin tag has changed from `host` to `server`
- HAProxy plugin tag has changed from `host` to `server`
- UDP output now supported
- Telegraf will now compile on FreeBSD
- Users can now specify outputs as lists, specifying multiple outputs of the same type.

### Features
- [#325](https://github.com/influxdata/telegraf/pull/325): NSQ output. Thanks @jrxFive!
- [#318](https://github.com/influxdata/telegraf/pull/318): Prometheus output. Thanks @oldmantaiter!
- [#338](https://github.com/influxdata/telegraf/pull/338): Restart Telegraf on package upgrade. Thanks @linsomniac!
- [#337](https://github.com/influxdata/telegraf/pull/337): Jolokia plugin, thanks @saiello!
- [#350](https://github.com/influxdata/telegraf/pull/350): Amon output.
- [#365](https://github.com/influxdata/telegraf/pull/365): Twemproxy plugin by @codeb2cc
- [#317](https://github.com/influxdata/telegraf/issues/317): ZFS plugin, thanks @cornerot!
- [#364](https://github.com/influxdata/telegraf/pull/364): Support InfluxDB UDP output.
- [#370](https://github.com/influxdata/telegraf/pull/370): Support specifying multiple outputs, as lists.
- [#372](https://github.com/influxdata/telegraf/pull/372): Remove gosigar and update go-dockerclient for FreeBSD support. Thanks @MerlinDMC!

### Bugfixes
- [#331](https://github.com/influxdata/telegraf/pull/331): Don't overwrite host tag in redis plugin.
- [#336](https://github.com/influxdata/telegraf/pull/336): Mongodb plugin should take 2 measurements.
- [#351](https://github.com/influxdata/telegraf/issues/317): Fix continual "CREATE DATABASE" in writes
- [#360](https://github.com/influxdata/telegraf/pull/360): Apply prefix before ShouldPass check. Thanks @sotfo!

## v0.2.0 [2015-10-27]

### Release Notes
- The -test flag will now only output 2 collections for plugins that need it
- There is a new agent configuration option: `flush_interval`. This option tells Telegraf how often to flush data to InfluxDB and other output sinks. For example, users can set `interval = "2s"` and `flush_interval = "60s"` for Telegraf to collect data every 2 seconds, and flush every 60 seconds.
- `precision` and `utc` are no longer valid agent config values. `precision` has moved to the `influxdb` output config, where it will continue to default to "s"
- debug and test output will now print the raw line-protocol string
- Telegraf will now, by default, round the collection interval to the nearest even interval. This means that `interval="10s"` will collect every :00, :10, etc. To ease scale concerns, flushing will be "jittered" by a random amount so that all Telegraf instances do not flush at the same time. Both of these options can be controlled via the `round_interval` and `flush_jitter` config options.
- Telegraf will now retry metric flushes twice

### Features
- [#205](https://github.com/influxdata/telegraf/issues/205): Include per-db redis keyspace info
- [#226](https://github.com/influxdata/telegraf/pull/226): Add timestamps to points in Kafka/AMQP outputs. Thanks @ekini
- [#90](https://github.com/influxdata/telegraf/issues/90): Add Docker labels to tags in docker plugin
- [#223](https://github.com/influxdata/telegraf/pull/223): Add port tag to nginx plugin. Thanks @neezgee!
- [#227](https://github.com/influxdata/telegraf/pull/227): Add command intervals to exec plugin. Thanks @jpalay!
- [#241](https://github.com/influxdata/telegraf/pull/241): MQTT Output. Thanks @shirou!
- Memory plugin: cached and buffered measurements re-added
- Logging: additional logging for each collection interval, track the number of metrics collected and from how many inputs.
- [#240](https://github.com/influxdata/telegraf/pull/240): procstat plugin, thanks @ranjib!
- [#244](https://github.com/influxdata/telegraf/pull/244): netstat plugin, thanks @shirou!
- [#262](https://github.com/influxdata/telegraf/pull/262): zookeeper plugin, thanks @jrxFive!
- [#237](https://github.com/influxdata/telegraf/pull/237): statsd service plugin, thanks @sparrc
- [#273](https://github.com/influxdata/telegraf/pull/273): puppet agent plugin, thanks @jrxFive!
- [#280](https://github.com/influxdata/telegraf/issues/280): Use InfluxDB client v2.
- [#281](https://github.com/influxdata/telegraf/issues/281): Eliminate need to deep copy Batch Points.
- [#286](https://github.com/influxdata/telegraf/issues/286): bcache plugin, thanks @cornerot!
- [#287](https://github.com/influxdata/telegraf/issues/287): Batch AMQP output, thanks @ekini!
- [#301](https://github.com/influxdata/telegraf/issues/301): Collect on even intervals
- [#298](https://github.com/influxdata/telegraf/pull/298): Support retrying output writes
- [#300](https://github.com/influxdata/telegraf/issues/300): aerospike plugin. Thanks @oldmantaiter!
- [#322](https://github.com/influxdata/telegraf/issues/322): Librato output. Thanks @jipperinbham!

### Bugfixes
- [#228](https://github.com/influxdata/telegraf/pull/228): New version of package will replace old one. Thanks @ekini!
- [#232](https://github.com/influxdata/telegraf/pull/232): Fix bashism run during deb package installation. Thanks @yankcrime!
- [#261](https://github.com/influxdata/telegraf/issues/260): RabbitMQ panics if wrong credentials given. Thanks @ekini!
- [#245](https://github.com/influxdata/telegraf/issues/245): Document Exec plugin example. Thanks @ekini!
- [#264](https://github.com/influxdata/telegraf/issues/264): logrotate config file fixes. Thanks @linsomniac!
- [#290](https://github.com/influxdata/telegraf/issues/290): Fix some plugins sending their values as strings.
- [#289](https://github.com/influxdata/telegraf/issues/289): Fix accumulator panic on nil tags.
- [#302](https://github.com/influxdata/telegraf/issues/302): Fix `[tags]` getting applied, thanks @gotyaoi!

## v0.1.9 [2015-09-22]

### Release Notes
- InfluxDB output config change: `url` is now `urls`, and is a list. Config files will still be backwards compatible if only `url` is specified.
- The -test flag will now output two metric collections
- Support for filtering telegraf outputs on the CLI -- Telegraf will now allow filtering of output sinks on the command-line using the `-outputfilter` flag, much like how the `-filter` flag works for inputs.
- Support for filtering on config-file creation -- Telegraf now supports filtering to -sample-config command. You can now run `telegraf -sample-config -filter cpu -outputfilter influxdb` to get a config file with only the cpu plugin defined, and the influxdb output defined.
- **Breaking Change**: The CPU collection plugin has been refactored to fix some bugs and outdated dependency issues. At the same time, I also decided to fix a naming consistency issue, so cpu_percentageIdle will become cpu_usage_idle. Also, all CPU time measurements now have it indicated in their name, so cpu_idle will become cpu_time_idle. Additionally, cpu_time measurements are going to be dropped in the default config.
- **Breaking Change**: The memory plugin has been refactored and some measurements have been renamed for consistency. Some measurements have also been removed from the output. They are still being collected by gopsutil, and could easily be re-added in a "verbose" mode if there is demand for it.

### Features
- [#143](https://github.com/influxdata/telegraf/issues/143): InfluxDB clustering support
- [#181](https://github.com/influxdata/telegraf/issues/181): Makefile GOBIN support. Thanks @Vye!
- [#203](https://github.com/influxdata/telegraf/pull/200): AMQP output. Thanks @ekini!
- [#182](https://github.com/influxdata/telegraf/pull/182): OpenTSDB output. Thanks @rplessl!
- [#187](https://github.com/influxdata/telegraf/pull/187): Retry output sink connections on startup.
- [#220](https://github.com/influxdata/telegraf/pull/220): Add port tag to apache plugin. Thanks @neezgee!
- [#217](https://github.com/influxdata/telegraf/pull/217): Add filtering for output sinks and filtering when specifying a config file.

### Bugfixes
- [#170](https://github.com/influxdata/telegraf/issues/170): Systemd support
- [#175](https://github.com/influxdata/telegraf/issues/175): Set write precision before gathering metrics
- [#178](https://github.com/influxdata/telegraf/issues/178): redis plugin, multiple server thread hang bug
- Fix net plugin on darwin
- [#84](https://github.com/influxdata/telegraf/issues/84): Fix docker plugin on CentOS. Thanks @neezgee!
- [#189](https://github.com/influxdata/telegraf/pull/189): Fix mem_used_perc. Thanks @mced!
- [#192](https://github.com/influxdata/telegraf/issues/192): Increase compatibility of postgresql plugin. Now supports versions 8.1+
- [#203](https://github.com/influxdata/telegraf/issues/203): EL5 rpm support. Thanks @ekini!
- [#206](https://github.com/influxdata/telegraf/issues/206): CPU steal/guest values wrong on linux.
- [#212](https://github.com/influxdata/telegraf/issues/212): Add hashbang to postinstall script. Thanks @ekini!
- [#212](https://github.com/influxdata/telegraf/issues/212): Fix makefile warning. Thanks @ekini!

## v0.1.8 [2015-09-04]

### Release Notes
- Telegraf will now write data in UTC at second precision by default
- Now using Go 1.5 to build telegraf

### Features
- [#150](https://github.com/influxdata/telegraf/pull/150): Add Host Uptime metric to system plugin
- [#158](https://github.com/influxdata/telegraf/pull/158): Apache Plugin. Thanks @KPACHbIuLLIAnO4
- [#159](https://github.com/influxdata/telegraf/pull/159): Use second precision for InfluxDB writes
- [#165](https://github.com/influxdata/telegraf/pull/165): Add additional metrics to mysql plugin. Thanks @nickscript0
- [#162](https://github.com/influxdata/telegraf/pull/162): Write UTC by default, provide option
- [#166](https://github.com/influxdata/telegraf/pull/166): Upload binaries to S3
- [#169](https://github.com/influxdata/telegraf/pull/169): Ping plugin

### Bugfixes

## v0.1.7 [2015-08-28]

### Features
- [#38](https://github.com/influxdata/telegraf/pull/38): Kafka output producer.
- [#133](https://github.com/influxdata/telegraf/pull/133): Add plugin.Gather error logging. Thanks @nickscript0!
- [#136](https://github.com/influxdata/telegraf/issues/136): Add a -usage flag for printing usage of a single plugin.
- [#137](https://github.com/influxdata/telegraf/issues/137): Memcached: fix when a value contains a space
- [#138](https://github.com/influxdata/telegraf/issues/138): MySQL server address tag.
- [#142](https://github.com/influxdata/telegraf/pull/142): Add Description and SampleConfig funcs to output interface
- Indent the toml config file for readability

### Bugfixes
- [#128](https://github.com/influxdata/telegraf/issues/128): system_load measurement missing.
- [#129](https://github.com/influxdata/telegraf/issues/129): Latest pkg url fix.
- [#131](https://github.com/influxdata/telegraf/issues/131): Fix memory reporting on linux & darwin. Thanks @subhachandrachandra!
- [#140](https://github.com/influxdata/telegraf/issues/140): Memory plugin prec->perc typo fix. Thanks @brunoqc!

## v0.1.6 [2015-08-20]

### Features
- [#112](https://github.com/influxdata/telegraf/pull/112): Datadog output. Thanks @jipperinbham!
- [#116](https://github.com/influxdata/telegraf/pull/116): Use godep to vendor all dependencies
- [#120](https://github.com/influxdata/telegraf/pull/120): Httpjson plugin. Thanks @jpalay & @alvaromorales!

### Bugfixes
- [#113](https://github.com/influxdata/telegraf/issues/113): Update README with Telegraf/InfluxDB compatibility
- [#118](https://github.com/influxdata/telegraf/pull/118): Fix for disk usage stats in Windows. Thanks @srfraser!
- [#122](https://github.com/influxdata/telegraf/issues/122): Fix for DiskUsage segv fault. Thanks @srfraser!
- [#126](https://github.com/influxdata/telegraf/issues/126): Nginx plugin not catching net.SplitHostPort error

## v0.1.5 [2015-08-13]

### Features
- [#54](https://github.com/influxdata/telegraf/pull/54): MongoDB plugin. Thanks @jipperinbham!
- [#55](https://github.com/influxdata/telegraf/pull/55): Elasticsearch plugin. Thanks @brocaar!
- [#71](https://github.com/influxdata/telegraf/pull/71): HAProxy plugin. Thanks @kureikain!
- [#72](https://github.com/influxdata/telegraf/pull/72): Adding TokuDB metrics to MySQL. Thanks @vadimtk!
- [#73](https://github.com/influxdata/telegraf/pull/73): RabbitMQ plugin. Thanks @ianunruh!
- [#77](https://github.com/influxdata/telegraf/issues/77): Automatically create database.
- [#79](https://github.com/influxdata/telegraf/pull/56): Nginx plugin. Thanks @codeb2cc!
- [#86](https://github.com/influxdata/telegraf/pull/86): Lustre2 plugin. Thanks @srfraser!
- [#91](https://github.com/influxdata/telegraf/pull/91): Unit testing
- [#92](https://github.com/influxdata/telegraf/pull/92): Exec plugin. Thanks @alvaromorales!
- [#98](https://github.com/influxdata/telegraf/pull/98): LeoFS plugin. Thanks @mocchira!
- [#103](https://github.com/influxdata/telegraf/pull/103): Filter by metric tags. Thanks @srfraser!
- [#106](https://github.com/influxdata/telegraf/pull/106): Options to filter plugins on startup. Thanks @zepouet!
- [#107](https://github.com/influxdata/telegraf/pull/107): Multiple outputs beyond influxdb. Thanks @jipperinbham!
- [#108](https://github.com/influxdata/telegraf/issues/108): Support setting per-CPU and total-CPU gathering.
- [#111](https://github.com/influxdata/telegraf/pull/111): Report CPU Usage in cpu plugin. Thanks @jpalay!

### Bugfixes
- [#85](https://github.com/influxdata/telegraf/pull/85): Fix GetLocalHost testutil function for mac users
- [#89](https://github.com/influxdata/telegraf/pull/89): go fmt fixes
- [#94](https://github.com/influxdata/telegraf/pull/94): Fix for issue #93, explicitly call sarama.v1 -> sarama
- [#101](https://github.com/influxdata/telegraf/issues/101): switch back from master branch if building locally
- [#99](https://github.com/influxdata/telegraf/issues/99): update integer output to new InfluxDB line protocol format

## v0.1.4 [2015-07-09]

### Features
- [#56](https://github.com/influxdata/telegraf/pull/56): Update README for Kafka plugin. Thanks @EmilS!

### Bugfixes
- [#50](https://github.com/influxdata/telegraf/pull/50): Fix init.sh script to use telegraf directory. Thanks @jseriff!
- [#52](https://github.com/influxdata/telegraf/pull/52): Update CHANGELOG to reference updated directory. Thanks @benfb!

## v0.1.3 [2015-07-05]

### Features
- [#35](https://github.com/influxdata/telegraf/pull/35): Add Kafka plugin. Thanks @EmilS!
- [#47](https://github.com/influxdata/telegraf/pull/47): Add RethinkDB plugin. Thanks @jipperinbham!

### Bugfixes
- [#45](https://github.com/influxdata/telegraf/pull/45): Skip disk tags that don't have a value. Thanks @jhofeditz!
- [#43](https://github.com/influxdata/telegraf/pull/43): Fix bug in MySQL plugin. Thanks @marcosnils!

## v0.1.2 [2015-07-01]

### Features
- [#12](https://github.com/influxdata/telegraf/pull/12): Add Linux/ARM to the list of built binaries. Thanks @voxxit!
- [#14](https://github.com/influxdata/telegraf/pull/14): Clarify the S3 buckets that Telegraf is pushed to.
- [#16](https://github.com/influxdata/telegraf/pull/16): Convert Redis to use URI, support Redis AUTH. Thanks @jipperinbham!
- [#21](https://github.com/influxdata/telegraf/pull/21): Add memcached plugin. Thanks @Yukki!

### Bugfixes
- [#13](https://github.com/influxdata/telegraf/pull/13): Fix the packaging script.
- [#19](https://github.com/influxdata/telegraf/pull/19): Add host name to metric tags. Thanks @sherifzain!
- [#20](https://github.com/influxdata/telegraf/pull/20): Fix race condition with accumulator mutex. Thanks @nkatsaros!
- [#23](https://github.com/influxdata/telegraf/pull/23): Change name of folder for packages. Thanks @colinrymer!
- [#32](https://github.com/influxdata/telegraf/pull/32): Fix spelling of memoory -> memory. Thanks @tylernisonoff!

## v0.1.1 [2015-06-19]
CONTRIBUTING.md (new file): 300 lines
@@ -0,0 +1,300 @@
## Steps for Contributing:

1. [Sign the CLA](http://influxdb.com/community/cla.html)
1. Make changes or write plugin (see below for details)
1. Add your plugin to `plugins/inputs/all/all.go` or `plugins/outputs/all/all.go` (see the sketch after this list)
1. If your plugin requires a new Go package, [add it](https://github.com/influxdata/telegraf/blob/master/CONTRIBUTING.md#adding-a-dependency)
1. Write a README for your plugin. If it's an input plugin, it should be structured like the [input example here](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/EXAMPLE_README.md). Output plugin READMEs are less structured, but any information you can provide on how the data will look is appreciated. See the [OpenTSDB output](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/opentsdb) for a good example.
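For step 3, registration is a one-line blank import: importing the plugin package for its side effects runs its `init` function, which registers the plugin. A minimal sketch of the entry in `plugins/inputs/all/all.go` (`simple` stands in for your plugin's package):

```go
package all

import (
	// A blank import runs the plugin package's init() function,
	// which calls inputs.Add and registers the plugin.
	_ "github.com/influxdata/telegraf/plugins/inputs/simple"
)
```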
## GoDoc

Public interfaces for inputs, outputs, metrics, and the accumulator can be found on the GoDoc:

[GoDoc](https://godoc.org/github.com/influxdata/telegraf)

## Sign the CLA

Before we can merge a pull request, you will need to sign the CLA, which can be found [on our website](http://influxdb.com/community/cla.html).
## Adding a dependency

Assuming you can already build the project, run these in the telegraf directory:

1. `go get github.com/sparrc/gdm`
1. `gdm restore`
1. `gdm save`
## Input Plugins

This section is for developers who want to create new collection inputs. Telegraf is entirely plugin driven. This interface allows operators to pick and choose what is gathered and makes it easy for developers to create new ways of generating metrics.

Plugin authorship is kept as simple as possible to encourage people to develop and submit new inputs.

### Input Plugin Guidelines

* A plugin must conform to the `telegraf.Input` interface.
* Input Plugins should call `inputs.Add` in their `init` function to register themselves. See below for a quick example.
* Input Plugins must be added to the `github.com/influxdata/telegraf/plugins/inputs/all/all.go` file.
* The `SampleConfig` function should return valid toml that describes how the plugin can be configured. This is included in `telegraf -sample-config`.
* The `Description` function should say in one line what this plugin does.

Let's say you've written a plugin that emits metrics about processes on the current host.
### Input Plugin Example

```go
package simple

// simple.go

import (
	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/inputs"
)

type Simple struct {
	Ok bool
}

func (s *Simple) Description() string {
	return "a demo plugin"
}

func (s *Simple) SampleConfig() string {
	return "ok = true # indicate if everything is fine"
}

func (s *Simple) Gather(acc inputs.Accumulator) error {
	if s.Ok {
		acc.Add("state", "pretty good", nil)
	} else {
		acc.Add("state", "not great", nil)
	}

	return nil
}

func init() {
	inputs.Add("simple", func() telegraf.Input { return &Simple{} })
}
```
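To exercise a plugin like this quickly, one option is a small unit test. This is a sketch only: it assumes the repository's `testutil` package provides an `Accumulator` that satisfies the accumulator interface, which is how Telegraf's own plugin tests were written at this time.

```go
package simple

import (
	"testing"

	"github.com/influxdata/telegraf/testutil"
)

func TestSimpleGather(t *testing.T) {
	s := &Simple{Ok: true}

	// testutil.Accumulator collects whatever Gather adds,
	// so the test can assert on it afterwards.
	var acc testutil.Accumulator
	if err := s.Gather(&acc); err != nil {
		t.Fatal(err)
	}
}
```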
## Input Plugins Accepting Arbitrary Data Formats

Some input plugins (such as [exec](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/exec)) accept arbitrary input data formats. An overview of these data formats can be found [here](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md).

In order to enable this, you must specify a `SetParser(parser parsers.Parser)` function on the plugin object (see the exec plugin for an example), as well as defining `parser` as a field of the object.

You can then utilize the parser internally in your plugin, parsing data as you see fit. Telegraf's configuration layer will take care of instantiating and creating the `Parser` object.
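A minimal sketch of that wiring, shown stand-alone rather than as a compilable addition to the earlier `simple.go` (the `gatherFromBuffer` helper and the hard-coded buffer are illustrative):

```go
package simple

import (
	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/parsers"
)

type Simple struct {
	parser parsers.Parser // the field the docs above ask for
}

// SetParser is called by Telegraf's configuration layer with a parser
// built from the plugin's data_format settings.
func (s *Simple) SetParser(parser parsers.Parser) {
	s.parser = parser
}

// gatherFromBuffer is a hypothetical helper showing where the parser
// fits: raw bytes in, telegraf metrics out.
func (s *Simple) gatherFromBuffer(buf []byte) ([]telegraf.Metric, error) {
	return s.parser.Parse(buf)
}
```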
You should also add the following to your SampleConfig() return:

```toml
  ## Data format to consume. This can be "json", "influx" or "graphite"
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "influx"
```

Below is the `Parser` interface.
```go
// Parser is an interface defining functions that a parser plugin must satisfy.
type Parser interface {
	// Parse takes a byte buffer separated by newlines
	// ie, `cpu.usage.idle 90\ncpu.usage.busy 10`
	// and parses it into telegraf metrics
	Parse(buf []byte) ([]telegraf.Metric, error)

	// ParseLine takes a single string metric
	// ie, "cpu.usage.idle 90"
	// and parses it into a telegraf metric.
	ParseLine(line string) (telegraf.Metric, error)
}
```
|
||||
|
||||
And you can view the code
|
||||
[here.](https://github.com/influxdata/telegraf/blob/henrypfhu-master/plugins/parsers/registry.go)
|
||||
|
||||
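Putting the pieces together, here is a minimal sketch of an input that
delegates parsing to its configured parser. The plugin name and the
hard-coded input bytes are illustrative only, and the `telegraf.Metric`
accessor methods are assumed to match the interface above:

```go
package example

import (
    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/plugins/parsers"
)

type Example struct {
    parser parsers.Parser
}

// SetParser is called by Telegraf's configuration layer with a parser
// built from the plugin's data_format settings.
func (e *Example) SetParser(parser parsers.Parser) {
    e.parser = parser
}

func (e *Example) Gather(acc telegraf.Accumulator) error {
    buf := []byte("cpu.usage.idle 90") // stand-in for data the plugin reads
    metrics, err := e.parser.Parse(buf)
    if err != nil {
        return err
    }
    for _, m := range metrics {
        acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time())
    }
    return nil
}
```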
## Service Input Plugins

This section is for developers who want to create new "service" collection
inputs. A service plugin differs from a regular plugin in that it operates
a background service while Telegraf is running. One example would be the `statsd`
plugin, which operates a statsd server.

Service Input Plugins are substantially more complicated than regular plugins, as they
require goroutines and locks to ensure data integrity. Service Input Plugins should
be avoided unless there is no way to create their behavior with a regular plugin.

Their interface is quite similar to a regular plugin, with the addition of `Start()`
and `Stop()` methods.

### Service Plugin Guidelines

* Same as the `Plugin` guidelines, except that they must conform to the
  `inputs.ServiceInput` interface.
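As a rough sketch (assuming, as the statsd plugin suggests, that the
interface simply layers the lifecycle methods on top of the regular input
interface), the additional surface looks like this:

```go
package inputs

import "github.com/influxdata/telegraf"

type ServiceInput interface {
    telegraf.Input

    // Start begins the background service; it should return an error if
    // the service cannot start (e.g. the listen address is in use).
    Start() error

    // Stop shuts the service down and releases its resources.
    Stop()
}
```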
## Output Plugins

This section is for developers who want to create a new output sink. Outputs
are created in a similar manner as collection plugins, and their interface has
similar constructs.

### Output Plugin Guidelines

* An output must conform to the `outputs.Output` interface.
* Outputs should call `outputs.Add` in their `init` function to register themselves.
  See below for a quick example.
* To be available within Telegraf itself, plugins must add themselves to the
  `github.com/influxdata/telegraf/plugins/outputs/all/all.go` file.
* The `SampleConfig` function should return valid toml that describes how the
  output can be configured. This is included in `telegraf -sample-config`.
* The `Description` function should say in one line what this output does.
### Output Example

```go
package simpleoutput

// simpleoutput.go

import (
    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/plugins/outputs"
)

type Simple struct {
    Ok bool
}

func (s *Simple) Description() string {
    return "a demo output"
}

func (s *Simple) SampleConfig() string {
    return "url = \"localhost\""
}

func (s *Simple) Connect() error {
    // Make a connection to the URL here
    return nil
}

func (s *Simple) Close() error {
    // Close connection to the URL here
    return nil
}

func (s *Simple) Write(metrics []telegraf.Metric) error {
    for _, pt := range metrics {
        // write `pt` to the output sink here
    }
    return nil
}

func init() {
    outputs.Add("simpleoutput", func() telegraf.Output { return &Simple{} })
}
```
## Output Plugins Writing Arbitrary Data Formats

Some output plugins (such as
[file](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/file))
can write arbitrary output data formats. An overview of these data formats can
be found
[here](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md).

In order to enable this, you must specify a
`SetSerializer(serializer serializers.Serializer)`
function on the plugin object (see the file plugin for an example), as well as
defining `serializer` as a field of the object.

You can then utilize the serializer internally in your plugin, serializing data
before it's written. Telegraf's configuration layer will take care of
instantiating and configuring the `Serializer` object.

You should also add the following to your `SampleConfig()` return:

```toml
## Data format to output. This can be "influx" or "graphite"
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "influx"
```
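Mirroring the parser sketch above, a minimal serializer wiring might look
like this. The names are illustrative and the `Serialize` signature follows
the way the file output uses its serializer, so treat it as an assumption:

```go
package simpleoutput

import (
    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/plugins/serializers"
)

type Example struct {
    serializer serializers.Serializer
}

// SetSerializer is called by Telegraf's configuration layer with a
// serializer built from the output's data_format setting.
func (e *Example) SetSerializer(serializer serializers.Serializer) {
    e.serializer = serializer
}

func (e *Example) Write(metrics []telegraf.Metric) error {
    for _, metric := range metrics {
        values, err := e.serializer.Serialize(metric)
        if err != nil {
            return err
        }
        for _, value := range values {
            _ = value // write each serialized line to the sink here
        }
    }
    return nil
}
```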
## Service Output Plugins

This section is for developers who want to create a new "service" output. A
service output differs from a regular output in that it operates a background service
while Telegraf is running. One example would be the `prometheus_client` output,
which operates an HTTP server.

Their interface is quite similar to a regular output, with the addition of `Start()`
and `Stop()` methods.

### Service Output Guidelines

* Same as the `Output` guidelines, except that they must conform to the
  `outputs.ServiceOutput` interface.
## Unit Tests

### Execute short tests

execute `make test-short`

### Execute long tests

As Telegraf collects metrics from several third-party services, it becomes a
difficult task to mock each service, as some of them have complicated protocols
which would take some time to replicate.

To overcome this situation we've decided to use Docker containers to provide a
fast and reproducible environment to test those services which require it.
For other situations
(e.g. https://github.com/influxdata/telegraf/blob/master/plugins/inputs/redis/redis_test.go)
a simple mock will suffice.

To execute Telegraf tests follow these simple steps:

- Install Docker following [these](https://docs.docker.com/installation/)
  instructions
- execute `make test`

**OSX users**: you will need to install `boot2docker` or `docker-machine`.
The Makefile will assume that you have a `docker-machine` box called `default` to
get the IP address.

### Unit test troubleshooting

Try cleaning up your test environment by executing `make docker-kill` and
re-running the tests.
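As a concrete starting point for a short test, here is a minimal sketch for
the `simple` input from earlier. The mock accumulator is hypothetical and
exists only to satisfy the `telegraf.Accumulator` interface (as defined in
`accumulator.go` below), so the test needs no external service:

```go
package simple

// simple_test.go (sketch)

import (
    "testing"
    "time"

    "github.com/influxdata/telegraf"
)

// mockAccumulator records added values so the test can assert on them.
type mockAccumulator struct {
    values map[string]interface{}
    debug  bool
}

func (m *mockAccumulator) Add(
    measurement string,
    value interface{},
    tags map[string]string,
    t ...time.Time,
) {
    m.values[measurement] = value
}

func (m *mockAccumulator) AddFields(
    measurement string,
    fields map[string]interface{},
    tags map[string]string,
    t ...time.Time,
) {
    for k, v := range fields {
        m.values[measurement+"_"+k] = v
    }
}

func (m *mockAccumulator) Debug() bool         { return m.debug }
func (m *mockAccumulator) SetDebug(debug bool) { m.debug = debug }

func TestSimpleGather(t *testing.T) {
    s := &Simple{Ok: true}
    acc := &mockAccumulator{values: make(map[string]interface{})}

    // compile-time check that the mock satisfies telegraf.Accumulator
    var _ telegraf.Accumulator = acc

    if err := s.Gather(acc); err != nil {
        t.Fatal(err)
    }
    if acc.values["state"] != "pretty good" {
        t.Errorf("expected state %q, got %v", "pretty good", acc.values["state"])
    }
}
```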

53 Godeps Normal file
@@ -0,0 +1,53 @@
git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git 617c801af238c3af2d9e72c5d4a0f02edad03ce5
github.com/Shopify/sarama d37c73f2b2bce85f7fa16b6a550d26c5372892ef
github.com/Sirupsen/logrus f7f79f729e0fbe2fcc061db48a9ba0263f588252
github.com/amir/raidman 6a8e089bbe32e6b907feae5ba688841974b3c339
github.com/aws/aws-sdk-go 87b1e60a50b09e4812dee560b33a238f67305804
github.com/beorn7/perks b965b613227fddccbfffe13eae360ed3fa822f8d
github.com/cenkalti/backoff 4dc77674aceaabba2c7e3da25d4c823edfb73f99
github.com/dancannon/gorethink 6f088135ff288deb9d5546f4c71919207f891a70
github.com/davecgh/go-spew 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d
github.com/eapache/go-resiliency b86b1ec0dd4209a588dc1285cdd471e73525c0b3
github.com/eapache/queue ded5959c0d4e360646dc9e9908cff48666781367
github.com/fsouza/go-dockerclient 7b651349f9479f5114913eefbfd3c4eeddd79ab4
github.com/go-ini/ini afbd495e5aaea13597b5e14fe514ddeaa4d76fc3
github.com/go-sql-driver/mysql 7c7f556282622f94213bc028b4d0a7b6151ba239
github.com/golang/protobuf 6aaa8d47701fa6cf07e914ec01fde3d4a1fe79c3
github.com/golang/snappy 723cc1e459b8eea2dea4583200fd60757d40097a
github.com/gonuts/go-shellquote e842a11b24c6abfb3dd27af69a17f482e4b483c2
github.com/gorilla/context 1c83b3eabd45b6d76072b66b746c20815fb2872d
github.com/gorilla/mux 26a6070f849969ba72b72256e9f14cf519751690
github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478
github.com/influxdata/config bae7cb98197d842374d3b8403905924094930f24
github.com/influxdata/influxdb ef571fc104dc24b77cd3710c156cd95e5cfd7aa5
github.com/jmespath/go-jmespath c01cf91b011868172fdcd9f41838e80c9d716264
github.com/klauspost/crc32 999f3125931f6557b991b2f8472172bdfa578d38
github.com/lib/pq 8ad2b298cadd691a77015666a5372eae5dbfac8f
github.com/matttproud/golang_protobuf_extensions d0c3fe89de86839aecf2e0579c40ba3bb336a453
github.com/mreiferson/go-snappystream 028eae7ab5c4c9e2d1cb4c4ca1e53259bbe7e504
github.com/naoina/go-stringutil 6b638e95a32d0c1131db0e7fe83775cbea4a0d0b
github.com/naoina/toml 751171607256bb66e64c9f0220c00662420c38e9
github.com/nats-io/nats 6a83f1a633cfbfd90aa648ac99fb38c06a8b40df
github.com/nsqio/go-nsq 2118015c120962edc5d03325c680daf3163a8b5f
github.com/pmezard/go-difflib 792786c7400a136282c1664665ae0a8db921c6c2
github.com/prometheus/client_golang 67994f177195311c3ea3d4407ed0175e34a4256f
github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
github.com/prometheus/common 14ca1097bbe21584194c15e391a9dab95ad42a59
github.com/prometheus/procfs 406e5b7bfd8201a36e2bb5f7bdae0b03380c2ce8
github.com/samuel/go-zookeeper 218e9c81c0dd8b3b18172b2bbfad92cc7d6db55f
github.com/shirou/gopsutil e77438504d45b9985c99a75730fe65220ceea00e
github.com/soniah/gosnmp b1b4f885b12c5dcbd021c5cee1c904110de6db7d
github.com/streadway/amqp b4f3ceab0337f013208d31348b578d83c0064744
github.com/stretchr/objx 1a9d0bb9f541897e62256577b352fdbc1fb4fd94
github.com/stretchr/testify f390dcf405f7b83c997eac1b06768bb9f44dec18
github.com/wvanbergen/kafka 1a8639a45164fcc245d5c7b4bd3ccfbd1a0ffbf3
github.com/wvanbergen/kazoo-go 0f768712ae6f76454f987c3356177e138df258f8
github.com/zensqlmonitor/go-mssqldb ffe5510c6fa5e15e6d983210ab501c815b56b363
golang.org/x/crypto 1f22c0103821b9390939b6776727195525381532
golang.org/x/net 04b9de9b512f58addf28c9853d50ebef61c3953e
golang.org/x/text 6d3c22c4525a4da167968fa2479be5524d2e8bd0
gopkg.in/dancannon/gorethink.v1 6f088135ff288deb9d5546f4c71919207f891a70
gopkg.in/fatih/pool.v2 cba550ebf9bce999a02e963296d4bc7a486cb715
gopkg.in/mgo.v2 03c9f3ee4c14c8e51ee521a6a7d0425658dd6f64
gopkg.in/yaml.v2 f7716cbe52baa25d2e9b0d0da546fcf909fc16b4
github.com/miekg/dns e0d84d97e59bcb6561eae269c4e94d25b66822cb

56 Godeps_windows Normal file
@@ -0,0 +1,56 @@
git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git 617c801af238c3af2d9e72c5d4a0f02edad03ce5
github.com/Shopify/sarama d37c73f2b2bce85f7fa16b6a550d26c5372892ef
github.com/Sirupsen/logrus f7f79f729e0fbe2fcc061db48a9ba0263f588252
github.com/StackExchange/wmi f3e2bae1e0cb5aef83e319133eabfee30013a4a5
github.com/amir/raidman 6a8e089bbe32e6b907feae5ba688841974b3c339
github.com/aws/aws-sdk-go 87b1e60a50b09e4812dee560b33a238f67305804
github.com/beorn7/perks b965b613227fddccbfffe13eae360ed3fa822f8d
github.com/cenkalti/backoff 4dc77674aceaabba2c7e3da25d4c823edfb73f99
github.com/dancannon/gorethink 6f088135ff288deb9d5546f4c71919207f891a70
github.com/davecgh/go-spew 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d
github.com/eapache/go-resiliency b86b1ec0dd4209a588dc1285cdd471e73525c0b3
github.com/eapache/queue ded5959c0d4e360646dc9e9908cff48666781367
github.com/fsouza/go-dockerclient 7b651349f9479f5114913eefbfd3c4eeddd79ab4
github.com/go-ini/ini afbd495e5aaea13597b5e14fe514ddeaa4d76fc3
github.com/go-ole/go-ole 50055884d646dd9434f16bbb5c9801749b9bafe4
github.com/go-sql-driver/mysql 7c7f556282622f94213bc028b4d0a7b6151ba239
github.com/golang/protobuf 6aaa8d47701fa6cf07e914ec01fde3d4a1fe79c3
github.com/golang/snappy 723cc1e459b8eea2dea4583200fd60757d40097a
github.com/gonuts/go-shellquote e842a11b24c6abfb3dd27af69a17f482e4b483c2
github.com/gorilla/context 1c83b3eabd45b6d76072b66b746c20815fb2872d
github.com/gorilla/mux 26a6070f849969ba72b72256e9f14cf519751690
github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478
github.com/influxdata/config bae7cb98197d842374d3b8403905924094930f24
github.com/influxdata/influxdb ef571fc104dc24b77cd3710c156cd95e5cfd7aa5
github.com/jmespath/go-jmespath c01cf91b011868172fdcd9f41838e80c9d716264
github.com/klauspost/crc32 999f3125931f6557b991b2f8472172bdfa578d38
github.com/lib/pq 8ad2b298cadd691a77015666a5372eae5dbfac8f
github.com/lxn/win 9a7734ea4db26bc593d52f6a8a957afdad39c5c1
github.com/matttproud/golang_protobuf_extensions d0c3fe89de86839aecf2e0579c40ba3bb336a453
github.com/miekg/dns e0d84d97e59bcb6561eae269c4e94d25b66822cb
github.com/mreiferson/go-snappystream 028eae7ab5c4c9e2d1cb4c4ca1e53259bbe7e504
github.com/naoina/go-stringutil 6b638e95a32d0c1131db0e7fe83775cbea4a0d0b
github.com/naoina/toml 751171607256bb66e64c9f0220c00662420c38e9
github.com/nats-io/nats 6a83f1a633cfbfd90aa648ac99fb38c06a8b40df
github.com/nsqio/go-nsq 2118015c120962edc5d03325c680daf3163a8b5f
github.com/pmezard/go-difflib 792786c7400a136282c1664665ae0a8db921c6c2
github.com/prometheus/client_golang 67994f177195311c3ea3d4407ed0175e34a4256f
github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
github.com/prometheus/common 14ca1097bbe21584194c15e391a9dab95ad42a59
github.com/prometheus/procfs 406e5b7bfd8201a36e2bb5f7bdae0b03380c2ce8
github.com/samuel/go-zookeeper 218e9c81c0dd8b3b18172b2bbfad92cc7d6db55f
github.com/shirou/gopsutil e77438504d45b9985c99a75730fe65220ceea00e
github.com/shirou/w32 ada3ba68f000aa1b58580e45c9d308fe0b7fc5c5
github.com/soniah/gosnmp b1b4f885b12c5dcbd021c5cee1c904110de6db7d
github.com/streadway/amqp b4f3ceab0337f013208d31348b578d83c0064744
github.com/stretchr/objx 1a9d0bb9f541897e62256577b352fdbc1fb4fd94
github.com/stretchr/testify f390dcf405f7b83c997eac1b06768bb9f44dec18
github.com/wvanbergen/kafka 1a8639a45164fcc245d5c7b4bd3ccfbd1a0ffbf3
github.com/wvanbergen/kazoo-go 0f768712ae6f76454f987c3356177e138df258f8
github.com/zensqlmonitor/go-mssqldb ffe5510c6fa5e15e6d983210ab501c815b56b363
golang.org/x/net 04b9de9b512f58addf28c9853d50ebef61c3953e
golang.org/x/text 6d3c22c4525a4da167968fa2479be5524d2e8bd0
gopkg.in/dancannon/gorethink.v1 6f088135ff288deb9d5546f4c71919207f891a70
gopkg.in/fatih/pool.v2 cba550ebf9bce999a02e963296d4bc7a486cb715
gopkg.in/mgo.v2 03c9f3ee4c14c8e51ee521a6a7d0425658dd6f64
gopkg.in/yaml.v2 f7716cbe52baa25d2e9b0d0da546fcf909fc16b4

108 Makefile Normal file
@@ -0,0 +1,108 @@
UNAME := $(shell sh -c 'uname')
VERSION := $(shell sh -c 'git describe --always --tags')
ifdef GOBIN
PATH := $(GOBIN):$(PATH)
else
PATH := $(subst :,/bin:,$(GOPATH))/bin:$(PATH)
endif

# Standard Telegraf build
default: prepare build

# Windows build
windows: prepare-windows build-windows

# Only run the build (no dependency grabbing)
build:
	go install -ldflags "-X main.Version=$(VERSION)" ./...

build-windows:
	go build -o telegraf.exe -ldflags \
		"-X main.Version=$(VERSION)" \
		./cmd/telegraf/telegraf.go

build-for-docker:
	CGO_ENABLED=0 GOOS=linux go build -o telegraf -ldflags \
		"-X main.Version=$(VERSION)" \
		./cmd/telegraf/telegraf.go

# Build with race detector
dev: prepare
	go build -race -ldflags "-X main.Version=$(VERSION)" ./...

# run package script
package:
	./scripts/build.py --package --version="$(VERSION)" --platform=linux --arch=all --upload

# Get dependencies and use gdm to checkout changesets
prepare:
	go get github.com/sparrc/gdm
	gdm restore

# Use the windows godeps file to prepare dependencies
prepare-windows:
	go get github.com/sparrc/gdm
	gdm restore -f Godeps_windows

# Run all docker containers necessary for unit tests
docker-run:
ifeq ($(UNAME), Darwin)
	docker run --name kafka \
		-e ADVERTISED_HOST=$(shell sh -c 'boot2docker ip || docker-machine ip default') \
		-e ADVERTISED_PORT=9092 \
		-p "2181:2181" -p "9092:9092" \
		-d spotify/kafka
endif
ifeq ($(UNAME), Linux)
	docker run --name kafka \
		-e ADVERTISED_HOST=localhost \
		-e ADVERTISED_PORT=9092 \
		-p "2181:2181" -p "9092:9092" \
		-d spotify/kafka
endif
	docker run --name mysql -p "3306:3306" -e MYSQL_ALLOW_EMPTY_PASSWORD=yes -d mysql
	docker run --name memcached -p "11211:11211" -d memcached
	docker run --name postgres -p "5432:5432" -d postgres
	docker run --name rabbitmq -p "15672:15672" -p "5672:5672" -d rabbitmq:3-management
	docker run --name opentsdb -p "4242:4242" -d petergrace/opentsdb-docker
	docker run --name redis -p "6379:6379" -d redis
	docker run --name aerospike -p "3000:3000" -d aerospike
	docker run --name nsq -p "4150:4150" -d nsqio/nsq /nsqd
	docker run --name mqtt -p "1883:1883" -d ncarlier/mqtt
	docker run --name riemann -p "5555:5555" -d blalor/riemann
	docker run --name snmp -p "31161:31161/udp" -d titilambert/snmpsim

# Run docker containers necessary for CircleCI unit tests
docker-run-circle:
	docker run --name kafka \
		-e ADVERTISED_HOST=localhost \
		-e ADVERTISED_PORT=9092 \
		-p "2181:2181" -p "9092:9092" \
		-d spotify/kafka
	docker run --name opentsdb -p "4242:4242" -d petergrace/opentsdb-docker
	docker run --name aerospike -p "3000:3000" -d aerospike
	docker run --name nsq -p "4150:4150" -d nsqio/nsq /nsqd
	docker run --name mqtt -p "1883:1883" -d ncarlier/mqtt
	docker run --name riemann -p "5555:5555" -d blalor/riemann
	docker run --name snmp -p "31161:31161/udp" -d titilambert/snmpsim

# Kill all docker containers, ignore errors
docker-kill:
	-docker kill nsq aerospike redis opentsdb rabbitmq postgres memcached mysql kafka mqtt riemann snmp
	-docker rm nsq aerospike redis opentsdb rabbitmq postgres memcached mysql kafka mqtt riemann snmp

# Run full unit tests using docker containers (includes setup and teardown)
test: vet docker-kill docker-run
	# Sleeping for kafka leadership election, TSDB setup, etc.
	sleep 60
	# SUCCESS, running tests
	go test -race ./...

# Run "short" unit tests
test-short: vet
	go test -short ./...

vet:
	go vet ./...

.PHONY: test test-short vet build default

331 README.md
@@ -1,22 +1,88 @@
# Telegraf - A native agent for InfluxDB
# Telegraf [![Circle CI](https://circleci.com/gh/influxdata/telegraf.svg?style=svg)](https://circleci.com/gh/influxdata/telegraf)

Telegraf is an agent written in Go for collecting metrics from the system it's running on or from other services and writing them into InfluxDB.
Telegraf is an agent written in Go for collecting metrics from the system it's
running on, or from other services, and writing them into InfluxDB or other
[outputs](https://github.com/influxdata/telegraf#supported-output-plugins).

Design goals are to have a minimal memory footprint with a plugin system so that developers in the community can easily add support for collecting metrics from well known services (like Hadoop, or Postgres, or Redis) and third party APIs (like Mailchimp, AWS CloudWatch, or Google Analytics).
Design goals are to have a minimal memory footprint with a plugin system so
that developers in the community can easily add support for collecting metrics
from well known services (like Hadoop, Postgres, or Redis) and third party
APIs (like Mailchimp, AWS CloudWatch, or Google Analytics).

We'll eagerly accept pull requests for new plugins and will manage the set of plugins that Telegraf supports. See the bottom of this doc for instructions on writing new plugins.
New input and output plugins are designed to be easy to contribute;
we'll eagerly accept pull
requests and will manage the set of plugins that Telegraf supports.
See the [contributing guide](CONTRIBUTING.md) for instructions on writing
new plugins.

## Quickstart
## Installation:

* Build from source or download telegraf:
NOTE: Telegraf 0.10.x is **not** backwards-compatible with previous versions
of telegraf, both in the database layout and the configuration file. 0.2.x
will continue to be supported, see below for download links.

### Linux packages for Debian/Ubuntu and RHEL/CentOS:
For more details on the differences between Telegraf 0.2.x and 0.10.x, see
the [release blog post](https://influxdata.com/blog/announcing-telegraf-0-10-0/).

### Linux deb and rpm Packages:

Latest:
* http://get.influxdb.org/telegraf/telegraf_0.10.4-1_amd64.deb
* http://get.influxdb.org/telegraf/telegraf-0.10.4-1.x86_64.rpm

Latest (arm):
* http://get.influxdb.org/telegraf/telegraf_0.10.4-1_arm.deb
* http://get.influxdb.org/telegraf/telegraf-0.10.4-1.arm.rpm

0.2.x:
* http://get.influxdb.org/telegraf/telegraf_0.2.4_amd64.deb
* http://get.influxdb.org/telegraf/telegraf-0.2.4-1.x86_64.rpm

##### Package Instructions:

* Telegraf binary is installed in `/usr/bin/telegraf`
* Telegraf daemon configuration file is in `/etc/telegraf/telegraf.conf`
* On sysv systems, the telegraf daemon can be controlled via
  `service telegraf [action]`
* On systemd systems (such as Ubuntu 15+), the telegraf daemon can be
  controlled via `systemctl [action] telegraf`

### yum/apt Repositories:

There is a yum/apt repo available for the whole InfluxData stack, see
[here](https://docs.influxdata.com/influxdb/v0.9/introduction/installation/#installation)
for instructions, replacing the `influxdb` package name with `telegraf`.

### Linux tarballs:

Latest:
* http://get.influxdb.org/telegraf/telegraf-0.10.4-1_linux_amd64.tar.gz
* http://get.influxdb.org/telegraf/telegraf-0.10.4-1_linux_i386.tar.gz
* http://get.influxdb.org/telegraf/telegraf-0.10.4-1_linux_arm.tar.gz

0.2.x:
* http://get.influxdb.org/telegraf/telegraf_linux_amd64_0.2.4.tar.gz
* http://get.influxdb.org/telegraf/telegraf_linux_386_0.2.4.tar.gz
* http://get.influxdb.org/telegraf/telegraf_linux_arm_0.2.4.tar.gz

##### tarball Instructions:

To install the full directory structure with config file, run:

```
http://get.influxdb.org/telegraf/telegraf_0.1.1_amd64.deb
http://get.influxdb.org/telegraf/telegraf-0.1.1-1.x86_64.rpm
sudo tar -C / -zxvf ./telegraf-0.10.4-1_linux_amd64.tar.gz
```

To extract only the binary, run:

```
tar -zxvf telegraf-0.10.4-1_linux_amd64.tar.gz --strip-components=3 ./usr/bin/telegraf
```

### Ansible Role:

Ansible role: https://github.com/rossmcdonald/telegraf

### OSX via Homebrew:

```
@@ -24,152 +90,161 @@ brew update
brew install telegraf
```

### How to use it:
### Windows Binaries (EXPERIMENTAL)

* Run `telegraf -sample-config > telegraf.toml` to create an initial configuration
* Edit the configuration to match your needs
* Run `telegraf -config telegraf.toml -test` to output one full measurement sample to STDOUT
* Run `telegraf -config telegraf.toml` to gather and send metrics to InfluxDB
Latest:
* http://get.influxdb.org/telegraf/telegraf-0.10.4-1_windows_amd64.zip
* http://get.influxdb.org/telegraf/telegraf-0.10.4-1_windows_i386.zip

### From Source:

## Telegraf Options
Telegraf manages dependencies via [gdm](https://github.com/sparrc/gdm),
which gets installed via the Makefile
if you don't have it already. You also must build with golang version 1.5+.

Telegraf has a few options you can configure under the `agent` section of the config. If you don't see an `agent` section run `telegraf -sample-config > telegraf.toml` to create a valid initial configuration:
1. [Install Go](https://golang.org/doc/install)
2. [Setup your GOPATH](https://golang.org/doc/code.html#GOPATH)
3. Run `go get github.com/influxdata/telegraf`
4. Run `cd $GOPATH/src/github.com/influxdata/telegraf`
5. Run `make`

* **hostname**: The hostname is passed as a tag. By default this will be the value returned by `hostname` on the machine running Telegraf. You can override that value here.
* **interval**: How often to gather metrics. Uses a simple number + unit parser, ie "10s" for 10 seconds or "5m" for 5 minutes.
* **debug**: Set to true to gather and send metrics to STDOUT as well as InfluxDB.
## How to use it:

## Supported Plugins
```console
$ telegraf -help
Telegraf, The plugin-driven server agent for collecting and reporting metrics.

Telegraf currently has support for collecting metrics from:
Usage:

* System (memory, CPU, network, etc.)
* Docker
* MySQL
* PostgreSQL
* Redis
  telegraf <flags>

We'll be adding support for many more over the coming months. Read on if you want to add support for another service or third-party API.
The flags are:

## Plugin Options
  -config <file>     configuration file to load
  -test              gather metrics once, print them to stdout, and exit
  -sample-config     print out full sample configuration to stdout
  -config-directory  directory containing additional *.conf files
  -input-filter      filter the input plugins to enable, separator is :
  -output-filter     filter the output plugins to enable, separator is :
  -usage             print usage for a plugin, ie, 'telegraf -usage mysql'
  -debug             print metrics as they're generated to stdout
  -quiet             run in quiet mode
  -version           print the version to stdout

There are 3 configuration options that are configurable per plugin:
Examples:

* **pass**: An array of strings that is used to filter metrics generated by the current plugin. Each string in the array is tested as a prefix against metrics and if it matches, the metric is emitted.
* **drop**: The inverse of pass, if a metric matches, it is not emitted.
* **interval**: How often to gather this metric. Normal plugins use a single global interval, but if one particular plugin should be run less or more often, you can configure that here.
  # generate a telegraf config file:
  telegraf -sample-config > telegraf.conf

## Plugins
  # generate config with only cpu input & influxdb output plugins defined
  telegraf -sample-config -input-filter cpu -output-filter influxdb

This section is for developers that want to create new collection plugins. Telegraf is entirely plugin driven. This interface allows for operators to
pick and choose what is gathered as well as makes it easy for developers
to create new ways of generating metrics.
  # run a single telegraf collection, outputting metrics to stdout
  telegraf -config telegraf.conf -test

Plugin authorship is kept as simple as possible to encourage people to develop
and submit new plugins.
  # run telegraf with all plugins defined in config file
  telegraf -config telegraf.conf

## Guidelines

* A plugin must conform to the `plugins.Plugin` interface.
* Telegraf promises to run each plugin's Gather function serially. This means
  developers don't have to worry about thread safety within these functions.
* Each generated metric automatically has the name of the plugin that generated
  it prepended. This is to keep plugins honest.
* Plugins should call `plugins.Add` in their `init` function to register themselves.
  See below for a quick example.
* To be available within Telegraf itself, plugins must add themselves to the `github.com/influxdb/telegraf/plugins/all/all.go` file.
* The `SampleConfig` function should return valid toml that describes how the plugin can be configured. This is included in `telegraf -sample-config`.
* The `Description` function should say in one line what this plugin does.

### Plugin interface

```go
type Plugin interface {
    SampleConfig() string
    Description() string
    Gather(Accumulator) error
}

type Accumulator interface {
    Add(measurement string, value interface{}, tags map[string]string)
    AddValuesWithTime(measurement string, values map[string]interface{}, tags map[string]string, timestamp time.Time)
}
  # run telegraf, enabling the cpu & memory input, and influxdb output plugins
  telegraf -config telegraf.conf -input-filter cpu:mem -output-filter influxdb
```

### Accumulator
## Configuration

The way that a plugin emits metrics is by interacting with the Accumulator.
See the [configuration guide](docs/CONFIGURATION.md) for a rundown of the more advanced
configuration options.

The `Add` function takes 3 arguments:
* **measurement**: A string description of the metric. For instance `bytes_read` or `faults`.
* **value**: A value for the metric. This accepts 5 different types of value:
  * **int**: The most common type. All int types are accepted but favor using `int64`.
    Useful for counters, etc.
  * **float**: Favor `float64`, useful for gauges, percentages, etc.
  * **bool**: `true` or `false`, useful to indicate the presence of a state. `light_on`, etc.
  * **string**: Typically used to indicate a message, or some kind of freeform information.
  * **time.Time**: Useful for indicating when a state last occurred, for instance `light_on_since`.
* **tags**: This is a map of strings to strings to describe the where or who about the metric. For instance, the `net` plugin adds a tag named `"interface"` set to the name of the network interface, like `"eth0"`.
## Supported Input Plugins

The `AddValuesWithTime` allows multiple values for a point to be passed. The values
used are the same type profile as **value** above. The **timestamp** argument
allows a point to be registered as having occurred at an arbitrary time.
Telegraf currently has support for collecting metrics from many sources. For
more information on each, please look at the directory of the same name in
`plugins/inputs`.

Let's say you've written a plugin that emits metrics about processes on the current host.
Currently implemented sources:

```go
* aerospike
* apache
* bcache
* couchdb
* disque
* dns query time
* docker
* dovecot
* elasticsearch
* exec (generic executable plugin, support JSON, influx and graphite)
* haproxy
* httpjson (generic JSON-emitting http service plugin)
* influxdb
* jolokia
* leofs
* lustre2
* mailchimp
* memcached
* mesos
* mongodb
* mysql
* net_response
* nginx
* nsq
* phpfpm
* phusion passenger
* ping
* postgresql
* powerdns
* procstat
* prometheus
* puppetagent
* rabbitmq
* raindrops
* redis
* rethinkdb
* riak
* sensors (only available if built from source)
* snmp
* sql server (microsoft)
* twemproxy
* zfs
* zookeeper
* win_perf_counters (windows performance counters)
* system
  * cpu
  * mem
  * net
  * netstat
  * disk
  * diskio
  * swap

type Process struct {
    CPUTime     float64
    MemoryBytes int64
    PID         int
}
Telegraf can also collect metrics via the following service plugins:

func Gather(acc plugins.Accumulator) error {
    for _, process := range system.Processes() {
        tags := map[string]string{
            "pid": fmt.Sprintf("%d", process.Pid),
        }
* statsd
* mqtt_consumer
* kafka_consumer
* nats_consumer
* github_webhooks

        acc.Add("cpu", process.CPUTime, tags)
        acc.Add("memory", process.MemoryBytes, tags)
    }
}
```
We'll be adding support for many more over the coming months. Read on if you
want to add support for another service or third-party API.

### Example
## Supported Output Plugins

```go
* influxdb
* amon
* amqp
* aws kinesis
* aws cloudwatch
* datadog
* graphite
* kafka
* librato
* mqtt
* nsq
* opentsdb
* prometheus
* riemann

// simple.go

import "github.com/influxdb/telegraf/plugins"

type Simple struct {
    Ok bool
}

func (s *Simple) Description() string {
    return "a demo plugin"
}

func (s *Simple) SampleConfig() string {
    return "ok = true # indicate if everything is fine"
}

func (s *Simple) Gather(acc plugins.Accumulator) error {
    if s.Ok {
        acc.Add("state", "pretty good", nil)
    } else {
        acc.Add("state", "not great", nil)
    }

    return nil
}

func init() {
    plugins.Add("simple", func() plugins.Plugin { return &Simple{} })
}
```
## Contributing

Please see the
[contributing guide](CONTRIBUTING.md)
for details on contributing a plugin to Telegraf.

122 Vagrantfile vendored
@@ -1,122 +0,0 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :

# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
VAGRANTFILE_API_VERSION = "2"

Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
  # All Vagrant configuration is done here. The most common configuration
  # options are documented and commented below. For a complete reference,
  # please see the online documentation at vagrantup.com.

  # Every Vagrant virtual environment requires a box to build off of.
  config.vm.box = "ubuntu/trusty64"

  # Disable automatic box update checking. If you disable this, then
  # boxes will only be checked for updates when the user runs
  # `vagrant box outdated`. This is not recommended.
  # config.vm.box_check_update = false

  # Create a forwarded port mapping which allows access to a specific port
  # within the machine from a port on the host machine. In the example below,
  # accessing "localhost:8080" will access port 80 on the guest machine.
  # config.vm.network "forwarded_port", guest: 80, host: 8080

  # Create a private network, which allows host-only access to the machine
  # using a specific IP.
  # config.vm.network "private_network", ip: "192.168.33.10"

  # Create a public network, which generally matched to bridged network.
  # Bridged networks make the machine appear as another physical device on
  # your network.
  # config.vm.network "public_network"

  # If true, then any SSH connections made will enable agent forwarding.
  # Default value: false
  # config.ssh.forward_agent = true

  # Share an additional folder to the guest VM. The first argument is
  # the path on the host to the actual folder. The second argument is
  # the path on the guest to mount the folder. And the optional third
  # argument is a set of non-required options.
  config.vm.synced_folder "~/go", "/home/vagrant/go"

  # Provider-specific configuration so you can fine-tune various
  # backing providers for Vagrant. These expose provider-specific options.
  # Example for VirtualBox:
  #
  config.vm.provider "virtualbox" do |vb|
    # # Don't boot with headless mode
    # vb.gui = true
    #
    # # Use VBoxManage to customize the VM. For example to change memory:
    vb.customize ["modifyvm", :id, "--memory", "1024"]
  end
  #
  # View the documentation for the provider you're using for more
  # information on available options.

  # Enable provisioning with CFEngine. CFEngine Community packages are
  # automatically installed. For example, configure the host as a
  # policy server and optionally a policy file to run:
  #
  # config.vm.provision "cfengine" do |cf|
  #   cf.am_policy_hub = true
  #   # cf.run_file = "motd.cf"
  # end
  #
  # You can also configure and bootstrap a client to an existing
  # policy server:
  #
  # config.vm.provision "cfengine" do |cf|
  #   cf.policy_server_address = "10.0.2.15"
  # end

  # Enable provisioning with Puppet stand alone. Puppet manifests
  # are contained in a directory path relative to this Vagrantfile.
  # You will need to create the manifests directory and a manifest in
  # the file default.pp in the manifests_path directory.
  #
  # config.vm.provision "puppet" do |puppet|
  #   puppet.manifests_path = "manifests"
  #   puppet.manifest_file = "site.pp"
  # end

  # Enable provisioning with chef solo, specifying a cookbooks path, roles
  # path, and data_bags path (all relative to this Vagrantfile), and adding
  # some recipes and/or roles.
  #
  # config.vm.provision "chef_solo" do |chef|
  #   chef.cookbooks_path = "../my-recipes/cookbooks"
  #   chef.roles_path = "../my-recipes/roles"
  #   chef.data_bags_path = "../my-recipes/data_bags"
  #   chef.add_recipe "mysql"
  #   chef.add_role "web"
  #
  #   # You may also specify custom JSON attributes:
  #   chef.json = { mysql_password: "foo" }
  # end

  # Enable provisioning with chef server, specifying the chef server URL,
  # and the path to the validation key (relative to this Vagrantfile).
  #
  # The Opscode Platform uses HTTPS. Substitute your organization for
  # ORGNAME in the URL and validation key.
  #
  # If you have your own Chef Server, use the appropriate URL, which may be
  # HTTP instead of HTTPS depending on your configuration. Also change the
  # validation key to validation.pem.
  #
  # config.vm.provision "chef_client" do |chef|
  #   chef.chef_server_url = "https://api.opscode.com/organizations/ORGNAME"
  #   chef.validation_key_path = "ORGNAME-validator.pem"
  # end
  #
  # If you're using the Opscode platform, your validator client is
  # ORGNAME-validator, replacing ORGNAME with your organization name.
  #
  # If you have your own Chef Server, the default validation client name is
  # chef-validator, unless you changed the configuration.
  #
  #   chef.validation_client_name = "ORGNAME-validator"
end

109 accumulator.go
@@ -1,100 +1,21 @@
package telegraf

import (
    "fmt"
    "sort"
    "strings"
    "sync"
    "time"
import "time"

    "github.com/influxdb/influxdb/client"
)
type Accumulator interface {
    // Create a point with a value, decorating it with tags
    // NOTE: tags is expected to be owned by the caller, don't mutate
    // it after passing to Add.
    Add(measurement string,
        value interface{},
        tags map[string]string,
        t ...time.Time)

type BatchPoints struct {
    mu sync.Mutex
    AddFields(measurement string,
        fields map[string]interface{},
        tags map[string]string,
        t ...time.Time)

    client.BatchPoints

    Debug bool

    Prefix string

    Config *ConfiguredPlugin
}

func (bp *BatchPoints) Add(measurement string, val interface{}, tags map[string]string) {
    bp.mu.Lock()
    defer bp.mu.Unlock()

    measurement = bp.Prefix + measurement

    if bp.Config != nil {
        if !bp.Config.ShouldPass(measurement) {
            return
        }
    }

    if bp.Debug {
        var tg []string

        for k, v := range tags {
            tg = append(tg, fmt.Sprintf("%s=\"%s\"", k, v))
        }

        sort.Strings(tg)

        fmt.Printf("> [%s] %s value=%v\n", strings.Join(tg, " "), measurement, val)
    }

    bp.Points = append(bp.Points, client.Point{
        Measurement: measurement,
        Tags:        tags,
        Fields: map[string]interface{}{
            "value": val,
        },
    })
}

func (bp *BatchPoints) AddValuesWithTime(
    measurement string,
    values map[string]interface{},
    tags map[string]string,
    timestamp time.Time,
) {
    bp.mu.Lock()
    defer bp.mu.Unlock()

    measurement = bp.Prefix + measurement

    if bp.Config != nil {
        if !bp.Config.ShouldPass(measurement) {
            return
        }
    }

    if bp.Debug {
        var tg []string

        for k, v := range tags {
            tg = append(tg, fmt.Sprintf("%s=\"%s\"", k, v))
        }

        var vals []string

        for k, v := range values {
            vals = append(vals, fmt.Sprintf("%s=%v", k, v))
        }

        sort.Strings(tg)
        sort.Strings(vals)

        fmt.Printf("> [%s] %s %s\n", strings.Join(tg, " "), measurement, strings.Join(vals, " "))
    }

    bp.Points = append(bp.Points, client.Point{
        Measurement: measurement,
        Tags:        tags,
        Fields:      values,
        Time:        timestamp,
    })
    Debug() bool
    SetDebug(enabled bool)
}

292 agent.go
@@ -1,292 +0,0 @@
package telegraf

import (
    "fmt"
    "log"
    "net/url"
    "os"
    "sort"
    "sync"
    "time"

    "github.com/influxdb/influxdb/client"
    "github.com/influxdb/telegraf/plugins"
)

type runningPlugin struct {
    name   string
    plugin plugins.Plugin
    config *ConfiguredPlugin
}

type Agent struct {
    Interval Duration
    Debug    bool
    Hostname string

    Config *Config

    plugins []*runningPlugin

    conn *client.Client
}

func NewAgent(config *Config) (*Agent, error) {
    agent := &Agent{Config: config, Interval: Duration{10 * time.Second}}

    err := config.ApplyAgent(agent)
    if err != nil {
        return nil, err
    }

    if agent.Hostname == "" {
        hostname, err := os.Hostname()
        if err != nil {
            return nil, err
        }

        agent.Hostname = hostname
    }

    if config.Tags == nil {
        config.Tags = map[string]string{}
    }

    config.Tags["host"] = agent.Hostname

    return agent, nil
}

func (agent *Agent) Connect() error {
    config := agent.Config

    u, err := url.Parse(config.URL)
    if err != nil {
        return err
    }

    c, err := client.NewClient(client.Config{
        URL:       *u,
        Username:  config.Username,
        Password:  config.Password,
        UserAgent: config.UserAgent,
    })

    if err != nil {
        return err
    }

    agent.conn = c

    return nil
}

func (a *Agent) LoadPlugins() ([]string, error) {
    var names []string

    for _, name := range a.Config.PluginsDeclared() {
        creator, ok := plugins.Plugins[name]
        if !ok {
            return nil, fmt.Errorf("Undefined but requested plugin: %s", name)
        }

        plugin := creator()

        config, err := a.Config.ApplyPlugin(name, plugin)
        if err != nil {
            return nil, err
        }

        a.plugins = append(a.plugins, &runningPlugin{name, plugin, config})
        names = append(names, name)
    }

    sort.Strings(names)

    return names, nil
}

func (a *Agent) crankParallel() error {
    points := make(chan *BatchPoints, len(a.plugins))

    var wg sync.WaitGroup

    for _, plugin := range a.plugins {
        if plugin.config.Interval != 0 {
            continue
        }

        wg.Add(1)
        go func(plugin *runningPlugin) {
            defer wg.Done()

            var acc BatchPoints
            acc.Debug = a.Debug
            acc.Prefix = plugin.name + "_"
            acc.Config = plugin.config

            plugin.plugin.Gather(&acc)

            points <- &acc
        }(plugin)
    }

    wg.Wait()

    close(points)

    var acc BatchPoints
    acc.Tags = a.Config.Tags
    acc.Time = time.Now()
    acc.Database = a.Config.Database

    for sub := range points {
        acc.Points = append(acc.Points, sub.Points...)
    }

    _, err := a.conn.Write(acc.BatchPoints)
    return err
}

func (a *Agent) crank() error {
    var acc BatchPoints

    acc.Debug = a.Debug

    for _, plugin := range a.plugins {
        acc.Prefix = plugin.name + "_"
        acc.Config = plugin.config
        err := plugin.plugin.Gather(&acc)
        if err != nil {
            return err
        }
    }

    acc.Tags = a.Config.Tags
    acc.Time = time.Now()
    acc.Database = a.Config.Database

    _, err := a.conn.Write(acc.BatchPoints)
    return err
}

func (a *Agent) crankSeparate(shutdown chan struct{}, plugin *runningPlugin) error {
    ticker := time.NewTicker(plugin.config.Interval)

    for {
        var acc BatchPoints

        acc.Debug = a.Debug

        acc.Prefix = plugin.name + "_"
        acc.Config = plugin.config
        err := plugin.plugin.Gather(&acc)
        if err != nil {
            return err
        }

        acc.Tags = a.Config.Tags
        acc.Time = time.Now()
        acc.Database = a.Config.Database

        a.conn.Write(acc.BatchPoints)

        select {
        case <-shutdown:
            return nil
        case <-ticker.C:
            continue
        }
    }
}

func (a *Agent) TestAllPlugins() error {
    var names []string

    for name := range plugins.Plugins {
        names = append(names, name)
    }

    sort.Strings(names)

    var acc BatchPoints
    acc.Debug = true

    fmt.Printf("* Testing all plugins with default configuration\n")

    for _, name := range names {
        plugin := plugins.Plugins[name]()

        fmt.Printf("* Plugin: %s\n", name)

        acc.Prefix = name + "_"
        err := plugin.Gather(&acc)
        if err != nil {
            return err
        }
    }

    return nil
}

func (a *Agent) Test() error {
    var acc BatchPoints

    acc.Debug = true

    for _, plugin := range a.plugins {
        acc.Prefix = plugin.name + "_"
        acc.Config = plugin.config

        fmt.Printf("* Plugin: %s\n", plugin.name)
        if plugin.config.Interval != 0 {
            fmt.Printf("* Interval: %s\n", plugin.config.Interval)
        }

        err := plugin.plugin.Gather(&acc)
        if err != nil {
            return err
        }
    }

    return nil
}

func (a *Agent) Run(shutdown chan struct{}) error {
    if a.conn == nil {
        err := a.Connect()
        if err != nil {
            return err
        }
    }

    var wg sync.WaitGroup

    for _, plugin := range a.plugins {
        if plugin.config.Interval != 0 {
            wg.Add(1)
            go func(plugin *runningPlugin) {
                defer wg.Done()
                a.crankSeparate(shutdown, plugin)
            }(plugin)
        }
    }

    defer wg.Wait()

    ticker := time.NewTicker(a.Interval.Duration)

    for {
        err := a.crankParallel()
        if err != nil {
            log.Printf("Error in plugins: %s", err)
        }

        select {
        case <-shutdown:
            return nil
        case <-ticker.C:
            continue
        }
    }
}

172 agent/accumulator.go Normal file
@@ -0,0 +1,172 @@
package agent

import (
    "fmt"
    "log"
    "math"
    "sync"
    "time"

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/internal/models"
)

func NewAccumulator(
    inputConfig *internal_models.InputConfig,
    metrics chan telegraf.Metric,
) *accumulator {
    acc := accumulator{}
    acc.metrics = metrics
    acc.inputConfig = inputConfig
    return &acc
}

type accumulator struct {
    sync.Mutex

    metrics chan telegraf.Metric

    defaultTags map[string]string

    debug bool

    inputConfig *internal_models.InputConfig

    prefix string
}

func (ac *accumulator) Add(
    measurement string,
    value interface{},
    tags map[string]string,
    t ...time.Time,
) {
    fields := make(map[string]interface{})
    fields["value"] = value

    if !ac.inputConfig.Filter.ShouldNamePass(measurement) {
        return
    }

    ac.AddFields(measurement, fields, tags, t...)
}

func (ac *accumulator) AddFields(
    measurement string,
    fields map[string]interface{},
    tags map[string]string,
    t ...time.Time,
) {
    if len(fields) == 0 || len(measurement) == 0 {
        return
    }

    if !ac.inputConfig.Filter.ShouldNamePass(measurement) {
        return
    }

    if !ac.inputConfig.Filter.ShouldTagsPass(tags) {
        return
    }

    // Override measurement name if set
    if len(ac.inputConfig.NameOverride) != 0 {
        measurement = ac.inputConfig.NameOverride
    }
    // Apply measurement prefix and suffix if set
    if len(ac.inputConfig.MeasurementPrefix) != 0 {
        measurement = ac.inputConfig.MeasurementPrefix + measurement
    }
    if len(ac.inputConfig.MeasurementSuffix) != 0 {
        measurement = measurement + ac.inputConfig.MeasurementSuffix
    }

    if tags == nil {
        tags = make(map[string]string)
    }
    // Apply plugin-wide tags if set
    for k, v := range ac.inputConfig.Tags {
        if _, ok := tags[k]; !ok {
            tags[k] = v
        }
    }
    // Apply daemon-wide tags if set
    for k, v := range ac.defaultTags {
        if _, ok := tags[k]; !ok {
            tags[k] = v
        }
    }

    result := make(map[string]interface{})
    for k, v := range fields {
        // Filter out any filtered fields
        if ac.inputConfig != nil {
            if !ac.inputConfig.Filter.ShouldFieldsPass(k) {
                continue
            }
        }
        result[k] = v

        // Validate uint64 and float64 fields
        switch val := v.(type) {
        case uint64:
            // InfluxDB does not support writing uint64
            if val < uint64(9223372036854775808) {
                result[k] = int64(val)
            } else {
                result[k] = int64(9223372036854775807)
            }
        case float64:
            // NaNs are invalid values in influxdb, skip measurement
            if math.IsNaN(val) || math.IsInf(val, 0) {
                if ac.debug {
                    log.Printf("Measurement [%s] field [%s] has a NaN or Inf "+
                        "field, skipping",
                        measurement, k)
                }
                continue
            }
        }
    }
    fields = nil
    if len(result) == 0 {
        return
    }

    var timestamp time.Time
    if len(t) > 0 {
        timestamp = t[0]
    } else {
        timestamp = time.Now()
    }

    if ac.prefix != "" {
        measurement = ac.prefix + measurement
    }

    m, err := telegraf.NewMetric(measurement, tags, result, timestamp)
    if err != nil {
        log.Printf("Error adding point [%s]: %s\n", measurement, err.Error())
        return
    }
    if ac.debug {
        fmt.Println("> " + m.String())
    }
    ac.metrics <- m
}

func (ac *accumulator) Debug() bool {
    return ac.debug
}

func (ac *accumulator) SetDebug(debug bool) {
    ac.debug = debug
}

func (ac *accumulator) setDefaultTags(tags map[string]string) {
    ac.defaultTags = tags
}

func (ac *accumulator) addDefaultTag(key, value string) {
    ac.defaultTags[key] = value
}

387 agent/agent.go Normal file
@@ -0,0 +1,387 @@
package agent

import (
    cryptorand "crypto/rand"
    "fmt"
    "log"
    "math/big"
    "math/rand"
    "os"
    "runtime"
    "sync"
    "time"

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/internal/config"
    "github.com/influxdata/telegraf/internal/models"
)

// Agent runs telegraf and collects data based on the given config
type Agent struct {
    Config *config.Config
}

// NewAgent returns an Agent struct based off the given Config
func NewAgent(config *config.Config) (*Agent, error) {
    a := &Agent{
        Config: config,
    }

    if a.Config.Agent.Hostname == "" {
        hostname, err := os.Hostname()
        if err != nil {
            return nil, err
        }

        a.Config.Agent.Hostname = hostname
    }

    config.Tags["host"] = a.Config.Agent.Hostname

    return a, nil
}

// Connect connects to all configured outputs
func (a *Agent) Connect() error {
    for _, o := range a.Config.Outputs {
        o.Quiet = a.Config.Agent.Quiet

        switch ot := o.Output.(type) {
        case telegraf.ServiceOutput:
            if err := ot.Start(); err != nil {
                log.Printf("Service for output %s failed to start, exiting\n%s\n",
                    o.Name, err.Error())
                return err
            }
        }

        if a.Config.Agent.Debug {
            log.Printf("Attempting connection to output: %s\n", o.Name)
        }
        err := o.Output.Connect()
        if err != nil {
            log.Printf("Failed to connect to output %s, retrying in 15s, "+
                "error was '%s' \n", o.Name, err)
            time.Sleep(15 * time.Second)
            err = o.Output.Connect()
            if err != nil {
                return err
            }
        }
        if a.Config.Agent.Debug {
            log.Printf("Successfully connected to output: %s\n", o.Name)
        }
    }
    return nil
}

// Close closes the connection to all configured outputs
func (a *Agent) Close() error {
    var err error
    for _, o := range a.Config.Outputs {
        err = o.Output.Close()
        switch ot := o.Output.(type) {
        case telegraf.ServiceOutput:
            ot.Stop()
        }
    }
    return err
}

func panicRecover(input *internal_models.RunningInput) {
    if err := recover(); err != nil {
        trace := make([]byte, 2048)
        runtime.Stack(trace, true)
        log.Printf("FATAL: Input [%s] panicked: %s, Stack:\n%s\n",
            input.Name, err, trace)
        log.Println("PLEASE REPORT THIS PANIC ON GITHUB with " +
            "stack trace, configuration, and OS information: " +
            "https://github.com/influxdata/telegraf/issues/new")
    }
}

// gatherParallel runs the inputs that are using the same reporting interval
// as the telegraf agent.
func (a *Agent) gatherParallel(metricC chan telegraf.Metric) error {
    var wg sync.WaitGroup

    start := time.Now()
    counter := 0
    jitter := a.Config.Agent.CollectionJitter.Duration.Nanoseconds()
    for _, input := range a.Config.Inputs {
        if input.Config.Interval != 0 {
            continue
        }

        wg.Add(1)
        counter++
        go func(input *internal_models.RunningInput) {
            defer panicRecover(input)
            defer wg.Done()

            acc := NewAccumulator(input.Config, metricC)
            acc.SetDebug(a.Config.Agent.Debug)
            acc.setDefaultTags(a.Config.Tags)

            if jitter != 0 {
                nanoSleep := rand.Int63n(jitter)
                d, err := time.ParseDuration(fmt.Sprintf("%dns", nanoSleep))
                if err != nil {
                    log.Printf("Jittering collection interval failed for plugin %s",
                        input.Name)
                } else {
                    time.Sleep(d)
                }
            }

            if err := input.Input.Gather(acc); err != nil {
                log.Printf("Error in input [%s]: %s", input.Name, err)
            }

        }(input)
    }

    if counter == 0 {
        return nil
    }

    wg.Wait()

    elapsed := time.Since(start)
    if !a.Config.Agent.Quiet {
        log.Printf("Gathered metrics, (%s interval), from %d inputs in %s\n",
            a.Config.Agent.Interval.Duration, counter, elapsed)
    }
    return nil
}

// gatherSeparate runs the inputs that have been configured with their own
// reporting interval.
func (a *Agent) gatherSeparate(
    shutdown chan struct{},
    input *internal_models.RunningInput,
    metricC chan telegraf.Metric,
) error {
    defer panicRecover(input)

    ticker := time.NewTicker(input.Config.Interval)

    for {
        var outerr error
        start := time.Now()

        acc := NewAccumulator(input.Config, metricC)
        acc.SetDebug(a.Config.Agent.Debug)
        acc.setDefaultTags(a.Config.Tags)

        if err := input.Input.Gather(acc); err != nil {
            log.Printf("Error in input [%s]: %s", input.Name, err)
        }

        elapsed := time.Since(start)
        if !a.Config.Agent.Quiet {
            log.Printf("Gathered metrics, (separate %s interval), from %s in %s\n",
                input.Config.Interval, input.Name, elapsed)
        }

        if outerr != nil {
            return outerr
        }

        select {
        case <-shutdown:
            return nil
        case <-ticker.C:
            continue
        }
    }
}

// Test verifies that we can 'Gather' from all inputs with their configured
// Config struct
func (a *Agent) Test() error {
    shutdown := make(chan struct{})
    defer close(shutdown)
    metricC := make(chan telegraf.Metric)

    // dummy receiver for the point channel
    go func() {
        for {
            select {
            case <-metricC:
                // do nothing
            case <-shutdown:
                return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
for _, input := range a.Config.Inputs {
|
||||
acc := NewAccumulator(input.Config, metricC)
|
||||
acc.SetDebug(true)
|
||||
|
||||
fmt.Printf("* Plugin: %s, Collection 1\n", input.Name)
|
||||
if input.Config.Interval != 0 {
|
||||
fmt.Printf("* Internal: %s\n", input.Config.Interval)
|
||||
}
|
||||
|
||||
if err := input.Input.Gather(acc); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Special instructions for some inputs. cpu, for example, needs to be
|
||||
// run twice in order to return cpu usage percentages.
|
||||
switch input.Name {
|
||||
case "cpu", "mongodb", "procstat":
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
fmt.Printf("* Plugin: %s, Collection 2\n", input.Name)
|
||||
if err := input.Input.Gather(acc); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// flush writes a list of metrics to all configured outputs
|
||||
func (a *Agent) flush() {
|
||||
var wg sync.WaitGroup
|
||||
|
||||
wg.Add(len(a.Config.Outputs))
|
||||
for _, o := range a.Config.Outputs {
|
||||
go func(output *internal_models.RunningOutput) {
|
||||
defer wg.Done()
|
||||
err := output.Write()
|
||||
if err != nil {
|
||||
log.Printf("Error writing to output [%s]: %s\n",
|
||||
output.Name, err.Error())
|
||||
}
|
||||
}(o)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
// flusher monitors the metrics input channel and flushes on the minimum interval
|
||||
func (a *Agent) flusher(shutdown chan struct{}, metricC chan telegraf.Metric) error {
|
||||
// Inelegant, but this sleep is to allow the Gather threads to run, so that
|
||||
// the flusher will flush after metrics are collected.
|
||||
time.Sleep(time.Millisecond * 200)
|
||||
|
||||
ticker := time.NewTicker(a.Config.Agent.FlushInterval.Duration)
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-shutdown:
|
||||
log.Println("Hang on, flushing any cached metrics before shutdown")
|
||||
a.flush()
|
||||
return nil
|
||||
case <-ticker.C:
|
||||
a.flush()
|
||||
case m := <-metricC:
|
||||
for _, o := range a.Config.Outputs {
|
||||
o.AddMetric(m)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// jitterInterval applies the the interval jitter to the flush interval using
|
||||
// crypto/rand number generator
|
||||
func jitterInterval(ininterval, injitter time.Duration) time.Duration {
|
||||
var jitter int64
|
||||
outinterval := ininterval
|
||||
if injitter.Nanoseconds() != 0 {
|
||||
maxjitter := big.NewInt(injitter.Nanoseconds())
|
||||
if j, err := cryptorand.Int(cryptorand.Reader, maxjitter); err == nil {
|
||||
jitter = j.Int64()
|
||||
}
|
||||
outinterval = time.Duration(jitter + ininterval.Nanoseconds())
|
||||
}
|
||||
|
||||
if outinterval.Nanoseconds() < time.Duration(500*time.Millisecond).Nanoseconds() {
|
||||
log.Printf("Flush interval %s too low, setting to 500ms\n", outinterval)
|
||||
outinterval = time.Duration(500 * time.Millisecond)
|
||||
}
|
||||
|
||||
return outinterval
|
||||
}
|
||||
|
||||
// Run runs the agent daemon, gathering every Interval
|
||||
func (a *Agent) Run(shutdown chan struct{}) error {
|
||||
var wg sync.WaitGroup
|
||||
|
||||
a.Config.Agent.FlushInterval.Duration = jitterInterval(
|
||||
a.Config.Agent.FlushInterval.Duration,
|
||||
a.Config.Agent.FlushJitter.Duration)
|
||||
|
||||
log.Printf("Agent Config: Interval:%s, Debug:%#v, Quiet:%#v, Hostname:%#v, "+
|
||||
"Flush Interval:%s \n",
|
||||
a.Config.Agent.Interval.Duration, a.Config.Agent.Debug, a.Config.Agent.Quiet,
|
||||
a.Config.Agent.Hostname, a.Config.Agent.FlushInterval.Duration)
|
||||
|
||||
// channel shared between all input threads for accumulating metrics
|
||||
metricC := make(chan telegraf.Metric, 10000)
|
||||
|
||||
for _, input := range a.Config.Inputs {
|
||||
// Start service of any ServicePlugins
|
||||
switch p := input.Input.(type) {
|
||||
case telegraf.ServiceInput:
|
||||
acc := NewAccumulator(input.Config, metricC)
|
||||
acc.SetDebug(a.Config.Agent.Debug)
|
||||
acc.setDefaultTags(a.Config.Tags)
|
||||
if err := p.Start(acc); err != nil {
|
||||
log.Printf("Service for input %s failed to start, exiting\n%s\n",
|
||||
input.Name, err.Error())
|
||||
return err
|
||||
}
|
||||
defer p.Stop()
|
||||
}
|
||||
}
|
||||
|
||||
// Round collection to nearest interval by sleeping
|
||||
if a.Config.Agent.RoundInterval {
|
||||
i := int64(a.Config.Agent.Interval.Duration)
|
||||
time.Sleep(time.Duration(i - (time.Now().UnixNano() % i)))
|
||||
}
|
||||
ticker := time.NewTicker(a.Config.Agent.Interval.Duration)
|
||||
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
if err := a.flusher(shutdown, metricC); err != nil {
|
||||
log.Printf("Flusher routine failed, exiting: %s\n", err.Error())
|
||||
close(shutdown)
|
||||
}
|
||||
}()
|
||||
|
||||
for _, input := range a.Config.Inputs {
|
||||
// Special handling for inputs that have their own collection interval
|
||||
// configured. Default intervals are handled below with gatherParallel
|
||||
if input.Config.Interval != 0 {
|
||||
wg.Add(1)
|
||||
go func(input *internal_models.RunningInput) {
|
||||
defer wg.Done()
|
||||
if err := a.gatherSeparate(shutdown, input, metricC); err != nil {
|
||||
log.Printf(err.Error())
|
||||
}
|
||||
}(input)
|
||||
}
|
||||
}
|
||||
|
||||
defer wg.Wait()
|
||||
|
||||
for {
|
||||
if err := a.gatherParallel(metricC); err != nil {
|
||||
log.Printf(err.Error())
|
||||
}
|
||||
|
||||
select {
|
||||
case <-shutdown:
|
||||
return nil
|
||||
case <-ticker.C:
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
agent/agent_test.go (new file, 175 lines)
@@ -0,0 +1,175 @@
package agent

import (
	"github.com/stretchr/testify/assert"
	"testing"
	"time"

	"github.com/influxdata/telegraf/internal/config"

	// needing to load the plugins
	_ "github.com/influxdata/telegraf/plugins/inputs/all"
	// needing to load the outputs
	_ "github.com/influxdata/telegraf/plugins/outputs/all"
)

func TestAgent_LoadPlugin(t *testing.T) {
	c := config.NewConfig()
	c.InputFilters = []string{"mysql"}
	err := c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
	assert.NoError(t, err)
	a, _ := NewAgent(c)
	assert.Equal(t, 1, len(a.Config.Inputs))

	c = config.NewConfig()
	c.InputFilters = []string{"foo"}
	err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
	assert.NoError(t, err)
	a, _ = NewAgent(c)
	assert.Equal(t, 0, len(a.Config.Inputs))

	c = config.NewConfig()
	c.InputFilters = []string{"mysql", "foo"}
	err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
	assert.NoError(t, err)
	a, _ = NewAgent(c)
	assert.Equal(t, 1, len(a.Config.Inputs))

	c = config.NewConfig()
	c.InputFilters = []string{"mysql", "redis"}
	err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
	assert.NoError(t, err)
	a, _ = NewAgent(c)
	assert.Equal(t, 2, len(a.Config.Inputs))

	c = config.NewConfig()
	c.InputFilters = []string{"mysql", "foo", "redis", "bar"}
	err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
	assert.NoError(t, err)
	a, _ = NewAgent(c)
	assert.Equal(t, 2, len(a.Config.Inputs))
}

func TestAgent_LoadOutput(t *testing.T) {
	c := config.NewConfig()
	c.OutputFilters = []string{"influxdb"}
	err := c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
	assert.NoError(t, err)
	a, _ := NewAgent(c)
	assert.Equal(t, 2, len(a.Config.Outputs))

	c = config.NewConfig()
	c.OutputFilters = []string{"kafka"}
	err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
	assert.NoError(t, err)
	a, _ = NewAgent(c)
	assert.Equal(t, 1, len(a.Config.Outputs))

	c = config.NewConfig()
	c.OutputFilters = []string{}
	err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
	assert.NoError(t, err)
	a, _ = NewAgent(c)
	assert.Equal(t, 3, len(a.Config.Outputs))

	c = config.NewConfig()
	c.OutputFilters = []string{"foo"}
	err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
	assert.NoError(t, err)
	a, _ = NewAgent(c)
	assert.Equal(t, 0, len(a.Config.Outputs))

	c = config.NewConfig()
	c.OutputFilters = []string{"influxdb", "foo"}
	err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
	assert.NoError(t, err)
	a, _ = NewAgent(c)
	assert.Equal(t, 2, len(a.Config.Outputs))

	c = config.NewConfig()
	c.OutputFilters = []string{"influxdb", "kafka"}
	err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
	assert.NoError(t, err)
	assert.Equal(t, 3, len(c.Outputs))
	a, _ = NewAgent(c)
	assert.Equal(t, 3, len(a.Config.Outputs))

	c = config.NewConfig()
	c.OutputFilters = []string{"influxdb", "foo", "kafka", "bar"}
	err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
	assert.NoError(t, err)
	a, _ = NewAgent(c)
	assert.Equal(t, 3, len(a.Config.Outputs))
}

func TestAgent_ZeroJitter(t *testing.T) {
	flushinterval := jitterInterval(time.Duration(10*time.Second),
		time.Duration(0*time.Second))

	actual := flushinterval.Nanoseconds()
	exp := time.Duration(10 * time.Second).Nanoseconds()

	if actual != exp {
		t.Errorf("Actual %v, expected %v", actual, exp)
	}
}

func TestAgent_ZeroInterval(t *testing.T) {
	min := time.Duration(500 * time.Millisecond).Nanoseconds()
	max := time.Duration(5 * time.Second).Nanoseconds()

	for i := 0; i < 1000; i++ {
		flushinterval := jitterInterval(time.Duration(0*time.Second),
			time.Duration(5*time.Second))
		actual := flushinterval.Nanoseconds()

		if actual > max {
			t.Errorf("Didn't expect interval %d to be > %d", actual, max)
			break
		}
		if actual < min {
			t.Errorf("Didn't expect interval %d to be < %d", actual, min)
			break
		}
	}
}

func TestAgent_ZeroBoth(t *testing.T) {
	flushinterval := jitterInterval(time.Duration(0*time.Second),
		time.Duration(0*time.Second))

	actual := flushinterval
	exp := time.Duration(500 * time.Millisecond)

	if actual != exp {
		t.Errorf("Actual %v, expected %v", actual, exp)
	}
}

func TestAgent_JitterMax(t *testing.T) {
	max := time.Duration(32 * time.Second).Nanoseconds()

	for i := 0; i < 1000; i++ {
		flushinterval := jitterInterval(time.Duration(30*time.Second),
			time.Duration(2*time.Second))
		actual := flushinterval.Nanoseconds()
		if actual > max {
			t.Errorf("Didn't expect interval %d to be > %d", actual, max)
			break
		}
	}
}

func TestAgent_JitterMin(t *testing.T) {
	min := time.Duration(30 * time.Second).Nanoseconds()

	for i := 0; i < 1000; i++ {
		flushinterval := jitterInterval(time.Duration(30*time.Second),
			time.Duration(2*time.Second))
		actual := flushinterval.Nanoseconds()
		if actual < min {
			t.Errorf("Didn't expect interval %d to be < %d", actual, min)
			break
		}
	}
}
@@ -1,61 +0,0 @@
package telegraf

/*
func TestAgent_DrivesMetrics(t *testing.T) {
	var (
		plugin plugins.MockPlugin
	)

	defer plugin.AssertExpectations(t)
	defer metrics.AssertExpectations(t)

	a := &Agent{
		plugins: []plugins.Plugin{&plugin},
		Config:  &Config{},
	}

	plugin.On("Add", "foo", 1.2, nil).Return(nil)
	plugin.On("Add", "bar", 888, nil).Return(nil)

	err := a.crank()
	require.NoError(t, err)
}

func TestAgent_AppliesTags(t *testing.T) {
	var (
		plugin  plugins.MockPlugin
		metrics MockMetrics
	)

	defer plugin.AssertExpectations(t)
	defer metrics.AssertExpectations(t)

	a := &Agent{
		plugins: []plugins.Plugin{&plugin},
		metrics: &metrics,
		Config: &Config{
			Tags: map[string]string{
				"dc": "us-west-1",
			},
		},
	}

	m1 := cypress.Metric()
	m1.Add("name", "foo")
	m1.Add("value", 1.2)

	msgs := []*cypress.Message{m1}

	m2 := cypress.Metric()
	m2.Timestamp = m1.Timestamp
	m2.Add("name", "foo")
	m2.Add("value", 1.2)
	m2.AddTag("dc", "us-west-1")

	plugin.On("Read").Return(msgs, nil)
	metrics.On("Receive", m2).Return(nil)

	err := a.crank()
	require.NoError(t, err)
}
*/
circle.yml (new file, 21 lines)
@@ -0,0 +1,21 @@
machine:
  services:
    - docker
  post:
    - sudo service zookeeper stop
    - go version
    - go version | grep 1.5.3 || sudo rm -rf /usr/local/go
    - wget https://storage.googleapis.com/golang/go1.5.3.linux-amd64.tar.gz
    - sudo tar -C /usr/local -xzf go1.5.3.linux-amd64.tar.gz
    - go version

dependencies:
  override:
    - docker info
  post:
    - gem install fpm
    - sudo apt-get install -y rpm python-boto

test:
  override:
    - bash scripts/circle-test.sh
@@ -7,115 +7,235 @@ import (
	"os"
	"os/signal"
	"strings"
	"syscall"

	"github.com/influxdb/telegraf"
	_ "github.com/influxdb/telegraf/plugins/all"
	"github.com/influxdata/telegraf/agent"
	"github.com/influxdata/telegraf/internal/config"

	_ "github.com/influxdata/telegraf/plugins/inputs/all"
	_ "github.com/influxdata/telegraf/plugins/outputs/all"
)

var fDebug = flag.Bool("debug", false, "show metrics as they're generated to stdout")
var fDebug = flag.Bool("debug", false,
	"show metrics as they're generated to stdout")
var fQuiet = flag.Bool("quiet", false,
	"run in quiet mode")
var fTest = flag.Bool("test", false, "gather metrics, print them out, and exit")
var fConfig = flag.String("config", "", "configuration file to load")
var fConfigDirectory = flag.String("config-directory", "",
	"directory containing additional *.conf files")
var fVersion = flag.Bool("version", false, "display the version")
var fSampleConfig = flag.Bool("sample-config", false, "print out full sample configuration")
var fSampleConfig = flag.Bool("sample-config", false,
	"print out full sample configuration")
var fPidfile = flag.String("pidfile", "", "file to write our pid to")
var fInputFilters = flag.String("input-filter", "",
	"filter the inputs to enable, separator is :")
var fOutputFilters = flag.String("output-filter", "",
	"filter the outputs to enable, separator is :")
var fUsage = flag.String("usage", "",
	"print usage for a plugin, ie, 'telegraf -usage mysql'")

var Version = "unreleased"
var Commit = ""
var fInputFiltersLegacy = flag.String("filter", "",
	"filter the inputs to enable, separator is :")
var fOutputFiltersLegacy = flag.String("outputfilter", "",
	"filter the outputs to enable, separator is :")
var fConfigDirectoryLegacy = flag.String("configdirectory", "",
	"directory containing additional *.conf files")

// Telegraf version
// -ldflags "-X main.Version=`git describe --always --tags`"
var Version string

const usage = `Telegraf, The plugin-driven server agent for collecting and reporting metrics.

Usage:

  telegraf <flags>

The flags are:

  -config <file>     configuration file to load
  -test              gather metrics once, print them to stdout, and exit
  -sample-config     print out full sample configuration to stdout
  -config-directory  directory containing additional *.conf files
  -input-filter      filter the input plugins to enable, separator is :
  -output-filter     filter the output plugins to enable, separator is :
  -usage             print usage for a plugin, ie, 'telegraf -usage mysql'
  -debug             print metrics as they're generated to stdout
  -quiet             run in quiet mode
  -version           print the version to stdout

Examples:

  # generate a telegraf config file:
  telegraf -sample-config > telegraf.conf

  # generate config with only cpu input & influxdb output plugins defined
  telegraf -sample-config -input-filter cpu -output-filter influxdb

  # run a single telegraf collection, outputting metrics to stdout
  telegraf -config telegraf.conf -test

  # run telegraf with all plugins defined in config file
  telegraf -config telegraf.conf

  # run telegraf, enabling the cpu & memory input, and influxdb output plugins
  telegraf -config telegraf.conf -input-filter cpu:mem -output-filter influxdb
`

func main() {
	flag.Parse()
	reload := make(chan bool, 1)
	reload <- true
	for <-reload {
		reload <- false
		flag.Usage = func() { usageExit(0) }
		flag.Parse()

	if *fVersion {
		fmt.Printf("InfluxDB Telegraf agent - Version %s\n", Version)
		return
	}

	if *fSampleConfig {
		telegraf.PrintSampleConfig()
		return
	}

	var (
		config *telegraf.Config
		err    error
	)

	if *fConfig != "" {
		config, err = telegraf.LoadConfig(*fConfig)
		if err != nil {
			log.Fatal(err)
		if flag.NFlag() == 0 {
			usageExit(0)
		}
	} else {
		config = telegraf.DefaultConfig()
	}

	ag, err := telegraf.NewAgent(config)
	if err != nil {
		log.Fatal(err)
	}
	var inputFilters []string
	if *fInputFiltersLegacy != "" {
		inputFilter := strings.TrimSpace(*fInputFiltersLegacy)
		inputFilters = strings.Split(":"+inputFilter+":", ":")
	}
	if *fInputFilters != "" {
		inputFilter := strings.TrimSpace(*fInputFilters)
		inputFilters = strings.Split(":"+inputFilter+":", ":")
	}

	if *fDebug {
		ag.Debug = true
	}
	var outputFilters []string
	if *fOutputFiltersLegacy != "" {
		outputFilter := strings.TrimSpace(*fOutputFiltersLegacy)
		outputFilters = strings.Split(":"+outputFilter+":", ":")
	}
	if *fOutputFilters != "" {
		outputFilter := strings.TrimSpace(*fOutputFilters)
		outputFilters = strings.Split(":"+outputFilter+":", ":")
	}

	plugins, err := ag.LoadPlugins()
	if err != nil {
		log.Fatal(err)
	}
	if *fVersion {
		v := fmt.Sprintf("Telegraf - Version %s", Version)
		fmt.Println(v)
		return
	}

	if *fSampleConfig {
		config.PrintSampleConfig(inputFilters, outputFilters)
		return
	}

	if *fUsage != "" {
		if err := config.PrintInputConfig(*fUsage); err != nil {
			if err2 := config.PrintOutputConfig(*fUsage); err2 != nil {
				log.Fatalf("%s and %s", err, err2)
			}
		}
		return
	}

	var (
		c   *config.Config
		err error
	)

	if *fTest {
		if *fConfig != "" {
			err = ag.Test()
			c = config.NewConfig()
			c.OutputFilters = outputFilters
			c.InputFilters = inputFilters
			err = c.LoadConfig(*fConfig)
			if err != nil {
				log.Fatal(err)
			}
		} else {
			err = ag.TestAllPlugins()
			fmt.Println("You must specify a config file. See telegraf --help")
			os.Exit(1)
		}

	if *fConfigDirectoryLegacy != "" {
		err = c.LoadDirectory(*fConfigDirectoryLegacy)
		if err != nil {
			log.Fatal(err)
		}
	}

	if *fConfigDirectory != "" {
		err = c.LoadDirectory(*fConfigDirectory)
		if err != nil {
			log.Fatal(err)
		}
	}
	if len(c.Outputs) == 0 {
		log.Fatalf("Error: no outputs found, did you provide a valid config file?")
	}
	if len(c.Inputs) == 0 {
		log.Fatalf("Error: no inputs found, did you provide a valid config file?")
	}

	ag, err := agent.NewAgent(c)
	if err != nil {
		log.Fatal(err)
	}

		return
	}

	err = ag.Connect()
	if err != nil {
		log.Fatal(err)
	}

	shutdown := make(chan struct{})

	signals := make(chan os.Signal)

	signal.Notify(signals, os.Interrupt)

	go func() {
		<-signals
		close(shutdown)
	}()

	log.Print("InfluxDB Agent running")
	log.Printf("Loaded plugins: %s", strings.Join(plugins, " "))
	if ag.Debug {
		log.Printf("Debug: enabled")
		log.Printf("Agent Config: Interval:%s, Debug:%#v, Hostname:%#v\n",
			ag.Interval, ag.Debug, ag.Hostname)
	}

	if config.URL != "" {
		log.Printf("Sending metrics to: %s", config.URL)
		log.Printf("Tags enabled: %v", config.ListTags())
	}

	if *fPidfile != "" {
		f, err := os.Create(*fPidfile)
		if err != nil {
			log.Fatalf("Unable to create pidfile: %s", err)
	if *fDebug {
		ag.Config.Agent.Debug = true
	}

		fmt.Fprintf(f, "%d\n", os.Getpid())
	if *fQuiet {
		ag.Config.Agent.Quiet = true
	}

		f.Close()
	if *fTest {
		err = ag.Test()
		if err != nil {
			log.Fatal(err)
		}
		return
	}

	err = ag.Connect()
	if err != nil {
		log.Fatal(err)
	}

	shutdown := make(chan struct{})
	signals := make(chan os.Signal)
	signal.Notify(signals, os.Interrupt, syscall.SIGHUP)
	go func() {
		sig := <-signals
		if sig == os.Interrupt {
			close(shutdown)
		}
		if sig == syscall.SIGHUP {
			log.Printf("Reloading Telegraf config\n")
			<-reload
			reload <- true
			close(shutdown)
		}
	}()

	log.Printf("Starting Telegraf (version %s)\n", Version)
	log.Printf("Loaded outputs: %s", strings.Join(c.OutputNames(), " "))
	log.Printf("Loaded inputs: %s", strings.Join(c.InputNames(), " "))
	log.Printf("Tags enabled: %s", c.ListTags())

	if *fPidfile != "" {
		f, err := os.Create(*fPidfile)
		if err != nil {
			log.Fatalf("Unable to create pidfile: %s", err)
		}

		fmt.Fprintf(f, "%d\n", os.Getpid())

		f.Close()
	}

	ag.Run(shutdown)
	}

	ag.Run(shutdown)
}

func usageExit(rc int) {
	fmt.Println(usage)
	os.Exit(rc)
}
config.go (deleted, 302 lines)
@@ -1,302 +0,0 @@
package telegraf

import (
	"errors"
	"fmt"
	"io/ioutil"
	"sort"
	"strings"
	"time"

	"github.com/influxdb/telegraf/plugins"
	"github.com/naoina/toml"
	"github.com/naoina/toml/ast"
)

type Duration struct {
	time.Duration
}

func (d *Duration) UnmarshalTOML(b []byte) error {
	dur, err := time.ParseDuration(string(b[1 : len(b)-1]))
	if err != nil {
		return err
	}

	d.Duration = dur

	return nil
}

type Config struct {
	URL       string
	Username  string
	Password  string
	Database  string
	UserAgent string
	Tags      map[string]string

	agent   *ast.Table
	plugins map[string]*ast.Table
}

func (c *Config) Plugins() map[string]*ast.Table {
	return c.plugins
}

type ConfiguredPlugin struct {
	Name string

	Drop []string
	Pass []string

	Interval time.Duration
}

func (cp *ConfiguredPlugin) ShouldPass(measurement string) bool {
	if cp.Pass != nil {
		for _, pat := range cp.Pass {
			if strings.HasPrefix(measurement, pat) {
				return true
			}
		}

		return false
	}

	if cp.Drop != nil {
		for _, pat := range cp.Drop {
			if strings.HasPrefix(measurement, pat) {
				return false
			}
		}

		return true
	}

	return true
}

func (c *Config) ApplyAgent(v interface{}) error {
	if c.agent != nil {
		return toml.UnmarshalTable(c.agent, v)
	}

	return nil
}

func (c *Config) ApplyPlugin(name string, v interface{}) (*ConfiguredPlugin, error) {
	cp := &ConfiguredPlugin{Name: name}

	if tbl, ok := c.plugins[name]; ok {

		if node, ok := tbl.Fields["pass"]; ok {
			if kv, ok := node.(*ast.KeyValue); ok {
				if ary, ok := kv.Value.(*ast.Array); ok {
					for _, elem := range ary.Value {
						if str, ok := elem.(*ast.String); ok {
							cp.Pass = append(cp.Pass, str.Value)
						}
					}
				}
			}
		}

		if node, ok := tbl.Fields["drop"]; ok {
			if kv, ok := node.(*ast.KeyValue); ok {
				if ary, ok := kv.Value.(*ast.Array); ok {
					for _, elem := range ary.Value {
						if str, ok := elem.(*ast.String); ok {
							cp.Drop = append(cp.Drop, str.Value)
						}
					}
				}
			}
		}

		if node, ok := tbl.Fields["interval"]; ok {
			if kv, ok := node.(*ast.KeyValue); ok {
				if str, ok := kv.Value.(*ast.String); ok {
					dur, err := time.ParseDuration(str.Value)
					if err != nil {
						return nil, err
					}

					cp.Interval = dur
				}
			}
		}

		delete(tbl.Fields, "drop")
		delete(tbl.Fields, "pass")
		delete(tbl.Fields, "interval")
		return cp, toml.UnmarshalTable(tbl, v)
	}

	return cp, nil
}

func (c *Config) PluginsDeclared() []string {
	var plugins []string

	for name := range c.plugins {
		plugins = append(plugins, name)
	}

	sort.Strings(plugins)

	return plugins
}

func DefaultConfig() *Config {
	return &Config{}
}

var ErrInvalidConfig = errors.New("invalid configuration")

func LoadConfig(path string) (*Config, error) {
	data, err := ioutil.ReadFile(path)
	if err != nil {
		return nil, err
	}

	tbl, err := toml.Parse(data)
	if err != nil {
		return nil, err
	}

	c := &Config{
		plugins: make(map[string]*ast.Table),
	}

	for name, val := range tbl.Fields {
		subtbl, ok := val.(*ast.Table)
		if !ok {
			return nil, ErrInvalidConfig
		}

		switch name {
		case "influxdb":
			err := toml.UnmarshalTable(subtbl, c)
			if err != nil {
				return nil, err
			}
		case "agent":
			c.agent = subtbl
		default:
			c.plugins[name] = subtbl
		}
	}

	return c, nil
}

func (c *Config) ListTags() string {
	var tags []string

	for k, v := range c.Tags {
		tags = append(tags, fmt.Sprintf("%s=%s", k, v))
	}

	sort.Strings(tags)

	return strings.Join(tags, " ")
}

type hasConfig interface {
	BasicConfig() string
}

type hasDescr interface {
	Description() string
}

var header = `# Telegraf configuration

# If this file is missing an [agent] section, you must first generate a
# valid config with 'telegraf -sample-config > telegraf.toml'

# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared plugins.

# Even if a plugin has no configuration, it must be declared in here
# to be active. Declaring a plugin means just specifying the name
# as a section with no variables. To deactivate a plugin, comment
# out the name and any variables.

# Use 'telegraf -config telegraf.toml -test' to see what metrics a config
# file would generate.

# One rule that plugins conform to is wherever a connection string
# can be passed, the values '' and 'localhost' are treated specially.
# They indicate to the plugin to use their own builtin configuration to
# connect to the local system.

# NOTE: The configuration has a few required parameters. They are marked
# with 'required'. Be sure to edit those to make this configuration work.

# Configuration for influxdb server to send metrics to
[influxdb]
# The full HTTP endpoint URL for your InfluxDB instance
url = "http://localhost:8086" # required.

# The target database for metrics. This database must already exist
database = "telegraf" # required.

# username = "telegraf"
# password = "metricsmetricsmetricsmetrics"

# Set the user agent for the POSTs (can be useful for log differentiation)
# user_agent = "telegraf"
# tags = { "dc": "us-east-1" }

# Tags can also be specified via a normal map, but only one form at a time:

# [influxdb.tags]
# dc = "us-east-1"

# Configuration for telegraf itself
# [agent]
# interval = "10s"
# debug = false
# hostname = "prod3241"

# PLUGINS

`

func PrintSampleConfig() {
	fmt.Printf(header)

	var names []string

	for name := range plugins.Plugins {
		names = append(names, name)
	}

	sort.Strings(names)

	for _, name := range names {
		creator := plugins.Plugins[name]

		plugin := creator()

		fmt.Printf("# %s\n[%s]\n", plugin.Description(), name)

		var config string

		config = strings.TrimSpace(plugin.SampleConfig())

		if config == "" {
			fmt.Printf("  # no configuration\n\n")
		} else {
			fmt.Printf("\n")
			lines := strings.Split(config, "\n")
			for _, line := range lines {
				fmt.Printf("%s\n", line)
			}

			fmt.Printf("\n")
		}
	}
}
docs/CONFIGURATION.md (new file, 236 lines)
@@ -0,0 +1,236 @@
# Telegraf Configuration

## Generating a Configuration File

A default Telegraf config file can be generated using the -sample-config flag:
`telegraf -sample-config > telegraf.conf`

To generate a file with specific inputs and outputs, you can use the
-input-filter and -output-filter flags:
`telegraf -sample-config -input-filter cpu:mem:net:swap -output-filter influxdb:kafka`

## `[global_tags]` Configuration

Global tags can be specified in the `[global_tags]` section of the config file in
key="value" format. All metrics being gathered on this host will be tagged
with the tags specified here.
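
For example, a minimal `[global_tags]` section (the tag names and values here are
purely illustrative):

```toml
[global_tags]
  dc = "denver-1"
  rack = "1a"
```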

## `[agent]` Configuration

Telegraf has a few options you can configure under the `[agent]` section of the
config (a sample block combining them follows this list):

* **interval**: Default data collection interval for all inputs.
* **round_interval**: Rounds the collection interval to 'interval'.
  ie, if interval="10s" then always collect on :00, :10, :20, etc.
* **metric_buffer_limit**: Telegraf will cache metric_buffer_limit metrics
  for each output, and will flush this buffer on a successful write.
* **collection_jitter**: Collection jitter is used to jitter
  the collection by a random amount.
  Each plugin will sleep for a random time within jitter before collecting.
  This can be used to avoid many plugins querying things like sysfs at the
  same time, which can have a measurable effect on the system.
* **flush_interval**: Default data flushing interval for all outputs.
  You should not set this below interval.
  The maximum flush_interval will be flush_interval + flush_jitter.
* **flush_jitter**: Jitter the flush interval by a random amount.
  This is primarily to avoid large write spikes for users running a large
  number of telegraf instances.
  ie, a jitter of 5s and flush_interval 10s means flushes will happen every 10-15s.
* **debug**: Run telegraf in debug mode.
* **quiet**: Run telegraf in quiet mode.
* **hostname**: Override default hostname; if empty, os.Hostname() is used.

## `[inputs.xxx]` Configuration

There are some configuration options that are configurable per input:

* **name_override**: Override the base name of the measurement.
  (Default is the name of the input).
* **name_prefix**: Specifies a prefix to attach to the measurement name.
* **name_suffix**: Specifies a suffix to attach to the measurement name.
* **tags**: A map of tags to apply to a specific input's measurements.
* **interval**: How often to gather this metric. Normal plugins use a single
  global interval, but if one particular input should be run less or more often,
  you can configure that here (see the sketch after this list).
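
A sketch combining these per-input options on a single plugin (the plugin choice
and values are illustrative):

```toml
[[inputs.cpu]]
  name_prefix = "dev_"
  interval = "30s"
  [inputs.cpu.tags]
    role = "webserver"
```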

#### Input Filters

There are also filters that can be configured per input:

* **namepass**: An array of strings that is used to filter metrics generated by the
  current input. Each string in the array is tested as a glob match against
  measurement names and if it matches, the measurement is emitted.
* **namedrop**: The inverse of namepass; if a measurement name matches, it is not emitted.
* **fieldpass**: An array of strings that is used to filter metrics generated by the
  current input. Each string in the array is tested as a glob match against field names
  and if it matches, the field is emitted.
* **fielddrop**: The inverse of fieldpass; if a field name matches, it is not emitted.
* **tagpass**: Tag names and arrays of strings that are used to filter
  measurements by the current input. Each string in the array is tested as a glob
  match against the tag value, and if it matches the measurement is emitted.
* **tagdrop**: The inverse of tagpass. If a tag matches, the measurement is not
  emitted. This is tested on measurements that have passed the tagpass test.

#### Input Configuration Examples

This is a full working config that will output CPU data to an InfluxDB instance
at 192.168.59.103:8086, tagging measurements with dc="denver-1". It will output
measurements at a 10s interval and will collect per-cpu data, dropping any
fields which begin with `time_`.

```toml
[global_tags]
  dc = "denver-1"

[agent]
  interval = "10s"

# OUTPUTS
[[outputs.influxdb]]
  url = "http://192.168.59.103:8086" # required.
  database = "telegraf" # required.
  precision = "s"

# INPUTS
[[inputs.cpu]]
  percpu = true
  totalcpu = false
  # filter all fields beginning with 'time_'
  drop = ["time_*"]
```

#### Input Config: tagpass and tagdrop

```toml
[[inputs.cpu]]
  percpu = true
  totalcpu = false
  drop = ["cpu_time"]
  # Don't collect CPU data for cpu6 & cpu7
  [inputs.cpu.tagdrop]
    cpu = [ "cpu6", "cpu7" ]

[[inputs.disk]]
  [inputs.disk.tagpass]
    # tagpass conditions are OR, not AND.
    # If the (filesystem is ext4 or xfs) OR (the path is /opt or /home)
    # then the metric passes
    fstype = [ "ext4", "xfs" ]
    # Globs can also be used on the tag values
    path = [ "/opt", "/home*" ]
```

#### Input Config: fieldpass and fielddrop

```toml
# Drop all metrics for guest & steal CPU usage
[[inputs.cpu]]
  percpu = false
  totalcpu = true
  fielddrop = ["usage_guest", "usage_steal"]

# Only store inode related metrics for disks
[[inputs.disk]]
  fieldpass = ["inodes*"]
```

#### Input Config: namepass and namedrop

```toml
# Drop all metrics about containers for kubelet
[[inputs.prometheus]]
  urls = ["http://kube-node-1:4194/metrics"]
  namedrop = ["container_"]

# Only store rest client related metrics for kubelet
[[inputs.prometheus]]
  urls = ["http://kube-node-1:4194/metrics"]
  namepass = ["rest_client_"]
```

#### Input config: prefix, suffix, and override

This plugin will emit measurements with the name `cpu_total`:

```toml
[[inputs.cpu]]
  name_suffix = "_total"
  percpu = false
  totalcpu = true
```

This will emit measurements with the name `foobar`:

```toml
[[inputs.cpu]]
  name_override = "foobar"
  percpu = false
  totalcpu = true
```

#### Input config: tags

This plugin will emit measurements with two additional tags: `tag1=foo` and
`tag2=bar`:

```toml
[[inputs.cpu]]
  percpu = false
  totalcpu = true
  [inputs.cpu.tags]
    tag1 = "foo"
    tag2 = "bar"
```

#### Multiple inputs of the same type

Additional inputs (or outputs) of the same type can be specified by
defining more instances in the config file. It is highly recommended that
you utilize `name_override`, `name_prefix`, or `name_suffix` config options
to avoid measurement collisions:

```toml
[[inputs.cpu]]
  percpu = false
  totalcpu = true

[[inputs.cpu]]
  percpu = true
  totalcpu = false
  name_override = "percpu_usage"
  drop = ["cpu_time*"]
```

## `[outputs.xxx]` Configuration

Telegraf also supports specifying multiple output sinks to send data to.
Configuring each output sink is different, but examples can be
found by running `telegraf -sample-config`.

Outputs also support the same configurable filter options as inputs
(namepass, namedrop, tagpass, tagdrop):

```toml
[[outputs.influxdb]]
  urls = [ "http://localhost:8086" ]
  database = "telegraf"
  precision = "s"
  # Drop all measurements that start with "aerospike"
  namedrop = ["aerospike*"]

[[outputs.influxdb]]
  urls = [ "http://localhost:8086" ]
  database = "telegraf-aerospike-data"
  precision = "s"
  # Only accept aerospike data:
  namepass = ["aerospike*"]

[[outputs.influxdb]]
  urls = [ "http://localhost:8086" ]
  database = "telegraf-cpu0-data"
  precision = "s"
  # Only store measurements where the tag "cpu" matches the value "cpu0"
  [outputs.influxdb.tagpass]
    cpu = ["cpu0"]
```
docs/DATA_FORMATS_INPUT.md (new file, 274 lines)
@@ -0,0 +1,274 @@
# Telegraf Input Data Formats

Telegraf metrics, like InfluxDB
[points](https://docs.influxdata.com/influxdb/v0.10/write_protocols/line/),
are a combination of four basic parts:

1. Measurement Name
1. Tags
1. Fields
1. Timestamp

These four parts are easily defined when using InfluxDB line-protocol as a
data format. But there are other data formats that users may want to use which
require more advanced configuration to create usable Telegraf metrics.

Plugins such as `exec` and `kafka_consumer` parse textual data. Up until now,
these plugins were statically configured to parse just a single
data format. `exec` mostly only supported parsing JSON, and `kafka_consumer` only
supported data in InfluxDB line-protocol.

But now we are normalizing the parsing of various data formats across all
plugins that can support it. You will be able to identify a plugin that supports
different data formats by the presence of a `data_format` config option, for
example, in the exec plugin:

```toml
[[inputs.exec]]
  ## Commands array
  commands = ["/tmp/test.sh", "/usr/bin/mycollector --foo=bar"]

  ## measurement name suffix (for separating different commands)
  name_suffix = "_mycollector"

  ## Data format to consume. This can be "json", "influx" or "graphite"
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "json"

  ## Additional configuration options go here
```

Each data_format has an additional set of configuration options available, which
I'll go over below.

## Influx:

There are no additional configuration options for InfluxDB line-protocol. The
metrics are parsed directly into Telegraf metrics.

#### Influx Configuration:

```toml
[[inputs.exec]]
  ## Commands array
  commands = ["/tmp/test.sh", "/usr/bin/mycollector --foo=bar"]

  ## measurement name suffix (for separating different commands)
  name_suffix = "_mycollector"

  ## Data format to consume. This can be "json", "influx" or "graphite"
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "influx"
```

## JSON:

The JSON data format flattens JSON into metric _fields_. For example, this JSON:

```json
{
    "a": 5,
    "b": {
        "c": 6
    }
}
```

would get translated into _fields_ of a measurement:

```
myjsonmetric a=5,b_c=6
```

The _measurement name_ is usually the name of the plugin,
but can be overridden using the `name_override` config option.

#### JSON Configuration:

The JSON data format supports specifying "tag keys". If specified, keys
will be searched for in the root level of the JSON blob. If the key(s) exist,
they will be applied as tags to the Telegraf metrics.

For example, if you had this configuration:

```toml
[[inputs.exec]]
  ## Commands array
  commands = ["/tmp/test.sh", "/usr/bin/mycollector --foo=bar"]

  ## measurement name suffix (for separating different commands)
  name_suffix = "_mycollector"

  ## Data format to consume. This can be "json", "influx" or "graphite"
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "json"

  ## List of tag names to extract from top-level of JSON server response
  tag_keys = [
    "my_tag_1",
    "my_tag_2"
  ]
```

with this JSON output from a command:

```json
{
    "a": 5,
    "b": {
        "c": 6
    },
    "my_tag_1": "foo"
}
```

your Telegraf metrics would get tagged with "my_tag_1":

```
exec_mycollector,my_tag_1=foo a=5,b_c=6
```

## Graphite:

The Graphite data format translates graphite _dot_ buckets directly into
telegraf measurement names, with a single value field, and without any tags. For
more advanced options, Telegraf supports specifying "templates" to translate
graphite buckets into Telegraf metrics.

#### Separator:

You can specify a separator to use for the parsed metrics.
By default, it will leave the metrics with a "." separator.
Setting `separator = "_"` will translate:

```
cpu.usage.idle 99
=> cpu_usage_idle value=99
```

#### Measurement/Tag Templates:

The most basic template is to specify a single transformation to apply to all
incoming metrics. _measurement_ is a special keyword that tells Telegraf which
parts of the graphite bucket to combine into the measurement name. It can have a
trailing `*` to indicate that the remainder of the metric should be used.
Other words are considered tag keys. So the following template:

```toml
templates = [
    "region.measurement*"
]
```

would result in the following Graphite -> Telegraf transformation:

```
us-west.cpu.load 100
=> cpu.load,region=us-west value=100
```

#### Field Templates:

There is also a _field_ keyword, which can only be specified once.
The field keyword tells Telegraf to give the metric that field name.
So the following template:

```toml
templates = [
    "measurement.measurement.field.region"
]
```

would result in the following Graphite -> Telegraf transformation:

```
cpu.usage.idle.us-west 100
=> cpu_usage,region=us-west idle=100
```

#### Filter Templates:

Users can also filter the template(s) to use based on the name of the bucket,
using glob matching, like so:

```toml
templates = [
    "cpu.* measurement.measurement.region",
    "mem.* measurement.measurement.host"
]
```

which would result in the following transformation:

```
cpu.load.us-west 100
=> cpu_load,region=us-west value=100

mem.cached.localhost 256
=> mem_cached,host=localhost value=256
```

#### Adding Tags:

Additional tags that don't exist on the received metric can be added.
You can add additional tags by specifying them after the pattern.
Tags have the same format as the line protocol.
Multiple tags are separated by commas.

```toml
templates = [
    "measurement.measurement.field.region datacenter=1a"
]
```

would result in the following Graphite -> Telegraf transformation:

```
cpu.usage.idle.us-west 100
=> cpu_usage,region=us-west,datacenter=1a idle=100
```

There are many more options available;
[more details can be found here](https://github.com/influxdata/influxdb/tree/master/services/graphite#templates).

#### Graphite Configuration:

```toml
[[inputs.exec]]
  ## Commands array
  commands = ["/tmp/test.sh", "/usr/bin/mycollector --foo=bar"]

  ## measurement name suffix (for separating different commands)
  name_suffix = "_mycollector"

  ## Data format to consume. This can be "json", "influx" or "graphite" (line-protocol)
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "graphite"

  ## This string will be used to join the matched values.
  separator = "_"

  ## Each template line requires a template pattern. It can have an optional
  ## filter before the template and separated by spaces. It can also have optional extra
  ## tags following the template. Multiple tags should be separated by commas and no spaces
  ## similar to the line protocol format. There can be only one default template.
  ## Templates support the formats below:
  ## 1. filter + template
  ## 2. filter + template + extra tag
  ## 3. filter + template with field key
  ## 4. default template
  templates = [
    "*.app env.service.resource.measurement",
    "stats.* .host.measurement* region=us-west,agent=sensu",
    "stats2.* .host.measurement.field",
    "measurement*"
  ]
```
docs/DATA_FORMATS_OUTPUT.md (new file, 97 lines)
@@ -0,0 +1,97 @@
# Telegraf Output Data Formats

Telegraf metrics, like InfluxDB
[points](https://docs.influxdata.com/influxdb/v0.10/write_protocols/line/),
are a combination of four basic parts:

1. Measurement Name
1. Tags
1. Fields
1. Timestamp

In InfluxDB line protocol, these four parts are easily defined in textual form:

```
measurement_name[,tag1=val1,...] field1=val1[,field2=val2,...] [timestamp]
```
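
For instance, a concrete metric in this form (the tag and field names here are
just an example):

```
cpu,host=server01,region=us-west usage_idle=98.2,usage_user=0.9 1455320660004257758
```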

For Telegraf outputs that write textual data (such as `kafka`, `mqtt`, and `file`),
InfluxDB line protocol was originally the only available output format. But now
we are normalizing telegraf metric "serializers" into a
[plugin-like interface](https://github.com/influxdata/telegraf/tree/master/plugins/serializers)
across all output plugins that can support it.
You will be able to identify a plugin that supports different data formats
by the presence of a `data_format`
config option, for example, in the `file` output plugin:

```toml
[[outputs.file]]
  ## Files to write to, "stdout" is a specially handled file.
  files = ["stdout"]

  ## Data format to output. This can be "influx" or "graphite"
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
  data_format = "influx"

  ## Additional configuration options go here
```

Each data_format has an additional set of configuration options available, which
I'll go over below.

## Influx:

There are no additional configuration options for InfluxDB line-protocol. The
metrics are serialized directly into InfluxDB line-protocol.

#### Influx Configuration:

```toml
[[outputs.file]]
  ## Files to write to, "stdout" is a specially handled file.
  files = ["stdout", "/tmp/metrics.out"]

  ## Data format to output. This can be "influx" or "graphite"
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
  data_format = "influx"
```

## Graphite:

The Graphite data format translates Telegraf metrics into _dot_ buckets.
The format is:

```
[prefix].[host tag].[all tags (alphabetical)].[measurement name].[field name] value timestamp
```

This means the following influx metric -> graphite conversion would happen:

```
cpu,cpu=cpu-total,dc=us-east-1,host=tars usage_idle=98.09,usage_user=0.89 1455320660004257758
=>
tars.cpu-total.us-east-1.cpu.usage_user 0.89 1455320690
tars.cpu-total.us-east-1.cpu.usage_idle 98.09 1455320690
```

`prefix` is a configuration option when using the graphite output data format.

#### Graphite Configuration:

```toml
[[outputs.file]]
  ## Files to write to, "stdout" is a specially handled file.
  files = ["stdout", "/tmp/metrics.out"]

  ## Data format to output. This can be "influx" or "graphite"
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
  data_format = "graphite"

  prefix = "telegraf"
```
docs/LICENSE_OF_DEPENDENCIES.md (new file, 33 lines)
@@ -0,0 +1,33 @@
# List
- github.com/Shopify/sarama [MIT LICENSE](https://github.com/Shopify/sarama/blob/master/MIT-LICENSE)
- github.com/Sirupsen/logrus [MIT LICENSE](https://github.com/Sirupsen/logrus/blob/master/LICENSE)
- github.com/armon/go-metrics [MIT LICENSE](https://github.com/armon/go-metrics/blob/master/LICENSE)
- github.com/boltdb/bolt [MIT LICENSE](https://github.com/boltdb/bolt/blob/master/LICENSE)
- github.com/cenkalti/backoff [MIT LICENSE](https://github.com/cenkalti/backoff/blob/master/LICENSE)
- github.com/dancannon/gorethink [APACHE LICENSE](https://github.com/dancannon/gorethink/blob/master/LICENSE)
- github.com/eapache/go-resiliency [MIT LICENSE](https://github.com/eapache/go-resiliency/blob/master/LICENSE)
- github.com/eapache/queue [MIT LICENSE](https://github.com/eapache/queue/blob/master/LICENSE)
- github.com/fsouza/go-dockerclient [BSD LICENSE](https://github.com/fsouza/go-dockerclient/blob/master/LICENSE)
- github.com/go-sql-driver/mysql [MPL LICENSE](https://github.com/go-sql-driver/mysql/blob/master/LICENSE)
- github.com/gogo/protobuf [BSD LICENSE](https://github.com/gogo/protobuf/blob/master/LICENSE)
- github.com/golang/protobuf [BSD LICENSE](https://github.com/golang/protobuf/blob/master/LICENSE)
- github.com/golang/snappy [BSD LICENSE](https://github.com/golang/snappy/blob/master/LICENSE)
- github.com/gonuts/go-shellquote (No license, but the project it was forked from, https://github.com/kballard/go-shellquote, is [MIT](https://github.com/kballard/go-shellquote/blob/master/LICENSE)).
- github.com/hashicorp/go-msgpack [BSD LICENSE](https://github.com/hashicorp/go-msgpack/blob/master/LICENSE)
- github.com/hashicorp/raft [MPL LICENSE](https://github.com/hashicorp/raft/blob/master/LICENSE)
- github.com/hashicorp/raft-boltdb [MPL LICENSE](https://github.com/hashicorp/raft-boltdb/blob/master/LICENSE)
- github.com/lib/pq [MIT LICENSE](https://github.com/lib/pq/blob/master/LICENSE.md)
- github.com/matttproud/golang_protobuf_extensions [APACHE LICENSE](https://github.com/matttproud/golang_protobuf_extensions/blob/master/LICENSE)
- github.com/naoina/go-stringutil [MIT LICENSE](https://github.com/naoina/go-stringutil/blob/master/LICENSE)
- github.com/naoina/toml [MIT LICENSE](https://github.com/naoina/toml/blob/master/LICENSE)
- github.com/prometheus/client_golang [APACHE LICENSE](https://github.com/prometheus/client_golang/blob/master/LICENSE)
- github.com/samuel/go-zookeeper [BSD LICENSE](https://github.com/samuel/go-zookeeper/blob/master/LICENSE)
- github.com/stretchr/objx [MIT LICENSE](https://github.com/stretchr/objx)
- github.com/stretchr/testify [MIT LICENSE](https://github.com/stretchr/testify/blob/master/LICENCE.txt)
- github.com/wvanbergen/kafka [MIT LICENSE](https://github.com/wvanbergen/kafka/blob/master/LICENSE)
- github.com/wvanbergen/kazoo-go [MIT LICENSE](https://github.com/wvanbergen/kazoo-go/blob/master/MIT-LICENSE)
- gopkg.in/dancannon/gorethink.v1 [APACHE LICENSE](https://github.com/dancannon/gorethink/blob/v1.1.2/LICENSE)
- gopkg.in/mgo.v2 [BSD LICENSE](https://github.com/go-mgo/mgo/blob/v2/LICENSE)
- golang.org/x/crypto/* [BSD LICENSE](https://github.com/golang/crypto/blob/master/LICENSE)
- internal Glob function [MIT LICENSE](https://github.com/ryanuber/go-glob/blob/master/LICENSE)
@@ -1,134 +0,0 @@
# Telegraf configuration

# If this file is missing an [agent] section, you must first generate a
# valid config with 'telegraf -sample-config > telegraf.toml'

# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared plugins.

# Even if a plugin has no configuration, it must be declared in here
# to be active. Declaring a plugin means just specifying the name
# as a section with no variables. To deactivate a plugin, comment
# out the name and any variables.

# Use 'telegraf -config telegraf.toml -test' to see what metrics a config
# file would generate.

# One rule that plugins conform to is wherever a connection string
# can be passed, the values '' and 'localhost' are treated specially.
# They indicate to the plugin to use their own builtin configuration to
# connect to the local system.

# NOTE: The configuration has a few required parameters. They are marked
# with 'required'. Be sure to edit those to make this configuration work.

# Configuration for influxdb server to send metrics to
[influxdb]
# The full HTTP endpoint URL for your InfluxDB instance
url = "http://localhost:8086" # required.

# The target database for metrics. This database must already exist
database = "telegraf" # required.

# username = "telegraf"
# password = "metricsmetricsmetricsmetrics"

# Set the user agent for the POSTs (can be useful for log differentiation)
# user_agent = "telegraf"
# tags = { "dc": "us-east-1" }

# Tags can also be specified via a normal map, but only one form at a time:

# [influxdb.tags]
# dc = "us-east-1"

# Configuration for telegraf itself
# [agent]
# interval = "10s"
# debug = false
# hostname = "prod3241"

# PLUGINS

# Read metrics about cpu usage
[cpu]
# no configuration

# Read metrics about disk usage by mount point
[disk]
# no configuration

# Read metrics about docker containers
[docker]
# no configuration

# Read metrics about disk IO by device
[io]
# no configuration

# Read metrics about memory usage
[mem]
# no configuration

# Read metrics from one or many mysql servers
[mysql]

# specify servers via a url matching:
# [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify]]
# e.g. root:root@http://10.0.0.18/?tls=false
#
# If no servers are specified, then localhost is used as the host.
servers = ["localhost"]

# Read metrics about network interface usage
[net]

# By default, telegraf gathers stats from any up interface (excluding loopback)
# Setting interfaces will tell it to gather these explicit interfaces,
# regardless of status.
#
# interfaces = ["eth0", ... ]

# Read metrics from one or many postgresql servers
[postgresql]

# specify servers via an array of tables
[[postgresql.servers]]

# specify address via a url matching:
# postgres://[pqgotest[:password]]@localhost?sslmode=[disable|verify-ca|verify-full]
# or a simple string:
# host=localhost user=pqotest password=... sslmode=...
#
# All connection parameters are optional. By default, the host is localhost
# and the user is the currently running user. For localhost, we default
# to sslmode=disable as well.
#

address = "sslmode=disable"

# A list of databases to pull metrics about. If not specified, metrics for all
# databases are gathered.

# databases = ["app_production", "blah_testing"]

# [[postgresql.servers]]
# address = "influx@remoteserver"

# Read metrics from one or many redis servers
[redis]

# An array of addresses to gather stats about. Specify an ip or hostname
# with optional port. ie localhost, 10.10.3.33:18832, etc.
#
# If no servers are specified, then localhost is used as the host.
servers = ["localhost"]

# Read metrics about swap memory usage
[swap]
# no configuration

# Read metrics about system load
[system]
# no configuration
11
etc/logrotate.d/telegraf
Normal file
@@ -0,0 +1,11 @@
/var/log/telegraf/telegraf.log
{
  rotate 6
  daily
  missingok
  dateext
  copytruncate
  notifempty
  compress
}
128
etc/telegraf.conf
Normal file
@@ -0,0 +1,128 @@
# Telegraf configuration

# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared inputs, and sent to the declared outputs.

# Plugins must be declared in here to be active.
# To deactivate a plugin, comment out the name and any variables.

# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
# file would generate.

# Global tags can be specified here in key="value" format.
[global_tags]
  # dc = "us-east-1" # will tag all metrics with dc=us-east-1
  # rack = "1a"

# Configuration for telegraf agent
[agent]
  ## Default data collection interval for all inputs
  interval = "10s"
  ## Rounds collection interval to 'interval'
  ## ie, if interval="10s" then always collect on :00, :10, :20, etc.
  round_interval = true

  ## Telegraf will cache metric_buffer_limit metrics for each output, and will
  ## flush this buffer on a successful write.
  metric_buffer_limit = 10000
  ## Flush the buffer whenever full, regardless of flush_interval.
  flush_buffer_when_full = true

  ## Collection jitter is used to jitter the collection by a random amount.
  ## Each plugin will sleep for a random time within jitter before collecting.
  ## This can be used to avoid many plugins querying things like sysfs at the
  ## same time, which can have a measurable effect on the system.
  collection_jitter = "0s"

  ## Default flushing interval for all outputs. You shouldn't set this below
  ## interval. Maximum flush_interval will be flush_interval + flush_jitter
  flush_interval = "10s"
  ## Jitter the flush interval by a random amount. This is primarily to avoid
  ## large write spikes for users running a large number of telegraf instances.
  ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
  flush_jitter = "0s"

  ## Run telegraf in debug mode
  debug = false
  ## Run telegraf in quiet mode
  quiet = false
  ## Override default hostname, if empty use os.Hostname()
  hostname = ""


###############################################################################
#                                  OUTPUTS                                    #
###############################################################################

# Configuration for influxdb server to send metrics to
[[outputs.influxdb]]
  # The full HTTP or UDP endpoint URL for your InfluxDB instance.
  # Multiple urls can be specified but it is assumed that they are part of the same
  # cluster, this means that only ONE of the urls will be written to each interval.
  # urls = ["udp://localhost:8089"] # UDP endpoint example
  urls = ["http://localhost:8086"] # required
  # The target database for metrics (telegraf will create it if it does not exist)
  database = "telegraf" # required
  # Precision of writes, valid values are "ns", "us" (or "µs"), "ms", "s", "m", "h".
  # note: using second precision greatly helps InfluxDB compression
  precision = "s"

  ## Write timeout (for the InfluxDB client), formatted as a string.
  ## If not provided, will default to 5s. 0s means no timeout (not recommended).
  timeout = "5s"
  # username = "telegraf"
  # password = "metricsmetricsmetricsmetrics"
  # Set the user agent for HTTP POSTs (can be useful for log differentiation)
  # user_agent = "telegraf"
  # Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes)
  # udp_payload = 512


###############################################################################
#                                  INPUTS                                     #
###############################################################################

# Read metrics about cpu usage
[[inputs.cpu]]
  # Whether to report per-cpu stats or not
  percpu = true
  # Whether to report total system cpu stats or not
  totalcpu = true
  # Comment this line if you want the raw CPU time metrics
  fielddrop = ["time_*"]

# Read metrics about disk usage by mount point
[[inputs.disk]]
  # By default, telegraf gathers stats for all mountpoints.
  # Setting mountpoints will restrict the stats to the specified mountpoints.
  # mount_points=["/"]

  # Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually
  # present on /run, /var/run, /dev/shm or /dev).
  ignore_fs = ["tmpfs", "devtmpfs"]

# Read metrics about disk IO by device
[[inputs.diskio]]
  # By default, telegraf will gather stats for all devices including
  # disk partitions.
  # Setting devices will restrict the stats to the specified devices.
  # devices = ["sda", "sdb"]
  # Uncomment the following line if you do not need disk serial numbers.
  # skip_serial_number = true

# Read metrics about memory usage
[[inputs.mem]]
  # no configuration

# Read metrics about swap memory usage
[[inputs.swap]]
  # no configuration

# Read metrics about system load & uptime
[[inputs.system]]
  # no configuration


###############################################################################
#                              SERVICE INPUTS                                 #
###############################################################################
164
etc/telegraf_windows.conf
Normal file
@@ -0,0 +1,164 @@
# Telegraf configuration

# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared inputs, and sent to the declared outputs.

# Plugins must be declared in here to be active.
# To deactivate a plugin, comment out the name and any variables.

# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
# file would generate.

# Global tags can be specified here in key="value" format.
[global_tags]
  # dc = "us-east-1" # will tag all metrics with dc=us-east-1
  # rack = "1a"

# Configuration for telegraf agent
[agent]
  ## Default data collection interval for all inputs
  interval = "10s"
  ## Rounds collection interval to 'interval'
  ## ie, if interval="10s" then always collect on :00, :10, :20, etc.
  round_interval = true

  ## Telegraf will cache metric_buffer_limit metrics for each output, and will
  ## flush this buffer on a successful write.
  metric_buffer_limit = 10000
  ## Flush the buffer whenever full, regardless of flush_interval.
  flush_buffer_when_full = true

  ## Collection jitter is used to jitter the collection by a random amount.
  ## Each plugin will sleep for a random time within jitter before collecting.
  ## This can be used to avoid many plugins querying things like sysfs at the
  ## same time, which can have a measurable effect on the system.
  collection_jitter = "0s"

  ## Default flushing interval for all outputs. You shouldn't set this below
  ## interval. Maximum flush_interval will be flush_interval + flush_jitter
  flush_interval = "10s"
  ## Jitter the flush interval by a random amount. This is primarily to avoid
  ## large write spikes for users running a large number of telegraf instances.
  ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
  flush_jitter = "0s"

  ## Run telegraf in debug mode
  debug = false
  ## Run telegraf in quiet mode
  quiet = false
  ## Override default hostname, if empty use os.Hostname()
  hostname = ""


###############################################################################
#                                  OUTPUTS                                    #
###############################################################################

# Configuration for influxdb server to send metrics to
[[outputs.influxdb]]
  # The full HTTP or UDP endpoint URL for your InfluxDB instance.
  # Multiple urls can be specified but it is assumed that they are part of the same
  # cluster, this means that only ONE of the urls will be written to each interval.
  # urls = ["udp://localhost:8089"] # UDP endpoint example
  urls = ["http://localhost:8086"] # required
  # The target database for metrics (telegraf will create it if it does not exist)
  database = "telegraf" # required
  # Precision of writes, valid values are "ns", "us" (or "µs"), "ms", "s", "m", "h".
  # note: using second precision greatly helps InfluxDB compression
  precision = "s"

  ## Write timeout (for the InfluxDB client), formatted as a string.
  ## If not provided, will default to 5s. 0s means no timeout (not recommended).
  timeout = "5s"
  # username = "telegraf"
  # password = "metricsmetricsmetricsmetrics"
  # Set the user agent for HTTP POSTs (can be useful for log differentiation)
  # user_agent = "telegraf"
  # Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes)
  # udp_payload = 512


###############################################################################
#                                  INPUTS                                     #
###############################################################################

# Windows Performance Counters plugin.
# This is the recommended method of monitoring system metrics on Windows,
# as the regular system plugins (inputs.cpu, inputs.mem, etc.) rely on WMI,
# which utilizes a lot of system resources.
#
# See more configuration examples at:
# https://github.com/influxdata/telegraf/tree/master/plugins/inputs/win_perf_counters

[[inputs.win_perf_counters]]
  [[inputs.win_perf_counters.object]]
    # Processor usage, alternative to native, reports on a per-core basis.
    ObjectName = "Processor"
    Instances = ["*"]
    Counters = ["% Idle Time", "% Interrupt Time", "% Privileged Time", "% User Time", "% Processor Time"]
    Measurement = "win_cpu"
    # IncludeTotal=false # Set to true to include _Total instance when querying for all (*).

  [[inputs.win_perf_counters.object]]
    # Disk times and queues
    ObjectName = "LogicalDisk"
    Instances = ["*"]
    Counters = ["% Idle Time", "% Disk Time","% Disk Read Time", "% Disk Write Time", "% User Time", "Current Disk Queue Length"]
    Measurement = "win_disk"
    # IncludeTotal=false # Set to true to include _Total instance when querying for all (*).

  [[inputs.win_perf_counters.object]]
    ObjectName = "System"
    Counters = ["Context Switches/sec","System Calls/sec"]
    Instances = ["------"]
    Measurement = "win_system"
    # IncludeTotal=false # Set to true to include _Total instance when querying for all (*).

  [[inputs.win_perf_counters.object]]
    # Example query where the Instance portion must be removed to get data back,
    # such as from the Memory object.
    ObjectName = "Memory"
    Counters = ["Available Bytes","Cache Faults/sec","Demand Zero Faults/sec","Page Faults/sec","Pages/sec","Transition Faults/sec","Pool Nonpaged Bytes","Pool Paged Bytes"]
    Instances = ["------"] # Use 6 x - to remove the Instance bit from the query.
    Measurement = "win_mem"
    # IncludeTotal=false # Set to true to include _Total instance when querying for all (*).


# Windows system plugins using WMI (disabled by default, using
# win_perf_counters over WMI is recommended)

# Read metrics about cpu usage
# [[inputs.cpu]]
  ## Whether to report per-cpu stats or not
  # percpu = true
  ## Whether to report total system cpu stats or not
  # totalcpu = true
  ## Comment this line if you want the raw CPU time metrics
  # fielddrop = ["time_*"]

# Read metrics about disk usage by mount point
# [[inputs.disk]]
  ## By default, telegraf gathers stats for all mountpoints.
  ## Setting mountpoints will restrict the stats to the specified mountpoints.
  ## mount_points=["/"]

  ## Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually
  ## present on /run, /var/run, /dev/shm or /dev).
  # ignore_fs = ["tmpfs", "devtmpfs"]

# Read metrics about disk IO by device
# [[inputs.diskio]]
  ## By default, telegraf will gather stats for all devices including
  ## disk partitions.
  ## Setting devices will restrict the stats to the specified devices.
  ## devices = ["sda", "sdb"]
  ## Uncomment the following line if you do not need disk serial numbers.
  ## skip_serial_number = true

# Read metrics about memory usage
# [[inputs.mem]]
  # no configuration

# Read metrics about swap memory usage
# [[inputs.swap]]
  # no configuration
31
input.go
Normal file
@@ -0,0 +1,31 @@
package telegraf

type Input interface {
    // SampleConfig returns the default configuration of the Input
    SampleConfig() string

    // Description returns a one-sentence description of the Input
    Description() string

    // Gather takes in an accumulator and adds the metrics that the Input
    // gathers. This is called every "interval"
    Gather(Accumulator) error
}

type ServiceInput interface {
    // SampleConfig returns the default configuration of the Input
    SampleConfig() string

    // Description returns a one-sentence description of the Input
    Description() string

    // Gather takes in an accumulator and adds the metrics that the Input
    // gathers. This is called every "interval"
    Gather(Accumulator) error

    // Start starts the ServiceInput's service, whatever that may be
    Start(Accumulator) error

    // Stop stops the services and closes any necessary channels and connections
    Stop()
}
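To make the contract concrete, here is a minimal sketch of a plugin satisfying
`Input`. The `example` plugin itself is hypothetical, but it registers and
gathers the way the plugins under `plugins/inputs` do, assuming the
`Accumulator.AddFields(measurement, fields, tags)` shape used elsewhere in the
tree at this point:

```go
package example

import (
    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/plugins/inputs"
)

// Example is a hypothetical input, shown only to illustrate the interface.
type Example struct {
    Ok bool // set from the plugin's TOML table
}

func (e *Example) Description() string { return "a demonstration input" }

func (e *Example) SampleConfig() string {
    return "  ok = true # indicate if everything is fine"
}

// Gather is called every "interval" and pushes one metric into the accumulator.
func (e *Example) Gather(acc telegraf.Accumulator) error {
    fields := map[string]interface{}{"ok": e.Ok}
    acc.AddFields("example", fields, nil) // nil: no tags on this metric
    return nil
}

func init() {
    inputs.Add("example", func() telegraf.Input { return &Example{} })
}
```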
761
internal/config/config.go
Normal file
@@ -0,0 +1,761 @@
package config

import (
    "errors"
    "fmt"
    "io/ioutil"
    "log"
    "path/filepath"
    "sort"
    "strings"
    "time"

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/internal"
    "github.com/influxdata/telegraf/internal/models"
    "github.com/influxdata/telegraf/plugins/inputs"
    "github.com/influxdata/telegraf/plugins/outputs"
    "github.com/influxdata/telegraf/plugins/parsers"
    "github.com/influxdata/telegraf/plugins/serializers"

    "github.com/influxdata/config"
    "github.com/naoina/toml/ast"
)

// Config specifies the URL/user/password for the database that telegraf
// will be logging to, as well as all the plugins that the user has
// specified
type Config struct {
    Tags          map[string]string
    InputFilters  []string
    OutputFilters []string

    Agent   *AgentConfig
    Inputs  []*internal_models.RunningInput
    Outputs []*internal_models.RunningOutput
}

func NewConfig() *Config {
    c := &Config{
        // Agent defaults:
        Agent: &AgentConfig{
            Interval:      internal.Duration{Duration: 10 * time.Second},
            RoundInterval: true,
            FlushInterval: internal.Duration{Duration: 10 * time.Second},
            FlushJitter:   internal.Duration{Duration: 5 * time.Second},
        },

        Tags:          make(map[string]string),
        Inputs:        make([]*internal_models.RunningInput, 0),
        Outputs:       make([]*internal_models.RunningOutput, 0),
        InputFilters:  make([]string, 0),
        OutputFilters: make([]string, 0),
    }
    return c
}

type AgentConfig struct {
    // Interval at which to gather information
    Interval internal.Duration

    // RoundInterval rounds collection interval to 'interval'.
    // ie, if Interval=10s then always collect on :00, :10, :20, etc.
    RoundInterval bool

    // CollectionJitter is used to jitter the collection by a random amount.
    // Each plugin will sleep for a random time within jitter before collecting.
    // This can be used to avoid many plugins querying things like sysfs at the
    // same time, which can have a measurable effect on the system.
    CollectionJitter internal.Duration

    // FlushInterval is the Interval at which to flush data
    FlushInterval internal.Duration

    // FlushJitter jitters the flush interval by a random amount.
    // This is primarily to avoid large write spikes for users running a large
    // number of telegraf instances.
    // ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
    FlushJitter internal.Duration

    // MetricBufferLimit is the max number of metrics that each output plugin
    // will cache. The buffer is cleared when a successful write occurs. When
    // full, the oldest metrics will be overwritten.
    MetricBufferLimit int

    // FlushBufferWhenFull tells Telegraf to flush the metric buffer whenever
    // it fills up, regardless of FlushInterval. Setting this option to true
    // does _not_ deactivate FlushInterval.
    FlushBufferWhenFull bool

    // TODO(cam): Remove UTC and Precision parameters, they are no longer
    // valid for the agent config. Leaving them here for now for backwards-
    // compatibility
    UTC       bool `toml:"utc"`
    Precision string

    // Debug is the option for running in debug mode
    Debug bool

    // Quiet is the option for running in quiet mode
    Quiet    bool
    Hostname string
}

// InputNames returns a list of strings of the configured inputs.
func (c *Config) InputNames() []string {
    var name []string
    for _, input := range c.Inputs {
        name = append(name, input.Name)
    }
    return name
}

// OutputNames returns a list of strings of the configured outputs.
func (c *Config) OutputNames() []string {
    var name []string
    for _, output := range c.Outputs {
        name = append(name, output.Name)
    }
    return name
}

// ListTags returns a string of tags specified in the config,
// line-protocol style
func (c *Config) ListTags() string {
    var tags []string

    for k, v := range c.Tags {
        tags = append(tags, fmt.Sprintf("%s=%s", k, v))
    }

    sort.Strings(tags)

    return strings.Join(tags, " ")
}

var header = `# Telegraf Configuration

# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared inputs, and sent to the declared outputs.

# Plugins must be declared in here to be active.
# To deactivate a plugin, comment out the name and any variables.

# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
# file would generate.

# Global tags can be specified here in key="value" format.
[global_tags]
  # dc = "us-east-1" # will tag all metrics with dc=us-east-1
  # rack = "1a"

# Configuration for telegraf agent
[agent]
  ## Default data collection interval for all inputs
  interval = "10s"
  ## Rounds collection interval to 'interval'
  ## ie, if interval="10s" then always collect on :00, :10, :20, etc.
  round_interval = true

  ## Telegraf will cache metric_buffer_limit metrics for each output, and will
  ## flush this buffer on a successful write.
  metric_buffer_limit = 10000
  ## Flush the buffer whenever full, regardless of flush_interval.
  flush_buffer_when_full = true

  ## Collection jitter is used to jitter the collection by a random amount.
  ## Each plugin will sleep for a random time within jitter before collecting.
  ## This can be used to avoid many plugins querying things like sysfs at the
  ## same time, which can have a measurable effect on the system.
  collection_jitter = "0s"

  ## Default flushing interval for all outputs. You shouldn't set this below
  ## interval. Maximum flush_interval will be flush_interval + flush_jitter
  flush_interval = "10s"
  ## Jitter the flush interval by a random amount. This is primarily to avoid
  ## large write spikes for users running a large number of telegraf instances.
  ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
  flush_jitter = "0s"

  ## Run telegraf in debug mode
  debug = false
  ## Run telegraf in quiet mode
  quiet = false
  ## Override default hostname, if empty use os.Hostname()
  hostname = ""


#
# OUTPUTS:
#

`

var pluginHeader = `
#
# INPUTS:
#
`

var serviceInputHeader = `
#
# SERVICE INPUTS:
#
`

// PrintSampleConfig prints the sample config
func PrintSampleConfig(pluginFilters []string, outputFilters []string) {
    fmt.Print(header)

    // Filter outputs
    var onames []string
    for oname := range outputs.Outputs {
        if len(outputFilters) == 0 || sliceContains(oname, outputFilters) {
            onames = append(onames, oname)
        }
    }
    sort.Strings(onames)

    // Print Outputs
    for _, oname := range onames {
        creator := outputs.Outputs[oname]
        output := creator()
        printConfig(oname, output, "outputs")
    }

    // Filter inputs
    var pnames []string
    for pname := range inputs.Inputs {
        if len(pluginFilters) == 0 || sliceContains(pname, pluginFilters) {
            pnames = append(pnames, pname)
        }
    }
    sort.Strings(pnames)

    // Print Inputs
    fmt.Print(pluginHeader)
    servInputs := make(map[string]telegraf.ServiceInput)
    for _, pname := range pnames {
        creator := inputs.Inputs[pname]
        input := creator()

        switch p := input.(type) {
        case telegraf.ServiceInput:
            servInputs[pname] = p
            continue
        }

        printConfig(pname, input, "inputs")
    }

    // Print Service Inputs
    fmt.Print(serviceInputHeader)
    for name, input := range servInputs {
        printConfig(name, input, "inputs")
    }
}

type printer interface {
    Description() string
    SampleConfig() string
}

func printConfig(name string, p printer, op string) {
    fmt.Printf("\n# %s\n[[%s.%s]]", p.Description(), op, name)
    config := p.SampleConfig()
    if config == "" {
        fmt.Printf("\n  # no configuration\n")
    } else {
        fmt.Print(config)
    }
}

func sliceContains(name string, list []string) bool {
    for _, b := range list {
        if b == name {
            return true
        }
    }
    return false
}

// PrintInputConfig prints the config usage of a single input.
func PrintInputConfig(name string) error {
    if creator, ok := inputs.Inputs[name]; ok {
        printConfig(name, creator(), "inputs")
    } else {
        return fmt.Errorf("Input %s not found", name)
    }
    return nil
}

// PrintOutputConfig prints the config usage of a single output.
func PrintOutputConfig(name string) error {
    if creator, ok := outputs.Outputs[name]; ok {
        printConfig(name, creator(), "outputs")
    } else {
        return fmt.Errorf("Output %s not found", name)
    }
    return nil
}

func (c *Config) LoadDirectory(path string) error {
    directoryEntries, err := ioutil.ReadDir(path)
    if err != nil {
        return err
    }
    for _, entry := range directoryEntries {
        if entry.IsDir() {
            continue
        }
        name := entry.Name()
        if len(name) < 6 || name[len(name)-5:] != ".conf" {
            continue
        }
        err := c.LoadConfig(filepath.Join(path, name))
        if err != nil {
            return err
        }
    }
    return nil
}

// LoadConfig loads the given config file and applies it to c
func (c *Config) LoadConfig(path string) error {
    tbl, err := config.ParseFile(path)
    if err != nil {
        return err
    }

    for name, val := range tbl.Fields {
        subTable, ok := val.(*ast.Table)
        if !ok {
            return errors.New("invalid configuration")
        }

        switch name {
        case "agent":
            if err = config.UnmarshalTable(subTable, c.Agent); err != nil {
                log.Printf("Could not parse [agent] config\n")
                return err
            }
        case "global_tags", "tags":
            if err = config.UnmarshalTable(subTable, c.Tags); err != nil {
                log.Printf("Could not parse [global_tags] config\n")
                return err
            }
        case "outputs":
            for pluginName, pluginVal := range subTable.Fields {
                switch pluginSubTable := pluginVal.(type) {
                case *ast.Table:
                    if err = c.addOutput(pluginName, pluginSubTable); err != nil {
                        return err
                    }
                case []*ast.Table:
                    for _, t := range pluginSubTable {
                        if err = c.addOutput(pluginName, t); err != nil {
                            return err
                        }
                    }
                default:
                    return fmt.Errorf("Unsupported config format: %s",
                        pluginName)
                }
            }
        case "inputs", "plugins":
            for pluginName, pluginVal := range subTable.Fields {
                switch pluginSubTable := pluginVal.(type) {
                case *ast.Table:
                    if err = c.addInput(pluginName, pluginSubTable); err != nil {
                        return err
                    }
                case []*ast.Table:
                    for _, t := range pluginSubTable {
                        if err = c.addInput(pluginName, t); err != nil {
                            return err
                        }
                    }
                default:
                    return fmt.Errorf("Unsupported config format: %s",
                        pluginName)
                }
            }
        // Assume it's an input for legacy config file support if no other
        // identifiers are present
        default:
            if err = c.addInput(name, subTable); err != nil {
                return err
            }
        }
    }
    return nil
}

func (c *Config) addOutput(name string, table *ast.Table) error {
    if len(c.OutputFilters) > 0 && !sliceContains(name, c.OutputFilters) {
        return nil
    }
    creator, ok := outputs.Outputs[name]
    if !ok {
        return fmt.Errorf("Undefined but requested output: %s", name)
    }
    output := creator()

    // If the output has a SetSerializer function, then this means it can write
    // arbitrary types of output, so build the serializer and set it.
    switch t := output.(type) {
    case serializers.SerializerOutput:
        serializer, err := buildSerializer(name, table)
        if err != nil {
            return err
        }
        t.SetSerializer(serializer)
    }

    outputConfig, err := buildOutput(name, table)
    if err != nil {
        return err
    }

    if err := config.UnmarshalTable(table, output); err != nil {
        return err
    }

    ro := internal_models.NewRunningOutput(name, output, outputConfig)
    if c.Agent.MetricBufferLimit > 0 {
        ro.MetricBufferLimit = c.Agent.MetricBufferLimit
    }
    ro.FlushBufferWhenFull = c.Agent.FlushBufferWhenFull
    c.Outputs = append(c.Outputs, ro)
    return nil
}

func (c *Config) addInput(name string, table *ast.Table) error {
    if len(c.InputFilters) > 0 && !sliceContains(name, c.InputFilters) {
        return nil
    }
    // Legacy support renaming io input to diskio
    if name == "io" {
        name = "diskio"
    }

    creator, ok := inputs.Inputs[name]
    if !ok {
        return fmt.Errorf("Undefined but requested input: %s", name)
    }
    input := creator()

    // If the input has a SetParser function, then this means it can accept
    // arbitrary types of input, so build the parser and set it.
    switch t := input.(type) {
    case parsers.ParserInput:
        parser, err := buildParser(name, table)
        if err != nil {
            return err
        }
        t.SetParser(parser)
    }

    pluginConfig, err := buildInput(name, table)
    if err != nil {
        return err
    }

    if err := config.UnmarshalTable(table, input); err != nil {
        return err
    }

    rp := &internal_models.RunningInput{
        Name:   name,
        Input:  input,
        Config: pluginConfig,
    }
    c.Inputs = append(c.Inputs, rp)
    return nil
}

// buildFilter builds a Filter
// (tagpass/tagdrop/namepass/namedrop/fieldpass/fielddrop) to
// be inserted into the internal_models.OutputConfig/internal_models.InputConfig
// to be used for prefix filtering on tags and measurements
func buildFilter(tbl *ast.Table) internal_models.Filter {
    f := internal_models.Filter{}

    if node, ok := tbl.Fields["namepass"]; ok {
        if kv, ok := node.(*ast.KeyValue); ok {
            if ary, ok := kv.Value.(*ast.Array); ok {
                for _, elem := range ary.Value {
                    if str, ok := elem.(*ast.String); ok {
                        f.NamePass = append(f.NamePass, str.Value)
                        f.IsActive = true
                    }
                }
            }
        }
    }

    if node, ok := tbl.Fields["namedrop"]; ok {
        if kv, ok := node.(*ast.KeyValue); ok {
            if ary, ok := kv.Value.(*ast.Array); ok {
                for _, elem := range ary.Value {
                    if str, ok := elem.(*ast.String); ok {
                        f.NameDrop = append(f.NameDrop, str.Value)
                        f.IsActive = true
                    }
                }
            }
        }
    }

    fields := []string{"pass", "fieldpass"}
    for _, field := range fields {
        if node, ok := tbl.Fields[field]; ok {
            if kv, ok := node.(*ast.KeyValue); ok {
                if ary, ok := kv.Value.(*ast.Array); ok {
                    for _, elem := range ary.Value {
                        if str, ok := elem.(*ast.String); ok {
                            f.FieldPass = append(f.FieldPass, str.Value)
                            f.IsActive = true
                        }
                    }
                }
            }
        }
    }

    fields = []string{"drop", "fielddrop"}
    for _, field := range fields {
        if node, ok := tbl.Fields[field]; ok {
            if kv, ok := node.(*ast.KeyValue); ok {
                if ary, ok := kv.Value.(*ast.Array); ok {
                    for _, elem := range ary.Value {
                        if str, ok := elem.(*ast.String); ok {
                            f.FieldDrop = append(f.FieldDrop, str.Value)
                            f.IsActive = true
                        }
                    }
                }
            }
        }
    }

    if node, ok := tbl.Fields["tagpass"]; ok {
        if subtbl, ok := node.(*ast.Table); ok {
            for name, val := range subtbl.Fields {
                if kv, ok := val.(*ast.KeyValue); ok {
                    tagfilter := &internal_models.TagFilter{Name: name}
                    if ary, ok := kv.Value.(*ast.Array); ok {
                        for _, elem := range ary.Value {
                            if str, ok := elem.(*ast.String); ok {
                                tagfilter.Filter = append(tagfilter.Filter, str.Value)
                            }
                        }
                    }
                    f.TagPass = append(f.TagPass, *tagfilter)
                    f.IsActive = true
                }
            }
        }
    }

    if node, ok := tbl.Fields["tagdrop"]; ok {
        if subtbl, ok := node.(*ast.Table); ok {
            for name, val := range subtbl.Fields {
                if kv, ok := val.(*ast.KeyValue); ok {
                    tagfilter := &internal_models.TagFilter{Name: name}
                    if ary, ok := kv.Value.(*ast.Array); ok {
                        for _, elem := range ary.Value {
                            if str, ok := elem.(*ast.String); ok {
                                tagfilter.Filter = append(tagfilter.Filter, str.Value)
                            }
                        }
                    }
                    f.TagDrop = append(f.TagDrop, *tagfilter)
                    f.IsActive = true
                }
            }
        }
    }

    delete(tbl.Fields, "namedrop")
    delete(tbl.Fields, "namepass")
    delete(tbl.Fields, "fielddrop")
    delete(tbl.Fields, "fieldpass")
    delete(tbl.Fields, "drop")
    delete(tbl.Fields, "pass")
    delete(tbl.Fields, "tagdrop")
    delete(tbl.Fields, "tagpass")
    return f
}

// buildInput parses input specific items from the ast.Table,
// builds the filter and returns an
// internal_models.InputConfig to be inserted into internal_models.RunningInput
func buildInput(name string, tbl *ast.Table) (*internal_models.InputConfig, error) {
    cp := &internal_models.InputConfig{Name: name}
    if node, ok := tbl.Fields["interval"]; ok {
        if kv, ok := node.(*ast.KeyValue); ok {
            if str, ok := kv.Value.(*ast.String); ok {
                dur, err := time.ParseDuration(str.Value)
                if err != nil {
                    return nil, err
                }

                cp.Interval = dur
            }
        }
    }

    if node, ok := tbl.Fields["name_prefix"]; ok {
        if kv, ok := node.(*ast.KeyValue); ok {
            if str, ok := kv.Value.(*ast.String); ok {
                cp.MeasurementPrefix = str.Value
            }
        }
    }

    if node, ok := tbl.Fields["name_suffix"]; ok {
        if kv, ok := node.(*ast.KeyValue); ok {
            if str, ok := kv.Value.(*ast.String); ok {
                cp.MeasurementSuffix = str.Value
            }
        }
    }

    if node, ok := tbl.Fields["name_override"]; ok {
        if kv, ok := node.(*ast.KeyValue); ok {
            if str, ok := kv.Value.(*ast.String); ok {
                cp.NameOverride = str.Value
            }
        }
    }

    cp.Tags = make(map[string]string)
    if node, ok := tbl.Fields["tags"]; ok {
        if subtbl, ok := node.(*ast.Table); ok {
            if err := config.UnmarshalTable(subtbl, cp.Tags); err != nil {
                log.Printf("Could not parse tags for input %s\n", name)
            }
        }
    }

    delete(tbl.Fields, "name_prefix")
    delete(tbl.Fields, "name_suffix")
    delete(tbl.Fields, "name_override")
    delete(tbl.Fields, "interval")
    delete(tbl.Fields, "tags")
    cp.Filter = buildFilter(tbl)
    return cp, nil
}

// buildParser grabs the necessary entries from the ast.Table for creating
// a parsers.Parser object, and creates it, which can then be added onto
// an Input object.
func buildParser(name string, tbl *ast.Table) (parsers.Parser, error) {
    c := &parsers.Config{}

    if node, ok := tbl.Fields["data_format"]; ok {
        if kv, ok := node.(*ast.KeyValue); ok {
            if str, ok := kv.Value.(*ast.String); ok {
                c.DataFormat = str.Value
            }
        }
    }

    // Legacy support, exec plugin originally parsed JSON by default.
    if name == "exec" && c.DataFormat == "" {
        c.DataFormat = "json"
    } else if c.DataFormat == "" {
        c.DataFormat = "influx"
    }

    if node, ok := tbl.Fields["separator"]; ok {
        if kv, ok := node.(*ast.KeyValue); ok {
            if str, ok := kv.Value.(*ast.String); ok {
                c.Separator = str.Value
            }
        }
    }

    if node, ok := tbl.Fields["templates"]; ok {
        if kv, ok := node.(*ast.KeyValue); ok {
            if ary, ok := kv.Value.(*ast.Array); ok {
                for _, elem := range ary.Value {
                    if str, ok := elem.(*ast.String); ok {
                        c.Templates = append(c.Templates, str.Value)
                    }
                }
            }
        }
    }

    if node, ok := tbl.Fields["tag_keys"]; ok {
        if kv, ok := node.(*ast.KeyValue); ok {
            if ary, ok := kv.Value.(*ast.Array); ok {
                for _, elem := range ary.Value {
                    if str, ok := elem.(*ast.String); ok {
                        c.TagKeys = append(c.TagKeys, str.Value)
                    }
                }
            }
        }
    }

    c.MetricName = name

    delete(tbl.Fields, "data_format")
    delete(tbl.Fields, "separator")
    delete(tbl.Fields, "templates")
    delete(tbl.Fields, "tag_keys")

    return parsers.NewParser(c)
}

// buildSerializer grabs the necessary entries from the ast.Table for creating
// a serializers.Serializer object, and creates it, which can then be added onto
// an Output object.
func buildSerializer(name string, tbl *ast.Table) (serializers.Serializer, error) {
    c := &serializers.Config{}

    if node, ok := tbl.Fields["data_format"]; ok {
        if kv, ok := node.(*ast.KeyValue); ok {
            if str, ok := kv.Value.(*ast.String); ok {
                c.DataFormat = str.Value
            }
        }
    }

    if c.DataFormat == "" {
        c.DataFormat = "influx"
    }

    if node, ok := tbl.Fields["prefix"]; ok {
        if kv, ok := node.(*ast.KeyValue); ok {
            if str, ok := kv.Value.(*ast.String); ok {
                c.Prefix = str.Value
            }
        }
    }

    delete(tbl.Fields, "data_format")
    delete(tbl.Fields, "prefix")
    return serializers.NewSerializer(c)
}

// buildOutput parses output specific items from the ast.Table, builds the filter and returns an
// internal_models.OutputConfig to be inserted into internal_models.RunningOutput
// Note: error exists in the return for future calls that might require error
func buildOutput(name string, tbl *ast.Table) (*internal_models.OutputConfig, error) {
    oc := &internal_models.OutputConfig{
        Name:   name,
        Filter: buildFilter(tbl),
    }
    // Outputs don't support FieldDrop/FieldPass, so set to NameDrop/NamePass
    if len(oc.Filter.FieldDrop) > 0 {
        oc.Filter.NameDrop = oc.Filter.FieldDrop
    }
    if len(oc.Filter.FieldPass) > 0 {
        oc.Filter.NamePass = oc.Filter.FieldPass
    }
    return oc, nil
}
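For orientation, here is a sketch of how a caller might drive this package,
using only the exported functions defined above; the file paths are
illustrative, not prescribed by the package:

```go
package main

import (
    "fmt"
    "log"

    "github.com/influxdata/telegraf/internal/config"
)

func main() {
    c := config.NewConfig()
    // Load a single file, then merge in any *.conf files from a directory,
    // mirroring what LoadConfig and LoadDirectory do above.
    if err := c.LoadConfig("/etc/telegraf/telegraf.conf"); err != nil {
        log.Fatal(err)
    }
    if err := c.LoadDirectory("/etc/telegraf/telegraf.d"); err != nil {
        log.Fatal(err)
    }
    fmt.Println("inputs:", c.InputNames())
    fmt.Println("outputs:", c.OutputNames())
    fmt.Println("global tags:", c.ListTags())
}
```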
129
internal/config/config_test.go
Normal file
@@ -0,0 +1,129 @@
package config

import (
    "testing"
    "time"

    "github.com/influxdata/telegraf/internal/models"
    "github.com/influxdata/telegraf/plugins/inputs"
    "github.com/influxdata/telegraf/plugins/inputs/exec"
    "github.com/influxdata/telegraf/plugins/inputs/memcached"
    "github.com/influxdata/telegraf/plugins/inputs/procstat"
    "github.com/influxdata/telegraf/plugins/parsers"
    "github.com/stretchr/testify/assert"
)

func TestConfig_LoadSingleInput(t *testing.T) {
    c := NewConfig()
    c.LoadConfig("./testdata/single_plugin.toml")

    memcached := inputs.Inputs["memcached"]().(*memcached.Memcached)
    memcached.Servers = []string{"localhost"}

    mConfig := &internal_models.InputConfig{
        Name: "memcached",
        Filter: internal_models.Filter{
            NameDrop:  []string{"metricname2"},
            NamePass:  []string{"metricname1"},
            FieldDrop: []string{"other", "stuff"},
            FieldPass: []string{"some", "strings"},
            TagDrop: []internal_models.TagFilter{
                internal_models.TagFilter{
                    Name:   "badtag",
                    Filter: []string{"othertag"},
                },
            },
            TagPass: []internal_models.TagFilter{
                internal_models.TagFilter{
                    Name:   "goodtag",
                    Filter: []string{"mytag"},
                },
            },
            IsActive: true,
        },
        Interval: 5 * time.Second,
    }
    mConfig.Tags = make(map[string]string)

    assert.Equal(t, memcached, c.Inputs[0].Input,
        "Testdata did not produce a correct memcached struct.")
    assert.Equal(t, mConfig, c.Inputs[0].Config,
        "Testdata did not produce correct memcached metadata.")
}

func TestConfig_LoadDirectory(t *testing.T) {
    c := NewConfig()
    err := c.LoadConfig("./testdata/single_plugin.toml")
    if err != nil {
        t.Error(err)
    }
    err = c.LoadDirectory("./testdata/subconfig")
    if err != nil {
        t.Error(err)
    }

    memcached := inputs.Inputs["memcached"]().(*memcached.Memcached)
    memcached.Servers = []string{"localhost"}

    mConfig := &internal_models.InputConfig{
        Name: "memcached",
        Filter: internal_models.Filter{
            NameDrop:  []string{"metricname2"},
            NamePass:  []string{"metricname1"},
            FieldDrop: []string{"other", "stuff"},
            FieldPass: []string{"some", "strings"},
            TagDrop: []internal_models.TagFilter{
                internal_models.TagFilter{
                    Name:   "badtag",
                    Filter: []string{"othertag"},
                },
            },
            TagPass: []internal_models.TagFilter{
                internal_models.TagFilter{
                    Name:   "goodtag",
                    Filter: []string{"mytag"},
                },
            },
            IsActive: true,
        },
        Interval: 5 * time.Second,
    }
    mConfig.Tags = make(map[string]string)

    assert.Equal(t, memcached, c.Inputs[0].Input,
        "Testdata did not produce a correct memcached struct.")
    assert.Equal(t, mConfig, c.Inputs[0].Config,
        "Testdata did not produce correct memcached metadata.")

    ex := inputs.Inputs["exec"]().(*exec.Exec)
    p, err := parsers.NewJSONParser("exec", nil, nil)
    assert.NoError(t, err)
    ex.SetParser(p)
    ex.Command = "/usr/bin/myothercollector --foo=bar"
    eConfig := &internal_models.InputConfig{
        Name:              "exec",
        MeasurementSuffix: "_myothercollector",
    }
    eConfig.Tags = make(map[string]string)
    assert.Equal(t, ex, c.Inputs[1].Input,
        "Merged Testdata did not produce a correct exec struct.")
    assert.Equal(t, eConfig, c.Inputs[1].Config,
        "Merged Testdata did not produce correct exec metadata.")

    memcached.Servers = []string{"192.168.1.1"}
    assert.Equal(t, memcached, c.Inputs[2].Input,
        "Testdata did not produce a correct memcached struct.")
    assert.Equal(t, mConfig, c.Inputs[2].Config,
        "Testdata did not produce correct memcached metadata.")

    pstat := inputs.Inputs["procstat"]().(*procstat.Procstat)
    pstat.PidFile = "/var/run/grafana-server.pid"

    pConfig := &internal_models.InputConfig{Name: "procstat"}
    pConfig.Tags = make(map[string]string)

    assert.Equal(t, pstat, c.Inputs[3].Input,
        "Merged Testdata did not produce a correct procstat struct.")
    assert.Equal(t, pConfig, c.Inputs[3].Config,
        "Merged Testdata did not produce correct procstat metadata.")
}
11
internal/config/testdata/single_plugin.toml
vendored
Normal file
@@ -0,0 +1,11 @@
[[inputs.memcached]]
  servers = ["localhost"]
  namepass = ["metricname1"]
  namedrop = ["metricname2"]
  fieldpass = ["some", "strings"]
  fielddrop = ["other", "stuff"]
  interval = "5s"
  [inputs.memcached.tagpass]
    goodtag = ["mytag"]
  [inputs.memcached.tagdrop]
    badtag = ["othertag"]
4
internal/config/testdata/subconfig/exec.conf
vendored
Normal file
@@ -0,0 +1,4 @@
[[inputs.exec]]
  # the command to run
  command = "/usr/bin/myothercollector --foo=bar"
  name_suffix = "_myothercollector"
11
internal/config/testdata/subconfig/memcached.conf
vendored
Normal file
@@ -0,0 +1,11 @@
[[inputs.memcached]]
  servers = ["192.168.1.1"]
  namepass = ["metricname1"]
  namedrop = ["metricname2"]
  pass = ["some", "strings"]
  drop = ["other", "stuff"]
  interval = "5s"
  [inputs.memcached.tagpass]
    goodtag = ["mytag"]
  [inputs.memcached.tagdrop]
    badtag = ["othertag"]
2
internal/config/testdata/subconfig/procstat.conf
vendored
Normal file
@@ -0,0 +1,2 @@
[[inputs.procstat]]
  pid_file = "/var/run/grafana-server.pid"
310
internal/config/testdata/telegraf-agent.toml
vendored
Normal file
@@ -0,0 +1,310 @@
# Telegraf configuration

# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared inputs.

# Even if a plugin has no configuration, it must be declared in here
# to be active. Declaring a plugin means just specifying the name
# as a section with no variables. To deactivate a plugin, comment
# out the name and any variables.

# Use 'telegraf -config telegraf.toml -test' to see what metrics a config
# file would generate.

# One rule that plugins conform to is wherever a connection string
# can be passed, the values '' and 'localhost' are treated specially.
# They indicate to the plugin to use their own builtin configuration to
# connect to the local system.

# NOTE: The configuration has a few required parameters. They are marked
# with 'required'. Be sure to edit those to make this configuration work.

# Tags can also be specified via a normal map, but only one form at a time:
[global_tags]
  dc = "us-east-1"

# Configuration for telegraf agent
[agent]
  # Default data collection interval for all plugins
  interval = "10s"

  # run telegraf in debug mode
  debug = false

  # Override default hostname, if empty use os.Hostname()
  hostname = ""


###############################################################################
#                                   OUTPUTS                                   #
###############################################################################

# Configuration for influxdb server to send metrics to
[[outputs.influxdb]]
  # The full HTTP endpoint URL for your InfluxDB instance
  # Multiple urls can be specified for InfluxDB cluster support. The server to
  # write to will be randomly chosen each interval.
  urls = ["http://localhost:8086"] # required.

  # The target database for metrics. This database must already exist
  database = "telegraf" # required.

[[outputs.influxdb]]
  urls = ["udp://localhost:8089"]
  database = "udp-telegraf"

# Configuration for the Kafka server to send metrics to
[[outputs.kafka]]
  # URLs of kafka brokers
  brokers = ["localhost:9092"]
  # Kafka topic for producer messages
  topic = "telegraf"
  # Telegraf tag to use as a routing key
  # i.e., if this tag exists, its value will be used as the routing key
  routing_tag = "host"


###############################################################################
#                                   PLUGINS                                   #
###############################################################################

# Read Apache status information (mod_status)
[[inputs.apache]]
  # An array of Apache status URIs to gather stats from.
  urls = ["http://localhost/server-status?auto"]

# Read metrics about cpu usage
[[inputs.cpu]]
  # Whether to report per-cpu stats or not
  percpu = true
  # Whether to report total system cpu stats or not
  totalcpu = true
  # Comment this line if you want the raw CPU time metrics
  drop = ["cpu_time"]

# Read metrics about disk usage by mount point
[[inputs.diskio]]
  # no configuration

# Read metrics from one or many disque servers
[[inputs.disque]]
  # An array of URIs to gather stats about. Specify an ip or hostname
  # with optional port and password, e.g. disque://localhost, disque://10.10.3.33:18832,
  # 10.0.0.1:10000, etc.
  #
  # If no servers are specified, then localhost is used as the host.
  servers = ["localhost"]

# Read stats from one or more Elasticsearch servers or clusters
[[inputs.elasticsearch]]
  # specify a list of one or more Elasticsearch servers
  servers = ["http://localhost:9200"]

  # set local to false when you want to read the indices stats from all nodes
  # within the cluster
  local = true

# Read flattened metrics from one or more commands that output JSON to stdout
[[inputs.exec]]
  # the command to run
  command = "/usr/bin/mycollector --foo=bar"
  name_suffix = "_mycollector"

# Read metrics of haproxy, via socket or csv stats page
[[inputs.haproxy]]
  # An array of addresses to gather stats about. Specify an ip or hostname
  # with optional port, e.g. localhost, 10.10.3.33:1936, etc.
  #
  # If no servers are specified, then default to 127.0.0.1:1936
  servers = ["http://myhaproxy.com:1936", "http://anotherhaproxy.com:1936"]
  # Or you can also use a local socket (not working yet)
  # servers = ["socket:/run/haproxy/admin.sock"]

# Read flattened metrics from one or more JSON HTTP endpoints
[[inputs.httpjson]]
  # a name for the service being polled
  name = "webserver_stats"

  # URL of each server in the service's cluster
  servers = [
    "http://localhost:9999/stats/",
    "http://localhost:9998/stats/",
  ]

  # HTTP method to use (case-sensitive)
  method = "GET"

  # HTTP parameters (all values must be strings)
  [httpjson.parameters]
    event_type = "cpu_spike"
    threshold = "0.75"

# Read metrics about disk IO by device
[[inputs.diskio]]
  # no configuration

# read metrics from a Kafka topic
[[inputs.kafka_consumer]]
  # topic(s) to consume
  topics = ["telegraf"]
  # an array of Zookeeper connection strings
  zookeeper_peers = ["localhost:2181"]
  # the name of the consumer group
  consumer_group = "telegraf_metrics_consumers"
  # Maximum number of points to buffer between collection intervals
  point_buffer = 100000
  # Offset (must be either "oldest" or "newest")
  offset = "oldest"

# Read metrics from a LeoFS Server via SNMP
[[inputs.leofs]]
  # An array of URIs to gather stats about LeoFS.
  # Specify an ip or hostname with port, e.g. 127.0.0.1:4020
  #
  # If no servers are specified, then 127.0.0.1 is used as the host and 4020 as the port.
  servers = ["127.0.0.1:4021"]

# Read metrics from local Lustre service on OST, MDS
[[inputs.lustre2]]
  # An array of /proc globs to search for Lustre stats
  # If not specified, the default will work on Lustre 2.5.x
  #
  # ost_procfiles = ["/proc/fs/lustre/obdfilter/*/stats", "/proc/fs/lustre/osd-ldiskfs/*/stats"]
  # mds_procfiles = ["/proc/fs/lustre/mdt/*/md_stats"]

# Read metrics about memory usage
[[inputs.mem]]
  # no configuration

# Read metrics from one or many memcached servers
[[inputs.memcached]]
  # An array of addresses to gather stats about. Specify an ip or hostname
  # with optional port, e.g. localhost, 10.0.0.1:11211, etc.
  #
  # If no servers are specified, then localhost is used as the host.
  servers = ["localhost"]

# Telegraf plugin for gathering metrics from N Mesos masters
[[inputs.mesos]]
  # Timeout, in ms.
  timeout = 100
  # A list of Mesos masters, default value is localhost:5050.
  masters = ["localhost:5050"]
  # Metrics groups to be collected, by default, all enabled.
  master_collections = ["resources","master","system","slaves","frameworks","messages","evqueue","registrar"]

# Read metrics from one or many MongoDB servers
[[inputs.mongodb]]
  # An array of URIs to gather stats about. Specify an ip or hostname
  # with optional port and password, e.g. mongodb://user:auth_key@10.10.3.30:27017,
  # mongodb://10.10.3.33:18832, 10.0.0.1:10000, etc.
  #
  # If no servers are specified, then 127.0.0.1 is used as the host and 27017 as the port.
  servers = ["127.0.0.1:27017"]

# Read metrics from one or many mysql servers
[[inputs.mysql]]
  # specify servers via a url matching:
  #   [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify]]
  # e.g.
  #   servers = ["root:root@http://10.0.0.18/?tls=false"]
  #   servers = ["root:passwd@tcp(127.0.0.1:3306)/"]
  #
  # If no servers are specified, then localhost is used as the host.
  servers = ["localhost"]

# Read metrics about network interface usage
[[inputs.net]]
  # By default, telegraf gathers stats from any up interface (excluding loopback)
  # Setting interfaces will tell it to gather these explicit interfaces,
  # regardless of status.
  #
  # interfaces = ["eth0", ... ]

# Read Nginx's basic status information (ngx_http_stub_status_module)
[[inputs.nginx]]
  # An array of Nginx stub_status URIs to gather stats from.
  urls = ["http://localhost/status"]

# Ping given url(s) and return statistics
[[inputs.ping]]
  # urls to ping
  urls = ["www.google.com"] # required
  # number of pings to send (ping -c <COUNT>)
  count = 1 # required
  # interval, in s, at which to ping. 0 == default (ping -i <PING_INTERVAL>)
  ping_interval = 0.0
  # ping timeout, in s. 0 == no timeout (ping -t <TIMEOUT>)
  timeout = 0.0
  # interface to send ping from (ping -I <INTERFACE>)
  interface = ""

# Read metrics from one or many postgresql servers
[[inputs.postgresql]]
  # specify address via a url matching:
  #   postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full]
  # or a simple string:
  #   host=localhost user=pqotest password=... sslmode=... dbname=app_production
  #
  # All connection parameters are optional. By default, the host is localhost
  # and the user is the currently running user. For localhost, we default
  # to sslmode=disable as well.
  #
  # Without the dbname parameter, the driver will default to a database
  # with the same name as the user. This dbname is just for instantiating a
  # connection with the server and doesn't restrict the databases we are trying
  # to grab metrics for.
  #

  address = "sslmode=disable"

  # A list of databases to pull metrics about. If not specified, metrics for all
  # databases are gathered.

  # databases = ["app_production", "blah_testing"]

  # [[postgresql.servers]]
  # address = "influx@remoteserver"

# Read metrics from one or many prometheus clients
[[inputs.prometheus]]
  # An array of urls to scrape metrics from.
  urls = ["http://localhost:9100/metrics"]

# Read metrics from one or many RabbitMQ servers via the management API
[[inputs.rabbitmq]]
  # Specify servers via an array of tables
  # name = "rmq-server-1" # optional tag
  # url = "http://localhost:15672"
  # username = "guest"
  # password = "guest"

  # A list of nodes to pull metrics about. If not specified, metrics for
  # all nodes are gathered.
  # nodes = ["rabbit@node1", "rabbit@node2"]

# Read metrics from one or many redis servers
[[inputs.redis]]
  # An array of URIs to gather stats about. Specify an ip or hostname
  # with optional port and password, e.g. redis://localhost, redis://10.10.3.33:18832,
  # 10.0.0.1:10000, etc.
  #
  # If no servers are specified, then localhost is used as the host.
  servers = ["localhost"]

# Read metrics from one or many RethinkDB servers
[[inputs.rethinkdb]]
  # An array of URIs to gather stats about. Specify an ip or hostname
  # with optional port and password, e.g. rethinkdb://user:auth_key@10.10.3.30:28105,
  # rethinkdb://10.10.3.33:18832, 10.0.0.1:10000, etc.
  #
  # If no servers are specified, then 127.0.0.1 is used as the host and 28015 as the port.
  servers = ["127.0.0.1:28015"]

# Read metrics about swap memory usage
[[inputs.swap]]
  # no configuration

# Read metrics about system load & uptime
[[inputs.system]]
  # no configuration
177 internal/internal.go Normal file
@@ -0,0 +1,177 @@
package internal

import (
	"bufio"
	"crypto/rand"
	"crypto/tls"
	"crypto/x509"
	"errors"
	"fmt"
	"io/ioutil"
	"os"
	"strings"
	"time"
)

const alphanum string = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"

// Duration just wraps time.Duration
type Duration struct {
	Duration time.Duration
}

// UnmarshalTOML parses the duration from the TOML config file
func (d *Duration) UnmarshalTOML(b []byte) error {
	dur, err := time.ParseDuration(string(b[1 : len(b)-1]))
	if err != nil {
		return err
	}

	d.Duration = dur

	return nil
}
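Hypothetically, the unmarshaler above can be exercised on the same quoted string the agent config uses for its interval; this is a minimal sketch, and only the `internal` import path is taken from this diff:

```go
package main

import (
	"fmt"

	"github.com/influxdata/telegraf/internal"
)

func main() {
	var d internal.Duration
	// The raw TOML value arrives still quoted, e.g. `"10s"`; UnmarshalTOML
	// slices off the surrounding quotes before calling time.ParseDuration.
	if err := d.UnmarshalTOML([]byte(`"10s"`)); err != nil {
		panic(err)
	}
	fmt.Println(d.Duration) // prints: 10s
}
```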
var NotImplementedError = errors.New("not implemented yet")

// ReadLines reads contents from a file and splits them by new lines.
// A convenience wrapper to ReadLinesOffsetN(filename, 0, -1).
func ReadLines(filename string) ([]string, error) {
	return ReadLinesOffsetN(filename, 0, -1)
}

// ReadLinesOffsetN reads contents from a file and splits them by new line.
// The offset tells at which line number to start.
// The count determines the number of lines to read (starting from offset):
//   n >= 0: at most n lines
//   n < 0: whole file
func ReadLinesOffsetN(filename string, offset uint, n int) ([]string, error) {
	f, err := os.Open(filename)
	if err != nil {
		return []string{""}, err
	}
	defer f.Close()

	var ret []string

	r := bufio.NewReader(f)
	for i := 0; i < n+int(offset) || n < 0; i++ {
		line, err := r.ReadString('\n')
		if err != nil {
			break
		}
		if i < int(offset) {
			continue
		}
		ret = append(ret, strings.Trim(line, "\n"))
	}

	return ret, nil
}

// RandomString returns a random string of alpha-numeric characters
func RandomString(n int) string {
	var bytes = make([]byte, n)
	rand.Read(bytes)
	for i, b := range bytes {
		bytes[i] = alphanum[b%byte(len(alphanum))]
	}
	return string(bytes)
}

// GetTLSConfig gets a tls.Config object from the given certs, key, and CA files.
// You must give the full path to the files.
// If all files are blank and InsecureSkipVerify=false, returns a nil pointer.
func GetTLSConfig(
	SSLCert, SSLKey, SSLCA string,
	InsecureSkipVerify bool,
) (*tls.Config, error) {
	t := &tls.Config{}
	if SSLCert != "" && SSLKey != "" && SSLCA != "" {
		cert, err := tls.LoadX509KeyPair(SSLCert, SSLKey)
		if err != nil {
			return nil, fmt.Errorf(
				"Could not load TLS client key/certificate: %s",
				err)
		}

		caCert, err := ioutil.ReadFile(SSLCA)
		if err != nil {
			return nil, fmt.Errorf("Could not load TLS CA: %s",
				err)
		}

		caCertPool := x509.NewCertPool()
		caCertPool.AppendCertsFromPEM(caCert)

		t = &tls.Config{
			Certificates:       []tls.Certificate{cert},
			RootCAs:            caCertPool,
			InsecureSkipVerify: InsecureSkipVerify,
		}
	} else {
		if InsecureSkipVerify {
			t.InsecureSkipVerify = true
		} else {
			return nil, nil
		}
	}
	// will be nil by default if nothing is provided
	return t, nil
}
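A minimal sketch of wiring GetTLSConfig into an HTTP client; the certificate paths below are hypothetical placeholders, and the nil-config behavior is the one documented above:

```go
package main

import (
	"log"
	"net/http"

	"github.com/influxdata/telegraf/internal"
)

func main() {
	// Hypothetical certificate paths; GetTLSConfig expects full paths.
	tlsCfg, err := internal.GetTLSConfig(
		"/etc/telegraf/client.pem", // SSLCert
		"/etc/telegraf/client.key", // SSLKey
		"/etc/telegraf/ca.pem",     // SSLCA
		false,                      // InsecureSkipVerify
	)
	if err != nil {
		log.Fatal(err)
	}
	// If all paths were blank (and InsecureSkipVerify were false), tlsCfg
	// would be nil, which http.Transport treats as "use default TLS settings".
	client := &http.Client{
		Transport: &http.Transport{TLSClientConfig: tlsCfg},
	}
	_ = client
}
```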
// Glob will test a string pattern, potentially containing globs, against a
// subject string. The result is a simple true/false, determining whether or
// not the glob pattern matched the subject text.
//
// Adapted from https://github.com/ryanuber/go-glob/blob/master/glob.go
// thanks Ryan Uber!
func Glob(pattern, measurement string) bool {
	// Empty pattern can only match empty subject
	if pattern == "" {
		return measurement == pattern
	}

	// If the pattern _is_ a glob, it matches everything
	if pattern == "*" {
		return true
	}

	parts := strings.Split(pattern, "*")

	if len(parts) == 1 {
		// No globs in pattern, so test for match
		return pattern == measurement
	}

	leadingGlob := strings.HasPrefix(pattern, "*")
	trailingGlob := strings.HasSuffix(pattern, "*")
	end := len(parts) - 1

	for i, part := range parts {
		switch i {
		case 0:
			if leadingGlob {
				continue
			}
			if !strings.HasPrefix(measurement, part) {
				return false
			}
		case end:
			if len(measurement) > 0 {
				return trailingGlob || strings.HasSuffix(measurement, part)
			}
		default:
			if !strings.Contains(measurement, part) {
				return false
			}
		}

		// Trim evaluated text from measurement as we loop over the pattern.
		idx := strings.Index(measurement, part) + len(part)
		measurement = measurement[idx:]
	}

	// All parts of the pattern matched
	return true
}
44 internal/internal_test.go Normal file
@@ -0,0 +1,44 @@
package internal

import "testing"

func testGlobMatch(t *testing.T, pattern, subj string) {
	if !Glob(pattern, subj) {
		t.Errorf("%s should match %s", pattern, subj)
	}
}

func testGlobNoMatch(t *testing.T, pattern, subj string) {
	if Glob(pattern, subj) {
		t.Errorf("%s should not match %s", pattern, subj)
	}
}

func TestEmptyPattern(t *testing.T) {
	testGlobMatch(t, "", "")
	testGlobNoMatch(t, "", "test")
}

func TestPatternWithoutGlobs(t *testing.T) {
	testGlobMatch(t, "test", "test")
}

func TestGlob(t *testing.T) {
	for _, pattern := range []string{
		"*test",           // Leading glob
		"this*",           // Trailing glob
		"*is*a*",          // Lots of globs
		"**test**",        // Double glob characters
		"**is**a***test*", // Varying number of globs
	} {
		testGlobMatch(t, pattern, "this_is_a_test")
	}

	for _, pattern := range []string{
		"test*", // Implicit substring match should fail
		"*is",   // Partial match should fail
		"*no*",  // Globs without a match between them should fail
	} {
		testGlobNoMatch(t, pattern, "this_is_a_test")
	}
}
123 internal/models/filter.go Normal file
@@ -0,0 +1,123 @@
package internal_models

import (
	"strings"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/internal"
)

// TagFilter is the name of a tag, and the values on which to filter
type TagFilter struct {
	Name   string
	Filter []string
}

// Filter containing drop/pass and tagdrop/tagpass rules
type Filter struct {
	NameDrop []string
	NamePass []string

	FieldDrop []string
	FieldPass []string

	TagDrop []TagFilter
	TagPass []TagFilter

	IsActive bool
}

// ShouldMetricPass returns true only if both the metric's name and its tags
// pass the configured filters.
func (f Filter) ShouldMetricPass(metric telegraf.Metric) bool {
	if f.ShouldNamePass(metric.Name()) && f.ShouldTagsPass(metric.Tags()) {
		return true
	}
	return false
}

// ShouldNamePass returns true if the metric should pass, false if should drop
// based on the drop/pass filter parameters
func (f Filter) ShouldNamePass(key string) bool {
	if f.NamePass != nil {
		for _, pat := range f.NamePass {
			// TODO remove HasPrefix check, leaving it for now for legacy support.
			// Cam, 2015-12-07
			if strings.HasPrefix(key, pat) || internal.Glob(pat, key) {
				return true
			}
		}
		return false
	}

	if f.NameDrop != nil {
		for _, pat := range f.NameDrop {
			// TODO remove HasPrefix check, leaving it for now for legacy support.
			// Cam, 2015-12-07
			if strings.HasPrefix(key, pat) || internal.Glob(pat, key) {
				return false
			}
		}

		return true
	}
	return true
}

// ShouldFieldsPass returns true if the metric should pass, false if should drop
// based on the drop/pass filter parameters
func (f Filter) ShouldFieldsPass(key string) bool {
	if f.FieldPass != nil {
		for _, pat := range f.FieldPass {
			// TODO remove HasPrefix check, leaving it for now for legacy support.
			// Cam, 2015-12-07
			if strings.HasPrefix(key, pat) || internal.Glob(pat, key) {
				return true
			}
		}
		return false
	}

	if f.FieldDrop != nil {
		for _, pat := range f.FieldDrop {
			// TODO remove HasPrefix check, leaving it for now for legacy support.
			// Cam, 2015-12-07
			if strings.HasPrefix(key, pat) || internal.Glob(pat, key) {
				return false
			}
		}

		return true
	}
	return true
}

// ShouldTagsPass returns true if the metric should pass, false if should drop
// based on the tagdrop/tagpass filter parameters
func (f Filter) ShouldTagsPass(tags map[string]string) bool {
	if f.TagPass != nil {
		for _, pat := range f.TagPass {
			if tagval, ok := tags[pat.Name]; ok {
				for _, filter := range pat.Filter {
					if internal.Glob(filter, tagval) {
						return true
					}
				}
			}
		}
		return false
	}

	if f.TagDrop != nil {
		for _, pat := range f.TagDrop {
			if tagval, ok := tags[pat.Name]; ok {
				for _, filter := range pat.Filter {
					if internal.Glob(filter, tagval) {
						return false
					}
				}
			}
		}
		return true
	}

	return true
}
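A minimal sketch of how NamePass and TagPass compose through ShouldMetricPass, using glob patterns taken from the tests below; the metric constructor is the NewMetric added in metric.go later in this diff, and the values are made up:

```go
package main

import (
	"fmt"
	"time"

	"github.com/influxdata/telegraf"
	internal_models "github.com/influxdata/telegraf/internal/models"
)

func main() {
	f := internal_models.Filter{
		NamePass: []string{"cpu*"},
		TagPass: []internal_models.TagFilter{
			{Name: "cpu", Filter: []string{"cpu-*"}},
		},
		IsActive: true,
	}

	m, _ := telegraf.NewMetric(
		"cpu_usage_idle",
		map[string]string{"cpu": "cpu-total"},
		map[string]interface{}{"value": float64(99)},
		time.Now(),
	)

	// "cpu_usage_idle" matches the "cpu*" name glob, and the cpu tag value
	// "cpu-total" matches "cpu-*", so both checks succeed.
	fmt.Println(f.ShouldMetricPass(m)) // prints: true
}
```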
243 internal/models/filter_test.go Normal file
@@ -0,0 +1,243 @@
package internal_models

import (
	"testing"
)

func TestFilter_Empty(t *testing.T) {
	f := Filter{}

	measurements := []string{
		"foo",
		"bar",
		"barfoo",
		"foo_bar",
		"foo.bar",
		"foo-bar",
		"supercalifradjulisticexpialidocious",
	}

	for _, measurement := range measurements {
		if !f.ShouldFieldsPass(measurement) {
			t.Errorf("Expected measurement %s to pass", measurement)
		}
	}
}

func TestFilter_NamePass(t *testing.T) {
	f := Filter{
		NamePass: []string{"foo*", "cpu_usage_idle"},
	}

	passes := []string{
		"foo",
		"foo_bar",
		"foo.bar",
		"foo-bar",
		"cpu_usage_idle",
	}

	drops := []string{
		"bar",
		"barfoo",
		"bar_foo",
		"cpu_usage_busy",
	}

	for _, measurement := range passes {
		if !f.ShouldNamePass(measurement) {
			t.Errorf("Expected measurement %s to pass", measurement)
		}
	}

	for _, measurement := range drops {
		if f.ShouldNamePass(measurement) {
			t.Errorf("Expected measurement %s to drop", measurement)
		}
	}
}

func TestFilter_NameDrop(t *testing.T) {
	f := Filter{
		NameDrop: []string{"foo*", "cpu_usage_idle"},
	}

	drops := []string{
		"foo",
		"foo_bar",
		"foo.bar",
		"foo-bar",
		"cpu_usage_idle",
	}

	passes := []string{
		"bar",
		"barfoo",
		"bar_foo",
		"cpu_usage_busy",
	}

	for _, measurement := range passes {
		if !f.ShouldNamePass(measurement) {
			t.Errorf("Expected measurement %s to pass", measurement)
		}
	}

	for _, measurement := range drops {
		if f.ShouldNamePass(measurement) {
			t.Errorf("Expected measurement %s to drop", measurement)
		}
	}
}

func TestFilter_FieldPass(t *testing.T) {
	f := Filter{
		FieldPass: []string{"foo*", "cpu_usage_idle"},
	}

	passes := []string{
		"foo",
		"foo_bar",
		"foo.bar",
		"foo-bar",
		"cpu_usage_idle",
	}

	drops := []string{
		"bar",
		"barfoo",
		"bar_foo",
		"cpu_usage_busy",
	}

	for _, measurement := range passes {
		if !f.ShouldFieldsPass(measurement) {
			t.Errorf("Expected measurement %s to pass", measurement)
		}
	}

	for _, measurement := range drops {
		if f.ShouldFieldsPass(measurement) {
			t.Errorf("Expected measurement %s to drop", measurement)
		}
	}
}

func TestFilter_FieldDrop(t *testing.T) {
	f := Filter{
		FieldDrop: []string{"foo*", "cpu_usage_idle"},
	}

	drops := []string{
		"foo",
		"foo_bar",
		"foo.bar",
		"foo-bar",
		"cpu_usage_idle",
	}

	passes := []string{
		"bar",
		"barfoo",
		"bar_foo",
		"cpu_usage_busy",
	}

	for _, measurement := range passes {
		if !f.ShouldFieldsPass(measurement) {
			t.Errorf("Expected measurement %s to pass", measurement)
		}
	}

	for _, measurement := range drops {
		if f.ShouldFieldsPass(measurement) {
			t.Errorf("Expected measurement %s to drop", measurement)
		}
	}
}

func TestFilter_TagPass(t *testing.T) {
	filters := []TagFilter{
		TagFilter{
			Name:   "cpu",
			Filter: []string{"cpu-*"},
		},
		TagFilter{
			Name:   "mem",
			Filter: []string{"mem_free"},
		}}
	f := Filter{
		TagPass: filters,
	}

	passes := []map[string]string{
		{"cpu": "cpu-total"},
		{"cpu": "cpu-0"},
		{"cpu": "cpu-1"},
		{"cpu": "cpu-2"},
		{"mem": "mem_free"},
	}

	drops := []map[string]string{
		{"cpu": "cputotal"},
		{"cpu": "cpu0"},
		{"cpu": "cpu1"},
		{"cpu": "cpu2"},
		{"mem": "mem_used"},
	}

	for _, tags := range passes {
		if !f.ShouldTagsPass(tags) {
			t.Errorf("Expected tags %v to pass", tags)
		}
	}

	for _, tags := range drops {
		if f.ShouldTagsPass(tags) {
			t.Errorf("Expected tags %v to drop", tags)
		}
	}
}

func TestFilter_TagDrop(t *testing.T) {
	filters := []TagFilter{
		TagFilter{
			Name:   "cpu",
			Filter: []string{"cpu-*"},
		},
		TagFilter{
			Name:   "mem",
			Filter: []string{"mem_free"},
		}}
	f := Filter{
		TagDrop: filters,
	}

	drops := []map[string]string{
		{"cpu": "cpu-total"},
		{"cpu": "cpu-0"},
		{"cpu": "cpu-1"},
		{"cpu": "cpu-2"},
		{"mem": "mem_free"},
	}

	passes := []map[string]string{
		{"cpu": "cputotal"},
		{"cpu": "cpu0"},
		{"cpu": "cpu1"},
		{"cpu": "cpu2"},
		{"mem": "mem_used"},
	}

	for _, tags := range passes {
		if !f.ShouldTagsPass(tags) {
			t.Errorf("Expected tags %v to pass", tags)
		}
	}

	for _, tags := range drops {
		if f.ShouldTagsPass(tags) {
			t.Errorf("Expected tags %v to drop", tags)
		}
	}
}
24 internal/models/running_input.go Normal file
@@ -0,0 +1,24 @@
package internal_models

import (
	"time"

	"github.com/influxdata/telegraf"
)

type RunningInput struct {
	Name   string
	Input  telegraf.Input
	Config *InputConfig
}

// InputConfig containing a name, interval, and filter
type InputConfig struct {
	Name              string
	NameOverride      string
	MeasurementPrefix string
	MeasurementSuffix string
	Tags              map[string]string
	Filter            Filter
	Interval          time.Duration
}
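For illustration, a hypothetical InputConfig literal showing how these fields fit together; the values are made up, though `dc = "us-east-1"` mirrors the sample config above:

```go
package main

import (
	"time"

	internal_models "github.com/influxdata/telegraf/internal/models"
)

func main() {
	// Hypothetical per-plugin configuration: collect every 30s, prefix the
	// measurement name, and attach an extra tag to every point. The zero
	// value of Filter (IsActive=false) means no filtering is applied.
	conf := &internal_models.InputConfig{
		Name:              "cpu",
		MeasurementPrefix: "prod_",
		Tags:              map[string]string{"dc": "us-east-1"},
		Interval:          30 * time.Second,
	}
	_ = conf
}
```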
138 internal/models/running_output.go Normal file
@@ -0,0 +1,138 @@
package internal_models

import (
	"log"
	"sync"
	"time"

	"github.com/influxdata/telegraf"
)

const (
	// Default number of metrics kept between flushes.
	DEFAULT_METRIC_BUFFER_LIMIT = 10000

	// Limit how many full metric buffers are kept due to failed writes.
	FULL_METRIC_BUFFERS_LIMIT = 100
)

type RunningOutput struct {
	Name                string
	Output              telegraf.Output
	Config              *OutputConfig
	Quiet               bool
	MetricBufferLimit   int
	FlushBufferWhenFull bool

	metrics    []telegraf.Metric
	tmpmetrics map[int][]telegraf.Metric
	overwriteI int
	mapI       int

	sync.Mutex
}

func NewRunningOutput(
	name string,
	output telegraf.Output,
	conf *OutputConfig,
) *RunningOutput {
	ro := &RunningOutput{
		Name:              name,
		metrics:           make([]telegraf.Metric, 0),
		tmpmetrics:        make(map[int][]telegraf.Metric),
		Output:            output,
		Config:            conf,
		MetricBufferLimit: DEFAULT_METRIC_BUFFER_LIMIT,
	}
	return ro
}

// AddMetric adds a metric to the output. This function can also write cached
// points if FlushBufferWhenFull is true.
func (ro *RunningOutput) AddMetric(metric telegraf.Metric) {
	if ro.Config.Filter.IsActive {
		if !ro.Config.Filter.ShouldMetricPass(metric) {
			return
		}
	}
	ro.Lock()
	defer ro.Unlock()

	if len(ro.metrics) < ro.MetricBufferLimit {
		ro.metrics = append(ro.metrics, metric)
	} else {
		if ro.FlushBufferWhenFull {
			ro.metrics = append(ro.metrics, metric)
			tmpmetrics := make([]telegraf.Metric, len(ro.metrics))
			copy(tmpmetrics, ro.metrics)
			ro.metrics = make([]telegraf.Metric, 0)
			err := ro.write(tmpmetrics)
			if err != nil {
				log.Printf("ERROR writing full metric buffer to output %s, %s",
					ro.Name, err)
				if len(ro.tmpmetrics) == FULL_METRIC_BUFFERS_LIMIT {
					ro.mapI = 0
					// overwrite one
					ro.tmpmetrics[ro.mapI] = tmpmetrics
					ro.mapI++
				} else {
					ro.tmpmetrics[ro.mapI] = tmpmetrics
					ro.mapI++
				}
			}
		} else {
			log.Printf("WARNING: overwriting cached metrics, you may want to " +
				"increase the metric_buffer_limit setting in your [agent] " +
				"config if you do not wish to overwrite metrics.\n")
			if ro.overwriteI == len(ro.metrics) {
				ro.overwriteI = 0
			}
			ro.metrics[ro.overwriteI] = metric
			ro.overwriteI++
		}
	}
}

// Write writes all cached points to this output.
func (ro *RunningOutput) Write() error {
	ro.Lock()
	defer ro.Unlock()
	err := ro.write(ro.metrics)
	if err != nil {
		return err
	} else {
		ro.metrics = make([]telegraf.Metric, 0)
		ro.overwriteI = 0
	}

	// Write any cached metric buffers that failed previously
	for i, tmpmetrics := range ro.tmpmetrics {
		if err := ro.write(tmpmetrics); err != nil {
			return err
		} else {
			delete(ro.tmpmetrics, i)
		}
	}

	return nil
}

func (ro *RunningOutput) write(metrics []telegraf.Metric) error {
	start := time.Now()
	err := ro.Output.Write(metrics)
	elapsed := time.Since(start)
	if err == nil {
		if !ro.Quiet {
			log.Printf("Wrote %d metrics to output %s in %s\n",
				len(metrics), ro.Name, elapsed)
		}
	}
	return err
}

// OutputConfig containing name and filter
type OutputConfig struct {
	Name   string
	Filter Filter
}
265 internal/models/running_output_test.go Normal file
@@ -0,0 +1,265 @@
package internal_models

import (
	"fmt"
	"sort"
	"sync"
	"testing"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/testutil"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

var first5 = []telegraf.Metric{
	testutil.TestMetric(101, "metric1"),
	testutil.TestMetric(101, "metric2"),
	testutil.TestMetric(101, "metric3"),
	testutil.TestMetric(101, "metric4"),
	testutil.TestMetric(101, "metric5"),
}

var next5 = []telegraf.Metric{
	testutil.TestMetric(101, "metric6"),
	testutil.TestMetric(101, "metric7"),
	testutil.TestMetric(101, "metric8"),
	testutil.TestMetric(101, "metric9"),
	testutil.TestMetric(101, "metric10"),
}

// Test that we can write metrics with simple default setup.
func TestRunningOutputDefault(t *testing.T) {
	conf := &OutputConfig{
		Filter: Filter{
			IsActive: false,
		},
	}

	m := &mockOutput{}
	ro := NewRunningOutput("test", m, conf)

	for _, metric := range first5 {
		ro.AddMetric(metric)
	}
	for _, metric := range next5 {
		ro.AddMetric(metric)
	}
	assert.Len(t, m.Metrics(), 0)

	err := ro.Write()
	assert.NoError(t, err)
	assert.Len(t, m.Metrics(), 10)
}

// Test that the first metric gets overwritten if there is a buffer overflow.
func TestRunningOutputOverwrite(t *testing.T) {
	conf := &OutputConfig{
		Filter: Filter{
			IsActive: false,
		},
	}

	m := &mockOutput{}
	ro := NewRunningOutput("test", m, conf)
	ro.MetricBufferLimit = 4

	for _, metric := range first5 {
		ro.AddMetric(metric)
	}
	require.Len(t, m.Metrics(), 0)

	err := ro.Write()
	require.NoError(t, err)
	require.Len(t, m.Metrics(), 4)

	var expected, actual []string
	for i, exp := range first5[1:] {
		expected = append(expected, exp.String())
		actual = append(actual, m.Metrics()[i].String())
	}

	sort.Strings(expected)
	sort.Strings(actual)

	assert.Equal(t, expected, actual)
}

// Test that multiple buffer overflows are handled properly.
func TestRunningOutputMultiOverwrite(t *testing.T) {
	conf := &OutputConfig{
		Filter: Filter{
			IsActive: false,
		},
	}

	m := &mockOutput{}
	ro := NewRunningOutput("test", m, conf)
	ro.MetricBufferLimit = 3

	for _, metric := range first5 {
		ro.AddMetric(metric)
	}
	for _, metric := range next5 {
		ro.AddMetric(metric)
	}
	require.Len(t, m.Metrics(), 0)

	err := ro.Write()
	require.NoError(t, err)
	require.Len(t, m.Metrics(), 3)

	var expected, actual []string
	for i, exp := range next5[2:] {
		expected = append(expected, exp.String())
		actual = append(actual, m.Metrics()[i].String())
	}

	sort.Strings(expected)
	sort.Strings(actual)

	assert.Equal(t, expected, actual)
}

// Test that running output doesn't flush until it's full when
// FlushBufferWhenFull is set.
func TestRunningOutputFlushWhenFull(t *testing.T) {
	conf := &OutputConfig{
		Filter: Filter{
			IsActive: false,
		},
	}

	m := &mockOutput{}
	ro := NewRunningOutput("test", m, conf)
	ro.FlushBufferWhenFull = true
	ro.MetricBufferLimit = 5

	// Fill buffer to limit
	for _, metric := range first5 {
		ro.AddMetric(metric)
	}
	// no flush yet
	assert.Len(t, m.Metrics(), 0)

	// add one more metric
	ro.AddMetric(next5[0])
	// now it flushed
	assert.Len(t, m.Metrics(), 6)

	// add one more metric and write it manually
	ro.AddMetric(next5[1])
	err := ro.Write()
	assert.NoError(t, err)
	assert.Len(t, m.Metrics(), 7)
}

// Test that running output doesn't flush until it's full when
// FlushBufferWhenFull is set, twice.
func TestRunningOutputMultiFlushWhenFull(t *testing.T) {
	conf := &OutputConfig{
		Filter: Filter{
			IsActive: false,
		},
	}

	m := &mockOutput{}
	ro := NewRunningOutput("test", m, conf)
	ro.FlushBufferWhenFull = true
	ro.MetricBufferLimit = 4

	// Fill buffer past limit twice
	for _, metric := range first5 {
		ro.AddMetric(metric)
	}
	for _, metric := range next5 {
		ro.AddMetric(metric)
	}
	// flushed twice
	assert.Len(t, m.Metrics(), 10)
}

func TestRunningOutputWriteFail(t *testing.T) {
	conf := &OutputConfig{
		Filter: Filter{
			IsActive: false,
		},
	}

	m := &mockOutput{}
	m.failWrite = true
	ro := NewRunningOutput("test", m, conf)
	ro.FlushBufferWhenFull = true
	ro.MetricBufferLimit = 4

	// Fill buffer past limit twice
	for _, metric := range first5 {
		ro.AddMetric(metric)
	}
	for _, metric := range next5 {
		ro.AddMetric(metric)
	}
	// no successful flush yet
	assert.Len(t, m.Metrics(), 0)

	// manual write fails
	err := ro.Write()
	require.Error(t, err)
	// no successful flush yet
	assert.Len(t, m.Metrics(), 0)

	m.failWrite = false
	err = ro.Write()
	require.NoError(t, err)

	assert.Len(t, m.Metrics(), 10)
}

type mockOutput struct {
	sync.Mutex

	metrics []telegraf.Metric

	// if true, mock a write failure
	failWrite bool
}

func (m *mockOutput) Connect() error {
	return nil
}

func (m *mockOutput) Close() error {
	return nil
}

func (m *mockOutput) Description() string {
	return ""
}

func (m *mockOutput) SampleConfig() string {
	return ""
}

func (m *mockOutput) Write(metrics []telegraf.Metric) error {
	m.Lock()
	defer m.Unlock()
	if m.failWrite {
		return fmt.Errorf("Failed Write!")
	}

	if m.metrics == nil {
		m.metrics = []telegraf.Metric{}
	}

	for _, metric := range metrics {
		m.metrics = append(m.metrics, metric)
	}
	return nil
}

func (m *mockOutput) Metrics() []telegraf.Metric {
	m.Lock()
	defer m.Unlock()
	return m.metrics
}
94 metric.go Normal file
@@ -0,0 +1,94 @@
package telegraf

import (
	"time"

	"github.com/influxdata/influxdb/client/v2"
)

type Metric interface {
	// Name returns the measurement name of the metric
	Name() string

	// Tags returns the tags associated with the metric
	Tags() map[string]string

	// Time returns the timestamp for the metric
	Time() time.Time

	// UnixNano returns the unix nano time of the metric
	UnixNano() int64

	// Fields returns the fields for the metric
	Fields() map[string]interface{}

	// String returns a line-protocol string of the metric
	String() string

	// PrecisionString returns a line-protocol string of the metric, at precision
	PrecisionString(precision string) string

	// Point returns an influxdb client.Point object
	Point() *client.Point
}

// metric is a wrapper of the influxdb client.Point struct
type metric struct {
	pt *client.Point
}

// NewMetric returns a metric with the given timestamp. If a timestamp is not
// given, then data is sent to the database without a timestamp, in which case
// the server will assign local time upon reception. NOTE: it is recommended to
// send data with a timestamp.
func NewMetric(
	name string,
	tags map[string]string,
	fields map[string]interface{},
	t ...time.Time,
) (Metric, error) {
	var T time.Time
	if len(t) > 0 {
		T = t[0]
	}

	pt, err := client.NewPoint(name, tags, fields, T)
	if err != nil {
		return nil, err
	}
	return &metric{
		pt: pt,
	}, nil
}

func (m *metric) Name() string {
	return m.pt.Name()
}

func (m *metric) Tags() map[string]string {
	return m.pt.Tags()
}

func (m *metric) Time() time.Time {
	return m.pt.Time()
}

func (m *metric) UnixNano() int64 {
	return m.pt.UnixNano()
}

func (m *metric) Fields() map[string]interface{} {
	return m.pt.Fields()
}

func (m *metric) String() string {
	return m.pt.String()
}

func (m *metric) PrecisionString(precision string) string {
	return m.pt.PrecisionString(precision)
}

func (m *metric) Point() *client.Point {
	return m.pt
}
83 metric_test.go Normal file
@@ -0,0 +1,83 @@
package telegraf

import (
	"fmt"
	"math"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

func TestNewMetric(t *testing.T) {
	now := time.Now()

	tags := map[string]string{
		"host":       "localhost",
		"datacenter": "us-east-1",
	}
	fields := map[string]interface{}{
		"usage_idle": float64(99),
		"usage_busy": float64(1),
	}
	m, err := NewMetric("cpu", tags, fields, now)
	assert.NoError(t, err)

	assert.Equal(t, tags, m.Tags())
	assert.Equal(t, fields, m.Fields())
	assert.Equal(t, "cpu", m.Name())
	assert.Equal(t, now, m.Time())
	assert.Equal(t, now.UnixNano(), m.UnixNano())
}

func TestNewMetricString(t *testing.T) {
	now := time.Now()

	tags := map[string]string{
		"host": "localhost",
	}
	fields := map[string]interface{}{
		"usage_idle": float64(99),
	}
	m, err := NewMetric("cpu", tags, fields, now)
	assert.NoError(t, err)

	lineProto := fmt.Sprintf("cpu,host=localhost usage_idle=99 %d",
		now.UnixNano())
	assert.Equal(t, lineProto, m.String())

	lineProtoPrecision := fmt.Sprintf("cpu,host=localhost usage_idle=99 %d",
		now.Unix())
	assert.Equal(t, lineProtoPrecision, m.PrecisionString("s"))
}

func TestNewMetricStringNoTime(t *testing.T) {
	tags := map[string]string{
		"host": "localhost",
	}
	fields := map[string]interface{}{
		"usage_idle": float64(99),
	}
	m, err := NewMetric("cpu", tags, fields)
	assert.NoError(t, err)

	lineProto := fmt.Sprintf("cpu,host=localhost usage_idle=99")
	assert.Equal(t, lineProto, m.String())

	lineProtoPrecision := fmt.Sprintf("cpu,host=localhost usage_idle=99")
	assert.Equal(t, lineProtoPrecision, m.PrecisionString("s"))
}

func TestNewMetricFailNaN(t *testing.T) {
	now := time.Now()

	tags := map[string]string{
		"host": "localhost",
	}
	fields := map[string]interface{}{
		"usage_idle": math.NaN(),
	}

	_, err := NewMetric("cpu", tags, fields, now)
	assert.Error(t, err)
}
31 output.go Normal file
@@ -0,0 +1,31 @@
package telegraf

type Output interface {
	// Connect to the Output
	Connect() error
	// Close any connections to the Output
	Close() error
	// Description returns a one-sentence description of the Output
	Description() string
	// SampleConfig returns the default configuration of the Output
	SampleConfig() string
	// Write takes in a group of points to be written to the Output
	Write(metrics []Metric) error
}

type ServiceOutput interface {
	// Connect to the Output
	Connect() error
	// Close any connections to the Output
	Close() error
	// Description returns a one-sentence description of the Output
	Description() string
	// SampleConfig returns the default configuration of the Output
	SampleConfig() string
	// Write takes in a group of points to be written to the Output
	Write(metrics []Metric) error
	// Start the "service" that will provide an Output
	Start() error
	// Stop the "service" that will provide an Output
	Stop()
}
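A do-nothing sketch of a type satisfying both interfaces; the type name and behavior are hypothetical, for illustration only:

```go
package main

import "github.com/influxdata/telegraf"

// nullOutput is a hypothetical ServiceOutput that silently discards metrics.
// Because ServiceOutput is a superset of Output, it satisfies both.
type nullOutput struct{}

func (n *nullOutput) Connect() error                        { return nil }
func (n *nullOutput) Close() error                          { return nil }
func (n *nullOutput) Description() string                   { return "discards all metrics" }
func (n *nullOutput) SampleConfig() string                  { return "" }
func (n *nullOutput) Write(metrics []telegraf.Metric) error { return nil }
func (n *nullOutput) Start() error                          { return nil }
func (n *nullOutput) Stop()                                 {}

// Compile-time checks that both interfaces are satisfied.
var _ telegraf.Output = (*nullOutput)(nil)
var _ telegraf.ServiceOutput = (*nullOutput)(nil)

func main() {}
```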
349 package.sh
@@ -1,349 +0,0 @@
#!/usr/bin/env bash

###########################################################################
# Packaging script which creates debian and RPM packages. It optionally
# tags the repo with the given version.
#
# Requirements: GOPATH must be set. 'fpm' must be on the path, and the AWS
# CLI tools must also be installed.
#
#    https://github.com/jordansissel/fpm
#    http://aws.amazon.com/cli/
#
# Packaging process: to package a build, simply execute:
#
#    package.sh <version>
#
# where <version> is the desired version. If generation of a debian and RPM
# package is successful, the script will offer to tag the repo using the
# supplied version string.
#
# AWS upload: the script will also offer to upload the packages to S3. If
# this option is selected, the credentials should be present in the file
# ~/aws.conf. The contents should be of the form:
#
#    [default]
#    aws_access_key_id=<access ID>
#    aws_secret_access_key=<secret key>
#    region = us-east-1
#
# Trim the leading spaces when creating the file. The script will exit if
# S3 upload is requested, but this file does not exist.

AWS_FILE=~/aws.conf

INSTALL_ROOT_DIR=/opt/influxdb
TELEGRAF_LOG_DIR=/var/log/influxdb
CONFIG_ROOT_DIR=/etc/opt/influxdb

SAMPLE_CONFIGURATION=etc/config.sample.toml
INITD_SCRIPT=scripts/init.sh

TMP_WORK_DIR=`mktemp -d`
POST_INSTALL_PATH=`mktemp`
ARCH=`uname -i`
LICENSE=MIT
URL=influxdb.com
MAINTAINER=support@influxdb.com
VENDOR=InfluxDB
DESCRIPTION="InfluxDB Telegraf agent"
PKG_DEPS=(coreutils)
GO_VERSION="go1.4.2"
GOPATH_INSTALL=
BINS=(
    telegraf
)

###########################################################################
# Helper functions.

# usage prints simple usage information.
usage() {
    echo -e "$0 [<version>] [-h]\n"
    cleanup_exit $1
}

# cleanup_exit removes all resources created during the process and exits with
# the supplied returned code.
cleanup_exit() {
    rm -r $TMP_WORK_DIR
    rm $POST_INSTALL_PATH
    exit $1
}

# check_gopath sanity checks the value of the GOPATH env variable, and determines
# the path where build artifacts are installed. GOPATH may be a colon-delimited
# list of directories.
check_gopath() {
    [ -z "$GOPATH" ] && echo "GOPATH is not set." && cleanup_exit 1
    GOPATH_INSTALL=`echo $GOPATH | cut -d ':' -f 1`
    [ ! -d "$GOPATH_INSTALL" ] && echo "GOPATH_INSTALL is not a directory." && cleanup_exit 1
    echo "GOPATH ($GOPATH) looks sane, using $GOPATH_INSTALL for installation."
}

check_gvm() {
    source $HOME/.gvm/scripts/gvm
    which gvm
    if [ $? -ne 0 ]; then
        echo "gvm not found -- aborting."
        cleanup_exit $1
    fi
    gvm use $GO_VERSION
    if [ $? -ne 0 ]; then
        echo "gvm cannot find Go version $GO_VERSION -- aborting."
        cleanup_exit $1
    fi
}

# check_clean_tree ensures that no source file is locally modified.
check_clean_tree() {
    modified=$(git ls-files --modified | wc -l)
    if [ $modified -ne 0 ]; then
        echo "The source tree is not clean -- aborting."
        cleanup_exit 1
    fi
    echo "Git tree is clean."
}

# update_tree ensures the tree is in-sync with the repo.
update_tree() {
    git pull origin master
    if [ $? -ne 0 ]; then
        echo "Failed to pull latest code -- aborting."
        cleanup_exit 1
    fi
    git fetch --tags
    if [ $? -ne 0 ]; then
        echo "Failed to fetch tags -- aborting."
        cleanup_exit 1
    fi
    echo "Git tree updated successfully."
}

# check_tag_exists checks if the proposed release version already exists as a tag.
check_tag_exists () {
    version=$1
    git tag | grep -q "^v$version$"
    if [ $? -eq 0 ]; then
        echo "Proposed version $version already exists as a tag -- aborting."
        cleanup_exit 1
    fi
}

# make_dir_tree creates the directory structure within the packages.
make_dir_tree() {
    work_dir=$1
    version=$2
    mkdir -p $work_dir/$INSTALL_ROOT_DIR/versions/$version/scripts
    if [ $? -ne 0 ]; then
        echo "Failed to create installation directory -- aborting."
        cleanup_exit 1
    fi
    mkdir -p $work_dir/$CONFIG_ROOT_DIR
    if [ $? -ne 0 ]; then
        echo "Failed to create configuration directory -- aborting."
        cleanup_exit 1
    fi
}


# do_build builds the code. The version and commit must be passed in.
do_build() {
    version=$1
    commit=`git rev-parse HEAD`
    if [ $? -ne 0 ]; then
        echo "Unable to retrieve current commit -- aborting"
        cleanup_exit 1
    fi

    for b in ${BINS[*]}; do
        rm -f $GOPATH_INSTALL/bin/$b
    done
    go get -u -f ./...
    if [ $? -ne 0 ]; then
        echo "WARNING: failed to 'go get' packages."
    fi
    go install -a -ldflags="-X main.Version $version -X main.Commit $commit" ./...
    if [ $? -ne 0 ]; then
        echo "Build failed, unable to create package -- aborting"
        cleanup_exit 1
    fi
    echo "Build completed successfully."
}

# generate_postinstall_script creates the post-install script for the
# package. It must be passed the version.
generate_postinstall_script() {
    version=$1
    cat <<EOF >$POST_INSTALL_PATH
rm -f $INSTALL_ROOT_DIR/telegraf
rm -f $INSTALL_ROOT_DIR/init.sh
ln -s $INSTALL_ROOT_DIR/versions/$version/telegraf $INSTALL_ROOT_DIR/telegraf
ln -s $INSTALL_ROOT_DIR/versions/$version/scripts/init.sh $INSTALL_ROOT_DIR/init.sh

rm -f /etc/init.d/telegraf
ln -sfn $INSTALL_ROOT_DIR/init.sh /etc/init.d/telegraf
chmod +x /etc/init.d/telegraf
if which update-rc.d > /dev/null 2>&1 ; then
    update-rc.d -f telegraf remove
    update-rc.d telegraf defaults
else
    chkconfig --add telegraf
fi

if ! id telegraf >/dev/null 2>&1; then
    useradd --system -U -M telegraf
fi
chown -R -L telegraf:telegraf $INSTALL_ROOT_DIR
chmod -R a+rX $INSTALL_ROOT_DIR

mkdir -p $TELEGRAF_LOG_DIR
chown -R -L telegraf:telegraf $TELEGRAF_LOG_DIR
EOF
    echo "Post-install script created successfully at $POST_INSTALL_PATH"
}

###########################################################################
# Start the packaging process.

if [ $# -ne 1 ]; then
    usage 1
elif [ $1 == "-h" ]; then
    usage 0
else
    VERSION=$1
fi

echo -e "\nStarting package process...\n"

check_gvm
check_gopath
check_clean_tree
update_tree
check_tag_exists $VERSION
do_build $VERSION
make_dir_tree $TMP_WORK_DIR $VERSION

###########################################################################
# Copy the assets to the installation directories.

for b in ${BINS[*]}; do
    cp $GOPATH_INSTALL/bin/$b $TMP_WORK_DIR/$INSTALL_ROOT_DIR/versions/$VERSION
    if [ $? -ne 0 ]; then
        echo "Failed to copy binaries to packaging directory -- aborting."
        cleanup_exit 1
    fi
done

echo "${BINS[*]} copied to $TMP_WORK_DIR/$INSTALL_ROOT_DIR/versions/$VERSION"

cp $INITD_SCRIPT $TMP_WORK_DIR/$INSTALL_ROOT_DIR/versions/$VERSION/scripts

if [ $? -ne 0 ]; then
    echo "Failed to copy init.d script to packaging directory -- aborting."
    cleanup_exit 1
fi
echo "$INITD_SCRIPT copied to $TMP_WORK_DIR/$INSTALL_ROOT_DIR/versions/$VERSION/scripts"

cp $SAMPLE_CONFIGURATION $TMP_WORK_DIR/$CONFIG_ROOT_DIR/telegraf.conf
if [ $? -ne 0 ]; then
    echo "Failed to copy $SAMPLE_CONFIGURATION to packaging directory -- aborting."
    cleanup_exit 1
fi

generate_postinstall_script $VERSION

###########################################################################
# Create the actual packages.

echo -n "Commence creation of $ARCH packages, version $VERSION? [Y/n] "
read response
response=`echo $response | tr 'A-Z' 'a-z'`
if [ "x$response" == "xn" ]; then
    echo "Packaging aborted."
    cleanup_exit 1
fi

if [ $ARCH == "i386" ]; then
    rpm_package=telegraf-$VERSION-1.i686.rpm
    debian_package=telegraf_${VERSION}_i686.deb
    deb_args="-a i686"
    rpm_args="setarch i686"
elif [ $ARCH == "arm" ]; then
    rpm_package=telegraf-$VERSION-1.armel.rpm
    debian_package=telegraf_${VERSION}_armel.deb
else
    rpm_package=telegraf-$VERSION-1.x86_64.rpm
    debian_package=telegraf_${VERSION}_amd64.deb
fi

COMMON_FPM_ARGS="-C $TMP_WORK_DIR --vendor $VENDOR --url $URL --license $LICENSE --maintainer $MAINTAINER --after-install $POST_INSTALL_PATH --name telegraf --version $VERSION --config-files $CONFIG_ROOT_DIR ."
$rpm_args fpm -s dir -t rpm --description "$DESCRIPTION" $COMMON_FPM_ARGS
if [ $? -ne 0 ]; then
    echo "Failed to create RPM package -- aborting."
    cleanup_exit 1
fi
echo "RPM package created successfully."

fpm -s dir -t deb $deb_args --description "$DESCRIPTION" $COMMON_FPM_ARGS
if [ $? -ne 0 ]; then
    echo "Failed to create Debian package -- aborting."
    cleanup_exit 1
fi
echo "Debian package created successfully."

###########################################################################
# Offer to tag the repo.

echo -n "Tag source tree with v$VERSION and push to repo? [y/N] "
read response
response=`echo $response | tr 'A-Z' 'a-z'`
if [ "x$response" == "xy" ]; then
    echo "Creating tag v$VERSION and pushing to repo"
    git tag v$VERSION
    if [ $? -ne 0 ]; then
        echo "Failed to create tag v$VERSION -- aborting"
        cleanup_exit 1
    fi
    git push origin v$VERSION
    if [ $? -ne 0 ]; then
        echo "Failed to push tag v$VERSION to repo -- aborting"
        cleanup_exit 1
    fi
else
    echo "Not creating tag v$VERSION."
fi


###########################################################################
# Offer to publish the packages.

echo -n "Publish packages to S3? [y/N] "
read response
response=`echo $response | tr 'A-Z' 'a-z'`
if [ "x$response" == "xy" ]; then
    echo "Publishing packages to S3."
    if [ ! -e "$AWS_FILE" ]; then
        echo "$AWS_FILE does not exist -- aborting."
        cleanup_exit 1
    fi

    for filepath in `ls *.{deb,rpm}`; do
        echo "Uploading $filepath to S3"
        filename=`basename $filepath`
        echo "Uploading $filename to s3://get.influxdb.org/telegraf/$filename"
        AWS_CONFIG_FILE=$AWS_FILE aws s3 cp $filepath s3://get.influxdb.org/telegraf/$filename --acl public-read --region us-east-1
        if [ $? -ne 0 ]; then
            echo "Upload failed -- aborting."
            cleanup_exit 1
        fi
    done
else
    echo "Not publishing packages to S3."
fi

###########################################################################
# All done.

echo -e "\nPackaging process complete."
cleanup_exit 0
@@ -1,9 +0,0 @@
package all

import (
	_ "github.com/influxdb/telegraf/plugins/memcached"
	_ "github.com/influxdb/telegraf/plugins/mysql"
	_ "github.com/influxdb/telegraf/plugins/postgresql"
	_ "github.com/influxdb/telegraf/plugins/redis"
	_ "github.com/influxdb/telegraf/plugins/system"
)
39 plugins/inputs/EXAMPLE_README.md Normal file
@@ -0,0 +1,39 @@
# Example Input Plugin

The example plugin gathers metrics about example things.

### Configuration:

```toml
# Description
[[inputs.example]]
  # SampleConfig
```

### Measurements & Fields:

<optional description>

- measurement1
    - field1 (type, unit)
    - field2 (float, percent)
- measurement2
    - field3 (integer, bytes)

### Tags:

- All measurements have the following tags:
    - tag1 (optional description)
    - tag2
- measurement2 has the following tags:
    - tag3

### Example Output:

Give an example `-test` output here

```
$ ./telegraf -config telegraf.conf -input-filter example -test
measurement1,tag1=foo,tag2=bar field1=1i,field2=2.1 1453831884664956455
measurement2,tag1=foo,tag2=bar,tag3=baz field3=1i 1453831884664956455
```
265 plugins/inputs/aerospike/README.md Normal file
@@ -0,0 +1,265 @@
## Telegraf Plugin: Aerospike

#### Plugin arguments:
- **servers** string array: List of aerospike servers to query (default: 127.0.0.1:3000)

#### Description

The aerospike plugin queries aerospike server(s) and gets node statistics. It also collects stats for
all the configured namespaces.

For what the measurements mean, please consult the [Aerospike Metrics Reference Docs](http://www.aerospike.com/docs/reference/metrics).

To simplify querying, all `-` characters in metric names are replaced with `_`, since Aerospike reports metrics in both forms; the normalization is sketched below.
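The sketch mirrors what the plugin's `readAerospikeStats` function does internally with `strings.Replace`; it is a minimal illustration, not the plugin's exact code path:

```go
package main

import (
	"fmt"
	"strings"
)

// normalizeKey rewrites Aerospike metric names so that both reported
// forms ("stat-write-errs" and "stat_write_errs") map to one field name.
func normalizeKey(key string) string {
	return strings.Replace(key, "-", "_", -1)
}

func main() {
	fmt.Println(normalizeKey("stat-write-errs")) // stat_write_errs
}
```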
# Measurements:
#### Aerospike Statistics [values]:

Meta:
- units: Integer

Measurement names:
- batch_index_queue
- batch_index_unused_buffers
- batch_queue
- batch_tree_count
- client_connections
- data_used_bytes_memory
- index_used_bytes_memory
- info_queue
- migrate_progress_recv
- migrate_progress_send
- migrate_rx_objs
- migrate_tx_objs
- objects
- ongoing_write_reqs
- partition_absent
- partition_actual
- partition_desync
- partition_object_count
- partition_ref_count
- partition_replica
- proxy_in_progress
- query_agg_avg_rec_count
- query_avg_rec_count
- query_lookup_avg_rec_count
- queue
- record_locks
- record_refs
- sindex_used_bytes_memory
- sindex_gc_garbage_cleaned
- system_free_mem_pct
- total_bytes_disk
- total_bytes_memory
- tree_count
- scans_active
- uptime
- used_bytes_disk
- used_bytes_memory
- cluster_size
- waiting_transactions

#### Aerospike Statistics [cumulative]:

Meta:
- units: Integer

Measurement names:
- batch_errors
- batch_index_complete
- batch_index_errors
- batch_index_initiate
- batch_index_timeout
- batch_initiate
- batch_timeout
- err_duplicate_proxy_request
- err_out_of_space
- err_replica_non_null_node
- err_replica_null_node
- err_rw_cant_put_unique
- err_rw_pending_limit
- err_rw_request_not_found
- err_storage_queue_full
- err_sync_copy_null_master
- err_sync_copy_null_node
- err_tsvc_requests
- err_write_fail_bin_exists
- err_write_fail_generation
- err_write_fail_generation_xdr
- err_write_fail_incompatible_type
- err_write_fail_key_exists
- err_write_fail_key_mismatch
- err_write_fail_not_found
- err_write_fail_noxdr
- err_write_fail_parameter
- err_write_fail_prole_delete
- err_write_fail_prole_generation
- err_write_fail_prole_unknown
- err_write_fail_unknown
- fabric_msgs_rcvd
- fabric_msgs_sent
- heartbeat_received_foreign
- heartbeat_received_self
- migrate_msgs_recv
- migrate_msgs_sent
- migrate_num_incoming_accepted
- migrate_num_incoming_refused
- proxy_action
- proxy_initiate
- proxy_retry
- proxy_retry_new_dest
- proxy_retry_q_full
- proxy_retry_same_dest
- proxy_unproxy
- query_abort
- query_agg
- query_agg_abort
- query_agg_err
- query_agg_success
- query_bad_records
- query_fail
- query_long_queue_full
- query_long_running
- query_lookup_abort
- query_lookup_err
- query_lookups
- query_lookup_success
- query_reqs
- query_short_queue_full
- query_short_running
- query_success
- query_tracked
- read_dup_prole
- reaped_fds
- rw_err_ack_badnode
- rw_err_ack_internal
- rw_err_ack_nomatch
- rw_err_dup_cluster_key
- rw_err_dup_internal
- rw_err_dup_send
- rw_err_write_cluster_key
- rw_err_write_internal
- rw_err_write_send
- sindex_ucgarbage_found
- sindex_gc_locktimedout
- sindex_gc_inactivity_dur
- sindex_gc_activity_dur
- sindex_gc_list_creation_time
- sindex_gc_list_deletion_time
- sindex_gc_objects_validated
- sindex_gc_garbage_found
- stat_cluster_key_err_ack_dup_trans_reenqueue
- stat_cluster_key_err_ack_rw_trans_reenqueue
- stat_cluster_key_prole_retry
- stat_cluster_key_regular_processed
- stat_cluster_key_trans_to_proxy_retry
- stat_deleted_set_object
- stat_delete_success
- stat_duplicate_operation
- stat_evicted_objects
- stat_evicted_objects_time
- stat_evicted_set_objects
- stat_expired_objects
- stat_nsup_deletes_not_shipped
- stat_proxy_errs
- stat_proxy_reqs
- stat_proxy_reqs_xdr
- stat_proxy_success
- stat_read_errs_notfound
- stat_read_errs_other
- stat_read_reqs
- stat_read_reqs_xdr
- stat_read_success
- stat_rw_timeout
- stat_slow_trans_queue_batch_pop
- stat_slow_trans_queue_pop
- stat_slow_trans_queue_push
- stat_write_errs
- stat_write_errs_notfound
- stat_write_errs_other
- stat_write_reqs
- stat_write_reqs_xdr
- stat_write_success
- stat_xdr_pipe_miss
- stat_xdr_pipe_writes
- stat_zero_bin_records
- storage_defrag_corrupt_record
- storage_defrag_wait
- transactions
- basic_scans_succeeded
- basic_scans_failed
- aggr_scans_succeeded
- aggr_scans_failed
- udf_bg_scans_succeeded
- udf_bg_scans_failed
- udf_delete_err_others
- udf_delete_reqs
- udf_delete_success
- udf_lua_errs
- udf_query_rec_reqs
- udf_read_errs_other
- udf_read_reqs
- udf_read_success
- udf_replica_writes
- udf_scan_rec_reqs
- udf_write_err_others
- udf_write_reqs
- udf_write_success
- write_master
- write_prole

#### Aerospike Statistics [percentage]:

Meta:
- units: percent (out of 100)

Measurement names:
- free_pct_disk
- free_pct_memory

# Measurements:
#### Aerospike Namespace Statistics [values]:

Meta:
- units: Integer
- tags: `namespace=<namespace>`

Measurement names:
- available_bin_names
- available_pct
- current_time
- data_used_bytes_memory
- index_used_bytes_memory
- master_objects
- max_evicted_ttl
- max_void_time
- non_expirable_objects
- objects
- prole_objects
- sindex_used_bytes_memory
- total_bytes_disk
- total_bytes_memory
- used_bytes_disk
- used_bytes_memory

#### Aerospike Namespace Statistics [cumulative]:

Meta:
- units: Integer
- tags: `namespace=<namespace>`

Measurement names:
- evicted_objects
- expired_objects
- set_deleted_objects
- set_evicted_objects

#### Aerospike Namespace Statistics [percentage]:

Meta:
- units: percent (out of 100)
- tags: `namespace=<namespace>`

Measurement names:
- free_pct_disk
- free_pct_memory
341 plugins/inputs/aerospike/aerospike.go Normal file
@@ -0,0 +1,341 @@
package aerospike

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"net"
	"strconv"
	"strings"
	"sync"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/inputs"
)

const (
	MSG_HEADER_SIZE = 8
	MSG_TYPE        = 1 // Info is 1
	MSG_VERSION     = 2
)

var (
	STATISTICS_COMMAND = []byte("statistics\n")
	NAMESPACES_COMMAND = []byte("namespaces\n")
)

type aerospikeMessageHeader struct {
	Version uint8
	Type    uint8
	DataLen [6]byte
}

type aerospikeMessage struct {
	aerospikeMessageHeader
	Data []byte
}

// Taken from aerospike-client-go/types/message.go
func (msg *aerospikeMessage) Serialize() []byte {
	msg.DataLen = msgLenToBytes(int64(len(msg.Data)))
	buf := bytes.NewBuffer([]byte{})
	binary.Write(buf, binary.BigEndian, msg.aerospikeMessageHeader)
	binary.Write(buf, binary.BigEndian, msg.Data[:])
	return buf.Bytes()
}

type aerospikeInfoCommand struct {
	msg *aerospikeMessage
}

// Taken from aerospike-client-go/info.go
func (nfo *aerospikeInfoCommand) parseMultiResponse() (map[string]string, error) {
	responses := make(map[string]string)
	offset := int64(0)
	begin := int64(0)

	dataLen := int64(len(nfo.msg.Data))

	// Create reusable StringBuilder for performance.
	for offset < dataLen {
		b := nfo.msg.Data[offset]

		if b == '\t' {
			name := nfo.msg.Data[begin:offset]
			offset++
			begin = offset

			// Parse field value.
			for offset < dataLen {
				if nfo.msg.Data[offset] == '\n' {
					break
				}
				offset++
			}

			if offset > begin {
				value := nfo.msg.Data[begin:offset]
				responses[string(name)] = string(value)
			} else {
				responses[string(name)] = ""
			}
			offset++
			begin = offset
		} else if b == '\n' {
			if offset > begin {
				name := nfo.msg.Data[begin:offset]
				responses[string(name)] = ""
			}
			offset++
			begin = offset
		} else {
			offset++
		}
	}

	if offset > begin {
		name := nfo.msg.Data[begin:offset]
		responses[string(name)] = ""
	}
	return responses, nil
}

type Aerospike struct {
	Servers []string
}

var sampleConfig = `
  ## Aerospike servers to connect to (with port)
  ## This plugin will query all namespaces the aerospike
  ## server has configured and get stats for them.
  servers = ["localhost:3000"]
`

func (a *Aerospike) SampleConfig() string {
	return sampleConfig
}

func (a *Aerospike) Description() string {
	return "Read stats from an aerospike server"
}

func (a *Aerospike) Gather(acc telegraf.Accumulator) error {
	if len(a.Servers) == 0 {
		return a.gatherServer("127.0.0.1:3000", acc)
	}

	var wg sync.WaitGroup

	var outerr error

	for _, server := range a.Servers {
		wg.Add(1)
		go func(server string) {
			defer wg.Done()
			outerr = a.gatherServer(server, acc)
		}(server)
	}

	wg.Wait()
	return outerr
}

func (a *Aerospike) gatherServer(host string, acc telegraf.Accumulator) error {
	aerospikeInfo, err := getMap(STATISTICS_COMMAND, host)
	if err != nil {
		return fmt.Errorf("Aerospike info failed: %s", err)
	}
	readAerospikeStats(aerospikeInfo, acc, host, "")
	namespaces, err := getList(NAMESPACES_COMMAND, host)
	if err != nil {
		return fmt.Errorf("Aerospike namespace list failed: %s", err)
	}
	for ix := range namespaces {
		nsInfo, err := getMap([]byte("namespace/"+namespaces[ix]+"\n"), host)
		if err != nil {
			return fmt.Errorf("Aerospike namespace '%s' query failed: %s", namespaces[ix], err)
		}
		readAerospikeStats(nsInfo, acc, host, namespaces[ix])
	}
	return nil
}

func getMap(key []byte, host string) (map[string]string, error) {
	data, err := get(key, host)
	if err != nil {
		return nil, fmt.Errorf("Failed to get data: %s", err)
	}
	parsed, err := unmarshalMapInfo(data, string(key))
	if err != nil {
		return nil, fmt.Errorf("Failed to unmarshal data: %s", err)
	}

	return parsed, nil
}

func getList(key []byte, host string) ([]string, error) {
	data, err := get(key, host)
	if err != nil {
		return nil, fmt.Errorf("Failed to get data: %s", err)
	}
	parsed, err := unmarshalListInfo(data, string(key))
	if err != nil {
		return nil, fmt.Errorf("Failed to unmarshal data: %s", err)
	}

	return parsed, nil
}

func get(key []byte, host string) (map[string]string, error) {
	var err error
	var data map[string]string

	asInfo := &aerospikeInfoCommand{
		msg: &aerospikeMessage{
			aerospikeMessageHeader: aerospikeMessageHeader{
				Version: uint8(MSG_VERSION),
				Type:    uint8(MSG_TYPE),
				DataLen: msgLenToBytes(int64(len(key))),
			},
			Data: key,
		},
	}

	cmd := asInfo.msg.Serialize()
	addr, err := net.ResolveTCPAddr("tcp", host)
	if err != nil {
		return data, fmt.Errorf("Lookup failed for '%s': %s", host, err)
	}

	conn, err := net.DialTCP("tcp", nil, addr)
	if err != nil {
		return data, fmt.Errorf("Connection failed for '%s': %s", host, err)
	}
	defer conn.Close()

	_, err = conn.Write(cmd)
	if err != nil {
		return data, fmt.Errorf("Failed to send to '%s': %s", host, err)
	}

	msgHeader := bytes.NewBuffer(make([]byte, MSG_HEADER_SIZE))
	_, err = readLenFromConn(conn, msgHeader.Bytes(), MSG_HEADER_SIZE)
	if err != nil {
		return data, fmt.Errorf("Failed to read header: %s", err)
	}
	err = binary.Read(msgHeader, binary.BigEndian, &asInfo.msg.aerospikeMessageHeader)
	if err != nil {
		return data, fmt.Errorf("Failed to unmarshal header: %s", err)
	}

	msgLen := msgLenFromBytes(asInfo.msg.aerospikeMessageHeader.DataLen)

	if int64(len(asInfo.msg.Data)) != msgLen {
		asInfo.msg.Data = make([]byte, msgLen)
	}

	_, err = readLenFromConn(conn, asInfo.msg.Data, len(asInfo.msg.Data))
	if err != nil {
		return data, fmt.Errorf("Failed to read from connection to '%s': %s", host, err)
	}

	data, err = asInfo.parseMultiResponse()
	if err != nil {
		return data, fmt.Errorf("Failed to parse response from '%s': %s", host, err)
	}

	return data, err
}

func readAerospikeStats(
	stats map[string]string,
	acc telegraf.Accumulator,
	host string,
	namespace string,
) {
	fields := make(map[string]interface{})
	tags := map[string]string{
		"aerospike_host": host,
		"namespace":      "_service",
	}

	if namespace != "" {
		tags["namespace"] = namespace
	}
	for key, value := range stats {
		// We are going to ignore all string based keys
		val, err := strconv.ParseInt(value, 10, 64)
		if err == nil {
			if strings.Contains(key, "-") {
				key = strings.Replace(key, "-", "_", -1)
			}
			fields[key] = val
		}
	}
	acc.AddFields("aerospike", fields, tags)
}

func unmarshalMapInfo(infoMap map[string]string, key string) (map[string]string, error) {
	key = strings.TrimSuffix(key, "\n")
	res := map[string]string{}

	v, exists := infoMap[key]
	if !exists {
		return res, fmt.Errorf("Key '%s' missing from info", key)
	}

	values := strings.Split(v, ";")
	for i := range values {
		kv := strings.Split(values[i], "=")
		if len(kv) > 1 {
			res[kv[0]] = kv[1]
		}
	}

	return res, nil
}

func unmarshalListInfo(infoMap map[string]string, key string) ([]string, error) {
	key = strings.TrimSuffix(key, "\n")

	v, exists := infoMap[key]
	if !exists {
		return []string{}, fmt.Errorf("Key '%s' missing from info", key)
	}

	values := strings.Split(v, ";")
	return values, nil
}

func readLenFromConn(c net.Conn, buffer []byte, length int) (total int, err error) {
	var r int
	for total < length {
		r, err = c.Read(buffer[total:length])
		total += r
		if err != nil {
			break
		}
	}
	return
}

// Taken from aerospike-client-go/types/message.go
func msgLenToBytes(DataLen int64) [6]byte {
	b := make([]byte, 8)
	binary.BigEndian.PutUint64(b, uint64(DataLen))
	res := [6]byte{}
	copy(res[:], b[2:])
	return res
}

// Taken from aerospike-client-go/types/message.go
func msgLenFromBytes(buf [6]byte) int64 {
	nbytes := append([]byte{0, 0}, buf[:]...)
	DataLen := binary.BigEndian.Uint64(nbytes)
	return int64(DataLen)
}

func init() {
	inputs.Add("aerospike", func() telegraf.Input {
		return &Aerospike{}
	})
}
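The 6-byte big-endian length used by the info protocol header above is easy to misread; here is a minimal, self-contained sketch of the same encoding round trip (standard library only, mirroring msgLenToBytes and msgLenFromBytes):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// encode packs a payload length into the low 6 bytes of an 8-byte
// big-endian integer, as the aerospike info header does.
func encode(n int64) [6]byte {
	b := make([]byte, 8)
	binary.BigEndian.PutUint64(b, uint64(n))
	var res [6]byte
	copy(res[:], b[2:])
	return res
}

// decode widens the 6 bytes back to an int64 by prepending two zero bytes.
func decode(buf [6]byte) int64 {
	return int64(binary.BigEndian.Uint64(append([]byte{0, 0}, buf[:]...)))
}

func main() {
	fmt.Println(decode(encode(11))) // 11 -- the length of "statistics\n"
}
```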
118 plugins/inputs/aerospike/aerospike_test.go Normal file
@@ -0,0 +1,118 @@
package aerospike

import (
	"reflect"
	"testing"

	"github.com/influxdata/telegraf/testutil"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestAerospikeStatistics(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}

	a := &Aerospike{
		Servers: []string{testutil.GetLocalHost() + ":3000"},
	}

	var acc testutil.Accumulator

	err := a.Gather(&acc)
	require.NoError(t, err)

	// Only use a few of the metrics
	asMetrics := []string{
		"transactions",
		"stat_write_errs",
		"stat_read_reqs",
		"stat_write_reqs",
	}

	for _, metric := range asMetrics {
		assert.True(t, acc.HasIntField("aerospike", metric), metric)
	}
}

func TestAerospikeMsgLenFromToBytes(t *testing.T) {
	var i int64 = 8
	assert.True(t, i == msgLenFromBytes(msgLenToBytes(i)))
}

func TestReadAerospikeStatsNoNamespace(t *testing.T) {
	// Also test for re-writing
	var acc testutil.Accumulator
	stats := map[string]string{
		"stat-write-errs": "12345",
		"stat_read_reqs":  "12345",
	}
	readAerospikeStats(stats, &acc, "host1", "")

	fields := map[string]interface{}{
		"stat_write_errs": int64(12345),
		"stat_read_reqs":  int64(12345),
	}
	tags := map[string]string{
		"aerospike_host": "host1",
		"namespace":      "_service",
	}
	acc.AssertContainsTaggedFields(t, "aerospike", fields, tags)
}

func TestReadAerospikeStatsNamespace(t *testing.T) {
	var acc testutil.Accumulator
	stats := map[string]string{
		"stat_write_errs": "12345",
		"stat_read_reqs":  "12345",
	}
	readAerospikeStats(stats, &acc, "host1", "test")

	fields := map[string]interface{}{
		"stat_write_errs": int64(12345),
		"stat_read_reqs":  int64(12345),
	}
	tags := map[string]string{
		"aerospike_host": "host1",
		"namespace":      "test",
	}
	acc.AssertContainsTaggedFields(t, "aerospike", fields, tags)
}

func TestAerospikeUnmarshalList(t *testing.T) {
	i := map[string]string{
		"test": "one;two;three",
	}

	expected := []string{"one", "two", "three"}

	list, err := unmarshalListInfo(i, "test2")
	assert.True(t, err != nil)

	list, err = unmarshalListInfo(i, "test")
	assert.True(t, err == nil)
	equal := true
	for ix := range expected {
		if list[ix] != expected[ix] {
			equal = false
			break
		}
	}
	assert.True(t, equal)
}

func TestAerospikeUnmarshalMap(t *testing.T) {
	i := map[string]string{
		"test": "key1=value1;key2=value2",
	}

	expected := map[string]string{
		"key1": "value1",
		"key2": "value2",
	}
	m, err := unmarshalMapInfo(i, "test")
	assert.True(t, err == nil)
	assert.True(t, reflect.DeepEqual(m, expected))
}
55 plugins/inputs/all/all.go Normal file
@@ -0,0 +1,55 @@
package all

import (
	_ "github.com/influxdata/telegraf/plugins/inputs/aerospike"
	_ "github.com/influxdata/telegraf/plugins/inputs/apache"
	_ "github.com/influxdata/telegraf/plugins/inputs/bcache"
	_ "github.com/influxdata/telegraf/plugins/inputs/couchdb"
	_ "github.com/influxdata/telegraf/plugins/inputs/disque"
	_ "github.com/influxdata/telegraf/plugins/inputs/dns_query"
	_ "github.com/influxdata/telegraf/plugins/inputs/docker"
	_ "github.com/influxdata/telegraf/plugins/inputs/dovecot"
	_ "github.com/influxdata/telegraf/plugins/inputs/elasticsearch"
	_ "github.com/influxdata/telegraf/plugins/inputs/exec"
	_ "github.com/influxdata/telegraf/plugins/inputs/github_webhooks"
	_ "github.com/influxdata/telegraf/plugins/inputs/haproxy"
	_ "github.com/influxdata/telegraf/plugins/inputs/httpjson"
	_ "github.com/influxdata/telegraf/plugins/inputs/influxdb"
	_ "github.com/influxdata/telegraf/plugins/inputs/jolokia"
	_ "github.com/influxdata/telegraf/plugins/inputs/kafka_consumer"
	_ "github.com/influxdata/telegraf/plugins/inputs/leofs"
	_ "github.com/influxdata/telegraf/plugins/inputs/lustre2"
	_ "github.com/influxdata/telegraf/plugins/inputs/mailchimp"
	_ "github.com/influxdata/telegraf/plugins/inputs/memcached"
	_ "github.com/influxdata/telegraf/plugins/inputs/mesos"
	_ "github.com/influxdata/telegraf/plugins/inputs/mongodb"
	_ "github.com/influxdata/telegraf/plugins/inputs/mqtt_consumer"
	_ "github.com/influxdata/telegraf/plugins/inputs/mysql"
	_ "github.com/influxdata/telegraf/plugins/inputs/nats_consumer"
	_ "github.com/influxdata/telegraf/plugins/inputs/net_response"
	_ "github.com/influxdata/telegraf/plugins/inputs/nginx"
	_ "github.com/influxdata/telegraf/plugins/inputs/nsq"
	_ "github.com/influxdata/telegraf/plugins/inputs/passenger"
	_ "github.com/influxdata/telegraf/plugins/inputs/phpfpm"
	_ "github.com/influxdata/telegraf/plugins/inputs/ping"
	_ "github.com/influxdata/telegraf/plugins/inputs/postgresql"
	_ "github.com/influxdata/telegraf/plugins/inputs/powerdns"
	_ "github.com/influxdata/telegraf/plugins/inputs/procstat"
	_ "github.com/influxdata/telegraf/plugins/inputs/prometheus"
	_ "github.com/influxdata/telegraf/plugins/inputs/puppetagent"
	_ "github.com/influxdata/telegraf/plugins/inputs/rabbitmq"
	_ "github.com/influxdata/telegraf/plugins/inputs/raindrops"
	_ "github.com/influxdata/telegraf/plugins/inputs/redis"
	_ "github.com/influxdata/telegraf/plugins/inputs/rethinkdb"
	_ "github.com/influxdata/telegraf/plugins/inputs/riak"
	_ "github.com/influxdata/telegraf/plugins/inputs/sensors"
	_ "github.com/influxdata/telegraf/plugins/inputs/snmp"
	_ "github.com/influxdata/telegraf/plugins/inputs/sqlserver"
	_ "github.com/influxdata/telegraf/plugins/inputs/statsd"
	_ "github.com/influxdata/telegraf/plugins/inputs/system"
	_ "github.com/influxdata/telegraf/plugins/inputs/trig"
	_ "github.com/influxdata/telegraf/plugins/inputs/twemproxy"
	_ "github.com/influxdata/telegraf/plugins/inputs/win_perf_counters"
	_ "github.com/influxdata/telegraf/plugins/inputs/zfs"
	_ "github.com/influxdata/telegraf/plugins/inputs/zookeeper"
)
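These blank imports exist only for their side effects: each plugin package registers a constructor from its init() function (via inputs.Add, as seen in the plugin files in this diff). A minimal sketch of that registry pattern, with hypothetical names standing in for the real inputs package:

```go
package registry

// Creator builds a fresh plugin instance (hypothetical stand-in for
// the real telegraf input creator type).
type Creator func() interface{}

// Inputs maps a plugin name to its constructor.
var Inputs = map[string]Creator{}

// Add is called from each plugin package's init(), so a blank
// import of the package is enough to make the plugin available.
func Add(name string, creator Creator) {
	Inputs[name] = creator
}
```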
45 plugins/inputs/apache/README.md Normal file
@@ -0,0 +1,45 @@
# Telegraf plugin: Apache

#### Plugin arguments:
- **urls** []string: List of apache-status URLs to collect from.

#### Description

The Apache plugin collects from the /server-status?auto URL. See
[apache.org/server-status?auto](http://www.apache.org/server-status?auto) for an
example, and the [mod_status documentation](http://httpd.apache.org/docs/2.2/mod/mod_status.html)
for details on the reported values.

# Measurements:

Meta:
- tags: `port=<port>`, `server=url`

- apache_TotalAccesses
- apache_TotalkBytes
- apache_CPULoad
- apache_Uptime
- apache_ReqPerSec
- apache_BytesPerSec
- apache_BytesPerReq
- apache_BusyWorkers
- apache_IdleWorkers
- apache_ConnsTotal
- apache_ConnsAsyncWriting
- apache_ConnsAsyncKeepAlive
- apache_ConnsAsyncClosing

### Scoreboard measurements

Each measurement counts one scoreboard character reported by mod_status; see the sketch after this list.

- apache_scboard_waiting
- apache_scboard_starting
- apache_scboard_reading
- apache_scboard_sending
- apache_scboard_keepalive
- apache_scboard_dnslookup
- apache_scboard_closing
- apache_scboard_logging
- apache_scboard_finishing
- apache_scboard_idle_cleanup
- apache_scboard_open
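A minimal sketch of how a mod_status scoreboard string maps onto these counters, using the same character meanings as the plugin's gatherScores function (only a few states shown):

```go
package main

import "fmt"

func main() {
	// "_" = waiting for connection, "W" = sending reply,
	// "R" = reading request, "." = open slot with no process.
	scoreboard := "WW__R."
	counts := map[rune]int{}
	for _, c := range scoreboard {
		counts[c]++
	}
	fmt.Println(counts['_'], counts['W'], counts['R'], counts['.']) // 2 2 1 1
}
```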
171 plugins/inputs/apache/apache.go Normal file
@@ -0,0 +1,171 @@
package apache

import (
	"bufio"
	"fmt"
	"net"
	"net/http"
	"net/url"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/inputs"
)

type Apache struct {
	Urls []string
}

var sampleConfig = `
  ## An array of Apache status URI to gather stats.
  urls = ["http://localhost/server-status?auto"]
`

func (n *Apache) SampleConfig() string {
	return sampleConfig
}

func (n *Apache) Description() string {
	return "Read Apache status information (mod_status)"
}

func (n *Apache) Gather(acc telegraf.Accumulator) error {
	var wg sync.WaitGroup
	var outerr error

	for _, u := range n.Urls {
		addr, err := url.Parse(u)
		if err != nil {
			return fmt.Errorf("Unable to parse address '%s': %s", u, err)
		}

		wg.Add(1)
		go func(addr *url.URL) {
			defer wg.Done()
			outerr = n.gatherUrl(addr, acc)
		}(addr)
	}

	wg.Wait()

	return outerr
}

var tr = &http.Transport{
	ResponseHeaderTimeout: time.Duration(3 * time.Second),
}

var client = &http.Client{Transport: tr}

func (n *Apache) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error {
	resp, err := client.Get(addr.String())
	if err != nil {
		return fmt.Errorf("error making HTTP request to %s: %s", addr.String(), err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("%s returned HTTP status %s", addr.String(), resp.Status)
	}

	tags := getTags(addr)

	sc := bufio.NewScanner(resp.Body)
	fields := make(map[string]interface{})
	for sc.Scan() {
		line := sc.Text()
		if strings.Contains(line, ":") {
			parts := strings.SplitN(line, ":", 2)
			key, part := strings.Replace(parts[0], " ", "", -1), strings.TrimSpace(parts[1])

			switch key {
			case "Scoreboard":
				for field, value := range n.gatherScores(part) {
					fields[field] = value
				}
			default:
				value, err := strconv.ParseFloat(part, 64)
				if err != nil {
					continue
				}
				fields[key] = value
			}
		}
	}
	acc.AddFields("apache", fields, tags)

	return nil
}

func (n *Apache) gatherScores(data string) map[string]interface{} {
	var waiting, open int = 0, 0
	var S, R, W, K, D, C, L, G, I int = 0, 0, 0, 0, 0, 0, 0, 0, 0

	for _, s := range strings.Split(data, "") {
		switch s {
		case "_":
			waiting++
		case "S":
			S++
		case "R":
			R++
		case "W":
			W++
		case "K":
			K++
		case "D":
			D++
		case "C":
			C++
		case "L":
			L++
		case "G":
			G++
		case "I":
			I++
		case ".":
			open++
		}
	}

	fields := map[string]interface{}{
		"scboard_waiting":      float64(waiting),
		"scboard_starting":     float64(S),
		"scboard_reading":      float64(R),
		"scboard_sending":      float64(W),
		"scboard_keepalive":    float64(K),
		"scboard_dnslookup":    float64(D),
		"scboard_closing":      float64(C),
		"scboard_logging":      float64(L),
		"scboard_finishing":    float64(G),
		"scboard_idle_cleanup": float64(I),
		"scboard_open":         float64(open),
	}
	return fields
}

// Get tag(s) for the apache plugin
func getTags(addr *url.URL) map[string]string {
	h := addr.Host
	host, port, err := net.SplitHostPort(h)
	if err != nil {
		host = addr.Host
		if addr.Scheme == "http" {
			port = "80"
		} else if addr.Scheme == "https" {
			port = "443"
		} else {
			port = ""
		}
	}
	return map[string]string{"server": host, "port": port}
}

func init() {
	inputs.Add("apache", func() telegraf.Input {
		return &Apache{}
	})
}
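One detail worth calling out in getTags above: when the status URL has no explicit port, net.SplitHostPort fails and the URL scheme decides the port tag. A small standalone sketch of that fallback:

```go
package main

import (
	"fmt"
	"net"
	"net/url"
)

// portFor mirrors the getTags fallback: an explicit port wins,
// otherwise the scheme implies 80 or 443, else the tag is left empty.
func portFor(raw string) string {
	u, _ := url.Parse(raw)
	if _, port, err := net.SplitHostPort(u.Host); err == nil {
		return port
	}
	switch u.Scheme {
	case "http":
		return "80"
	case "https":
		return "443"
	}
	return ""
}

func main() {
	fmt.Println(portFor("http://localhost/server-status?auto"))    // 80
	fmt.Println(portFor("https://example.com:8443/server-status")) // 8443
}
```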
73 plugins/inputs/apache/apache_test.go Normal file
@@ -0,0 +1,73 @@
package apache

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"testing"

	"github.com/influxdata/telegraf/testutil"

	"github.com/stretchr/testify/require"
)

var apacheStatus = `
Total Accesses: 129811861
Total kBytes: 5213701865
CPULoad: 6.51929
Uptime: 941553
ReqPerSec: 137.87
BytesPerSec: 5670240
BytesPerReq: 41127.4
BusyWorkers: 270
IdleWorkers: 630
ConnsTotal: 1451
ConnsAsyncWriting: 32
ConnsAsyncKeepAlive: 945
ConnsAsyncClosing: 205
Scoreboard: WW_____W_RW_R_W__RRR____WR_W___WW________W_WW_W_____R__R_WR__WRWR_RRRW___R_RWW__WWWRW__R_RW___RR_RW_R__W__WR_WWW______WWR__R___R_WR_W___RW______RR________________W______R__RR______W________________R____R__________________________RW_W____R_____W_R_________________R____RR__W___R_R____RW______R____W______W_W_R_R______R__R_R__________R____W_______WW____W____RR__W_____W_R_______W__________W___W____________W_______WRR_R_W____W_____R____W_WW_R____RRW__W............................................................................................................................................................................................................................................................................................................WRRWR____WR__RR_R___RWR_________W_R____RWRRR____R_R__RW_R___WWW_RW__WR_RRR____W___R____WW_R__R___RR_W_W_RRRRWR__RRWR__RRW_W_RRRW_R_RR_W__RR_RWRR_R__R___RR_RR______R__RR____R_____W_R_R_R__R__R__________W____WW_R___R_R___R_________RR__RR____RWWWW___W_R________R_R____R_W___W___R___W_WRRWW_______R__W_RW_______R________RR__R________W_______________________W_W______________RW_________WR__R___R__R_______________WR_R_________W___RW_____R____________W____.......................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................
...............................................................................................................................................................................................................
`

func TestHTTPApache(t *testing.T) {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
		fmt.Fprintln(w, apacheStatus)
	}))
	defer ts.Close()

	a := Apache{
		Urls: []string{ts.URL},
	}

	var acc testutil.Accumulator
	err := a.Gather(&acc)
	require.NoError(t, err)

	fields := map[string]interface{}{
		"TotalAccesses":        float64(1.29811861e+08),
		"TotalkBytes":          float64(5.213701865e+09),
		"CPULoad":              float64(6.51929),
		"Uptime":               float64(941553),
		"ReqPerSec":            float64(137.87),
		"BytesPerSec":          float64(5.67024e+06),
		"BytesPerReq":          float64(41127.4),
		"BusyWorkers":          float64(270),
		"IdleWorkers":          float64(630),
		"ConnsTotal":           float64(1451),
		"ConnsAsyncWriting":    float64(32),
		"ConnsAsyncKeepAlive":  float64(945),
		"ConnsAsyncClosing":    float64(205),
		"scboard_waiting":      float64(630),
		"scboard_starting":     float64(0),
		"scboard_reading":      float64(157),
		"scboard_sending":      float64(113),
		"scboard_keepalive":    float64(0),
		"scboard_dnslookup":    float64(0),
		"scboard_closing":      float64(0),
		"scboard_logging":      float64(0),
		"scboard_finishing":    float64(0),
		"scboard_idle_cleanup": float64(0),
		"scboard_open":         float64(2850),
	}
	acc.AssertContainsFields(t, "apache", fields)
}
89 plugins/inputs/bcache/README.md Normal file
@@ -0,0 +1,89 @@
# Telegraf plugin: bcache

Get bcache stats from the stats_total directory and the dirty_data file.

# Measurements

Meta:

- tags: `backing_dev=dev bcache_dev=dev`

Measurement names:

- dirty_data
- bypassed
- cache_bypass_hits
- cache_bypass_misses
- cache_hit_ratio
- cache_hits
- cache_miss_collisions
- cache_misses
- cache_readaheads

### Description

```
dirty_data
    Amount of dirty data for this backing device in the cache. Continuously
    updated unlike the cache set's version, but may be slightly off.

bypassed
    Amount of IO (both reads and writes) that has bypassed the cache

cache_bypass_hits
cache_bypass_misses
    Hits and misses for IO that is intended to skip the cache are still counted,
    but broken out here.

cache_hits
cache_misses
cache_hit_ratio
    Hits and misses are counted per individual IO as bcache sees them; a
    partial hit is counted as a miss.

cache_miss_collisions
    Counts instances where data was going to be inserted into the cache from a
    cache miss, but raced with a write and data was already present (usually 0
    since the synchronization for cache misses was rewritten)

cache_readaheads
    Count of times readahead occurred.
```

# Example output

Using this configuration:

```
[bcache]
  # Bcache sets path
  # If not specified, then default is:
  # bcachePath = "/sys/fs/bcache"
  #
  # By default, telegraf gathers stats for all bcache devices
  # Setting devices will restrict the stats to the specified
  # bcache devices.
  # bcacheDevs = ["bcache0", ...]
```

When run with:

```
./telegraf -config telegraf.conf -input-filter bcache -test
```

It produces:

```
* Plugin: bcache, Collection 1
> [backing_dev="md10" bcache_dev="bcache0"] bcache_dirty_data value=11639194
> [backing_dev="md10" bcache_dev="bcache0"] bcache_bypassed value=5167704440832
> [backing_dev="md10" bcache_dev="bcache0"] bcache_cache_bypass_hits value=146270986
> [backing_dev="md10" bcache_dev="bcache0"] bcache_cache_bypass_misses value=0
> [backing_dev="md10" bcache_dev="bcache0"] bcache_cache_hit_ratio value=90
> [backing_dev="md10" bcache_dev="bcache0"] bcache_cache_hits value=511941651
> [backing_dev="md10" bcache_dev="bcache0"] bcache_cache_miss_collisions value=157678
> [backing_dev="md10" bcache_dev="bcache0"] bcache_cache_misses value=50647396
> [backing_dev="md10" bcache_dev="bcache0"] bcache_cache_readaheads value=0
```
142 plugins/inputs/bcache/bcache.go Normal file
@@ -0,0 +1,142 @@
package bcache

import (
	"errors"
	"io/ioutil"
	"os"
	"path/filepath"
	"strconv"
	"strings"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/inputs"
)

type Bcache struct {
	BcachePath string
	BcacheDevs []string
}

var sampleConfig = `
  ## Bcache sets path
  ## If not specified, then default is:
  bcachePath = "/sys/fs/bcache"

  ## By default, telegraf gathers stats for all bcache devices
  ## Setting devices will restrict the stats to the specified
  ## bcache devices.
  bcacheDevs = ["bcache0"]
`

func (b *Bcache) SampleConfig() string {
	return sampleConfig
}

func (b *Bcache) Description() string {
	return "Read metrics of bcache from stats_total and dirty_data"
}

func getTags(bdev string) map[string]string {
	backingDevFile, _ := os.Readlink(bdev)
	backingDevPath := strings.Split(backingDevFile, "/")
	backingDev := backingDevPath[len(backingDevPath)-2]

	bcacheDevFile, _ := os.Readlink(bdev + "/dev")
	bcacheDevPath := strings.Split(bcacheDevFile, "/")
	bcacheDev := bcacheDevPath[len(bcacheDevPath)-1]

	return map[string]string{"backing_dev": backingDev, "bcache_dev": bcacheDev}
}

func prettyToBytes(v string) uint64 {
	var factors = map[string]uint64{
		"k": 1 << 10,
		"M": 1 << 20,
		"G": 1 << 30,
		"T": 1 << 40,
		"P": 1 << 50,
		"E": 1 << 60,
	}
	factor := uint64(1)
	prefix := v[len(v)-1:]
	if factors[prefix] != 0 {
		v = v[:len(v)-1]
		factor = factors[prefix]
	}
	result, _ := strconv.ParseFloat(v, 32)
	result = result * float64(factor)

	return uint64(result)
}

func (b *Bcache) gatherBcache(bdev string, acc telegraf.Accumulator) error {
	tags := getTags(bdev)
	metrics, err := filepath.Glob(bdev + "/stats_total/*")
	if err != nil {
		return err
	}
	if len(metrics) == 0 {
		return errors.New("Can't read any stats file")
	}
	file, err := ioutil.ReadFile(bdev + "/dirty_data")
	if err != nil {
		return err
	}
	rawValue := strings.TrimSpace(string(file))
	value := prettyToBytes(rawValue)

	fields := make(map[string]interface{})
	fields["dirty_data"] = value

	for _, path := range metrics {
		key := filepath.Base(path)
		file, err := ioutil.ReadFile(path)
		if err != nil {
			return err
		}
		rawValue := strings.TrimSpace(string(file))
		if key == "bypassed" {
			value := prettyToBytes(rawValue)
			fields[key] = value
		} else {
			value, _ := strconv.ParseUint(rawValue, 10, 64)
			fields[key] = value
		}
	}
	acc.AddFields("bcache", fields, tags)
	return nil
}

func (b *Bcache) Gather(acc telegraf.Accumulator) error {
	bcacheDevsChecked := make(map[string]bool)
	var restrictDevs bool
	if len(b.BcacheDevs) != 0 {
		restrictDevs = true
		for _, bcacheDev := range b.BcacheDevs {
			bcacheDevsChecked[bcacheDev] = true
		}
	}

	bcachePath := b.BcachePath
	if len(bcachePath) == 0 {
		bcachePath = "/sys/fs/bcache"
	}
	bdevs, _ := filepath.Glob(bcachePath + "/*/bdev*")
	if len(bdevs) < 1 {
		return errors.New("Can't find any bcache device")
	}
	for _, bdev := range bdevs {
		if restrictDevs {
			bcacheDev := getTags(bdev)["bcache_dev"]
			if !bcacheDevsChecked[bcacheDev] {
				continue
			}
		}
		b.gatherBcache(bdev, acc)
	}
	return nil
}

func init() {
	inputs.Add("bcache", func() telegraf.Input {
		return &Bcache{}
	})
}
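Worth noting how prettyToBytes interprets the sysfs values: a trailing unit letter selects a power-of-two factor, so the test fixture's "1.5G" below becomes 1.5 × 2^30 = 1610612736 bytes. A standalone sketch of the same conversion (this mirrors prettyToBytes but is not the plugin's exact code):

```go
package main

import (
	"fmt"
	"strconv"
)

// suffixToBytes strips a trailing k/M/G/T/P/E unit letter and
// scales the remaining number by the matching power of two.
func suffixToBytes(v string) uint64 {
	factors := map[string]uint64{
		"k": 1 << 10, "M": 1 << 20, "G": 1 << 30,
		"T": 1 << 40, "P": 1 << 50, "E": 1 << 60,
	}
	factor := uint64(1)
	if f, ok := factors[v[len(v)-1:]]; ok {
		factor = f
		v = v[:len(v)-1]
	}
	n, _ := strconv.ParseFloat(v, 64)
	return uint64(n * float64(factor))
}

func main() {
	fmt.Println(suffixToBytes("1.5G")) // 1610612736
}
```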
121 plugins/inputs/bcache/bcache_test.go Normal file
@@ -0,0 +1,121 @@
package bcache

import (
	"io/ioutil"
	"os"
	"testing"

	"github.com/influxdata/telegraf/testutil"
	"github.com/stretchr/testify/require"
)

const (
	dirty_data            = "1.5G"
	bypassed              = "4.7T"
	cache_bypass_hits     = "146155333"
	cache_bypass_misses   = "0"
	cache_hit_ratio       = "90"
	cache_hits            = "511469583"
	cache_miss_collisions = "157567"
	cache_misses          = "50616331"
	cache_readaheads      = "2"
)

var (
	testBcachePath           = os.TempDir() + "/telegraf/sys/fs/bcache"
	testBcacheUuidPath       = testBcachePath + "/663955a3-765a-4737-a9fd-8250a7a78411"
	testBcacheDevPath        = os.TempDir() + "/telegraf/sys/devices/virtual/block/bcache0"
	testBcacheBackingDevPath = os.TempDir() + "/telegraf/sys/devices/virtual/block/md10"
)

func TestBcacheGeneratesMetrics(t *testing.T) {
	err := os.MkdirAll(testBcacheUuidPath, 0755)
	require.NoError(t, err)

	err = os.MkdirAll(testBcacheDevPath, 0755)
	require.NoError(t, err)

	err = os.MkdirAll(testBcacheBackingDevPath+"/bcache", 0755)
	require.NoError(t, err)

	err = os.Symlink(testBcacheBackingDevPath+"/bcache", testBcacheUuidPath+"/bdev0")
	require.NoError(t, err)

	err = os.Symlink(testBcacheDevPath, testBcacheUuidPath+"/bdev0/dev")
	require.NoError(t, err)

	err = os.MkdirAll(testBcacheUuidPath+"/bdev0/stats_total", 0755)
	require.NoError(t, err)

	err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/dirty_data",
		[]byte(dirty_data), 0644)
	require.NoError(t, err)

	err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/bypassed",
		[]byte(bypassed), 0644)
	require.NoError(t, err)

	err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_bypass_hits",
		[]byte(cache_bypass_hits), 0644)
	require.NoError(t, err)

	err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_bypass_misses",
		[]byte(cache_bypass_misses), 0644)
	require.NoError(t, err)

	err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_hit_ratio",
		[]byte(cache_hit_ratio), 0644)
	require.NoError(t, err)

	err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_hits",
		[]byte(cache_hits), 0644)
	require.NoError(t, err)

	err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_miss_collisions",
		[]byte(cache_miss_collisions), 0644)
	require.NoError(t, err)

	err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_misses",
		[]byte(cache_misses), 0644)
	require.NoError(t, err)

	err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_readaheads",
		[]byte(cache_readaheads), 0644)
	require.NoError(t, err)

	fields := map[string]interface{}{
		"dirty_data":            uint64(1610612736),
		"bypassed":              uint64(5167704440832),
		"cache_bypass_hits":     uint64(146155333),
		"cache_bypass_misses":   uint64(0),
		"cache_hit_ratio":       uint64(90),
		"cache_hits":            uint64(511469583),
		"cache_miss_collisions": uint64(157567),
		"cache_misses":          uint64(50616331),
		"cache_readaheads":      uint64(2),
	}

	tags := map[string]string{
		"backing_dev": "md10",
		"bcache_dev":  "bcache0",
	}

	var acc testutil.Accumulator

	// all devs
	b := &Bcache{BcachePath: testBcachePath}

	err = b.Gather(&acc)
	require.NoError(t, err)
	acc.AssertContainsTaggedFields(t, "bcache", fields, tags)

	// one existing dev
	b = &Bcache{BcachePath: testBcachePath, BcacheDevs: []string{"bcache0"}}

	err = b.Gather(&acc)
	require.NoError(t, err)
	acc.AssertContainsTaggedFields(t, "bcache", fields, tags)

	err = os.RemoveAll(os.TempDir() + "/telegraf")
	require.NoError(t, err)
}
255 plugins/inputs/couchdb/README.md Normal file
@@ -0,0 +1,255 @@
|
||||
# CouchDB Input Plugin
|
||||
---
|
||||
|
||||
The CouchDB plugin gathers metrics of CouchDB using [_stats](http://docs.couchdb.org/en/1.6.1/api/server/common.html?highlight=stats#get--_stats) endpoint.
|
||||
|
||||
### Configuration:
|
||||
|
||||
```
|
||||
# Sample Config:
|
||||
[[inputs.couchdb]]
|
||||
hosts = ["http://localhost:5984/_stats"]
|
||||
```
|
||||
|
||||
### Measurements & Fields:
|
||||
|
||||
Statistics specific to the internals of CouchDB:
|
||||
|
||||
- couchdb_auth_cache_misses
|
||||
- couchdb_database_writes
|
||||
- couchdb_open_databases
|
||||
- couchdb_auth_cache_hits
|
||||
- couchdb_request_time
|
||||
- couchdb_database_reads
|
||||
- couchdb_open_os_files
|
||||
|
||||
Statistics of HTTP requests by method:
|
||||
|
||||
- httpd_request_methods_put
|
||||
- httpd_request_methods_get
|
||||
- httpd_request_methods_copy
|
||||
- httpd_request_methods_delete
|
||||
- httpd_request_methods_post
|
||||
- httpd_request_methods_head
|
||||
|
||||
Statistics of HTTP requests by response code:
|
||||
|
||||
- httpd_status_codes_200
|
||||
- httpd_status_codes_201
|
||||
- httpd_status_codes_202
|
||||
- httpd_status_codes_301
|
||||
- httpd_status_codes_304
|
||||
- httpd_status_codes_400
|
||||
- httpd_status_codes_401
|
||||
- httpd_status_codes_403
|
||||
- httpd_status_codes_404
|
||||
- httpd_status_codes_405
|
||||
- httpd_status_codes_409
|
||||
- httpd_status_codes_412
|
||||
- httpd_status_codes_500
|
||||
|
||||
httpd statistics:
|
||||
|
||||
- httpd_clients_requesting_changes
|
||||
- httpd_temporary_view_reads
|
||||
- httpd_requests
|
||||
- httpd_bulk_requests
|
||||
- httpd_view_reads
|
||||
|
||||
### Tags:
|
||||
|
||||
- server (url of the couchdb _stats endpoint)
|
||||
|
||||
### Example output:
|
||||
|
||||
```
|
||||
➜ telegraf git:(master) ✗ ./telegraf -config ./config.conf -input-filter couchdb -test
|
||||
* Plugin: couchdb,
|
||||
Collection 1
|
||||
> couchdb,server=http://localhost:5984/_stats couchdb_auth_cache_hits_current=0,
|
||||
couchdb_auth_cache_hits_max=0,
|
||||
couchdb_auth_cache_hits_mean=0,
|
||||
couchdb_auth_cache_hits_min=0,
|
||||
couchdb_auth_cache_hits_stddev=0,
|
||||
couchdb_auth_cache_hits_sum=0,
|
||||
couchdb_auth_cache_misses_current=0,
|
||||
couchdb_auth_cache_misses_max=0,
|
||||
couchdb_auth_cache_misses_mean=0,
|
||||
couchdb_auth_cache_misses_min=0,
|
||||
couchdb_auth_cache_misses_stddev=0,
|
||||
couchdb_auth_cache_misses_sum=0,
|
||||
couchdb_database_reads_current=0,
|
||||
couchdb_database_reads_max=0,
|
||||
couchdb_database_reads_mean=0,
|
||||
couchdb_database_reads_min=0,
|
||||
couchdb_database_reads_stddev=0,
|
||||
couchdb_database_reads_sum=0,
|
||||
couchdb_database_writes_current=1102,
|
||||
couchdb_database_writes_max=131,
|
||||
couchdb_database_writes_mean=0.116,
|
||||
couchdb_database_writes_min=0,
|
||||
couchdb_database_writes_stddev=3.536,
|
||||
couchdb_database_writes_sum=1102,
|
||||
couchdb_open_databases_current=1,
|
||||
couchdb_open_databases_max=1,
|
||||
couchdb_open_databases_mean=0,
|
||||
couchdb_open_databases_min=0,
|
||||
couchdb_open_databases_stddev=0.01,
|
||||
couchdb_open_databases_sum=1,
|
||||
couchdb_open_os_files_current=2,
|
||||
couchdb_open_os_files_max=2,
|
||||
couchdb_open_os_files_mean=0,
|
||||
couchdb_open_os_files_min=0,
|
||||
couchdb_open_os_files_stddev=0.02,
|
||||
couchdb_open_os_files_sum=2,
|
||||
couchdb_request_time_current=242.21,
|
||||
couchdb_request_time_max=102,
|
||||
couchdb_request_time_mean=5.767,
|
||||
couchdb_request_time_min=1,
|
||||
couchdb_request_time_stddev=17.369,
|
||||
couchdb_request_time_sum=242.21,
|
||||
httpd_bulk_requests_current=0,
|
||||
httpd_bulk_requests_max=0,
|
||||
httpd_bulk_requests_mean=0,
|
||||
httpd_bulk_requests_min=0,
|
||||
httpd_bulk_requests_stddev=0,
|
||||
httpd_bulk_requests_sum=0,
|
||||
httpd_clients_requesting_changes_current=0,
|
||||
httpd_clients_requesting_changes_max=0,
|
||||
httpd_clients_requesting_changes_mean=0,
|
||||
httpd_clients_requesting_changes_min=0,
|
||||
httpd_clients_requesting_changes_stddev=0,
|
||||
httpd_clients_requesting_changes_sum=0,
|
||||
httpd_request_methods_copy_current=0,
|
||||
httpd_request_methods_copy_max=0,
|
||||
httpd_request_methods_copy_mean=0,
|
||||
httpd_request_methods_copy_min=0,
|
||||
httpd_request_methods_copy_stddev=0,
|
||||
httpd_request_methods_copy_sum=0,
|
||||
httpd_request_methods_delete_current=0,
|
||||
httpd_request_methods_delete_max=0,
|
||||
httpd_request_methods_delete_mean=0,
|
||||
httpd_request_methods_delete_min=0,
|
||||
httpd_request_methods_delete_stddev=0,
|
||||
httpd_request_methods_delete_sum=0,
|
||||
httpd_request_methods_get_current=31,
|
||||
httpd_request_methods_get_max=1,
|
||||
httpd_request_methods_get_mean=0.003,
|
||||
httpd_request_methods_get_min=0,
|
||||
httpd_request_methods_get_stddev=0.057,
|
||||
httpd_request_methods_get_sum=31,
|
||||
httpd_request_methods_head_current=0,
|
||||
httpd_request_methods_head_max=0,
|
||||
httpd_request_methods_head_mean=0,
|
||||
httpd_request_methods_head_min=0,
|
||||
httpd_request_methods_head_stddev=0,
|
||||
httpd_request_methods_head_sum=0,
|
||||
httpd_request_methods_post_current=1102,
|
||||
httpd_request_methods_post_max=131,
|
||||
httpd_request_methods_post_mean=0.116,
|
||||
httpd_request_methods_post_min=0,
|
||||
httpd_request_methods_post_stddev=3.536,
|
||||
httpd_request_methods_post_sum=1102,
|
||||
httpd_request_methods_put_current=1,
|
||||
httpd_request_methods_put_max=1,
|
||||
httpd_request_methods_put_mean=0,
|
||||
httpd_request_methods_put_min=0,
|
||||
httpd_request_methods_put_stddev=0.01,
|
||||
httpd_request_methods_put_sum=1,
|
||||
httpd_requests_current=1133,
|
||||
httpd_requests_max=130,
|
||||
httpd_requests_mean=0.118,
|
||||
httpd_requests_min=0,
httpd_requests_stddev=3.512,
httpd_requests_sum=1133,
httpd_status_codes_200_current=31,
httpd_status_codes_200_max=1,
httpd_status_codes_200_mean=0.003,
httpd_status_codes_200_min=0,
httpd_status_codes_200_stddev=0.057,
httpd_status_codes_200_sum=31,
httpd_status_codes_201_current=1103,
httpd_status_codes_201_max=130,
httpd_status_codes_201_mean=0.116,
httpd_status_codes_201_min=0,
httpd_status_codes_201_stddev=3.532,
httpd_status_codes_201_sum=1103,
httpd_status_codes_202_current=0,
httpd_status_codes_202_max=0,
httpd_status_codes_202_mean=0,
httpd_status_codes_202_min=0,
httpd_status_codes_202_stddev=0,
httpd_status_codes_202_sum=0,
httpd_status_codes_301_current=0,
httpd_status_codes_301_max=0,
httpd_status_codes_301_mean=0,
httpd_status_codes_301_min=0,
httpd_status_codes_301_stddev=0,
httpd_status_codes_301_sum=0,
httpd_status_codes_304_current=0,
httpd_status_codes_304_max=0,
httpd_status_codes_304_mean=0,
httpd_status_codes_304_min=0,
httpd_status_codes_304_stddev=0,
httpd_status_codes_304_sum=0,
httpd_status_codes_400_current=0,
httpd_status_codes_400_max=0,
httpd_status_codes_400_mean=0,
httpd_status_codes_400_min=0,
httpd_status_codes_400_stddev=0,
httpd_status_codes_400_sum=0,
httpd_status_codes_401_current=0,
httpd_status_codes_401_max=0,
httpd_status_codes_401_mean=0,
httpd_status_codes_401_min=0,
httpd_status_codes_401_stddev=0,
httpd_status_codes_401_sum=0,
httpd_status_codes_403_current=0,
httpd_status_codes_403_max=0,
httpd_status_codes_403_mean=0,
httpd_status_codes_403_min=0,
httpd_status_codes_403_stddev=0,
httpd_status_codes_403_sum=0,
httpd_status_codes_404_current=0,
httpd_status_codes_404_max=0,
httpd_status_codes_404_mean=0,
httpd_status_codes_404_min=0,
httpd_status_codes_404_stddev=0,
httpd_status_codes_404_sum=0,
httpd_status_codes_405_current=0,
httpd_status_codes_405_max=0,
httpd_status_codes_405_mean=0,
httpd_status_codes_405_min=0,
httpd_status_codes_405_stddev=0,
httpd_status_codes_405_sum=0,
httpd_status_codes_409_current=0,
httpd_status_codes_409_max=0,
httpd_status_codes_409_mean=0,
httpd_status_codes_409_min=0,
httpd_status_codes_409_stddev=0,
httpd_status_codes_409_sum=0,
httpd_status_codes_412_current=0,
httpd_status_codes_412_max=0,
httpd_status_codes_412_mean=0,
httpd_status_codes_412_min=0,
httpd_status_codes_412_stddev=0,
httpd_status_codes_412_sum=0,
httpd_status_codes_500_current=0,
httpd_status_codes_500_max=0,
httpd_status_codes_500_mean=0,
httpd_status_codes_500_min=0,
httpd_status_codes_500_stddev=0,
httpd_status_codes_500_sum=0,
httpd_temporary_view_reads_current=0,
httpd_temporary_view_reads_max=0,
httpd_temporary_view_reads_mean=0,
httpd_temporary_view_reads_min=0,
httpd_temporary_view_reads_stddev=0,
httpd_temporary_view_reads_sum=0,
httpd_view_reads_current=0,
httpd_view_reads_max=0,
httpd_view_reads_mean=0,
httpd_view_reads_min=0,
httpd_view_reads_stddev=0,
httpd_view_reads_sum=0 1454692257621938169
```
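Each stats object CouchDB reports carries six aggregates (current, sum, mean, stddev, min, max), and couchdb.go below mechanically expands every object into the six suffixed fields seen above. A minimal standalone sketch of that expansion, with hypothetical sample values:

```
package main

import "fmt"

// statObject mirrors the six aggregates CouchDB reports per stat.
type statObject struct {
	Current, Sum, Mean, Stddev, Min, Max float64
}

// expand maps one stat object to six prefix-suffixed fields, the same
// shape generateFields produces in the plugin below.
func expand(prefix string, s statObject) map[string]interface{} {
	return map[string]interface{}{
		prefix + "_current": s.Current,
		prefix + "_sum":     s.Sum,
		prefix + "_mean":    s.Mean,
		prefix + "_stddev":  s.Stddev,
		prefix + "_min":     s.Min,
		prefix + "_max":     s.Max,
	}
}

func main() {
	// Hypothetical values matching the httpd_requests fields above.
	fields := expand("httpd_requests", statObject{Sum: 1133, Stddev: 3.512})
	fmt.Println(fields["httpd_requests_sum"], fields["httpd_requests_stddev"])
}
```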
205 plugins/inputs/couchdb/couchdb.go Normal file
@@ -0,0 +1,205 @@
package couchdb

import (
	"encoding/json"
	"errors"
	"fmt"
	"net/http"
	"reflect"
	"strings"
	"sync"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/inputs"
)

// metaData mirrors one stats object from the CouchDB /_stats endpoint:
// a description plus six aggregate values.
type metaData struct {
	Description string  `json:"description"`
	Current     float64 `json:"current"`
	Sum         float64 `json:"sum"`
	Mean        float64 `json:"mean"`
	Stddev      float64 `json:"stddev"`
	Min         float64 `json:"min"`
	Max         float64 `json:"max"`
}

type Stats struct {
	Couchdb struct {
		AuthCacheMisses metaData `json:"auth_cache_misses"`
		DatabaseWrites  metaData `json:"database_writes"`
		OpenDatabases   metaData `json:"open_databases"`
		AuthCacheHits   metaData `json:"auth_cache_hits"`
		RequestTime     metaData `json:"request_time"`
		DatabaseReads   metaData `json:"database_reads"`
		OpenOsFiles     metaData `json:"open_os_files"`
	} `json:"couchdb"`
	HttpdRequestMethods struct {
		Put    metaData `json:"PUT"`
		Get    metaData `json:"GET"`
		Copy   metaData `json:"COPY"`
		Delete metaData `json:"DELETE"`
		Post   metaData `json:"POST"`
		Head   metaData `json:"HEAD"`
	} `json:"httpd_request_methods"`
	HttpdStatusCodes struct {
		Status200 metaData `json:"200"`
		Status201 metaData `json:"201"`
		Status202 metaData `json:"202"`
		Status301 metaData `json:"301"`
		Status304 metaData `json:"304"`
		Status400 metaData `json:"400"`
		Status401 metaData `json:"401"`
		Status403 metaData `json:"403"`
		Status404 metaData `json:"404"`
		Status405 metaData `json:"405"`
		Status409 metaData `json:"409"`
		Status412 metaData `json:"412"`
		Status500 metaData `json:"500"`
	} `json:"httpd_status_codes"`
	Httpd struct {
		ClientsRequestingChanges metaData `json:"clients_requesting_changes"`
		TemporaryViewReads       metaData `json:"temporary_view_reads"`
		Requests                 metaData `json:"requests"`
		BulkRequests             metaData `json:"bulk_requests"`
		ViewReads                metaData `json:"view_reads"`
	} `json:"httpd"`
}

type CouchDB struct {
	HOSTs []string `toml:"hosts"`
}

func (*CouchDB) Description() string {
	return "Read CouchDB Stats from one or more servers"
}

func (*CouchDB) SampleConfig() string {
	return `
  ## Works with CouchDB stats endpoints out of the box
  ## Multiple HOSTs from which to read CouchDB stats:
  hosts = ["http://localhost:5984/_stats"]
`
}

// Gather queries every configured host concurrently and joins any
// failures into a single error.
func (c *CouchDB) Gather(accumulator telegraf.Accumulator) error {
	errorChannel := make(chan error, len(c.HOSTs))
	var wg sync.WaitGroup
	for _, u := range c.HOSTs {
		wg.Add(1)
		go func(host string) {
			defer wg.Done()
			if err := c.fetchAndInsertData(accumulator, host); err != nil {
				errorChannel <- fmt.Errorf("[host=%s]: %s", host, err)
			}
		}(u)
	}

	wg.Wait()
	close(errorChannel)

	// If there weren't any errors, we can return nil now.
	if len(errorChannel) == 0 {
		return nil
	}

	// There were errors, so join them all together as one big error.
	errorStrings := make([]string, 0, len(errorChannel))
	for err := range errorChannel {
		errorStrings = append(errorStrings, err.Error())
	}

	return errors.New(strings.Join(errorStrings, "\n"))
}

func (c *CouchDB) fetchAndInsertData(accumulator telegraf.Accumulator, host string) error {
	response, err := http.Get(host)
	if err != nil {
		return err
	}
	defer response.Body.Close()

	var stats Stats
	if err := json.NewDecoder(response.Body).Decode(&stats); err != nil {
		return err
	}

	fields := map[string]interface{}{}

	// CouchDB meta stats:
	c.MapCopy(fields, c.generateFields("couchdb_auth_cache_misses", stats.Couchdb.AuthCacheMisses))
	c.MapCopy(fields, c.generateFields("couchdb_database_writes", stats.Couchdb.DatabaseWrites))
	c.MapCopy(fields, c.generateFields("couchdb_open_databases", stats.Couchdb.OpenDatabases))
	c.MapCopy(fields, c.generateFields("couchdb_auth_cache_hits", stats.Couchdb.AuthCacheHits))
	c.MapCopy(fields, c.generateFields("couchdb_request_time", stats.Couchdb.RequestTime))
	c.MapCopy(fields, c.generateFields("couchdb_database_reads", stats.Couchdb.DatabaseReads))
	c.MapCopy(fields, c.generateFields("couchdb_open_os_files", stats.Couchdb.OpenOsFiles))

	// http request methods stats:
	c.MapCopy(fields, c.generateFields("httpd_request_methods_put", stats.HttpdRequestMethods.Put))
	c.MapCopy(fields, c.generateFields("httpd_request_methods_get", stats.HttpdRequestMethods.Get))
	c.MapCopy(fields, c.generateFields("httpd_request_methods_copy", stats.HttpdRequestMethods.Copy))
	c.MapCopy(fields, c.generateFields("httpd_request_methods_delete", stats.HttpdRequestMethods.Delete))
	c.MapCopy(fields, c.generateFields("httpd_request_methods_post", stats.HttpdRequestMethods.Post))
	c.MapCopy(fields, c.generateFields("httpd_request_methods_head", stats.HttpdRequestMethods.Head))

	// status code stats:
	c.MapCopy(fields, c.generateFields("httpd_status_codes_200", stats.HttpdStatusCodes.Status200))
	c.MapCopy(fields, c.generateFields("httpd_status_codes_201", stats.HttpdStatusCodes.Status201))
	c.MapCopy(fields, c.generateFields("httpd_status_codes_202", stats.HttpdStatusCodes.Status202))
	c.MapCopy(fields, c.generateFields("httpd_status_codes_301", stats.HttpdStatusCodes.Status301))
	c.MapCopy(fields, c.generateFields("httpd_status_codes_304", stats.HttpdStatusCodes.Status304))
	c.MapCopy(fields, c.generateFields("httpd_status_codes_400", stats.HttpdStatusCodes.Status400))
	c.MapCopy(fields, c.generateFields("httpd_status_codes_401", stats.HttpdStatusCodes.Status401))
	c.MapCopy(fields, c.generateFields("httpd_status_codes_403", stats.HttpdStatusCodes.Status403))
	c.MapCopy(fields, c.generateFields("httpd_status_codes_404", stats.HttpdStatusCodes.Status404))
	c.MapCopy(fields, c.generateFields("httpd_status_codes_405", stats.HttpdStatusCodes.Status405))
	c.MapCopy(fields, c.generateFields("httpd_status_codes_409", stats.HttpdStatusCodes.Status409))
	c.MapCopy(fields, c.generateFields("httpd_status_codes_412", stats.HttpdStatusCodes.Status412))
	c.MapCopy(fields, c.generateFields("httpd_status_codes_500", stats.HttpdStatusCodes.Status500))

	// httpd stats:
	c.MapCopy(fields, c.generateFields("httpd_clients_requesting_changes", stats.Httpd.ClientsRequestingChanges))
	c.MapCopy(fields, c.generateFields("httpd_temporary_view_reads", stats.Httpd.TemporaryViewReads))
	c.MapCopy(fields, c.generateFields("httpd_requests", stats.Httpd.Requests))
	c.MapCopy(fields, c.generateFields("httpd_bulk_requests", stats.Httpd.BulkRequests))
	c.MapCopy(fields, c.generateFields("httpd_view_reads", stats.Httpd.ViewReads))

	tags := map[string]string{
		"server": host,
	}
	accumulator.AddFields("couchdb", fields, tags)
	return nil
}

// MapCopy copies every key/value pair from the src map into the dst map.
func (*CouchDB) MapCopy(dst, src interface{}) {
	dv, sv := reflect.ValueOf(dst), reflect.ValueOf(src)
	for _, k := range sv.MapKeys() {
		dv.SetMapIndex(k, sv.MapIndex(k))
	}
}

// safeCheck replaces a nil value with 0.0 so every field stays numeric.
func (*CouchDB) safeCheck(value interface{}) interface{} {
	if value == nil {
		return 0.0
	}
	return value
}

// generateFields expands one stats object into six suffixed fields.
func (c *CouchDB) generateFields(prefix string, obj metaData) map[string]interface{} {
	fields := map[string]interface{}{
		prefix + "_current": c.safeCheck(obj.Current),
		prefix + "_sum":     c.safeCheck(obj.Sum),
		prefix + "_mean":    c.safeCheck(obj.Mean),
		prefix + "_stddev":  c.safeCheck(obj.Stddev),
		prefix + "_min":     c.safeCheck(obj.Min),
		prefix + "_max":     c.safeCheck(obj.Max),
	}
	return fields
}

func init() {
	inputs.Add("couchdb", func() telegraf.Input {
		return &CouchDB{}
	})
}
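Gather above fans one goroutine out per host and funnels failures through a buffered channel before joining them into a single error. A minimal standalone sketch of that fan-out pattern in isolation (the fetch function and host names are placeholders, not part of the plugin):

```
package main

import (
	"errors"
	"fmt"
	"strings"
	"sync"
)

// fetch stands in for fetchAndInsertData; it is a placeholder.
func fetch(host string) error {
	if host == "bad" {
		return fmt.Errorf("[host=%s]: unreachable", host)
	}
	return nil
}

func gather(hosts []string) error {
	// Buffered so every goroutine can report without blocking.
	errc := make(chan error, len(hosts))
	var wg sync.WaitGroup
	for _, h := range hosts {
		wg.Add(1)
		go func(host string) {
			defer wg.Done()
			if err := fetch(host); err != nil {
				errc <- err
			}
		}(h)
	}
	wg.Wait()
	close(errc)

	if len(errc) == 0 {
		return nil
	}
	// Join all failures into one error, as the plugin does.
	msgs := make([]string, 0, len(errc))
	for err := range errc {
		msgs = append(msgs, err.Error())
	}
	return errors.New(strings.Join(msgs, "\n"))
}

func main() {
	fmt.Println(gather([]string{"good", "bad"}))
}
```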
320 plugins/inputs/couchdb/couchdb_test.go Normal file
@@ -0,0 +1,320 @@
package couchdb_test

import (
	"net/http"
	"net/http/httptest"
	"testing"

	"github.com/influxdata/telegraf/plugins/inputs/couchdb"
	"github.com/influxdata/telegraf/testutil"
	"github.com/stretchr/testify/require"
)

func TestBasic(t *testing.T) {
	js := `
{
  "couchdb": {
    "auth_cache_misses": {
      "description": "number of authentication cache misses",
      "current": null,
      "sum": null,
      "mean": null,
      "stddev": null,
      "min": null,
      "max": null
    },
    "database_writes": {
      "description": "number of times a database was changed",
      "current": null,
      "sum": null,
      "mean": null,
      "stddev": null,
      "min": null,
      "max": null
    },
    "open_databases": {
      "description": "number of open databases",
      "current": null,
      "sum": null,
      "mean": null,
      "stddev": null,
      "min": null,
      "max": null
    },
    "auth_cache_hits": {
      "description": "number of authentication cache hits",
      "current": null,
      "sum": null,
      "mean": null,
      "stddev": null,
      "min": null,
      "max": null
    },
    "request_time": {
      "description": "length of a request inside CouchDB without MochiWeb",
      "current": 18.0,
      "sum": 18.0,
      "mean": 18.0,
      "stddev": null,
      "min": 18.0,
      "max": 18.0
    },
    "database_reads": {
      "description": "number of times a document was read from a database",
      "current": null,
      "sum": null,
      "mean": null,
      "stddev": null,
      "min": null,
      "max": null
    },
    "open_os_files": {
      "description": "number of file descriptors CouchDB has open",
      "current": null,
      "sum": null,
      "mean": null,
      "stddev": null,
      "min": null,
      "max": null
    }
  },
  "httpd_request_methods": {
    "PUT": {
      "description": "number of HTTP PUT requests",
      "current": null,
      "sum": null,
      "mean": null,
      "stddev": null,
      "min": null,
      "max": null
    },
    "GET": {
      "description": "number of HTTP GET requests",
      "current": 2.0,
      "sum": 2.0,
      "mean": 0.25,
      "stddev": 0.70699999999999996181,
      "min": 0,
      "max": 2
    },
    "COPY": {
      "description": "number of HTTP COPY requests",
      "current": null,
      "sum": null,
      "mean": null,
      "stddev": null,
      "min": null,
      "max": null
    },
    "DELETE": {
      "description": "number of HTTP DELETE requests",
      "current": null,
      "sum": null,
      "mean": null,
      "stddev": null,
      "min": null,
      "max": null
    },
    "POST": {
      "description": "number of HTTP POST requests",
      "current": null,
      "sum": null,
      "mean": null,
      "stddev": null,
      "min": null,
      "max": null
    },
    "HEAD": {
      "description": "number of HTTP HEAD requests",
      "current": null,
      "sum": null,
      "mean": null,
      "stddev": null,
      "min": null,
      "max": null
    }
  },
  "httpd_status_codes": {
    "403": {
      "description": "number of HTTP 403 Forbidden responses",
      "current": null,
      "sum": null,
      "mean": null,
      "stddev": null,
      "min": null,
      "max": null
    },
    "202": {
      "description": "number of HTTP 202 Accepted responses",
      "current": null,
      "sum": null,
      "mean": null,
      "stddev": null,
      "min": null,
      "max": null
    },
    "401": {
      "description": "number of HTTP 401 Unauthorized responses",
      "current": null,
      "sum": null,
      "mean": null,
      "stddev": null,
      "min": null,
      "max": null
    },
    "409": {
      "description": "number of HTTP 409 Conflict responses",
      "current": null,
      "sum": null,
      "mean": null,
      "stddev": null,
      "min": null,
      "max": null
    },
    "200": {
      "description": "number of HTTP 200 OK responses",
      "current": 1.0,
      "sum": 1.0,
      "mean": 0.125,
      "stddev": 0.35399999999999998135,
      "min": 0,
      "max": 1
    },
    "405": {
      "description": "number of HTTP 405 Method Not Allowed responses",
      "current": null,
      "sum": null,
      "mean": null,
      "stddev": null,
      "min": null,
      "max": null
    },
    "400": {
      "description": "number of HTTP 400 Bad Request responses",
      "current": null,
      "sum": null,
      "mean": null,
      "stddev": null,
      "min": null,
      "max": null
    },
    "201": {
      "description": "number of HTTP 201 Created responses",
      "current": null,
      "sum": null,
      "mean": null,
      "stddev": null,
      "min": null,
      "max": null
    },
    "404": {
      "description": "number of HTTP 404 Not Found responses",
      "current": null,
      "sum": null,
      "mean": null,
      "stddev": null,
      "min": null,
      "max": null
    },
    "500": {
      "description": "number of HTTP 500 Internal Server Error responses",
      "current": null,
      "sum": null,
      "mean": null,
      "stddev": null,
      "min": null,
      "max": null
    },
    "412": {
      "description": "number of HTTP 412 Precondition Failed responses",
      "current": null,
      "sum": null,
      "mean": null,
      "stddev": null,
      "min": null,
      "max": null
    },
    "301": {
      "description": "number of HTTP 301 Moved Permanently responses",
      "current": null,
      "sum": null,
      "mean": null,
      "stddev": null,
      "min": null,
      "max": null
    },
    "304": {
      "description": "number of HTTP 304 Not Modified responses",
      "current": null,
      "sum": null,
      "mean": null,
      "stddev": null,
      "min": null,
      "max": null
    }
  },
  "httpd": {
    "clients_requesting_changes": {
      "description": "number of clients for continuous _changes",
      "current": null,
      "sum": null,
      "mean": null,
      "stddev": null,
      "min": null,
      "max": null
    },
    "temporary_view_reads": {
      "description": "number of temporary view reads",
      "current": null,
      "sum": null,
      "mean": null,
      "stddev": null,
      "min": null,
      "max": null
    },
    "requests": {
      "description": "number of HTTP requests",
      "current": 2.0,
      "sum": 2.0,
      "mean": 0.25,
      "stddev": 0.70699999999999996181,
      "min": 0,
      "max": 2
    },
    "bulk_requests": {
      "description": "number of bulk requests",
      "current": null,
      "sum": null,
      "mean": null,
      "stddev": null,
      "min": null,
      "max": null
    },
    "view_reads": {
      "description": "number of view reads",
      "current": null,
      "sum": null,
      "mean": null,
      "stddev": null,
      "min": null,
      "max": null
    }
  }
}
`
	// Serve the fixture from a fake /_stats endpoint.
	fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Path == "/_stats" {
			_, _ = w.Write([]byte(js))
		} else {
			w.WriteHeader(http.StatusNotFound)
		}
	}))
	defer fakeServer.Close()

	plugin := &couchdb.CouchDB{
		HOSTs: []string{fakeServer.URL + "/_stats"},
	}

	var acc testutil.Accumulator
	require.NoError(t, plugin.Gather(&acc))
}
205 plugins/inputs/disque/disque.go Normal file
@@ -0,0 +1,205 @@
package disque

import (
	"bufio"
	"errors"
	"fmt"
	"net"
	"net/url"
	"strconv"
	"strings"
	"sync"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/inputs"
)

type Disque struct {
	Servers []string

	c   net.Conn
	buf []byte
}

var sampleConfig = `
  ## An array of URI to gather stats about. Specify an ip or hostname
  ## with optional port and password. ie disque://localhost, disque://10.10.3.33:18832,
  ## 10.0.0.1:10000, etc.

  ## If no servers are specified, then localhost is used as the host.
  servers = ["localhost"]
`

func (r *Disque) SampleConfig() string {
	return sampleConfig
}

func (r *Disque) Description() string {
	return "Read metrics from one or many disque servers"
}

var Tracking = map[string]string{
	"uptime_in_seconds":          "uptime",
	"connected_clients":          "clients",
	"blocked_clients":            "blocked_clients",
	"used_memory":                "used_memory",
	"used_memory_rss":            "used_memory_rss",
	"used_memory_peak":           "used_memory_peak",
	"total_connections_received": "total_connections_received",
	"total_commands_processed":   "total_commands_processed",
	"instantaneous_ops_per_sec":  "instantaneous_ops_per_sec",
	"latest_fork_usec":           "latest_fork_usec",
	"mem_fragmentation_ratio":    "mem_fragmentation_ratio",
	"used_cpu_sys":               "used_cpu_sys",
	"used_cpu_user":              "used_cpu_user",
	"used_cpu_sys_children":      "used_cpu_sys_children",
	"used_cpu_user_children":     "used_cpu_user_children",
	"registered_jobs":            "registered_jobs",
	"registered_queues":          "registered_queues",
}

var ErrProtocolError = errors.New("disque protocol error")

// Gather reads stats from all configured servers and accumulates them.
// It returns one of the errors encountered while gathering stats (if any).
func (g *Disque) Gather(acc telegraf.Accumulator) error {
	if len(g.Servers) == 0 {
		// No servers configured; fall back to localhost on the default port.
		u := &url.URL{
			Host: ":7711",
		}
		return g.gatherServer(u, acc)
	}

	var wg sync.WaitGroup

	// Note: if several servers fail, only the last error is kept.
	var outerr error

	for _, serv := range g.Servers {
		u, err := url.Parse(serv)
		if err != nil {
			return fmt.Errorf("unable to parse address '%s': %s", serv, err)
		} else if u.Scheme == "" {
			// fallback to simple string based address (i.e. "10.0.0.1:10000")
			u.Scheme = "tcp"
			u.Host = serv
			u.Path = ""
		}
		wg.Add(1)
		go func(serv string) {
			defer wg.Done()
			outerr = g.gatherServer(u, acc)
		}(serv)
	}

	wg.Wait()

	return outerr
}

const defaultPort = "7711"

func (g *Disque) gatherServer(addr *url.URL, acc telegraf.Accumulator) error {
	if g.c == nil {

		_, _, err := net.SplitHostPort(addr.Host)
		if err != nil {
			// No port given; append the default one.
			addr.Host = addr.Host + ":" + defaultPort
		}

		c, err := net.Dial("tcp", addr.Host)
		if err != nil {
			return fmt.Errorf("unable to connect to disque server '%s': %s", addr.Host, err)
		}

		if addr.User != nil {
			pwd, set := addr.User.Password()
			if set && pwd != "" {
				c.Write([]byte(fmt.Sprintf("AUTH %s\r\n", pwd)))

				r := bufio.NewReader(c)

				line, err := r.ReadString('\n')
				if err != nil {
					return err
				}
				// A successful AUTH is acknowledged with a "+OK" status reply.
				if line[0] != '+' {
					return fmt.Errorf("%s", strings.TrimSpace(line)[1:])
				}
			}
		}

		g.c = c
	}

	// Request the stats payload.
	g.c.Write([]byte("info\r\n"))

	r := bufio.NewReader(g.c)

	line, err := r.ReadString('\n')
	if err != nil {
		return err
	}

	// The reply is a bulk string: "$<payload size in bytes>".
	if line[0] != '$' {
		return fmt.Errorf("bad line start: %s", ErrProtocolError)
	}

	line = strings.TrimSpace(line)

	szStr := line[1:]

	sz, err := strconv.Atoi(szStr)
	if err != nil {
		return fmt.Errorf("bad size string <<%s>>: %s", szStr, ErrProtocolError)
	}

	var read int

	fields := make(map[string]interface{})
	tags := map[string]string{"host": addr.String()}
	for read < sz {
		line, err := r.ReadString('\n')
		if err != nil {
			return err
		}

		read += len(line)

		// Skip blank lines and "# Section" headers.
		if len(line) == 1 || line[0] == '#' {
			continue
		}

		parts := strings.SplitN(line, ":", 2)

		name := parts[0]

		metric, ok := Tracking[name]
		if !ok {
			continue
		}

		val := strings.TrimSpace(parts[1])

		// Try an unsigned integer first, then fall back to a float.
		ival, err := strconv.ParseUint(val, 10, 64)
		if err == nil {
			fields[metric] = ival
			continue
		}

		fval, err := strconv.ParseFloat(val, 64)
		if err != nil {
			return err
		}

		fields[metric] = fval
	}
	acc.AddFields("disque", fields, tags)
	return nil
}

func init() {
	inputs.Add("disque", func() telegraf.Input {
		return &Disque{}
	})
}
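gatherServer above speaks a small Redis-style wire protocol: the client sends `info\r\n` and the server answers with a `$<length>` bulk-string header followed by `key:value` lines. A minimal sketch of that exchange against a hypothetical local server (error handling reduced to panics for brevity):

```
package main

import (
	"bufio"
	"fmt"
	"io"
	"net"
	"strconv"
	"strings"
)

func main() {
	// Hypothetical disque instance on the default port.
	conn, err := net.Dial("tcp", "localhost:7711")
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// Request the stats payload.
	fmt.Fprint(conn, "info\r\n")

	r := bufio.NewReader(conn)
	header, err := r.ReadString('\n')
	if err != nil {
		panic(err)
	}
	// Bulk-string reply: "$<payload length in bytes>".
	if header[0] != '$' {
		panic("unexpected reply: " + header)
	}
	size, err := strconv.Atoi(strings.TrimSpace(header[1:]))
	if err != nil {
		panic(err)
	}

	// Read exactly <size> bytes of "key:value" stat lines.
	payload := make([]byte, size)
	if _, err := io.ReadFull(r, payload); err != nil {
		panic(err)
	}
	fmt.Print(string(payload))
}
```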
217 plugins/inputs/disque/disque_test.go Normal file
@@ -0,0 +1,217 @@
package disque

import (
	"bufio"
	"fmt"
	"net"
	"testing"

	"github.com/influxdata/telegraf/testutil"
	"github.com/stretchr/testify/require"
)

// startFakeDisque starts a local listener that answers the "info" command
// with testOutput, and returns its address as a disque:// URI.
func startFakeDisque(t *testing.T) (string, net.Listener) {
	l, err := net.Listen("tcp", "localhost:0")
	require.NoError(t, err)

	go func() {
		c, err := l.Accept()
		if err != nil {
			return
		}

		buf := bufio.NewReader(c)

		for {
			line, err := buf.ReadString('\n')
			if err != nil {
				return
			}

			if line != "info\r\n" {
				return
			}

			fmt.Fprintf(c, "$%d\n", len(testOutput))
			c.Write([]byte(testOutput))
		}
	}()

	return fmt.Sprintf("disque://%s", l.Addr().String()), l
}

// expectedFields is what Gather should extract from testOutput.
var expectedFields = map[string]interface{}{
	"uptime":                     uint64(1452705),
	"clients":                    uint64(31),
	"blocked_clients":            uint64(13),
	"used_memory":                uint64(1840104),
	"used_memory_rss":            uint64(3227648),
	"used_memory_peak":           uint64(89603656),
	"total_connections_received": uint64(5062777),
	"total_commands_processed":   uint64(12308396),
	"instantaneous_ops_per_sec":  uint64(18),
	"latest_fork_usec":           uint64(1644),
	"registered_jobs":            uint64(360),
	"registered_queues":          uint64(12),
	"mem_fragmentation_ratio":    float64(1.75),
	"used_cpu_sys":               float64(19585.73),
	"used_cpu_user":              float64(11255.96),
	"used_cpu_sys_children":      float64(1.75),
	"used_cpu_user_children":     float64(1.91),
}

func TestDisqueGeneratesMetrics(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}

	addr, l := startFakeDisque(t)
	defer l.Close()

	r := &Disque{
		Servers: []string{addr},
	}

	var acc testutil.Accumulator

	err := r.Gather(&acc)
	require.NoError(t, err)

	acc.AssertContainsFields(t, "disque", expectedFields)
}

func TestDisqueCanPullStatsFromMultipleServers(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}

	addr, l := startFakeDisque(t)
	defer l.Close()

	r := &Disque{
		Servers: []string{addr},
	}

	var acc testutil.Accumulator

	err := r.Gather(&acc)
	require.NoError(t, err)

	acc.AssertContainsFields(t, "disque", expectedFields)
}

const testOutput = `# Server
disque_version:0.0.1
disque_git_sha1:b5247598
disque_git_dirty:0
disque_build_id:379fda78983a60c6
os:Linux 3.13.0-44-generic x86_64
arch_bits:64
multiplexing_api:epoll
gcc_version:4.8.2
process_id:32420
run_id:1cfdfa4c6bc3f285182db5427522a8a4c16e42e4
tcp_port:7711
uptime_in_seconds:1452705
uptime_in_days:16
hz:10
config_file:/usr/local/etc/disque/disque.conf

# Clients
connected_clients:31
client_longest_output_list:0
client_biggest_input_buf:0
blocked_clients:13

# Memory
used_memory:1840104
used_memory_human:1.75M
used_memory_rss:3227648
used_memory_peak:89603656
used_memory_peak_human:85.45M
mem_fragmentation_ratio:1.75
mem_allocator:jemalloc-3.6.0

# Jobs
registered_jobs:360

# Queues
registered_queues:12

# Persistence
loading:0
aof_enabled:1
aof_state:on
aof_rewrite_in_progress:0
aof_rewrite_scheduled:0
aof_last_rewrite_time_sec:0
aof_current_rewrite_time_sec:-1
aof_last_bgrewrite_status:ok
aof_last_write_status:ok
aof_current_size:41952430
aof_base_size:9808
aof_pending_rewrite:0
aof_buffer_length:0
aof_rewrite_buffer_length:0
aof_pending_bio_fsync:0
aof_delayed_fsync:1

# Stats
total_connections_received:5062777
total_commands_processed:12308396
instantaneous_ops_per_sec:18
total_net_input_bytes:1346996528
total_net_output_bytes:1967551763
instantaneous_input_kbps:1.38
instantaneous_output_kbps:1.78
rejected_connections:0
latest_fork_usec:1644

# CPU
used_cpu_sys:19585.73
used_cpu_user:11255.96
used_cpu_sys_children:1.75
used_cpu_user_children:1.91
`
51 plugins/inputs/dns_query/README.md Normal file
@@ -0,0 +1,51 @@
# DNS Query Input Plugin

The DNS plugin gathers DNS query times in milliseconds - like [Dig](https://en.wikipedia.org/wiki/Dig_\(command\))

### Configuration:

```
# Sample Config:
[[inputs.dns_query]]
  ## servers to query
  servers = ["8.8.8.8"] # required

  ## Domains or subdomains to query. "." (root) is the default
  domains = ["."] # optional

  ## Query record type. Possible values: A, AAAA, ANY, CNAME, MX, NS, PTR, SOA, SPF, SRV, TXT. Default is "NS"
  record_type = "A" # optional

  ## DNS server port. 53 is the default
  port = 53 # optional

  ## Query timeout in seconds. Default is 2 seconds
  timeout = 2 # optional
```

To query more than one record type, define one plugin instance per record type:

```
[[inputs.dns_query]]
  domains = ["mjasion.pl"]
  servers = ["8.8.8.8", "8.8.4.4"]
  record_type = "A"

[[inputs.dns_query]]
  domains = ["mjasion.pl"]
  servers = ["8.8.8.8", "8.8.4.4"]
  record_type = "MX"
```

### Tags:

- server
- domain
- record_type

### Example output:

```
./telegraf -config telegraf.conf -input-filter dns_query -test
> dns_query,domain=mjasion.pl,record_type=A,server=8.8.8.8 query_time_ms=67.189842 1456082743585760680
```
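The `query_time_ms` value the plugin reports is the round-trip time that miekg/dns returns from a single Exchange call, which is exactly what dns_query.go below does. A minimal standalone sketch of that measurement (the server and domain here are hypothetical placeholders):

```
package main

import (
	"fmt"
	"net"
	"strconv"
	"time"

	"github.com/miekg/dns"
)

func main() {
	c := new(dns.Client)
	c.ReadTimeout = 2 * time.Second

	m := new(dns.Msg)
	m.SetQuestion(dns.Fqdn("example.org"), dns.TypeA)
	m.RecursionDesired = true

	// Exchange returns the answer plus the measured round-trip time.
	_, rtt, err := c.Exchange(m, net.JoinHostPort("8.8.8.8", strconv.Itoa(53)))
	if err != nil {
		panic(err)
	}
	fmt.Printf("query_time_ms=%f\n", float64(rtt.Nanoseconds())/1e6)
}
```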
159 plugins/inputs/dns_query/dns_query.go Normal file
@@ -0,0 +1,159 @@
package dns_query

import (
	"fmt"
	"net"
	"strconv"
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/inputs"
	"github.com/miekg/dns"
)

type DnsQuery struct {
	// Domains or subdomains to query
	Domains []string

	// Servers to query
	Servers []string

	// Record type
	RecordType string `toml:"record_type"`

	// DNS server port number
	Port int

	// DNS query timeout in seconds. 0 means no timeout
	Timeout int
}

var sampleConfig = `
  ## servers to query
  servers = ["8.8.8.8"] # required

  ## Domains or subdomains to query. "." (root) is the default
  domains = ["."] # optional

  ## Query record type. Possible values: A, AAAA, ANY, CNAME, MX, NS, PTR, SOA, SPF, SRV, TXT. Default is "NS"
  record_type = "A" # optional

  ## DNS server port. 53 is the default
  port = 53 # optional

  ## Query timeout in seconds. Default is 2 seconds
  timeout = 2 # optional
`

func (d *DnsQuery) SampleConfig() string {
	return sampleConfig
}

func (d *DnsQuery) Description() string {
	return "Query given DNS server and gives statistics"
}

func (d *DnsQuery) Gather(acc telegraf.Accumulator) error {
	d.setDefaultValues()
	for _, domain := range d.Domains {
		for _, server := range d.Servers {
			dnsQueryTime, err := d.getDnsQueryTime(domain, server)
			if err != nil {
				return err
			}
			tags := map[string]string{
				"server":      server,
				"domain":      domain,
				"record_type": d.RecordType,
			}

			fields := map[string]interface{}{"query_time_ms": dnsQueryTime}
			acc.AddFields("dns_query", fields, tags)
		}
	}

	return nil
}

func (d *DnsQuery) setDefaultValues() {
	if len(d.RecordType) == 0 {
		d.RecordType = "NS"
	}

	if len(d.Domains) == 0 {
		// Only NS queries make sense against the root zone.
		d.Domains = []string{"."}
		d.RecordType = "NS"
	}

	if d.Port == 0 {
		d.Port = 53
	}

	if d.Timeout == 0 {
		d.Timeout = 2
	}
}

func (d *DnsQuery) getDnsQueryTime(domain string, server string) (float64, error) {
	dnsQueryTime := float64(0)

	c := new(dns.Client)
	c.ReadTimeout = time.Duration(d.Timeout) * time.Second

	m := new(dns.Msg)
	recordType, err := d.parseRecordType()
	if err != nil {
		return dnsQueryTime, err
	}
	m.SetQuestion(dns.Fqdn(domain), recordType)
	m.RecursionDesired = true

	r, rtt, err := c.Exchange(m, net.JoinHostPort(server, strconv.Itoa(d.Port)))
	if err != nil {
		return dnsQueryTime, err
	}
	if r.Rcode != dns.RcodeSuccess {
		return dnsQueryTime, fmt.Errorf("invalid answer from %s after %s query for %s", server, d.RecordType, domain)
	}
	dnsQueryTime = float64(rtt.Nanoseconds()) / 1e6
	return dnsQueryTime, nil
}

func (d *DnsQuery) parseRecordType() (uint16, error) {
	var recordType uint16
	var err error

	switch d.RecordType {
	case "A":
		recordType = dns.TypeA
	case "AAAA":
		recordType = dns.TypeAAAA
	case "ANY":
		recordType = dns.TypeANY
	case "CNAME":
		recordType = dns.TypeCNAME
	case "MX":
		recordType = dns.TypeMX
	case "NS":
		recordType = dns.TypeNS
	case "PTR":
		recordType = dns.TypePTR
	case "SOA":
		recordType = dns.TypeSOA
	case "SPF":
		recordType = dns.TypeSPF
	case "SRV":
		recordType = dns.TypeSRV
	case "TXT":
		recordType = dns.TypeTXT
	default:
		err = fmt.Errorf("record type %s not recognized", d.RecordType)
	}

	return recordType, err
}

func init() {
	inputs.Add("dns_query", func() telegraf.Input {
		return &DnsQuery{}
	})
}
192 plugins/inputs/dns_query/dns_query_test.go Normal file
@@ -0,0 +1,192 @@
package dns_query

import (
	"testing"
	"time"

	"github.com/influxdata/telegraf/testutil"

	"github.com/miekg/dns"
	"github.com/stretchr/testify/assert"
)

var servers = []string{"8.8.8.8"}
var domains = []string{"google.com"}

func TestGathering(t *testing.T) {
	var dnsConfig = DnsQuery{
		Servers: servers,
		Domains: domains,
	}
	var acc testutil.Accumulator

	err := dnsConfig.Gather(&acc)
	assert.NoError(t, err)
	metric, ok := acc.Get("dns_query")
	assert.True(t, ok)
	queryTime, _ := metric.Fields["query_time_ms"].(float64)

	assert.NotEqual(t, 0, queryTime)
}

func TestGatheringMxRecord(t *testing.T) {
	var dnsConfig = DnsQuery{
		Servers: servers,
		Domains: domains,
	}
	var acc testutil.Accumulator
	dnsConfig.RecordType = "MX"

	err := dnsConfig.Gather(&acc)
	assert.NoError(t, err)
	metric, ok := acc.Get("dns_query")
	assert.True(t, ok)
	queryTime, _ := metric.Fields["query_time_ms"].(float64)

	assert.NotEqual(t, 0, queryTime)
}

func TestGatheringRootDomain(t *testing.T) {
	var dnsConfig = DnsQuery{
		Servers:    servers,
		Domains:    []string{"."},
		RecordType: "MX",
	}
	var acc testutil.Accumulator
	tags := map[string]string{
		"server":      "8.8.8.8",
		"domain":      ".",
		"record_type": "MX",
	}
	fields := map[string]interface{}{}

	err := dnsConfig.Gather(&acc)
	assert.NoError(t, err)
	metric, ok := acc.Get("dns_query")
	assert.True(t, ok)
	queryTime, _ := metric.Fields["query_time_ms"].(float64)

	fields["query_time_ms"] = queryTime
	acc.AssertContainsTaggedFields(t, "dns_query", fields, tags)
}

func TestMetricContainsServerAndDomainAndRecordTypeTags(t *testing.T) {
	var dnsConfig = DnsQuery{
		Servers: servers,
		Domains: domains,
	}
	var acc testutil.Accumulator
	tags := map[string]string{
		"server":      "8.8.8.8",
		"domain":      "google.com",
		"record_type": "NS",
	}
	fields := map[string]interface{}{}

	err := dnsConfig.Gather(&acc)
	assert.NoError(t, err)
	metric, ok := acc.Get("dns_query")
	assert.True(t, ok)
	queryTime, _ := metric.Fields["query_time_ms"].(float64)

	fields["query_time_ms"] = queryTime
	acc.AssertContainsTaggedFields(t, "dns_query", fields, tags)
}

func TestGatheringTimeout(t *testing.T) {
	var dnsConfig = DnsQuery{
		Servers: servers,
		Domains: domains,
	}
	var acc testutil.Accumulator
	// Point the query at a port nothing is listening on.
	dnsConfig.Port = 60054
	dnsConfig.Timeout = 1
	var err error

	channel := make(chan error, 1)
	go func() {
		channel <- dnsConfig.Gather(&acc)
	}()
	select {
	case res := <-channel:
		err = res
	case <-time.After(time.Second * 2):
		err = nil
	}

	assert.Error(t, err)
	assert.Contains(t, err.Error(), "i/o timeout")
}

func TestSettingDefaultValues(t *testing.T) {
	dnsConfig := DnsQuery{}

	dnsConfig.setDefaultValues()

	assert.Equal(t, []string{"."}, dnsConfig.Domains, "Default domain not equal \".\"")
	assert.Equal(t, "NS", dnsConfig.RecordType, "Default record type not equal 'NS'")
	assert.Equal(t, 53, dnsConfig.Port, "Default port number not equal 53")
	assert.Equal(t, 2, dnsConfig.Timeout, "Default timeout not equal 2")

	dnsConfig = DnsQuery{Domains: []string{"."}}

	dnsConfig.setDefaultValues()

	assert.Equal(t, "NS", dnsConfig.RecordType, "Default record type not equal 'NS'")
}

func TestRecordTypeParser(t *testing.T) {
	var dnsConfig = DnsQuery{}

	// Every record type the parser recognizes, with the expected constant.
	tests := []struct {
		in   string
		want uint16
	}{
		{"A", dns.TypeA},
		{"AAAA", dns.TypeAAAA},
		{"ANY", dns.TypeANY},
		{"CNAME", dns.TypeCNAME},
		{"MX", dns.TypeMX},
		{"NS", dns.TypeNS},
		{"PTR", dns.TypePTR},
		{"SOA", dns.TypeSOA},
		{"SPF", dns.TypeSPF},
		{"SRV", dns.TypeSRV},
		{"TXT", dns.TypeTXT},
	}
	for _, tt := range tests {
		dnsConfig.RecordType = tt.in
		recordType, _ := dnsConfig.parseRecordType()
		assert.Equal(t, tt.want, recordType)
	}
}

func TestRecordTypeParserError(t *testing.T) {
	var dnsConfig = DnsQuery{}
	var err error

	dnsConfig.RecordType = "nil"
	_, err = dnsConfig.parseRecordType()
	assert.Error(t, err)
}
148 plugins/inputs/docker/README.md Normal file
@@ -0,0 +1,148 @@
# Docker Input Plugin

The docker plugin uses the docker remote API to gather metrics on running
docker containers. You can read Docker's documentation for their remote API
[here](https://docs.docker.com/engine/reference/api/docker_remote_api_v1.20/#get-container-stats-based-on-resource-usage).

The docker plugin uses the excellent
[fsouza go-dockerclient](https://github.com/fsouza/go-dockerclient) library to
gather stats. Documentation for the library can be found
[here](https://godoc.org/github.com/fsouza/go-dockerclient) and documentation
for the stat structure can be found
[here](https://godoc.org/github.com/fsouza/go-dockerclient#Stats).

### Configuration:

```
# Read metrics about docker containers
[[inputs.docker]]
  # Docker Endpoint
  #   To use TCP, set endpoint = "tcp://[ip]:[port]"
  #   To use environment variables (ie, docker-machine), set endpoint = "ENV"
  endpoint = "unix:///var/run/docker.sock"
  # Only collect metrics for these containers, collect all if empty
  container_names = []
```

### Measurements & Fields:

Every effort was made to preserve the names based on the JSON response from the
docker API.

Note that the docker_cpu metric may appear multiple times per collection, based
on the availability of per-cpu stats on your system.

- docker_mem
    - total_pgmafault
    - cache
    - mapped_file
    - total_inactive_file
    - pgpgout
    - rss
    - total_mapped_file
    - writeback
    - unevictable
    - pgpgin
    - total_unevictable
    - pgmajfault
    - total_rss
    - total_rss_huge
    - total_writeback
    - total_inactive_anon
    - rss_huge
    - hierarchical_memory_limit
    - total_pgfault
    - total_active_file
    - active_anon
    - total_active_anon
    - total_pgpgout
    - total_cache
    - inactive_anon
    - active_file
    - pgfault
    - inactive_file
    - total_pgpgin
    - max_usage
    - usage
    - fail_count
    - limit
    - usage_percent
- docker_cpu
    - throttling_periods
    - throttling_throttled_periods
    - throttling_throttled_time
    - usage_in_kernelmode
    - usage_in_usermode
    - usage_system
    - usage_total
    - usage_percent
- docker_net
    - rx_dropped
    - rx_bytes
    - rx_errors
    - tx_packets
    - tx_dropped
    - rx_packets
    - tx_errors
    - tx_bytes
- docker_blkio
    - io_service_bytes_recursive_async
    - io_service_bytes_recursive_read
    - io_service_bytes_recursive_sync
    - io_service_bytes_recursive_total
    - io_service_bytes_recursive_write
    - io_serviced_recursive_async
    - io_serviced_recursive_read
    - io_serviced_recursive_sync
    - io_serviced_recursive_total
    - io_serviced_recursive_write

### Tags:

- All stats have the following tags:
    - cont_id (container ID)
    - cont_image (container image)
    - cont_name (container name)
- docker_cpu specific:
    - cpu
- docker_net specific:
    - network
- docker_blkio specific:
    - device

### Example Output:

```
% ./telegraf -config ~/ws/telegraf.conf -input-filter docker -test
* Plugin: docker, Collection 1
> docker_mem,cont_id=5705ba8ed8fb47527410653d60a8bb2f3af5e62372297c419022a3cc6d45d848,\
cont_image=spotify/kafka,cont_name=kafka \
active_anon=52568064i,active_file=6926336i,cache=12038144i,fail_count=0i,\
hierarchical_memory_limit=9223372036854771712i,inactive_anon=52707328i,\
inactive_file=5111808i,limit=1044578304i,mapped_file=10301440i,\
max_usage=140656640i,pgfault=63762i,pgmajfault=2837i,pgpgin=73355i,\
pgpgout=45736i,rss=105275392i,rss_huge=4194304i,total_active_anon=52568064i,\
total_active_file=6926336i,total_cache=12038144i,total_inactive_anon=52707328i,\
total_inactive_file=5111808i,total_mapped_file=10301440i,total_pgfault=63762i,\
total_pgmafault=0i,total_pgpgin=73355i,total_pgpgout=45736i,\
total_rss=105275392i,total_rss_huge=4194304i,total_unevictable=0i,\
total_writeback=0i,unevictable=0i,usage=117440512i,writeback=0i 1453409536840126713
> docker_cpu,cont_id=5705ba8ed8fb47527410653d60a8bb2f3af5e62372297c419022a3cc6d45d848,\
cont_image=spotify/kafka,cont_name=kafka,cpu=cpu-total \
throttling_periods=0i,throttling_throttled_periods=0i,\
throttling_throttled_time=0i,usage_in_kernelmode=440000000i,\
usage_in_usermode=2290000000i,usage_system=84795360000000i,\
usage_total=6628208865i 1453409536840126713
> docker_cpu,cont_id=5705ba8ed8fb47527410653d60a8bb2f3af5e62372297c419022a3cc6d45d848,\
cont_image=spotify/kafka,cont_name=kafka,cpu=cpu0 \
usage_total=6628208865i 1453409536840126713
> docker_net,cont_id=5705ba8ed8fb47527410653d60a8bb2f3af5e62372297c419022a3cc6d45d848,\
cont_image=spotify/kafka,cont_name=kafka,network=eth0 \
rx_bytes=7468i,rx_dropped=0i,rx_errors=0i,rx_packets=94i,tx_bytes=946i,\
tx_dropped=0i,tx_errors=0i,tx_packets=13i 1453409536840126713
> docker_blkio,cont_id=5705ba8ed8fb47527410653d60a8bb2f3af5e62372297c419022a3cc6d45d848,\
cont_image=spotify/kafka,cont_name=kafka,device=8:0 \
io_service_bytes_recursive_async=80216064i,io_service_bytes_recursive_read=79925248i,\
io_service_bytes_recursive_sync=77824i,io_service_bytes_recursive_total=80293888i,\
io_service_bytes_recursive_write=368640i,io_serviced_recursive_async=6562i,\
io_serviced_recursive_read=6492i,io_serviced_recursive_sync=37i,\
io_serviced_recursive_total=6599i,io_serviced_recursive_write=107i 1453409536840126713
```
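Under the hood the plugin asks go-dockerclient for a single (non-streamed) stats sample per container, which is the same one-shot request docker.go below makes. A minimal standalone sketch of that request, using the default socket path from the config above:

```
package main

import (
	"fmt"
	"time"

	"github.com/fsouza/go-dockerclient"
)

func main() {
	client, err := docker.NewClient("unix:///var/run/docker.sock")
	if err != nil {
		panic(err)
	}

	containers, err := client.ListContainers(docker.ListContainersOptions{})
	if err != nil {
		panic(err)
	}

	for _, c := range containers {
		statChan := make(chan *docker.Stats)
		done := make(chan bool)

		// Stream: false makes the daemon send exactly one sample.
		go client.Stats(docker.StatsOptions{
			ID:      c.ID,
			Stats:   statChan,
			Stream:  false,
			Done:    done,
			Timeout: 5 * time.Second,
		})

		stat := <-statChan
		close(done)
		if stat != nil {
			fmt.Println(c.ID, "mem usage:", stat.MemoryStats.Usage)
		}
	}
}
```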
341 plugins/inputs/docker/docker.go Normal file
@@ -0,0 +1,341 @@
package system
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
|
||||
"github.com/fsouza/go-dockerclient"
|
||||
)
|
||||
|
||||
type Docker struct {
|
||||
Endpoint string
|
||||
ContainerNames []string
|
||||
|
||||
client *docker.Client
|
||||
}
|
||||
|
||||
var sampleConfig = `
|
||||
## Docker Endpoint
|
||||
## To use TCP, set endpoint = "tcp://[ip]:[port]"
|
||||
## To use environment variables (ie, docker-machine), set endpoint = "ENV"
|
||||
endpoint = "unix:///var/run/docker.sock"
|
||||
## Only collect metrics for these containers, collect all if empty
|
||||
container_names = []
|
||||
`
|
||||
|
||||
func (d *Docker) Description() string {
|
||||
return "Read metrics about docker containers"
|
||||
}
|
||||
|
||||
func (d *Docker) SampleConfig() string { return sampleConfig }
|
||||
|
||||
func (d *Docker) Gather(acc telegraf.Accumulator) error {
|
||||
if d.client == nil {
|
||||
var c *docker.Client
|
||||
var err error
|
||||
if d.Endpoint == "ENV" {
|
||||
c, err = docker.NewClientFromEnv()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else if d.Endpoint == "" {
|
||||
c, err = docker.NewClient("unix:///var/run/docker.sock")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
c, err = docker.NewClient(d.Endpoint)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
d.client = c
|
||||
}
|
||||
|
||||
opts := docker.ListContainersOptions{}
|
||||
containers, err := d.client.ListContainers(opts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(len(containers))
|
||||
for _, container := range containers {
|
||||
|
||||
go func(c docker.APIContainers) {
|
||||
defer wg.Done()
|
||||
err := d.gatherContainer(c, acc)
|
||||
if err != nil {
|
||||
fmt.Println(err.Error())
|
||||
}
|
||||
}(container)
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Docker) gatherContainer(
|
||||
container docker.APIContainers,
|
||||
acc telegraf.Accumulator,
|
||||
) error {
|
||||
// Parse container name
|
||||
cname := "unknown"
|
||||
if len(container.Names) > 0 {
|
||||
// Not sure what to do with other names, just take the first.
|
||||
cname = strings.TrimPrefix(container.Names[0], "/")
|
||||
}
|
||||
|
||||
tags := map[string]string{
|
||||
"cont_id": container.ID,
|
||||
"cont_name": cname,
|
||||
"cont_image": container.Image,
|
||||
}
|
||||
if len(d.ContainerNames) > 0 {
|
||||
if !sliceContains(cname, d.ContainerNames) {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
statChan := make(chan *docker.Stats)
|
||||
done := make(chan bool)
|
||||
statOpts := docker.StatsOptions{
|
||||
Stream: false,
|
||||
ID: container.ID,
|
||||
Stats: statChan,
|
||||
Done: done,
|
||||
Timeout: time.Duration(time.Second * 5),
|
||||
}
|
||||
|
||||
go func() {
|
||||
err := d.client.Stats(statOpts)
|
||||
if err != nil {
|
||||
log.Printf("Error getting docker stats: %s\n", err.Error())
|
||||
}
|
||||
}()
|
||||
|
||||
stat := <-statChan
|
||||
close(done)
|
||||
|
||||
if stat == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Add labels to tags
|
||||
for k, v := range container.Labels {
|
||||
tags[k] = v
|
||||
}
|
||||
|
||||
gatherContainerStats(stat, acc, tags)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func gatherContainerStats(
|
||||
stat *docker.Stats,
|
||||
acc telegraf.Accumulator,
|
||||
tags map[string]string,
|
||||
) {
|
||||
now := stat.Read
|
||||
|
||||
memfields := map[string]interface{}{
|
||||
"max_usage": stat.MemoryStats.MaxUsage,
|
||||
"usage": stat.MemoryStats.Usage,
|
||||
"fail_count": stat.MemoryStats.Failcnt,
|
||||
"limit": stat.MemoryStats.Limit,
|
||||
"total_pgmafault": stat.MemoryStats.Stats.TotalPgmafault,
|
||||
"cache": stat.MemoryStats.Stats.Cache,
|
||||
"mapped_file": stat.MemoryStats.Stats.MappedFile,
|
||||
"total_inactive_file": stat.MemoryStats.Stats.TotalInactiveFile,
|
||||
"pgpgout": stat.MemoryStats.Stats.Pgpgout,
|
||||
"rss": stat.MemoryStats.Stats.Rss,
|
||||
"total_mapped_file": stat.MemoryStats.Stats.TotalMappedFile,
|
||||
"writeback": stat.MemoryStats.Stats.Writeback,
|
||||
"unevictable": stat.MemoryStats.Stats.Unevictable,
|
||||
"pgpgin": stat.MemoryStats.Stats.Pgpgin,
|
||||
"total_unevictable": stat.MemoryStats.Stats.TotalUnevictable,
|
||||
"pgmajfault": stat.MemoryStats.Stats.Pgmajfault,
|
||||
"total_rss": stat.MemoryStats.Stats.TotalRss,
|
||||
"total_rss_huge": stat.MemoryStats.Stats.TotalRssHuge,
|
||||
"total_writeback": stat.MemoryStats.Stats.TotalWriteback,
|
||||
"total_inactive_anon": stat.MemoryStats.Stats.TotalInactiveAnon,
|
||||
"rss_huge": stat.MemoryStats.Stats.RssHuge,
|
||||
"hierarchical_memory_limit": stat.MemoryStats.Stats.HierarchicalMemoryLimit,
|
||||
"total_pgfault": stat.MemoryStats.Stats.TotalPgfault,
|
||||
"total_active_file": stat.MemoryStats.Stats.TotalActiveFile,
|
||||
"active_anon": stat.MemoryStats.Stats.ActiveAnon,
|
||||
"total_active_anon": stat.MemoryStats.Stats.TotalActiveAnon,
|
||||
"total_pgpgout": stat.MemoryStats.Stats.TotalPgpgout,
|
||||
"total_cache": stat.MemoryStats.Stats.TotalCache,
|
||||
"inactive_anon": stat.MemoryStats.Stats.InactiveAnon,
|
||||
"active_file": stat.MemoryStats.Stats.ActiveFile,
|
||||
"pgfault": stat.MemoryStats.Stats.Pgfault,
|
||||
"inactive_file": stat.MemoryStats.Stats.InactiveFile,
|
||||
"total_pgpgin": stat.MemoryStats.Stats.TotalPgpgin,
|
||||
"usage_percent": calculateMemPercent(stat),
|
||||
}
|
||||
acc.AddFields("docker_mem", memfields, tags, now)
|
||||
|
||||
cpufields := map[string]interface{}{
|
||||
"usage_total": stat.CPUStats.CPUUsage.TotalUsage,
|
||||
"usage_in_usermode": stat.CPUStats.CPUUsage.UsageInUsermode,
|
||||
"usage_in_kernelmode": stat.CPUStats.CPUUsage.UsageInKernelmode,
|
||||
"usage_system": stat.CPUStats.SystemCPUUsage,
|
||||
"throttling_periods": stat.CPUStats.ThrottlingData.Periods,
|
||||
"throttling_throttled_periods": stat.CPUStats.ThrottlingData.ThrottledPeriods,
|
||||
"throttling_throttled_time": stat.CPUStats.ThrottlingData.ThrottledTime,
|
||||
"usage_percent": calculateCPUPercent(stat),
|
||||
}
|
||||
cputags := copyTags(tags)
|
||||
cputags["cpu"] = "cpu-total"
|
||||
acc.AddFields("docker_cpu", cpufields, cputags, now)
|
||||
|
||||
for i, percpu := range stat.CPUStats.CPUUsage.PercpuUsage {
|
||||
percputags := copyTags(tags)
|
||||
percputags["cpu"] = fmt.Sprintf("cpu%d", i)
|
||||
acc.AddFields("docker_cpu", map[string]interface{}{"usage_total": percpu}, percputags, now)
|
||||
}
|
||||
|
||||
for network, netstats := range stat.Networks {
|
||||
netfields := map[string]interface{}{
|
||||
"rx_dropped": netstats.RxDropped,
|
||||
"rx_bytes": netstats.RxBytes,
|
||||
"rx_errors": netstats.RxErrors,
|
||||
"tx_packets": netstats.TxPackets,
|
||||
"tx_dropped": netstats.TxDropped,
|
||||
"rx_packets": netstats.RxPackets,
|
||||
"tx_errors": netstats.TxErrors,
|
||||
"tx_bytes": netstats.TxBytes,
|
||||
}
|
||||
// Create a new network tag dictionary for the "network" tag
|
||||
nettags := copyTags(tags)
|
||||
nettags["network"] = network
|
||||
acc.AddFields("docker_net", netfields, nettags, now)
|
||||
}
|
||||
|
||||
gatherBlockIOMetrics(stat, acc, tags, now)
|
||||
}
|
||||
|
||||
func calculateMemPercent(stat *docker.Stats) float64 {
|
||||
var memPercent = 0.0
|
||||
if stat.MemoryStats.Limit > 0 {
|
||||
memPercent = float64(stat.MemoryStats.Usage) / float64(stat.MemoryStats.Limit) * 100.0
|
||||
}
|
||||
return memPercent
|
||||
}
|
||||
|
||||
func calculateCPUPercent(stat *docker.Stats) float64 {
|
||||
var cpuPercent = 0.0
|
||||
// calculate the change for the cpu and system usage of the container in between readings
|
||||
cpuDelta := float64(stat.CPUStats.CPUUsage.TotalUsage) - float64(stat.PreCPUStats.CPUUsage.TotalUsage)
|
||||
systemDelta := float64(stat.CPUStats.SystemCPUUsage) - float64(stat.PreCPUStats.SystemCPUUsage)
|
||||
|
||||
if systemDelta > 0.0 && cpuDelta > 0.0 {
|
||||
cpuPercent = (cpuDelta / systemDelta) * float64(len(stat.CPUStats.CPUUsage.PercpuUsage)) * 100.0
|
||||
}
|
||||
return cpuPercent
|
||||
}
|
||||

func gatherBlockIOMetrics(
	stat *docker.Stats,
	acc telegraf.Accumulator,
	tags map[string]string,
	now time.Time,
) {
	blkioStats := stat.BlkioStats
	// Make a map of devices to their block io stats
	deviceStatMap := make(map[string]map[string]interface{})

	for _, metric := range blkioStats.IOServiceBytesRecursive {
		device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
		_, ok := deviceStatMap[device]
		if !ok {
			deviceStatMap[device] = make(map[string]interface{})
		}

		field := fmt.Sprintf("io_service_bytes_recursive_%s", strings.ToLower(metric.Op))
		deviceStatMap[device][field] = metric.Value
	}

	for _, metric := range blkioStats.IOServicedRecursive {
		device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
		_, ok := deviceStatMap[device]
		if !ok {
			deviceStatMap[device] = make(map[string]interface{})
		}

		field := fmt.Sprintf("io_serviced_recursive_%s", strings.ToLower(metric.Op))
		deviceStatMap[device][field] = metric.Value
	}

	for _, metric := range blkioStats.IOQueueRecursive {
		device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
		field := fmt.Sprintf("io_queue_recursive_%s", strings.ToLower(metric.Op))
		deviceStatMap[device][field] = metric.Value
	}

	for _, metric := range blkioStats.IOServiceTimeRecursive {
		device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
		field := fmt.Sprintf("io_service_time_recursive_%s", strings.ToLower(metric.Op))
		deviceStatMap[device][field] = metric.Value
	}

	for _, metric := range blkioStats.IOWaitTimeRecursive {
		device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
		field := fmt.Sprintf("io_wait_time_%s", strings.ToLower(metric.Op))
		deviceStatMap[device][field] = metric.Value
	}

	for _, metric := range blkioStats.IOMergedRecursive {
		device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
		field := fmt.Sprintf("io_merged_recursive_%s", strings.ToLower(metric.Op))
		deviceStatMap[device][field] = metric.Value
	}

	for _, metric := range blkioStats.IOTimeRecursive {
		device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
		field := fmt.Sprintf("io_time_recursive_%s", strings.ToLower(metric.Op))
		deviceStatMap[device][field] = metric.Value
	}

	for _, metric := range blkioStats.SectorsRecursive {
		device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor)
		field := fmt.Sprintf("sectors_recursive_%s", strings.ToLower(metric.Op))
		deviceStatMap[device][field] = metric.Value
	}

	for device, fields := range deviceStatMap {
		iotags := copyTags(tags)
		iotags["device"] = device
		acc.AddFields("docker_blkio", fields, iotags, now)
	}
}
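Because entries from all eight blkio lists are merged per device before `AddFields` runs, each `docker_blkio` point carries every field seen for that `major:minor` pair. With the two entries built in `testStats()` below, the merged map looks like this (illustrative sketch only):

```go
package main

import "fmt"

func main() {
	// After the loops run over the testStats() fixture, device "6:0" holds
	// one field from IOServiceBytesRecursive and one from IOServicedRecursive,
	// exactly the fields asserted in TestDockerGatherContainerStats.
	deviceStatMap := map[string]map[string]interface{}{
		"6:0": {
			"io_service_bytes_recursive_read": uint64(100),
			"io_serviced_recursive_write":     uint64(101),
		},
	}
	fmt.Println(deviceStatMap)
}
```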

func copyTags(in map[string]string) map[string]string {
	out := make(map[string]string)
	for k, v := range in {
		out[k] = v
	}
	return out
}

func sliceContains(in string, sl []string) bool {
	for _, str := range sl {
		if str == in {
			return true
		}
	}
	return false
}

func init() {
	inputs.Add("docker", func() telegraf.Input {
		return &Docker{}
	})
}
196 plugins/inputs/docker/docker_test.go (Normal file)
@@ -0,0 +1,196 @@
package system

import (
	"testing"
	"time"

	"github.com/influxdata/telegraf/testutil"

	"github.com/fsouza/go-dockerclient"
)

func TestDockerGatherContainerStats(t *testing.T) {
	var acc testutil.Accumulator
	stats := testStats()

	tags := map[string]string{
		"cont_id":    "foobarbaz",
		"cont_name":  "redis",
		"cont_image": "redis/image",
	}
	gatherContainerStats(stats, &acc, tags)

	// test docker_net measurement
	netfields := map[string]interface{}{
		"rx_dropped": uint64(1),
		"rx_bytes":   uint64(2),
		"rx_errors":  uint64(3),
		"tx_packets": uint64(4),
		"tx_dropped": uint64(1),
		"rx_packets": uint64(2),
		"tx_errors":  uint64(3),
		"tx_bytes":   uint64(4),
	}
	nettags := copyTags(tags)
	nettags["network"] = "eth0"
	acc.AssertContainsTaggedFields(t, "docker_net", netfields, nettags)

	// test docker_blkio measurement
	blkiotags := copyTags(tags)
	blkiotags["device"] = "6:0"
	blkiofields := map[string]interface{}{
		"io_service_bytes_recursive_read": uint64(100),
		"io_serviced_recursive_write":     uint64(101),
	}
	acc.AssertContainsTaggedFields(t, "docker_blkio", blkiofields, blkiotags)

	// test docker_mem measurement
	memfields := map[string]interface{}{
		"max_usage":                 uint64(1001),
		"usage":                     uint64(1111),
		"fail_count":                uint64(1),
		"limit":                     uint64(2000),
		"total_pgmafault":           uint64(0),
		"cache":                     uint64(0),
		"mapped_file":               uint64(0),
		"total_inactive_file":       uint64(0),
		"pgpgout":                   uint64(0),
		"rss":                       uint64(0),
		"total_mapped_file":         uint64(0),
		"writeback":                 uint64(0),
		"unevictable":               uint64(0),
		"pgpgin":                    uint64(0),
		"total_unevictable":         uint64(0),
		"pgmajfault":                uint64(0),
		"total_rss":                 uint64(44),
		"total_rss_huge":            uint64(444),
		"total_writeback":           uint64(55),
		"total_inactive_anon":       uint64(0),
		"rss_huge":                  uint64(0),
		"hierarchical_memory_limit": uint64(0),
		"total_pgfault":             uint64(0),
		"total_active_file":         uint64(0),
		"active_anon":               uint64(0),
		"total_active_anon":         uint64(0),
		"total_pgpgout":             uint64(0),
		"total_cache":               uint64(0),
		"inactive_anon":             uint64(0),
		"active_file":               uint64(1),
		"pgfault":                   uint64(2),
		"inactive_file":             uint64(3),
		"total_pgpgin":              uint64(4),
		"usage_percent":             float64(55.55),
	}

	acc.AssertContainsTaggedFields(t, "docker_mem", memfields, tags)

	// test docker_cpu measurement
	cputags := copyTags(tags)
	cputags["cpu"] = "cpu-total"
	cpufields := map[string]interface{}{
		"usage_total":                  uint64(500),
		"usage_in_usermode":            uint64(100),
		"usage_in_kernelmode":          uint64(200),
		"usage_system":                 uint64(100),
		"throttling_periods":           uint64(1),
		"throttling_throttled_periods": uint64(0),
		"throttling_throttled_time":    uint64(0),
		"usage_percent":                float64(400.0),
	}
	acc.AssertContainsTaggedFields(t, "docker_cpu", cpufields, cputags)

	cputags["cpu"] = "cpu0"
	cpu0fields := map[string]interface{}{
		"usage_total": uint64(1),
	}
	acc.AssertContainsTaggedFields(t, "docker_cpu", cpu0fields, cputags)

	cputags["cpu"] = "cpu1"
	cpu1fields := map[string]interface{}{
		"usage_total": uint64(1002),
	}
	acc.AssertContainsTaggedFields(t, "docker_cpu", cpu1fields, cputags)
}

func testStats() *docker.Stats {
	stats := &docker.Stats{
		Read:     time.Now(),
		Networks: make(map[string]docker.NetworkStats),
	}

	stats.CPUStats.CPUUsage.PercpuUsage = []uint64{1, 1002}
	stats.CPUStats.CPUUsage.UsageInUsermode = 100
	stats.CPUStats.CPUUsage.TotalUsage = 500
	stats.CPUStats.CPUUsage.UsageInKernelmode = 200
	stats.CPUStats.SystemCPUUsage = 100
	stats.CPUStats.ThrottlingData.Periods = 1

	stats.PreCPUStats.CPUUsage.TotalUsage = 400
	stats.PreCPUStats.SystemCPUUsage = 50

	stats.MemoryStats.Stats.TotalPgmafault = 0
	stats.MemoryStats.Stats.Cache = 0
	stats.MemoryStats.Stats.MappedFile = 0
	stats.MemoryStats.Stats.TotalInactiveFile = 0
	stats.MemoryStats.Stats.Pgpgout = 0
	stats.MemoryStats.Stats.Rss = 0
	stats.MemoryStats.Stats.TotalMappedFile = 0
	stats.MemoryStats.Stats.Writeback = 0
	stats.MemoryStats.Stats.Unevictable = 0
	stats.MemoryStats.Stats.Pgpgin = 0
	stats.MemoryStats.Stats.TotalUnevictable = 0
	stats.MemoryStats.Stats.Pgmajfault = 0
	stats.MemoryStats.Stats.TotalRss = 44
	stats.MemoryStats.Stats.TotalRssHuge = 444
	stats.MemoryStats.Stats.TotalWriteback = 55
	stats.MemoryStats.Stats.TotalInactiveAnon = 0
	stats.MemoryStats.Stats.RssHuge = 0
	stats.MemoryStats.Stats.HierarchicalMemoryLimit = 0
	stats.MemoryStats.Stats.TotalPgfault = 0
	stats.MemoryStats.Stats.TotalActiveFile = 0
	stats.MemoryStats.Stats.ActiveAnon = 0
	stats.MemoryStats.Stats.TotalActiveAnon = 0
	stats.MemoryStats.Stats.TotalPgpgout = 0
	stats.MemoryStats.Stats.TotalCache = 0
	stats.MemoryStats.Stats.InactiveAnon = 0
	stats.MemoryStats.Stats.ActiveFile = 1
	stats.MemoryStats.Stats.Pgfault = 2
	stats.MemoryStats.Stats.InactiveFile = 3
	stats.MemoryStats.Stats.TotalPgpgin = 4

	stats.MemoryStats.MaxUsage = 1001
	stats.MemoryStats.Usage = 1111
	stats.MemoryStats.Failcnt = 1
	stats.MemoryStats.Limit = 2000

	stats.Networks["eth0"] = docker.NetworkStats{
		RxDropped: 1,
		RxBytes:   2,
		RxErrors:  3,
		TxPackets: 4,
		TxDropped: 1,
		RxPackets: 2,
		TxErrors:  3,
		TxBytes:   4,
	}

	sbr := docker.BlkioStatsEntry{
		Major: 6,
		Minor: 0,
		Op:    "read",
		Value: 100,
	}
	sr := docker.BlkioStatsEntry{
		Major: 6,
		Minor: 0,
		Op:    "write",
		Value: 101,
	}

	stats.BlkioStats.IOServiceBytesRecursive = append(
		stats.BlkioStats.IOServiceBytesRecursive, sbr)
	stats.BlkioStats.IOServicedRecursive = append(
		stats.BlkioStats.IOServicedRecursive, sr)

	return stats
}
67 plugins/inputs/dovecot/README.md (Normal file)
@@ -0,0 +1,67 @@
# Dovecot Input Plugin

The dovecot plugin uses the dovecot Stats protocol to gather metrics on configured
domains. You can read Dovecot's documentation
[here](http://wiki2.dovecot.org/Statistics).
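Under the hood, the plugin opens a TCP connection to the stats listener, sends an `EXPORT\tdomain` command, and reads back a tab-separated table (see `gatherServer` in dovecot.go further down). A minimal sketch of that exchange, assuming a stats listener on localhost:24242 as in the sample config:

```go
package main

import (
	"fmt"
	"io"
	"net"
	"os"
)

func main() {
	// Connect to the Dovecot stats socket (the address is an assumption).
	c, err := net.Dial("tcp", "localhost:24242")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	defer c.Close()

	// Same query the plugin issues; the reply is a TSV table,
	// one header row followed by one row per domain.
	c.Write([]byte("EXPORT\tdomain\n\n"))
	io.Copy(os.Stdout, c)
}
```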

### Configuration:

```
# Read metrics about dovecot servers
[[inputs.dovecot]]
  # Dovecot servers
  # specify dovecot servers via an address:port list
  # e.g.
  #   localhost:24242
  #
  # If no servers are specified, then localhost is used as the host.
  servers = ["localhost:24242"]
  # Only collect metrics for these domains, collect all if empty
  domains = []
```

### Tags:

- server: hostname
- domain: domain name

### Fields:

    reset_timestamp        time.Time
    last_update            time.Time
    num_logins             int64
    num_cmds               int64
    num_connected_sessions int64
    user_cpu               float32
    sys_cpu                float32
    clock_time             float64
    min_faults             int64
    maj_faults             int64
    vol_cs                 int64
    invol_cs               int64
    disk_input             int64
    disk_output            int64
    read_count             int64
    read_bytes             int64
    write_count            int64
    write_bytes            int64
    mail_lookup_path       int64
    mail_lookup_attr       int64
    mail_read_count        int64
    mail_read_bytes        int64
    mail_cache_hits        int64

### Example Output:

```
telegraf -config telegraf.cfg -input-filter dovecot -test
* Plugin: dovecot, Collection 1
> dovecot,domain=xxxxx.it,server=dovecot--1.mail.sys clock_time=12105746411632.5,disk_input=115285225472i,disk_output=4885067755520i,invol_cs=169701886i,last_update="2016-02-09 08:49:47.000014113 +0100 CET",mail_cache_hits=441828i,mail_lookup_attr=0i,mail_lookup_path=25323i,mail_read_bytes=241188145i,mail_read_count=11719i,maj_faults=3168i,min_faults=321438988i,num_cmds=51635i,num_connected_sessions=2i,num_logins=17149i,read_bytes=7939026951110i,read_count=3716991752i,reset_timestamp="2016-01-28 09:34:36 +0100 CET",sys_cpu=222595.288,user_cpu=267468.08,vol_cs=3288715920i,write_bytes=4483648967059i,write_count=1640646952i 1455004219924838345
> dovecot,domain=yyyyy.com,server=dovecot-1.mail.sys clock_time=6650794455331782,disk_input=61957695569920i,disk_output=2638244004487168i,invol_cs=2004805041i,last_update="2016-02-09 08:49:49.000251296 +0100 CET",mail_cache_hits=2499112513i,mail_lookup_attr=506730i,mail_lookup_path=39128227i,mail_read_bytes=1076496874501i,mail_read_count=32615262i,maj_faults=1643304i,min_faults=4216116325i,num_cmds=85785559i,num_connected_sessions=1177i,num_logins=11658255i,read_bytes=4289150974554145i,read_count=1112000703i,reset_timestamp="2016-01-28 09:31:26 +0100 CET",sys_cpu=121125923.032,user_cpu=145561336.428,vol_cs=205451885i,write_bytes=2420130526835796i,write_count=2991367252i 1455004219925152529
> dovecot,domain=xxxxx.it,server=dovecot-2.mail.sys clock_time=10710826586999.143,disk_input=79792410624i,disk_output=4496066158592i,invol_cs=150426876i,last_update="2016-02-09 08:48:19.000209134 +0100 CET",mail_cache_hits=5480869i,mail_lookup_attr=0i,mail_lookup_path=122563i,mail_read_bytes=340746273i,mail_read_count=44275i,maj_faults=1722i,min_faults=288071875i,num_cmds=50098i,num_connected_sessions=0i,num_logins=16389i,read_bytes=7259551999517i,read_count=3396625369i,reset_timestamp="2016-01-28 09:31:29 +0100 CET",sys_cpu=200762.792,user_cpu=242477.664,vol_cs=2996657358i,write_bytes=4133381575263i,write_count=1497242759i 1455004219924888283
> dovecot,domain=yyyyy.com,server=dovecot-2.mail.sys clock_time=6522131245483702,disk_input=48259150004224i,disk_output=2754333359087616i,invol_cs=2294595260i,last_update="2016-02-09 08:49:49.000251919 +0100 CET",mail_cache_hits=2139113611i,mail_lookup_attr=520276i,mail_lookup_path=37940318i,mail_read_bytes=1088002215022i,mail_read_count=31350271i,maj_faults=994420i,min_faults=1486260543i,num_cmds=40414997i,num_connected_sessions=978i,num_logins=11259672i,read_bytes=4445546612487315i,read_count=1763534543i,reset_timestamp="2016-01-28 09:31:24 +0100 CET",sys_cpu=123655962.668,user_cpu=149259327.032,vol_cs=4215130546i,write_bytes=2531186030222761i,write_count=2186579650i 1455004219925398372
```
166 plugins/inputs/dovecot/dovecot.go (Normal file)
@@ -0,0 +1,166 @@
package dovecot

import (
	"bytes"
	"fmt"
	"io"
	"net"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/inputs"
)

type Dovecot struct {
	Servers []string
	Domains []string
}

func (d *Dovecot) Description() string {
	return "Read statistics from one or many dovecot servers"
}

var sampleConfig = `
  ## specify dovecot servers via an address:port list
  ##  e.g.
  ##    localhost:24242
  ##
  ## If no servers are specified, then localhost is used as the host.
  servers = ["localhost:24242"]
  ## Only collect metrics for these domains, collect all if empty
  domains = []
`

func (d *Dovecot) SampleConfig() string { return sampleConfig }

const defaultPort = "24242"

// Reads stats from all configured servers.
func (d *Dovecot) Gather(acc telegraf.Accumulator) error {
	if len(d.Servers) == 0 {
		d.Servers = append(d.Servers, "127.0.0.1:24242")
	}

	var wg sync.WaitGroup

	var outerr error

	var domains = make(map[string]bool)

	for _, dom := range d.Domains {
		domains[dom] = true
	}

	for _, serv := range d.Servers {
		wg.Add(1)
		go func(serv string) {
			defer wg.Done()
			outerr = d.gatherServer(serv, acc, domains)
		}(serv)
	}

	wg.Wait()

	return outerr
}

func (d *Dovecot) gatherServer(addr string, acc telegraf.Accumulator, doms map[string]bool) error {
	_, _, err := net.SplitHostPort(addr)
	if err != nil {
		return fmt.Errorf("Error: %s on url %s\n", err, addr)
	}

	c, err := net.Dial("tcp", addr)
	if err != nil {
		return fmt.Errorf("Unable to connect to dovecot server '%s': %s", addr, err)
	}
	defer c.Close()

	c.Write([]byte("EXPORT\tdomain\n\n"))
	var buf bytes.Buffer
	io.Copy(&buf, c)

	host, _, _ := net.SplitHostPort(addr)

	return gatherStats(&buf, acc, doms, host)
}

func gatherStats(buf *bytes.Buffer, acc telegraf.Accumulator, doms map[string]bool, host string) error {
	lines := strings.Split(buf.String(), "\n")
	head := strings.Split(lines[0], "\t")
	vals := lines[1:]

	for i := range vals {
		if vals[i] == "" {
			continue
		}
		val := strings.Split(vals[i], "\t")
		fields := make(map[string]interface{})
		if len(doms) > 0 && !doms[val[0]] {
			continue
		}
		tags := map[string]string{"server": host, "domain": val[0]}
		for n := range val {
			switch head[n] {
			case "domain":
				continue
			case "user_cpu", "sys_cpu", "clock_time":
				fields[head[n]] = secParser(val[n])
			case "reset_timestamp", "last_update":
				fields[head[n]] = timeParser(val[n])
			default:
				ival, _ := splitSec(val[n])
				fields[head[n]] = ival
			}
		}

		acc.AddFields("dovecot", fields, tags)
	}

	return nil
}
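The reply parsed above is a plain table: the first row names the columns and each later row is one domain. Taking just two columns from the `sampleStats` fixture in dovecot_test.go, the header-to-value pairing works out like this (standalone sketch):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Two columns cut from the sampleStats fixture; tabs separate fields.
	reply := "domain\tnum_logins\ndomain.test\t7503897\n"

	lines := strings.Split(reply, "\n")
	head := strings.Split(lines[0], "\t")
	row := strings.Split(lines[1], "\t")
	for n := range head {
		fmt.Printf("%s = %s\n", head[n], row[n])
	}
	// Output:
	// domain = domain.test
	// num_logins = 7503897
}
```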

func splitSec(tm string) (sec int64, msec int64) {
	var err error
	ss := strings.Split(tm, ".")

	sec, err = strconv.ParseInt(ss[0], 10, 64)
	if err != nil {
		sec = 0
	}
	if len(ss) > 1 {
		msec, err = strconv.ParseInt(ss[1], 10, 64)
		if err != nil {
			msec = 0
		}
	} else {
		msec = 0
	}

	return sec, msec
}

func timeParser(tm string) time.Time {
	sec, msec := splitSec(tm)
	return time.Unix(sec, msec)
}

func secParser(tm string) float64 {
	sec, msec := splitSec(tm)
	return float64(sec) + (float64(msec) / 1000000.0)
}

func init() {
	inputs.Add("dovecot", func() telegraf.Input {
		return &Dovecot{}
	})
}
61 plugins/inputs/dovecot/dovecot_test.go (Normal file)
@@ -0,0 +1,61 @@
package dovecot

import (
	"bytes"
	"testing"
	"time"

	"github.com/influxdata/telegraf/testutil"
	"github.com/stretchr/testify/require"
)

func TestDovecot(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}

	var acc testutil.Accumulator
	tags := map[string]string{"server": "dovecot.test", "domain": "domain.test"}
	buf := bytes.NewBufferString(sampleStats)

	var doms = map[string]bool{
		"domain.test": true,
	}

	err := gatherStats(buf, &acc, doms, "dovecot.test")
	require.NoError(t, err)

	fields := map[string]interface{}{
		"reset_timestamp":        time.Unix(1453969886, 0),
		"last_update":            time.Unix(1454603963, 39864),
		"num_logins":             int64(7503897),
		"num_cmds":               int64(52595715),
		"num_connected_sessions": int64(1204),
		"user_cpu":               1.00831175372e+08,
		"sys_cpu":                8.3849071112e+07,
		"clock_time":             4.3260019315281835e+15,
		"min_faults":             int64(763950011),
		"maj_faults":             int64(1112443),
		"vol_cs":                 int64(4120386897),
		"invol_cs":               int64(3685239306),
		"disk_input":             int64(41679480946688),
		"disk_output":            int64(1819070669176832),
		"read_count":             int64(2368906465),
		"read_bytes":             int64(2957928122981169),
		"write_count":            int64(3545389615),
		"write_bytes":            int64(1666822498251286),
		"mail_lookup_path":       int64(24396105),
		"mail_lookup_attr":       int64(302845),
		"mail_read_count":        int64(20155768),
		"mail_read_bytes":        int64(669946617705),
		"mail_cache_hits":        int64(1557255080),
	}

	acc.AssertContainsTaggedFields(t, "dovecot", fields, tags)
}

const sampleStats = `domain reset_timestamp last_update num_logins num_cmds num_connected_sessions user_cpu sys_cpu clock_time min_faults maj_faults vol_cs invol_cs disk_input disk_output read_count read_bytes write_count write_bytes mail_lookup_path mail_lookup_attr mail_read_count mail_read_bytes mail_cache_hits
domain.bad 1453970076 1454603947.383029 10749 33828 0 177988.524000 148071.772000 7531838964717.193706 212491179 2125 2190386067 112779200 74487934976 3221808119808 2469948401 5237602841760 1091171292 2951966459802 15363 0 2922 136403379 334372
domain.test 1453969886 1454603963.039864 7503897 52595715 1204 100831175.372000 83849071.112000 4326001931528183.495762 763950011 1112443 4120386897 3685239306 41679480946688 1819070669176832 2368906465 2957928122981169 3545389615 1666822498251286 24396105 302845 20155768 669946617705 1557255080`
320 plugins/inputs/elasticsearch/README.md (Normal file)
@@ -0,0 +1,320 @@
# Elasticsearch plugin

#### Plugin arguments:
- **servers** []string: list of one or more Elasticsearch servers
- **local** boolean: If false, it will read the indices stats from all nodes
- **cluster_health** boolean: If true, it will also obtain cluster level stats

#### Description

The [elasticsearch](https://www.elastic.co/) plugin queries endpoints to obtain
[node](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html)
and optionally [cluster](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html) stats.

Example:

```
[elasticsearch]

  servers = ["http://localhost:9200"]

  local = true

  cluster_health = true
```

# Measurements
#### cluster measurements (utilizes fields instead of single values):

- elasticsearch_cluster_health

  contains `status`, `timed_out`, `number_of_nodes`, `number_of_data_nodes`,
  `active_primary_shards`, `active_shards`, `relocating_shards`,
  `initializing_shards`, `unassigned_shards` fields

- elasticsearch_indices

  contains `status`, `number_of_shards`, `number_of_replicas`,
  `active_primary_shards`, `active_shards`, `relocating_shards`,
  `initializing_shards`, `unassigned_shards` fields

#### node measurements:

field data circuit breaker measurement names:
- elasticsearch_breakers_fielddata_estimated_size_in_bytes value=0
- elasticsearch_breakers_fielddata_overhead value=1.03
- elasticsearch_breakers_fielddata_tripped value=0
- elasticsearch_breakers_fielddata_limit_size_in_bytes value=623326003
- elasticsearch_breakers_request_estimated_size_in_bytes value=0
- elasticsearch_breakers_request_overhead value=1.0
- elasticsearch_breakers_request_tripped value=0
- elasticsearch_breakers_request_limit_size_in_bytes value=415550668
- elasticsearch_breakers_parent_overhead value=1.0
- elasticsearch_breakers_parent_tripped value=0
- elasticsearch_breakers_parent_limit_size_in_bytes value=727213670
- elasticsearch_breakers_parent_estimated_size_in_bytes value=0

File system information, data path, free disk space, read/write measurement names:
- elasticsearch_fs_timestamp value=1436460392946
- elasticsearch_fs_total_free_in_bytes value=16909316096
- elasticsearch_fs_total_available_in_bytes value=15894814720
- elasticsearch_fs_total_total_in_bytes value=19507089408

indices size, document count, indexing and deletion times, search times,
field cache size, merges and flushes measurement names:
- elasticsearch_indices_id_cache_memory_size_in_bytes value=0
- elasticsearch_indices_completion_size_in_bytes value=0
- elasticsearch_indices_suggest_total value=0
- elasticsearch_indices_suggest_time_in_millis value=0
- elasticsearch_indices_suggest_current value=0
- elasticsearch_indices_query_cache_memory_size_in_bytes value=0
- elasticsearch_indices_query_cache_evictions value=0
- elasticsearch_indices_query_cache_hit_count value=0
- elasticsearch_indices_query_cache_miss_count value=0
- elasticsearch_indices_store_size_in_bytes value=37715234
- elasticsearch_indices_store_throttle_time_in_millis value=215
- elasticsearch_indices_merges_current_docs value=0
- elasticsearch_indices_merges_current_size_in_bytes value=0
- elasticsearch_indices_merges_total value=133
- elasticsearch_indices_merges_total_time_in_millis value=21060
- elasticsearch_indices_merges_total_docs value=203672
- elasticsearch_indices_merges_total_size_in_bytes value=142900226
- elasticsearch_indices_merges_current value=0
- elasticsearch_indices_filter_cache_memory_size_in_bytes value=7384
- elasticsearch_indices_filter_cache_evictions value=0
- elasticsearch_indices_indexing_index_total value=84790
- elasticsearch_indices_indexing_index_time_in_millis value=29680
- elasticsearch_indices_indexing_index_current value=0
- elasticsearch_indices_indexing_noop_update_total value=0
- elasticsearch_indices_indexing_throttle_time_in_millis value=0
- elasticsearch_indices_indexing_delete_total value=13879
- elasticsearch_indices_indexing_delete_time_in_millis value=1139
- elasticsearch_indices_indexing_delete_current value=0
- elasticsearch_indices_get_exists_time_in_millis value=0
- elasticsearch_indices_get_missing_total value=1
- elasticsearch_indices_get_missing_time_in_millis value=2
- elasticsearch_indices_get_current value=0
- elasticsearch_indices_get_total value=1
- elasticsearch_indices_get_time_in_millis value=2
- elasticsearch_indices_get_exists_total value=0
- elasticsearch_indices_refresh_total value=1076
- elasticsearch_indices_refresh_total_time_in_millis value=20078
- elasticsearch_indices_percolate_current value=0
- elasticsearch_indices_percolate_memory_size_in_bytes value=-1
- elasticsearch_indices_percolate_queries value=0
- elasticsearch_indices_percolate_total value=0
- elasticsearch_indices_percolate_time_in_millis value=0
- elasticsearch_indices_translog_operations value=17702
- elasticsearch_indices_translog_size_in_bytes value=17
- elasticsearch_indices_recovery_current_as_source value=0
- elasticsearch_indices_recovery_current_as_target value=0
- elasticsearch_indices_recovery_throttle_time_in_millis value=0
- elasticsearch_indices_docs_count value=29652
- elasticsearch_indices_docs_deleted value=5229
- elasticsearch_indices_flush_total_time_in_millis value=2401
- elasticsearch_indices_flush_total value=115
- elasticsearch_indices_fielddata_memory_size_in_bytes value=12996
- elasticsearch_indices_fielddata_evictions value=0
- elasticsearch_indices_search_fetch_current value=0
- elasticsearch_indices_search_open_contexts value=0
- elasticsearch_indices_search_query_total value=1452
- elasticsearch_indices_search_query_time_in_millis value=5695
- elasticsearch_indices_search_query_current value=0
- elasticsearch_indices_search_fetch_total value=414
- elasticsearch_indices_search_fetch_time_in_millis value=146
- elasticsearch_indices_warmer_current value=0
- elasticsearch_indices_warmer_total value=2319
- elasticsearch_indices_warmer_total_time_in_millis value=448
- elasticsearch_indices_segments_count value=134
- elasticsearch_indices_segments_memory_in_bytes value=1285212
- elasticsearch_indices_segments_index_writer_memory_in_bytes value=0
- elasticsearch_indices_segments_index_writer_max_memory_in_bytes value=172368955
- elasticsearch_indices_segments_version_map_memory_in_bytes value=611844
- elasticsearch_indices_segments_fixed_bit_set_memory_in_bytes value=0

HTTP connection measurement names:
- elasticsearch_http_current_open value=3
- elasticsearch_http_total_opened value=3

JVM stats, memory pool information, garbage collection, buffer pools measurement names:
- elasticsearch_jvm_timestamp value=1436460392945
- elasticsearch_jvm_uptime_in_millis value=202245
- elasticsearch_jvm_mem_non_heap_used_in_bytes value=39634576
- elasticsearch_jvm_mem_non_heap_committed_in_bytes value=40841216
- elasticsearch_jvm_mem_pools_young_max_in_bytes value=279183360
- elasticsearch_jvm_mem_pools_young_peak_used_in_bytes value=71630848
- elasticsearch_jvm_mem_pools_young_peak_max_in_bytes value=279183360
- elasticsearch_jvm_mem_pools_young_used_in_bytes value=32685760
- elasticsearch_jvm_mem_pools_survivor_peak_used_in_bytes value=8912888
- elasticsearch_jvm_mem_pools_survivor_peak_max_in_bytes value=34865152
- elasticsearch_jvm_mem_pools_survivor_used_in_bytes value=8912880
- elasticsearch_jvm_mem_pools_survivor_max_in_bytes value=34865152
- elasticsearch_jvm_mem_pools_old_peak_max_in_bytes value=724828160
- elasticsearch_jvm_mem_pools_old_used_in_bytes value=11110928
- elasticsearch_jvm_mem_pools_old_max_in_bytes value=724828160
- elasticsearch_jvm_mem_pools_old_peak_used_in_bytes value=14354608
- elasticsearch_jvm_mem_heap_used_in_bytes value=52709568
- elasticsearch_jvm_mem_heap_used_percent value=5
- elasticsearch_jvm_mem_heap_committed_in_bytes value=259522560
- elasticsearch_jvm_mem_heap_max_in_bytes value=1038876672
- elasticsearch_jvm_threads_peak_count value=45
- elasticsearch_jvm_threads_count value=44
- elasticsearch_jvm_gc_collectors_young_collection_count value=2
- elasticsearch_jvm_gc_collectors_young_collection_time_in_millis value=98
- elasticsearch_jvm_gc_collectors_old_collection_count value=1
- elasticsearch_jvm_gc_collectors_old_collection_time_in_millis value=24
- elasticsearch_jvm_buffer_pools_direct_count value=40
- elasticsearch_jvm_buffer_pools_direct_used_in_bytes value=6304239
- elasticsearch_jvm_buffer_pools_direct_total_capacity_in_bytes value=6304239
- elasticsearch_jvm_buffer_pools_mapped_count value=0
- elasticsearch_jvm_buffer_pools_mapped_used_in_bytes value=0
- elasticsearch_jvm_buffer_pools_mapped_total_capacity_in_bytes value=0

TCP information measurement names:
- elasticsearch_network_tcp_in_errs value=0
- elasticsearch_network_tcp_passive_opens value=16
- elasticsearch_network_tcp_curr_estab value=29
- elasticsearch_network_tcp_in_segs value=113
- elasticsearch_network_tcp_out_segs value=97
- elasticsearch_network_tcp_retrans_segs value=0
- elasticsearch_network_tcp_attempt_fails value=0
- elasticsearch_network_tcp_active_opens value=13
- elasticsearch_network_tcp_estab_resets value=0
- elasticsearch_network_tcp_out_rsts value=0

Operating system stats, load average, cpu, mem, swap measurement names:
- elasticsearch_os_swap_used_in_bytes value=0
- elasticsearch_os_swap_free_in_bytes value=487997440
- elasticsearch_os_timestamp value=1436460392944
- elasticsearch_os_uptime_in_millis value=25092
- elasticsearch_os_cpu_sys value=0
- elasticsearch_os_cpu_user value=0
- elasticsearch_os_cpu_idle value=99
- elasticsearch_os_cpu_usage value=0
- elasticsearch_os_cpu_stolen value=0
- elasticsearch_os_mem_free_percent value=74
- elasticsearch_os_mem_used_percent value=25
- elasticsearch_os_mem_actual_free_in_bytes value=1565470720
- elasticsearch_os_mem_actual_used_in_bytes value=534159360
- elasticsearch_os_mem_free_in_bytes value=477761536
- elasticsearch_os_mem_used_in_bytes value=1621868544

Process statistics, memory consumption, cpu usage, open file descriptors measurement names:
- elasticsearch_process_mem_resident_in_bytes value=246382592
- elasticsearch_process_mem_share_in_bytes value=18747392
- elasticsearch_process_mem_total_virtual_in_bytes value=4747890688
- elasticsearch_process_timestamp value=1436460392945
- elasticsearch_process_open_file_descriptors value=160
- elasticsearch_process_cpu_total_in_millis value=15480
- elasticsearch_process_cpu_percent value=2
- elasticsearch_process_cpu_sys_in_millis value=1870
- elasticsearch_process_cpu_user_in_millis value=13610

Statistics about each thread pool, including current size, queue and rejected tasks measurement names:
- elasticsearch_thread_pool_merge_threads value=6
- elasticsearch_thread_pool_merge_queue value=4
- elasticsearch_thread_pool_merge_active value=5
- elasticsearch_thread_pool_merge_rejected value=2
- elasticsearch_thread_pool_merge_largest value=5
- elasticsearch_thread_pool_merge_completed value=1
- elasticsearch_thread_pool_bulk_threads value=4
- elasticsearch_thread_pool_bulk_queue value=5
- elasticsearch_thread_pool_bulk_active value=7
- elasticsearch_thread_pool_bulk_rejected value=3
- elasticsearch_thread_pool_bulk_largest value=1
- elasticsearch_thread_pool_bulk_completed value=4
- elasticsearch_thread_pool_warmer_threads value=2
- elasticsearch_thread_pool_warmer_queue value=7
- elasticsearch_thread_pool_warmer_active value=3
- elasticsearch_thread_pool_warmer_rejected value=2
- elasticsearch_thread_pool_warmer_largest value=3
- elasticsearch_thread_pool_warmer_completed value=1
- elasticsearch_thread_pool_get_largest value=2
- elasticsearch_thread_pool_get_completed value=1
- elasticsearch_thread_pool_get_threads value=1
- elasticsearch_thread_pool_get_queue value=8
- elasticsearch_thread_pool_get_active value=4
- elasticsearch_thread_pool_get_rejected value=3
- elasticsearch_thread_pool_index_threads value=6
- elasticsearch_thread_pool_index_queue value=8
- elasticsearch_thread_pool_index_active value=4
- elasticsearch_thread_pool_index_rejected value=2
- elasticsearch_thread_pool_index_largest value=3
- elasticsearch_thread_pool_index_completed value=6
- elasticsearch_thread_pool_suggest_threads value=2
- elasticsearch_thread_pool_suggest_queue value=7
- elasticsearch_thread_pool_suggest_active value=2
- elasticsearch_thread_pool_suggest_rejected value=1
- elasticsearch_thread_pool_suggest_largest value=8
- elasticsearch_thread_pool_suggest_completed value=3
- elasticsearch_thread_pool_fetch_shard_store_queue value=7
- elasticsearch_thread_pool_fetch_shard_store_active value=4
- elasticsearch_thread_pool_fetch_shard_store_rejected value=2
- elasticsearch_thread_pool_fetch_shard_store_largest value=4
- elasticsearch_thread_pool_fetch_shard_store_completed value=1
- elasticsearch_thread_pool_fetch_shard_store_threads value=1
- elasticsearch_thread_pool_management_threads value=2
- elasticsearch_thread_pool_management_queue value=3
- elasticsearch_thread_pool_management_active value=1
- elasticsearch_thread_pool_management_rejected value=6
- elasticsearch_thread_pool_management_largest value=2
- elasticsearch_thread_pool_management_completed value=22
- elasticsearch_thread_pool_percolate_queue value=23
- elasticsearch_thread_pool_percolate_active value=13
- elasticsearch_thread_pool_percolate_rejected value=235
- elasticsearch_thread_pool_percolate_largest value=23
- elasticsearch_thread_pool_percolate_completed value=33
- elasticsearch_thread_pool_percolate_threads value=123
- elasticsearch_thread_pool_listener_active value=4
- elasticsearch_thread_pool_listener_rejected value=8
- elasticsearch_thread_pool_listener_largest value=1
- elasticsearch_thread_pool_listener_completed value=1
- elasticsearch_thread_pool_listener_threads value=1
- elasticsearch_thread_pool_listener_queue value=2
- elasticsearch_thread_pool_search_rejected value=7
- elasticsearch_thread_pool_search_largest value=2
- elasticsearch_thread_pool_search_completed value=4
- elasticsearch_thread_pool_search_threads value=5
- elasticsearch_thread_pool_search_queue value=7
- elasticsearch_thread_pool_search_active value=2
- elasticsearch_thread_pool_fetch_shard_started_threads value=3
- elasticsearch_thread_pool_fetch_shard_started_queue value=1
- elasticsearch_thread_pool_fetch_shard_started_active value=5
- elasticsearch_thread_pool_fetch_shard_started_rejected value=6
- elasticsearch_thread_pool_fetch_shard_started_largest value=4
- elasticsearch_thread_pool_fetch_shard_started_completed value=54
- elasticsearch_thread_pool_refresh_rejected value=4
- elasticsearch_thread_pool_refresh_largest value=8
- elasticsearch_thread_pool_refresh_completed value=3
- elasticsearch_thread_pool_refresh_threads value=23
- elasticsearch_thread_pool_refresh_queue value=7
- elasticsearch_thread_pool_refresh_active value=3
- elasticsearch_thread_pool_optimize_threads value=3
- elasticsearch_thread_pool_optimize_queue value=4
- elasticsearch_thread_pool_optimize_active value=1
- elasticsearch_thread_pool_optimize_rejected value=2
- elasticsearch_thread_pool_optimize_largest value=7
- elasticsearch_thread_pool_optimize_completed value=3
- elasticsearch_thread_pool_snapshot_largest value=1
- elasticsearch_thread_pool_snapshot_completed value=0
- elasticsearch_thread_pool_snapshot_threads value=8
- elasticsearch_thread_pool_snapshot_queue value=5
- elasticsearch_thread_pool_snapshot_active value=6
- elasticsearch_thread_pool_snapshot_rejected value=2
- elasticsearch_thread_pool_generic_threads value=1
- elasticsearch_thread_pool_generic_queue value=4
- elasticsearch_thread_pool_generic_active value=6
- elasticsearch_thread_pool_generic_rejected value=3
- elasticsearch_thread_pool_generic_largest value=2
- elasticsearch_thread_pool_generic_completed value=27
- elasticsearch_thread_pool_flush_threads value=3
- elasticsearch_thread_pool_flush_queue value=8
- elasticsearch_thread_pool_flush_active value=0
- elasticsearch_thread_pool_flush_rejected value=1
- elasticsearch_thread_pool_flush_largest value=5
- elasticsearch_thread_pool_flush_completed value=3

Transport statistics about sent and received bytes in cluster communication measurement names:
- elasticsearch_transport_server_open value=13
- elasticsearch_transport_rx_count value=6
- elasticsearch_transport_rx_size_in_bytes value=1380
- elasticsearch_transport_tx_count value=6
- elasticsearch_transport_tx_size_in_bytes value=1380
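The node measurement names above come from flattening the nested JSON returned by the stats endpoint into underscore-joined field names (elasticsearch.go below delegates this to the shared JSON flattener). A simplified, hypothetical re-implementation of the idea, not the plugin's actual code:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// flatten joins nested JSON object keys with "_", mirroring how
// "docs": {"count": 29652} becomes the field docs_count under the
// elasticsearch_indices measurement.
func flatten(prefix string, v interface{}, out map[string]interface{}) {
	switch t := v.(type) {
	case map[string]interface{}:
		for k, val := range t {
			key := k
			if prefix != "" {
				key = prefix + "_" + k
			}
			flatten(key, val, out)
		}
	default:
		out[prefix] = v
	}
}

func main() {
	raw := `{"docs": {"count": 29652, "deleted": 5229}}`
	var data map[string]interface{}
	json.Unmarshal([]byte(raw), &data)

	fields := make(map[string]interface{})
	flatten("", data, fields)
	fmt.Println(fields) // map[docs_count:29652 docs_deleted:5229]
}
```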
250 plugins/inputs/elasticsearch/elasticsearch.go (Normal file)
@@ -0,0 +1,250 @@
package elasticsearch

import (
	"encoding/json"
	"errors"
	"fmt"
	"net/http"
	"strings"
	"sync"
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/inputs"
	jsonparser "github.com/influxdata/telegraf/plugins/parsers/json"
)

const statsPath = "/_nodes/stats"
const statsPathLocal = "/_nodes/_local/stats"
const healthPath = "/_cluster/health"

type node struct {
	Host       string            `json:"host"`
	Name       string            `json:"name"`
	Attributes map[string]string `json:"attributes"`
	Indices    interface{}       `json:"indices"`
	OS         interface{}       `json:"os"`
	Process    interface{}       `json:"process"`
	JVM        interface{}       `json:"jvm"`
	ThreadPool interface{}       `json:"thread_pool"`
	FS         interface{}       `json:"fs"`
	Transport  interface{}       `json:"transport"`
	HTTP       interface{}       `json:"http"`
	Breakers   interface{}       `json:"breakers"`
}

type clusterHealth struct {
	ClusterName         string                 `json:"cluster_name"`
	Status              string                 `json:"status"`
	TimedOut            bool                   `json:"timed_out"`
	NumberOfNodes       int                    `json:"number_of_nodes"`
	NumberOfDataNodes   int                    `json:"number_of_data_nodes"`
	ActivePrimaryShards int                    `json:"active_primary_shards"`
	ActiveShards        int                    `json:"active_shards"`
	RelocatingShards    int                    `json:"relocating_shards"`
	InitializingShards  int                    `json:"initializing_shards"`
	UnassignedShards    int                    `json:"unassigned_shards"`
	Indices             map[string]indexHealth `json:"indices"`
}

type indexHealth struct {
	Status              string `json:"status"`
	NumberOfShards      int    `json:"number_of_shards"`
	NumberOfReplicas    int    `json:"number_of_replicas"`
	ActivePrimaryShards int    `json:"active_primary_shards"`
	ActiveShards        int    `json:"active_shards"`
	RelocatingShards    int    `json:"relocating_shards"`
	InitializingShards  int    `json:"initializing_shards"`
	UnassignedShards    int    `json:"unassigned_shards"`
}

const sampleConfig = `
  ## specify a list of one or more Elasticsearch servers
  servers = ["http://localhost:9200"]

  ## set local to false when you want to read the indices stats from all nodes
  ## within the cluster
  local = true

  ## set cluster_health to true when you want to also obtain cluster level stats
  cluster_health = false
`

// Elasticsearch is a plugin to read stats from one or many Elasticsearch
// servers.
type Elasticsearch struct {
	Local         bool
	Servers       []string
	ClusterHealth bool
	client        *http.Client
}

// NewElasticsearch return a new instance of Elasticsearch
func NewElasticsearch() *Elasticsearch {
	return &Elasticsearch{client: http.DefaultClient}
}

// SampleConfig returns sample configuration for this plugin.
func (e *Elasticsearch) SampleConfig() string {
	return sampleConfig
}

// Description returns the plugin description.
func (e *Elasticsearch) Description() string {
	return "Read stats from one or more Elasticsearch servers or clusters"
}

// Gather reads the stats from Elasticsearch and writes it to the
// Accumulator.
func (e *Elasticsearch) Gather(acc telegraf.Accumulator) error {
	errChan := make(chan error, len(e.Servers))
	var wg sync.WaitGroup
	wg.Add(len(e.Servers))

	for _, serv := range e.Servers {
		go func(s string, acc telegraf.Accumulator) {
			defer wg.Done()
			var url string
			if e.Local {
				url = s + statsPathLocal
			} else {
				url = s + statsPath
			}
			if err := e.gatherNodeStats(url, acc); err != nil {
				errChan <- err
				return
			}
			if e.ClusterHealth {
				e.gatherClusterStats(fmt.Sprintf("%s/_cluster/health?level=indices", s), acc)
			}
		}(serv, acc)
	}

	wg.Wait()
	close(errChan)
	// Get all errors and return them as one giant error
	errStrings := []string{}
	for err := range errChan {
		errStrings = append(errStrings, err.Error())
	}

	if len(errStrings) == 0 {
		return nil
	}
	return errors.New(strings.Join(errStrings, "\n"))
}

func (e *Elasticsearch) gatherNodeStats(url string, acc telegraf.Accumulator) error {
	nodeStats := &struct {
		ClusterName string           `json:"cluster_name"`
		Nodes       map[string]*node `json:"nodes"`
	}{}
	if err := e.gatherData(url, nodeStats); err != nil {
		return err
	}
	for id, n := range nodeStats.Nodes {
		tags := map[string]string{
			"node_id":      id,
			"node_host":    n.Host,
			"node_name":    n.Name,
			"cluster_name": nodeStats.ClusterName,
		}

		for k, v := range n.Attributes {
			tags["node_attribute_"+k] = v
		}

		stats := map[string]interface{}{
			"indices":     n.Indices,
			"os":          n.OS,
			"process":     n.Process,
			"jvm":         n.JVM,
			"thread_pool": n.ThreadPool,
			"fs":          n.FS,
			"transport":   n.Transport,
			"http":        n.HTTP,
			"breakers":    n.Breakers,
		}

		now := time.Now()
		for p, s := range stats {
			f := jsonparser.JSONFlattener{}
			err := f.FlattenJSON("", s)
			if err != nil {
				return err
			}
			acc.AddFields("elasticsearch_"+p, f.Fields, tags, now)
		}
	}
	return nil
}

func (e *Elasticsearch) gatherClusterStats(url string, acc telegraf.Accumulator) error {
	clusterStats := &clusterHealth{}
	if err := e.gatherData(url, clusterStats); err != nil {
		return err
	}
	measurementTime := time.Now()
	clusterFields := map[string]interface{}{
		"status":                clusterStats.Status,
		"timed_out":             clusterStats.TimedOut,
		"number_of_nodes":       clusterStats.NumberOfNodes,
		"number_of_data_nodes":  clusterStats.NumberOfDataNodes,
		"active_primary_shards": clusterStats.ActivePrimaryShards,
		"active_shards":         clusterStats.ActiveShards,
		"relocating_shards":     clusterStats.RelocatingShards,
		"initializing_shards":   clusterStats.InitializingShards,
		"unassigned_shards":     clusterStats.UnassignedShards,
	}
	acc.AddFields(
		"elasticsearch_cluster_health",
		clusterFields,
		map[string]string{"name": clusterStats.ClusterName},
		measurementTime,
	)

	for name, health := range clusterStats.Indices {
		indexFields := map[string]interface{}{
			"status":                health.Status,
			"number_of_shards":      health.NumberOfShards,
			"number_of_replicas":    health.NumberOfReplicas,
			"active_primary_shards": health.ActivePrimaryShards,
			"active_shards":         health.ActiveShards,
			"relocating_shards":     health.RelocatingShards,
			"initializing_shards":   health.InitializingShards,
			"unassigned_shards":     health.UnassignedShards,
		}
		acc.AddFields(
			"elasticsearch_indices",
			indexFields,
			map[string]string{"index": name},
			measurementTime,
		)
	}
	return nil
}

func (e *Elasticsearch) gatherData(url string, v interface{}) error {
	r, err := e.client.Get(url)
	if err != nil {
		return err
	}
	defer r.Body.Close()
	if r.StatusCode != http.StatusOK {
		// NOTE: we are not going to read/discard r.Body under the assumption we'd prefer
		// to let the underlying transport close the connection and re-establish a new one for
		// future calls.
		return fmt.Errorf("elasticsearch: API responded with status-code %d, expected %d",
			r.StatusCode, http.StatusOK)
	}
	if err = json.NewDecoder(r.Body).Decode(v); err != nil {
		return err
	}
	return nil
}

func init() {
	inputs.Add("elasticsearch", func() telegraf.Input {
		return NewElasticsearch()
	})
}
86 plugins/inputs/elasticsearch/elasticsearch_test.go (Normal file)
@@ -0,0 +1,86 @@
package elasticsearch

import (
	"io/ioutil"
	"net/http"
	"strings"
	"testing"

	"github.com/influxdata/telegraf/testutil"

	"github.com/stretchr/testify/require"
)

type transportMock struct {
	statusCode int
	body       string
}

func newTransportMock(statusCode int, body string) http.RoundTripper {
	return &transportMock{
		statusCode: statusCode,
		body:       body,
	}
}

func (t *transportMock) RoundTrip(r *http.Request) (*http.Response, error) {
	res := &http.Response{
		Header:     make(http.Header),
		Request:    r,
		StatusCode: t.statusCode,
	}
	res.Header.Set("Content-Type", "application/json")
	res.Body = ioutil.NopCloser(strings.NewReader(t.body))
	return res, nil
}

func TestElasticsearch(t *testing.T) {
	es := NewElasticsearch()
	es.Servers = []string{"http://example.com:9200"}
	es.client.Transport = newTransportMock(http.StatusOK, statsResponse)

	var acc testutil.Accumulator
	if err := es.Gather(&acc); err != nil {
		t.Fatal(err)
	}

	tags := map[string]string{
		"cluster_name":          "es-testcluster",
		"node_attribute_master": "true",
		"node_id":               "SDFsfSDFsdfFSDSDfSFDSDF",
		"node_name":             "test.host.com",
		"node_host":             "test",
	}

	acc.AssertContainsTaggedFields(t, "elasticsearch_indices", indicesExpected, tags)
	acc.AssertContainsTaggedFields(t, "elasticsearch_os", osExpected, tags)
	acc.AssertContainsTaggedFields(t, "elasticsearch_process", processExpected, tags)
	acc.AssertContainsTaggedFields(t, "elasticsearch_jvm", jvmExpected, tags)
	acc.AssertContainsTaggedFields(t, "elasticsearch_thread_pool", threadPoolExpected, tags)
	acc.AssertContainsTaggedFields(t, "elasticsearch_fs", fsExpected, tags)
	acc.AssertContainsTaggedFields(t, "elasticsearch_transport", transportExpected, tags)
	acc.AssertContainsTaggedFields(t, "elasticsearch_http", httpExpected, tags)
	acc.AssertContainsTaggedFields(t, "elasticsearch_breakers", breakersExpected, tags)
}

func TestGatherClusterStats(t *testing.T) {
	es := NewElasticsearch()
	es.Servers = []string{"http://example.com:9200"}
	es.ClusterHealth = true
	es.client.Transport = newTransportMock(http.StatusOK, clusterResponse)

	var acc testutil.Accumulator
	require.NoError(t, es.Gather(&acc))

	acc.AssertContainsTaggedFields(t, "elasticsearch_cluster_health",
		clusterHealthExpected,
		map[string]string{"name": "elasticsearch_telegraf"})

	acc.AssertContainsTaggedFields(t, "elasticsearch_indices",
		v1IndexExpected,
		map[string]string{"index": "v1"})

	acc.AssertContainsTaggedFields(t, "elasticsearch_indices",
		v2IndexExpected,
		map[string]string{"index": "v2"})
}
765 plugins/inputs/elasticsearch/testdata_test.go (Normal file)
@@ -0,0 +1,765 @@
package elasticsearch

const clusterResponse = `
{
  "cluster_name": "elasticsearch_telegraf",
  "status": "green",
  "timed_out": false,
  "number_of_nodes": 3,
  "number_of_data_nodes": 3,
  "active_primary_shards": 5,
  "active_shards": 15,
  "relocating_shards": 0,
  "initializing_shards": 0,
  "unassigned_shards": 0,
  "indices": {
    "v1": {
      "status": "green",
      "number_of_shards": 10,
      "number_of_replicas": 1,
      "active_primary_shards": 10,
      "active_shards": 20,
      "relocating_shards": 0,
      "initializing_shards": 0,
      "unassigned_shards": 0
    },
    "v2": {
      "status": "red",
      "number_of_shards": 10,
      "number_of_replicas": 1,
      "active_primary_shards": 0,
      "active_shards": 0,
      "relocating_shards": 0,
      "initializing_shards": 0,
      "unassigned_shards": 20
    }
  }
}
`

var clusterHealthExpected = map[string]interface{}{
	"status":                "green",
	"timed_out":             false,
	"number_of_nodes":       3,
	"number_of_data_nodes":  3,
	"active_primary_shards": 5,
	"active_shards":         15,
	"relocating_shards":     0,
	"initializing_shards":   0,
	"unassigned_shards":     0,
}

var v1IndexExpected = map[string]interface{}{
	"status":                "green",
	"number_of_shards":      10,
	"number_of_replicas":    1,
	"active_primary_shards": 10,
	"active_shards":         20,
	"relocating_shards":     0,
	"initializing_shards":   0,
	"unassigned_shards":     0,
}

var v2IndexExpected = map[string]interface{}{
	"status":                "red",
	"number_of_shards":      10,
	"number_of_replicas":    1,
	"active_primary_shards": 0,
	"active_shards":         0,
	"relocating_shards":     0,
	"initializing_shards":   0,
	"unassigned_shards":     20,
}

const statsResponse = `
{
  "cluster_name": "es-testcluster",
  "nodes": {
    "SDFsfSDFsdfFSDSDfSFDSDF": {
      "timestamp": 1436365550135,
      "name": "test.host.com",
      "transport_address": "inet[/127.0.0.1:9300]",
      "host": "test",
      "ip": [
        "inet[/127.0.0.1:9300]",
        "NONE"
      ],
      "attributes": {
        "master": "true"
      },
      "indices": {
        "docs": {
          "count": 29652,
          "deleted": 5229
        },
        "store": {
          "size_in_bytes": 37715234,
          "throttle_time_in_millis": 215
        },
        "indexing": {
          "index_total": 84790,
          "index_time_in_millis": 29680,
          "index_current": 0,
          "delete_total": 13879,
          "delete_time_in_millis": 1139,
          "delete_current": 0,
          "noop_update_total": 0,
          "is_throttled": false,
          "throttle_time_in_millis": 0
        },
        "get": {
          "total": 1,
          "time_in_millis": 2,
          "exists_total": 0,
          "exists_time_in_millis": 0,
          "missing_total": 1,
          "missing_time_in_millis": 2,
          "current": 0
        },
        "search": {
          "open_contexts": 0,
          "query_total": 1452,
          "query_time_in_millis": 5695,
          "query_current": 0,
          "fetch_total": 414,
          "fetch_time_in_millis": 146,
          "fetch_current": 0
        },
        "merges": {
          "current": 0,
          "current_docs": 0,
          "current_size_in_bytes": 0,
          "total": 133,
          "total_time_in_millis": 21060,
          "total_docs": 203672,
          "total_size_in_bytes": 142900226
        },
        "refresh": {
          "total": 1076,
          "total_time_in_millis": 20078
        },
        "flush": {
          "total": 115,
          "total_time_in_millis": 2401
        },
        "warmer": {
          "current": 0,
          "total": 2319,
          "total_time_in_millis": 448
        },
        "filter_cache": {
          "memory_size_in_bytes": 7384,
          "evictions": 0
        },
        "id_cache": {
          "memory_size_in_bytes": 0
        },
        "fielddata": {
          "memory_size_in_bytes": 12996,
          "evictions": 0
        },
        "percolate": {
          "total": 0,
          "time_in_millis": 0,
          "current": 0,
          "memory_size_in_bytes": -1,
          "memory_size": "-1b",
          "queries": 0
        },
        "completion": {
          "size_in_bytes": 0
        },
        "segments": {
          "count": 134,
          "memory_in_bytes": 1285212,
          "index_writer_memory_in_bytes": 0,
          "index_writer_max_memory_in_bytes": 172368955,
          "version_map_memory_in_bytes": 611844,
          "fixed_bit_set_memory_in_bytes": 0
        },
        "translog": {
          "operations": 17702,
          "size_in_bytes": 17
        },
        "suggest": {
          "total": 0,
          "time_in_millis": 0,
          "current": 0
        },
        "query_cache": {
          "memory_size_in_bytes": 0,
          "evictions": 0,
          "hit_count": 0,
          "miss_count": 0
        },
        "recovery": {
          "current_as_source": 0,
          "current_as_target": 0,
          "throttle_time_in_millis": 0
        }
      },
      "os": {
        "timestamp": 1436460392944,
        "load_average": [
          0.01,
          0.04,
          0.05
        ],
        "mem": {
          "free_in_bytes": 477761536,
          "used_in_bytes": 1621868544,
          "free_percent": 74,
          "used_percent": 25,
          "actual_free_in_bytes": 1565470720,
          "actual_used_in_bytes": 534159360
        },
        "swap": {
          "used_in_bytes": 0,
          "free_in_bytes": 487997440
        }
      },
      "process": {
        "timestamp": 1436460392945,
        "open_file_descriptors": 160,
        "cpu": {
          "percent": 2,
          "sys_in_millis": 1870,
          "user_in_millis": 13610,
          "total_in_millis": 15480
        },
        "mem": {
          "total_virtual_in_bytes": 4747890688
        }
      },
      "jvm": {
        "timestamp": 1436460392945,
        "uptime_in_millis": 202245,
        "mem": {
          "heap_used_in_bytes": 52709568,
          "heap_used_percent": 5,
          "heap_committed_in_bytes": 259522560,
          "heap_max_in_bytes": 1038876672,
          "non_heap_used_in_bytes": 39634576,
          "non_heap_committed_in_bytes": 40841216,
          "pools": {
            "young": {
              "used_in_bytes": 32685760,
              "max_in_bytes": 279183360,
              "peak_used_in_bytes": 71630848,
              "peak_max_in_bytes": 279183360
            },
            "survivor": {
              "used_in_bytes": 8912880,
              "max_in_bytes": 34865152,
              "peak_used_in_bytes": 8912888,
              "peak_max_in_bytes": 34865152
            },
            "old": {
              "used_in_bytes": 11110928,
              "max_in_bytes": 724828160,
              "peak_used_in_bytes": 14354608,
              "peak_max_in_bytes": 724828160
            }
          }
        },
        "threads": {
          "count": 44,
          "peak_count": 45
        },
        "gc": {
          "collectors": {
            "young": {
              "collection_count": 2,
              "collection_time_in_millis": 98
            },
            "old": {
              "collection_count": 1,
              "collection_time_in_millis": 24
            }
          }
        },
        "buffer_pools": {
          "direct": {
            "count": 40,
            "used_in_bytes": 6304239,
            "total_capacity_in_bytes": 6304239
          },
          "mapped": {
            "count": 0,
            "used_in_bytes": 0,
            "total_capacity_in_bytes": 0
          }
        }
      },
      "thread_pool": {
        "percolate": {
          "threads": 123,
          "queue": 23,
          "active": 13,
          "rejected": 235,
          "largest": 23,
          "completed": 33
        },
        "fetch_shard_started": {
          "threads": 3,
          "queue": 1,
          "active": 5,
          "rejected": 6,
          "largest": 4,
          "completed": 54
        },
        "listener": {
          "threads": 1,
          "queue": 2,
          "active": 4,
          "rejected": 8,
          "largest": 1,
          "completed": 1
        },
        "index": {
          "threads": 6,
          "queue": 8,
          "active": 4,
          "rejected": 2,
          "largest": 3,
          "completed": 6
        },
        "refresh": {
          "threads": 23,
          "queue": 7,
          "active": 3,
          "rejected": 4,
          "largest": 8,
          "completed": 3
        },
        "suggest": {
          "threads": 2,
          "queue": 7,
          "active": 2,
          "rejected": 1,
          "largest": 8,
          "completed": 3
        },
        "generic": {
          "threads": 1,
          "queue": 4,
          "active": 6,
          "rejected": 3,
          "largest": 2,
          "completed": 27
        },
        "warmer": {
          "threads": 2,
          "queue": 7,
          "active": 3,
          "rejected": 2,
          "largest": 3,
          "completed": 1
        },
        "search": {
          "threads": 5,
          "queue": 7,
          "active": 2,
          "rejected": 7,
          "largest": 2,
          "completed": 4
        },
        "flush": {
          "threads": 3,
          "queue": 8,
          "active": 0,
          "rejected": 1,
          "largest": 5,
          "completed": 3
        },
        "optimize": {
          "threads": 3,
          "queue": 4,
          "active": 1,
          "rejected": 2,
          "largest": 7,
          "completed": 3
        },
        "fetch_shard_store": {
          "threads": 1,
          "queue": 7,
          "active": 4,
          "rejected": 2,
          "largest": 4,
          "completed": 1
        },
        "management": {
          "threads": 2,
          "queue": 3,
          "active": 1,
          "rejected": 6,
          "largest": 2,
          "completed": 22
        },
        "get": {
          "threads": 1,
          "queue": 8,
          "active": 4,
          "rejected": 3,
          "largest": 2,
          "completed": 1
        },
        "merge": {
          "threads": 6,
          "queue": 4,
          "active": 5,
          "rejected": 2,
          "largest": 5,
          "completed": 1
        },
        "bulk": {
          "threads": 4,
          "queue": 5,
          "active": 7,
          "rejected": 3,
          "largest": 1,
          "completed": 4
        },
        "snapshot": {
          "threads": 8,
          "queue": 5,
          "active": 6,
          "rejected": 2,
          "largest": 1,
          "completed": 0
        }
      },
      "fs": {
        "timestamp": 1436460392946,
        "total": {
          "total_in_bytes": 19507089408,
          "free_in_bytes": 16909316096,
          "available_in_bytes": 15894814720
        },
        "data": [
          {
            "path": "/usr/share/elasticsearch/data/elasticsearch/nodes/0",
            "mount": "/usr/share/elasticsearch/data",
            "type": "ext4",
            "total_in_bytes": 19507089408,
            "free_in_bytes": 16909316096,
            "available_in_bytes": 15894814720
          }
]
|
||||
},
|
||||
"transport": {
|
||||
"server_open": 13,
|
||||
"rx_count": 6,
|
||||
"rx_size_in_bytes": 1380,
|
||||
"tx_count": 6,
|
||||
"tx_size_in_bytes": 1380
|
||||
},
|
||||
"http": {
|
||||
"current_open": 3,
|
||||
"total_opened": 3
|
||||
},
|
||||
"breakers": {
|
||||
"fielddata": {
|
||||
"limit_size_in_bytes": 623326003,
|
||||
"limit_size": "594.4mb",
|
||||
"estimated_size_in_bytes": 0,
|
||||
"estimated_size": "0b",
|
||||
"overhead": 1.03,
|
||||
"tripped": 0
|
||||
},
|
||||
"request": {
|
||||
"limit_size_in_bytes": 415550668,
|
||||
"limit_size": "396.2mb",
|
||||
"estimated_size_in_bytes": 0,
|
||||
"estimated_size": "0b",
|
||||
"overhead": 1.0,
|
||||
"tripped": 0
|
||||
},
|
||||
"parent": {
|
||||
"limit_size_in_bytes": 727213670,
|
||||
"limit_size": "693.5mb",
|
||||
"estimated_size_in_bytes": 0,
|
||||
"estimated_size": "0b",
|
||||
"overhead": 1.0,
|
||||
"tripped": 0
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
`
|
||||
|
||||
var indicesExpected = map[string]interface{}{
|
||||
"id_cache_memory_size_in_bytes": float64(0),
|
||||
"completion_size_in_bytes": float64(0),
|
||||
"suggest_total": float64(0),
|
||||
"suggest_time_in_millis": float64(0),
|
||||
"suggest_current": float64(0),
|
||||
"query_cache_memory_size_in_bytes": float64(0),
|
||||
"query_cache_evictions": float64(0),
|
||||
"query_cache_hit_count": float64(0),
|
||||
"query_cache_miss_count": float64(0),
|
||||
"store_size_in_bytes": float64(37715234),
|
||||
"store_throttle_time_in_millis": float64(215),
|
||||
"merges_current_docs": float64(0),
|
||||
"merges_current_size_in_bytes": float64(0),
|
||||
"merges_total": float64(133),
|
||||
"merges_total_time_in_millis": float64(21060),
|
||||
"merges_total_docs": float64(203672),
|
||||
"merges_total_size_in_bytes": float64(142900226),
|
||||
"merges_current": float64(0),
|
||||
"filter_cache_memory_size_in_bytes": float64(7384),
|
||||
"filter_cache_evictions": float64(0),
|
||||
"indexing_index_total": float64(84790),
|
||||
"indexing_index_time_in_millis": float64(29680),
|
||||
"indexing_index_current": float64(0),
|
||||
"indexing_noop_update_total": float64(0),
|
||||
"indexing_throttle_time_in_millis": float64(0),
|
||||
"indexing_delete_total": float64(13879),
|
||||
"indexing_delete_time_in_millis": float64(1139),
|
||||
"indexing_delete_current": float64(0),
|
||||
"get_exists_time_in_millis": float64(0),
|
||||
"get_missing_total": float64(1),
|
||||
"get_missing_time_in_millis": float64(2),
|
||||
"get_current": float64(0),
|
||||
"get_total": float64(1),
|
||||
"get_time_in_millis": float64(2),
|
||||
"get_exists_total": float64(0),
|
||||
"refresh_total": float64(1076),
|
||||
"refresh_total_time_in_millis": float64(20078),
|
||||
"percolate_current": float64(0),
|
||||
"percolate_memory_size_in_bytes": float64(-1),
|
||||
"percolate_queries": float64(0),
|
||||
"percolate_total": float64(0),
|
||||
"percolate_time_in_millis": float64(0),
|
||||
"translog_operations": float64(17702),
|
||||
"translog_size_in_bytes": float64(17),
|
||||
"recovery_current_as_source": float64(0),
|
||||
"recovery_current_as_target": float64(0),
|
||||
"recovery_throttle_time_in_millis": float64(0),
|
||||
"docs_count": float64(29652),
|
||||
"docs_deleted": float64(5229),
|
||||
"flush_total_time_in_millis": float64(2401),
|
||||
"flush_total": float64(115),
|
||||
"fielddata_memory_size_in_bytes": float64(12996),
|
||||
"fielddata_evictions": float64(0),
|
||||
"search_fetch_current": float64(0),
|
||||
"search_open_contexts": float64(0),
|
||||
"search_query_total": float64(1452),
|
||||
"search_query_time_in_millis": float64(5695),
|
||||
"search_query_current": float64(0),
|
||||
"search_fetch_total": float64(414),
|
||||
"search_fetch_time_in_millis": float64(146),
|
||||
"warmer_current": float64(0),
|
||||
"warmer_total": float64(2319),
|
||||
"warmer_total_time_in_millis": float64(448),
|
||||
"segments_count": float64(134),
|
||||
"segments_memory_in_bytes": float64(1285212),
|
||||
"segments_index_writer_memory_in_bytes": float64(0),
|
||||
"segments_index_writer_max_memory_in_bytes": float64(172368955),
|
||||
"segments_version_map_memory_in_bytes": float64(611844),
|
||||
"segments_fixed_bit_set_memory_in_bytes": float64(0),
|
||||
}
|
||||
|
||||
var osExpected = map[string]interface{}{
|
||||
"load_average_0": float64(0.01),
|
||||
"load_average_1": float64(0.04),
|
||||
"load_average_2": float64(0.05),
|
||||
"swap_used_in_bytes": float64(0),
|
||||
"swap_free_in_bytes": float64(487997440),
|
||||
"timestamp": float64(1436460392944),
|
||||
"mem_free_percent": float64(74),
|
||||
"mem_used_percent": float64(25),
|
||||
"mem_actual_free_in_bytes": float64(1565470720),
|
||||
"mem_actual_used_in_bytes": float64(534159360),
|
||||
"mem_free_in_bytes": float64(477761536),
|
||||
"mem_used_in_bytes": float64(1621868544),
|
||||
}
|
||||
|
||||
var processExpected = map[string]interface{}{
|
||||
"mem_total_virtual_in_bytes": float64(4747890688),
|
||||
"timestamp": float64(1436460392945),
|
||||
"open_file_descriptors": float64(160),
|
||||
"cpu_total_in_millis": float64(15480),
|
||||
"cpu_percent": float64(2),
|
||||
"cpu_sys_in_millis": float64(1870),
|
||||
"cpu_user_in_millis": float64(13610),
|
||||
}
|
||||
|
||||
var jvmExpected = map[string]interface{}{
|
||||
"timestamp": float64(1436460392945),
|
||||
"uptime_in_millis": float64(202245),
|
||||
"mem_non_heap_used_in_bytes": float64(39634576),
|
||||
"mem_non_heap_committed_in_bytes": float64(40841216),
|
||||
"mem_pools_young_max_in_bytes": float64(279183360),
|
||||
"mem_pools_young_peak_used_in_bytes": float64(71630848),
|
||||
"mem_pools_young_peak_max_in_bytes": float64(279183360),
|
||||
"mem_pools_young_used_in_bytes": float64(32685760),
|
||||
"mem_pools_survivor_peak_used_in_bytes": float64(8912888),
|
||||
"mem_pools_survivor_peak_max_in_bytes": float64(34865152),
|
||||
"mem_pools_survivor_used_in_bytes": float64(8912880),
|
||||
"mem_pools_survivor_max_in_bytes": float64(34865152),
|
||||
"mem_pools_old_peak_max_in_bytes": float64(724828160),
|
||||
"mem_pools_old_used_in_bytes": float64(11110928),
|
||||
"mem_pools_old_max_in_bytes": float64(724828160),
|
||||
"mem_pools_old_peak_used_in_bytes": float64(14354608),
|
||||
"mem_heap_used_in_bytes": float64(52709568),
|
||||
"mem_heap_used_percent": float64(5),
|
||||
"mem_heap_committed_in_bytes": float64(259522560),
|
||||
"mem_heap_max_in_bytes": float64(1038876672),
|
||||
"threads_peak_count": float64(45),
|
||||
"threads_count": float64(44),
|
||||
"gc_collectors_young_collection_count": float64(2),
|
||||
"gc_collectors_young_collection_time_in_millis": float64(98),
|
||||
"gc_collectors_old_collection_count": float64(1),
|
||||
"gc_collectors_old_collection_time_in_millis": float64(24),
|
||||
"buffer_pools_direct_count": float64(40),
|
||||
"buffer_pools_direct_used_in_bytes": float64(6304239),
|
||||
"buffer_pools_direct_total_capacity_in_bytes": float64(6304239),
|
||||
"buffer_pools_mapped_count": float64(0),
|
||||
"buffer_pools_mapped_used_in_bytes": float64(0),
|
||||
"buffer_pools_mapped_total_capacity_in_bytes": float64(0),
|
||||
}
|
||||
|
||||
var threadPoolExpected = map[string]interface{}{
|
||||
"merge_threads": float64(6),
|
||||
"merge_queue": float64(4),
|
||||
"merge_active": float64(5),
|
||||
"merge_rejected": float64(2),
|
||||
"merge_largest": float64(5),
|
||||
"merge_completed": float64(1),
|
||||
"bulk_threads": float64(4),
|
||||
"bulk_queue": float64(5),
|
||||
"bulk_active": float64(7),
|
||||
"bulk_rejected": float64(3),
|
||||
"bulk_largest": float64(1),
|
||||
"bulk_completed": float64(4),
|
||||
"warmer_threads": float64(2),
|
||||
"warmer_queue": float64(7),
|
||||
"warmer_active": float64(3),
|
||||
"warmer_rejected": float64(2),
|
||||
"warmer_largest": float64(3),
|
||||
"warmer_completed": float64(1),
|
||||
"get_largest": float64(2),
|
||||
"get_completed": float64(1),
|
||||
"get_threads": float64(1),
|
||||
"get_queue": float64(8),
|
||||
"get_active": float64(4),
|
||||
"get_rejected": float64(3),
|
||||
"index_threads": float64(6),
|
||||
"index_queue": float64(8),
|
||||
"index_active": float64(4),
|
||||
"index_rejected": float64(2),
|
||||
"index_largest": float64(3),
|
||||
"index_completed": float64(6),
|
||||
"suggest_threads": float64(2),
|
||||
"suggest_queue": float64(7),
|
||||
"suggest_active": float64(2),
|
||||
"suggest_rejected": float64(1),
|
||||
"suggest_largest": float64(8),
|
||||
"suggest_completed": float64(3),
|
||||
"fetch_shard_store_queue": float64(7),
|
||||
"fetch_shard_store_active": float64(4),
|
||||
"fetch_shard_store_rejected": float64(2),
|
||||
"fetch_shard_store_largest": float64(4),
|
||||
"fetch_shard_store_completed": float64(1),
|
||||
"fetch_shard_store_threads": float64(1),
|
||||
"management_threads": float64(2),
|
||||
"management_queue": float64(3),
|
||||
"management_active": float64(1),
|
||||
"management_rejected": float64(6),
|
||||
"management_largest": float64(2),
|
||||
"management_completed": float64(22),
|
||||
"percolate_queue": float64(23),
|
||||
"percolate_active": float64(13),
|
||||
"percolate_rejected": float64(235),
|
||||
"percolate_largest": float64(23),
|
||||
"percolate_completed": float64(33),
|
||||
"percolate_threads": float64(123),
|
||||
"listener_active": float64(4),
|
||||
"listener_rejected": float64(8),
|
||||
"listener_largest": float64(1),
|
||||
"listener_completed": float64(1),
|
||||
"listener_threads": float64(1),
|
||||
"listener_queue": float64(2),
|
||||
"search_rejected": float64(7),
|
||||
"search_largest": float64(2),
|
||||
"search_completed": float64(4),
|
||||
"search_threads": float64(5),
|
||||
"search_queue": float64(7),
|
||||
"search_active": float64(2),
|
||||
"fetch_shard_started_threads": float64(3),
|
||||
"fetch_shard_started_queue": float64(1),
|
||||
"fetch_shard_started_active": float64(5),
|
||||
"fetch_shard_started_rejected": float64(6),
|
||||
"fetch_shard_started_largest": float64(4),
|
||||
"fetch_shard_started_completed": float64(54),
|
||||
"refresh_rejected": float64(4),
|
||||
"refresh_largest": float64(8),
|
||||
"refresh_completed": float64(3),
|
||||
"refresh_threads": float64(23),
|
||||
"refresh_queue": float64(7),
|
||||
"refresh_active": float64(3),
|
||||
"optimize_threads": float64(3),
|
||||
"optimize_queue": float64(4),
|
||||
"optimize_active": float64(1),
|
||||
"optimize_rejected": float64(2),
|
||||
"optimize_largest": float64(7),
|
||||
"optimize_completed": float64(3),
|
||||
"snapshot_largest": float64(1),
|
||||
"snapshot_completed": float64(0),
|
||||
"snapshot_threads": float64(8),
|
||||
"snapshot_queue": float64(5),
|
||||
"snapshot_active": float64(6),
|
||||
"snapshot_rejected": float64(2),
|
||||
"generic_threads": float64(1),
|
||||
"generic_queue": float64(4),
|
||||
"generic_active": float64(6),
|
||||
"generic_rejected": float64(3),
|
||||
"generic_largest": float64(2),
|
||||
"generic_completed": float64(27),
|
||||
"flush_threads": float64(3),
|
||||
"flush_queue": float64(8),
|
||||
"flush_active": float64(0),
|
||||
"flush_rejected": float64(1),
|
||||
"flush_largest": float64(5),
|
||||
"flush_completed": float64(3),
|
||||
}
|
||||
|
||||
var fsExpected = map[string]interface{}{
|
||||
"data_0_total_in_bytes": float64(19507089408),
|
||||
"data_0_free_in_bytes": float64(16909316096),
|
||||
"data_0_available_in_bytes": float64(15894814720),
|
||||
"timestamp": float64(1436460392946),
|
||||
"total_free_in_bytes": float64(16909316096),
|
||||
"total_available_in_bytes": float64(15894814720),
|
||||
"total_total_in_bytes": float64(19507089408),
|
||||
}
|
||||
|
||||
var transportExpected = map[string]interface{}{
|
||||
"server_open": float64(13),
|
||||
"rx_count": float64(6),
|
||||
"rx_size_in_bytes": float64(1380),
|
||||
"tx_count": float64(6),
|
||||
"tx_size_in_bytes": float64(1380),
|
||||
}
|
||||
|
||||
var httpExpected = map[string]interface{}{
|
||||
"current_open": float64(3),
|
||||
"total_opened": float64(3),
|
||||
}
|
||||
|
||||
var breakersExpected = map[string]interface{}{
|
||||
"fielddata_estimated_size_in_bytes": float64(0),
|
||||
"fielddata_overhead": float64(1.03),
|
||||
"fielddata_tripped": float64(0),
|
||||
"fielddata_limit_size_in_bytes": float64(623326003),
|
||||
"request_estimated_size_in_bytes": float64(0),
|
||||
"request_overhead": float64(1.0),
|
||||
"request_tripped": float64(0),
|
||||
"request_limit_size_in_bytes": float64(415550668),
|
||||
"parent_overhead": float64(1.0),
|
||||
"parent_tripped": float64(0),
|
||||
"parent_limit_size_in_bytes": float64(727213670),
|
||||
"parent_estimated_size_in_bytes": float64(0),
|
||||
}
|
||||
plugins/inputs/exec/README.md (new file, 180 lines)
# Exec Input Plugin

The exec plugin can execute arbitrary commands which output:

* JSON
* InfluxDB [line-protocol](https://docs.influxdata.com/influxdb/v0.9/write_protocols/line/)
* Graphite [graphite-protocol](http://graphite.readthedocs.org/en/latest/feeding-carbon.html)

> Graphite understands messages with this format:
>
> ```
> metric_path value timestamp\n
> ```
>
> __metric_path__ is the metric namespace that you want to populate.
>
> __value__ is the value that you want to assign to the metric at this time.
>
> __timestamp__ is the unix epoch time.

If using JSON, only numeric values are parsed and turned into floats. Booleans
and strings will be ignored.
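
To make the flattening rule concrete, here is a minimal, illustrative Go sketch. It is not the plugin's actual parser (that lives in Telegraf's shared parsers package); it only demonstrates the rule: nested keys are joined with `_`, numbers are kept as floats, and strings, booleans, and nulls are dropped.

```go
package main

import "fmt"

// flatten demonstrates the JSON flattening rule described above.
func flatten(prefix string, v interface{}, out map[string]float64) {
    switch t := v.(type) {
    case float64: // encoding/json decodes every JSON number as float64
        out[prefix] = t
    case map[string]interface{}: // nested keys are joined with "_"
        for k, child := range t {
            key := k
            if prefix != "" {
                key = prefix + "_" + k
            }
            flatten(key, child, out)
        }
    case []interface{}: // arrays flatten by index, e.g. users_0, users_1, ...
        for i, child := range t {
            flatten(fmt.Sprintf("%s_%d", prefix, i), child, out)
        }
    default:
        // strings, booleans, and nulls are ignored
    }
}

func main() {
    in := map[string]interface{}{
        "a": 0.5,
        "b": map[string]interface{}{"c": 0.1, "d": 5.0},
        "s": "ignored",
    }
    out := map[string]float64{}
    flatten("", in, out)
    fmt.Println(out) // map[a:0.5 b_c:0.1 b_d:5]
}
```
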
### Configuration

```
# Read flattened metrics from one or more commands that output JSON to stdout
[[inputs.exec]]
  # Shell/commands array
  commands = ["/tmp/test.sh", "/tmp/test2.sh"]

  # Data format to consume. This can be "json", "influx" or "graphite" (line-protocol)
  # NOTE: json only reads numerical measurements; strings and booleans are ignored.
  data_format = "json"

  # measurement name suffix (for separating different commands)
  name_suffix = "_mycollector"

  ## The settings below apply only to data_format = "graphite"; they are
  ## ignored for the other data formats.
  ## If matching multiple measurement files, this string will be used to join the matched values.
  #separator = "."

  ## Each template line requires a template pattern. It can have an optional
  ## filter before the template, separated by spaces. It can also have optional
  ## extra tags following the template. Multiple tags should be separated by
  ## commas and no spaces, similar to the line protocol format. There can be
  ## only one default template.
  ## Templates support the following forms:
  ##   1. filter + template
  ##   2. filter + template + extra tag
  ##   3. filter + template with field key
  ##   4. default template
  #templates = [
  #  "*.app env.service.resource.measurement",
  #  "stats.* .host.measurement* region=us-west,agent=sensu",
  #  "stats2.* .host.measurement.field",
  #  "measurement*"
  #]
```

Other options for modifying the measurement names are:

```
name_prefix = "prefix_"
```

### Example 1

Let's say that we have the above configuration, and mycollector outputs the
following JSON:

```json
{
  "a": 0.5,
  "b": {
    "c": 0.1,
    "d": 5
  }
}
```

The collected metrics will be stored as fields under the measurement
"exec_mycollector":

```
exec_mycollector a=0.5,b_c=0.1,b_d=5 1452815002357578567
```

### Example 2

Now let's say we have the following configuration:

```
[[inputs.exec]]
  # Shell/commands array
  # Compatible with the old version: the legacy single "command" option
  # can still be used.
  # command = "/usr/bin/line_protocol_collector"
  commands = ["/usr/bin/line_protocol_collector","/tmp/test2.sh"]

  # Data format to consume. This can be "json" or "influx" (line-protocol)
  # NOTE: json only reads numerical measurements; strings and booleans are ignored.
  data_format = "influx"
```

And line_protocol_collector outputs the following line protocol:

```
cpu,cpu=cpu0,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu1,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu2,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu3,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu4,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu5,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu6,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
```

You will get data in InfluxDB exactly as it is defined above: the tags are
cpu=cpuN, host=foo, and datacenter=us-east, and the fields are usage_idle and
usage_busy. Each point receives a timestamp at collection time.

### Example 3

We can also change the data_format to "graphite" to consume graphite-compatible
metrics-collecting scripts, such as:

* Nagios [Metrics Plugins](https://exchange.nagios.org/directory/Plugins)
* Sensu [Metrics Plugins](https://github.com/sensu-plugins)

#### Configuration
```
# Read flattened metrics from one or more commands that output Graphite to stdout
[[inputs.exec]]
  # Shell/commands array
  commands = ["/tmp/test.sh","/tmp/test2.sh"]

  # Data format to consume. This can be "json", "influx" or "graphite" (line-protocol)
  # NOTE: json only reads numerical measurements; strings and booleans are ignored.
  data_format = "graphite"

  # measurement name suffix (for separating different commands)
  name_suffix = "_mycollector"

  ## The settings below apply only to data_format = "graphite"; they are
  ## ignored for the other data formats.
  ## If matching multiple measurement files, this string will be used to join the matched values.
  separator = "."

  ## Each template line requires a template pattern. It can have an optional
  ## filter before the template, separated by spaces. It can also have optional
  ## extra tags following the template. Multiple tags should be separated by
  ## commas and no spaces, similar to the line protocol format. There can be
  ## only one default template.
  ## Templates support the following forms:
  ##   1. filter + template
  ##   2. filter + template + extra tag
  ##   3. filter + template with field key
  ##   4. default template
  templates = [
    "*.app env.service.resource.measurement",
    "stats.* .host.measurement* region=us-west,agent=sensu",
    "stats2.* .host.measurement.field",
    "measurement*"
  ]
```

And test.sh/test2.sh will output:

```
sensu.metric.net.server0.eth0.rx_packets 461295119435 1444234982
sensu.metric.net.server0.eth0.tx_bytes 1093086493388480 1444234982
sensu.metric.net.server0.eth0.rx_bytes 1015633926034834 1444234982
sensu.metric.net.server0.eth0.tx_errors 0 1444234982
sensu.metric.net.server0.eth0.rx_errors 0 1444234982
sensu.metric.net.server0.eth0.tx_dropped 0 1444234982
sensu.metric.net.server0.eth0.rx_dropped 0 1444234982
```

The templates configuration is used to parse the graphite metrics into
measurements with tags, so that they can be stored in tagging engines such as
InfluxDB or OpenTSDB.
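
To make the template behavior concrete, here is a worked example. It assumes the standard InfluxDB Graphite template semantics (the parser this plugin delegates to); the metric names are illustrative. With `separator = "."` and the template line `stats.* .host.measurement* region=us-west,agent=sensu`, an input line such as

```
stats.server0.cpu.load 5 1444234982
```

should match the `stats.*` filter, skip the first component (the leading empty field in the template), take `server0` as the `host` tag, join the remaining components into the measurement `cpu.load`, and attach the extra tags `region=us-west` and `agent=sensu`.
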
For more detail about templates, please refer to [the Graphite input](https://github.com/influxdata/influxdb/blob/master/services/graphite/README.md).
plugins/inputs/exec/exec.go (new file, 133 lines)
package exec

import (
    "bytes"
    "fmt"
    "os/exec"
    "sync"

    "github.com/gonuts/go-shellquote"

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/plugins/inputs"
    "github.com/influxdata/telegraf/plugins/parsers"
)

const sampleConfig = `
  ## Commands array
  commands = ["/tmp/test.sh", "/usr/bin/mycollector --foo=bar"]

  ## measurement name suffix (for separating different commands)
  name_suffix = "_mycollector"

  ## Data format to consume. This can be "json", "influx" or "graphite"
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "influx"
`

// Exec runs each configured command and feeds its stdout to the parser.
type Exec struct {
    Commands []string
    Command  string // legacy single-command option, folded into Commands in Gather

    parser parsers.Parser

    wg sync.WaitGroup

    runner  Runner
    errChan chan error
}

func NewExec() *Exec {
    return &Exec{
        runner: CommandRunner{},
    }
}

// Runner abstracts command execution so tests can substitute a mock.
type Runner interface {
    Run(*Exec, string) ([]byte, error)
}

type CommandRunner struct{}

func (c CommandRunner) Run(e *Exec, command string) ([]byte, error) {
    split_cmd, err := shellquote.Split(command)
    if err != nil || len(split_cmd) == 0 {
        return nil, fmt.Errorf("exec: unable to parse command, %s", err)
    }

    cmd := exec.Command(split_cmd[0], split_cmd[1:]...)

    var out bytes.Buffer
    cmd.Stdout = &out

    if err := cmd.Run(); err != nil {
        return nil, fmt.Errorf("exec: %s for command '%s'", err, command)
    }

    return out.Bytes(), nil
}

func (e *Exec) ProcessCommand(command string, acc telegraf.Accumulator) {
    defer e.wg.Done()

    out, err := e.runner.Run(e, command)
    if err != nil {
        e.errChan <- err
        return
    }

    metrics, err := e.parser.Parse(out)
    if err != nil {
        e.errChan <- err
    } else {
        for _, metric := range metrics {
            acc.AddFields(metric.Name(), metric.Fields(), metric.Tags(), metric.Time())
        }
    }
}

func (e *Exec) SampleConfig() string {
    return sampleConfig
}

func (e *Exec) Description() string {
    return "Read metrics from one or more commands that can output to stdout"
}

func (e *Exec) SetParser(parser parsers.Parser) {
    e.parser = parser
}

func (e *Exec) Gather(acc telegraf.Accumulator) error {
    // Legacy single command support
    if e.Command != "" {
        e.Commands = append(e.Commands, e.Command)
        e.Command = ""
    }

    e.errChan = make(chan error, len(e.Commands))

    e.wg.Add(len(e.Commands))
    for _, command := range e.Commands {
        go e.ProcessCommand(command, acc)
    }
    e.wg.Wait()

    // All goroutines have finished; return the first buffered error, if any.
    select {
    default:
        close(e.errChan)
        return nil
    case err := <-e.errChan:
        close(e.errChan)
        return err
    }
}

func init() {
    inputs.Add("exec", func() telegraf.Input {
        return NewExec()
    })
}
plugins/inputs/exec/exec_test.go (new file, 170 lines)
package exec

import (
    "fmt"
    "testing"

    "github.com/influxdata/telegraf/plugins/parsers"

    "github.com/influxdata/telegraf/testutil"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

// Midnight 9/22/2015
const baseTimeSeconds = 1442905200

const validJson = `
{
    "status": "green",
    "num_processes": 82,
    "cpu": {
        "status": "red",
        "nil_status": null,
        "used": 8234,
        "free": 32
    },
    "percent": 0.81,
    "users": [0, 1, 2, 3]
}`

const malformedJson = `
{
    "status": "green",
`

const lineProtocol = "cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1"

const lineProtocolMulti = `
cpu,cpu=cpu0,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu1,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu2,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu3,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu4,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu5,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu6,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
`

type runnerMock struct {
    out []byte
    err error
}

func newRunnerMock(out []byte, err error) Runner {
    return &runnerMock{
        out: out,
        err: err,
    }
}

func (r runnerMock) Run(e *Exec, command string) ([]byte, error) {
    if r.err != nil {
        return nil, r.err
    }
    return r.out, nil
}

func TestExec(t *testing.T) {
    parser, _ := parsers.NewJSONParser("exec", []string{}, nil)
    e := &Exec{
        runner:   newRunnerMock([]byte(validJson), nil),
        Commands: []string{"testcommand arg1"},
        parser:   parser,
    }

    var acc testutil.Accumulator
    err := e.Gather(&acc)
    require.NoError(t, err)
    assert.Equal(t, acc.NFields(), 8, "non-numeric measurements should be ignored")

    fields := map[string]interface{}{
        "num_processes": float64(82),
        "cpu_used":      float64(8234),
        "cpu_free":      float64(32),
        "percent":       float64(0.81),
        "users_0":       float64(0),
        "users_1":       float64(1),
        "users_2":       float64(2),
        "users_3":       float64(3),
    }
    acc.AssertContainsFields(t, "exec", fields)
}

func TestExecMalformed(t *testing.T) {
    parser, _ := parsers.NewJSONParser("exec", []string{}, nil)
    e := &Exec{
        runner:   newRunnerMock([]byte(malformedJson), nil),
        Commands: []string{"badcommand arg1"},
        parser:   parser,
    }

    var acc testutil.Accumulator
    err := e.Gather(&acc)
    require.Error(t, err)
    assert.Equal(t, acc.NFields(), 0, "No new points should have been added")
}

func TestCommandError(t *testing.T) {
    parser, _ := parsers.NewJSONParser("exec", []string{}, nil)
    e := &Exec{
        runner:   newRunnerMock(nil, fmt.Errorf("exit status code 1")),
        Commands: []string{"badcommand"},
        parser:   parser,
    }

    var acc testutil.Accumulator
    err := e.Gather(&acc)
    require.Error(t, err)
    assert.Equal(t, acc.NFields(), 0, "No new points should have been added")
}

func TestLineProtocolParse(t *testing.T) {
    parser, _ := parsers.NewInfluxParser()
    e := &Exec{
        runner:   newRunnerMock([]byte(lineProtocol), nil),
        Commands: []string{"line-protocol"},
        parser:   parser,
    }

    var acc testutil.Accumulator
    err := e.Gather(&acc)
    require.NoError(t, err)

    fields := map[string]interface{}{
        "usage_idle": float64(99),
        "usage_busy": float64(1),
    }
    tags := map[string]string{
        "host":       "foo",
        "datacenter": "us-east",
    }
    acc.AssertContainsTaggedFields(t, "cpu", fields, tags)
}

func TestLineProtocolParseMultiple(t *testing.T) {
    parser, _ := parsers.NewInfluxParser()
    e := &Exec{
        runner:   newRunnerMock([]byte(lineProtocolMulti), nil),
        Commands: []string{"line-protocol"},
        parser:   parser,
    }

    var acc testutil.Accumulator
    err := e.Gather(&acc)
    require.NoError(t, err)

    fields := map[string]interface{}{
        "usage_idle": float64(99),
        "usage_busy": float64(1),
    }
    tags := map[string]string{
        "host":       "foo",
        "datacenter": "us-east",
    }
    cpuTags := []string{"cpu0", "cpu1", "cpu2", "cpu3", "cpu4", "cpu5", "cpu6"}

    for _, cpu := range cpuTags {
        tags["cpu"] = cpu
        acc.AssertContainsTaggedFields(t, "cpu", fields, tags)
    }
}
plugins/inputs/github_webhooks/README.md (new file, 369 lines)
# github_webhooks

This is a Telegraf service plugin that listens for events kicked off by Github's Webhooks service and persists data from them into configured outputs. To set up the listener, first generate the proper configuration:
```sh
$ telegraf -sample-config -input-filter github_webhooks -output-filter influxdb > config.conf.new
```
Change the config file to point to the InfluxDB server you are using, and adjust the settings to match your environment. Once that is complete:
```sh
$ cp config.conf.new /etc/telegraf/telegraf.conf
$ sudo service telegraf start
```
Once the server is running, configure your Organization's Webhooks to point at the `github_webhooks` service. To do this, go to `github.com/{my_organization}` and click `Settings > Webhooks > Add webhook`. In the resulting menu, set `Payload URL` to `http://<my_ip>:1618`, set `Content type` to `application/json`, and under `Which events would you like to trigger this webhook?` select **Send me everything**. By default all of the events write to the `github_webhooks` measurement; this is configurable by setting `measurement_name` in the config file.
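
For reference, the relevant section of the generated configuration is small. This mirrors the plugin's sample config (see `SampleConfig` in the plugin source below); adjust the address for your environment:

```toml
[[inputs.github_webhooks]]
  ## Address and port to host Webhook listener on
  service_address = ":1618"
```
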
## Events

Each of the following section titles links to the full payload and details for that event. The body lists which information from the event is persisted, in the following format:
```
# TAGS
* 'tagKey' = `tagValue` type
# FIELDS
* 'fieldKey' = `fieldValue` type
```
The tag values and field values show the place on the incoming JSON object where the data is sourced from.
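
As a purely hypothetical illustration (the values are invented and field types are simplified), a `watch` event would be persisted roughly as this line-protocol point:

```
github_webhooks,event=watch,repository=octocat/Hello-World,private=false,user=octocat,admin=false stars=80,forks=9,issues=2
```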

#### [`commit_comment` event](https://developer.github.com/v3/activity/events/types/#commitcommentevent)

**Tags:**
* 'event' = `headers[X-Github-Event]` string
* 'repository' = `event.repository.full_name` string
* 'private' = `event.repository.private` bool
* 'user' = `event.sender.login` string
* 'admin' = `event.sender.site_admin` bool

**Fields:**
* 'stars' = `event.repository.stargazers_count` int
* 'forks' = `event.repository.forks_count` int
* 'issues' = `event.repository.open_issues_count` int
* 'commit' = `event.comment.commit_id` string
* 'comment' = `event.comment.body` string

#### [`create` event](https://developer.github.com/v3/activity/events/types/#createevent)

**Tags:**
* 'event' = `headers[X-Github-Event]` string
* 'repository' = `event.repository.full_name` string
* 'private' = `event.repository.private` bool
* 'user' = `event.sender.login` string
* 'admin' = `event.sender.site_admin` bool

**Fields:**
* 'stars' = `event.repository.stargazers_count` int
* 'forks' = `event.repository.forks_count` int
* 'issues' = `event.repository.open_issues_count` int
* 'ref' = `event.ref` string
* 'issues' = `event.ref_type` string

#### [`delete` event](https://developer.github.com/v3/activity/events/types/#deleteevent)

**Tags:**
* 'event' = `headers[X-Github-Event]` string
* 'repository' = `event.repository.full_name` string
* 'private' = `event.repository.private` bool
* 'user' = `event.sender.login` string
* 'admin' = `event.sender.site_admin` bool

**Fields:**
* 'stars' = `event.repository.stargazers_count` int
* 'forks' = `event.repository.forks_count` int
* 'issues' = `event.repository.open_issues_count` int
* 'ref' = `event.ref` string
* 'issues' = `event.ref_type` string

#### [`deployment` event](https://developer.github.com/v3/activity/events/types/#deploymentevent)

**Tags:**
* 'event' = `headers[X-Github-Event]` string
* 'repository' = `event.repository.full_name` string
* 'private' = `event.repository.private` bool
* 'user' = `event.sender.login` string
* 'admin' = `event.sender.site_admin` bool

**Fields:**
* 'stars' = `event.repository.stargazers_count` int
* 'forks' = `event.repository.forks_count` int
* 'issues' = `event.repository.open_issues_count` int
* 'commit' = `event.deployment.sha` string
* 'task' = `event.deployment.task` string
* 'environment' = `event.deployment.environment` string
* 'description' = `event.deployment.description` string

#### [`deployment_status` event](https://developer.github.com/v3/activity/events/types/#deploymentstatusevent)

**Tags:**
* 'event' = `headers[X-Github-Event]` string
* 'repository' = `event.repository.full_name` string
* 'private' = `event.repository.private` bool
* 'user' = `event.sender.login` string
* 'admin' = `event.sender.site_admin` bool

**Fields:**
* 'stars' = `event.repository.stargazers_count` int
* 'forks' = `event.repository.forks_count` int
* 'issues' = `event.repository.open_issues_count` int
* 'commit' = `event.deployment.sha` string
* 'task' = `event.deployment.task` string
* 'environment' = `event.deployment.environment` string
* 'description' = `event.deployment.description` string
* 'depState' = `event.deployment_status.state` string
* 'depDescription' = `event.deployment_status.description` string

#### [`fork` event](https://developer.github.com/v3/activity/events/types/#forkevent)

**Tags:**
* 'event' = `headers[X-Github-Event]` string
* 'repository' = `event.repository.full_name` string
* 'private' = `event.repository.private` bool
* 'user' = `event.sender.login` string
* 'admin' = `event.sender.site_admin` bool

**Fields:**
* 'stars' = `event.repository.stargazers_count` int
* 'forks' = `event.repository.forks_count` int
* 'issues' = `event.repository.open_issues_count` int
* 'forkee' = `event.forkee.repository` string

#### [`gollum` event](https://developer.github.com/v3/activity/events/types/#gollumevent)

**Tags:**
* 'event' = `headers[X-Github-Event]` string
* 'repository' = `event.repository.full_name` string
* 'private' = `event.repository.private` bool
* 'user' = `event.sender.login` string
* 'admin' = `event.sender.site_admin` bool

**Fields:**
* 'stars' = `event.repository.stargazers_count` int
* 'forks' = `event.repository.forks_count` int
* 'issues' = `event.repository.open_issues_count` int

#### [`issue_comment` event](https://developer.github.com/v3/activity/events/types/#issuecommentevent)

**Tags:**
* 'event' = `headers[X-Github-Event]` string
* 'repository' = `event.repository.full_name` string
* 'private' = `event.repository.private` bool
* 'user' = `event.sender.login` string
* 'admin' = `event.sender.site_admin` bool
* 'issue' = `event.issue.number` int

**Fields:**
* 'stars' = `event.repository.stargazers_count` int
* 'forks' = `event.repository.forks_count` int
* 'issues' = `event.repository.open_issues_count` int
* 'title' = `event.issue.title` string
* 'comments' = `event.issue.comments` int
* 'body' = `event.comment.body` string

#### [`issues` event](https://developer.github.com/v3/activity/events/types/#issuesevent)

**Tags:**
* 'event' = `headers[X-Github-Event]` string
* 'repository' = `event.repository.full_name` string
* 'private' = `event.repository.private` bool
* 'user' = `event.sender.login` string
* 'admin' = `event.sender.site_admin` bool
* 'issue' = `event.issue.number` int
* 'action' = `event.action` string

**Fields:**
* 'stars' = `event.repository.stargazers_count` int
* 'forks' = `event.repository.forks_count` int
* 'issues' = `event.repository.open_issues_count` int
* 'title' = `event.issue.title` string
* 'comments' = `event.issue.comments` int

#### [`member` event](https://developer.github.com/v3/activity/events/types/#memberevent)

**Tags:**
* 'event' = `headers[X-Github-Event]` string
* 'repository' = `event.repository.full_name` string
* 'private' = `event.repository.private` bool
* 'user' = `event.sender.login` string
* 'admin' = `event.sender.site_admin` bool

**Fields:**
* 'stars' = `event.repository.stargazers_count` int
* 'forks' = `event.repository.forks_count` int
* 'issues' = `event.repository.open_issues_count` int
* 'newMember' = `event.sender.login` string
* 'newMemberStatus' = `event.sender.site_admin` bool

#### [`membership` event](https://developer.github.com/v3/activity/events/types/#membershipevent)

**Tags:**
* 'event' = `headers[X-Github-Event]` string
* 'user' = `event.sender.login` string
* 'admin' = `event.sender.site_admin` bool
* 'action' = `event.action` string

**Fields:**
* 'newMember' = `event.sender.login` string
* 'newMemberStatus' = `event.sender.site_admin` bool

#### [`page_build` event](https://developer.github.com/v3/activity/events/types/#pagebuildevent)

**Tags:**
* 'event' = `headers[X-Github-Event]` string
* 'repository' = `event.repository.full_name` string
* 'private' = `event.repository.private` bool
* 'user' = `event.sender.login` string
* 'admin' = `event.sender.site_admin` bool

**Fields:**
* 'stars' = `event.repository.stargazers_count` int
* 'forks' = `event.repository.forks_count` int
* 'issues' = `event.repository.open_issues_count` int

#### [`public` event](https://developer.github.com/v3/activity/events/types/#publicevent)

**Tags:**
* 'event' = `headers[X-Github-Event]` string
* 'repository' = `event.repository.full_name` string
* 'private' = `event.repository.private` bool
* 'user' = `event.sender.login` string
* 'admin' = `event.sender.site_admin` bool

**Fields:**
* 'stars' = `event.repository.stargazers_count` int
* 'forks' = `event.repository.forks_count` int
* 'issues' = `event.repository.open_issues_count` int

#### [`pull_request_review_comment` event](https://developer.github.com/v3/activity/events/types/#pullrequestreviewcommentevent)

**Tags:**
* 'event' = `headers[X-Github-Event]` string
* 'action' = `event.action` string
* 'repository' = `event.repository.full_name` string
* 'private' = `event.repository.private` bool
* 'user' = `event.sender.login` string
* 'admin' = `event.sender.site_admin` bool
* 'prNumber' = `event.pull_request.number` int

**Fields:**
* 'stars' = `event.repository.stargazers_count` int
* 'forks' = `event.repository.forks_count` int
* 'issues' = `event.repository.open_issues_count` int
* 'state' = `event.pull_request.state` string
* 'title' = `event.pull_request.title` string
* 'comments' = `event.pull_request.comments` int
* 'commits' = `event.pull_request.commits` int
* 'additions' = `event.pull_request.additions` int
* 'deletions' = `event.pull_request.deletions` int
* 'changedFiles' = `event.pull_request.changed_files` int
* 'commentFile' = `event.comment.file` string
* 'comment' = `event.comment.body` string

#### [`pull_request` event](https://developer.github.com/v3/activity/events/types/#pullrequestevent)

**Tags:**
* 'event' = `headers[X-Github-Event]` string
* 'action' = `event.action` string
* 'repository' = `event.repository.full_name` string
* 'private' = `event.repository.private` bool
* 'user' = `event.sender.login` string
* 'admin' = `event.sender.site_admin` bool
* 'prNumber' = `event.pull_request.number` int

**Fields:**
* 'stars' = `event.repository.stargazers_count` int
* 'forks' = `event.repository.forks_count` int
* 'issues' = `event.repository.open_issues_count` int
* 'state' = `event.pull_request.state` string
* 'title' = `event.pull_request.title` string
* 'comments' = `event.pull_request.comments` int
* 'commits' = `event.pull_request.commits` int
* 'additions' = `event.pull_request.additions` int
* 'deletions' = `event.pull_request.deletions` int
* 'changedFiles' = `event.pull_request.changed_files` int

#### [`push` event](https://developer.github.com/v3/activity/events/types/#pushevent)

**Tags:**
* 'event' = `headers[X-Github-Event]` string
* 'repository' = `event.repository.full_name` string
* 'private' = `event.repository.private` bool
* 'user' = `event.sender.login` string
* 'admin' = `event.sender.site_admin` bool

**Fields:**
* 'stars' = `event.repository.stargazers_count` int
* 'forks' = `event.repository.forks_count` int
* 'issues' = `event.repository.open_issues_count` int
* 'ref' = `event.ref` string
* 'before' = `event.before` string
* 'after' = `event.after` string

#### [`repository` event](https://developer.github.com/v3/activity/events/types/#repositoryevent)

**Tags:**
* 'event' = `headers[X-Github-Event]` string
* 'repository' = `event.repository.full_name` string
* 'private' = `event.repository.private` bool
* 'user' = `event.sender.login` string
* 'admin' = `event.sender.site_admin` bool

**Fields:**
* 'stars' = `event.repository.stargazers_count` int
* 'forks' = `event.repository.forks_count` int
* 'issues' = `event.repository.open_issues_count` int

#### [`release` event](https://developer.github.com/v3/activity/events/types/#releaseevent)

**Tags:**
* 'event' = `headers[X-Github-Event]` string
* 'repository' = `event.repository.full_name` string
* 'private' = `event.repository.private` bool
* 'user' = `event.sender.login` string
* 'admin' = `event.sender.site_admin` bool

**Fields:**
* 'stars' = `event.repository.stargazers_count` int
* 'forks' = `event.repository.forks_count` int
* 'issues' = `event.repository.open_issues_count` int
* 'tagName' = `event.release.tag_name` string

#### [`status` event](https://developer.github.com/v3/activity/events/types/#statusevent)

**Tags:**
* 'event' = `headers[X-Github-Event]` string
* 'repository' = `event.repository.full_name` string
* 'private' = `event.repository.private` bool
* 'user' = `event.sender.login` string
* 'admin' = `event.sender.site_admin` bool

**Fields:**
* 'stars' = `event.repository.stargazers_count` int
* 'forks' = `event.repository.forks_count` int
* 'issues' = `event.repository.open_issues_count` int
* 'commit' = `event.sha` string
* 'state' = `event.state` string

#### [`team_add` event](https://developer.github.com/v3/activity/events/types/#teamaddevent)

**Tags:**
* 'event' = `headers[X-Github-Event]` string
* 'repository' = `event.repository.full_name` string
* 'private' = `event.repository.private` bool
* 'user' = `event.sender.login` string
* 'admin' = `event.sender.site_admin` bool

**Fields:**
* 'stars' = `event.repository.stargazers_count` int
* 'forks' = `event.repository.forks_count` int
* 'issues' = `event.repository.open_issues_count` int
* 'teamName' = `event.team.name` string

#### [`watch` event](https://developer.github.com/v3/activity/events/types/#watchevent)

**Tags:**
* 'event' = `headers[X-Github-Event]` string
* 'repository' = `event.repository.full_name` string
* 'private' = `event.repository.private` bool
* 'user' = `event.sender.login` string
* 'admin' = `event.sender.site_admin` bool

**Fields:**
* 'stars' = `event.repository.stargazers_count` int
* 'forks' = `event.repository.forks_count` int
* 'issues' = `event.repository.open_issues_count` int
plugins/inputs/github_webhooks/github_webhooks.go (new file, 335 lines)
|
||||
package github_webhooks
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"sync"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
)
|
||||
|
||||
func init() {
|
||||
inputs.Add("github_webhooks", func() telegraf.Input { return &GithubWebhooks{} })
|
||||
}
|
||||
|
||||
type GithubWebhooks struct {
|
||||
ServiceAddress string
|
||||
// Lock for the struct
|
||||
sync.Mutex
|
||||
// Events buffer to store events between Gather calls
|
||||
events []Event
|
||||
}
|
||||
|
||||
func NewGithubWebhooks() *GithubWebhooks {
|
||||
return &GithubWebhooks{}
|
||||
}
|
||||
|
||||
func (gh *GithubWebhooks) SampleConfig() string {
|
||||
return `
|
||||
## Address and port to host Webhook listener on
|
||||
service_address = ":1618"
|
||||
`
|
||||
}
|
||||
|
||||
func (gh *GithubWebhooks) Description() string {
|
||||
return "A Github Webhook Event collector"
|
||||
}
|
||||
|
||||
// Writes the points from <-gh.in to the Accumulator
|
||||
func (gh *GithubWebhooks) Gather(acc telegraf.Accumulator) error {
|
||||
gh.Lock()
|
||||
defer gh.Unlock()
|
||||
for _, event := range gh.events {
|
||||
p := event.NewMetric()
|
||||
acc.AddFields("github_webhooks", p.Fields(), p.Tags(), p.Time())
|
||||
}
|
||||
gh.events = make([]Event, 0)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (gh *GithubWebhooks) Listen() {
|
||||
r := mux.NewRouter()
|
||||
r.HandleFunc("/", gh.eventHandler).Methods("POST")
|
||||
err := http.ListenAndServe(fmt.Sprintf("%s", gh.ServiceAddress), r)
|
||||
if err != nil {
|
||||
log.Printf("Error starting server: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func (gh *GithubWebhooks) Start(_ telegraf.Accumulator) error {
|
||||
go gh.Listen()
|
||||
log.Printf("Started the github_webhooks service on %s\n", gh.ServiceAddress)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (gh *GithubWebhooks) Stop() {
|
||||
log.Println("Stopping the ghWebhooks service")
|
||||
}
|
||||
|
||||
// Handles the / route
|
||||
func (gh *GithubWebhooks) eventHandler(w http.ResponseWriter, r *http.Request) {
|
||||
eventType := r.Header["X-Github-Event"][0]
|
||||
data, err := ioutil.ReadAll(r.Body)
|
||||
if err != nil {
|
||||
w.WriteHeader(http.StatusBadRequest)
|
||||
}
|
||||
e, err := NewEvent(data, eventType)
|
||||
if err != nil {
|
||||
w.WriteHeader(http.StatusBadRequest)
|
||||
}
|
||||
gh.Lock()
|
||||
gh.events = append(gh.events, e)
|
||||
gh.Unlock()
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
|
||||
func newCommitComment(data []byte) (Event, error) {
|
||||
commitCommentStruct := CommitCommentEvent{}
|
||||
err := json.Unmarshal(data, &commitCommentStruct)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return commitCommentStruct, nil
|
||||
}
|
||||
|
||||
func newCreate(data []byte) (Event, error) {
|
||||
createStruct := CreateEvent{}
|
||||
err := json.Unmarshal(data, &createStruct)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return createStruct, nil
|
||||
}
|
||||
|
||||
func newDelete(data []byte) (Event, error) {
|
||||
deleteStruct := DeleteEvent{}
|
||||
err := json.Unmarshal(data, &deleteStruct)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return deleteStruct, nil
|
||||
}
|
||||
|
||||
func newDeployment(data []byte) (Event, error) {
|
||||
deploymentStruct := DeploymentEvent{}
|
||||
err := json.Unmarshal(data, &deploymentStruct)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return deploymentStruct, nil
|
||||
}
|
||||
|
||||
func newDeploymentStatus(data []byte) (Event, error) {
|
||||
deploymentStatusStruct := DeploymentStatusEvent{}
|
||||
err := json.Unmarshal(data, &deploymentStatusStruct)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
	return deploymentStatusStruct, nil
}

func newFork(data []byte) (Event, error) {
	forkStruct := ForkEvent{}
	err := json.Unmarshal(data, &forkStruct)
	if err != nil {
		return nil, err
	}
	return forkStruct, nil
}

func newGollum(data []byte) (Event, error) {
	gollumStruct := GollumEvent{}
	err := json.Unmarshal(data, &gollumStruct)
	if err != nil {
		return nil, err
	}
	return gollumStruct, nil
}

func newIssueComment(data []byte) (Event, error) {
	issueCommentStruct := IssueCommentEvent{}
	err := json.Unmarshal(data, &issueCommentStruct)
	if err != nil {
		return nil, err
	}
	return issueCommentStruct, nil
}

func newIssues(data []byte) (Event, error) {
	issuesStruct := IssuesEvent{}
	err := json.Unmarshal(data, &issuesStruct)
	if err != nil {
		return nil, err
	}
	return issuesStruct, nil
}

func newMember(data []byte) (Event, error) {
	memberStruct := MemberEvent{}
	err := json.Unmarshal(data, &memberStruct)
	if err != nil {
		return nil, err
	}
	return memberStruct, nil
}

func newMembership(data []byte) (Event, error) {
	membershipStruct := MembershipEvent{}
	err := json.Unmarshal(data, &membershipStruct)
	if err != nil {
		return nil, err
	}
	return membershipStruct, nil
}

func newPageBuild(data []byte) (Event, error) {
	pageBuildEvent := PageBuildEvent{}
	err := json.Unmarshal(data, &pageBuildEvent)
	if err != nil {
		return nil, err
	}
	return pageBuildEvent, nil
}

func newPublic(data []byte) (Event, error) {
	publicEvent := PublicEvent{}
	err := json.Unmarshal(data, &publicEvent)
	if err != nil {
		return nil, err
	}
	return publicEvent, nil
}

func newPullRequest(data []byte) (Event, error) {
	pullRequestStruct := PullRequestEvent{}
	err := json.Unmarshal(data, &pullRequestStruct)
	if err != nil {
		return nil, err
	}
	return pullRequestStruct, nil
}

func newPullRequestReviewComment(data []byte) (Event, error) {
	pullRequestReviewCommentStruct := PullRequestReviewCommentEvent{}
	err := json.Unmarshal(data, &pullRequestReviewCommentStruct)
	if err != nil {
		return nil, err
	}
	return pullRequestReviewCommentStruct, nil
}

func newPush(data []byte) (Event, error) {
	pushStruct := PushEvent{}
	err := json.Unmarshal(data, &pushStruct)
	if err != nil {
		return nil, err
	}
	return pushStruct, nil
}

func newRelease(data []byte) (Event, error) {
	releaseStruct := ReleaseEvent{}
	err := json.Unmarshal(data, &releaseStruct)
	if err != nil {
		return nil, err
	}
	return releaseStruct, nil
}

func newRepository(data []byte) (Event, error) {
	repositoryStruct := RepositoryEvent{}
	err := json.Unmarshal(data, &repositoryStruct)
	if err != nil {
		return nil, err
	}
	return repositoryStruct, nil
}

func newStatus(data []byte) (Event, error) {
	statusStruct := StatusEvent{}
	err := json.Unmarshal(data, &statusStruct)
	if err != nil {
		return nil, err
	}
	return statusStruct, nil
}

func newTeamAdd(data []byte) (Event, error) {
	teamAddStruct := TeamAddEvent{}
	err := json.Unmarshal(data, &teamAddStruct)
	if err != nil {
		return nil, err
	}
	return teamAddStruct, nil
}

func newWatch(data []byte) (Event, error) {
	watchStruct := WatchEvent{}
	err := json.Unmarshal(data, &watchStruct)
	if err != nil {
		return nil, err
	}
	return watchStruct, nil
}

type newEventError struct {
	s string
}

func (e *newEventError) Error() string {
	return e.s
}

// NewEvent decodes a raw webhook payload into the Event type named by t.
func NewEvent(r []byte, t string) (Event, error) {
	log.Printf("New %v event received", t)
	switch t {
	case "commit_comment":
		return newCommitComment(r)
	case "create":
		return newCreate(r)
	case "delete":
		return newDelete(r)
	case "deployment":
		return newDeployment(r)
	case "deployment_status":
		return newDeploymentStatus(r)
	case "fork":
		return newFork(r)
	case "gollum":
		return newGollum(r)
	case "issue_comment":
		return newIssueComment(r)
	case "issues":
		return newIssues(r)
	case "member":
		return newMember(r)
	case "membership":
		return newMembership(r)
	case "page_build":
		return newPageBuild(r)
	case "public":
		return newPublic(r)
	case "pull_request":
		return newPullRequest(r)
	case "pull_request_review_comment":
		return newPullRequestReviewComment(r)
	case "push":
		return newPush(r)
	case "release":
		return newRelease(r)
	case "repository":
		return newRepository(r)
	case "status":
		return newStatus(r)
	case "team_add":
		return newTeamAdd(r)
	case "watch":
		return newWatch(r)
	}
	return nil, &newEventError{"Not a recognized event type"}
}
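To show how the dispatcher is meant to be driven, here is a minimal sketch of calling `NewEvent` with a payload and the GitHub event name (in the plugin the name comes from the `X-Github-Event` request header, as the tests below demonstrate; the payload here is illustrative, not one of the project's fixtures):

```go
payload := []byte(`{"repository": {"full_name": "octocat/Hello-World"}, "sender": {"login": "octocat"}}`)
// "watch" plays the role of the X-Github-Event header value.
e, err := NewEvent(payload, "watch")
if err != nil {
	// Unknown event types come back as a *newEventError.
	log.Printf("dropping payload: %v", err)
	return
}
m := e.NewMetric() // every Event renders itself as a telegraf.Metric
_ = m
```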
3559 plugins/inputs/github_webhooks/github_webhooks_mock_json.go (Normal file)
File diff suppressed because it is too large
711 plugins/inputs/github_webhooks/github_webhooks_models.go (Normal file)
@@ -0,0 +1,711 @@
package github_webhooks

import (
	"fmt"
	"log"
	"time"

	"github.com/influxdata/telegraf"
)

const meas = "github_webhooks"

type Event interface {
	NewMetric() telegraf.Metric
}
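Everything below satisfies this one-method contract, so the webhook handler can hold a heterogeneous batch of payloads and flush them uniformly. A minimal consuming-side sketch (the `flush` helper is illustrative and not part of this file; it assumes `telegraf.Metric` exposes the usual `Name`/`Tags`/`Fields`/`Time` accessors, as it does in this era of the codebase):

```go
func flush(acc telegraf.Accumulator, events []Event) {
	for _, e := range events {
		// Each event renders itself; the caller never needs a type switch.
		m := e.NewMetric()
		acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time())
	}
}
```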
type Repository struct {
	Repository string `json:"full_name"`
	Private    bool   `json:"private"`
	Stars      int    `json:"stargazers_count"`
	Forks      int    `json:"forks_count"`
	Issues     int    `json:"open_issues_count"`
}

type Sender struct {
	User  string `json:"login"`
	Admin bool   `json:"site_admin"`
}

type CommitComment struct {
	Commit string `json:"commit_id"`
	Body   string `json:"body"`
}

type Deployment struct {
	Commit      string `json:"sha"`
	Task        string `json:"task"`
	Environment string `json:"environment"`
	Description string `json:"description"`
}

type Page struct {
	Name   string `json:"page_name"`
	Title  string `json:"title"`
	Action string `json:"action"`
}

type Issue struct {
	Number   int    `json:"number"`
	Title    string `json:"title"`
	Comments int    `json:"comments"`
}

type IssueComment struct {
	Body string `json:"body"`
}

type Team struct {
	Name string `json:"name"`
}

type PullRequest struct {
	Number       int    `json:"number"`
	State        string `json:"state"`
	Title        string `json:"title"`
	Comments     int    `json:"comments"`
	Commits      int    `json:"commits"`
	Additions    int    `json:"additions"`
	Deletions    int    `json:"deletions"`
	ChangedFiles int    `json:"changed_files"`
}

type PullRequestReviewComment struct {
	File    string `json:"path"`
	Comment string `json:"body"`
}

type Release struct {
	TagName string `json:"tag_name"`
}

type DeploymentStatus struct {
	State       string `json:"state"`
	Description string `json:"description"`
}

// newMetric builds the measurement shared by every event type: the common
// repository/sender tags and counters, merged with the per-event extras.
func newMetric(event string, repo Repository, sender Sender, tags map[string]string, fields map[string]interface{}) telegraf.Metric {
	t := map[string]string{
		"event":      event,
		"repository": repo.Repository,
		"private":    fmt.Sprintf("%v", repo.Private),
		"user":       sender.User,
		"admin":      fmt.Sprintf("%v", sender.Admin),
	}
	for k, v := range tags {
		t[k] = v
	}
	f := map[string]interface{}{
		"stars":  repo.Stars,
		"forks":  repo.Forks,
		"issues": repo.Issues,
	}
	for k, v := range fields {
		f[k] = v
	}
	m, err := telegraf.NewMetric(meas, t, f, time.Now())
	if err != nil {
		log.Fatalf("Failed to create %v event", event)
	}
	return m
}

type CommitCommentEvent struct {
	Comment    CommitComment `json:"comment"`
	Repository Repository    `json:"repository"`
	Sender     Sender        `json:"sender"`
}

func (s CommitCommentEvent) NewMetric() telegraf.Metric {
	return newMetric("commit_comment", s.Repository, s.Sender, nil, map[string]interface{}{
		"commit":  s.Comment.Commit,
		"comment": s.Comment.Body,
	})
}

type CreateEvent struct {
	Ref        string     `json:"ref"`
	RefType    string     `json:"ref_type"`
	Repository Repository `json:"repository"`
	Sender     Sender     `json:"sender"`
}

func (s CreateEvent) NewMetric() telegraf.Metric {
	return newMetric("create", s.Repository, s.Sender, nil, map[string]interface{}{
		"ref":     s.Ref,
		"refType": s.RefType,
	})
}

type DeleteEvent struct {
	Ref        string     `json:"ref"`
	RefType    string     `json:"ref_type"`
	Repository Repository `json:"repository"`
	Sender     Sender     `json:"sender"`
}

func (s DeleteEvent) NewMetric() telegraf.Metric {
	return newMetric("delete", s.Repository, s.Sender, nil, map[string]interface{}{
		"ref":     s.Ref,
		"refType": s.RefType,
	})
}

type DeploymentEvent struct {
	Deployment Deployment `json:"deployment"`
	Repository Repository `json:"repository"`
	Sender     Sender     `json:"sender"`
}

func (s DeploymentEvent) NewMetric() telegraf.Metric {
	return newMetric("deployment", s.Repository, s.Sender, nil, map[string]interface{}{
		"commit":      s.Deployment.Commit,
		"task":        s.Deployment.Task,
		"environment": s.Deployment.Environment,
		"description": s.Deployment.Description,
	})
}

type DeploymentStatusEvent struct {
	Deployment       Deployment       `json:"deployment"`
	DeploymentStatus DeploymentStatus `json:"deployment_status"`
	Repository       Repository       `json:"repository"`
	Sender           Sender           `json:"sender"`
}

func (s DeploymentStatusEvent) NewMetric() telegraf.Metric {
	return newMetric("deployment_status", s.Repository, s.Sender, nil, map[string]interface{}{
		"commit":         s.Deployment.Commit,
		"task":           s.Deployment.Task,
		"environment":    s.Deployment.Environment,
		"description":    s.Deployment.Description,
		"depState":       s.DeploymentStatus.State,
		"depDescription": s.DeploymentStatus.Description,
	})
}

type ForkEvent struct {
	Forkee     Repository `json:"forkee"`
	Repository Repository `json:"repository"`
	Sender     Sender     `json:"sender"`
}

func (s ForkEvent) NewMetric() telegraf.Metric {
	return newMetric("fork", s.Repository, s.Sender, nil, map[string]interface{}{
		"fork": s.Forkee.Repository,
	})
}

type GollumEvent struct {
	Pages      []Page     `json:"pages"`
	Repository Repository `json:"repository"`
	Sender     Sender     `json:"sender"`
}

// NOTE: the individual wiki pages are not included in the metric.
func (s GollumEvent) NewMetric() telegraf.Metric {
	return newMetric("gollum", s.Repository, s.Sender, nil, nil)
}

type IssueCommentEvent struct {
	Issue      Issue        `json:"issue"`
	Comment    IssueComment `json:"comment"`
	Repository Repository   `json:"repository"`
	Sender     Sender       `json:"sender"`
}

func (s IssueCommentEvent) NewMetric() telegraf.Metric {
	return newMetric("issue_comment", s.Repository, s.Sender,
		map[string]string{"issue": fmt.Sprintf("%v", s.Issue.Number)},
		map[string]interface{}{
			"title":    s.Issue.Title,
			"comments": s.Issue.Comments,
			"body":     s.Comment.Body,
		})
}

type IssuesEvent struct {
	Action     string     `json:"action"`
	Issue      Issue      `json:"issue"`
	Repository Repository `json:"repository"`
	Sender     Sender     `json:"sender"`
}

func (s IssuesEvent) NewMetric() telegraf.Metric {
	return newMetric("issues", s.Repository, s.Sender,
		map[string]string{
			"issue":  fmt.Sprintf("%v", s.Issue.Number),
			"action": s.Action,
		},
		map[string]interface{}{
			"title":    s.Issue.Title,
			"comments": s.Issue.Comments,
		})
}

type MemberEvent struct {
	Member     Sender     `json:"member"`
	Repository Repository `json:"repository"`
	Sender     Sender     `json:"sender"`
}

func (s MemberEvent) NewMetric() telegraf.Metric {
	return newMetric("member", s.Repository, s.Sender, nil, map[string]interface{}{
		"newMember":       s.Member.User,
		"newMemberStatus": s.Member.Admin,
	})
}

type MembershipEvent struct {
	Action string `json:"action"`
	Member Sender `json:"member"`
	Sender Sender `json:"sender"`
	Team   Team   `json:"team"`
}

// MembershipEvent carries no repository, so it does not share the common
// repository tags and fields.
func (s MembershipEvent) NewMetric() telegraf.Metric {
	event := "membership"
	t := map[string]string{
		"event":  event,
		"user":   s.Sender.User,
		"admin":  fmt.Sprintf("%v", s.Sender.Admin),
		"action": s.Action,
	}
	f := map[string]interface{}{
		"newMember":       s.Member.User,
		"newMemberStatus": s.Member.Admin,
	}
	m, err := telegraf.NewMetric(meas, t, f, time.Now())
	if err != nil {
		log.Fatalf("Failed to create %v event", event)
	}
	return m
}

type PageBuildEvent struct {
	Repository Repository `json:"repository"`
	Sender     Sender     `json:"sender"`
}

func (s PageBuildEvent) NewMetric() telegraf.Metric {
	return newMetric("page_build", s.Repository, s.Sender, nil, nil)
}

type PublicEvent struct {
	Repository Repository `json:"repository"`
	Sender     Sender     `json:"sender"`
}

func (s PublicEvent) NewMetric() telegraf.Metric {
	return newMetric("public", s.Repository, s.Sender, nil, nil)
}

type PullRequestEvent struct {
	Action      string      `json:"action"`
	PullRequest PullRequest `json:"pull_request"`
	Repository  Repository  `json:"repository"`
	Sender      Sender      `json:"sender"`
}

func (s PullRequestEvent) NewMetric() telegraf.Metric {
	return newMetric("pull_request", s.Repository, s.Sender,
		map[string]string{
			"action":   s.Action,
			"prNumber": fmt.Sprintf("%v", s.PullRequest.Number),
		},
		map[string]interface{}{
			"state":        s.PullRequest.State,
			"title":        s.PullRequest.Title,
			"comments":     s.PullRequest.Comments,
			"commits":      s.PullRequest.Commits,
			"additions":    s.PullRequest.Additions,
			"deletions":    s.PullRequest.Deletions,
			"changedFiles": s.PullRequest.ChangedFiles,
		})
}

type PullRequestReviewCommentEvent struct {
	Comment     PullRequestReviewComment `json:"comment"`
	PullRequest PullRequest              `json:"pull_request"`
	Repository  Repository               `json:"repository"`
	Sender      Sender                   `json:"sender"`
}

func (s PullRequestReviewCommentEvent) NewMetric() telegraf.Metric {
	return newMetric("pull_request_review_comment", s.Repository, s.Sender,
		map[string]string{"prNumber": fmt.Sprintf("%v", s.PullRequest.Number)},
		map[string]interface{}{
			"state":        s.PullRequest.State,
			"title":        s.PullRequest.Title,
			"comments":     s.PullRequest.Comments,
			"commits":      s.PullRequest.Commits,
			"additions":    s.PullRequest.Additions,
			"deletions":    s.PullRequest.Deletions,
			"changedFiles": s.PullRequest.ChangedFiles,
			"commentFile":  s.Comment.File,
			"comment":      s.Comment.Comment,
		})
}

type PushEvent struct {
	Ref        string     `json:"ref"`
	Before     string     `json:"before"`
	After      string     `json:"after"`
	Repository Repository `json:"repository"`
	Sender     Sender     `json:"sender"`
}

func (s PushEvent) NewMetric() telegraf.Metric {
	return newMetric("push", s.Repository, s.Sender, nil, map[string]interface{}{
		"ref":    s.Ref,
		"before": s.Before,
		"after":  s.After,
	})
}

type ReleaseEvent struct {
	Release    Release    `json:"release"`
	Repository Repository `json:"repository"`
	Sender     Sender     `json:"sender"`
}

func (s ReleaseEvent) NewMetric() telegraf.Metric {
	return newMetric("release", s.Repository, s.Sender, nil, map[string]interface{}{
		"tagName": s.Release.TagName,
	})
}

type RepositoryEvent struct {
	Repository Repository `json:"repository"`
	Sender     Sender     `json:"sender"`
}

func (s RepositoryEvent) NewMetric() telegraf.Metric {
	return newMetric("repository", s.Repository, s.Sender, nil, nil)
}

type StatusEvent struct {
	Commit     string     `json:"sha"`
	State      string     `json:"state"`
	Repository Repository `json:"repository"`
	Sender     Sender     `json:"sender"`
}

func (s StatusEvent) NewMetric() telegraf.Metric {
	return newMetric("status", s.Repository, s.Sender, nil, map[string]interface{}{
		"commit": s.Commit,
		"state":  s.State,
	})
}

type TeamAddEvent struct {
	Team       Team       `json:"team"`
	Repository Repository `json:"repository"`
	Sender     Sender     `json:"sender"`
}

func (s TeamAddEvent) NewMetric() telegraf.Metric {
	return newMetric("team_add", s.Repository, s.Sender, nil, map[string]interface{}{
		"teamName": s.Team.Name,
	})
}

type WatchEvent struct {
	Repository Repository `json:"repository"`
	Sender     Sender     `json:"sender"`
}

func (s WatchEvent) NewMetric() telegraf.Metric {
	return newMetric("watch", s.Repository, s.Sender, nil, nil)
}
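Tying the two files together, here is a minimal sketch of the round trip from raw JSON to a metric (written as if inside this package; the payload is illustrative, not one of the project's fixtures):

```go
payload := []byte(`{
	"repository": {"full_name": "octocat/Hello-World", "stargazers_count": 80},
	"sender": {"login": "octocat", "site_admin": false}
}`)
e, err := NewEvent(payload, "watch") // the header value "watch" selects WatchEvent
if err != nil {
	log.Fatal(err)
}
// Yields a github_webhooks point tagged event=watch, repository=octocat/Hello-World,
// user=octocat, with stars/forks/issues as fields.
m := e.NewMetric()
fmt.Println(m)
```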
237 plugins/inputs/github_webhooks/github_webhooks_test.go (Normal file)
@@ -0,0 +1,237 @@
package github_webhooks

import (
	"net/http"
	"net/http/httptest"
	"strings"
	"testing"
)

// Each case POSTs a sample payload from the mock-JSON file with the matching
// X-Github-Event header and expects the handler to answer 200 OK.
func TestEventHandler(t *testing.T) {
	cases := []struct {
		event string
		json  string
	}{
		{"commit_comment", CommitCommentEventJSON()},
		{"delete", DeleteEventJSON()},
		{"deployment", DeploymentEventJSON()},
		{"deployment_status", DeploymentStatusEventJSON()},
		{"fork", ForkEventJSON()},
		{"gollum", GollumEventJSON()},
		{"issue_comment", IssueCommentEventJSON()},
		{"issues", IssuesEventJSON()},
		{"member", MemberEventJSON()},
		{"membership", MembershipEventJSON()},
		{"page_build", PageBuildEventJSON()},
		{"public", PublicEventJSON()},
		{"pull_request_review_comment", PullRequestReviewCommentEventJSON()},
		{"push", PushEventJSON()},
		{"release", ReleaseEventJSON()},
		{"repository", RepositoryEventJSON()},
		{"status", StatusEventJSON()},
		{"team_add", TeamAddEventJSON()},
		{"watch", WatchEventJSON()},
	}
	for _, c := range cases {
		gh := NewGithubWebhooks()
		req, _ := http.NewRequest("POST", "/", strings.NewReader(c.json))
		req.Header.Add("X-Github-Event", c.event)
		w := httptest.NewRecorder()
		gh.eventHandler(w, req)
		if w.Code != http.StatusOK {
			t.Errorf("POST %s returned HTTP status code %v.\nExpected %v", c.event, w.Code, http.StatusOK)
		}
	}
}
365 plugins/inputs/haproxy/haproxy.go (Normal file)
@@ -0,0 +1,365 @@
package haproxy

import (
	"encoding/csv"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"strconv"
	"sync"
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/inputs"
)

// CSV format: https://cbonte.github.io/haproxy-dconv/configuration-1.5.html#9.1
const (
	HF_PXNAME         = 0  // 0. pxname [LFBS]: proxy name
	HF_SVNAME         = 1  // 1. svname [LFBS]: service name (FRONTEND for frontend, BACKEND for backend, any name for server/listener)
	HF_QCUR           = 2  // 2. qcur [..BS]: current queued requests. For the backend this reports the number queued without a server assigned.
	HF_QMAX           = 3  // 3. qmax [..BS]: max value of qcur
	HF_SCUR           = 4  // 4. scur [LFBS]: current sessions
	HF_SMAX           = 5  // 5. smax [LFBS]: max sessions
	HF_SLIM           = 6  // 6. slim [LFBS]: configured session limit
	HF_STOT           = 7  // 7. stot [LFBS]: cumulative number of connections
	HF_BIN            = 8  // 8. bin [LFBS]: bytes in
	HF_BOUT           = 9  // 9. bout [LFBS]: bytes out
	HF_DREQ           = 10 // 10. dreq [LFB.]: requests denied because of security concerns.
	HF_DRESP          = 11 // 11. dresp [LFBS]: responses denied because of security concerns.
	HF_EREQ           = 12 // 12. ereq [LF..]: request errors.
	HF_ECON           = 13 // 13. econ [..BS]: number of requests that encountered an error trying to connect to a server.
	HF_ERESP          = 14 // 14. eresp [..BS]: response errors. srv_abrt will be counted here also. Some other errors are: write error on the client socket (won't be counted for the server stat), failure applying filters to the response.
	HF_WRETR          = 15 // 15. wretr [..BS]: number of times a connection to a server was retried.
	HF_WREDIS         = 16 // 16. wredis [..BS]: number of times a request was redispatched to another server. The server value counts the number of times that server was switched away from.
	HF_STATUS         = 17 // 17. status [LFBS]: status (UP/DOWN/NOLB/MAINT/MAINT(via)...)
	HF_WEIGHT         = 18 // 18. weight [..BS]: total weight (backend), server weight (server)
	HF_ACT            = 19 // 19. act [..BS]: number of active servers (backend), server is active (server)
	HF_BCK            = 20 // 20. bck [..BS]: number of backup servers (backend), server is backup (server)
	HF_CHKFAIL        = 21 // 21. chkfail [...S]: number of failed checks. (Only counts checks failed when the server is up.)
	HF_CHKDOWN        = 22 // 22. chkdown [..BS]: number of UP->DOWN transitions. The backend counter counts transitions to the whole backend being down, rather than the sum of the counters for each server.
	HF_LASTCHG        = 23 // 23. lastchg [..BS]: number of seconds since the last UP<->DOWN transition
	HF_DOWNTIME       = 24 // 24. downtime [..BS]: total downtime (in seconds). The value for the backend is the downtime for the whole backend, not the sum of the server downtime.
	HF_QLIMIT         = 25 // 25. qlimit [...S]: configured maxqueue for the server, or nothing if the value is 0 (default, meaning no limit)
	HF_PID            = 26 // 26. pid [LFBS]: process id (0 for first instance, 1 for second, ...)
	HF_IID            = 27 // 27. iid [LFBS]: unique proxy id
	HF_SID            = 28 // 28. sid [L..S]: server id (unique inside a proxy)
	HF_THROTTLE       = 29 // 29. throttle [...S]: current throttle percentage for the server, when slowstart is active, or no value if not in slowstart.
	HF_LBTOT          = 30 // 30. lbtot [..BS]: total number of times a server was selected, either for new sessions, or when re-dispatching. The server counter is the number of times that server was selected.
	HF_TRACKED        = 31 // 31. tracked [...S]: id of proxy/server if tracking is enabled.
	HF_TYPE           = 32 // 32. type [LFBS]: (0 = frontend, 1 = backend, 2 = server, 3 = socket/listener)
	HF_RATE           = 33 // 33. rate [.FBS]: number of sessions per second over last elapsed second
	HF_RATE_LIM       = 34 // 34. rate_lim [.F..]: configured limit on new sessions per second
	HF_RATE_MAX       = 35 // 35. rate_max [.FBS]: max number of new sessions per second
	HF_CHECK_STATUS   = 36 // 36. check_status [...S]: status of last health check
	HF_CHECK_CODE     = 37 // 37. check_code [...S]: layer5-7 code, if available
	HF_CHECK_DURATION = 38 // 38. check_duration [...S]: time in ms it took to finish the last health check
	HF_HRSP_1xx       = 39 // 39. hrsp_1xx [.FBS]: http responses with 1xx code
	HF_HRSP_2xx       = 40 // 40. hrsp_2xx [.FBS]: http responses with 2xx code
	HF_HRSP_3xx       = 41 // 41. hrsp_3xx [.FBS]: http responses with 3xx code
	HF_HRSP_4xx       = 42 // 42. hrsp_4xx [.FBS]: http responses with 4xx code
	HF_HRSP_5xx       = 43 // 43. hrsp_5xx [.FBS]: http responses with 5xx code
	HF_HRSP_OTHER     = 44 // 44. hrsp_other [.FBS]: http responses with other codes (protocol error)
	HF_HANAFAIL       = 45 // 45. hanafail [...S]: failed health checks details
	HF_REQ_RATE       = 46 // 46. req_rate [.F..]: HTTP requests per second over last elapsed second
	HF_REQ_RATE_MAX   = 47 // 47. req_rate_max [.F..]: max number of HTTP requests per second observed
	HF_REQ_TOT        = 48 // 48. req_tot [.F..]: total number of HTTP requests received
	HF_CLI_ABRT       = 49 // 49. cli_abrt [..BS]: number of data transfers aborted by the client
	HF_SRV_ABRT       = 50 // 50. srv_abrt [..BS]: number of data transfers aborted by the server (inc. in eresp)
	HF_COMP_IN        = 51 // 51. comp_in [.FB.]: number of HTTP response bytes fed to the compressor
	HF_COMP_OUT       = 52 // 52. comp_out [.FB.]: number of HTTP response bytes emitted by the compressor
	HF_COMP_BYP       = 53 // 53. comp_byp [.FB.]: number of bytes that bypassed the HTTP compressor (CPU/BW limit)
	HF_COMP_RSP       = 54 // 54. comp_rsp [.FB.]: number of HTTP responses that were compressed
	HF_LASTSESS       = 55 // 55. lastsess [..BS]: number of seconds since last session assigned to server/backend
	HF_LAST_CHK       = 56 // 56. last_chk [...S]: last health check contents or textual error
	HF_LAST_AGT       = 57 // 57. last_agt [...S]: last agent check contents or textual error
	HF_QTIME          = 58 // 58. qtime [..BS]: the average queue time in ms over the 1024 last requests
	HF_CTIME          = 59 // 59. ctime [..BS]: the average connect time in ms over the 1024 last requests
	HF_RTIME          = 60 // 60. rtime [..BS]: the average response time in ms over the 1024 last requests (0 for TCP)
	HF_TTIME          = 61 // 61. ttime [..BS]: the average total session time in ms over the 1024 last requests
)

type haproxy struct {
	Servers []string

	client *http.Client
}

var sampleConfig = `
  ## An array of addresses to gather stats about. Specify an ip or hostname
  ## with optional port. ie localhost, 10.10.3.33:1936, etc.

  ## If no servers are specified, then default to 127.0.0.1:1936
  servers = ["http://myhaproxy.com:1936", "http://anotherhaproxy.com:1936"]
  ## Or you can also use a local socket (not supported yet)
  ## servers = ["socket://run/haproxy/admin.sock"]
`

func (r *haproxy) SampleConfig() string {
	return sampleConfig
}

func (r *haproxy) Description() string {
	return "Read metrics of haproxy, via socket or csv stats page"
}

// Gather reads stats from all configured servers and accumulates them.
// It returns one of the errors encountered while gathering stats (if any).
func (g *haproxy) Gather(acc telegraf.Accumulator) error {
	if len(g.Servers) == 0 {
		return g.gatherServer("http://127.0.0.1:1936", acc)
	}

	var wg sync.WaitGroup
	var mu sync.Mutex
	var outerr error

	for _, serv := range g.Servers {
		wg.Add(1)
		go func(serv string) {
			defer wg.Done()
			if err := g.gatherServer(serv, acc); err != nil {
				// Keep one of the errors; which one wins is unspecified.
				mu.Lock()
				outerr = err
				mu.Unlock()
			}
		}(serv)
	}

	wg.Wait()

	return outerr
}

func (g *haproxy) gatherServer(addr string, acc telegraf.Accumulator) error {
	if g.client == nil {
		g.client = &http.Client{}
	}

	u, err := url.Parse(addr)
	if err != nil {
		return fmt.Errorf("Unable to parse server address '%s': %s", addr, err)
	}

	req, err := http.NewRequest("GET", fmt.Sprintf("%s://%s%s/;csv", u.Scheme, u.Host, u.Path), nil)
	if err != nil {
		return err
	}
	if u.User != nil {
		p, _ := u.User.Password()
		req.SetBasicAuth(u.User.Username(), p)
	}

	res, err := g.client.Do(req)
	if err != nil {
		return fmt.Errorf("Unable to connect to haproxy server '%s': %s", addr, err)
	}

	if res.StatusCode != 200 {
		return fmt.Errorf("Unable to get valid stat result from '%s': HTTP status %d", addr, res.StatusCode)
	}

	return importCsvResult(res.Body, acc, u.Host)
}

// fieldNames maps the CSV column indices we collect to the field names
// emitted on the "haproxy" measurement.
var fieldNames = map[int]string{
	HF_QCUR:           "qcur",
	HF_QMAX:           "qmax",
	HF_SCUR:           "scur",
	HF_SMAX:           "smax",
	HF_STOT:           "stot",
	HF_BIN:            "bin",
	HF_BOUT:           "bout",
	HF_DREQ:           "dreq",
	HF_DRESP:          "dresp",
	HF_EREQ:           "ereq",
	HF_ECON:           "econ",
	HF_ERESP:          "eresp",
	HF_WRETR:          "wretr",
	HF_WREDIS:         "wredis",
	HF_ACT:            "active_servers",
	HF_BCK:            "backup_servers",
	HF_DOWNTIME:       "downtime",
	HF_THROTTLE:       "throttle",
	HF_LBTOT:          "lbtot",
	HF_RATE:           "rate",
	HF_RATE_MAX:       "rate_max",
	HF_CHECK_DURATION: "check_duration",
	HF_HRSP_1xx:       "http_response.1xx",
	HF_HRSP_2xx:       "http_response.2xx",
	HF_HRSP_3xx:       "http_response.3xx",
	HF_HRSP_4xx:       "http_response.4xx",
	HF_HRSP_5xx:       "http_response.5xx",
	HF_REQ_RATE:       "req_rate",
	HF_REQ_RATE_MAX:   "req_rate_max",
	HF_REQ_TOT:        "req_tot",
	HF_CLI_ABRT:       "cli_abort",
	HF_SRV_ABRT:       "srv_abort",
	HF_QTIME:          "qtime",
	HF_CTIME:          "ctime",
	HF_RTIME:          "rtime",
	HF_TTIME:          "ttime",
}

// importCsvResult parses one stats payload and adds a point per CSV row.
func importCsvResult(r io.Reader, acc telegraf.Accumulator, host string) error {
	reader := csv.NewReader(r)
	result, err := reader.ReadAll()
	now := time.Now()

	for _, row := range result {
		fields := make(map[string]interface{})
		tags := map[string]string{
			"server": host,
			"proxy":  row[HF_PXNAME],
			"sv":     row[HF_SVNAME],
		}
		for field, v := range row {
			name, ok := fieldNames[field]
			if !ok {
				continue
			}
			// Columns that do not apply to a row are empty; skip anything
			// that does not parse as an unsigned integer.
			ival, err := strconv.ParseUint(v, 10, 64)
			if err == nil {
				fields[name] = ival
			}
		}
		acc.AddFields("haproxy", fields, tags, now)
	}
	return err
}

func init() {
	inputs.Add("haproxy", func() telegraf.Input {
		return &haproxy{}
	})
}
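To make the column-to-field mapping concrete, here is a minimal sketch of feeding one stat row through the importer (as it might appear in a test in this package; the row and host are illustrative, and `testutil.Accumulator` is the same helper the tests below use):

```go
var acc testutil.Accumulator
// A single truncated server row: proxy "be_app", server "host0",
// then qcur=0, qmax=0, scur=1, smax=32.
row := "be_app,host0,0,0,1,32"
if err := importCsvResult(strings.NewReader(row), &acc, "10.0.0.1:1936"); err != nil {
	panic(err)
}
// acc now holds one "haproxy" point tagged server/proxy/sv, with the
// numeric columns as fields (qcur=0, qmax=0, scur=1, smax=32).
```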
179 plugins/inputs/haproxy/haproxy_test.go (Normal file)
@@ -0,0 +1,179 @@
package haproxy

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"strings"
	"testing"

	"github.com/influxdata/telegraf/testutil"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// csvSampleFields are the fields both tests expect for the
// be_app/host0 row of csvOutputSample.
var csvSampleFields = map[string]interface{}{
	"active_servers":    uint64(1),
	"backup_servers":    uint64(0),
	"bin":               uint64(510913516),
	"bout":              uint64(2193856571),
	"check_duration":    uint64(10),
	"cli_abort":         uint64(73),
	"ctime":             uint64(2),
	"downtime":          uint64(0),
	"dresp":             uint64(0),
	"econ":              uint64(0),
	"eresp":             uint64(1),
	"http_response.1xx": uint64(0),
	"http_response.2xx": uint64(119534),
	"http_response.3xx": uint64(48051),
	"http_response.4xx": uint64(2345),
	"http_response.5xx": uint64(1056),
	"lbtot":             uint64(171013),
	"qcur":              uint64(0),
	"qmax":              uint64(0),
	"qtime":             uint64(0),
	"rate":              uint64(3),
	"rate_max":          uint64(12),
	"rtime":             uint64(312),
	"scur":              uint64(1),
	"smax":              uint64(32),
	"srv_abort":         uint64(1),
	"stot":              uint64(171014),
	"ttime":             uint64(2341),
	"wredis":            uint64(0),
	"wretr":             uint64(1),
}

func TestHaproxyGeneratesMetricsWithAuthentication(t *testing.T) {
	// We create a fake server to return test data.
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		username, password, ok := r.BasicAuth()
		if !ok || username != "user" || password != "password" {
			w.WriteHeader(http.StatusUnauthorized)
			fmt.Fprint(w, "Unauthorized")
			return
		}
		fmt.Fprint(w, csvOutputSample)
	}))
	defer ts.Close()

	// Now we test against the above server, passing authentication data.
	r := &haproxy{
		Servers: []string{strings.Replace(ts.URL, "http://", "http://user:password@", 1)},
	}

	var acc testutil.Accumulator

	err := r.Gather(&acc)
	require.NoError(t, err)

	tags := map[string]string{
		"server": ts.Listener.Addr().String(),
		"proxy":  "be_app",
		"sv":     "host0",
	}

	acc.AssertContainsTaggedFields(t, "haproxy", csvSampleFields, tags)

	// Here we should get an error, because we don't pass authentication data.
	r = &haproxy{
		Servers: []string{ts.URL},
	}

	err = r.Gather(&acc)
	require.Error(t, err)
}

func TestHaproxyGeneratesMetricsWithoutAuthentication(t *testing.T) {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, csvOutputSample)
	}))
	defer ts.Close()

	r := &haproxy{
		Servers: []string{ts.URL},
	}

	var acc testutil.Accumulator

	err := r.Gather(&acc)
	require.NoError(t, err)

	tags := map[string]string{
		"proxy":  "be_app",
		"server": ts.Listener.Addr().String(),
		"sv":     "host0",
	}

	acc.AssertContainsTaggedFields(t, "haproxy", csvSampleFields, tags)
}

// When no server config is passed, we default to localhost.
// We just want to make sure we requested stats from localhost.
func TestHaproxyDefaultGetFromLocalhost(t *testing.T) {
	r := &haproxy{}

	var acc testutil.Accumulator

	err := r.Gather(&acc)
	require.Error(t, err)
	assert.Contains(t, err.Error(), "127.0.0.1:1936/;csv")
}

const csvOutputSample = `
# pxname,svname,qcur,qmax,scur,smax,slim,stot,bin,bout,dreq,dresp,ereq,econ,eresp,wretr,wredis,status,weight,act,bck,chkfail,chkdown,lastchg,downtime,qlimit,pid,iid,sid,throttle,lbtot,tracked,type,rate,rate_lim,rate_max,check_status,check_code,check_duration,hrsp_1xx,hrsp_2xx,hrsp_3xx,hrsp_4xx,hrsp_5xx,hrsp_other,hanafail,req_rate,req_rate_max,req_tot,cli_abrt,srv_abrt,comp_in,comp_out,comp_byp,comp_rsp,lastsess,last_chk,last_agt,qtime,ctime,rtime,ttime,
fe_app,FRONTEND,,81,288,713,2000,1094063,5557055817,24096715169,1102,80,95740,,,17,19,OPEN,,,,,,,,,2,16,113,13,114,,0,18,0,102,,,,0,1314093,537036,123452,11966,1360,,35,140,1987928,,,0,0,0,0,,,,,,,,
be_static,host0,0,0,0,3,,3209,1141294,17389596,,0,,0,0,0,0,no check,1,1,0,,,,,,2,17,1,,3209,,2,0,,7,,,,0,218,1497,1494,0,0,0,,,,0,0,,,,,2,,,0,2,23,545,
be_static,BACKEND,0,0,0,3,200,3209,1141294,17389596,0,0,,0,0,0,0,UP,1,1,0,,0,70698,0,,2,17,0,,3209,,1,0,,7,,,,0,218,1497,1494,0,0,,,,,0,0,0,0,0,0,2,,,0,2,23,545,
be_static,host0,0,0,0,1,,28,17313,466003,,0,,0,0,0,0,UP,1,1,0,0,0,70698,0,,2,18,1,,28,,2,0,,1,L4OK,,1,0,17,6,5,0,0,0,,,,0,0,,,,,2103,,,0,1,1,36,
be_static,host4,0,0,0,1,,28,15358,1281073,,0,,0,0,0,0,UP,1,1,0,0,0,70698,0,,2,18,2,,28,,2,0,,1,L4OK,,1,0,20,5,3,0,0,0,,,,0,0,,,,,2076,,,0,1,1,54,
be_static,host5,0,0,0,1,,28,17547,1970404,,0,,0,0,0,0,UP,1,1,0,0,0,70698,0,,2,18,3,,28,,2,0,,1,L4OK,,0,0,20,5,3,0,0,0,,,,0,0,,,,,1495,,,0,1,1,53,
be_static,host6,0,0,0,1,,28,14105,1328679,,0,,0,0,0,0,UP,1,1,0,0,0,70698,0,,2,18,4,,28,,2,0,,1,L4OK,,0,0,18,8,2,0,0,0,,,,0,0,,,,,1418,,,0,0,1,49,
be_static,host7,0,0,0,1,,28,15258,1965185,,0,,0,0,0,0,UP,1,1,0,0,0,70698,0,,2,18,5,,28,,2,0,,1,L4OK,,0,0,17,8,3,0,0,0,,,,0,0,,,,,935,,,0,0,1,28,
be_static,host8,0,0,0,1,,28,12934,1034779,,0,,0,0,0,0,UP,1,1,0,0,0,70698,0,,2,18,6,,28,,2,0,,1,L4OK,,0,0,17,9,2,0,0,0,,,,0,0,,,,,582,,,0,1,1,66,
be_static,host9,0,0,0,1,,28,13434,134063,,0,,0,0,0,0,UP,1,1,0,0,0,70698,0,,2,18,7,,28,,2,0,,1,L4OK,,0,0,17,8,3,0,0,0,,,,0,0,,,,,539,,,0,0,1,80,
be_static,host1,0,0,0,1,,28,7873,1209688,,0,,0,0,0,0,UP,1,1,0,0,0,70698,0,,2,18,8,,28,,2,0,,1,L4OK,,0,0,22,6,0,0,0,0,,,,0,0,,,,,487,,,0,0,1,36,
be_static,host2,0,0,0,1,,28,13830,1085929,,0,,0,0,0,0,UP,1,1,0,0,0,70698,0,,2,18,9,,28,,2,0,,1,L4OK,,0,0,19,6,3,0,0,0,,,,0,0,,,,,338,,,0,1,1,38,
be_static,host3,0,0,0,1,,28,17959,1259760,,0,,0,0,0,0,UP,1,1,0,0,0,70698,0,,2,18,10,,28,,2,0,,1,L4OK,,1,0,20,6,2,0,0,0,,,,0,0,,,,,92,,,0,1,1,17,
be_static,BACKEND,0,0,0,2,200,307,160276,13322728,0,0,,0,0,0,0,UP,11,11,0,,0,70698,0,,2,18,0,,307,,1,0,,4,,,,0,205,73,29,0,0,,,,,0,0,0,0,0,0,92,,,0,1,3,381,
be_app,host0,0,0,1,32,,171014,510913516,2193856571,,0,,0,1,1,0,UP,100,1,0,1,0,70698,0,,2,19,1,,171013,,2,3,,12,L7OK,301,10,0,119534,48051,2345,1056,0,0,,,,73,1,,,,,0,Moved Permanently,,0,2,312,2341,
be_app,host4,0,0,2,29,,171013,499318742,2195595896,12,34,,0,2,0,0,UP,100,1,0,2,0,70698,0,,2,19,2,,171013,,2,3,,12,L7OK,301,12,0,119572,47882,2441,1088,0,0,,,,84,2,,,,,0,Moved Permanently,,0,2,316,2355,
`
158 plugins/inputs/httpjson/README.md (Normal file)
@@ -0,0 +1,158 @@
|
||||
# HTTP JSON Plugin
|
||||
|
||||
The httpjson plugin collects data from remote URLs which respond with JSON. It flattens the JSON response and collects every numeric value, treating each one as a float.
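As a rough sketch of that flattening rule (a minimal illustration only, not the plugin's actual implementation — the real work is delegated to telegraf's shared JSON parser via `parsers.NewJSONParser`): nested keys are joined with `_`, list elements get their index appended, and only numeric leaves survive.

```go
package main

import "fmt"

// flatten walks a decoded JSON value and keeps only numeric leaves,
// joining nested keys with underscores (so b.d becomes "b_d") and
// appending indices for list elements (so list[0] becomes "list_0").
func flatten(prefix string, v interface{}, out map[string]float64) {
	switch t := v.(type) {
	case map[string]interface{}:
		for k, child := range t {
			key := k
			if prefix != "" {
				key = prefix + "_" + k
			}
			flatten(key, child, out)
		}
	case []interface{}:
		for i, child := range t {
			flatten(fmt.Sprintf("%s_%d", prefix, i), child, out)
		}
	case float64: // encoding/json decodes every JSON number as float64
		out[prefix] = t
	}
	// strings, booleans and nulls fall through and are ignored
}

func main() {
	doc := map[string]interface{}{
		"a": 0.5,
		"b": map[string]interface{}{"c": "some text", "d": 0.1, "e": 5.0},
	}
	out := map[string]float64{}
	flatten("", doc, out)
	fmt.Println(out) // map[a:0.5 b_d:0.1 b_e:5]
}
```

Keys like `b.d` therefore become fields named `b_d`, as in the examples below.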
|
||||
For example, if you have a service called _mycollector_, which has an HTTP endpoint for gathering stats at http://my.service.com/_stats, you would configure the HTTP JSON
|
||||
plugin like this:
|
||||
|
||||
```
|
||||
[[httpjson.services]]
|
||||
name = "mycollector"
|
||||
|
||||
servers = [
|
||||
"http://my.service.com/_stats"
|
||||
]
|
||||
|
||||
# HTTP method to use (case-sensitive)
|
||||
method = "GET"
|
||||
```
|
||||
|
||||
`name` is used as a prefix for the measurements.
|
||||
|
||||
`method` specifies the HTTP method to use for requests.
|
||||
|
||||
You can also specify which keys from the server response should be treated as tags:
|
||||
|
||||
```
|
||||
[[httpjson.services]]
|
||||
...
|
||||
|
||||
tag_keys = [
|
||||
"role",
|
||||
"version"
|
||||
]
|
||||
```
|
||||
|
||||
You can also specify additional request parameters for the service:
|
||||
|
||||
```
|
||||
[[httpjson.services]]
|
||||
...
|
||||
|
||||
[httpjson.services.parameters]
|
||||
event_type = "cpu_spike"
|
||||
threshold = "0.75"
|
||||
|
||||
```
|
||||
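How these parameters are applied depends on the configured `method` (see `sendRequest` in httpjson.go later in this diff): for GET they are appended to the URL's query string, while for POST they are form-encoded into the request body. For the parameters above, the two requests would look roughly like:

```
GET  http://my.service.com/_stats?event_type=cpu_spike&threshold=0.75

POST http://my.service.com/_stats
     body: event_type=cpu_spike&threshold=0.75
```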
|
||||
You can also specify additional request header parameters for the service:
|
||||
|
||||
```
|
||||
[[httpjson.services]]
|
||||
...
|
||||
|
||||
[httpjson.services.headers]
|
||||
X-Auth-Token = "my-xauth-token"
|
||||
apiVersion = "v1"
|
||||
```
|
||||
|
||||
# Example:
|
||||
|
||||
Let's say that we have a service named "mycollector" configured like this:
|
||||
|
||||
```
|
||||
[httpjson]
|
||||
[[httpjson.services]]
|
||||
name = "mycollector"
|
||||
|
||||
servers = [
|
||||
"http://my.service.com/_stats"
|
||||
]
|
||||
|
||||
# HTTP method to use (case-sensitive)
|
||||
method = "GET"
|
||||
|
||||
tag_keys = ["service"]
|
||||
```
|
||||
|
||||
which responds with the following JSON:
|
||||
|
||||
```json
|
||||
{
|
||||
"service": "service01",
|
||||
"a": 0.5,
|
||||
"b": {
|
||||
"c": "some text",
|
||||
"d": 0.1,
|
||||
"e": 5
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
The collected metrics will be:
|
||||
```
|
||||
httpjson_mycollector_a,service='service01',server='http://my.service.com/_stats' value=0.5
|
||||
httpjson_mycollector_b_d,service='service01',server='http://my.service.com/_stats' value=0.1
|
||||
httpjson_mycollector_b_e,service='service01',server='http://my.service.com/_stats' value=5
|
||||
```
|
||||
|
||||
# Example 2, Multiple Services:
|
||||
|
||||
There is also the option to collect JSON from multiple services; here is an example doing that.
|
||||
|
||||
```
|
||||
[httpjson]
|
||||
[[httpjson.services]]
|
||||
name = "mycollector1"
|
||||
|
||||
servers = [
|
||||
"http://my.service1.com/_stats"
|
||||
]
|
||||
|
||||
# HTTP method to use (case-sensitive)
|
||||
method = "GET"
|
||||
|
||||
[[httpjson.services]]
|
||||
name = "mycollector2"
|
||||
|
||||
servers = [
|
||||
"http://service.net/json/stats"
|
||||
]
|
||||
|
||||
# HTTP method to use (case-sensitive)
|
||||
method = "POST"
|
||||
```
|
||||
|
||||
The services respond with the following JSON:
|
||||
|
||||
mycollector1:
|
||||
```json
|
||||
{
|
||||
"a": 0.5,
|
||||
"b": {
|
||||
"c": "some text",
|
||||
"d": 0.1,
|
||||
"e": 5
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
mycollector2:
|
||||
```json
|
||||
{
|
||||
"load": 100,
|
||||
"users": 1335
|
||||
}
|
||||
```
|
||||
|
||||
The collected metrics will be:
|
||||
|
||||
```
|
||||
httpjson_mycollector1_a,server='http://my.service1.com/_stats' value=0.5
httpjson_mycollector1_b_d,server='http://my.service1.com/_stats' value=0.1
httpjson_mycollector1_b_e,server='http://my.service1.com/_stats' value=5
|
||||
|
||||
httpjson_mycollector2_load,server='http://service.net/json/stats' value=100
|
||||
httpjson_mycollector2_users,server='http://service.net/json/stats' value=1335
|
||||
```
|
||||
249 plugins/inputs/httpjson/httpjson.go (new file)
@@ -0,0 +1,249 @@
|
||||
package httpjson
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
"github.com/influxdata/telegraf/plugins/parsers"
|
||||
)
|
||||
|
||||
type HttpJson struct {
|
||||
Name string
|
||||
Servers []string
|
||||
Method string
|
||||
TagKeys []string
|
||||
Parameters map[string]string
|
||||
Headers map[string]string
|
||||
|
||||
client HTTPClient
|
||||
}
|
||||
|
||||
type HTTPClient interface {
|
||||
// Returns the result of an http request
|
||||
//
|
||||
// Parameters:
|
||||
// req: HTTP request object
|
||||
//
|
||||
// Returns:
|
||||
// http.Response: HTTP response object
|
||||
// error : Any error that may have occurred
|
||||
MakeRequest(req *http.Request) (*http.Response, error)
|
||||
}
|
||||
|
||||
type RealHTTPClient struct {
|
||||
client *http.Client
|
||||
}
|
||||
|
||||
func (c RealHTTPClient) MakeRequest(req *http.Request) (*http.Response, error) {
|
||||
return c.client.Do(req)
|
||||
}
|
||||
|
||||
var sampleConfig = `
|
||||
## NOTE: This plugin only reads numerical measurements; strings and booleans
## will be ignored.
|
||||
|
||||
## a name for the service being polled
|
||||
name = "webserver_stats"
|
||||
|
||||
## URL of each server in the service's cluster
|
||||
servers = [
|
||||
"http://localhost:9999/stats/",
|
||||
"http://localhost:9998/stats/",
|
||||
]
|
||||
|
||||
## HTTP method to use: GET or POST (case-sensitive)
|
||||
method = "GET"
|
||||
|
||||
## List of tag names to extract from top-level of JSON server response
|
||||
# tag_keys = [
|
||||
# "my_tag_1",
|
||||
# "my_tag_2"
|
||||
# ]
|
||||
|
||||
## HTTP parameters (all values must be strings)
|
||||
[inputs.httpjson.parameters]
|
||||
event_type = "cpu_spike"
|
||||
threshold = "0.75"
|
||||
|
||||
## HTTP Header parameters (all values must be strings)
|
||||
# [inputs.httpjson.headers]
|
||||
# X-Auth-Token = "my-xauth-token"
|
||||
# apiVersion = "v1"
|
||||
`
|
||||
|
||||
func (h *HttpJson) SampleConfig() string {
|
||||
return sampleConfig
|
||||
}
|
||||
|
||||
func (h *HttpJson) Description() string {
|
||||
return "Read flattened metrics from one or more JSON HTTP endpoints"
|
||||
}
|
||||
|
||||
// Gathers data for all servers.
|
||||
func (h *HttpJson) Gather(acc telegraf.Accumulator) error {
|
||||
var wg sync.WaitGroup
|
||||
|
||||
errorChannel := make(chan error, len(h.Servers))
|
||||
|
||||
for _, server := range h.Servers {
|
||||
wg.Add(1)
|
||||
go func(server string) {
|
||||
defer wg.Done()
|
||||
if err := h.gatherServer(acc, server); err != nil {
|
||||
errorChannel <- err
|
||||
}
|
||||
}(server)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
close(errorChannel)
|
||||
|
||||
// Get all errors and return them as one giant error
|
||||
errorStrings := []string{}
|
||||
for err := range errorChannel {
|
||||
errorStrings = append(errorStrings, err.Error())
|
||||
}
|
||||
|
||||
if len(errorStrings) == 0 {
|
||||
return nil
|
||||
}
|
||||
return errors.New(strings.Join(errorStrings, "\n"))
|
||||
}
|
||||
|
||||
// Gathers data from a particular server
|
||||
// Parameters:
|
||||
// acc : The telegraf Accumulator to use
|
||||
// serverURL: endpoint to send request to
|
||||
// service : the service being queried
|
||||
//
|
||||
// Returns:
|
||||
// error: Any error that may have occurred
|
||||
func (h *HttpJson) gatherServer(
|
||||
acc telegraf.Accumulator,
|
||||
serverURL string,
|
||||
) error {
|
||||
resp, responseTime, err := h.sendRequest(serverURL)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var msrmnt_name string
|
||||
if h.Name == "" {
|
||||
msrmnt_name = "httpjson"
|
||||
} else {
|
||||
msrmnt_name = "httpjson_" + h.Name
|
||||
}
|
||||
tags := map[string]string{
|
||||
"server": serverURL,
|
||||
}
|
||||
|
||||
parser, err := parsers.NewJSONParser(msrmnt_name, h.TagKeys, tags)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
metrics, err := parser.Parse([]byte(resp))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, metric := range metrics {
|
||||
fields := make(map[string]interface{})
|
||||
for k, v := range metric.Fields() {
|
||||
fields[k] = v
|
||||
}
|
||||
fields["response_time"] = responseTime
|
||||
acc.AddFields(metric.Name(), fields, metric.Tags())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Sends an HTTP request to the server using the HttpJson object's HTTPClient.
|
||||
// This request can be either a GET or a POST.
|
||||
// Parameters:
|
||||
// serverURL: endpoint to send request to
|
||||
//
|
||||
// Returns:
|
||||
// string: body of the response
|
||||
// error : Any error that may have occurred
|
||||
func (h *HttpJson) sendRequest(serverURL string) (string, float64, error) {
|
||||
// Prepare URL
|
||||
requestURL, err := url.Parse(serverURL)
|
||||
if err != nil {
|
||||
return "", -1, fmt.Errorf("Invalid server URL \"%s\"", serverURL)
|
||||
}
|
||||
|
||||
data := url.Values{}
|
||||
switch {
|
||||
case h.Method == "GET":
|
||||
params := requestURL.Query()
|
||||
for k, v := range h.Parameters {
|
||||
params.Add(k, v)
|
||||
}
|
||||
requestURL.RawQuery = params.Encode()
|
||||
|
||||
case h.Method == "POST":
|
||||
requestURL.RawQuery = ""
|
||||
for k, v := range h.Parameters {
|
||||
data.Add(k, v)
|
||||
}
|
||||
}
|
||||
|
||||
// Create + send request
|
||||
req, err := http.NewRequest(h.Method, requestURL.String(),
|
||||
strings.NewReader(data.Encode()))
|
||||
if err != nil {
|
||||
return "", -1, err
|
||||
}
|
||||
|
||||
// Add header parameters
|
||||
for k, v := range h.Headers {
|
||||
if strings.ToLower(k) == "host" {
|
||||
req.Host = v
|
||||
} else {
|
||||
req.Header.Add(k, v)
|
||||
}
|
||||
}
|
||||
|
||||
start := time.Now()
|
||||
resp, err := h.client.MakeRequest(req)
|
||||
if err != nil {
|
||||
return "", -1, err
|
||||
}
|
||||
|
||||
defer resp.Body.Close()
|
||||
responseTime := time.Since(start).Seconds()
|
||||
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return string(body), responseTime, err
|
||||
}
|
||||
|
||||
// Process response
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
err = fmt.Errorf("Response from url \"%s\" has status code %d (%s), expected %d (%s)",
|
||||
requestURL.String(),
|
||||
resp.StatusCode,
|
||||
http.StatusText(resp.StatusCode),
|
||||
http.StatusOK,
|
||||
http.StatusText(http.StatusOK))
|
||||
return string(body), responseTime, err
|
||||
}
|
||||
|
||||
return string(body), responseTime, err
|
||||
}
|
||||
|
||||
func init() {
|
||||
inputs.Add("httpjson", func() telegraf.Input {
|
||||
return &HttpJson{client: RealHTTPClient{client: &http.Client{}}}
|
||||
})
|
||||
}
|
||||
506 plugins/inputs/httpjson/httpjson_test.go (new file)
@@ -0,0 +1,506 @@
|
||||
package httpjson
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
const validJSON = `
|
||||
{
|
||||
"parent": {
|
||||
"child": 3.0,
|
||||
"ignored_child": "hi"
|
||||
},
|
||||
"ignored_null": null,
|
||||
"integer": 4,
|
||||
"list": [3, 4],
|
||||
"ignored_parent": {
|
||||
"another_ignored_null": null,
|
||||
"ignored_string": "hello, world!"
|
||||
},
|
||||
"another_list": [4]
|
||||
}`
|
||||
|
||||
const validJSON2 = `{
|
||||
"user":{
|
||||
"hash_rate":0,
|
||||
"expected_24h_rewards":0,
|
||||
"total_rewards":0.000595109232,
|
||||
"paid_rewards":0,
|
||||
"unpaid_rewards":0.000595109232,
|
||||
"past_24h_rewards":0,
|
||||
"total_work":"5172625408",
|
||||
"blocks_found":0
|
||||
},
|
||||
"workers":{
|
||||
"brminer.1":{
|
||||
"hash_rate":0,
|
||||
"hash_rate_24h":0,
|
||||
"valid_shares":"6176",
|
||||
"stale_shares":"0",
|
||||
"invalid_shares":"0",
|
||||
"rewards":4.5506464e-5,
|
||||
"rewards_24h":0,
|
||||
"reset_time":1455409950
|
||||
},
|
||||
"brminer.2":{
|
||||
"hash_rate":0,
|
||||
"hash_rate_24h":0,
|
||||
"valid_shares":"0",
|
||||
"stale_shares":"0",
|
||||
"invalid_shares":"0",
|
||||
"rewards":0,
|
||||
"rewards_24h":0,
|
||||
"reset_time":1455936726
|
||||
},
|
||||
"brminer.3":{
|
||||
"hash_rate":0,
|
||||
"hash_rate_24h":0,
|
||||
"valid_shares":"0",
|
||||
"stale_shares":"0",
|
||||
"invalid_shares":"0",
|
||||
"rewards":0,
|
||||
"rewards_24h":0,
|
||||
"reset_time":1455936733
|
||||
}
|
||||
},
|
||||
"pool":{
|
||||
"hash_rate":114100000,
|
||||
"active_users":843,
|
||||
"total_work":"5015346808842682368",
|
||||
"pps_ratio":1.04,
|
||||
"pps_rate":7.655e-9
|
||||
},
|
||||
"network":{
|
||||
"hash_rate":1426117703,
|
||||
"block_number":944895,
|
||||
"time_per_block":156,
|
||||
"difficulty":51825.72835216,
|
||||
"next_difficulty":51916.15249019,
|
||||
"retarget_time":95053
|
||||
},
|
||||
"market":{
|
||||
"ltc_btc":0.00798,
|
||||
"ltc_usd":3.37801,
|
||||
"ltc_eur":3.113,
|
||||
"ltc_gbp":2.32807,
|
||||
"ltc_rub":241.796,
|
||||
"ltc_cny":21.3883,
|
||||
"btc_usd":422.852
|
||||
}
|
||||
}`
|
||||
|
||||
const validJSONTags = `
|
||||
{
|
||||
"value": 15,
|
||||
"role": "master",
|
||||
"build": "123"
|
||||
}`
|
||||
|
||||
var expectedFields = map[string]interface{}{
|
||||
"parent_child": float64(3),
|
||||
"list_0": float64(3),
|
||||
"list_1": float64(4),
|
||||
"another_list_0": float64(4),
|
||||
"integer": float64(4),
|
||||
}
|
||||
|
||||
const invalidJSON = "I don't think this is JSON"
|
||||
|
||||
const empty = ""
|
||||
|
||||
type mockHTTPClient struct {
|
||||
responseBody string
|
||||
statusCode int
|
||||
}
|
||||
|
||||
// Mock implementation of MakeRequest. Usually returns an http.Response with
|
||||
// hard-coded responseBody and statusCode. However, if the request uses a
|
||||
// nonstandard method, it uses status code 405 (method not allowed)
|
||||
func (c mockHTTPClient) MakeRequest(req *http.Request) (*http.Response, error) {
|
||||
resp := http.Response{}
|
||||
resp.StatusCode = c.statusCode
|
||||
|
||||
// basic error checking on request method
|
||||
allowedMethods := []string{"GET", "HEAD", "POST", "PUT", "DELETE", "TRACE", "CONNECT"}
|
||||
methodValid := false
|
||||
for _, method := range allowedMethods {
|
||||
if req.Method == method {
|
||||
methodValid = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !methodValid {
|
||||
resp.StatusCode = 405 // Method not allowed
|
||||
}
|
||||
|
||||
resp.Body = ioutil.NopCloser(strings.NewReader(c.responseBody))
|
||||
return &resp, nil
|
||||
}
|
||||
|
||||
// Generates HttpJson objects that use a mock HTTP client.
|
||||
// Parameters:
|
||||
// response : Body of the response that the mock HTTP client should return
|
||||
// statusCode: HTTP status code the mock HTTP client should return
|
||||
//
|
||||
// Returns:
|
||||
// []*HttpJson: Slice of HttpJson objects that use the generated mock HTTP client
|
||||
func genMockHttpJson(response string, statusCode int) []*HttpJson {
|
||||
return []*HttpJson{
|
||||
&HttpJson{
|
||||
client: mockHTTPClient{responseBody: response, statusCode: statusCode},
|
||||
Servers: []string{
|
||||
"http://server1.example.com/metrics/",
|
||||
"http://server2.example.com/metrics/",
|
||||
},
|
||||
Name: "my_webapp",
|
||||
Method: "GET",
|
||||
Parameters: map[string]string{
|
||||
"httpParam1": "12",
|
||||
"httpParam2": "the second parameter",
|
||||
},
|
||||
Headers: map[string]string{
|
||||
"X-Auth-Token": "the-first-parameter",
|
||||
"apiVersion": "v1",
|
||||
},
|
||||
},
|
||||
&HttpJson{
|
||||
client: mockHTTPClient{responseBody: response, statusCode: statusCode},
|
||||
Servers: []string{
|
||||
"http://server3.example.com/metrics/",
|
||||
"http://server4.example.com/metrics/",
|
||||
},
|
||||
Name: "other_webapp",
|
||||
Method: "POST",
|
||||
Parameters: map[string]string{
|
||||
"httpParam1": "12",
|
||||
"httpParam2": "the second parameter",
|
||||
},
|
||||
Headers: map[string]string{
|
||||
"X-Auth-Token": "the-first-parameter",
|
||||
"apiVersion": "v1",
|
||||
},
|
||||
TagKeys: []string{
|
||||
"role",
|
||||
"build",
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Test that the proper values are ignored or collected
|
||||
func TestHttpJson200(t *testing.T) {
|
||||
httpjson := genMockHttpJson(validJSON, 200)
|
||||
|
||||
for _, service := range httpjson {
|
||||
var acc testutil.Accumulator
|
||||
err := service.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 12, acc.NFields())
|
||||
// Overwrite response_time with a fixed value so assertions are deterministic
|
||||
for _, p := range acc.Metrics {
|
||||
p.Fields["response_time"] = 1.0
|
||||
}
|
||||
|
||||
for _, srv := range service.Servers {
|
||||
tags := map[string]string{"server": srv}
|
||||
mname := "httpjson_" + service.Name
|
||||
expectedFields["response_time"] = 1.0
|
||||
acc.AssertContainsTaggedFields(t, mname, expectedFields, tags)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Test that GET Parameters from the url string are applied properly
|
||||
func TestHttpJsonGET_URL(t *testing.T) {
|
||||
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
key := r.FormValue("api_key")
|
||||
assert.Equal(t, "mykey", key)
|
||||
w.WriteHeader(http.StatusOK)
|
||||
fmt.Fprintln(w, validJSON2)
|
||||
}))
|
||||
defer ts.Close()
|
||||
|
||||
a := HttpJson{
|
||||
Servers: []string{ts.URL + "?api_key=mykey"},
|
||||
Name: "",
|
||||
Method: "GET",
|
||||
client: RealHTTPClient{client: &http.Client{}},
|
||||
}
|
||||
|
||||
var acc testutil.Accumulator
|
||||
err := a.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
|
||||
// remove response_time from gathered fields because it's non-deterministic
|
||||
delete(acc.Metrics[0].Fields, "response_time")
|
||||
|
||||
fields := map[string]interface{}{
|
||||
"market_btc_usd": float64(422.852),
|
||||
"market_ltc_btc": float64(0.00798),
|
||||
"market_ltc_cny": float64(21.3883),
|
||||
"market_ltc_eur": float64(3.113),
|
||||
"market_ltc_gbp": float64(2.32807),
|
||||
"market_ltc_rub": float64(241.796),
|
||||
"market_ltc_usd": float64(3.37801),
|
||||
"network_block_number": float64(944895),
|
||||
"network_difficulty": float64(51825.72835216),
|
||||
"network_hash_rate": float64(1.426117703e+09),
|
||||
"network_next_difficulty": float64(51916.15249019),
|
||||
"network_retarget_time": float64(95053),
|
||||
"network_time_per_block": float64(156),
|
||||
"pool_active_users": float64(843),
|
||||
"pool_hash_rate": float64(1.141e+08),
|
||||
"pool_pps_rate": float64(7.655e-09),
|
||||
"pool_pps_ratio": float64(1.04),
|
||||
"user_blocks_found": float64(0),
|
||||
"user_expected_24h_rewards": float64(0),
|
||||
"user_hash_rate": float64(0),
|
||||
"user_paid_rewards": float64(0),
|
||||
"user_past_24h_rewards": float64(0),
|
||||
"user_total_rewards": float64(0.000595109232),
|
||||
"user_unpaid_rewards": float64(0.000595109232),
|
||||
"workers_brminer.1_hash_rate": float64(0),
|
||||
"workers_brminer.1_hash_rate_24h": float64(0),
|
||||
"workers_brminer.1_reset_time": float64(1.45540995e+09),
|
||||
"workers_brminer.1_rewards": float64(4.5506464e-05),
|
||||
"workers_brminer.1_rewards_24h": float64(0),
|
||||
"workers_brminer.2_hash_rate": float64(0),
|
||||
"workers_brminer.2_hash_rate_24h": float64(0),
|
||||
"workers_brminer.2_reset_time": float64(1.455936726e+09),
|
||||
"workers_brminer.2_rewards": float64(0),
|
||||
"workers_brminer.2_rewards_24h": float64(0),
|
||||
"workers_brminer.3_hash_rate": float64(0),
|
||||
"workers_brminer.3_hash_rate_24h": float64(0),
|
||||
"workers_brminer.3_reset_time": float64(1.455936733e+09),
|
||||
"workers_brminer.3_rewards": float64(0),
|
||||
"workers_brminer.3_rewards_24h": float64(0),
|
||||
}
|
||||
|
||||
acc.AssertContainsFields(t, "httpjson", fields)
|
||||
}
|
||||
|
||||
// Test that GET Parameters are applied properly
|
||||
func TestHttpJsonGET(t *testing.T) {
|
||||
params := map[string]string{
|
||||
"api_key": "mykey",
|
||||
}
|
||||
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
key := r.FormValue("api_key")
|
||||
assert.Equal(t, "mykey", key)
|
||||
w.WriteHeader(http.StatusOK)
|
||||
fmt.Fprintln(w, validJSON2)
|
||||
}))
|
||||
defer ts.Close()
|
||||
|
||||
a := HttpJson{
|
||||
Servers: []string{ts.URL},
|
||||
Name: "",
|
||||
Method: "GET",
|
||||
Parameters: params,
|
||||
client: RealHTTPClient{client: &http.Client{}},
|
||||
}
|
||||
|
||||
var acc testutil.Accumulator
|
||||
err := a.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
|
||||
// remove response_time from gathered fields because it's non-deterministic
|
||||
delete(acc.Metrics[0].Fields, "response_time")
|
||||
|
||||
fields := map[string]interface{}{
|
||||
"market_btc_usd": float64(422.852),
|
||||
"market_ltc_btc": float64(0.00798),
|
||||
"market_ltc_cny": float64(21.3883),
|
||||
"market_ltc_eur": float64(3.113),
|
||||
"market_ltc_gbp": float64(2.32807),
|
||||
"market_ltc_rub": float64(241.796),
|
||||
"market_ltc_usd": float64(3.37801),
|
||||
"network_block_number": float64(944895),
|
||||
"network_difficulty": float64(51825.72835216),
|
||||
"network_hash_rate": float64(1.426117703e+09),
|
||||
"network_next_difficulty": float64(51916.15249019),
|
||||
"network_retarget_time": float64(95053),
|
||||
"network_time_per_block": float64(156),
|
||||
"pool_active_users": float64(843),
|
||||
"pool_hash_rate": float64(1.141e+08),
|
||||
"pool_pps_rate": float64(7.655e-09),
|
||||
"pool_pps_ratio": float64(1.04),
|
||||
"user_blocks_found": float64(0),
|
||||
"user_expected_24h_rewards": float64(0),
|
||||
"user_hash_rate": float64(0),
|
||||
"user_paid_rewards": float64(0),
|
||||
"user_past_24h_rewards": float64(0),
|
||||
"user_total_rewards": float64(0.000595109232),
|
||||
"user_unpaid_rewards": float64(0.000595109232),
|
||||
"workers_brminer.1_hash_rate": float64(0),
|
||||
"workers_brminer.1_hash_rate_24h": float64(0),
|
||||
"workers_brminer.1_reset_time": float64(1.45540995e+09),
|
||||
"workers_brminer.1_rewards": float64(4.5506464e-05),
|
||||
"workers_brminer.1_rewards_24h": float64(0),
|
||||
"workers_brminer.2_hash_rate": float64(0),
|
||||
"workers_brminer.2_hash_rate_24h": float64(0),
|
||||
"workers_brminer.2_reset_time": float64(1.455936726e+09),
|
||||
"workers_brminer.2_rewards": float64(0),
|
||||
"workers_brminer.2_rewards_24h": float64(0),
|
||||
"workers_brminer.3_hash_rate": float64(0),
|
||||
"workers_brminer.3_hash_rate_24h": float64(0),
|
||||
"workers_brminer.3_reset_time": float64(1.455936733e+09),
|
||||
"workers_brminer.3_rewards": float64(0),
|
||||
"workers_brminer.3_rewards_24h": float64(0),
|
||||
}
|
||||
|
||||
acc.AssertContainsFields(t, "httpjson", fields)
|
||||
}
|
||||
|
||||
// Test that POST Parameters are applied properly
|
||||
func TestHttpJsonPOST(t *testing.T) {
|
||||
params := map[string]string{
|
||||
"api_key": "mykey",
|
||||
}
|
||||
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
body, err := ioutil.ReadAll(r.Body)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "api_key=mykey", string(body))
|
||||
w.WriteHeader(http.StatusOK)
|
||||
fmt.Fprintln(w, validJSON2)
|
||||
}))
|
||||
defer ts.Close()
|
||||
|
||||
a := HttpJson{
|
||||
Servers: []string{ts.URL},
|
||||
Name: "",
|
||||
Method: "POST",
|
||||
Parameters: params,
|
||||
client: RealHTTPClient{client: &http.Client{}},
|
||||
}
|
||||
|
||||
var acc testutil.Accumulator
|
||||
err := a.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
|
||||
// remove response_time from gathered fields because it's non-deterministic
|
||||
delete(acc.Metrics[0].Fields, "response_time")
|
||||
|
||||
fields := map[string]interface{}{
|
||||
"market_btc_usd": float64(422.852),
|
||||
"market_ltc_btc": float64(0.00798),
|
||||
"market_ltc_cny": float64(21.3883),
|
||||
"market_ltc_eur": float64(3.113),
|
||||
"market_ltc_gbp": float64(2.32807),
|
||||
"market_ltc_rub": float64(241.796),
|
||||
"market_ltc_usd": float64(3.37801),
|
||||
"network_block_number": float64(944895),
|
||||
"network_difficulty": float64(51825.72835216),
|
||||
"network_hash_rate": float64(1.426117703e+09),
|
||||
"network_next_difficulty": float64(51916.15249019),
|
||||
"network_retarget_time": float64(95053),
|
||||
"network_time_per_block": float64(156),
|
||||
"pool_active_users": float64(843),
|
||||
"pool_hash_rate": float64(1.141e+08),
|
||||
"pool_pps_rate": float64(7.655e-09),
|
||||
"pool_pps_ratio": float64(1.04),
|
||||
"user_blocks_found": float64(0),
|
||||
"user_expected_24h_rewards": float64(0),
|
||||
"user_hash_rate": float64(0),
|
||||
"user_paid_rewards": float64(0),
|
||||
"user_past_24h_rewards": float64(0),
|
||||
"user_total_rewards": float64(0.000595109232),
|
||||
"user_unpaid_rewards": float64(0.000595109232),
|
||||
"workers_brminer.1_hash_rate": float64(0),
|
||||
"workers_brminer.1_hash_rate_24h": float64(0),
|
||||
"workers_brminer.1_reset_time": float64(1.45540995e+09),
|
||||
"workers_brminer.1_rewards": float64(4.5506464e-05),
|
||||
"workers_brminer.1_rewards_24h": float64(0),
|
||||
"workers_brminer.2_hash_rate": float64(0),
|
||||
"workers_brminer.2_hash_rate_24h": float64(0),
|
||||
"workers_brminer.2_reset_time": float64(1.455936726e+09),
|
||||
"workers_brminer.2_rewards": float64(0),
|
||||
"workers_brminer.2_rewards_24h": float64(0),
|
||||
"workers_brminer.3_hash_rate": float64(0),
|
||||
"workers_brminer.3_hash_rate_24h": float64(0),
|
||||
"workers_brminer.3_reset_time": float64(1.455936733e+09),
|
||||
"workers_brminer.3_rewards": float64(0),
|
||||
"workers_brminer.3_rewards_24h": float64(0),
|
||||
}
|
||||
|
||||
acc.AssertContainsFields(t, "httpjson", fields)
|
||||
}
|
||||
|
||||
// Test response to HTTP 500
|
||||
func TestHttpJson500(t *testing.T) {
|
||||
httpjson := genMockHttpJson(validJSON, 500)
|
||||
|
||||
var acc testutil.Accumulator
|
||||
err := httpjson[0].Gather(&acc)
|
||||
|
||||
assert.NotNil(t, err)
|
||||
assert.Equal(t, 0, acc.NFields())
|
||||
}
|
||||
|
||||
// Test response to HTTP 405
|
||||
func TestHttpJsonBadMethod(t *testing.T) {
|
||||
httpjson := genMockHttpJson(validJSON, 200)
|
||||
httpjson[0].Method = "NOT_A_REAL_METHOD"
|
||||
|
||||
var acc testutil.Accumulator
|
||||
err := httpjson[0].Gather(&acc)
|
||||
|
||||
assert.NotNil(t, err)
|
||||
assert.Equal(t, 0, acc.NFields())
|
||||
}
|
||||
|
||||
// Test response to malformed JSON
|
||||
func TestHttpJsonBadJson(t *testing.T) {
|
||||
httpjson := genMockHttpJson(invalidJSON, 200)
|
||||
|
||||
var acc testutil.Accumulator
|
||||
err := httpjson[0].Gather(&acc)
|
||||
|
||||
assert.NotNil(t, err)
|
||||
assert.Equal(t, 0, acc.NFields())
|
||||
}
|
||||
|
||||
// Test response to an empty string as the response object
|
||||
func TestHttpJsonEmptyResponse(t *testing.T) {
|
||||
httpjson := genMockHttpJson(empty, 200)
|
||||
|
||||
var acc testutil.Accumulator
|
||||
err := httpjson[0].Gather(&acc)
|
||||
|
||||
assert.NotNil(t, err)
|
||||
assert.Equal(t, 0, acc.NFields())
|
||||
}
|
||||
|
||||
// Test that the proper values are ignored or collected
|
||||
func TestHttpJson200Tags(t *testing.T) {
|
||||
httpjson := genMockHttpJson(validJSONTags, 200)
|
||||
|
||||
for _, service := range httpjson {
|
||||
if service.Name == "other_webapp" {
|
||||
var acc testutil.Accumulator
|
||||
err := service.Gather(&acc)
|
||||
// Overwrite response_time with a fixed value so assertions are deterministic
|
||||
for _, p := range acc.Metrics {
|
||||
p.Fields["response_time"] = 1.0
|
||||
}
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 4, acc.NFields())
|
||||
for _, srv := range service.Servers {
|
||||
tags := map[string]string{"server": srv, "role": "master", "build": "123"}
|
||||
fields := map[string]interface{}{"value": float64(15), "response_time": float64(1)}
|
||||
mname := "httpjson_" + service.Name
|
||||
acc.AssertContainsTaggedFields(t, mname, fields, tags)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
72 plugins/inputs/influxdb/README.md (new file)
@@ -0,0 +1,72 @@
|
||||
# influxdb plugin
|
||||
|
||||
The influxdb plugin collects InfluxDB-formatted data from JSON endpoints.
|
||||
|
||||
With a configuration of:
|
||||
|
||||
```toml
|
||||
[[inputs.influxdb]]
|
||||
urls = [
|
||||
"http://127.0.0.1:8086/debug/vars",
|
||||
"http://192.168.2.1:8086/debug/vars"
|
||||
]
|
||||
```
|
||||
|
||||
And if 127.0.0.1 responds with this JSON:
|
||||
|
||||
```json
|
||||
{
|
||||
"k1": {
|
||||
"name": "fruit",
|
||||
"tags": {
|
||||
"kind": "apple"
|
||||
},
|
||||
"values": {
|
||||
"inventory": 371,
|
||||
"sold": 112
|
||||
}
|
||||
},
|
||||
"k2": {
|
||||
"name": "fruit",
|
||||
"tags": {
|
||||
"kind": "banana"
|
||||
},
|
||||
"values": {
|
||||
"inventory": 1000,
|
||||
"sold": 403
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
And if 192.168.2.1 responds like so:
|
||||
|
||||
```json
|
||||
{
|
||||
"k3": {
|
||||
"name": "transactions",
|
||||
"tags": {},
|
||||
"values": {
|
||||
"total": 100,
|
||||
"balance": 184.75
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Then the collected metrics will be:
|
||||
|
||||
```
|
||||
influxdb_fruit,url='http://127.0.0.1:8086/debug/vars',kind='apple' inventory=371.0,sold=112.0
|
||||
influxdb_fruit,url='http://127.0.0.1:8086/debug/vars',kind='banana' inventory=1000.0,sold=403.0
|
||||
|
||||
influxdb_transactions,url='http://192.168.2.1:8086/debug/vars' total=100.0,balance=184.75
|
||||
```
|
||||
|
||||
There are two important details to note about the collected metrics:
|
||||
|
||||
1. Even though the values in the JSON are displayed as integers, the metrics are reported as floats.
JSON encoders usually don't print the fractional part for round floats, and because you cannot change the type of an existing field in InfluxDB, we assume all numbers are floats (see the sketch after this list).
|
||||
|
||||
2. The top-level keys' names (in the example above, `"k1"`, `"k2"`, and `"k3"`) are not considered when recording the metrics.
|
||||
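The float behavior in point 1 above falls directly out of Go's `encoding/json`: when decoding into an untyped `map[string]interface{}`, every JSON number becomes a `float64`, whether or not it was written with a fractional part. A minimal sketch:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	var v map[string]interface{}
	// "inventory" is written as an integer in the JSON,
	// but encoding/json decodes it as a float64.
	if err := json.Unmarshal([]byte(`{"inventory": 371, "sold": 112}`), &v); err != nil {
		panic(err)
	}
	fmt.Printf("%T %v\n", v["inventory"], v["inventory"]) // float64 371
}
```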
147 plugins/inputs/influxdb/influxdb.go (new file)
@@ -0,0 +1,147 @@
|
||||
package influxdb
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
)
|
||||
|
||||
type InfluxDB struct {
|
||||
URLs []string `toml:"urls"`
|
||||
}
|
||||
|
||||
func (*InfluxDB) Description() string {
|
||||
return "Read InfluxDB-formatted JSON metrics from one or more HTTP endpoints"
|
||||
}
|
||||
|
||||
func (*InfluxDB) SampleConfig() string {
|
||||
return `
|
||||
## Works with InfluxDB debug endpoints out of the box,
|
||||
## but other services can use this format too.
|
||||
## See the influxdb plugin's README for more details.
|
||||
|
||||
## Multiple URLs from which to read InfluxDB-formatted JSON
|
||||
urls = [
|
||||
"http://localhost:8086/debug/vars"
|
||||
]
|
||||
`
|
||||
}
|
||||
|
||||
func (i *InfluxDB) Gather(acc telegraf.Accumulator) error {
|
||||
errorChannel := make(chan error, len(i.URLs))
|
||||
|
||||
var wg sync.WaitGroup
|
||||
for _, u := range i.URLs {
|
||||
wg.Add(1)
|
||||
go func(url string) {
|
||||
defer wg.Done()
|
||||
if err := i.gatherURL(acc, url); err != nil {
|
||||
errorChannel <- fmt.Errorf("[url=%s]: %s", url, err)
|
||||
}
|
||||
}(u)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
close(errorChannel)
|
||||
|
||||
// If there weren't any errors, we can return nil now.
|
||||
if len(errorChannel) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// There were errors, so join them all together as one big error.
|
||||
errorStrings := make([]string, 0, len(errorChannel))
|
||||
for err := range errorChannel {
|
||||
errorStrings = append(errorStrings, err.Error())
|
||||
}
|
||||
|
||||
return errors.New(strings.Join(errorStrings, "\n"))
|
||||
}
|
||||
|
||||
type point struct {
|
||||
Name string `json:"name"`
|
||||
Tags map[string]string `json:"tags"`
|
||||
Values map[string]interface{} `json:"values"`
|
||||
}
|
||||
|
||||
// Gathers data from a particular URL
|
||||
// Parameters:
|
||||
// acc : The telegraf Accumulator to use
|
||||
// url : endpoint to send request to
|
||||
//
|
||||
// Returns:
|
||||
// error: Any error that may have occurred
|
||||
func (i *InfluxDB) gatherURL(
|
||||
acc telegraf.Accumulator,
|
||||
url string,
|
||||
) error {
|
||||
resp, err := http.Get(url)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
// It would be nice to be able to decode into a map[string]point, but
|
||||
// we'll get a decoder error like:
|
||||
// `json: cannot unmarshal array into Go value of type influxdb.point`
|
||||
// if any of the values aren't objects.
|
||||
// To avoid that error, we decode by hand.
|
||||
dec := json.NewDecoder(resp.Body)
|
||||
|
||||
// Parse beginning of object
|
||||
if t, err := dec.Token(); err != nil {
|
||||
return err
|
||||
} else if t != json.Delim('{') {
|
||||
return errors.New("document root must be a JSON object")
|
||||
}
|
||||
|
||||
// Loop through rest of object
|
||||
for {
|
||||
// Nothing left in this object, we're done
|
||||
if !dec.More() {
|
||||
break
|
||||
}
|
||||
|
||||
// Read in a string key. We don't do anything with the top-level keys, so it's discarded.
|
||||
_, err := dec.Token()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Attempt to parse a whole object into a point.
|
||||
// It might be a non-object, like a string or array.
|
||||
// If we fail to decode it into a point, ignore it and move on.
|
||||
var p point
|
||||
if err := dec.Decode(&p); err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// If the object was a point, but was not fully initialized, ignore it and move on.
|
||||
if p.Name == "" || p.Tags == nil || p.Values == nil || len(p.Values) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
// Add a tag to indicate the source of the data.
|
||||
p.Tags["url"] = url
|
||||
|
||||
acc.AddFields(
|
||||
"influxdb_"+p.Name,
|
||||
p.Values,
|
||||
p.Tags,
|
||||
)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
inputs.Add("influxdb", func() telegraf.Input {
|
||||
return &InfluxDB{}
|
||||
})
|
||||
}
|
||||
97 plugins/inputs/influxdb/influxdb_test.go (new file)
@@ -0,0 +1,97 @@
|
||||
package influxdb_test
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"github.com/influxdata/telegraf/plugins/inputs/influxdb"
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestBasic(t *testing.T) {
|
||||
js := `
|
||||
{
|
||||
"_1": {
|
||||
"name": "foo",
|
||||
"tags": {
|
||||
"id": "ex1"
|
||||
},
|
||||
"values": {
|
||||
"i": -1,
|
||||
"f": 0.5,
|
||||
"b": true,
|
||||
"s": "string"
|
||||
}
|
||||
},
|
||||
"ignored": {
|
||||
"willBeRecorded": false
|
||||
},
|
||||
"ignoredAndNested": {
|
||||
"hash": {
|
||||
"is": "nested"
|
||||
}
|
||||
},
|
||||
"array": [
|
||||
"makes parsing more difficult than necessary"
|
||||
],
|
||||
"string": "makes parsing more difficult than necessary",
|
||||
"_2": {
|
||||
"name": "bar",
|
||||
"tags": {
|
||||
"id": "ex2"
|
||||
},
|
||||
"values": {
|
||||
"x": "x"
|
||||
}
|
||||
},
|
||||
"pointWithoutFields_willNotBeIncluded": {
|
||||
"name": "asdf",
|
||||
"tags": {
|
||||
"id": "ex3"
|
||||
},
|
||||
"values": {}
|
||||
}
|
||||
}
|
||||
`
|
||||
fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.URL.Path == "/endpoint" {
|
||||
_, _ = w.Write([]byte(js))
|
||||
} else {
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
}
|
||||
}))
|
||||
defer fakeServer.Close()
|
||||
|
||||
plugin := &influxdb.InfluxDB{
|
||||
URLs: []string{fakeServer.URL + "/endpoint"},
|
||||
}
|
||||
|
||||
var acc testutil.Accumulator
|
||||
require.NoError(t, plugin.Gather(&acc))
|
||||
|
||||
require.Len(t, acc.Metrics, 2)
|
||||
fields := map[string]interface{}{
|
||||
// JSON will truncate floats to integer representations.
|
||||
// Since there's no distinction in JSON, we can't assume it's an int.
|
||||
"i": -1.0,
|
||||
"f": 0.5,
|
||||
"b": true,
|
||||
"s": "string",
|
||||
}
|
||||
tags := map[string]string{
|
||||
"id": "ex1",
|
||||
"url": fakeServer.URL + "/endpoint",
|
||||
}
|
||||
acc.AssertContainsTaggedFields(t, "influxdb_foo", fields, tags)
|
||||
|
||||
fields = map[string]interface{}{
|
||||
"x": "x",
|
||||
}
|
||||
tags = map[string]string{
|
||||
"id": "ex2",
|
||||
"url": fakeServer.URL + "/endpoint",
|
||||
}
|
||||
acc.AssertContainsTaggedFields(t, "influxdb_bar", fields, tags)
|
||||
}
|
||||
51 plugins/inputs/jolokia/README.md (new file)
@@ -0,0 +1,51 @@
|
||||
# Telegraf plugin: Jolokia
|
||||
|
||||
#### Plugin arguments:
|
||||
- **context** string: Context root used to compose the jolokia URL
|
||||
- **servers** []Server: List of servers
|
||||
+ **name** string: Server's logical name
|
||||
+ **host** string: Server's IP address or hostname
|
||||
+ **port** string: Server's listening port
|
||||
- **metrics** []Metric
|
||||
+ **name** string: Name of the measurement
|
||||
+ **jmx** string: Jmx path that identifies mbeans attributes
|
||||
+ **pass** []string: Attributes to retain when collecting values
|
||||
+ **drop** []string: Attributes to drop when collecting values
|
||||
|
||||
#### Description
|
||||
|
||||
The Jolokia plugin collects JVM metrics exposed as MBean attributes through the Jolokia REST endpoint. All metrics
are collected for each configured server.
|
||||
|
||||
See: https://jolokia.org/
|
||||
|
||||
# Measurements:
|
||||
The Jolokia plugin produces one measurement for each configured metric, adding the server's `name`, `host` and `port` as tags.
|
||||
|
||||
Given a configuration like:
|
||||
|
||||
```ini
|
||||
[jolokia]
|
||||
|
||||
[[jolokia.servers]]
|
||||
name = "as-service-1"
|
||||
host = "127.0.0.1"
|
||||
port = "8080"
|
||||
|
||||
[[jolokia.servers]]
|
||||
name = "as-service-2"
|
||||
host = "127.0.0.1"
|
||||
port = "8180"
|
||||
|
||||
[[jolokia.metrics]]
|
||||
name = "heap_memory_usage"
|
||||
jmx = "/java.lang:type=Memory/HeapMemoryUsage"
|
||||
pass = ["used", "max"]
|
||||
```
|
||||
|
||||
The collected metrics will be:
|
||||
|
||||
```
|
||||
jolokia_heap_memory_usage name=as-service-1,host=127.0.0.1,port=8080 used=xxx,max=yyy
|
||||
jolokia_heap_memory_usage name=as-service-2,host=127.0.0.1,port=8180 used=vvv,max=zzz
|
||||
```
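For each server/metric pair, the plugin issues a GET against a URL composed as `http://<host>:<port><context><jmx>` (see `Gather` in jolokia.go below). Assuming the default context of `/jolokia/read` from the sample config (the example above does not set one), the metric above would be fetched from:

```
http://127.0.0.1:8080/jolokia/read/java.lang:type=Memory/HeapMemoryUsage
```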
|
||||
165 plugins/inputs/jolokia/jolokia.go (new file)
@@ -0,0 +1,165 @@
|
||||
package jolokia
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
)
|
||||
|
||||
type Server struct {
|
||||
Name string
|
||||
Host string
|
||||
Username string
|
||||
Password string
|
||||
Port string
|
||||
}
|
||||
|
||||
type Metric struct {
|
||||
Name string
|
||||
Jmx string
|
||||
}
|
||||
|
||||
type JolokiaClient interface {
|
||||
MakeRequest(req *http.Request) (*http.Response, error)
|
||||
}
|
||||
|
||||
type JolokiaClientImpl struct {
|
||||
client *http.Client
|
||||
}
|
||||
|
||||
func (c JolokiaClientImpl) MakeRequest(req *http.Request) (*http.Response, error) {
|
||||
return c.client.Do(req)
|
||||
}
|
||||
|
||||
type Jolokia struct {
|
||||
jClient JolokiaClient
|
||||
Context string
|
||||
Servers []Server
|
||||
Metrics []Metric
|
||||
}
|
||||
|
||||
func (j *Jolokia) SampleConfig() string {
|
||||
return `
|
||||
## This is the context root used to compose the jolokia url
|
||||
context = "/jolokia/read"
|
||||
|
||||
## List of servers exposing jolokia read service
|
||||
[[inputs.jolokia.servers]]
|
||||
name = "stable"
|
||||
host = "192.168.103.2"
|
||||
port = "8180"
|
||||
# username = "myuser"
|
||||
# password = "mypassword"
|
||||
|
||||
## List of metrics collected on the above servers
## Each metric consists of a name, a jmx path, and either
## a pass or drop slice attribute.
## This collects all heap memory usage metrics.
|
||||
[[inputs.jolokia.metrics]]
|
||||
name = "heap_memory_usage"
|
||||
jmx = "/java.lang:type=Memory/HeapMemoryUsage"
|
||||
`
|
||||
}
|
||||
|
||||
func (j *Jolokia) Description() string {
|
||||
return "Read JMX metrics through Jolokia"
|
||||
}
|
||||
|
||||
func (j *Jolokia) getAttr(requestUrl *url.URL) (map[string]interface{}, error) {
|
||||
// Create + send request
|
||||
req, err := http.NewRequest("GET", requestUrl.String(), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resp, err := j.jClient.MakeRequest(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
// Process response
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
err = fmt.Errorf("Response from url \"%s\" has status code %d (%s), expected %d (%s)",
|
||||
requestUrl,
|
||||
resp.StatusCode,
|
||||
http.StatusText(resp.StatusCode),
|
||||
http.StatusOK,
|
||||
http.StatusText(http.StatusOK))
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// read body
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Unmarshal json
|
||||
var jsonOut map[string]interface{}
|
||||
if err = json.Unmarshal([]byte(body), &jsonOut); err != nil {
|
||||
return nil, errors.New("Error decoding JSON response")
|
||||
}
|
||||
|
||||
return jsonOut, nil
|
||||
}
|
||||
|
||||
func (j *Jolokia) Gather(acc telegraf.Accumulator) error {
|
||||
context := j.Context //"/jolokia/read"
|
||||
servers := j.Servers
|
||||
metrics := j.Metrics
|
||||
tags := make(map[string]string)
|
||||
|
||||
for _, server := range servers {
|
||||
tags["server"] = server.Name
|
||||
tags["port"] = server.Port
|
||||
tags["host"] = server.Host
|
||||
fields := make(map[string]interface{})
|
||||
for _, metric := range metrics {
|
||||
|
||||
measurement := metric.Name
|
||||
jmxPath := metric.Jmx
|
||||
|
||||
// Prepare URL
|
||||
requestUrl, err := url.Parse("http://" + server.Host + ":" +
|
||||
server.Port + context + jmxPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if server.Username != "" || server.Password != "" {
|
||||
requestUrl.User = url.UserPassword(server.Username, server.Password)
|
||||
}
|
||||
|
||||
out, _ := j.getAttr(requestUrl)
|
||||
|
||||
if values, ok := out["value"]; ok {
|
||||
switch t := values.(type) {
|
||||
case map[string]interface{}:
|
||||
for k, v := range t {
|
||||
fields[measurement+"_"+k] = v
|
||||
}
|
||||
case interface{}:
|
||||
fields[measurement] = t
|
||||
}
|
||||
} else {
|
||||
fmt.Printf("Missing key 'value' in '%s' output response\n",
|
||||
requestUrl.String())
|
||||
}
|
||||
}
|
||||
acc.AddFields("jolokia", fields, tags)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
inputs.Add("jolokia", func() telegraf.Input {
|
||||
return &Jolokia{jClient: &JolokiaClientImpl{client: &http.Client{}}}
|
||||
})
|
||||
}
|
||||
116 plugins/inputs/jolokia/jolokia_test.go (new file)
@@ -0,0 +1,116 @@
|
||||
package jolokia
|
||||
|
||||
import (
|
||||
_ "fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
"github.com/stretchr/testify/assert"
|
||||
_ "github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
const validMultiValueJSON = `
|
||||
{
|
||||
"request":{
|
||||
"mbean":"java.lang:type=Memory",
|
||||
"attribute":"HeapMemoryUsage",
|
||||
"type":"read"
|
||||
},
|
||||
"value":{
|
||||
"init":67108864,
|
||||
"committed":456130560,
|
||||
"max":477626368,
|
||||
"used":203288528
|
||||
},
|
||||
"timestamp":1446129191,
|
||||
"status":200
|
||||
}`
|
||||
|
||||
const validSingleValueJSON = `
|
||||
{
|
||||
"request":{
|
||||
"path":"used",
|
||||
"mbean":"java.lang:type=Memory",
|
||||
"attribute":"HeapMemoryUsage",
|
||||
"type":"read"
|
||||
},
|
||||
"value":209274376,
|
||||
"timestamp":1446129256,
|
||||
"status":200
|
||||
}`
|
||||
|
||||
const invalidJSON = "I don't think this is JSON"
|
||||
|
||||
const empty = ""
|
||||
|
||||
var Servers = []Server{Server{Name: "as1", Host: "127.0.0.1", Port: "8080"}}
|
||||
var HeapMetric = Metric{Name: "heap_memory_usage", Jmx: "/java.lang:type=Memory/HeapMemoryUsage"}
|
||||
var UsedHeapMetric = Metric{Name: "heap_memory_usage", Jmx: "/java.lang:type=Memory/HeapMemoryUsage"}
|
||||
|
||||
type jolokiaClientStub struct {
|
||||
responseBody string
|
||||
statusCode int
|
||||
}
|
||||
|
||||
func (c jolokiaClientStub) MakeRequest(req *http.Request) (*http.Response, error) {
|
||||
resp := http.Response{}
|
||||
resp.StatusCode = c.statusCode
|
||||
resp.Body = ioutil.NopCloser(strings.NewReader(c.responseBody))
|
||||
return &resp, nil
|
||||
}
|
||||
|
||||
// Generates a pointer to a Jolokia object that uses a stubbed Jolokia client.
|
||||
// Parameters:
|
||||
// response : Body of the response that the mock HTTP client should return
|
||||
// statusCode: HTTP status code the mock HTTP client should return
|
||||
//
|
||||
// Returns:
|
||||
// *Jolokia: Pointer to a Jolokia object that uses the generated client stub
|
||||
func genJolokiaClientStub(response string, statusCode int, servers []Server, metrics []Metric) *Jolokia {
|
||||
return &Jolokia{
|
||||
jClient: jolokiaClientStub{responseBody: response, statusCode: statusCode},
|
||||
Servers: servers,
|
||||
Metrics: metrics,
|
||||
}
|
||||
}
|
||||
|
||||
// Test that the proper values are ignored or collected
|
||||
func TestHttpJsonMultiValue(t *testing.T) {
|
||||
jolokia := genJolokiaClientStub(validMultiValueJSON, 200, Servers, []Metric{HeapMetric})
|
||||
|
||||
var acc testutil.Accumulator
|
||||
err := jolokia.Gather(&acc)
|
||||
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, 1, len(acc.Metrics))
|
||||
|
||||
fields := map[string]interface{}{
|
||||
"heap_memory_usage_init": 67108864.0,
|
||||
"heap_memory_usage_committed": 456130560.0,
|
||||
"heap_memory_usage_max": 477626368.0,
|
||||
"heap_memory_usage_used": 203288528.0,
|
||||
}
|
||||
tags := map[string]string{
|
||||
"host": "127.0.0.1",
|
||||
"port": "8080",
|
||||
"server": "as1",
|
||||
}
|
||||
acc.AssertContainsTaggedFields(t, "jolokia", fields, tags)
|
||||
}
|
||||
|
||||
// Test that nothing is collected when the endpoint returns 404
|
||||
func TestHttpJsonOn404(t *testing.T) {
|
||||
|
||||
jolokia := genJolokiaClientStub(validMultiValueJSON, 404, Servers,
|
||||
[]Metric{UsedHeapMetric})
|
||||
|
||||
var acc testutil.Accumulator
|
||||
acc.SetDebug(true)
|
||||
err := jolokia.Gather(&acc)
|
||||
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, 0, len(acc.Metrics))
|
||||
}
|
||||
41 plugins/inputs/kafka_consumer/README.md (new file)
@@ -0,0 +1,41 @@
|
||||
# Kafka Consumer Input Plugin
|
||||
|
||||
The [Kafka](http://kafka.apache.org/) consumer plugin polls a specified Kafka
|
||||
topic and adds messages to InfluxDB. The plugin assumes messages follow the
|
||||
line protocol. [Consumer Group](http://godoc.org/github.com/wvanbergen/kafka/consumergroup)
|
||||
is used to talk to the Kafka cluster so multiple instances of telegraf can read
|
||||
from the same topic in parallel.
|
||||
|
||||
## Configuration
|
||||
|
||||
```toml
|
||||
# Read metrics from Kafka topic(s)
|
||||
[[inputs.kafka_consumer]]
|
||||
## topic(s) to consume
|
||||
topics = ["telegraf"]
|
||||
## an array of Zookeeper connection strings
|
||||
zookeeper_peers = ["localhost:2181"]
|
||||
## the name of the consumer group
|
||||
consumer_group = "telegraf_metrics_consumers"
|
||||
## Maximum number of metrics to buffer between collection intervals
|
||||
metric_buffer = 100000
|
||||
## Offset (must be either "oldest" or "newest")
|
||||
offset = "oldest"
|
||||
|
||||
## Data format to consume. This can be "json", "influx" or "graphite"
|
||||
## Each data format has its own unique set of configuration options, read
|
||||
## more about them here:
|
||||
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
|
||||
data_format = "influx"
|
||||
```
|
||||
|
||||
## Testing
|
||||
|
||||
Running integration tests requires running Zookeeper & Kafka. The following
|
||||
commands assume you're on OS X & using [boot2docker](http://boot2docker.io/) or docker-machine through [Docker Toolbox](https://www.docker.com/docker-toolbox).
|
||||
|
||||
To start Kafka & Zookeeper:
|
||||
|
||||
```
|
||||
docker run -d -p 2181:2181 -p 9092:9092 --env ADVERTISED_HOST=`boot2docker ip || docker-machine ip <your_machine_name>` --env ADVERTISED_PORT=9092 spotify/kafka
|
||||
```
|
||||
167 plugins/inputs/kafka_consumer/kafka_consumer.go (new file)
@@ -0,0 +1,167 @@
|
||||
package kafka_consumer
|
||||
|
||||
import (
|
||||
"log"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
"github.com/influxdata/telegraf/plugins/parsers"
|
||||
|
||||
"github.com/Shopify/sarama"
|
||||
"github.com/wvanbergen/kafka/consumergroup"
|
||||
)
|
||||
|
||||
type Kafka struct {
|
||||
ConsumerGroup string
|
||||
Topics []string
|
||||
ZookeeperPeers []string
|
||||
Consumer *consumergroup.ConsumerGroup
|
||||
|
||||
// Legacy metric buffer support
|
||||
MetricBuffer int
|
||||
// TODO remove PointBuffer, legacy support
|
||||
PointBuffer int
|
||||
|
||||
Offset string
|
||||
parser parsers.Parser
|
||||
|
||||
sync.Mutex
|
||||
|
||||
// channel for all incoming kafka messages
|
||||
in <-chan *sarama.ConsumerMessage
|
||||
// channel for all kafka consumer errors
|
||||
errs <-chan *sarama.ConsumerError
|
||||
done chan struct{}
|
||||
|
||||
// keep the accumulator internally:
|
||||
acc telegraf.Accumulator
|
||||
|
||||
// doNotCommitMsgs tells the parser not to call CommitUpTo on the consumer
|
||||
// this is mostly for test purposes, but there may be a use-case for it later.
|
||||
doNotCommitMsgs bool
|
||||
}
|
||||
|
||||
var sampleConfig = `
|
||||
## topic(s) to consume
|
||||
topics = ["telegraf"]
|
||||
## an array of Zookeeper connection strings
|
||||
zookeeper_peers = ["localhost:2181"]
|
||||
## the name of the consumer group
|
||||
consumer_group = "telegraf_metrics_consumers"
|
||||
## Offset (must be either "oldest" or "newest")
|
||||
offset = "oldest"
|
||||
|
||||
## Data format to consume. This can be "json", "influx" or "graphite"
|
||||
## Each data format has its own unique set of configuration options, read
|
||||
## more about them here:
|
||||
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
|
||||
data_format = "influx"
|
||||
`
|
||||
|
||||
func (k *Kafka) SampleConfig() string {
|
||||
return sampleConfig
|
||||
}
|
||||
|
||||
func (k *Kafka) Description() string {
|
||||
return "Read metrics from Kafka topic(s)"
|
||||
}
|
||||
|
||||
func (k *Kafka) SetParser(parser parsers.Parser) {
|
||||
k.parser = parser
|
||||
}
|
||||
|
||||
func (k *Kafka) Start(acc telegraf.Accumulator) error {
|
||||
k.Lock()
|
||||
defer k.Unlock()
|
||||
var consumerErr error
|
||||
|
||||
k.acc = acc
|
||||
|
||||
config := consumergroup.NewConfig()
|
||||
switch strings.ToLower(k.Offset) {
|
||||
case "oldest", "":
|
||||
config.Offsets.Initial = sarama.OffsetOldest
|
||||
case "newest":
|
||||
config.Offsets.Initial = sarama.OffsetNewest
|
||||
default:
|
||||
log.Printf("WARNING: Kafka consumer invalid offset '%s', using 'oldest'\n",
|
||||
k.Offset)
|
||||
config.Offsets.Initial = sarama.OffsetOldest
|
||||
}
|
||||
|
||||
if k.Consumer == nil || k.Consumer.Closed() {
|
||||
k.Consumer, consumerErr = consumergroup.JoinConsumerGroup(
|
||||
k.ConsumerGroup,
|
||||
k.Topics,
|
||||
k.ZookeeperPeers,
|
||||
config,
|
||||
)
|
||||
if consumerErr != nil {
|
||||
return consumerErr
|
||||
}
|
||||
|
||||
// Setup message and error channels
|
||||
k.in = k.Consumer.Messages()
|
||||
k.errs = k.Consumer.Errors()
|
||||
}
|
||||
|
||||
k.done = make(chan struct{})
|
||||
|
||||
// Start the kafka message reader
|
||||
go k.receiver()
|
||||
log.Printf("Started the kafka consumer service, peers: %v, topics: %v\n",
|
||||
k.ZookeeperPeers, k.Topics)
|
||||
return nil
|
||||
}
|
||||
|
||||
// receiver() reads all incoming messages from the consumer, and parses them into
|
||||
// influxdb metric points.
|
||||
func (k *Kafka) receiver() {
|
||||
for {
|
||||
select {
|
||||
case <-k.done:
|
||||
return
|
||||
case err := <-k.errs:
|
||||
log.Printf("Kafka Consumer Error: %s\n", err.Error())
|
||||
case msg := <-k.in:
|
||||
metrics, err := k.parser.Parse(msg.Value)
|
||||
if err != nil {
|
||||
log.Printf("KAFKA PARSE ERROR\nmessage: %s\nerror: %s",
|
||||
string(msg.Value), err.Error())
|
||||
}
|
||||
|
||||
for _, metric := range metrics {
|
||||
k.acc.AddFields(metric.Name(), metric.Fields(), metric.Tags(), metric.Time())
|
||||
}
|
||||
|
||||
if !k.doNotCommitMsgs {
|
||||
// TODO(cam) this locking can be removed if this PR gets merged:
|
||||
// https://github.com/wvanbergen/kafka/pull/84
|
||||
k.Lock()
|
||||
k.Consumer.CommitUpto(msg)
|
||||
k.Unlock()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (k *Kafka) Stop() {
|
||||
k.Lock()
|
||||
defer k.Unlock()
|
||||
close(k.done)
|
||||
if err := k.Consumer.Close(); err != nil {
|
||||
log.Printf("Error closing kafka consumer: %s\n", err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
func (k *Kafka) Gather(acc telegraf.Accumulator) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
inputs.Add("kafka_consumer", func() telegraf.Input {
|
||||
return &Kafka{}
|
||||
})
|
||||
}
|
||||
@@ -0,0 +1,96 @@
package kafka_consumer

import (
	"fmt"
	"testing"
	"time"

	"github.com/Shopify/sarama"
	"github.com/influxdata/telegraf/testutil"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/influxdata/telegraf/plugins/parsers"
)

func TestReadsMetricsFromKafka(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}

	brokerPeers := []string{testutil.GetLocalHost() + ":9092"}
	zkPeers := []string{testutil.GetLocalHost() + ":2181"}
	testTopic := fmt.Sprintf("telegraf_test_topic_%d", time.Now().Unix())

	// Send a Kafka message to the kafka host
	msg := "cpu_load_short,direction=in,host=server01,region=us-west value=23422.0 1422568543702900257"
	producer, err := sarama.NewSyncProducer(brokerPeers, nil)
	require.NoError(t, err)
	_, _, err = producer.SendMessage(
		&sarama.ProducerMessage{
			Topic: testTopic,
			Value: sarama.StringEncoder(msg),
		})
	require.NoError(t, err)
	defer producer.Close()

	// Start the Kafka Consumer
	k := &Kafka{
		ConsumerGroup:  "telegraf_test_consumers",
		Topics:         []string{testTopic},
		ZookeeperPeers: zkPeers,
		PointBuffer:    100000,
		Offset:         "oldest",
	}
	p, _ := parsers.NewInfluxParser()
	k.SetParser(p)

	// Verify that we can now gather the sent message
	var acc testutil.Accumulator

	// Sanity check
	assert.Equal(t, 0, len(acc.Metrics), "There should not be any points")
	if err := k.Start(&acc); err != nil {
		t.Fatal(err.Error())
	} else {
		defer k.Stop()
	}

	waitForPoint(&acc, t)

	// Gather points
	err = k.Gather(&acc)
	require.NoError(t, err)
	if len(acc.Metrics) == 1 {
		point := acc.Metrics[0]
		assert.Equal(t, "cpu_load_short", point.Measurement)
		assert.Equal(t, map[string]interface{}{"value": 23422.0}, point.Fields)
		assert.Equal(t, map[string]string{
			"host":      "server01",
			"direction": "in",
			"region":    "us-west",
		}, point.Tags)
		assert.Equal(t, time.Unix(0, 1422568543702900257).Unix(), point.Time.Unix())
	} else {
		t.Errorf("No points found in accumulator, expected 1")
	}
}

// waitForPoint waits for the metric that was sent to the kafka broker
// to arrive at the kafka consumer
func waitForPoint(acc *testutil.Accumulator, t *testing.T) {
	// Give the kafka container up to 5 seconds (1000 ticks of 5ms)
	// to get the point to the consumer
	ticker := time.NewTicker(5 * time.Millisecond)
	defer ticker.Stop()
	counter := 0
	for {
		select {
		case <-ticker.C:
			counter++
			if counter > 1000 {
				t.Fatal("Waited for 5s, point never arrived to consumer")
			} else if acc.NFields() == 1 {
				return
			}
		}
	}
}
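waitForPoint derives its 5s budget from 1000 ticks of 5ms. An equivalent shape (a sketch only, not in this diff) states the deadline directly with time.After, which avoids the counter arithmetic:

// waitForPointDeadline is a hypothetical variant of waitForPoint above.
func waitForPointDeadline(acc *testutil.Accumulator, t *testing.T) {
	timeout := time.After(5 * time.Second)
	ticker := time.NewTicker(5 * time.Millisecond)
	defer ticker.Stop()
	for {
		select {
		case <-timeout:
			t.Fatal("Waited for 5s, point never arrived to consumer")
		case <-ticker.C:
			if acc.NFields() == 1 {
				return
			}
		}
	}
}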
133 plugins/inputs/kafka_consumer/kafka_consumer_test.go Normal file
@@ -0,0 +1,133 @@
package kafka_consumer

import (
	"testing"
	"time"

	"github.com/influxdata/telegraf/plugins/parsers"
	"github.com/influxdata/telegraf/testutil"

	"github.com/Shopify/sarama"
	"github.com/stretchr/testify/assert"
)

const (
	testMsg         = "cpu_load_short,host=server01 value=23422.0 1422568543702900257"
	testMsgGraphite = "cpu.load.short.graphite 23422 1454780029"
	testMsgJSON     = "{\"a\": 5, \"b\": {\"c\": 6}}\n"
	invalidMsg      = "cpu_load_short,host=server01 1422568543702900257"
)

func newTestKafka() (*Kafka, chan *sarama.ConsumerMessage) {
	in := make(chan *sarama.ConsumerMessage, 1000)
	k := Kafka{
		ConsumerGroup:   "test",
		Topics:          []string{"telegraf"},
		ZookeeperPeers:  []string{"localhost:2181"},
		Offset:          "oldest",
		in:              in,
		doNotCommitMsgs: true,
		errs:            make(chan *sarama.ConsumerError, 1000),
		done:            make(chan struct{}),
	}
	return &k, in
}

// Test that the parser parses kafka messages into points
func TestRunParser(t *testing.T) {
	k, in := newTestKafka()
	acc := testutil.Accumulator{}
	k.acc = &acc
	defer close(k.done)

	k.parser, _ = parsers.NewInfluxParser()
	go k.receiver()
	in <- saramaMsg(testMsg)
	time.Sleep(time.Millisecond)

	assert.Equal(t, 1, acc.NFields())
}

// Test that the parser ignores invalid messages
func TestRunParserInvalidMsg(t *testing.T) {
	k, in := newTestKafka()
	acc := testutil.Accumulator{}
	k.acc = &acc
	defer close(k.done)

	k.parser, _ = parsers.NewInfluxParser()
	go k.receiver()
	in <- saramaMsg(invalidMsg)
	time.Sleep(time.Millisecond)

	assert.Equal(t, 0, acc.NFields())
}

// Test that parsed messages are added to the accumulator and
// remain visible after Gather
func TestRunParserAndGather(t *testing.T) {
	k, in := newTestKafka()
	acc := testutil.Accumulator{}
	k.acc = &acc
	defer close(k.done)

	k.parser, _ = parsers.NewInfluxParser()
	go k.receiver()
	in <- saramaMsg(testMsg)
	time.Sleep(time.Millisecond)

	k.Gather(&acc)

	assert.Equal(t, 1, acc.NFields())
	acc.AssertContainsFields(t, "cpu_load_short",
		map[string]interface{}{"value": float64(23422)})
}

// Test that the graphite parser parses kafka messages into points
func TestRunParserAndGatherGraphite(t *testing.T) {
	k, in := newTestKafka()
	acc := testutil.Accumulator{}
	k.acc = &acc
	defer close(k.done)

	k.parser, _ = parsers.NewGraphiteParser("_", []string{}, nil)
	go k.receiver()
	in <- saramaMsg(testMsgGraphite)
	time.Sleep(time.Millisecond)

	k.Gather(&acc)

	assert.Equal(t, 1, acc.NFields())
	acc.AssertContainsFields(t, "cpu_load_short_graphite",
		map[string]interface{}{"value": float64(23422)})
}

// Test that the JSON parser parses kafka messages into points
func TestRunParserAndGatherJSON(t *testing.T) {
	k, in := newTestKafka()
	acc := testutil.Accumulator{}
	k.acc = &acc
	defer close(k.done)

	k.parser, _ = parsers.NewJSONParser("kafka_json_test", []string{}, nil)
	go k.receiver()
	in <- saramaMsg(testMsgJSON)
	time.Sleep(time.Millisecond)

	k.Gather(&acc)

	assert.Equal(t, 2, acc.NFields())
	acc.AssertContainsFields(t, "kafka_json_test",
		map[string]interface{}{
			"a":   float64(5),
			"b_c": float64(6),
		})
}

func saramaMsg(val string) *sarama.ConsumerMessage {
	return &sarama.ConsumerMessage{
		Key:       nil,
		Value:     []byte(val),
		Offset:    0,
		Partition: 0,
	}
}
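The tests above repeat one setup per input format. A sketch of how the two influx cases could fold into a table-driven test, reusing the helpers from this file (hypothetical, not part of the diff):

// TestRunParserTable is an illustrative consolidation of TestRunParser
// and TestRunParserInvalidMsg.
func TestRunParserTable(t *testing.T) {
	cases := []struct {
		msg     string
		nFields int
	}{
		{testMsg, 1},    // valid line protocol yields one field
		{invalidMsg, 0}, // parse failures add nothing
	}
	for _, c := range cases {
		k, in := newTestKafka()
		acc := testutil.Accumulator{}
		k.acc = &acc
		k.parser, _ = parsers.NewInfluxParser()
		go k.receiver()
		in <- saramaMsg(c.msg)
		time.Sleep(time.Millisecond)
		close(k.done)
		assert.Equal(t, c.nFields, acc.NFields())
	}
}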
230 plugins/inputs/leofs/leofs.go Normal file
@@ -0,0 +1,230 @@
package leofs

import (
	"bufio"
	"fmt"
	"net/url"
	"os/exec"
	"strconv"
	"strings"
	"sync"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/inputs"
)

const oid = ".1.3.6.1.4.1.35450"

// defaultEndpoint is the SNMP endpoint of the Manager Master
const defaultEndpoint = "127.0.0.1:4020"

type ServerType int

const (
	ServerTypeManagerMaster ServerType = iota
	ServerTypeManagerSlave
	ServerTypeStorage
	ServerTypeGateway
)

type LeoFS struct {
	Servers []string
}

var KeyMapping = map[ServerType][]string{
	ServerTypeManagerMaster: {
		"num_of_processes",
		"total_memory_usage",
		"system_memory_usage",
		"processes_memory_usage",
		"ets_memory_usage",
		"num_of_processes_5min",
		"total_memory_usage_5min",
		"system_memory_usage_5min",
		"processes_memory_usage_5min",
		"ets_memory_usage_5min",
		"used_allocated_memory",
		"allocated_memory",
		"used_allocated_memory_5min",
		"allocated_memory_5min",
	},
	ServerTypeManagerSlave: {
		"num_of_processes",
		"total_memory_usage",
		"system_memory_usage",
		"processes_memory_usage",
		"ets_memory_usage",
		"num_of_processes_5min",
		"total_memory_usage_5min",
		"system_memory_usage_5min",
		"processes_memory_usage_5min",
		"ets_memory_usage_5min",
		"used_allocated_memory",
		"allocated_memory",
		"used_allocated_memory_5min",
		"allocated_memory_5min",
	},
	ServerTypeStorage: {
		"num_of_processes",
		"total_memory_usage",
		"system_memory_usage",
		"processes_memory_usage",
		"ets_memory_usage",
		"num_of_processes_5min",
		"total_memory_usage_5min",
		"system_memory_usage_5min",
		"processes_memory_usage_5min",
		"ets_memory_usage_5min",
		"num_of_writes",
		"num_of_reads",
		"num_of_deletes",
		"num_of_writes_5min",
		"num_of_reads_5min",
		"num_of_deletes_5min",
		"num_of_active_objects",
		"total_objects",
		"total_size_of_active_objects",
		"total_size",
		"num_of_replication_messages",
		"num_of_sync-vnode_messages",
		"num_of_rebalance_messages",
		"used_allocated_memory",
		"allocated_memory",
		"used_allocated_memory_5min",
		"allocated_memory_5min",
	},
	ServerTypeGateway: {
		"num_of_processes",
		"total_memory_usage",
		"system_memory_usage",
		"processes_memory_usage",
		"ets_memory_usage",
		"num_of_processes_5min",
		"total_memory_usage_5min",
		"system_memory_usage_5min",
		"processes_memory_usage_5min",
		"ets_memory_usage_5min",
		"num_of_writes",
		"num_of_reads",
		"num_of_deletes",
		"num_of_writes_5min",
		"num_of_reads_5min",
		"num_of_deletes_5min",
		"count_of_cache-hit",
		"count_of_cache-miss",
		"total_of_files",
		"total_cached_size",
		"used_allocated_memory",
		"allocated_memory",
		"used_allocated_memory_5min",
		"allocated_memory_5min",
	},
}

var serverTypeMapping = map[string]ServerType{
	"4020": ServerTypeManagerMaster,
	"4021": ServerTypeManagerSlave,
	"4010": ServerTypeStorage,
	"4011": ServerTypeStorage,
	"4012": ServerTypeStorage,
	"4013": ServerTypeStorage,
	"4000": ServerTypeGateway,
	"4001": ServerTypeGateway,
}

var sampleConfig = `
  ## An array of URIs to gather stats about LeoFS.
  ## Specify an ip or hostname with port, e.g. 127.0.0.1:4020
  servers = ["127.0.0.1:4021"]
`

func (l *LeoFS) SampleConfig() string {
	return sampleConfig
}

func (l *LeoFS) Description() string {
	return "Read metrics from a LeoFS Server via SNMP"
}

func (l *LeoFS) Gather(acc telegraf.Accumulator) error {
	if len(l.Servers) == 0 {
		return l.gatherServer(defaultEndpoint, ServerTypeManagerMaster, acc)
	}
	var wg sync.WaitGroup
	var outerr error
	for _, endpoint := range l.Servers {
		_, err := url.Parse(endpoint)
		if err != nil {
			return fmt.Errorf("Unable to parse the address:%s, err:%s", endpoint, err)
		}
		port, err := retrieveTokenAfterColon(endpoint)
		if err != nil {
			return err
		}
		st, ok := serverTypeMapping[port]
		if !ok {
			st = ServerTypeStorage
		}
		wg.Add(1)
		go func(endpoint string, st ServerType) {
			defer wg.Done()
			outerr = l.gatherServer(endpoint, st, acc)
		}(endpoint, st)
	}
	wg.Wait()
	return outerr
}

func (l *LeoFS) gatherServer(endpoint string, serverType ServerType, acc telegraf.Accumulator) error {
	cmd := exec.Command("snmpwalk", "-v2c", "-cpublic", endpoint, oid)
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return err
	}
	if err := cmd.Start(); err != nil {
		return err
	}
	defer cmd.Wait()
	scanner := bufio.NewScanner(stdout)
	if !scanner.Scan() {
		return fmt.Errorf("Unable to retrieve the node name")
	}
	nodeName, err := retrieveTokenAfterColon(scanner.Text())
	if err != nil {
		return err
	}
	nodeNameTrimmed := strings.Trim(nodeName, "\"")
	tags := map[string]string{
		"node": nodeNameTrimmed,
	}
	i := 0

	fields := make(map[string]interface{})
	for scanner.Scan() {
		keys := KeyMapping[serverType]
		// Guard against the walk returning more OIDs than we have keys for
		if i >= len(keys) {
			break
		}
		key := keys[i]
		val, err := retrieveTokenAfterColon(scanner.Text())
		if err != nil {
			return err
		}
		fVal, err := strconv.ParseFloat(val, 64)
		if err != nil {
			return fmt.Errorf("Unable to parse the value:%s, err:%s", val, err)
		}
		fields[key] = fVal
		i++
	}
	acc.AddFields("leofs", fields, tags)
	return nil
}

func retrieveTokenAfterColon(line string) (string, error) {
	tokens := strings.Split(line, ":")
	if len(tokens) != 2 {
		return "", fmt.Errorf("':' not found in the line:%s", line)
	}
	return strings.TrimSpace(tokens[1]), nil
}

func init() {
	inputs.Add("leofs", func() telegraf.Input {
		return &LeoFS{}
	})
}
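To make the per-line contract concrete: each snmpwalk output line yields one float field via retrieveTokenAfterColon plus ParseFloat. A self-contained sketch; the helper is copied from the plugin so the snippet runs alone, and the sample line shape matches the test fixtures below:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// Copied from the plugin above for a standalone demo.
func retrieveTokenAfterColon(line string) (string, error) {
	tokens := strings.Split(line, ":")
	if len(tokens) != 2 {
		return "", fmt.Errorf("':' not found in the line:%s", line)
	}
	return strings.TrimSpace(tokens[1]), nil
}

func main() {
	line := "iso.3.6.1.4.1.35450.15.2.0 = Gauge32: 186"
	val, err := retrieveTokenAfterColon(line) // "186"
	if err != nil {
		panic(err)
	}
	fVal, _ := strconv.ParseFloat(val, 64)
	fmt.Println(fVal) // 186
}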
173 plugins/inputs/leofs/leofs_test.go Normal file
@@ -0,0 +1,173 @@
package leofs

import (
	"io/ioutil"
	"log"
	"os"
	"os/exec"
	"testing"

	"github.com/influxdata/telegraf/testutil"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

var fakeSNMP4Manager = `
package main

import "fmt"

const output = ` + "`" + `iso.3.6.1.4.1.35450.15.1.0 = STRING: "manager_888@127.0.0.1"
iso.3.6.1.4.1.35450.15.2.0 = Gauge32: 186
iso.3.6.1.4.1.35450.15.3.0 = Gauge32: 46235519
iso.3.6.1.4.1.35450.15.4.0 = Gauge32: 32168525
iso.3.6.1.4.1.35450.15.5.0 = Gauge32: 14066068
iso.3.6.1.4.1.35450.15.6.0 = Gauge32: 5512968
iso.3.6.1.4.1.35450.15.7.0 = Gauge32: 186
iso.3.6.1.4.1.35450.15.8.0 = Gauge32: 46269006
iso.3.6.1.4.1.35450.15.9.0 = Gauge32: 32202867
iso.3.6.1.4.1.35450.15.10.0 = Gauge32: 14064995
iso.3.6.1.4.1.35450.15.11.0 = Gauge32: 5492634
iso.3.6.1.4.1.35450.15.12.0 = Gauge32: 60
iso.3.6.1.4.1.35450.15.13.0 = Gauge32: 43515904
iso.3.6.1.4.1.35450.15.14.0 = Gauge32: 60
iso.3.6.1.4.1.35450.15.15.0 = Gauge32: 43533983` + "`" +
	`
func main() {
	fmt.Println(output)
}
`

var fakeSNMP4Storage = `
package main

import "fmt"

const output = ` + "`" + `iso.3.6.1.4.1.35450.34.1.0 = STRING: "storage_0@127.0.0.1"
iso.3.6.1.4.1.35450.34.2.0 = Gauge32: 512
iso.3.6.1.4.1.35450.34.3.0 = Gauge32: 38126307
iso.3.6.1.4.1.35450.34.4.0 = Gauge32: 22308716
iso.3.6.1.4.1.35450.34.5.0 = Gauge32: 15816448
iso.3.6.1.4.1.35450.34.6.0 = Gauge32: 5232008
iso.3.6.1.4.1.35450.34.7.0 = Gauge32: 512
iso.3.6.1.4.1.35450.34.8.0 = Gauge32: 38113176
iso.3.6.1.4.1.35450.34.9.0 = Gauge32: 22313398
iso.3.6.1.4.1.35450.34.10.0 = Gauge32: 15798779
iso.3.6.1.4.1.35450.34.11.0 = Gauge32: 5237315
iso.3.6.1.4.1.35450.34.12.0 = Gauge32: 191
iso.3.6.1.4.1.35450.34.13.0 = Gauge32: 824
iso.3.6.1.4.1.35450.34.14.0 = Gauge32: 0
iso.3.6.1.4.1.35450.34.15.0 = Gauge32: 50105
iso.3.6.1.4.1.35450.34.16.0 = Gauge32: 196654
iso.3.6.1.4.1.35450.34.17.0 = Gauge32: 0
iso.3.6.1.4.1.35450.34.18.0 = Gauge32: 2052
iso.3.6.1.4.1.35450.34.19.0 = Gauge32: 50296
iso.3.6.1.4.1.35450.34.20.0 = Gauge32: 35
iso.3.6.1.4.1.35450.34.21.0 = Gauge32: 898
iso.3.6.1.4.1.35450.34.22.0 = Gauge32: 0
iso.3.6.1.4.1.35450.34.23.0 = Gauge32: 0
iso.3.6.1.4.1.35450.34.24.0 = Gauge32: 0
iso.3.6.1.4.1.35450.34.31.0 = Gauge32: 51
iso.3.6.1.4.1.35450.34.32.0 = Gauge32: 53219328
iso.3.6.1.4.1.35450.34.33.0 = Gauge32: 51
iso.3.6.1.4.1.35450.34.34.0 = Gauge32: 53351083` + "`" +
	`
func main() {
	fmt.Println(output)
}
`

var fakeSNMP4Gateway = `
package main

import "fmt"

const output = ` + "`" + `iso.3.6.1.4.1.35450.34.1.0 = STRING: "gateway_0@127.0.0.1"
iso.3.6.1.4.1.35450.34.2.0 = Gauge32: 465
iso.3.6.1.4.1.35450.34.3.0 = Gauge32: 61676335
iso.3.6.1.4.1.35450.34.4.0 = Gauge32: 46890415
iso.3.6.1.4.1.35450.34.5.0 = Gauge32: 14785011
iso.3.6.1.4.1.35450.34.6.0 = Gauge32: 5578855
iso.3.6.1.4.1.35450.34.7.0 = Gauge32: 465
iso.3.6.1.4.1.35450.34.8.0 = Gauge32: 61644426
iso.3.6.1.4.1.35450.34.9.0 = Gauge32: 46880358
iso.3.6.1.4.1.35450.34.10.0 = Gauge32: 14763002
iso.3.6.1.4.1.35450.34.11.0 = Gauge32: 5582125
iso.3.6.1.4.1.35450.34.12.0 = Gauge32: 191
iso.3.6.1.4.1.35450.34.13.0 = Gauge32: 827
iso.3.6.1.4.1.35450.34.14.0 = Gauge32: 0
iso.3.6.1.4.1.35450.34.15.0 = Gauge32: 50105
iso.3.6.1.4.1.35450.34.16.0 = Gauge32: 196650
iso.3.6.1.4.1.35450.34.17.0 = Gauge32: 0
iso.3.6.1.4.1.35450.34.18.0 = Gauge32: 30256
iso.3.6.1.4.1.35450.34.19.0 = Gauge32: 532158
iso.3.6.1.4.1.35450.34.20.0 = Gauge32: 34
iso.3.6.1.4.1.35450.34.21.0 = Gauge32: 1
iso.3.6.1.4.1.35450.34.31.0 = Gauge32: 53
iso.3.6.1.4.1.35450.34.32.0 = Gauge32: 55050240
iso.3.6.1.4.1.35450.34.33.0 = Gauge32: 53
iso.3.6.1.4.1.35450.34.34.0 = Gauge32: 55186538` + "`" +
	`
func main() {
	fmt.Println(output)
}
`

func makeFakeSNMPSrc(code string) string {
	path := os.TempDir() + "/test.go"
	err := ioutil.WriteFile(path, []byte(code), 0600)
	if err != nil {
		log.Fatalln(err)
	}
	return path
}

func buildFakeSNMPCmd(src string) {
	err := exec.Command("go", "build", "-o", "snmpwalk", src).Run()
	if err != nil {
		log.Fatalln(err)
	}
}

func testMain(t *testing.T, code string, endpoint string, serverType ServerType) {
	// Build the fake snmpwalk for test
	src := makeFakeSNMPSrc(code)
	defer os.Remove(src)
	buildFakeSNMPCmd(src)
	defer os.Remove("./snmpwalk")
	envPathOrigin := os.Getenv("PATH")
	// Refer to the fake snmpwalk
	os.Setenv("PATH", ".")
	defer os.Setenv("PATH", envPathOrigin)

	l := &LeoFS{
		Servers: []string{endpoint},
	}

	var acc testutil.Accumulator
	acc.SetDebug(true)

	err := l.Gather(&acc)
	require.NoError(t, err)

	floatMetrics := KeyMapping[serverType]

	for _, metric := range floatMetrics {
		assert.True(t, acc.HasFloatField("leofs", metric), metric)
	}
}

func TestLeoFSManagerMasterMetrics(t *testing.T) {
	testMain(t, fakeSNMP4Manager, "localhost:4020", ServerTypeManagerMaster)
}

func TestLeoFSManagerSlaveMetrics(t *testing.T) {
	testMain(t, fakeSNMP4Manager, "localhost:4021", ServerTypeManagerSlave)
}

func TestLeoFSStorageMetrics(t *testing.T) {
	testMain(t, fakeSNMP4Storage, "localhost:4010", ServerTypeStorage)
}

func TestLeoFSGatewayMetrics(t *testing.T) {
	testMain(t, fakeSNMP4Gateway, "localhost:4000", ServerTypeGateway)
}
254 plugins/inputs/lustre2/lustre2.go Normal file
@@ -0,0 +1,254 @@
/*
Lustre 2.x telegraf plugin

Lustre (http://lustre.org/) is an open-source, parallel file system
for HPC environments. It stores statistics about its activity in /proc.
*/
package lustre2

import (
	"path/filepath"
	"strconv"
	"strings"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/internal"
	"github.com/influxdata/telegraf/plugins/inputs"
)

// Lustre proc files can change between versions, so we want to future-proof
// by letting people choose what to look at.
type Lustre2 struct {
	Ost_procfiles []string
	Mds_procfiles []string

	// allFields maps an OST name to the metric fields associated with that OST
	allFields map[string]map[string]interface{}
}

var sampleConfig = `
  ## An array of /proc globs to search for Lustre stats
  ## If not specified, the default will work on Lustre 2.5.x
  ##
  # ost_procfiles = [
  #   "/proc/fs/lustre/obdfilter/*/stats",
  #   "/proc/fs/lustre/osd-ldiskfs/*/stats"
  # ]
  # mds_procfiles = ["/proc/fs/lustre/mdt/*/md_stats"]
`

/* The wanted fields would be a []string if not for the
lines that start with read_bytes/write_bytes and contain
both the byte count and the function call count
*/
type mapping struct {
	inProc   string // What to look for at the start of a line in /proc/fs/lustre/*
	field    uint32 // which field to extract from that line
	reportAs string // What measurement name to use
	tag      string // Additional tag to add for this metric
}

var wanted_ost_fields = []*mapping{
	{
		inProc:   "write_bytes",
		field:    6,
		reportAs: "write_bytes",
	},
	{ // line starts with 'write_bytes', but value write_calls is in second column
		inProc:   "write_bytes",
		field:    1,
		reportAs: "write_calls",
	},
	{
		inProc:   "read_bytes",
		field:    6,
		reportAs: "read_bytes",
	},
	{ // line starts with 'read_bytes', but value read_calls is in second column
		inProc:   "read_bytes",
		field:    1,
		reportAs: "read_calls",
	},
	{inProc: "cache_hit"},
	{inProc: "cache_miss"},
	{inProc: "cache_access"},
}

var wanted_mds_fields = []*mapping{
	{inProc: "open"},
	{inProc: "close"},
	{inProc: "mknod"},
	{inProc: "link"},
	{inProc: "unlink"},
	{inProc: "mkdir"},
	{inProc: "rmdir"},
	{inProc: "rename"},
	{inProc: "getattr"},
	{inProc: "setattr"},
	{inProc: "getxattr"},
	{inProc: "setxattr"},
	{inProc: "statfs"},
	{inProc: "sync"},
	{inProc: "samedir_rename"},
	{inProc: "crossdir_rename"},
}

func (l *Lustre2) GetLustreProcStats(fileglob string, wanted_fields []*mapping, acc telegraf.Accumulator) error {
	files, err := filepath.Glob(fileglob)
	if err != nil {
		return err
	}

	for _, file := range files {
		/* Turn /proc/fs/lustre/obdfilter/<ost_name>/stats and similar
		 * into just the object store target name.
		 * Assumption: the target name is always second to last,
		 * which is true in Lustre 2.1->2.5
		 */
		path := strings.Split(file, "/")
		name := path[len(path)-2]
		fields, ok := l.allFields[name]
		if !ok {
			fields = make(map[string]interface{})
			l.allFields[name] = fields
		}

		lines, err := internal.ReadLines(file)
		if err != nil {
			return err
		}

		for _, line := range lines {
			parts := strings.Fields(line)
			// Skip blank lines so parts[0] below cannot panic
			if len(parts) == 0 {
				continue
			}
			for _, wanted := range wanted_fields {
				var data uint64
				if parts[0] == wanted.inProc {
					wanted_field := wanted.field
					// if not set, assume field[1]. Shouldn't be field[0], as
					// that's a string
					if wanted_field == 0 {
						wanted_field = 1
					}
					data, err = strconv.ParseUint(parts[wanted_field], 10, 64)
					if err != nil {
						return err
					}
					report_name := wanted.inProc
					if wanted.reportAs != "" {
						report_name = wanted.reportAs
					}
					fields[report_name] = data
				}
			}
		}
	}
	return nil
}

// SampleConfig returns sample configuration message
func (l *Lustre2) SampleConfig() string {
	return sampleConfig
}

// Description returns description of Lustre2 plugin
func (l *Lustre2) Description() string {
	return "Read metrics from local Lustre service on OST, MDS"
}

// Gather reads stats from all lustre targets
func (l *Lustre2) Gather(acc telegraf.Accumulator) error {
	l.allFields = make(map[string]map[string]interface{})

	if len(l.Ost_procfiles) == 0 {
		// read/write bytes are in obdfilter/<ost_name>/stats
		err := l.GetLustreProcStats("/proc/fs/lustre/obdfilter/*/stats",
			wanted_ost_fields, acc)
		if err != nil {
			return err
		}
		// cache counters are in osd-ldiskfs/<ost_name>/stats
		err = l.GetLustreProcStats("/proc/fs/lustre/osd-ldiskfs/*/stats",
			wanted_ost_fields, acc)
		if err != nil {
			return err
		}
	}

	if len(l.Mds_procfiles) == 0 {
		// Metadata server stats
		err := l.GetLustreProcStats("/proc/fs/lustre/mdt/*/md_stats",
			wanted_mds_fields, acc)
		if err != nil {
			return err
		}
	}

	for _, procfile := range l.Ost_procfiles {
		err := l.GetLustreProcStats(procfile, wanted_ost_fields, acc)
		if err != nil {
			return err
		}
	}
	for _, procfile := range l.Mds_procfiles {
		err := l.GetLustreProcStats(procfile, wanted_mds_fields, acc)
		if err != nil {
			return err
		}
	}

	for name, fields := range l.allFields {
		tags := map[string]string{
			"name": name,
		}
		acc.AddFields("lustre2", fields, tags)
	}

	return nil
}

func init() {
	inputs.Add("lustre2", func() telegraf.Input {
		return &Lustre2{}
	})
}
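The mapping table's field indices are easiest to see against a concrete stats line. A sketch with hypothetical values; only the column layout (field 1 = call count, field 6 = byte total) is what the plugin depends on:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// A /proc/fs/lustre/obdfilter/*/stats line, hypothetical numbers:
	// name        count  unit    min   max      sum
	line := "write_bytes 9007 samples [bytes] 4096 1048576 93216358400"
	parts := strings.Fields(line)

	// mapping{inProc: "write_bytes", field: 6, reportAs: "write_bytes"}
	bytes, _ := strconv.ParseUint(parts[6], 10, 64) // 93216358400
	// mapping{inProc: "write_bytes", field: 1, reportAs: "write_calls"}
	calls, _ := strconv.ParseUint(parts[1], 10, 64) // 9007

	fmt.Println("write_bytes:", bytes, "write_calls:", calls)
}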
Some files were not shown because too many files have changed in this diff.